005.kubernetes之pods的資源限制和健康檢查


一 POD的容器資源限制

1.1 限制內容

有兩個參數

QoS Class: BestEffort,表示盡可能的滿足使用,級別較低,但當資源不夠時,會殺掉這個容器

resources: {}這里指定為空,則使用上面的參數

一般定義

resources:
      requests:         #表示最小需求
        cpu: "0.1"
        memory: "32Mi"
      limits:           #最大限制
        cpu: "1"
        memory: "128Mi"

1.2 配置資源限制

[root@docker-server1 pods]# vim nginx-pods.yaml

apiVersion: v1
kind: Pod
metadata:
  annotations:
    test: this is a test app
  labels:
    app: nginx
  name: nginx
  namespace: default
spec:
  containers:
  - env:
    - name: test
      value: aaa
    - name: test1
      value: bbb
    image: nginx
    imagePullPolicy: Always
    name: nginx
    ports:
    - containerPort: 80
      hostPort: 8080
      protocol: TCP
    resources:
      requests:
        cpu: "0.2"
        memory: "128Mi"
      limits:
        cpu: "2"
        memory: "2Gi"
  - command:
    - sh
    - -c
    - sleep 3600
    image: busybox
    imagePullPolicy: Always
    name: busybox
    resources:
      requests:
        cpu: "0.1"
        memory: "32Mi"
      limits:
        cpu: "1"
        memory: "128Mi"
  restartPolicy: Always

[root@docker-server1 pods]# kubectl delete pod nginx

1.3 創建pod

[root@docker-server1 pods]# kubectl apply -f nginx-pods.yaml

[root@docker-server1 pods]# kubectl get pods

NAME    READY   STATUS    RESTARTS   AGE
nginx   2/2     Running   0          26s

[root@docker-server1 pods]# kubectl get pods -o wide

NAME    READY   STATUS    RESTARTS   AGE   IP           NODE              NOMINATED NODE   READINESS GATES
nginx   2/2     Running   0          32s   10.244.2.9   192.168.132.133   <none>           <none>

1.4 查看資源限制信息

[root@docker-server1 pods]# kubectl describe pods nginx

Name:         nginx
Namespace:    default
Priority:     0
Node:         192.168.132.133/192.168.132.133
Start Time:   Thu, 09 Jan 2020 19:00:56 -0500
Labels:       app=nginx
Annotations:  kubectl.kubernetes.io/last-applied-configuration:
                {"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{"test":"this is a test app"},"labels":{"app":"nginx"},"name":"nginx","namespace...
              test: this is a test app
Status:       Running
IP:           10.244.2.9
IPs:
  IP:  10.244.2.9
Containers:
  nginx:
    Container ID:   docker://80287ddae6b23bbb066246a00e1d764182517ae065015fa56017ecc8627f7bd5
    Image:          nginx
    Image ID:       docker-pullable://nginx@sha256:8aa7f6a9585d908a63e5e418dc5d14ae7467d2e36e1ab4f0d8f9d059a3d071ce
    Port:           80/TCP
    Host Port:      8080/TCP
    State:          Running
      Started:      Thu, 09 Jan 2020 19:01:03 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     2
      memory:  2Gi
    Requests:
      cpu:     200m
      memory:  128Mi
    Environment:
      test:   aaa
      test1:  bbb
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-bwbrn (ro)
  busybox:
    Container ID:  docker://91ac7538f51d8ffa33328dc0af2e2b7ef094d5439e948b0fc849f444d94ec61b
    Image:         busybox
    Image ID:      docker-pullable://busybox@sha256:6915be4043561d64e0ab0f8f098dc2ac48e077fe23f488ac24b665166898115a
    Port:          <none>
    Host Port:     <none>
    Command:
      sh
      -c
      sleep 3600
    State:          Running
      Started:      Thu, 09 Jan 2020 19:01:08 -0500
    Ready:          True
    Restart Count:  0
    Limits:
      cpu:     1
      memory:  128Mi
    Requests:
      cpu:        100m
      memory:     32Mi
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-bwbrn (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  default-token-bwbrn:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-bwbrn
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason     Age    From                      Message
  ----    ------     ----   ----                      -------
  Normal  Pulling    2m37s  kubelet, 192.168.132.133  Pulling image "nginx"
  Normal  Scheduled  2m35s  default-scheduler         Successfully assigned default/nginx to 192.168.132.133
  Normal  Pulled     2m31s  kubelet, 192.168.132.133  Successfully pulled image "nginx"
  Normal  Created    2m31s  kubelet, 192.168.132.133  Created container nginx
  Normal  Started    2m31s  kubelet, 192.168.132.133  Started container nginx
  Normal  Pulling    2m31s  kubelet, 192.168.132.133  Pulling image "busybox"
  Normal  Pulled     2m26s  kubelet, 192.168.132.133  Successfully pulled image "busybox"
  Normal  Created    2m26s  kubelet, 192.168.132.133  Created container busybox
  Normal  Started    2m26s  kubelet, 192.168.132.133  Started container busybox

1.5 不同時配置requests和limits

[root@docker-server1 pods]# vim nginx-pods.yaml 

apiVersion: v1
kind: Pod
metadata:
  annotations:
    test: this is a test app
  labels:
    app: nginx
  name: nginx
  namespace: default
spec:
  containers:
  - env:
    - name: test
      value: aaa
    - name: test1
      value: bbb
    image: nginx
    imagePullPolicy: Always
    name: nginx
    ports:
    - containerPort: 80
      hostPort: 8080
      protocol: TCP
    resources:
      requests:
        cpu: "0.2"
        memory: "128Mi"
  - command:
    - sh
    - -c
    - sleep 3600
    image: busybox
    imagePullPolicy: Always
    name: busybox
    resources:
      limits:
        cpu: "1"
        memory: "128Mi"
  restartPolicy: Always

[root@docker-server1 pods]# kubectl delete pod nginx

[root@docker-server1 pods]# kubectl apply -f nginx-pods.yaml 

[root@docker-server1 pods]# kubectl describe pods nginx

Name:         nginx
Namespace:    default
Priority:     0
Node:         192.168.132.133/192.168.132.133
Start Time:   Thu, 09 Jan 2020 19:07:55 -0500
Labels:       app=nginx
Annotations:  kubectl.kubernetes.io/last-applied-configuration:
                {"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{"test":"this is a test app"},"labels":{"app":"nginx"},"name":"nginx","namespace...
              test: this is a test app
Status:       Pending
IP:           
IPs:          <none>
Containers:
  nginx:
    Container ID:   
    Image:          nginx
    Image ID:       
    Port:           80/TCP
    Host Port:      8080/TCP
    State:          Waiting
      Reason:       ContainerCreating
    Ready:          False
    Restart Count:  0
    Requests:
      cpu:     200m
      memory:  128Mi
    Environment:
      test:   aaa
      test1:  bbb
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-bwbrn (ro)
  busybox:
    Container ID:  
    Image:         busybox
    Image ID:      
    Port:          <none>
    Host Port:     <none>
    Command:
      sh
      -c
      sleep 3600
    State:          Waiting
      Reason:       ContainerCreating
    Ready:          False
    Restart Count:  0
    Limits:
      cpu:     1
      memory:  128Mi
    Requests:      #自己定義了requests
      cpu:        1
      memory:     128Mi
    Environment:  <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-bwbrn (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  default-token-bwbrn:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-bwbrn
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type    Reason     Age   From                      Message
  ----    ------     ----  ----                      -------
  Normal  Pulling    7s    kubelet, 192.168.132.133  Pulling image "nginx"
  Normal  Scheduled  6s    default-scheduler         Successfully assigned default/nginx to 192.168.132.133

說明定義了limits,會自動定義一個和limits相同資源的requests;但是只定義requests,不會自動定義limits設置

二 pods對容器的健康檢查

2.1 健康檢查種類

pod通過LivenessProbe和ReadinessProbe兩種探針來檢查容器的健康狀態:

  1 LivenessProbe用於判斷容器是否健康,如果LivenessProbe探測到容器不健康,kubelet將刪除該容器並根據容器的重啟策略做相應的處理。如果容器不包含LivenessProbe,則kubelet認為該容器的LivenessProbe探針永遠返回success。

  2 ReadinessProbe用於判斷容器是否啟動完成且准備接受請求。如果該探針探測到失敗,則Endpoint Controller將會從Service的Endpoint中刪除包含該容器Pod的條目。

使用httpGet配置示例

livenessProbe: 
  httpGet: 
    path: /
    port: 80 
    httpHeaders: 
    - name: X-Custom-Header 
      value: Awesome 
  initialDelaySeconds: 3   #容器延遲檢查時間
  periodSeconds: 3         #每3秒探測一次

檢查執行操作就是:curl -H  "X-Custom-Header:Awesome"  http://127.0.0.1:80/     使用curl傳遞一個頭部來檢查80端口根

查看官方介紹https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/

探測器參數

initialDelaySeconds:容器啟動后要等待多少秒后存活和就緒探測器才被初始化,默認是 0 秒,最小值是 0。
periodSeconds:執行探測的時間間隔(單位是秒)。默認是 10 秒。最小值是 1。
timeoutSeconds:探測超時前等待多少秒。默認值是 1 秒。最小值是 1。
successThreshold:探測器在失敗后,被視為成功的最小連續成功數。默認值是 1。存活探測的這個值必須是 1。最小值是 1。
failureThreshold:當 Pod 啟動了並且探測到失敗,Kubernetes 的重試次數。存活探測情況下的放棄就意味着重新啟動容器。就緒探測情況下的放棄 Pod 會被打上未就緒的標簽。默認值是 3。最小值是 1

2.2 定義一個存活態 HTTP 請求接口

示例配置

apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-http
spec:
  containers:
  - name: liveness
    image: k8s.gcr.io/liveness
    args:
    - /server
    livenessProbe:
      httpGet:
        path: /healthz
        port: 8080
        httpHeaders:
        - name: X-Custom-Header
          value: Awesome
      initialDelaySeconds: 3
      periodSeconds: 3

  在這個配置文件中,可以看到 Pod 也只有一個容器。periodSeconds 字段指定了 kubelet 每隔 3 秒執行一次存活探測。initialDelaySeconds 字段告訴 kubelet 在執行第一次探測前應該等待 3 秒。kubelet 會向容器內運行的服務(服務會監聽 8080 端口)發送一個 HTTP GET 請求來執行探測。如果服務上 /healthz 路徑下的處理程序返回成功碼,則 kubelet 認為容器是健康存活的。如果處理程序返回失敗碼,則 kubelet 會殺死這個容器並且重新啟動它。

  任何大於或等於 200 並且小於 400 的返回碼標示成功,其它返回碼都標示失敗。

HTTP 探測器可以在 httpGet 上配置額外的字段

host:連接使用的主機名,默認是 Pod 的 IP。也可以在 HTTP 頭中設置 “Host” 來代替。
scheme :用於設置連接主機的方式(HTTP 還是 HTTPS)。默認是 HTTP。
path:訪問 HTTP 服務的路徑。
httpHeaders:請求中自定義的 HTTP 頭。HTTP 頭字段允許重復。
port:訪問容器的端口號或者端口名。如果是數字,必須在 1~65535 之間。

  對於 HTTP 探測,kubelet 發送一個 HTTP 請求到指定的路徑和端口來執行檢測。除非 httpGet 中的 host 字段設置了,否則 kubelet 默認是給 Pod 的 IP 地址發送探測。如果 scheme 字段設置為了 HTTPS,kubelet 會跳過證書驗證發送 HTTPS 請求。大多數情況下,不需要設置host 字段。這里有個需要設置 host 字段的場景,假設容器監聽 127.0.0.1,並且 Pod 的 hostNetwork 字段設置為了 true。那么 httpGet 中的 host 字段應該設置為 127.0.0.1。可能更常見的情況是如果 Pod 依賴虛擬主機,你不應該設置 host 字段,而是應該在 httpHeaders 中設置 Host。

2.3 基於command探測

許多長時間運行的應用程序最終會過渡到斷開的狀態,除非重新啟動,否則無法恢復。Kubernetes 提供了存活探測器來發現並補救這種情況

配置實例

apiVersion: v1
kind: Pod
metadata:
  labels:
    test: liveness
  name: liveness-exec
spec:
  containers:
  - name: liveness
    image: k8s.gcr.io/busybox
    args:
    - /bin/sh
    - -c
    - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
    livenessProbe:
      exec:
        command:
        - cat
        - /tmp/healthy
      initialDelaySeconds: 5
      periodSeconds: 5

在這個配置文件中,可以看到 Pod 中只有一個容器。periodSeconds 字段指定了 kubelet 應該每 5 秒執行一次存活探測。initialDelaySeconds 字段告訴 kubelet 在執行第一次探測前應該等待 5 秒。kubelet 在容器內執行命令 cat /tmp/healthy 來進行探測。如果命令執行成功並且返回值為 0,kubelet 就會認為這個容器是健康存活的。如果這個命令返回非 0 值,kubelet 會殺死這個容器並重新啟動它。

當容器啟動時,執行如下的命令:

/bin/sh -c "touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600"

2.4 操作測試

[root@docker-server1 pods]# vim busybox-healthcheck.yaml

[root@docker-server1 pods]# kubectl apply -f busybox-healthcheck.yaml

[root@docker-server1 pods]# kubectl get pods

NAME            READY   STATUS    RESTARTS   AGE
liveness-exec   1/1     Running   0          9s
nginx           2/2     Running   1          97m

[root@docker-server1 pods]# kubectl get pods


NAME            READY   STATUS    RESTARTS   AGE
liveness-exec   1/1     Running   1          79s    #已經有一次restart
nginx           2/2     Running   1          98m

[root@docker-server1 pods]# kubectl describe pods liveness-exec

Name:         liveness-exec
Namespace:    default
Priority:     0
Node:         192.168.132.132/192.168.132.132
Start Time:   Thu, 09 Jan 2020 20:45:50 -0500
Labels:       test=liveness
Annotations:  kubectl.kubernetes.io/last-applied-configuration:
                {"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{},"labels":{"test":"liveness"},"name":"liveness-exec","namespace":"default"},"s...
Status:       Running
IP:           10.244.1.6
IPs:
  IP:  10.244.1.6
Containers:
  liveness:
    Container ID:  docker://b864e74fd7fc0c16f39d7b8ecaec1d771c5a63139fe4907b5d07389cc88d9f86
    Image:         k8s.gcr.io/busybox
    Image ID:      docker-pullable://k8s.gcr.io/busybox@sha256:d8d3bc2c183ed2f9f10e7258f84971202325ee6011ba137112e01e30f206de67
    Port:          <none>
    Host Port:     <none>
    Args:
      /bin/sh
      -c
      touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
    State:          Running
      Started:      Thu, 09 Jan 2020 20:48:24 -0500
    Last State:     Terminated
      Reason:       Error
      Exit Code:    137
      Started:      Thu, 09 Jan 2020 20:47:08 -0500
      Finished:     Thu, 09 Jan 2020 20:48:22 -0500
    Ready:          True
    Restart Count:  2
    Liveness:       exec [cat /tmp/healthy] delay=5s timeout=1s period=5s #success=1 #failure=3
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-bwbrn (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  default-token-bwbrn:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-bwbrn
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                       From                      Message
  ----     ------     ----                      ----                      -------
  Normal   Scheduled  2m36s                     default-scheduler         Successfully assigned default/liveness-exec to 192.168.132.132
  Warning  Unhealthy  3s (x6 over 87s)          kubelet, 192.168.132.132  Liveness probe failed: cat: can't open '/tmp/healthy': No such file or directory    #這里已經有一個不健康的狀態
  Normal   Killing    3s (x2 over 78s)          kubelet, 192.168.132.132  Container liveness failed liveness probe, will be restarted
  Normal   Pulling    <invalid> (x3 over 2m3s)  kubelet, 192.168.132.132  Pulling image "k8s.gcr.io/busybox"
  Normal   Pulled     <invalid> (x3 over 119s)  kubelet, 192.168.132.132  Successfully pulled image "k8s.gcr.io/busybox"
  Normal   Created    <invalid> (x3 over 119s)  kubelet, 192.168.132.132  Created container liveness
  Normal   Started    <invalid> (x3 over 119s)  kubelet, 192.168.132.132  Started container liveness

這里在殺掉后會重啟,使用不重啟操作

[root@docker-server1 pods]# kubectl delete pods liveness-exec

[root@docker-server1 pods]# vi busybox-healthcheck.yaml 

[root@docker-server1 pods]# kubectl apply -f busybox-healthcheck.yaml

[root@docker-server1 pods]# kubectl get pods

NAME            READY   STATUS    RESTARTS   AGE
liveness-exec   1/1     Running   0          7s
nginx           2/2     Running   1          109m

[root@docker-server1 pods]# kubectl get pods

NAME            READY   STATUS    RESTARTS   AGE
liveness-exec   0/1     Error     0          2m11s    #已經失敗
nginx           2/2     Running   1          111m

[root@docker-server1 pods]# kubectl describe pods liveness-exec

Name:         liveness-exec
Namespace:    default
Priority:     0
Node:         192.168.132.132/192.168.132.132
Start Time:   Thu, 09 Jan 2020 20:57:54 -0500
Labels:       test=liveness
Annotations:  kubectl.kubernetes.io/last-applied-configuration:
                {"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{},"labels":{"test":"liveness"},"name":"liveness-exec","namespace":"default"},"s...
Status:       Failed
IP:           10.244.1.7
IPs:
  IP:  10.244.1.7
Containers:
  liveness:
    Container ID:  docker://d1bc23c8d6ef3e773ebbfeeff058eea39f1363a046df58da47e05e247c28b159
    Image:         k8s.gcr.io/busybox
    Image ID:      docker-pullable://k8s.gcr.io/busybox@sha256:d8d3bc2c183ed2f9f10e7258f84971202325ee6011ba137112e01e30f206de67
    Port:          <none>
    Host Port:     <none>
    Args:
      /bin/sh
      -c
      touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
    State:          Terminated
      Reason:       Error
      Exit Code:    137
      Started:      Thu, 09 Jan 2020 20:57:56 -0500
      Finished:     Thu, 09 Jan 2020 20:59:10 -0500
    Ready:          False
    Restart Count:  0
    Liveness:       exec [cat /tmp/healthy] delay=5s timeout=1s period=5s #success=1 #failure=3
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-bwbrn (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  default-token-bwbrn:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-bwbrn
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                From                      Message
  ----     ------     ----               ----                      -------
  Normal   Scheduled  2m9s               default-scheduler         Successfully assigned default/liveness-exec to 192.168.132.132
  Normal   Pulling    96s                kubelet, 192.168.132.132  Pulling image "k8s.gcr.io/busybox"
  Normal   Pulled     95s                kubelet, 192.168.132.132  Successfully pulled image "k8s.gcr.io/busybox"
  Normal   Created    95s                kubelet, 192.168.132.132  Created container liveness
  Normal   Started    95s                kubelet, 192.168.132.132  Started container liveness
  Warning  Unhealthy  51s (x3 over 61s)  kubelet, 192.168.132.132  Liveness probe failed: cat: can't open '/tmp/healthy': No such file or directory
  Normal   Killing    51s                kubelet, 192.168.132.132  Stopping container liveness

2.5 定義 TCP 的存活探測

第三種類型的存活探測是使用 TCP 套接字。通過配置,kubelet 會嘗試在指定端口和容器建立套接字鏈接。如果能建立鏈接,這個容器就被看作是健康的,如果不能則這個容器就被看作是有問題的。

實例:

apiVersion: v1
kind: Pod
metadata:
  name: goproxy
  labels:
    app: goproxy
spec:
  containers:
  - name: goproxy
    image: k8s.gcr.io/goproxy:0.1
    ports:
    - containerPort: 8080
    readinessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 5
      periodSeconds: 10
    livenessProbe:
      tcpSocket:
        port: 8080
      initialDelaySeconds: 15
      periodSeconds: 20

  TCP 檢測的配置和 HTTP 檢測非常相似。下面這個例子同時使用就緒和存活探測器。kubelet 會在容器啟動 5 秒后發送第一個就緒探測。這會嘗試連接 goproxy 容器的 8080 端口。如果探測成功,這個 Pod 會被標記為就緒狀態,kubelet 將繼續每隔 10 秒運行一次檢測。

  除了就緒探測,這個配置包括了一個存活探測。kubelet 會在容器啟動 15 秒后進行第一次存活探測。就像就緒探測一樣,會嘗試連接 goproxy 容器的 8080 端口。如果存活探測失敗,這個容器會被重新啟動

kubectl apply -f https://k8s.io/examples/pods/probe/tcp-liveness-readiness.yaml

三 pod的其他操作

初始化容器

Init Container在所有容器運行之前執行(run-to-completion),常用來初始化配置。就是在業務容器啟動之前,啟動一個臨時的初始化容器,用於完成業務容器啟動之前的初始化操作,當初始化容器完成初始化任務后,然后退出,業務容器開始啟動

先學習volume掛載

3.1 volume掛載

[root@docker-server1 pods]# vim nginx-pods-volumes.yaml

apiVersion: v1
kind: Pod
metadata:
  annotations:
    test: this is a test app
  labels:
    app: nginx
  name: nginx-volume
  namespace: default
spec:
  volumes: 
    - name: datadir
      hostPath:
        path: /data
  containers:
  - env:
    - name: test
      value: aaa
    - name: test1
      value: bbb
    volumeMounts:
      - name: datadir
        mountPath: /usr/share/nginx/html
    image: nginx
    imagePullPolicy: Always
    name: nginx
    ports:
    - containerPort: 80
      hostPort: 8080
      protocol: TCP
    resources:
      requests:
        cpu: "0.2"
        memory: "128Mi"
  restartPolicy: Always

[root@docker-server1 pods]# kubectl apply -f nginx-pods-volumes.yaml

[root@docker-server1 pods]# kubectl get pods

NAME           READY   STATUS    RESTARTS   AGE
goproxy        1/1     Running   0          38m
nginx          2/2     Running   2          166m
nginx-volume   1/1     Running   0          68s

[root@docker-server1 pods]# kubectl get pods -o wide

NAME           READY   STATUS    RESTARTS   AGE    IP            NODE              NOMINATED NODE   READINESS GATES
goproxy        1/1     Running   0          38m    10.244.1.8    192.168.132.132   <none>           <none>
nginx          2/2     Running   2          166m   10.244.2.10   192.168.132.133   <none>           <none>
nginx-volume   1/1     Running   0          105s   10.244.1.9    192.168.132.132   <none>           <none>

[root@docker-server1 pods]# curl http://10.244.1.9

<html>
<head><title>403 Forbidden</title></head>
<body>
<center><h1>403 Forbidden</h1></center>
<hr><center>nginx/1.17.7</center>
</body>
</html>

容器運行在docker-server2上

[root@docker-server2 ~]# echo  "index on docker-server2"  > /data/index.html

再次訪問

[root@docker-server1 pods]# curl http://10.244.1.9

通過這種方式可以掛載一個容器卷,但是不可取,因為掛載本地目錄,k8s容器換到其他節點,數據就會變化

3.2 初始化容器

[root@docker-server1 pods]# vim init-container.yaml

apiVersion: v1
kind: Pod
metadata:
  name: init-demo
spec:
  initContainers:
  - name: install
    image: busybox
    command:
      - "sh"
      - "-c"
      - >
        echo "nginx in kubernetes" > /work-dir/index.html
    volumeMounts:
      - name: workdir
        mountPath: "/work-dir"
  volumes:
  - name: workdir
    emptyDir: {}
  containers:
    - name: nginx
      image: nginx
      ports:
        - containerPort: 80
      volumeMounts:
        - name: workdir
          mountPath: /usr/share/nginx/html 

emptyDir: {}這個不指定,表示會在本地找一個臨時的目錄,掛載到容器中,其生命周期和容器相同,但是這個目錄可以讓兩個容器都可以看到,這樣當初始化容器任務結束后,業務容器就可以讀取這個目錄中的數據

[root@docker-server1 pods]# kubectl apply -f init-container.yaml 

[root@docker-server1 pods]# kubectl get pods

NAME           READY   STATUS    RESTARTS   AGE
goproxy        1/1     Running   0          58m
init-demo      1/1     Running   0          40s
nginx          2/2     Running   3          3h6m
nginx-volume   1/1     Running   0          21m

[root@docker-server1 pods]# kubectl get pods -o wide

NAME           READY   STATUS    RESTARTS   AGE    IP            NODE              NOMINATED NODE   READINESS GATES
goproxy        1/1     Running   0          58m    10.244.1.8    192.168.132.132   <none>           <none>
init-demo      1/1     Running   0          44s    10.244.1.10   192.168.132.132   <none>           <none>
nginx          2/2     Running   3          3h6m   10.244.2.10   192.168.132.133   <none>           <none>
nginx-volume   1/1     Running   0          21m    10.244.1.9    192.168.132.132   <none>           <none>

[root@docker-server1 pods]# curl http://10.244.1.10

3.3 生命周期管理

容器生命周期的鈎子

  • 容器生命周期鈎子(Container Lifecycle Hooks)監聽容器生命周期的特定事件,並在事件發生時執行已注冊的回調函數。支持兩種鈎子:
  • postStart: 容器創建后立即執行,注意由於是異步執行,它無法保證一定在ENTRYPOINT之前運行。如果失敗,容器會被殺死,並根據RestartPolicy決定是否重啟
  • preStop:容器終止前執行,常用於資源清理。如果失敗,容器同樣也會被殺死
  • 而鈎子的回調函數支持兩種方式:
  • exec:在容器內執行命令,如果命令的退出狀態碼是0表示執行成功,否則表示失敗
  • httpGet:向指定URL發起GET請求,如果返回的HTTP狀態碼在[200, 400)之間表示請求成功,否則表示失敗

[root@docker-server1 pods]# vim nginx-pods-lifecycle.yaml 

apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-demo
spec:
  containers:
  - name: lifecycle-demo-container
    image: nginx
    lifecycle:
      postStart:
        httpGet:
          path: /
          port: 80
      preStop:
        exec:
          command: ["/usr/sbin/nginx","-s","quit"]

[root@docker-server1 pods]# kubectl apply -f nginx-pods-lifecycle.yaml

[root@docker-server1 pods]# kubectl get pods -o wide

NAME             READY   STATUS              RESTARTS   AGE     IP            NODE              NOMINATED NODE   READINESS GATES
goproxy          1/1     Running             0          76m     10.244.1.8    192.168.132.132   <none>           <none>
init-demo        1/1     Running             0          18m     10.244.1.10   192.168.132.132   <none>           <none>
lifecycle-demo   0/1     ContainerCreating   0          4s      <none>        192.168.132.132   <none>           <none>
nginx            2/2     Running             3          3h24m   10.244.2.10   192.168.132.133   <none>           <none>
nginx-volume     1/1     Running             0          39m     10.244.1.9    192.168.132.132   <none>           <none>

3.4 靜態POD

  kubelet的運行方式有兩種,一種是通過與kubernetes的master節點連接,接受任務並執行。另外一種則是可以作為一個獨立組件運行。監聽某個目錄中的yml文件,當發現變化,就執行yml文件,我們可以在這個目錄中定義啟動Pod的yml文件,這樣不需要master端,kubelet也會自行啟動pod,但通過這方式啟動的pod沒法被master端調度。只能在當前的kubelet主機節點上運行,這種pod就被稱作靜態pod

  kubeadm初始化集群的方式就是借助了靜態Pod的方式將容器運行在kubelet管理的靜態Pod中

比如在安裝master節點的時候,kubeadm安裝kubectl,kubelet,master節點就是以靜態POD方式運行

[root@docker-server1 pods]# cd /etc/kubernetes/

[root@docker-server1 kubernetes]# ll

[root@docker-server1 kubernetes]# cd manifests/

[root@docker-server1 manifests]# ll

[root@docker-server1 manifests]# vim  etcd.yaml 

apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: null
  labels:
    component: etcd
    tier: control-plane
  name: etcd
  namespace: kube-system
spec:
  containers:
  - command:
    - etcd
    - --advertise-client-urls=https://192.168.132.131:2379
    - --cert-file=/etc/kubernetes/pki/etcd/server.crt
    - --client-cert-auth=true
    - --data-dir=/var/lib/etcd
    - --initial-advertise-peer-urls=https://192.168.132.131:2380
    - --initial-cluster=192.168.132.131=https://192.168.132.131:2380
    - --key-file=/etc/kubernetes/pki/etcd/server.key
    - --listen-client-urls=https://127.0.0.1:2379,https://192.168.132.131:2379
    - --listen-metrics-urls=http://127.0.0.1:2381
    - --listen-peer-urls=https://192.168.132.131:2380
    - --name=192.168.132.131
    - --peer-cert-file=/etc/kubernetes/pki/etcd/peer.crt
    - --peer-client-cert-auth=true
    - --peer-key-file=/etc/kubernetes/pki/etcd/peer.key
    - --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    - --snapshot-count=10000
    - --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.crt
    image: k8s.gcr.io/etcd:3.4.3-0
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 8
      httpGet:
        host: 127.0.0.1
        path: /health
        port: 2381
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    name: etcd
    resources: {}
    volumeMounts:
    - mountPath: /var/lib/etcd
      name: etcd-data
    - mountPath: /etc/kubernetes/pki/etcd
      name: etcd-certs
  hostNetwork: true
  priorityClassName: system-cluster-critical
  volumes:
  - hostPath:
      path: /etc/kubernetes/pki/etcd
      type: DirectoryOrCreate
    name: etcd-certs
  - hostPath:
      path: /var/lib/etcd
      type: DirectoryOrCreate
    name: etcd-data
status: {}

嘗試修改yml文件

這里添加一個參數

再次查看,容器已經重啟

[root@docker-server1 manifests]# docker ps -a|grep apiserver

c28921f0415        0cae8d5cc64c           "kube-apiserver --ad…"   37 seconds ago       Up 36 seconds                                    k8s_kube-apiserver_kube-apiserver-192.168.132.131_kube-system_35be3047d357a34596bdda175ae3edd5_1
f5e6441e09a0        0cae8d5cc64c           "kube-apiserver --ad…"   10 hours ago         Exited (0) 37 seconds ago                        k8s_kube-apiserver_kube-apiserver-192.168.132.131_kube-system_35be3047d357a34596bdda175ae3edd5_0
f5aff40580f5        k8s.gcr.io/pause:3.1   "/pause"                 10 hours ago         Up 10 hours                                      k8s_POD_kube-apiserver-192.168.132.131_kube-system_35be3047d357a34596bdda175ae3edd5_0

關於POD的部分學習到這里


博主聲明:本文的內容來源主要來自譽天教育晏威老師,由本人實驗完成操作驗證,需要的博友請聯系譽天教育(http://www.yutianedu.com/),獲得官方同意或者晏老師(https://www.cnblogs.com/breezey/)本人同意即可轉載,謝謝!


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM