kubernetes(三)--Pod资源清单及pod控制器

一、pod的存活性及就绪性探测

1、使用exec command方式探测

[root@master kube_manifest]# vim liveness-exec.yml 

apiVersion: v1
kind: Pod
metadata:
  name: liveness-exec-pod
  namespace: default
spec:
  containers:
  - name: liveness-exec-container
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    # Create /tmp/healthy, delete it after 30s, then keep the container
    # alive for an hour — so the liveness probe below passes at first and
    # starts failing 30s in, demonstrating a probe-triggered restart.
    command: ["/bin/sh","-c","touch /tmp/healthy; sleep 30;rm -rf /tmp/healthy; sleep 3600"]
    livenessProbe:
      # Exec probe: healthy while /tmp/healthy exists; after it is removed
      # the probe fails and (after the default 3 consecutive failures)
      # kubelet kills and restarts the container.
      exec:
        command: ["test","-e","/tmp/healthy"]
      initialDelaySeconds: 1   # wait 1s after container start before first probe
      periodSeconds: 3         # probe every 3 seconds

使用kubectl create命令创建pod

[root@master kube_manifest]# kubectl create -f liveness-exec.yml 
pod/liveness-exec-pod created

等待一段时间后,查看pod的状态,可以看到pod的最近一次状态是Error,此时会被重启

[root@master kube_manifest]# kubectl describe pods liveness-exec-pod
Name:         liveness-exec-pod
Namespace:    default
Priority:     0
Node:         node2/192.168.147.134
Start Time:   Mon, 01 Jun 2020 09:38:01 +0800
Labels:       <none>
Annotations:  <none>
Status:       Running
IP:           10.244.2.7
IPs:
  IP:  10.244.2.7
Containers:
  liveness-exec-container:
    Container ID:  docker://8f65d30ff8e4f5d2b8740cbe7fda13d530f56b10007b645cd5d4248696d48dc8
    Image:         busybox:latest
    Image ID:      docker-pullable://busybox@sha256:836945da1f3afe2cfff376d379852bbb82e0237cb2925d53a13f53d6e8a8c48c
    Port:          <none>
    Host Port:     <none>
    Command:
      /bin/sh
      -c
      touch /tmp/healthy; sleep 30;rm -rf /tmp/healthy; sleep 3600
    State:          Running
      Started:      Mon, 01 Jun 2020 09:41:28 +0800
    Last State:     Terminated
      Reason:       Error
      Exit Code:    137
      Started:      Mon, 01 Jun 2020 09:40:18 +0800
      Finished:     Mon, 01 Jun 2020 09:41:27 +0800
    Ready:          True
    Restart Count:  3
    Liveness:       exec [test -e /tmp/healthy] delay=1s timeout=1s period=3s #success=1 #failure=3
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-z5gjj (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  default-token-z5gjj:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-z5gjj
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                  From               Message
  ----     ------     ----                 ----               -------
  Normal   Scheduled  4m33s                default-scheduler  Successfully assigned default/liveness-exec-pod to node2
  Normal   Killing    97s (x3 over 3m55s)  kubelet, node2     Container liveness-exec-container failed liveness probe, will be restarted
  Normal   Pulled     67s (x4 over 4m32s)  kubelet, node2     Container image "busybox:latest" already present on machine
  Normal   Created    67s (x4 over 4m32s)  kubelet, node2     Created container liveness-exec-container
  Normal   Started    66s (x4 over 4m31s)  kubelet, node2     Started container liveness-exec-container
  Warning  Unhealthy  34s (x10 over 4m1s)  kubelet, node2     Liveness probe failed:

使用kubectl get pod命令查看发现pod已经重启了7次,状态为 CrashLoopBackOff

[root@master kube_manifest]# kubectl get pod 
NAME                READY   STATUS             RESTARTS   AGE
client              1/1     Running            1          2d23h
liveness-exec-pod   0/1     CrashLoopBackOff   7          15m
myapp               1/1     Running            1          2d22h
nginx-deploy        1/1     Running            2          6d19h

2、使用httpGet方式进行探测

[root@master kube_manifest]# vim liveness-httpget.yml 

apiVersion: v1
kind: Pod
metadata:
  name: liveness-httpget-pod
  namespace: default
spec:
  containers:
  - name: liveness-httpget-container
    image: ikubernetes/myapp:v1
    imagePullPolicy: IfNotPresent
    ports:
    - name: http          # named port, referenced by the probe below
      containerPort: 80
    livenessProbe:
      # HTTP GET http://<pod-ip>:http/index.html; a non-success status
      # (e.g. the 404 seen in the transcript after index.html is deleted)
      # counts as a failure and leads to a container restart.
      httpGet:
        port: http
        path: /index.html
      initialDelaySeconds: 1   # first probe 1s after container start
      periodSeconds: 3         # probe every 3 seconds

创建Pod,此时pod是可以正常运行的

[root@master kube_manifest]# kubectl create -f liveness-httpget.yml 
pod/liveness-httpget-pod created

我们连接进pod中,人为地把index.html文件删除

[root@master kube_manifest]# kubectl exec -it liveness-httpget-pod -- /bin/sh
/ # ls /usr/share/nginx/html/
50x.html    index.html
/ # rm -f /usr/share/nginx/html/index.html
/ # ls /usr/share/nginx/html/
50x.html
/ # command terminated with exit code 137
##此时发现pod不正常退出了

查看pod状态

[root@master kube_manifest]# kubectl describe pods liveness-httpget-pod
Name:         liveness-httpget-pod
Namespace:    default
Priority:     0
Node:         node2/192.168.147.134
Start Time:   Mon, 01 Jun 2020 10:40:16 +0800
Labels:       <none>
Annotations:  <none>
Status:       Running
IP:           10.244.2.9
IPs:
  IP:  10.244.2.9
Containers:
  liveness-httpget-container:
    Container ID:   docker://c82316e57267ebd04f8b6c06e6ab8b07c7745d3ce388626a2b72a416f41693e3
    Image:          ikubernetes/myapp:v1
    Image ID:       docker-pullable://ikubernetes/myapp@sha256:9c3dc30b5219788b2b8a4b065f548b922a34479577befb54b03330999d30d513
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Mon, 01 Jun 2020 10:55:41 +0800
    Last State:     Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Mon, 01 Jun 2020 10:41:05 +0800
      Finished:     Mon, 01 Jun 2020 10:55:41 +0800
    Ready:          True
    Restart Count:  1
    Liveness:       http-get http://:http/index.html delay=1s timeout=1s period=3s #success=1 #failure=3
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-z5gjj (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  default-token-z5gjj:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-z5gjj
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason     Age                   From               Message
  ----     ------     ----                  ----               -------
  Normal   Scheduled  23m                   default-scheduler  Successfully assigned default/liveness-httpget-pod to node2
  Normal   Pulling    23m                   kubelet, node2     Pulling image "ikubernetes/myapp:v1"
  Normal   Pulled     22m                   kubelet, node2     Successfully pulled image "ikubernetes/myapp:v1"
  Normal   Created    7m55s (x2 over 22m)   kubelet, node2     Created container liveness-httpget-container
  Normal   Started    7m55s (x2 over 22m)   kubelet, node2     Started container liveness-httpget-container
  Warning  Unhealthy  7m55s (x3 over 8m1s)  kubelet, node2     Liveness probe failed: HTTP probe failed with statuscode: 404
  Normal   Killing    7m55s                 kubelet, node2     Container liveness-httpget-container failed liveness probe, will be restarted
  Normal   Pulled     7m55s                 kubelet, node2     Container image "ikubernetes/myapp:v1" already present on machine

3、就绪性探测

[root@master kube_manifest]# vim readness-httpget.yml 

apiVersion: v1
kind: Pod
metadata:
  name: readiness-httpget-pod
  namespace: default
spec:
  containers:
  - name: readiness-httpget-container
    image: ikubernetes/myapp:v1
    imagePullPolicy: IfNotPresent
    ports:
    - name: http          # named port, referenced by the probe below
      containerPort: 80
    # Readiness (not liveness): on failure the pod is only marked NotReady
    # (READY 0/1) and removed from service endpoints — the container is
    # NOT restarted, as the transcript below demonstrates.
    readinessProbe:
      httpGet:
        port: http
        path: /index.html
      initialDelaySeconds: 1   # first probe 1s after container start
      periodSeconds: 3         # probe every 3 seconds

删除index.html文件

[root@master kube_manifest]# kubectl exec -it readiness-httpget-pod -- /bin/sh
/ # rm -f /usr/share/nginx/html/index.html

查看pod信息,发现readiness-httpget-pod处于未就绪状态。

[root@master kube_manifest]# kubectl get pod
NAME                    READY   STATUS    RESTARTS   AGE
client                  1/1     Running   1          3d2h
liveness-httpget-pod    1/1     Running   2          101m
myapp                   1/1     Running   1          3d1h
nginx-deploy            1/1     Running   2          6d22h
readiness-httpget-pod   0/1     Running   0          60m

但是容器的中的nginx进程依然运行着,不会关闭pod,只是会一直处于未就绪状态。

##在readiness-httpget-pod容器中
/ # ps aux
PID   USER     TIME   COMMAND
    1 root       0:00 nginx: master process nginx -g daemon off;
    6 nginx      0:00 nginx: worker process
    7 root       0:00 /bin/sh
   13 root       0:00 ps aux
/ # 

再重新创建一个index.html文件,然后pod就会重新回到就绪状态

##在readiness-httpget-pod容器中
/ # echo "hello pod" >> /usr/share/nginx/html/index.html
/ # 

然后再次查看pod信息,发现readiness-httpget-pod已经回到就绪性状态了。

[root@master kube_manifest]# kubectl get pod
NAME                    READY   STATUS    RESTARTS   AGE
client                  1/1     Running   1          3d2h
liveness-httpget-pod    1/1     Running   2          109m
myapp                   1/1     Running   1          3d1h
nginx-deploy            1/1     Running   2          6d22h
readiness-httpget-pod   1/1     Running   0          68m

二、pod生命周期中的启动后钩子和终止钩子。
1、pod生命周期中的启动后钩子


[root@master kube_manifest]# vim poststart-pod.yaml

apiVersion: v1
kind: Pod
metadata:
    name: poststart-pod
    namespace: default
spec:
    containers:
    - name: busybox-httpd
      image: busybox:latest
      imagePullPolicy: IfNotPresent
      lifecycle:
        postStart:
          # postStart hook: runs right after the container is created.
          # NOTE: Kubernetes gives no ordering guarantee between this hook
          # and the main command below — they may run concurrently.
          exec:
            command: ['/bin/sh','-c','echo "hello pod" >> /tmp/index.html']
      #command: ['/bin/sh','-c','sleep 3600']
      command: ["/bin/httpd"]
      # FIX: the option and its value must be separate list items. The
      # original ["-f","-h /tmp"] passed "-h /tmp" as ONE argv element, so
      # busybox httpd took " /tmp" (with a leading space) as the home
      # directory. "-f" keeps httpd in the foreground so the container
      # does not exit immediately.
      args: ["-f","-h","/tmp"]

[root@master kube_manifest]# kubectl create -f poststart-pod.yaml 
pod/poststart-pod created

三、上面创建的都是自主式pod,即当删除时就不会自动创建,但在使用时,更多的是使用控制器管理的pod,接下来介绍pod的控制器管理。

1、ReplicaSet

[root@master kube_manifest]# kubectl explain rs
KIND:     ReplicaSet
VERSION:  apps/v1

DESCRIPTION:
     ReplicaSet ensures that a specified number of pod replicas are running at
     any given time.

FIELDS:
   apiVersion	<string>
     APIVersion defines the versioned schema of this representation of an
     object. Servers should convert recognized schemas to the latest internal
     value, and may reject unrecognized values. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

   kind	<string>
     Kind is a string value representing the REST resource this object
     represents. Servers may infer this from the endpoint the client submits
     requests to. Cannot be updated. In CamelCase. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

   metadata	<Object>
     If the Labels of a ReplicaSet are empty, they are defaulted to be the same
     as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More
     info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata

   spec	<Object>
     Spec defines the specification of the desired behavior of the ReplicaSet.
     More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status

   status	<Object>
     Status is the most recently observed status of the ReplicaSet. This data
     may be out of date by some window of time. Populated by the system.
     Read-only. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status


创建一个ReplicaSet控制器

[root@master kube_manifest]# vim re-demo.yaml

apiVersion: apps/v1
kind: ReplicaSet
metadata:
    name: myapp
    namespace: default
spec:
    replicas: 2
    # The selector must match the template's labels, otherwise the API
    # server rejects the ReplicaSet.
    selector:
      matchLabels:
        app: myapp
        release: canary
    template:
      metadata:
        name: myapp-pod   # ignored: pods are named <rs-name>-<random suffix>, as shown below
        labels:
          app: myapp
          release: canary
          enviroment: qq   # NOTE: "enviroment" is misspelled but kept as-is — the transcripts below show this exact label key
      spec:
        containers:
        - name: myapp-container
          image: ikubernetes/myapp:v1
          ports:
          - name: http
            containerPort: 80

[root@master kube_manifest]# kubectl create -f re-demo.yaml 
replicaset.apps/myapp created
[root@master kube_manifest]# kubectl get rs
NAME    DESIRED   CURRENT   READY   AGE
myapp   2         2         2       5s
[root@master kube_manifest]# kubectl get pods
NAME           READY   STATUS    RESTARTS   AGE
client         1/1     Running   1          3d5h
myapp          1/1     Running   1          3d4h
myapp-hzzq9    1/1     Running   0          8s
myapp-mvtnj    1/1     Running   0          8s
nginx-deploy   1/1     Running   2          7d1h
##ReplicaSet控制器中pod的名称是由ReplicaSet控制器的名称+随机字符串组成,上面template中定义的pod名称无效

此时删除其中一个pod,验证是否会自动创建pod以保持期望定义的状态。

[root@master kube_manifest]# kubectl delete pod myapp-mvtnj
pod "myapp-mvtnj" deleted

查看pod信息,发现重新创建了一个pod,其名称是myapp-mwzvx

[root@master kube_manifest]# kubectl get pods -o wide
NAME           READY   STATUS    RESTARTS   AGE    IP            NODE    NOMINATED NODE   READINESS GATES
client         1/1     Running   1          3d5h   10.244.2.5    node2   <none>           <none>
myapp          1/1     Running   1          3d4h   10.244.1.5    node1   <none>           <none>
myapp-hzzq9    1/1     Running   0          14m    10.244.1.9    node1   <none>           <none>
myapp-mwzvx    1/1     Running   0          17s    10.244.2.13   node2   <none>           <none>
nginx-deploy   1/1     Running   2          7d1h   10.244.1.6    node1   <none>           <none>

同理如果多添加了具有相同标签的pod,则系统会随机删除一个,以确保满足期望定义的状态。也就是“多退少补”机制。

把myapp 这个pod重新打上标签

[root@master kube_manifest]# kubectl get pods --show-labels
NAME           READY   STATUS    RESTARTS   AGE    LABELS
client         1/1     Running   1          3d5h   run=client
myapp          1/1     Running   1          3d4h   run=myapp
myapp-hzzq9    1/1     Running   0          24m    app=myapp,enviroment=qq,release=canary
myapp-mwzvx    1/1     Running   0          10m    app=myapp,enviroment=qq,release=canary
nginx-deploy   1/1     Running   2          7d1h   run=nginx-deploy
[root@master kube_manifest]# kubectl label pods myapp release=canary
pod/myapp labeled
[root@master kube_manifest]# kubectl label pods myapp app=myapp
pod/myapp labeled

此时发现myapp-mwzvx这个pod被删除了

[root@master kube_manifest]# kubectl get pods --show-labels
NAME           READY   STATUS    RESTARTS   AGE    LABELS
client         1/1     Running   1          3d5h   run=client
myapp          1/1     Running   1          3d4h   app=myapp,release=canary,run=myapp
myapp-hzzq9    1/1     Running   0          36m    app=myapp,enviroment=qq,release=canary
nginx-deploy   1/1     Running   2          7d1h   run=nginx-deploy

这时候如果把myapp这个pod删除掉,则会重新创建一个pod补回去。

[root@master kube_manifest]# kubectl delete pods myapp
pod "myapp" deleted
[root@master kube_manifest]# kubectl get pods --show-labels
NAME           READY   STATUS    RESTARTS   AGE    LABELS
client         1/1     Running   1          3d5h   run=client
myapp-h7l98    1/1     Running   0          5s     app=myapp,enviroment=qq,release=canary
myapp-hzzq9    1/1     Running   0          43m    app=myapp,enviroment=qq,release=canary
nginx-deploy   1/1     Running   2          7d1h   run=nginx-deploy

因此注意:以后再创建和使用pod的时,要注意标签冲突问题,不同类型的pod,应使用不同的标签

ReplicaSet控制器扩缩容,使用kubectl edit rs可以进行实时操作。

[root@master kube_manifest]# kubectl edit rs myapp
replicaset.apps/myapp edited

 ##把replicas改为3

此时pod就是我们所期望的3个

[root@master kube_manifest]# kubectl get pods
NAME           READY   STATUS    RESTARTS   AGE
client         1/1     Running   1          3d5h
myapp-h7l98    1/1     Running   0          11m
myapp-hzzq9    1/1     Running   0          55m
myapp-vkf8b    1/1     Running   0          61s
nginx-deploy   1/1     Running   2          7d2h

ReplicaSet控制器在线升级
我们把里面的容器版本改为v2版

[root@master kube_manifest]# kubectl edit rs myapp
replicaset.apps/myapp edited

     - image: ikubernetes/myapp:v2  ##版本改为v2

使用 kubectl get rs 查看RS控制器,显示镜像已经改为了V2版本

[root@master kube_manifest]# kubectl get rs -o wide
NAME    DESIRED   CURRENT   READY   AGE   CONTAINERS        IMAGES                 SELECTOR
myapp   3         3         3       61m   myapp-container   ikubernetes/myapp:v2   app=myapp,release=canary

但是,此时pod的资源并不会修改,只有重建时pod的资源才会被修改。

[root@master kube_manifest]# kubectl get pods -o wide
NAME           READY   STATUS    RESTARTS   AGE    IP            NODE    NOMINATED NODE   READINESS GATES
client         1/1     Running   1          3d6h   10.244.2.5    node2   <none>           <none>
myapp-h7l98    1/1     Running   0          33m    10.244.2.14   node2   <none>           <none>
myapp-hzzq9    1/1     Running   0          77m    10.244.1.9    node1   <none>           <none>
myapp-vkf8b    1/1     Running   0          23m    10.244.1.10   node1   <none>           <none>
nginx-deploy   1/1     Running   2          7d2h   10.244.1.6    node1   <none>           <none>
[root@master kube_manifest]# curl 10.244.1.10
Hello MyApp | Version: v1 | <a href="hostname.html">Pod Name</a>

例如,此时删掉myapp-vkf8b这个pod,重建时新的pod资源会使用v2版本。

[root@master kube_manifest]# kubectl delete pods myapp-vkf8b 
pod "myapp-vkf8b" deleted
[root@master kube_manifest]# kubectl get pods -o wide
NAME           READY   STATUS    RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES
client         1/1     Running   1          3d6h    10.244.2.5    node2   <none>           <none>
myapp-h7l98    1/1     Running   0          44m     10.244.2.14   node2   <none>           <none>
myapp-hqfcw    1/1     Running   0          2m56s   10.244.2.15   node2   <none>           <none>
myapp-hzzq9    1/1     Running   0          88m     10.244.1.9    node1   <none>           <none>
nginx-deploy   1/1     Running   2          7d2h    10.244.1.6    node1   <none>           <none>
##新创建的pod名称是myapp-hqfcw,IP为10.244.2.15 
[root@master kube_manifest]# curl 10.244.2.15
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>

当新创建的pod处于稳定状态时,我们就可以把原先旧的pod删除掉,然后系统会自动重建新的pod,并使用新的资源。此方法可用于灰度发布,还可以人为掌握节奏。

deployment控制器

[root@master ~]# kubectl explain deploy
KIND:     Deployment
VERSION:  apps/v1

DESCRIPTION:
     Deployment enables declarative updates for Pods and ReplicaSets.

FIELDS:
   apiVersion	<string>
     APIVersion defines the versioned schema of this representation of an
     object. Servers should convert recognized schemas to the latest internal
     value, and may reject unrecognized values. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources

   kind	<string>
     Kind is a string value representing the REST resource this object
     represents. Servers may infer this from the endpoint the client submits
     requests to. Cannot be updated. In CamelCase. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds

   metadata	<Object>
     Standard object metadata.

   spec	<Object>
     Specification of the desired behavior of the Deployment.

   status	<Object>
     Most recently observed status of the Deployment.


[root@master ~]# kubectl explain deploy.spec.strategy.rollingUpdate
KIND:     Deployment
VERSION:  apps/v1

RESOURCE: rollingUpdate <Object>

DESCRIPTION:
     Rolling update config params. Present only if DeploymentStrategyType =
     RollingUpdate.

     Spec to control the desired behavior of rolling update.

FIELDS:
   maxSurge	<string>
     The maximum number of pods that can be scheduled above the desired number
     of pods. Value can be an absolute number (ex: 5) or a percentage of desired
     pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number
     is calculated from percentage by rounding up. Defaults to 25%. Example:
     when this is set to 30%, the new ReplicaSet can be scaled up immediately
     when the rolling update starts, such that the total number of old and new
     pods do not exceed 130% of desired pods. Once old pods have been killed,
     new ReplicaSet can be scaled up further, ensuring that total number of pods
     running at any time during the update is at most 130% of desired pods.

   maxUnavailable	<string>
     The maximum number of pods that can be unavailable during the update. Value
     can be an absolute number (ex: 5) or a percentage of desired pods (ex:
     10%). Absolute number is calculated from percentage by rounding down. This
     can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set
     to 30%, the old ReplicaSet can be scaled down to 70% of desired pods
     immediately when the rolling update starts. Once new pods are ready, old
     ReplicaSet can be scaled down further, followed by scaling up the new
     ReplicaSet, ensuring that the total number of pods available at all times
     during the update is at least 70% of desired pods.


创建一个deployment控制器,并使用kubectl apply声明式命令创建。

[root@master kube_manifest]# vim deploy-demo.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 2
  # Selector must match the pod template labels below; the Deployment
  # manages its pods through an auto-created ReplicaSet
  # (myapp-deploy-<pod-template-hash> in the transcripts).
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v1   # changing this tag triggers a rolling update (new ReplicaSet)
        ports:
        - name: http
          containerPort: 80

查看deploy信息,replicas信息和pod信息

[root@master kube_manifest]# kubectl get deploy
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
myapp-deploy   1/2     2            1           15s
[root@master kube_manifest]# kubectl get deploy
NAME           READY   UP-TO-DATE   AVAILABLE   AGE
myapp-deploy   2/2     2            2           21s
[root@master kube_manifest]# kubectl get rs
NAME                      DESIRED   CURRENT   READY   AGE
myapp-deploy-65fb6c8459   2         2         2       61s
[root@master kube_manifest]# kubectl get pod
NAME                            READY   STATUS    RESTARTS   AGE
client                          1/1     Running   1          4d2h
myapp-deploy-65fb6c8459-2rcsn   1/1     Running   0          75s
myapp-deploy-65fb6c8459-xkd5n   1/1     Running   0          75s
nginx-deploy                    1/1     Running   2          7d22h


根据上面的deployment,replicas和pod的信息,它们三者之间的关系模型如下图所示:

kubernetes(三)--Pod资源清单及pod控制器_第1张图片
同样的,deployment控制器也支持动态扩缩容,只需编辑deployment的资源配置清单即可,然后使用kubectl apply重新声明一次。

查看deployment的详细信息,可以发现其默认的更新策略类型是RollingUpdate,最多允许25%的pod不可用,最多允许超出期望pod数25%。

[root@master kube_manifest]# kubectl describe deploy myapp-deploy
Name:                   myapp-deploy
Namespace:              default
CreationTimestamp:      Tue, 02 Jun 2020 12:20:19 +0800
Labels:                 <none>
Annotations:            deployment.kubernetes.io/revision: 1
Selector:               app=myapp,release=canary
Replicas:               2 desired | 2 updated | 2 total | 2 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  25% max unavailable, 25% max surge
Pod Template:
  Labels:  app=myapp
           release=canary
  Containers:
   myapp:
    Image:        ikubernetes/myapp:v1
    Port:         80/TCP
    Host Port:    0/TCP
    Environment:  <none>
    Mounts:       <none>
  Volumes:        <none>
Conditions:
  Type           Status  Reason
  ----           ------  ------
  Available      True    MinimumReplicasAvailable
  Progressing    True    NewReplicaSetAvailable
OldReplicaSets:  <none>
NewReplicaSet:   myapp-deploy-65fb6c8459 (2/2 replicas created)
Events:
  Type    Reason             Age   From                   Message
  ----    ------             ----  ----                   -------
  Normal  ScalingReplicaSet  22m   deployment-controller  Scaled up replica set myapp-deploy-65fb6c8459 to 2

接下来我们验证一下滚动更新策略
编辑deployment的配置清单,把版本改为v2版本

[root@master kube_manifest]# vim deploy-demo.yaml
        image: ikubernetes/myapp:v2

先使用kubectl get pod -o wide -l app=myapp -w 监视pod的状态。
然后再次使用kubectl apply声明一下deployment的配置清单

[root@master kube_manifest]# kubectl apply -f deploy-demo.yaml 
deployment.apps/myapp-deploy configured

可以看到pod的滚动更新状态。

[root@master kube_manifest]# kubectl get pod -o wide -l app=myapp -w 
NAME                            READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
myapp-deploy-65fb6c8459-2rcsn   1/1     Running   0          39m   10.244.2.16   node2   <none>           <none>
myapp-deploy-65fb6c8459-xkd5n   1/1     Running   0          39m   10.244.1.11   node1   <none>           <none>
myapp-deploy-559ff5c66-c2jdc    0/1     Pending   0          0s    <none>        <none>   <none>           <none>
myapp-deploy-559ff5c66-c2jdc    0/1     Pending   0          0s    <none>        node1    <none>           <none>
myapp-deploy-559ff5c66-c2jdc    0/1     ContainerCreating   0          0s    <none>        node1    <none>           <none>
myapp-deploy-559ff5c66-c2jdc    1/1     Running             0          29s   10.244.1.12   node1    <none>           <none>
myapp-deploy-65fb6c8459-xkd5n   1/1     Terminating         0          53m   10.244.1.11   node1    <none>           <none>
myapp-deploy-559ff5c66-hq6hs    0/1     Pending             0          0s    <none>        <none>   <none>           <none>
myapp-deploy-559ff5c66-hq6hs    0/1     Pending             0          0s    <none>        node2    <none>           <none>
myapp-deploy-559ff5c66-hq6hs    0/1     ContainerCreating   0          0s    <none>        node2    <none>           <none>
myapp-deploy-65fb6c8459-xkd5n   0/1     Terminating         0          54m   10.244.1.11   node1    <none>           <none>
myapp-deploy-65fb6c8459-xkd5n   0/1     Terminating         0          54m   10.244.1.11   node1    <none>           <none>
myapp-deploy-65fb6c8459-xkd5n   0/1     Terminating         0          54m   10.244.1.11   node1    <none>           <none>
myapp-deploy-559ff5c66-hq6hs    1/1     Running             0          2s    10.244.2.17   node2    <none>           <none>
myapp-deploy-65fb6c8459-2rcsn   1/1     Terminating         0          54m   10.244.2.16   node2    <none>           <none>
myapp-deploy-65fb6c8459-2rcsn   0/1     Terminating         0          54m   10.244.2.16   node2    <none>           <none>
myapp-deploy-65fb6c8459-2rcsn   0/1     Terminating         0          54m   10.244.2.16   node2    <none>           <none>
myapp-deploy-65fb6c8459-2rcsn   0/1     Terminating         0          54m   10.244.2.16   node2    <none>           <none>

滚动更新后,replicas有了两个版本,现在使用的是v2版本,而v1版本还保留着,这样我们就可以随时滚动更新回去。

[root@master kube_manifest]# kubectl get rs -o wide
NAME                      DESIRED   CURRENT   READY   AGE     CONTAINERS   IMAGES                 SELECTOR
myapp-deploy-559ff5c66    2         2         2       3m39s   myapp        ikubernetes/myapp:v2   app=myapp,pod-template-hash=559ff5c66,release=canary
myapp-deploy-65fb6c8459   0         0         0       57m     myapp        ikubernetes/myapp:v1   app=myapp,pod-template-hash=65fb6c8459,release=canary

也可以使用kubectl patch打补丁方式或kubectl set image 进行更新。kubectl patch打补丁方式的好处是并不会修改deployment的资源配置清单,可以保留原始的资源配置清单。

使用kubectl rollout history查看滚动历史

[root@master kube_manifest]# kubectl rollout history deployment myapp-deploy
deployment.apps/myapp-deploy 
REVISION  CHANGE-CAUSE
1         <none>
2         <none>

如果想回滚使用kubectl rollout undo即可。

使用kubectl patch打补丁方式把pod的副本改为5个。

[root@master kube_manifest]# kubectl patch deployment myapp-deploy -p '{"spec":{"replicas":5}}'
deployment.apps/myapp-deploy patched

[root@master kube_manifest]# kubectl get pod -o wide -l app=myapp
NAME                           READY   STATUS    RESTARTS   AGE   IP            NODE    NOMINATED NODE   READINESS GATES
myapp-deploy-559ff5c66-7rtjd   1/1     Running   0          32s   10.244.2.18   node2   <none>           <none>
myapp-deploy-559ff5c66-c2jdc   1/1     Running   0          37m   10.244.1.12   node1   <none>           <none>
myapp-deploy-559ff5c66-fvsvv   1/1     Running   0          32s   10.244.1.13   node1   <none>           <none>
myapp-deploy-559ff5c66-hq6hs   1/1     Running   0          37m   10.244.2.17   node2   <none>           <none>
myapp-deploy-559ff5c66-j7fwg   1/1     Running   0          32s   10.244.2.19   node2   <none>           <none>

使用kubectl patch打补丁方式更改deployment控制器的更新策略

[root@master kube_manifest]# kubectl patch deployment myapp-deploy -p '{"spec":{"strategy":{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0}}}}'
deployment.apps/myapp-deploy patched

kubernetes(三)--Pod资源清单及pod控制器_第2张图片
模拟金丝雀发布,即只会更新一个pod资源后就暂停,待其稳定运行后再更新后续的pod资源。

[root@master kube_manifest]# kubectl set image deployment myapp-deploy myapp=ikubernetes/myapp:v3 && kubectl rollout pause deployment myapp-deploy
deployment.apps/myapp-deploy image updated
deployment.apps/myapp-deploy paused

查看更新状态

[root@master kube_manifest]# kubectl get pod -o wide -l app=myapp -w
NAME                           READY   STATUS    RESTARTS   AGE    IP            NODE    NOMINATED NODE   READINESS GATES
myapp-deploy-559ff5c66-7rtjd   1/1     Running   0          86m    10.244.2.18   node2   <none>           <none>
myapp-deploy-559ff5c66-c2jdc   1/1     Running   0          124m   10.244.1.12   node1   <none>           <none>
myapp-deploy-559ff5c66-fvsvv   1/1     Running   0          86m    10.244.1.13   node1   <none>           <none>
myapp-deploy-559ff5c66-hq6hs   1/1     Running   0          123m   10.244.2.17   node2   <none>           <none>
myapp-deploy-559ff5c66-j7fwg   1/1     Running   0          86m    10.244.2.19   node2   <none>           <none>
myapp-deploy-6b9865d969-kplqz   0/1     Pending   0          0s     <none>        <none>   <none>           <none>
myapp-deploy-6b9865d969-kplqz   0/1     Pending   0          0s     <none>        node1    <none>           <none>
myapp-deploy-6b9865d969-kplqz   0/1     ContainerCreating   0          0s     <none>        node1    <none>           <none>
myapp-deploy-6b9865d969-kplqz   1/1     Running             0          44s    10.244.1.14   node1    <none>           <none>
[root@master kube_manifest]# kubectl rollout status deployment myapp-deploy
Waiting for deployment "myapp-deploy" rollout to finish: 1 out of 5 new replicas have been updated...

稳定运行后,接着更新后续的pod资源

[root@master kube_manifest]# kubectl rollout resume deployment myapp-deploy 
deployment.apps/myapp-deploy resumed

查看监控更新的状态

^C[root@master kube_manifest]# kubectl rollout status deployment myapp-deploy
Waiting for deployment "myapp-deploy" rollout to finish: 1 out of 5 new replicas have been updated...
Waiting for deployment spec update to be observed...
Waiting for deployment spec update to be observed...
Waiting for deployment "myapp-deploy" rollout to finish: 1 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 1 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 2 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 2 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 3 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 3 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 3 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 3 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 4 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 4 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 4 out of 5 new replicas have been updated...
Waiting for deployment "myapp-deploy" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "myapp-deploy" rollout to finish: 1 old replicas are pending termination...
deployment "myapp-deploy" successfully rolled out

可以看到现在使用的是v3版本

[root@master kube_manifest]# kubectl get rs -o wide
NAME                      DESIRED   CURRENT   READY   AGE    CONTAINERS   IMAGES                 SELECTOR
myapp-deploy-559ff5c66    0         0         0       135m   myapp        ikubernetes/myapp:v2   app=myapp,pod-template-hash=559ff5c66,release=canary
myapp-deploy-65fb6c8459   0         0         0       3h8m   myapp        ikubernetes/myapp:v1   app=myapp,pod-template-hash=65fb6c8459,release=canary
myapp-deploy-6b9865d969   5         5         5       11m    myapp        ikubernetes/myapp:v3   app=myapp,pod-template-hash=6b9865d969,release=canary

如果,此时我们想使用之前的版本可以使用undo操作,默认是回滚到前一个版本,选项–to-revision=n可以回滚到指定的版本。
目前系统中存在3个版本,正在使用的是第三版。

[root@master kube_manifest]# kubectl rollout history deployment myapp-deploy
deployment.apps/myapp-deploy 
REVISION  CHANGE-CAUSE
1         <none>
2         <none>
3         <none>

现在我们要回滚到第1版,注意:如果已经回滚到第一版了,此时再回滚则是回滚到第三版

[root@master kube_manifest]# kubectl rollout undo deployment myapp-deploy --to-revision=1
deployment.apps/myapp-deploy rolled back

kubernetes(三)--Pod资源清单及pod控制器_第3张图片
查看rs信息,当前正在工作的是v1版

[root@master kube_manifest]# kubectl get rs -o wide
NAME                      DESIRED   CURRENT   READY   AGE     CONTAINERS   IMAGES                 SELECTOR
myapp-deploy-559ff5c66    0         0         0       149m    myapp        ikubernetes/myapp:v2   app=myapp,pod-template-hash=559ff5c66,release=canary
myapp-deploy-65fb6c8459   5         5         5       3h23m   myapp        ikubernetes/myapp:v1   app=myapp,pod-template-hash=65fb6c8459,release=canary
myapp-deploy-6b9865d969   0         0         0       25m     myapp        ikubernetes/myapp:v3   app=myapp,pod-template-hash=6b9865d969,release=canary

注意:回滚完成后,第一版就变成了第四版了,也验证了第一版再回滚就到了第三版

[root@master kube_manifest]# kubectl rollout history deployment myapp-deploy
deployment.apps/myapp-deploy 
REVISION  CHANGE-CAUSE
2         <none>
3         <none>
4         <none>

你可能感兴趣的:(kubernetes(三)--Pod资源清单及pod控制器)