Although a ReplicaSet can be used on its own, it mainly serves Deployments as the mechanism for creating, deleting, and updating Pods. When you use a Deployment, you do not need to manage the ReplicaSets it creates: the Deployment manages them for you.
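To see this relationship directly, you can inspect a Deployment-managed ReplicaSet's ownerReferences; as a sketch (using the myapp-deployment ReplicaSet that appears in the listings below):
[root@k8smaster ~]# kubectl get rs myapp-deployment-558f94fb55 -o jsonpath='{.metadata.ownerReferences[0].kind}/{.metadata.ownerReferences[0].name}'
# expected to print something like: Deployment/myapp-deployment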
apiVersion: apps/v1 # API version
kind: ReplicaSet # resource type
metadata: # metadata
spec: # desired state
  minReadySeconds: # minimum time, in seconds, a newly created Pod must be ready before it is considered available
  replicas: # number of replicas; defaults to 1
  selector: # label selector
  template: # Pod template (required field)
    metadata: # metadata inside the template
    spec: # desired state of the template
status: # current actual state
# Create a new ReplicaSet
[root@k8smaster ~]# cd /data/
[root@k8smaster data]# vim rs-demo.yaml
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: myrs
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: myrs
      release: canary
  template:
    metadata:
      name: myrs-pod
      labels:
        app: myrs
        release: canary
        environment: qa
    spec:
      containers:
      - name: myapp-container
        image: ikubernetes/myapp:v1
        ports:
        - name: http
          containerPort: 80
[root@k8smaster data]# kubectl apply -f rs-demo.yaml
replicaset.apps/myrs created
[root@k8smaster data]# kubectl get rs
NAME DESIRED CURRENT READY AGE
myapp-deployment-558f94fb55 3 3 3 40d
myrs 2 2 2 80s
nginx-deployment-6f77f65499 1 1 1 39d
[root@k8smaster data]# kubectl get pods
NAME READY STATUS RESTARTS AGE
memleak-pod 0/1 CrashLoopBackOff 29 126m
myapp-deployment-558f94fb55-plk4v 1/1 Running 3 40d
myapp-deployment-558f94fb55-rd8f5 1/1 Running 3 40d
myapp-deployment-558f94fb55-zzmpg 1/1 Running 3 40d
mypod 1/1 Running 2 6d2h
myrs-74ntr 1/1 Running 0 103s
myrs-vmchf 1/1 Running 0 103s
nginx-deployment-6f77f65499-8g24d 1/1 Running 3 39d
pod-demo 2/2 Running 4 6d3h
readiness-exec 1/1 Running 1 4d
[root@k8smaster data]# kubectl describe pod myrs-74ntr
Name: myrs-74ntr
Namespace: default
Priority: 0
Node: k8snode2/192.168.43.176
Start Time: Tue, 10 Dec 2019 17:44:27 +0800
Labels: app=myrs
environment=qa
release=canary
Annotations:
Status: Running
IP: 10.244.2.107
IPs:
Controlled By: ReplicaSet/myrs
Containers:
myapp-container:
Container ID: docker://d3c07f789f8167479a2326ba5cb03eec094b09a507f79ca3d5386a6302605d52
Image: ikubernetes/myapp:v1
Image ID: docker-pullable://ikubernetes/myapp@sha256:40ccda7b7e2d080bee7620b6d3f5e6697894befc409582902a67c963d30a6113
Port: 80/TCP
Host Port: 0/TCP
State: Running
Started: Tue, 10 Dec 2019 17:44:28 +0800
Ready: True
Restart Count: 0
Environment:
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from default-token-kk2fq (ro)
Conditions:
Type Status
Initialized True
Ready True
ContainersReady True
PodScheduled True
Volumes:
default-token-kk2fq:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-kk2fq
Optional: false
QoS Class: BestEffort
Node-Selectors:
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 2m32s default-scheduler Successfully assigned default/myrs-74ntr to k8snode2
Normal Pulled 2m30s kubelet, k8snode2 Container image "ikubernetes/myapp:v1" already present on machine
Normal Created 2m30s kubelet, k8snode2 Created container myapp-container
Normal Started 2m30s kubelet, k8snode2 Started container myapp-container
[root@k8smaster data]# curl 10.244.2.107
Hello MyApp | Version: v1 | Pod Name
[root@k8smaster data]# kubectl delete pods myrs-74ntr
pod "myrs-74ntr" deleted
[root@k8smaster data]# kubectl get pods
NAME READY STATUS RESTARTS AGE
memleak-pod 0/1 CrashLoopBackOff 30 131m
myapp-deployment-558f94fb55-plk4v 1/1 Running 3 40d
myapp-deployment-558f94fb55-rd8f5 1/1 Running 3 40d
myapp-deployment-558f94fb55-zzmpg 1/1 Running 3 40d
mypod 1/1 Running 2 6d2h
myrs-2xk7k 1/1 Running 0 4s
myrs-vmchf 1/1 Running 0 6m54s
nginx-deployment-6f77f65499-8g24d 1/1 Running 3 39d
pod-demo 2/2 Running 4 6d3h
readiness-exec 1/1 Running 1 4d1h
# There are still two replicas in the Running state, and the deleted myrs-74ntr has been replaced by myrs-2xk7k: the ReplicaSet controller recreated the missing replica.
# Next, change another Pod's labels to match those of myrs. The ReplicaSet will then see three Pods matching its selector, which violates the declared "replicas: 2", so to restore the desired state the ReplicaSet deletes one replica.
[root@k8smaster data]# kubectl get pods --show-labels
NAME READY STATUS RESTARTS AGE LABELS
memleak-pod 0/1 CrashLoopBackOff 32 141m
myapp-deployment-558f94fb55-plk4v 1/1 Running 3 40d app=myapp-deployment,pod-template-hash=558f94fb55
myapp-deployment-558f94fb55-rd8f5 1/1 Running 3 40d app=myapp-deployment,pod-template-hash=558f94fb55
myapp-deployment-558f94fb55-zzmpg 1/1 Running 3 40d app=myapp-deployment,pod-template-hash=558f94fb55
mypod 1/1 Running 2 6d3h
myrs-2xk7k 1/1 Running 0 9m53s app=myrs,environment=qa,release=canary
myrs-vmchf 1/1 Running 0 16m app=myrs,environment=qa,release=canary
nginx-deployment-6f77f65499-8g24d 1/1 Running 3 39d app=nginx-deployment,pod-template-hash=6f77f65499
pod-demo 2/2 Running 4 6d3h
readiness-exec 1/1 Running 1 4d1h test=readiness-exec
# Change pod-demo's labels to match those of myrs.
[root@k8smaster data]# kubectl label pods pod-demo -n default app=myrs
pod/pod-demo labeled
[root@k8smaster data]# kubectl label pods pod-demo -n default environment=qa
pod/pod-demo labeled
[root@k8smaster data]# kubectl label pods pod-demo -n default release=canary
pod/pod-demo labeled
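The three label commands above can equivalently be issued as a single call:
[root@k8smaster data]# kubectl label pods pod-demo -n default app=myrs environment=qa release=canary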
# The previously recreated myrs-2xk7k has now been deleted.
[root@k8smaster data]# kubectl get pods --show-labels
NAME READY STATUS RESTARTS AGE LABELS
memleak-pod 0/1 CrashLoopBackOff 32 142m
myapp-deployment-558f94fb55-plk4v 1/1 Running 3 40d app=myapp-deployment,pod-template-hash=558f94fb55
myapp-deployment-558f94fb55-rd8f5 1/1 Running 3 40d app=myapp-deployment,pod-template-hash=558f94fb55
myapp-deployment-558f94fb55-zzmpg 1/1 Running 3 40d app=myapp-deployment,pod-template-hash=558f94fb55
mypod 1/1 Running 2 6d3h
myrs-vmchf 1/1 Running 0 18m app=myrs,environment=qa,release=canary
nginx-deployment-6f77f65499-8g24d 1/1 Running 3 39d app=nginx-deployment,pod-template-hash=6f77f65499
pod-demo 2/2 Running 4 6d3h app=myrs,environment=qa,release=canary
readiness-exec 1/1 Running 1 4d1h test=readiness-exec
# Now delete pod-demo.
[root@k8smaster data]# kubectl delete pod pod-demo
pod "pod-demo" deleted
# The ReplicaSet has reconciled back to the desired state again.
[root@k8smaster data]# kubectl get pods --show-labels
NAME READY STATUS RESTARTS AGE LABELS
memleak-pod 0/1 CrashLoopBackOff 34 151m
myapp-deployment-558f94fb55-plk4v 1/1 Running 3 40d app=myapp-deployment,pod-template-hash=558f94fb55
myapp-deployment-558f94fb55-rd8f5 1/1 Running 3 40d app=myapp-deployment,pod-template-hash=558f94fb55
myapp-deployment-558f94fb55-zzmpg 1/1 Running 3 40d app=myapp-deployment,pod-template-hash=558f94fb55
mypod 1/1 Running 2 6d3h
myrs-vmchf 1/1 Running 0 27m app=myrs,environment=qa,release=canary
myrs-vn9gl 1/1 Running 0 51s app=myrs,environment=qa,release=canary
nginx-deployment-6f77f65499-8g24d 1/1 Running 3 39d app=nginx-deployment,pod-template-hash=6f77f65499
readiness-exec 1/1 Running 1 4d1h test=readiness-exec
# Scale up on the fly.
[root@k8smaster data]# kubectl edit rs myrs
In the editor, change:
  replicas: 5
then save and quit; kubectl reports:
replicaset.extensions/myrs edited
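If you prefer a non-interactive equivalent to editing the object, kubectl scale does the same thing:
[root@k8smaster data]# kubectl scale rs myrs --replicas=5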
[root@k8smaster data]# kubectl get pods
NAME READY STATUS RESTARTS AGE
memleak-pod 0/1 CrashLoopBackOff 36 160m
myapp-deployment-558f94fb55-plk4v 1/1 Running 3 40d
myapp-deployment-558f94fb55-rd8f5 1/1 Running 3 40d
myapp-deployment-558f94fb55-zzmpg 1/1 Running 3 40d
mypod 1/1 Running 2 6d3h
myrs-dgblp 1/1 Running 0 21s
myrs-fnznb 1/1 Running 0 21s
myrs-vmchf 1/1 Running 0 35m
myrs-vn9gl 1/1 Running 0 9m36s
myrs-wcxkt 1/1 Running 0 21s
nginx-deployment-6f77f65499-8g24d 1/1 Running 3 39d
readiness-exec 1/1 Running 1 4d1h
# Scale down on the fly.
[root@k8smaster data]# kubectl edit rs myrs
In the editor, change:
  replicas: 2
then save and quit; kubectl reports:
replicaset.extensions/myrs edited
[root@k8smaster data]# kubectl get pods
NAME READY STATUS RESTARTS AGE
memleak-pod 0/1 CrashLoopBackOff 36 161m
myapp-deployment-558f94fb55-plk4v 1/1 Running 3 40d
myapp-deployment-558f94fb55-rd8f5 1/1 Running 3 40d
myapp-deployment-558f94fb55-zzmpg 1/1 Running 3 40d
mypod 1/1 Running 2 6d3h
myrs-dgblp 1/1 Terminating 0 73s
myrs-fnznb 1/1 Terminating 0 73s
myrs-vmchf 1/1 Running 0 36m
myrs-vn9gl 1/1 Running 0 10m
myrs-wcxkt 1/1 Terminating 0 73s
nginx-deployment-6f77f65499-8g24d 1/1 Running 3 39d
readiness-exec 1/1 Running 1 4d1h
# Upgrade the image version on the fly.
[root@k8smaster data]# kubectl edit rs myrs
In the editor, change the container image to:
  - image: ikubernetes/myapp:v2
then save and quit; kubectl reports:
replicaset.extensions/myrs edited
[root@k8smaster data]# kubectl get rs -o wide
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
myapp-deployment-558f94fb55 3 3 3 40d myapp ikubernetes/myapp:v1 app=myapp-deployment,pod-template-hash=558f94fb55
myrs 2 2 2 40m myapp-container ikubernetes/myapp:v2 app=myrs,release=canary
nginx-deployment-6f77f65499 1 1 1 39d nginx nginx app=nginx-deployment,pod-template-hash=6f77f65499
[root@k8smaster data]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
memleak-pod 0/1 CrashLoopBackOff 42 3h27m 10.244.1.112 k8snode1
myapp-deployment-558f94fb55-plk4v 1/1 Running 3 40d 10.244.2.103 k8snode2
myapp-deployment-558f94fb55-rd8f5 1/1 Running 3 40d 10.244.2.104 k8snode2
myapp-deployment-558f94fb55-zzmpg 1/1 Running 3 40d 10.244.1.110 k8snode1
mypod 1/1 Running 2 6d4h 10.244.2.105 k8snode2
myrs-vmchf 1/1 Running 0 83m 10.244.1.113 k8snode1
myrs-vn9gl 1/1 Running 0 56m 10.244.2.109 k8snode2
nginx-deployment-6f77f65499-8g24d 1/1 Running 3 39d 10.244.1.109 k8snode1
readiness-exec 1/1 Running 1 4d2h 10.244.2.106 k8snode2
# Accessing the Pod shows that it has not been upgraded to v2.
[root@k8smaster data]# curl 10.244.1.113
Hello MyApp | Version: v1 | Pod Name
# Delete this Pod.
[root@k8smaster data]# kubectl delete pods myrs-vmchf
pod "myrs-vmchf" deleted
[root@k8smaster data]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
memleak-pod 0/1 CrashLoopBackOff 42 3h29m 10.244.1.112 k8snode1
myapp-deployment-558f94fb55-plk4v 1/1 Running 3 40d 10.244.2.103 k8snode2
myapp-deployment-558f94fb55-rd8f5 1/1 Running 3 40d 10.244.2.104 k8snode2
myapp-deployment-558f94fb55-zzmpg 1/1 Running 3 40d 10.244.1.110 k8snode1
mypod 1/1 Running 2 6d4h 10.244.2.105 k8snode2
myrs-c9k2l 1/1 Running 0 20s 10.244.1.114 k8snode1
myrs-vn9gl 1/1 Running 0 59m 10.244.2.109 k8snode2
nginx-deployment-6f77f65499-8g24d 1/1 Running 3 39d 10.244.1.109 k8snode1
readiness-exec 1/1 Running 1 4d2h 10.244.2.106 k8snode2
# Accessing the recreated Pod shows v2, so changes to the template are only applied when a Pod is recreated.
[root@k8smaster data]# curl 10.244.1.114
Hello MyApp | Version: v2 | Pod Name
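With a bare ReplicaSet, this behavior can be turned into a rough manual rolling update: change the image non-interactively, then delete the Pods one at a time, waiting for each replacement to become Ready before deleting the next. A sketch (the Pod name is a placeholder):
[root@k8smaster data]# kubectl set image rs myrs myapp-container=ikubernetes/myapp:v2
[root@k8smaster data]# kubectl delete pod <one-myrs-pod>   # repeat per Pod, checking readiness in between
A Deployment automates exactly this process, which is the subject of the next section.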
apiVersion: apps/v1 # API version
kind: Deployment # resource type
metadata: # metadata
spec: # desired state
  minReadySeconds: # minimum time, in seconds, a newly created Pod must be ready before it is considered available
  replicas: # number of replicas; defaults to 1
  selector: # label selector
  template: # Pod template (required field)
    metadata: # metadata inside the template
    spec: # desired state of the template
  strategy: # update strategy used to replace existing Pods with new ones
    Recreate: # recreate update: kill all existing Pods before creating new ones
    RollingUpdate: # rolling update
      maxSurge: # maximum number of Pods that can be scheduled above the desired number, e.g. 5 or 10%
      maxUnavailable: # maximum number of Pods that can be unavailable during the update
  revisionHistoryLimit: # number of old ReplicaSets to retain to allow rollback; defaults to 10
  paused: # indicates that the deployment is paused; the deployment controller will not process it
  progressDeadlineSeconds: # maximum time, in seconds, for the deployment to make progress before it is considered failed
status: # actual state
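As a worked example of the rounding behavior (maxSurge rounds up, maxUnavailable rounds down): with replicas: 4 and the 25%/25% defaults, maxSurge = ceil(4 x 25%) = 1 and maxUnavailable = floor(4 x 25%) = 1, so during a rolling update the Deployment runs at most 5 Pods in total and keeps at least 3 available at all times.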
[root@k8smaster data]# vim deploy-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-dep
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: myapp
      release: beta
  template:
    metadata:
      labels:
        app: myapp
        release: beta
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v1
        ports:
        - name: http
          containerPort: 80
[root@k8smaster data]# kubectl apply -f deploy-demo.yaml
deployment.apps/myapp-dep created
[root@k8smaster data]# kubectl get pods
NAME READY STATUS RESTARTS AGE
memleak-pod 0/1 CrashLoopBackOff 71 22h
myapp-dep-678d8f65bc-bg5tk 1/1 Running 0 2m15s
myapp-dep-678d8f65bc-n7dhn 1/1 Running 0 2m15s
myapp-deployment-558f94fb55-plk4v 1/1 Running 4 40d
myapp-deployment-558f94fb55-rd8f5 1/1 Running 4 40d
myapp-deployment-558f94fb55-zzmpg 1/1 Running 4 40d
mypod 1/1 Running 3 6d23h
myrs-c9k2l 1/1 Running 1 19h
myrs-vn9gl 1/1 Running 1 20h
nginx-deployment-6f77f65499-8g24d 1/1 Running 4 40d
readiness-exec 1/1 Running 2 4d21h
[root@k8smaster data]# kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
myapp-dep 2/2 2 2 2m55s
myapp-deployment 3/3 3 3 40d
nginx-deployment 1/1 1 1 40d
[root@k8smaster data]# kubectl get rs
NAME DESIRED CURRENT READY AGE
myapp-dep-678d8f65bc 2 2 2 3m19s
myapp-deployment-558f94fb55 3 3 3 40d
myrs 2 2 2 20h
nginx-deployment-6f77f65499 1 1 1 40d
[root@k8smaster data]# kubectl describe deploy myapp-dep
Name: myapp-dep
Namespace: default
CreationTimestamp: Wed, 11 Dec 2019 14:23:44 +0800
Labels:
Annotations: deployment.kubernetes.io/revision: 1
kubectl.kubernetes.io/last-applied-configuration:
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"name":"myapp-dep","namespace":"default"},"spec":{"replicas":2,"s...
Selector: app=myapp,release=beta
Replicas: 2 desired | 2 updated | 2 total | 2 available | 0 unavailable
StrategyType: RollingUpdate
MinReadySeconds: 0
RollingUpdateStrategy: 25% max unavailable, 25% max surge
Pod Template:
Labels: app=myapp
release=beta
Containers:
myapp:
Image: ikubernetes/myapp:v1
Port: 80/TCP
Host Port: 0/TCP
Environment:
Mounts:
Volumes:
Conditions:
Type Status Reason
---- ------ ------
Available True MinimumReplicasAvailable
Progressing True NewReplicaSetAvailable
OldReplicaSets:
NewReplicaSet: myapp-dep-678d8f65bc (2/2 replicas created)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ScalingReplicaSet 6m deployment-controller Scaled up replica set myapp-dep-678d8f65bc to 2
# Watch the Pod status.
[root@k8smaster data]# kubectl get pods -l app=myapp -w
# Edit the manifest.
[root@k8smaster ~]# cd /data/
[root@k8smaster data]# vim deploy-demo.yaml
In the file, change the image line to:
  image: ikubernetes/myapp:v2
# Apply the change.
[root@k8smaster data]# kubectl apply -f deploy-demo.yaml
deployment.apps/myapp-dep configured
# Check the watch output.
[root@k8smaster data]# kubectl get pods -l app=myapp -w
NAME READY STATUS RESTARTS AGE
myapp-dep-678d8f65bc-bg5tk 1/1 Running 0 10m
myapp-dep-678d8f65bc-n7dhn 1/1 Running 0 10m
myapp-dep-6c458b4c54-kmq46 0/1 Pending 0 0s
myapp-dep-6c458b4c54-kmq46 0/1 Pending 0 0s
myapp-dep-6c458b4c54-kmq46 0/1 ContainerCreating 0 0s
myapp-dep-6c458b4c54-kmq46 1/1 Running 0 3s
myapp-dep-678d8f65bc-bg5tk 1/1 Terminating 0 11m
myapp-dep-6c458b4c54-sdmmv 0/1 Pending 0 0s
myapp-dep-6c458b4c54-sdmmv 0/1 Pending 0 0s
myapp-dep-6c458b4c54-sdmmv 0/1 ContainerCreating 0 0s
myapp-dep-678d8f65bc-bg5tk 0/1 Terminating 0 11m
myapp-dep-6c458b4c54-sdmmv 1/1 Running 0 1s
myapp-dep-678d8f65bc-n7dhn 1/1 Terminating 0 11m
myapp-dep-678d8f65bc-n7dhn 0/1 Terminating 0 11m
myapp-dep-678d8f65bc-bg5tk 0/1 Terminating 0 11m
myapp-dep-678d8f65bc-bg5tk 0/1 Terminating 0 11m
myapp-dep-678d8f65bc-n7dhn 0/1 Terminating 0 11m
myapp-dep-678d8f65bc-n7dhn 0/1 Terminating 0 11m
# List the ReplicaSets; a new ReplicaSet for v2 has appeared.
[root@k8smaster data]# kubectl get rs -o wide
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
myapp-dep-678d8f65bc 0 0 0 13m myapp ikubernetes/myapp:v1 app=myapp,pod-template-hash=678d8f65bc,release=beta
myapp-dep-6c458b4c54 2 2 2 2m9s myapp ikubernetes/myapp:v2 app=myapp,pod-template-hash=6c458b4c54,release=beta
myapp-deployment-558f94fb55 3 3 3 40d myapp ikubernetes/myapp:v1 app=myapp-deployment,pod-template-hash=558f94fb55
myrs 2 2 2 20h myapp-container ikubernetes/myapp:v2 app=myrs,release=canary
nginx-deployment-6f77f65499 1 1 1 40d nginx nginx app=nginx-deployment,pod-template-hash=6f77f65499
# View the rollout history.
[root@k8smaster data]# kubectl rollout history deployment myapp-dep
deployment.extensions/myapp-dep
REVISION CHANGE-CAUSE
1
2
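CHANGE-CAUSE is empty because nothing set the kubernetes.io/change-cause annotation. Two ways to populate it (the --record flag is assumed to still be available on this kubectl version):
[root@k8smaster data]# kubectl apply -f deploy-demo.yaml --record
[root@k8smaster data]# kubectl annotate deployment myapp-dep kubernetes.io/change-cause="update image to v2"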
# Patch replicas to 5.
[root@k8smaster data]# kubectl patch deployment myapp-dep -p '{"spec":{"replicas":5}}'
deployment.extensions/myapp-dep patched
[root@k8smaster data]# kubectl get pods
NAME READY STATUS RESTARTS AGE
memleak-pod 0/1 CrashLoopBackOff 74 23h
myapp-dep-6c458b4c54-g4sd6 1/1 Running 0 3s
myapp-dep-6c458b4c54-kmq46 1/1 Running 0 9m12s
myapp-dep-6c458b4c54-mfq76 1/1 Running 0 3s
myapp-dep-6c458b4c54-sdmmv 1/1 Running 0 9m9s
myapp-dep-6c458b4c54-z7sj6 1/1 Running 0 3s
myapp-deployment-558f94fb55-plk4v 1/1 Running 4 40d
myapp-deployment-558f94fb55-rd8f5 1/1 Running 4 40d
myapp-deployment-558f94fb55-zzmpg 1/1 Running 4 40d
mypod 1/1 Running 3 6d23h
myrs-c9k2l 1/1 Running 1 19h
myrs-vn9gl 1/1 Running 1 20h
nginx-deployment-6f77f65499-8g24d 1/1 Running 4 40d
readiness-exec 1/1 Running 2 4d21h
# Change the update strategy: maxSurge to 1, maxUnavailable to 0.
[root@k8smaster data]# kubectl patch deployment myapp-dep -p '{"spec":{"strategy":{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0}}}}'
deployment.extensions/myapp-dep patched
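After this strategic-merge patch, the strategy portion of the spec is equivalent to the following fragment (compare the RollingUpdateStrategy line in the describe output below); with maxUnavailable: 0, an old Pod is only removed after its replacement is Ready:
strategy:
  type: RollingUpdate
  rollingUpdate:
    maxSurge: 1
    maxUnavailable: 0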
[root@k8smaster data]# kubectl describe deployment myapp-dep
Name: myapp-dep
Namespace: default
CreationTimestamp: Wed, 11 Dec 2019 14:23:44 +0800
Labels: app=myapp
release=beta
Annotations: deployment.kubernetes.io/revision: 2
kubectl.kubernetes.io/last-applied-configuration:
{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"name":"myapp-dep","namespace":"default"},"spec":{"replicas":2,"s...
Selector: app=myapp,release=beta
Replicas: 5 desired | 5 updated | 5 total | 5 available | 0 unavailable
StrategyType: RollingUpdate
MinReadySeconds: 0
RollingUpdateStrategy: 0 max unavailable, 1 max surge
Pod Template:
Labels: app=myapp
release=beta
Containers:
myapp:
Image: ikubernetes/myapp:v2
Port: 80/TCP
Host Port: 0/TCP
Environment:
Mounts:
Volumes:
Conditions:
Type Status Reason
---- ------ ------
Progressing True NewReplicaSetAvailable
Available True MinimumReplicasAvailable
OldReplicaSets:
NewReplicaSet: myapp-dep-6c458b4c54 (5/5 replicas created)
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal ScalingReplicaSet 26m deployment-controller Scaled up replica set myapp-dep-678d8f65bc to 2
Normal ScalingReplicaSet 14m deployment-controller Scaled up replica set myapp-dep-6c458b4c54 to 1
Normal ScalingReplicaSet 14m deployment-controller Scaled down replica set myapp-dep-678d8f65bc to 1
Normal ScalingReplicaSet 14m deployment-controller Scaled up replica set myapp-dep-6c458b4c54 to 2
Normal ScalingReplicaSet 14m deployment-controller Scaled down replica set myapp-dep-678d8f65bc to 0
Normal ScalingReplicaSet 5m38s deployment-controller Scaled up replica set myapp-dep-6c458b4c54 to 5
# Watch the Pod status.
[root@k8smaster data]# kubectl get pods -l app=myapp -w
NAME READY STATUS RESTARTS AGE
myapp-dep-6c458b4c54-g4sd6 1/1 Running 0 10m
myapp-dep-6c458b4c54-kmq46 1/1 Running 0 19m
myapp-dep-6c458b4c54-mfq76 1/1 Running 0 10m
myapp-dep-6c458b4c54-sdmmv 1/1 Running 0 19m
myapp-dep-6c458b4c54-z7sj6 1/1 Running 0 10m
# Update the image version and immediately pause the rollout.
[root@k8smaster data]# kubectl set image deployment myapp-dep myapp=ikubernetes/myapp:v3 && kubectl rollout pause deployment myapp-dep
deployment.extensions/myapp-dep image updated
deployment.extensions/myapp-dep paused
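Because maxSurge is 1 and maxUnavailable is 0, pausing at this point leaves exactly one v3 canary Pod running alongside the five v2 Pods. A hypothetical verification step before resuming (the Pod IP is a placeholder):
[root@k8smaster data]# kubectl get pods -l app=myapp -o wide | grep 6c8cbfb497
[root@k8smaster data]# curl <canary-pod-ip>   # expect a "Version: v3" response before resuming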
[root@k8smaster data]# kubectl get pods -l app=myapp -w
NAME READY STATUS RESTARTS AGE
myapp-dep-6c458b4c54-g4sd6 1/1 Running 0 10m
myapp-dep-6c458b4c54-kmq46 1/1 Running 0 19m
myapp-dep-6c458b4c54-mfq76 1/1 Running 0 10m
myapp-dep-6c458b4c54-sdmmv 1/1 Running 0 19m
myapp-dep-6c458b4c54-z7sj6 1/1 Running 0 10m
myapp-dep-6c8cbfb497-zmk69 0/1 Pending 0 0s
myapp-dep-6c8cbfb497-zmk69 0/1 Pending 0 0s
myapp-dep-6c8cbfb497-zmk69 0/1 ContainerCreating 0 0s
myapp-dep-6c8cbfb497-zmk69 1/1 Running 0 3s
# Monitor the Deployment rollout status.
[root@k8smaster data]# kubectl rollout status deployment myapp-dep
Waiting for deployment "myapp-dep" rollout to finish: 1 out of 5 new replicas have been updated...
# Resume the paused rollout.
[root@k8smaster ~]# kubectl rollout resume deployment myapp-dep
deployment.extensions/myapp-dep resumed
# Watch the Pod status.
[root@k8smaster data]# kubectl get pods -l app=myapp -w
NAME READY STATUS RESTARTS AGE
myapp-dep-6c458b4c54-g4sd6 1/1 Running 0 10m
myapp-dep-6c458b4c54-kmq46 1/1 Running 0 19m
myapp-dep-6c458b4c54-mfq76 1/1 Running 0 10m
myapp-dep-6c458b4c54-sdmmv 1/1 Running 0 19m
myapp-dep-6c458b4c54-z7sj6 1/1 Running 0 10m
myapp-dep-6c8cbfb497-zmk69 0/1 Pending 0 0s
myapp-dep-6c8cbfb497-zmk69 0/1 Pending 0 0s
myapp-dep-6c8cbfb497-zmk69 0/1 ContainerCreating 0 0s
myapp-dep-6c8cbfb497-zmk69 1/1 Running 0 3s
myapp-dep-6c458b4c54-g4sd6 1/1 Terminating 0 15m
myapp-dep-6c8cbfb497-89nwn 0/1 Pending 0 0s
myapp-dep-6c8cbfb497-89nwn 0/1 Pending 0 0s
myapp-dep-6c8cbfb497-89nwn 0/1 ContainerCreating 0 0s
myapp-dep-6c458b4c54-g4sd6 0/1 Terminating 0 15m
myapp-dep-6c8cbfb497-89nwn 1/1 Running 0 3s
myapp-dep-6c458b4c54-mfq76 1/1 Terminating 0 15m
myapp-dep-6c8cbfb497-sbdpb 0/1 Pending 0 0s
myapp-dep-6c8cbfb497-sbdpb 0/1 Pending 0 0s
myapp-dep-6c8cbfb497-sbdpb 0/1 ContainerCreating 0 0s
myapp-dep-6c458b4c54-mfq76 0/1 Terminating 0 15m
myapp-dep-6c8cbfb497-sbdpb 1/1 Running 0 1s
myapp-dep-6c458b4c54-z7sj6 1/1 Terminating 0 15m
myapp-dep-6c8cbfb497-6zhcr 0/1 Pending 0 0s
myapp-dep-6c8cbfb497-6zhcr 0/1 Pending 0 0s
myapp-dep-6c458b4c54-g4sd6 0/1 Terminating 0 15m
myapp-dep-6c458b4c54-g4sd6 0/1 Terminating 0 15m
myapp-dep-6c8cbfb497-6zhcr 0/1 ContainerCreating 0 0s
myapp-dep-6c8cbfb497-6zhcr 1/1 Running 0 1s
myapp-dep-6c458b4c54-sdmmv 1/1 Terminating 0 24m
myapp-dep-6c8cbfb497-wh6v8 0/1 Pending 0 0s
myapp-dep-6c8cbfb497-wh6v8 0/1 Pending 0 0s
myapp-dep-6c458b4c54-z7sj6 0/1 Terminating 0 15m
myapp-dep-6c8cbfb497-wh6v8 0/1 ContainerCreating 0 1s
myapp-dep-6c458b4c54-sdmmv 0/1 Terminating 0 24m
myapp-dep-6c8cbfb497-wh6v8 1/1 Running 0 2s
myapp-dep-6c458b4c54-kmq46 1/1 Terminating 0 24m
myapp-dep-6c458b4c54-kmq46 0/1 Terminating 0 24m
myapp-dep-6c458b4c54-z7sj6 0/1 Terminating 0 15m
myapp-dep-6c458b4c54-z7sj6 0/1 Terminating 0 15m
myapp-dep-6c458b4c54-sdmmv 0/1 Terminating 0 24m
myapp-dep-6c458b4c54-sdmmv 0/1 Terminating 0 24m
myapp-dep-6c458b4c54-kmq46 0/1 Terminating 0 24m
myapp-dep-6c458b4c54-kmq46 0/1 Terminating 0 24m
myapp-dep-6c458b4c54-mfq76 0/1 Terminating 0 15m
myapp-dep-6c458b4c54-mfq76 0/1 Terminating 0 15m
# Monitor the Deployment rollout status:
[root@k8smaster data]# kubectl rollout status deployment myapp-dep
Waiting for deployment "myapp-dep" rollout to finish: 1 out of 5 new replicas have been updated...
Waiting for deployment spec update to be observed...
Waiting for deployment spec update to be observed...
Waiting for deployment "myapp-dep" rollout to finish: 1 out of 5 new replicas have been updated...
Waiting for deployment "myapp-dep" rollout to finish: 1 out of 5 new replicas have been updated...
Waiting for deployment "myapp-dep" rollout to finish: 2 out of 5 new replicas have been updated...
Waiting for deployment "myapp-dep" rollout to finish: 2 out of 5 new replicas have been updated...
Waiting for deployment "myapp-dep" rollout to finish: 2 out of 5 new replicas have been updated...
Waiting for deployment "myapp-dep" rollout to finish: 3 out of 5 new replicas have been updated...
Waiting for deployment "myapp-dep" rollout to finish: 3 out of 5 new replicas have been updated...
Waiting for deployment "myapp-dep" rollout to finish: 3 out of 5 new replicas have been updated...
Waiting for deployment "myapp-dep" rollout to finish: 4 out of 5 new replicas have been updated...
Waiting for deployment "myapp-dep" rollout to finish: 4 out of 5 new replicas have been updated...
Waiting for deployment "myapp-dep" rollout to finish: 4 out of 5 new replicas have been updated...
Waiting for deployment "myapp-dep" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "myapp-dep" rollout to finish: 1 old replicas are pending termination...
deployment "myapp-dep" successfully rolled out
# List the ReplicaSets; a new ReplicaSet for v3 has appeared.
[root@k8smaster ~]# kubectl get rs -o wide
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
myapp-dep-678d8f65bc 0 0 0 40m myapp ikubernetes/myapp:v1 app=myapp,pod-template-hash=678d8f65bc,release=beta
myapp-dep-6c458b4c54 0 0 0 29m myapp ikubernetes/myapp:v2 app=myapp,pod-template-hash=6c458b4c54,release=beta
myapp-dep-6c8cbfb497 5 5 5 8m28s myapp ikubernetes/myapp:v3 app=myapp,pod-template-hash=6c8cbfb497,release=beta
myapp-deployment-558f94fb55 3 3 3 40d myapp ikubernetes/myapp:v1 app=myapp-deployment,pod-template-hash=558f94fb55
myrs 2 2 2 21h myapp-container ikubernetes/myapp:v2 app=myrs,release=canary
nginx-deployment-6f77f65499 1 1 1 40d nginx nginx app=nginx-deployment,pod-template-hash=6f77f65499
# View the revision history.
[root@k8smaster ~]# kubectl rollout history deployment myapp-dep
deployment.extensions/myapp-dep
REVISION CHANGE-CAUSE
1
2
3
# Roll back to a specific revision.
[root@k8smaster ~]# kubectl rollout undo deployment myapp-dep --to-revision=1
deployment.extensions/myapp-dep rolled back
# All 5 replicas are running on the v1 ReplicaSet again.
[root@k8smaster ~]# kubectl get rs -o wide
NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR
myapp-dep-678d8f65bc 5 5 5 47m myapp ikubernetes/myapp:v1 app=myapp,pod-template-hash=678d8f65bc,release=beta
myapp-dep-6c458b4c54 0 0 0 36m myapp ikubernetes/myapp:v2 app=myapp,pod-template-hash=6c458b4c54,release=beta
myapp-dep-6c8cbfb497 0 0 0 15m myapp ikubernetes/myapp:v3 app=myapp,pod-template-hash=6c8cbfb497,release=beta
myapp-deployment-558f94fb55 3 3 3 40d myapp ikubernetes/myapp:v1 app=myapp-deployment,pod-template-hash=558f94fb55
myrs 2 2 2 21h myapp-container ikubernetes/myapp:v2 app=myrs,release=canary
nginx-deployment-6f77f65499 1 1 1 40d nginx nginx app=nginx-deployment,pod-template-hash=6f77f65499
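Omitting --to-revision rolls back to the immediately preceding revision instead:
[root@k8smaster ~]# kubectl rollout undo deployment myapp-dep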
A DaemonSet ensures that a copy of a Pod runs on every node. It is commonly used to deploy cluster-wide logging, monitoring, or other system-management applications.
apiVersion: apps/v1 # API version
kind: DaemonSet # resource type
metadata: # metadata
spec: # desired state
  minReadySeconds: # minimum time, in seconds, a newly created Pod must be ready before it is considered available
  selector: # label selector
  template: # Pod template (required field)
    metadata: # metadata inside the template
    spec: # desired state of the template
  revisionHistoryLimit: # number of old history revisions to retain to allow rollback; defaults to 10
  updateStrategy: # update strategy used to replace existing DaemonSet Pods with new ones
status: # actual state
# Create a DaemonSet
[root@k8smaster data]# vim ds-demo.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat-ds
  namespace: default
spec:
  selector:
    matchLabels:
      app: filebeat
      release: stable
  template:
    metadata:
      labels:
        app: filebeat
        release: stable
    spec:
      containers:
      - name: filebeat
        image: ikubernetes/filebeat:5.6.5-alpine
        env:
        - name: REDIS_HOST
          value: redis.default.svc.cluster.local
        - name: REDIS_LOG_LEVEL
          value: info
[root@k8smaster data]# kubectl apply -f ds-demo.yaml
daemonset.apps/filebeat-ds created
[root@k8smaster data]# kubectl get pods
NAME READY STATUS RESTARTS AGE
filebeat-ds-2jtgt 1/1 Running 0 3m52s
filebeat-ds-bpbcd 1/1 Running 0 3m52s
memleak-pod 0/1 CrashLoopBackOff 102 45h
myrs-c9k2l 1/1 Running 2 42h
myrs-vn9gl 1/1 Running 2 43h
nginx-deployment-6f77f65499-8g24d 1/1 Running 5 41d
readiness-exec 1/1 Running 3 5d20h
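Note that only two filebeat Pods were created, one per worker node; the master node is skipped because of its node-role.kubernetes.io/master:NoSchedule taint. If the DaemonSet should run there as well, a toleration could be added under spec.template.spec, e.g. (a sketch):
tolerations:
- key: node-role.kubernetes.io/master
  effect: NoSchedule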
[root@k8smaster data]# kubectl exec -it filebeat-ds-2jtgt -- /bin/sh
/ # ps aux
PID USER TIME COMMAND
1 root 0:00 /usr/local/bin/filebeat -e -c /etc/filebeat/filebeat.yml
12 root 0:00 /bin/sh
17 root 0:00 ps aux
[root@k8smaster data]# kubectl set image daemonsets filebeat-ds filebeat=ikubernetes/filebeat:5.6.6-alpine
daemonset.extensions/filebeat-ds image updated
[root@k8smaster data]# kubectl get ds -o wide
NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR
filebeat-ds 2 2 1 1 1 7m38s filebeat ikubernetes/filebeat:5.6.6-alpine app=filebeat,release=stable
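The update proceeds one Pod at a time because in apps/v1 the default updateStrategy for a DaemonSet is RollingUpdate with maxUnavailable: 1; spelled out, the equivalent spec fragment would be:
spec:
  updateStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1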
The Job controller runs one-off tasks. When the process in a container finishes normally, the Pod is not restarted; instead it is left in the "Completed" state.
There are two kinds of Job objects: serial Jobs with a single work queue, and parallel Jobs with multiple work queues.
# Create a Job
[root@k8smaster data]# vim job-demo.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: myjob
spec:
  template:
    metadata:
      name: myapp-job
    spec:
      containers:
      - name: hello
        image: busybox
        command: ["echo","hello Job"]
      restartPolicy: Never
[root@k8smaster data]# kubectl apply -f job-demo.yaml
job.batch/myjob created
[root@k8smaster data]# kubectl get job
NAME COMPLETIONS DURATION AGE
myjob 1/1 7s 12s
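To confirm the output, the Job's Pod can be found via the job-name label that Kubernetes adds automatically (the Pod name below is a placeholder):
[root@k8smaster data]# kubectl get pods -l job-name=myjob
[root@k8smaster data]# kubectl logs <myjob-pod-name>   # should print: hello Job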
# Create a parallel Job
[root@k8smaster data]# kubectl delete -f job-demo.yaml
job.batch "myjob" deleted
[root@k8smaster data]# vim job-demo.yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: myjob
spec:
  parallelism: 2 # run two at a time
  completions: 5 # complete five runs in total
  template:
    metadata:
      name: myapp-job
    spec:
      containers:
      - name: hello
        image: busybox
        command: ["echo","hello Job"]
      restartPolicy: Never
[root@k8smaster data]# kubectl apply -f job-demo.yaml
job.batch/myjob created
[root@k8smaster data]# kubectl get jobs
NAME COMPLETIONS DURATION AGE
myjob 2/5 3s 3s
[root@k8smaster data]# kubectl get jobs
NAME COMPLETIONS DURATION AGE
myjob 5/5 4s 9s
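Two other Job spec fields worth knowing (the values here are illustrative): backoffLimit caps how many times failed Pods are retried (the default is 6), and activeDeadlineSeconds bounds the Job's total runtime:
spec:
  backoffLimit: 4             # give up after 4 failed retries
  activeDeadlineSeconds: 100  # terminate the Job if it runs longer than 100 seconds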
The CronJob controller manages when Job resources run, executing tasks periodically, much like Linux crontab.
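The schedule field uses standard five-field cron syntax: minute, hour, day of month, month, day of week. For example:
# "*/1 * * * *"  -> every minute (used below)
# "0 3 * * 0"    -> at 03:00 every Sunday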
# Define a CronJob.
[root@k8smaster data]# kubectl delete -f job-demo.yaml
job.batch "myjob" deleted
[root@k8smaster data]# cp job-demo.yaml conrjob-demo.yaml
[root@k8smaster data]# vim conrjob-demo.yaml
apiVersion: batch/v2alpha1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            command: ["echo","hello job"]
          restartPolicy: OnFailure
[root@k8smaster data]# vim /etc/kubernetes/manifests/kube-apiserver.yaml
Add the following at line 14:
- --runtime-config=batch/v2alpha1=true
[root@k8smaster data]# systemctl restart kubelet.service
[root@k8smaster data]# kubectl api-versions
admissionregistration.k8s.io/v1beta1
apiextensions.k8s.io/v1beta1
apiregistration.k8s.io/v1
apiregistration.k8s.io/v1beta1
apps/v1
apps/v1beta1
apps/v1beta2
authentication.k8s.io/v1
authentication.k8s.io/v1beta1
authorization.k8s.io/v1
authorization.k8s.io/v1beta1
autoscaling/v1
autoscaling/v2beta1
autoscaling/v2beta2
batch/v1
batch/v1beta1
batch/v2alpha1 # batch/v2alpha1 is present, so the runtime-config change took effect.
certificates.k8s.io/v1beta1
coordination.k8s.io/v1
coordination.k8s.io/v1beta1
events.k8s.io/v1beta1
extensions/v1beta1
networking.k8s.io/v1
networking.k8s.io/v1beta1
node.k8s.io/v1beta1
policy/v1beta1
rbac.authorization.k8s.io/v1
rbac.authorization.k8s.io/v1beta1
scheduling.k8s.io/v1
scheduling.k8s.io/v1beta1
storage.k8s.io/v1
storage.k8s.io/v1beta1
v1
[root@k8smaster data]# kubectl apply -f conrjob-demo.yaml
cronjob.batch/hello created
[root@k8smaster data]# kubectl get cronjobs.batch
NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE
hello */1 * * * * False 1 7s 2m36s
[root@k8smaster ~]# kubectl get job
NAME COMPLETIONS DURATION AGE
hello-1576131060 1/1 108s 3m3s
hello-1576131120 1/1 27s 2m3s
hello-1576131180 1/1 2s 63s
hello-1576131240 1/1 2s 2s
[root@k8smaster data]# kubectl logs hello-1576131240