Container hooks; container probes; Pod restart policy; Pod termination process;
Init containers;
How do init containers differ from the main containers?
Init containers run to completion, one after another, before any main container starts, and they have no readinessProbe…
[root@k8s-master01 pod-2]# cat init.yaml
apiVersion: v1
kind: Pod
metadata:
name: myapp-pod
labels:
app: myapp
spec:
initContainers:
- name: init-myservice
image: busybox:1.28
imagePullPolicy: IfNotPresent
command: ['sh', '-c', "until nslookup myservice.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done"]
- name: init-mydb
image: busybox:1.28
imagePullPolicy: IfNotPresent
command: ['sh', '-c', "until nslookup mydb.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for mydb; sleep 2; done"]
containers:
- name: myapp-container
image: busybox:1.28
imagePullPolicy: IfNotPresent
command: ['sh', '-c', 'echo The app is running! && sleep 3600']
[root@k8s-master01 pod-2]# kubectl apply -f init.yaml
pod/myapp-pod created
[root@k8s-master01 ~]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
myapp-pod 0/1 Pending 0 0s
myapp-pod 0/1 Pending 0 0s
myapp-pod 0/1 Init:0/2 0 0s
myapp-pod 0/1 Init:0/2 0 0s
myapp-pod 0/1 Init:0/2 0 1s
# If the init containers cannot come up, the main container never comes up
[root@k8s-master01 pod-2]# kubectl delete -f init.yaml
pod "myapp-pod" deleted
[root@k8s-master01 pod-2]# cat init.yaml
apiVersion: v1
kind: Pod
metadata:
name: myapp-pod
labels:
app: myapp
spec:
initContainers:
- name: init-myservice
image: busybox:1.28
imagePullPolicy: IfNotPresent
command: ['sh', '-c', "sleep 2"]
- name: init-mydb
image: busybox:1.28
imagePullPolicy: IfNotPresent
command: ['sh', '-c', "until nslookup mydb.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for mydb; sleep 2; done"]
containers:
- name: myapp-container
image: busybox:1.28
imagePullPolicy: IfNotPresent
command: ['sh', '-c', 'echo The app is running! && sleep 3600']
[root@k8s-master01 pod-2]# kubectl apply -f init.yaml
pod/myapp-pod created
[root@k8s-master01 ~]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
myapp-pod 0/1 Pending 0 0s
myapp-pod 0/1 Pending 0 0s
myapp-pod 0/1 Init:0/2 0 0s
myapp-pod 0/1 Init:0/2 0 0s
myapp-pod 0/1 Init:0/2 0 2s
myapp-pod 0/1 Init:1/2 0 4s
myapp-pod 0/1 Init:1/2 0 5s
[root@k8s-master01 pod-2]# kubectl delete -f init.yaml
pod "myapp-pod" deleted
[root@k8s-master01 pod-2]# cat init.yaml
apiVersion: v1
kind: Pod
metadata:
name: myapp-pod
labels:
app: myapp
spec:
initContainers:
- name: init-myservice
image: busybox:1.28
imagePullPolicy: IfNotPresent
command: ['sh', '-c', "sleep 2"]
- name: init-mydb
image: busybox:1.28
imagePullPolicy: IfNotPresent
command: ['sh', '-c', "sleep 2"]
containers:
- name: myapp-container
image: busybox:1.28
imagePullPolicy: IfNotPresent
command: ['sh', '-c', 'echo The main container started normally! && sleep 3600']
[root@k8s-master01 pod-2]# kubectl apply -f init.yaml
pod/myapp-pod created
[root@k8s-master01 ~]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
myapp-pod 0/1 Pending 0 0s
myapp-pod 0/1 Pending 0 0s
myapp-pod 0/1 Init:0/2 0 0s
myapp-pod 0/1 Init:0/2 0 1s
myapp-pod 0/1 Init:0/2 0 1s
myapp-pod 0/1 Init:1/2 0 4s
myapp-pod 0/1 Init:1/2 0 5s
myapp-pod 0/1 PodInitializing 0 7s
myapp-pod 1/1 Running 0 8s
# All init containers must complete successfully before the main container can run
Next, the main container runs an nginx service while an init container generates its index.html file
[root@k8s-master01 pod-2]# cat init-1.yaml
apiVersion: v1
kind: Pod
metadata:
name: initnginx
spec:
initContainers:
- name: install
image: docker.io/library/busybox:1.28
imagePullPolicy: IfNotPresent
command:
- wget
- "-O"
- "/work-dir/index.html"
- "https://www.baidu.com"
volumeMounts:
- name: workdir
mountPath: /work-dir
containers:
- name: nginx
image: docker.io/xianchao/nginx:v1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
volumeMounts:
- name: workdir
mountPath: /usr/share/nginx/html
volumes:
- name: workdir
emptyDir: {}
[root@k8s-master01 pod-2]# kubectl apply -f init-1.yaml
pod/initnginx created
[root@k8s-master01 pod-2]# kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
initnginx 1/1 Running 0 14s 10.244.85.195 k8s-node01 <none> <none>
[root@k8s-master01 pod-2]# curl 10.244.85.195
...
[root@k8s-master01 pod-2]# kubectl exec -it initnginx -c nginx -- /bin/bash
root@initnginx:/# ls /usr/share/nginx/html/
index.html
postStart and preStop
[root@k8s-master01 pod-2]# kubectl explain pod.spec.containers.lifecycle
# View the help and syntax
[root@k8s-master01 pod-2]# kubectl delete -f init-1.yaml
pod "initnginx" deleted
[root@k8s-master01 pod-2]# cat pre-start.yaml
apiVersion: v1
kind: Pod
metadata:
name: life-demo
spec:
containers:
- name: lifecycle-demo-container
image: docker.io/xianchao/nginx:v1
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
exec:
command: ["/bin/sh", "-c","echo 'lifecycle hookshandler' > /usr/share/nginx/html/test.html"]
preStop:
exec:
command:
- "/bin/sh"
- "-c"
- "nginx -s stop"
[root@k8s-master01 pod-2]# kubectl apply -f pre-start.yaml
pod/life-demo created
[root@k8s-master01 pod-2]# kubectl get pods
NAME READY STATUS RESTARTS AGE
life-demo 1/1 Running 0 25s
[root@k8s-master01 pod-2]# kubectl exec -it life-demo -- /bin/bash
root@life-demo:/# cd /usr/share/nginx/html/
root@life-demo:/usr/share/nginx/html# cat test.html
lifecycle hookshandler
# Test succeeded
[root@k8s-master01 pod-2]# kubectl delete -f pre-start.yaml
pod "life-demo" deleted
# Clean up
A pod has many user-defined behaviors across its lifecycle (all of them are sketched together below):
init containers perform initialization
a post-start hook (postStart) can run after the main container starts
a pre-stop hook (preStop) can run before the main container terminates
while the main container runs, health checks can be performed: startupProbe, livenessProbe, readinessProbe
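All of these can coexist in a single pod spec. A minimal sketch (reusing busybox:1.28 from the examples above; the pod name, file path, and echo messages are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-all
spec:
  initContainers:            # run to completion, in order, before the main container
  - name: init
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', 'echo init done']
  containers:
  - name: main
    image: busybox:1.28
    imagePullPolicy: IfNotPresent
    command: ['sh', '-c', 'touch /tmp/ok && sleep 3600']
    lifecycle:
      postStart:             # runs right after the container starts
        exec:
          command: ['sh', '-c', 'echo started']
      preStop:               # runs right before the container is stopped
        exec:
          command: ['sh', '-c', 'echo stopping']
    startupProbe:            # the probes below are held off until this succeeds
      exec:
        command: ['cat', '/tmp/ok']
    livenessProbe:           # restarts the container on repeated failure
      exec:
        command: ['cat', '/tmp/ok']
    readinessProbe:          # controls whether services send traffic to the pod
      exec:
        command: ['cat', '/tmp/ok']
```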
A quick test
[root@k8s-master01 ~]# mkdir pod-4
[root@k8s-master01 ~]# cd pod-4
[root@k8s-master01 pod-4]# cat check.yaml
apiVersion: v1
kind: Pod
metadata:
name: check
namespace: default
labels:
app: check
spec:
containers:
- name: check
image: busybox:1.28
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- sleep 10; exit
# Make the container exit after running for 10 seconds
[root@k8s-master01 pod-4]# kubectl apply -f check.yaml
pod/check created
[root@k8s-master01 pod-2]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
check 0/1 Pending 0 0s
check 0/1 Pending 0 0s
check 0/1 ContainerCreating 0 0s
check 0/1 ContainerCreating 0 1s
check 1/1 Running 0 2s
check 0/1 Completed 0 12s
check 1/1 Running 1 (2s ago) 13s
check 0/1 Completed 1 (12s ago) 23s
# Watch the pod status: restartPolicy defaults to Always, so the kubelet keeps restarting the container after it exits
[root@k8s-master01 pod-4]# kubectl delete -f check.yaml
pod "check" deleted
# Clean up
Pod probe attributes
[root@k8s-master01 pod-4]# kubectl explain pod.spec.containers.startupProbe.exec
# View help
[root@k8s-master01 pod-4]# cat startup-exec.yaml
apiVersion: v1
kind: Pod
metadata:
name: startupprobe
spec:
containers:
- name: startup
image: xianchao/tomcat-8.5-jre8:v1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
startupProbe:
exec:
command:
- "/bin/sh"
- "-c"
- "ps aux | grep tomcat"
initialDelaySeconds: 20 # how long after container start before probing begins
periodSeconds: 20 # interval between probes
timeoutSeconds: 10 # how long to wait for a probe response before timing out
successThreshold: 1 # how many consecutive successes count as success
failureThreshold: 3 # how many consecutive failures count as failure
[root@k8s-master01 pod-4]# kubectl apply -f startup-exec.yaml
pod/startupprobe created
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
startupprobe 0/1 Pending 0 0s
startupprobe 0/1 Pending 0 0s
startupprobe 0/1 ContainerCreating 0 0s
startupprobe 0/1 ContainerCreating 0 0s
startupprobe 0/1 Running 0 1s
startupprobe 0/1 Running 0 15s
startupprobe 0/1 Running 0 20s
startupprobe 1/1 Running 0 20s
# Observe the probe timing
[root@k8s-master01 pod-4]# kubectl delete -f startup-exec.yaml
pod "startupprobe" deleted
# Clean up
[root@k8s-master01 pod-4]# cat startup-exec.yaml
apiVersion: v1
kind: Pod
metadata:
name: startupprobe
spec:
containers:
- name: startup
image: xianchao/tomcat-8.5-jre8:v1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
startupProbe:
exec:
command:
- "/bin/sh"
- "-c"
- "aa ps aux | grep tomcat1" # 修改为一条不存在的命令测试
initialDelaySeconds: 20 #容器启动后多久开始探测
periodSeconds: 20 #执行探测的时间间隔
timeoutSeconds: 10 #探针执行检测请求后,等待响应的超时时间
successThreshold: 1 #成功多少次才算成功
failureThreshold: 3 #失败多少次才算失败
[root@k8s-master01 pod-4]# kubectl apply -f startup-exec.yaml
pod/startupprobe created
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
startupprobe 0/1 Pending 0 0s
startupprobe 0/1 Pending 0 0s
startupprobe 0/1 ContainerCreating 0 0s
startupprobe 0/1 ContainerCreating 0 0s
startupprobe 0/1 Running 0 2s
startupprobe 0/1 Running 1 (0s ago) 81s
startupprobe 0/1 Running 2 (1s ago) 2m41s
startupprobe 0/1 Running 3 (1s ago) 4m1s
# Observe the probing
[root@k8s-master01 pod-4]# kubectl delete -f startup-exec.yaml
pod "startupprobe" deleted
# Clean up
Based on the test above: why 60 seconds? Why 80 seconds? Probing starts initialDelaySeconds=20s after the container starts and then fails every periodSeconds=20s; after failureThreshold=3 failures (3 x 20s = 60s of failing) the kubelet restarts the container, so the first restart lands at about 20 + 60 = 80s, and each subsequent restart about 80s after the previous one.
tcpSocket mode
[root@k8s-master01 pod-4]# kubectl explain pod.spec.containers.startupProbe.tcpSocket
# View help
[root@k8s-master01 pod-4]# cat startup-tcpsocket.yaml
apiVersion: v1
kind: Pod
metadata:
name: startupprobe
spec:
containers:
- name: startup
image: xianchao/tomcat-8.5-jre8:v1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
startupProbe:
tcpSocket:
port: 8080
initialDelaySeconds: 20 # how long after container start before probing begins
periodSeconds: 20 # interval between probes
timeoutSeconds: 10 # how long to wait for a probe response before timing out
successThreshold: 1 # how many consecutive successes count as success
failureThreshold: 3 # how many consecutive failures count as failure
[root@k8s-master01 pod-4]# kubectl apply -f startup-tcpsocket.yaml
pod/startupprobe created
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
startupprobe 0/1 Pending 0 0s
startupprobe 0/1 Pending 0 0s
startupprobe 0/1 ContainerCreating 0 0s
startupprobe 0/1 ContainerCreating 0 1s
startupprobe 0/1 Running 0 1s
startupprobe 0/1 Running 0 40s
startupprobe 1/1 Running 0 40s
# Test tcpSocket
[root@k8s-master01 pod-4]# kubectl delete -f startup-tcpsocket.yaml
pod "startupprobe" deleted
# Clean up
httpGet mode
[root@k8s-master01 pod-4]# kubectl explain pod.spec.containers.startupProbe.httpGet.
# View help
[root@k8s-master01 pod-4]# cat startup-httpget.yaml
apiVersion: v1
kind: Pod
metadata:
name: startupprobe
spec:
containers:
- name: startup
image: xianchao/tomcat-8.5-jre8:v1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
startupProbe:
httpGet:
path: /
port: 8080
initialDelaySeconds: 20 # how long after container start before probing begins
periodSeconds: 20 # interval between probes
timeoutSeconds: 10 # how long to wait for a probe response before timing out
successThreshold: 1 # how many consecutive successes count as success
failureThreshold: 3 # how many consecutive failures count as failure
[root@k8s-master01 pod-4]# kubectl apply -f startup-httpget.yaml
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
startupprobe 0/1 Pending 0 0s
startupprobe 0/1 Pending 0 0s
startupprobe 0/1 ContainerCreating 0 0s
startupprobe 0/1 ContainerCreating 0 0s
startupprobe 0/1 Running 0 1s
startupprobe 0/1 Running 0 41s
startupprobe 1/1 Running 0 41s
# Test httpGet
[root@k8s-master01 pod-4]# kubectl delete -f startup-httpget.yaml
pod "startupprobe" deleted
# Clean up
A note on startup probe timing: the exact intervals can vary slightly between probe types, which has little practical impact.
exec mode
[root@k8s-master01 pod-4]# kubectl explain pod.spec.containers.livenessProbe.exec.
# View help
[root@k8s-master01 pod-4]# cat liveness-exec.yaml
apiVersion: v1
kind: Pod
metadata:
name: liveness-exec
labels:
app: liveness
spec:
containers:
- name: liveness
image: busybox:1.28
imagePullPolicy: IfNotPresent
args: # create, then later remove, the file the probe checks
- /bin/sh
- -c
- touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
livenessProbe:
initialDelaySeconds: 10 # delay before the first probe
periodSeconds: 5 # probe interval
exec:
command:
- cat
- /tmp/healthy
[root@k8s-master01 pod-4]# kubectl apply -f liveness-exec.yaml
pod/liveness-exec created
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
liveness-exec 0/1 Pending 0 0s
liveness-exec 0/1 Pending 0 0s
liveness-exec 0/1 ContainerCreating 0 0s
liveness-exec 0/1 ContainerCreating 0 1s
liveness-exec 1/1 Running 0 2s
liveness-exec 1/1 Running 1 (1s ago) 76s
liveness-exec 1/1 Running 2 (0s ago) 2m31s
# Test exec
[root@k8s-master01 pod-4]# kubectl delete -f liveness-exec.yaml
pod "liveness-exec" deleted
# Clean up
httpGet mode
[root@k8s-master01 pod-4]# kubectl explain pod.spec.containers.livenessProbe.httpGet.
# View help
[root@k8s-master01 pod-4]# ctr -n k8s.io images import springboot.tar.gz
[root@k8s-node01 images]# ctr -n k8s.io images import springboot.tar.gz
[root@k8s-node02 images]# ctr -n k8s.io images import springboot.tar.gz
# Import the image on each node
[root@k8s-master01 pod-4]# cat liveness-http.yaml
apiVersion: v1
kind: Pod
metadata:
name: liveness-http
labels:
test: liveness
spec:
containers:
- name: liveness
image: mydlqclub/springboot-helloworld:0.0.1
imagePullPolicy: IfNotPresent
livenessProbe:
initialDelaySeconds: 20 # delay before the first probe
periodSeconds: 5 # retry interval
timeoutSeconds: 10 # probe timeout
httpGet:
scheme: HTTP
port: 8081
path: /actuator/health
[root@k8s-master01 pod-4]# kubectl apply -f liveness-http.yaml
pod/liveness-http created
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
liveness-http 0/1 Pending 0 0s
liveness-http 0/1 Pending 0 0s
liveness-http 0/1 ContainerCreating 0 0s
liveness-http 0/1 ContainerCreating 0 0s
liveness-http 1/1 Running 0 1s
[root@k8s-master01 pod-4]# kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
liveness-http 1/1 Running 0 5m24s 10.244.58.209 k8s-node02 <none> <none>
[root@k8s-master01 pod-4]# curl 10.244.58.209:8081/actuator/health
{"status":"UP"}
# Test httpGet
[root@k8s-master01 pod-4]# kubectl delete -f liveness-http.yaml
pod "liveness-http" deleted
# Clean up
tcpSocket mode
[root@k8s-master01 pod-4]# kubectl explain pod.spec.containers.livenessProbe.tcpSocket.
# View help
[root@k8s-master01 pod-4]# cat liveness-tcp.yaml
apiVersion: v1
kind: Pod
metadata:
name: liveness-tcp
labels:
app: liveness
spec:
containers:
- name: liveness
image: docker.io/xianchao/nginx:v1
imagePullPolicy: IfNotPresent
livenessProbe:
initialDelaySeconds: 15
periodSeconds: 20
tcpSocket:
port: 80
[root@k8s-master01 pod-4]# kubectl apply -f liveness-tcp.yaml
pod/liveness-tcp created
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
liveness-tcp 0/1 Pending 0 0s
liveness-tcp 0/1 Pending 0 0s
liveness-tcp 0/1 ContainerCreating 0 0s
liveness-tcp 0/1 ContainerCreating 0 0s
liveness-tcp 1/1 Running 0 2s
# Test tcpSocket
[root@k8s-master01 pod-4]# kubectl delete -f liveness-tcp.yaml
pod "liveness-tcp" deleted
# Clean up
readinessProbe (despite the file name readiness-exec.yaml, the example below uses httpGet)
[root@k8s-master01 pod-4]# kubectl explain pod.spec.containers.readinessProbe.
# View help
[root@k8s-master01 pod-4]# cat readiness-exec.yaml
apiVersion: v1
kind: Service
metadata:
name: springboot
labels:
app: springboot
spec:
type: NodePort
ports:
- name: server
port: 8080
targetPort: 8080
nodePort: 31180
- name: management
port: 8081
targetPort: 8081
nodePort: 31181
selector:
app: springboot
---
apiVersion: v1
kind: Pod
metadata:
name: springboot
labels:
app: springboot
spec:
containers:
- name: springboot
image: mydlqclub/springboot-helloworld:0.0.1
imagePullPolicy: IfNotPresent
ports:
- name: server
containerPort: 8080
- name: management
containerPort: 8081
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 10
httpGet:
scheme: HTTP
port: 8081
path: /actuator/health
[root@k8s-master01 pod-4]# kubectl apply -f readiness-exec.yaml
service/springboot created
pod/springboot created
[root@k8s-master01 pod-4]# kubectl get pods -l app=springboot -w
NAME READY STATUS RESTARTS AGE
springboot 0/1 Pending 0 1s
springboot 0/1 Pending 0 3s
springboot 0/1 ContainerCreating 0 3s
springboot 0/1 ContainerCreating 0 6s
springboot 0/1 Running 0 9s
springboot 1/1 Running 0 49s
[root@k8s-master01 pod-4]# kubectl get svc -w
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 4d2h
springboot NodePort 10.106.29.40 <none> 8080:31180/TCP,8081:31181/TCP 0s
# A pod is only associated with the service once it is ready, which is why readiness probes matter
Note: liveness and readiness probes are not sequential; they run in parallel (although when a startupProbe is configured, both only begin after it succeeds)
[root@k8s-master01 pod-4]# cat start-read-live.yaml
apiVersion: v1
kind: Service
metadata:
name: springboot-live
labels:
app: springboot
spec:
type: NodePort
ports:
- name: server
port: 8080
targetPort: 8080
nodePort: 31180
- name: management
port: 8081
targetPort: 8081
nodePort: 31181
selector:
app: springboot
---
apiVersion: v1
kind: Pod
metadata:
name: springboot-live
labels:
app: springboot
spec:
containers:
- name: springboot
image: mydlqclub/springboot-helloworld:0.0.1
imagePullPolicy: IfNotPresent
ports:
- name: server
containerPort: 8080
- name: management
containerPort: 8081
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 10
httpGet:
scheme: HTTP
port: 8081
path: /actuator/health
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 10
httpGet:
scheme: HTTP
port: 8081
path: /actuator/health
startupProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 10
httpGet:
scheme: HTTP
port: 8081
path: /actuator/health
[root@k8s-master01 pod-4]# kubectl apply -f start-read-live.yaml
service/springboot-live created
pod/springboot-live created
[root@k8s-master01 pod-4]# kubectl exec -it springboot-live -- kill 1
# Run this once the container is up
[root@k8s-master01 pod-4]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
springboot-live 0/1 Pending 0 0s
springboot-live 0/1 Pending 0 0s
springboot-live 0/1 ContainerCreating 0 0s
springboot-live 0/1 ContainerCreating 0 1s
springboot-live 0/1 Running 0 4s
springboot-live 0/1 Running 0 26s
# Note why two entries appear here: we configured both liveness and readiness probes
springboot-live 1/1 Running 0 26s
springboot-live 0/1 Error 0 61s
springboot-live 0/1 Running 1 (3s ago) 63s
springboot-live 0/1 Running 1 (26s ago) 86s
# Same as above
springboot-live 1/1 Running 1 (26s ago) 86s
[root@k8s-master01 pod-4]# kubectl delete -f start-read-live.yaml
service "springboot-live" deleted
pod "springboot-live" deleted
# Clean up
The three probes can be roughly understood as:
startupProbe checks whether the application in the container has finished starting, and holds off the other probes until it succeeds
livenessProbe checks whether the running container is still healthy, restarting it on repeated failure
readinessProbe checks whether the service inside the container is ready to receive traffic
[root@k8s-master01 rs]# kubectl explain replicaset.
# View help
ctr -n k8s.io images import frontend.tar.gz
ctr -n k8s.io images import myapp-blue-v1.tar.gz
ctr -n k8s.io images import myapp-blue-v2.tar.gz
ctr -n k8s.io images import myapp-v2.tar.gz
# Import the images
[root@k8s-master01 rs]# cat replicaset.yaml
apiVersion: apps/v1
kind: ReplicaSet
metadata:
name: frontend
namespace: default
labels:
app: guestbook
tier: frontend
spec:
replicas: 3
selector:
matchLabels:
tier1: frontend1
template:
metadata:
labels:
tier1: frontend1
spec:
containers:
- name: php-redis
image: docker.io/yecc/gcr.io-google_samples-gb-frontend:v3
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
startupProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 10
httpGet:
scheme: HTTP
port: 80
path: /
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 10
httpGet:
scheme: HTTP
port: 80
path: /
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 10
httpGet:
scheme: HTTP
port: 80
path: /
[root@k8s-master01 rs]# kubectl apply -f replicaset.yaml
replicaset.apps/frontend created
[root@k8s-master01 rs]# kubectl get pod -w
NAME READY STATUS RESTARTS AGE
frontend-2h7l7 0/1 Pending 0 0s
frontend-2h7l7 0/1 Pending 0 0s
frontend-2xmw6 0/1 Pending 0 0s
frontend-6cb5q 0/1 Pending 0 0s
frontend-2xmw6 0/1 Pending 0 0s
frontend-6cb5q 0/1 Pending 0 0s
frontend-2xmw6 0/1 ContainerCreating 0 0s
frontend-2h7l7 0/1 ContainerCreating 0 0s
frontend-6cb5q 0/1 ContainerCreating 0 0s
frontend-2h7l7 0/1 ContainerCreating 0 1s
frontend-2xmw6 0/1 ContainerCreating 0 1s
frontend-6cb5q 0/1 ContainerCreating 0 1s
frontend-2xmw6 0/1 Running 0 2s
frontend-6cb5q 0/1 Running 0 2s
frontend-2h7l7 0/1 Running 0 2s
frontend-2h7l7 0/1 Running 0 21s
frontend-2h7l7 1/1 Running 0 21s
frontend-6cb5q 0/1 Running 0 21s
frontend-6cb5q 1/1 Running 0 21s
frontend-2xmw6 0/1 Running 0 22s
frontend-2xmw6 1/1 Running 0 22s
[root@k8s-master01 rs]# kubectl get rs
NAME DESIRED CURRENT READY AGE
frontend 3 3 0 10s
# Create the ReplicaSet
[root@k8s-master01 rs]# kubectl get pods
NAME READY STATUS RESTARTS AGE
frontend-2h7l7 1/1 Running 0 57s
frontend-2xmw6 1/1 Running 0 57s
frontend-6cb5q 1/1 Running 0 57s
[root@k8s-master01 rs]# kubectl delete pod frontend-2h7l7
pod "frontend-2h7l7" deleted
[root@k8s-master01 rs]# kubectl get pod -w
NAME READY STATUS RESTARTS AGE
frontend-2h7l7 1/1 Running 0 75s
frontend-2xmw6 1/1 Running 0 75s
frontend-6cb5q 1/1 Running 0 75s
frontend-2h7l7 1/1 Terminating 0 79s
frontend-g6prf 0/1 Pending 0 1s
frontend-g6prf 0/1 Pending 0 1s
frontend-g6prf 0/1 ContainerCreating 0 1s
frontend-2h7l7 1/1 Terminating 0 80s
frontend-g6prf 0/1 ContainerCreating 0 2s
frontend-2h7l7 0/1 Terminating 0 81s
frontend-2h7l7 0/1 Terminating 0 81s
frontend-2h7l7 0/1 Terminating 0 81s
frontend-g6prf 0/1 Running 0 3s
frontend-g6prf 0/1 Running 0 27s
frontend-g6prf 1/1 Running 0 27s
# Delete a pod managed by the ReplicaSet; the ReplicaSet notices the shortfall and creates a replacement pod
[root@k8s-master01 rs]# cat replicaset.yaml |grep replicas:
replicas: 4
[root@k8s-master01 rs]# kubectl apply -f replicaset.yaml
replicaset.apps/frontend configured
[root@k8s-master01 rs]# kubectl get pods
NAME READY STATUS RESTARTS AGE
frontend-2xmw6 1/1 Running 0 31m
frontend-69p98 1/1 Running 0 52s
frontend-6cb5q 1/1 Running 0 31m
frontend-g6prf 1/1 Running 0 30m
# Scale up by editing replicas in the yaml file and re-applying it
[root@k8s-master01 rs]# cat replicaset.yaml |grep replicas:
replicas: 2
[root@k8s-master01 rs]# kubectl apply -f replicaset.yaml
replicaset.apps/frontend configured
[root@k8s-master01 rs]# kubectl get pods
NAME READY STATUS RESTARTS AGE
frontend-2xmw6 1/1 Running 0 33m
frontend-g6prf 1/1 Running 0 31m
# Scale down
[root@k8s-master01 rs]# cat replicaset.yaml |grep image:
image: docker.io/ikubernetes/myapp:v2
[root@k8s-master01 rs]# kubectl apply -f replicaset.yaml
replicaset.apps/frontend configured
[root@k8s-master01 rs]# kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
frontend-2xmw6 1/1 Running 0 35m 10.244.85.205 k8s-node01 <none> <none>
frontend-g6prf 1/1 Running 0 33m 10.244.85.206 k8s-node01 <none> <none>
[root@k8s-master01 rs]# curl 10.244.85.205 |head -n3
% Total % Received % Xferd Average Speed Time Time Time Current
Dload Upload Total Spent Left Speed
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 100 921 100 921 0 0 527k 0 --:--:-- --:--:-- --:--:-- 899k
<html ng-app="redis">
<head>
<title>Guestbook</title>
# The ReplicaSet did not update the image of the running containers; they still serve the old content
[root@k8s-master01 rs]# kubectl delete pod frontend-2xmw6
pod "frontend-2xmw6" deleted
[root@k8s-master01 rs]# kubectl delete pod frontend-g6prf
pod "frontend-g6prf" deleted
[root@k8s-master01 rs]# kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
frontend-8cdmn 1/1 Running 0 2m13s 10.244.58.221 k8s-node02 <none> <none>
frontend-mk6ln 0/1 Running 0 25s 10.244.85.207 k8s-node01 <none> <none>
[root@k8s-master01 rs]# curl 10.244.58.221
Hello MyApp | Version: v2 | <a href="hostname.html">Pod Name</a>
# A ReplicaSet cannot complete an update on its own; the pods must be deleted by hand
[root@k8s-master01 rs]# kubectl delete -f replicaset.yaml
replicaset.apps "frontend" deleted
# Clean up
For a production upgrade you could delete one pod, watch it for a while, and delete the next once everything looks fine, but that requires repeated manual intervention. In practice, production environments generally use a blue-green release: with an existing rs1, create a second controller rs2 and modify the service's label selector so the service matches rs2's pods. That is a true blue-green release, but it needs careful deployment planning. Kubernetes ships a controller built on top of ReplicaSet that automates this: the Deployment.
[root@k8s-master01 ~]# kubectl explain deploy.spec.strategy.rollingUpdate.
# View help
maxSurge: fractional results are rounded up
maxUnavailable: fractional results are rounded down
replicas: 5
maxSurge: 25% 5*25%=1.25 -> at most 5+2=7 pods during the update
maxUnavailable: 25% 5*25%=1.25 -> at least 5-1=4 pods stay available
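Written out as a Deployment strategy block, the arithmetic above corresponds to the following (a sketch showing only the relevant fields):

```yaml
spec:
  replicas: 5
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 25%        # ceil(5 * 0.25) = 2 -> at most 5 + 2 = 7 pods during the update
      maxUnavailable: 25%  # floor(5 * 0.25) = 1 -> at least 5 - 1 = 4 pods stay available
```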
[root@k8s-master01 ~]# mkdir deployment
[root@k8s-master01 ~]# cd deployment/
[root@k8s-master01 deployment]# kubectl explain deploy.
# View help
ctr -n k8s.io images import myapp-blue-v1.tar.gz
ctr -n k8s.io images import myapp-blue-v2.tar.gz
# Import the images
[root@k8s-master01 deployment]# cat deploy-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: myapp-v1
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: myapp
version: v1
template:
metadata:
labels:
app: myapp
version: v1
spec:
containers:
- name: myapp
image: janakiramm/myapp:v1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
startupProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 10
httpGet:
port: 80
path: /
scheme: HTTP
livenessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 10
httpGet:
port: 80
path: /
scheme: HTTP
readinessProbe:
initialDelaySeconds: 20
periodSeconds: 5
timeoutSeconds: 10
httpGet:
port: 80
path: /
scheme: HTTP
[root@k8s-master01 deployment]# kubectl apply -f deploy-demo.yaml
deployment.apps/myapp-v1 created
[root@k8s-master01 deployment]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-v1-58bdffcdd7-bn79v 1/1 Running 0 77s
myapp-v1-58bdffcdd7-cqz99 1/1 Running 0 77s
[root@k8s-master01 deployment]# kubectl get rs
NAME DESIRED CURRENT READY AGE
myapp-v1-58bdffcdd7 2 2 2 79s
[root@k8s-master01 deployment]# kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
myapp-v1 2/2 2 2 86s
# Create the deployment
[root@k8s-master01 deployment]# cat deploy-demo.yaml |grep replicas:
replicas: 3
[root@k8s-master01 deployment]# kubectl apply -f deploy-demo.yaml
deployment.apps/myapp-v1 configured
[root@k8s-master01 deployment]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-v1-58bdffcdd7-b64k7 1/1 Running 0 32s
myapp-v1-58bdffcdd7-bn79v 1/1 Running 0 3m3s
myapp-v1-58bdffcdd7-cqz99 1/1 Running 0 3m3s
[root@k8s-master01 deployment]# kubectl get rs
NAME DESIRED CURRENT READY AGE
myapp-v1-58bdffcdd7 3 3 3 3m9s
[root@k8s-master01 deployment]# kubectl get deploy
NAME READY UP-TO-DATE AVAILABLE AGE
myapp-v1 3/3 3 3 3m11s
# Scale up by changing replicas in the yaml file
[root@k8s-master01 deployment]# cat deploy-demo.yaml |grep replicas:
replicas: 2
[root@k8s-master01 deployment]# kubectl apply -f deploy-demo.yaml
deployment.apps/myapp-v1 configured
[root@k8s-master01 deployment]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-v1-58bdffcdd7-bn79v 1/1 Running 0 4m15s
myapp-v1-58bdffcdd7-cqz99 1/1 Running 0 4m15s
[root@k8s-master01 deployment]# kubectl get rs
NAME DESIRED CURRENT READY AGE
myapp-v1-58bdffcdd7 2 2 2 4m17s
[root@k8s-master01 deployment]# kubectl get deployment
NAME READY UP-TO-DATE AVAILABLE AGE
myapp-v1 2/2 2 2 4m20s
# Scale down; the pods to remove are picked arbitrarily
[root@k8s-master01 deployment]# kubectl explain deploy.spec.strategy.
# View help
[root@k8s-master01 deployment]# cat deploy-demo.yaml |grep replicas:
replicas: 3
[root@k8s-master01 deployment]# kubectl apply -f deploy-demo.yaml
deployment.apps/myapp-v1 configured
# First set the replica count to 3 and apply
[root@k8s-master01 deployment]# kubectl describe deployments.apps myapp-v1 |grep -i strategy
StrategyType: RollingUpdate
RollingUpdateStrategy: 25% max unavailable, 25% max surge
# The default strategy is RollingUpdate
# By default both max unavailable and max surge are 25%
replicas: 3
maxSurge: 25% -> at most 3+1=4 pods during the update
maxUnavailable: 25% -> at least 3-0=3 pods remain available
[root@k8s-master01 deployment]# cat deploy-demo.yaml |grep image:
image: janakiramm/myapp:v2
[root@k8s-master01 deployment]# kubectl apply -f deploy-demo.yaml
deployment.apps/myapp-v1 configured
# Change the image to trigger a rolling update
[root@k8s-master01 deployment]# kubectl get pods -w
NAME READY STATUS RESTARTS AGE
myapp-v1-58bdffcdd7-bn79v 1/1 Running 0 29m
myapp-v1-58bdffcdd7-cqz99 1/1 Running 0 29m
myapp-v1-58bdffcdd7-vxbjk 1/1 Running 0 5m4s
####################
myapp-v1-744bfb8886-8xrzt 0/1 Pending 0 0s
myapp-v1-744bfb8886-8xrzt 0/1 Pending 0 0s
myapp-v1-744bfb8886-8xrzt 0/1 ContainerCreating 0 0s
myapp-v1-744bfb8886-8xrzt 0/1 ContainerCreating 0 0s
myapp-v1-744bfb8886-8xrzt 0/1 Running 0 1s
myapp-v1-744bfb8886-8xrzt 0/1 Running 0 25s
myapp-v1-744bfb8886-8xrzt 1/1 Running 0 25s
myapp-v1-58bdffcdd7-vxbjk 1/1 Terminating 0 6m4s
myapp-v1-744bfb8886-92vvt 0/1 Pending 0 0s
myapp-v1-744bfb8886-92vvt 0/1 Pending 0 0s
myapp-v1-744bfb8886-92vvt 0/1 ContainerCreating 0 0s
myapp-v1-58bdffcdd7-vxbjk 1/1 Terminating 0 6m5s
...
[root@k8s-master01 deployment]# kubectl get rs -w
NAME DESIRED CURRENT READY AGE
myapp-v1-58bdffcdd7 3 3 3 29m
###################
myapp-v1-744bfb8886 1 0 0 0s
myapp-v1-744bfb8886 1 0 0 0s
myapp-v1-744bfb8886 1 1 0 0s
myapp-v1-744bfb8886 1 1 1 25s
myapp-v1-58bdffcdd7 2 3 3 30m
...
[root@k8s-master01 deployment]# kubectl get rs
NAME DESIRED CURRENT READY AGE
myapp-v1-58bdffcdd7 0 0 0 31m
myapp-v1-744bfb8886 3 3 3 83s
# Rolling update complete
[root@k8s-master01 deployment]# kubectl rollout history deployment myapp-v1
deployment.apps/myapp-v1
REVISION CHANGE-CAUSE
1 <none>
2 <none>
# View the deployment rollout history
[root@k8s-master01 deployment]# kubectl rollout undo deployment/myapp-v1 --to-revision=1
deployment.apps/myapp-v1 rolled back
[root@k8s-master01 deployment]# kubectl get pods
NAME READY STATUS RESTARTS AGE
myapp-v1-58bdffcdd7-2tgr2 1/1 Running 0 2m2s
myapp-v1-58bdffcdd7-bk6w7 1/1 Running 0 101s
myapp-v1-58bdffcdd7-lrjhp 1/1 Running 0 81s
[root@k8s-master01 deployment]# kubectl get rs
NAME DESIRED CURRENT READY AGE
myapp-v1-58bdffcdd7 3 3 3 35m
myapp-v1-744bfb8886 0 0 0 5m57s
# Rollback done
[root@k8s-master01 deployment]# kubectl explain deploy.spec.strategy.
# View help
[root@k8s-master01 deployment]# cat deploy-demo.yaml |head -n15
apiVersion: apps/v1
kind: Deployment
metadata:
name: myapp-v1
namespace: default
spec:
strategy:
rollingUpdate:
maxSurge: 1
maxUnavailable: 1
replicas: 3
selector:
matchLabels:
app: myapp
version: v1
[root@k8s-master01 deployment]# cat deploy-demo.yaml |grep image:
image: janakiramm/myapp:v2
[root@k8s-master01 deployment]# kubectl apply -f deploy-demo.yaml
deployment.apps/myapp-v1 configured
[root@k8s-master01 deployment]# kubectl get pods -w
...
# Test rollingUpdate
With 3 replicas and these limits, between 2 and 4 pods are available during the update.
The Recreate strategy below should never be used in production: recreating everything takes time, and production traffic cannot afford the downtime.
[root@k8s-master01 deployment]# cat deploy-demo.yaml |head -n15
apiVersion: apps/v1
kind: Deployment
metadata:
name: myapp-v1
namespace: default
spec:
strategy:
type: Recreate
replicas: 3
selector:
matchLabels:
app: myapp
version: v1
template:
metadata:
[root@k8s-master01 deployment]# cat deploy-demo.yaml |grep image:
image: janakiramm/myapp:v1
[root@k8s-master01 deployment]# kubectl apply -f deploy-demo.yaml
deployment.apps/myapp-v1 configured
[root@k8s-master01 deployment]# kubectl get pods -w
...
[root@k8s-master01 deployment]# kubectl delete -f deploy-demo.yaml
deployment.apps "myapp-v1" deleted
# Clean up
In a blue-green deployment there are two complete systems: one currently serving traffic, marked "green", and one being prepared for release, marked "blue". Both are fully functional, running systems; they differ only in version and in whether they are serving external traffic. (Blue and green can each stand for the new or the old system; the idea is what matters, not the colors.)
When a new version is developed and needs to replace the old one in production, a brand-new system running the new code is built alongside the live system. At that point two systems are running: the old one serving traffic is the green system, the newly deployed one is the blue system.
Advantages:
1. The update needs no downtime, so the risk is low
2. Rollback is easy and fast: just change the routing or switch the DNS
Disadvantages:
1. Higher cost, since two environments must be deployed; if a base service or anything else in the new version has a problem, all users are affected at once
2. Two sets of machines are needed, which is a large expense
3. When operating on non-isolated machines (Docker, VM), there is a risk of destroying the blue or green environment
4. If the load balancer / reverse proxy / routing / DNS is mishandled, traffic may not actually be switched over
Kubernetes has no built-in blue-green deployment. Currently the best approach is to create a new deployment and then update the application's service to point at the app deployed by the new deployment.
ctr -n k8s.io images import myapp-lan.tar.gz
ctr -n k8s.io images import myapp-lv.tar.gz
# Import the images on the nodes
[root@k8s-master01 deployment]# kubectl create ns blue-green
namespace/blue-green created
[root@k8s-master01 deployment]# cat lan.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: myapp-v1
namespace: blue-green
spec:
replicas: 3
selector:
matchLabels:
app: myapp
version: v1
template:
metadata:
labels:
app: myapp
version: v1
spec:
containers:
- name: myapp
image: janakiramm/myapp:v1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
[root@k8s-master01 deployment]# kubectl apply -f lan.yaml
deployment.apps/myapp-v1 created
# Deploy the system that is currently serving traffic
[root@k8s-master01 deployment]# cat service_lanlv.yaml
apiVersion: v1
kind: Service
metadata:
name: myapp-lan-lv
namespace: blue-green
labels:
app: myapp
spec:
type: NodePort
ports:
- port: 80
nodePort: 30062
name: http
selector:
app: myapp
version: v1
[root@k8s-master01 deployment]# kubectl apply -f service_lanlv.yaml
service/myapp-lan-lv created
[root@k8s-master01 deployment]# kubectl describe svc myapp-lan-lv -n blue-green |grep -i endpoints:
Endpoints: 10.244.58.236:80,10.244.58.237:80,10.244.85.217:80
# Write the service yaml to expose it externally
Browser access works
[root@k8s-master01 deployment]# cat lv.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: myapp-v2
namespace: blue-green
spec:
replicas: 3
selector:
matchLabels:
app: myapp
version: v2
template:
metadata:
labels:
app: myapp
version: v2
spec:
containers:
- name: myapp
image: janakiramm/myapp:v2
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
[root@k8s-master01 deployment]# kubectl apply -f lv.yaml
deployment.apps/myapp-v2 created
# Deploy the second system, i.e. the new one being prepared for release
[root@k8s-master01 deployment]# kubectl get pods -n blue-green
NAME READY STATUS RESTARTS AGE
myapp-v1-7b55fffbb5-2vpt7 1/1 Running 0 3m11s
myapp-v1-7b55fffbb5-gfnnt 1/1 Running 0 3m11s
myapp-v1-7b55fffbb5-xpk2f 1/1 Running 0 3m11s
myapp-v2-5779dc88f-55566 1/1 Running 0 16s
myapp-v2-5779dc88f-cjnrp 1/1 Running 0 16s
myapp-v2-5779dc88f-sz2m9 1/1 Running 0 16s
[root@k8s-master01 deployment]# kubectl get pods --show-labels -n blue-green
NAME READY STATUS RESTARTS AGE LABELS
myapp-v1-7b55fffbb5-2vpt7 1/1 Running 0 5m25s app=myapp,pod-template-hash=7b55fffbb5,version=v1
myapp-v1-7b55fffbb5-gfnnt 1/1 Running 0 5m25s app=myapp,pod-template-hash=7b55fffbb5,version=v1
myapp-v1-7b55fffbb5-xpk2f 1/1 Running 0 5m25s app=myapp,pod-template-hash=7b55fffbb5,version=v1
myapp-v2-5779dc88f-55566 1/1 Running 0 2m30s app=myapp,pod-template-hash=5779dc88f,version=v2
myapp-v2-5779dc88f-cjnrp 1/1 Running 0 2m30s app=myapp,pod-template-hash=5779dc88f,version=v2
myapp-v2-5779dc88f-sz2m9 1/1 Running 0 2m30s app=myapp,pod-template-hash=5779dc88f,version=v2
[root@k8s-master01 deployment]# cat service_lanlv.yaml
apiVersion: v1
kind: Service
metadata:
name: myapp-lan-lv
namespace: blue-green
labels:
app: myapp
spec:
type: NodePort
ports:
- port: 80
nodePort: 30062
name: http
selector:
app: myapp
version: v2
# Just change the service's label selector in the yaml; here version is changed to v2
[root@k8s-master01 deployment]# kubectl apply -f service_lanlv.yaml
service/myapp-lan-lv configured
[root@k8s-master01 deployment]# kubectl describe svc myapp-lan-lv -n blue-green |grep -i endpoints:
Endpoints: 10.244.58.238:80,10.244.85.218:80,10.244.85.219:80
[root@k8s-master01 deployment]# kubectl get pods --show-labels -n blue-green -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES LABELS
myapp-v1-7b55fffbb5-2vpt7 1/1 Running 0 5m40s 10.244.58.237 k8s-node02 <none> <none> app=myapp,pod-template-hash=7b55fffbb5,version=v1
myapp-v1-7b55fffbb5-gfnnt 1/1 Running 0 5m40s 10.244.85.217 k8s-node01 <none> <none> app=myapp,pod-template-hash=7b55fffbb5,version=v1
myapp-v1-7b55fffbb5-xpk2f 1/1 Running 0 5m40s 10.244.58.236 k8s-node02 <none> <none> app=myapp,pod-template-hash=7b55fffbb5,version=v1
myapp-v2-5779dc88f-55566 1/1 Running 0 2m45s 10.244.58.238 k8s-node02 <none> <none> app=myapp,pod-template-hash=5779dc88f,version=v2
myapp-v2-5779dc88f-cjnrp 1/1 Running 0 2m45s 10.244.85.218 k8s-node01 <none> <none> app=myapp,pod-template-hash=5779dc88f,version=v2
myapp-v2-5779dc88f-sz2m9 1/1 Running 0 2m45s 10.244.85.219 k8s-node01 <none> <none> app=myapp,pod-template-hash=5779dc88f,version=v2
# Traffic has switched to the new system
[root@k8s-master01 deployment]# kubectl delete -f lv.yaml
deployment.apps "myapp-v2" deleted
[root@k8s-master01 deployment]# kubectl delete -f lan.yaml
deployment.apps "myapp-v1" deleted
[root@k8s-master01 deployment]# kubectl delete -f service_lanlv.yaml
service "myapp-lan-lv" deleted
# Clean up
Browser access works
**Where the canary release got its name:** in the 17th century, British miners discovered that canaries are extremely sensitive to firedamp. With even a trace of the gas in the air a canary stops singing, and once the concentration passes a certain level the canary is poisoned and dies long before humans notice anything. With the crude mining equipment of the time, miners took a canary down the shaft on every descent as a gas indicator so they could evacuate in time.
**Canary release (also called gray release or gray update):** a canary release first goes out to one machine, or a small proportion such as 2% of the servers, mainly to validate real traffic; this is the canary test (commonly called gray testing in China).
A simple canary test is verified manually; a complex one needs fairly complete monitoring infrastructure, using metric feedback to watch the canary's health as the basis for continuing the release or rolling back. If the canary test passes, the remaining v1 instances are all upgraded to v2; if it fails, the canary is rolled back and the release has failed.
[root@k8s-master01 deployment]# kubectl apply -f lan.yaml
deployment.apps/myapp-v1 created
[root@k8s-master01 deployment]# kubectl set image deployment myapp-v1 myapp=docker.io/xianchao/nginx:v1 -n blue-green && kubectl rollout pause deployment myapp-v1 -n blue-green
deployment.apps/myapp-v1 image updated
deployment.apps/myapp-v1 paused
[root@k8s-master01 deployment]# kubectl get pods -n blue-green -w
NAME READY STATUS RESTARTS AGE
myapp-v1-7b55fffbb5-46cqg 1/1 Running 0 5s
myapp-v1-7b55fffbb5-8mh8k 1/1 Running 0 5s
myapp-v1-7b55fffbb5-fnb8c 1/1 Running 0 5s
#######################
myapp-v1-644d75999-2dlks 0/1 Pending 0 0s
myapp-v1-644d75999-2dlks 0/1 Pending 0 0s
myapp-v1-644d75999-2dlks 0/1 ContainerCreating 0 0s
myapp-v1-644d75999-2dlks 0/1 ContainerCreating 0 0s
myapp-v1-644d75999-2dlks 1/1 Running 0 1s
# One new pod is created as the canary; the rollout is paused, so the other three pods stay on the old version and keep serving the old workload
[root@k8s-master01 deployment]# kubectl rollout resume deployment myapp-v1 -n blue-green
deployment.apps/myapp-v1 resumed
[root@k8s-master01 deployment]# kubectl get pods -n blue-green -w
...
[root@k8s-master01 deployment]# kubectl get rs -n blue-green
NAME DESIRED CURRENT READY AGE
myapp-v1-644d75999 3 3 3 2m25s
myapp-v1-7b55fffbb5 0 0 0 2m32s
# If the canary shows no problems, resume the rollout to update everything
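Had the canary failed, the paused rollout could have been rolled back instead; note that a paused deployment must be resumed before it can be undone (a sketch):

```shell
kubectl rollout resume deployment myapp-v1 -n blue-green
kubectl rollout undo deployment myapp-v1 -n blue-green
```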
Service overview
A service's name resolution depends on the cluster DNS add-on (newer versions use CoreDNS), so the DNS add-on has to be deployed after the cluster itself; and for Kubernetes to provide networking to clients, it relies on a third-party network plugin (flannel, calico, etc.).
Every K8s node runs a component called kube-proxy. kube-proxy constantly watches the apiserver for changes to Service resources: it stays connected to the apiserver on the master and, through Kubernetes' built-in watch mechanism, learns of every change to any Service-related resource. As soon as a Service changes (is created, deleted, and so on), kube-proxy turns it into rules on the local node that implement that service and route our requests to the right backend pods. Those rules may be iptables or ipvs, depending on how the service is implemented.
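As an aside, on a kubeadm-style cluster the mode kube-proxy runs in can be checked from its configmap (a sketch; the configmap name assumes kubeadm defaults, and an empty mode means the iptables default):

```shell
kubectl -n kube-system get configmap kube-proxy -o yaml | grep 'mode:'
```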
How a Service works
When k8s creates a Service, it uses the label selector to find matching pods and creates an Endpoints object with the same name as the Service. When pod addresses change, the endpoints change with them. When the service receives a client request, it uses the endpoints to find the pod address to forward to. (Which node's pod the request lands on is decided by kube-proxy's load balancing.)
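The endpoint object can be inspected directly; for the my-nginx service created later in this section, either of these shows the backing pod addresses (a sketch):

```shell
kubectl get endpoints my-nginx
kubectl describe svc my-nginx | grep -i endpoints:
```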
There are three kinds of IP addresses in a K8s cluster: Node IP (the address of the node itself), Cluster IP (the service's virtual IP), and Pod IP (the pod's address on the pod network).
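Each kind can be seen with the usual commands:

```shell
kubectl get nodes -owide   # Node IP (INTERNAL-IP column)
kubectl get svc            # Cluster IP (CLUSTER-IP column)
kubectl get pods -owide    # Pod IP (IP column)
```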
[root@k8s-master01 ~]# kubectl explain service.spec.
# View help
[root@k8s-node01 images]# ctr -n k8s.io images import nginx.tar.gz
[root@k8s-node02 images]# ctr -n k8s.io images import nginx.tar.gz
# Import the image on the worker nodes
[root@k8s-master01 ~]# mkdir service
[root@k8s-master01 ~]# cd service/
[root@k8s-master01 service]# cat pod_test.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: my-nginx
spec:
selector:
matchLabels:
run: my-nginx
replicas: 2
template:
metadata:
labels:
run: my-nginx
spec:
containers:
- name: my-nginx
image: nginx
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80 # the port the container in the pod exposes
startupProbe:
periodSeconds: 5
initialDelaySeconds: 60
# set the initial delay long to make testing easier
timeoutSeconds: 10
httpGet:
scheme: HTTP
port: 80
path: /
livenessProbe:
periodSeconds: 5
initialDelaySeconds: 60
timeoutSeconds: 10
httpGet:
scheme: HTTP
port: 80
path: /
readinessProbe:
periodSeconds: 5
initialDelaySeconds: 60
timeoutSeconds: 10
httpGet:
scheme: HTTP
port: 80
path: /
[root@k8s-master01 service]# kubectl apply -f pod_test.yaml
deployment.apps/my-nginx created
# Create the deployment, which generates the pods
[root@k8s-master01 service]# kubectl get pods --show-labels
NAME READY STATUS RESTARTS AGE LABELS
my-nginx-7468bcb55b-b7vnl 0/1 Running 0 23s pod-template-hash=7468bcb55b,run=my-nginx
my-nginx-7468bcb55b-zzpw2 0/1 Running 0 23s pod-template-hash=7468bcb55b,run=my-nginx
# View the pod labels
[root@k8s-master01 service]# cat service_test.yaml
apiVersion: v1
kind: Service
metadata:
name: my-nginx
labels:
run: my-nginx
spec:
type: ClusterIP
ports:
- port: 80
# the service port, exposed to services inside the k8s cluster
protocol: TCP
targetPort: 80
# the port defined in the pod's container
selector:
run: my-nginx
# select pods carrying the run=my-nginx label
[root@k8s-master01 service]# kubectl apply -f service_test.yaml
service/my-nginx created
# Create the service
[root@k8s-master01 service]# kubectl get svc -l run=my-nginx
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
my-nginx ClusterIP 10.98.163.90 <none> 80/TCP 50s
[root@k8s-master01 service]# curl 10.98.163.90
...
# Test the service; the ClusterIP type is reachable only from inside the cluster
[root@k8s-master01 service]# kubectl describe svc my-nginx |grep -i endpoints:
Endpoints: 10.244.58.246:80,10.244.85.224:80
[root@k8s-master01 service]# kubectl get pods
NAME READY STATUS RESTARTS AGE
my-nginx-7468bcb55b-b7vnl 1/1 Running 0 5m53s
my-nginx-7468bcb55b-zzpw2 1/1 Running 0 5m53s
[root@k8s-master01 service]# kubectl delete pod my-nginx-7468bcb55b-b7vnl
pod "my-nginx-7468bcb55b-b7vnl" deleted
[root@k8s-master01 service]# kubectl get pods
NAME READY STATUS RESTARTS AGE
my-nginx-7468bcb55b-8vz67 0/1 Running 0 49s
my-nginx-7468bcb55b-zzpw2 1/1 Running 0 6m46s
[root@k8s-master01 service]# kubectl describe svc my-nginx |grep -i endpoints:
Endpoints: 10.244.85.224:80
# The service does not proxy to a pod until it is ready
[root@k8s-master01 service]# kubectl get pods
NAME READY STATUS RESTARTS AGE
my-nginx-7468bcb55b-8vz67 1/1 Running 0 63s
my-nginx-7468bcb55b-zzpw2 1/1 Running 0 7m
[root@k8s-master01 service]# kubectl get pods -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
my-nginx-7468bcb55b-8vz67 1/1 Running 0 74s 10.244.58.247 k8s-node02 <none> <none>
my-nginx-7468bcb55b-zzpw2 1/1 Running 0 7m11s 10.244.85.224 k8s-node01 <none> <none>
[root@k8s-master01 service]# kubectl describe svc my-nginx |grep -i endpoints:
Endpoints: 10.244.58.247:80,10.244.85.224:80
# This is why readiness probes matter; otherwise the service might proxy to a pod whose application is not up yet
[root@k8s-master01 service]# kubectl delete -f pod_test.yaml
deployment.apps "my-nginx" deleted
[root@k8s-master01 service]# kubectl delete -f service_test.yaml
service "my-nginx" deleted
# Clean up
Taking this service as an example, its full name is my-nginx.default.svc.cluster.local
that is: service-name.namespace.domain-suffix
This full name resolves only from pods inside the cluster; the cluster nodes themselves cannot resolve it.
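This is easy to verify from a throwaway pod (a sketch; busybox:1.28 is reused from earlier examples and the pod name dns-test is arbitrary):

```shell
kubectl run dns-test --rm -it --image=busybox:1.28 --restart=Never -- \
  nslookup my-nginx.default.svc.cluster.local
```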
[root@k8s-master01 service]# cat pod_nodeport.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: my-nginx-nodeport
spec:
selector:
matchLabels:
run: my-nginx-nodeport
replicas: 2
template:
metadata:
labels:
run: my-nginx-nodeport
spec:
containers:
- name: my-nginx-nodeport-container
image: nginx
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
[root@k8s-master01 service]# kubectl apply -f pod_nodeport.yaml
deployment.apps/my-nginx-nodeport created
[root@k8s-master01 service]# cat service_nodeport.yaml
apiVersion: v1
kind: Service
metadata:
name: my-nginx-nodeport
labels:
run: my-nginx-nodeport
spec:
type: NodePort
ports:
- port: 80
protocol: TCP
targetPort: 80
nodePort: 30380
selector:
run: my-nginx-nodeport
[root@k8s-master01 service]# kubectl apply -f service_nodeport.yaml
service/my-nginx-nodeport created
[root@k8s-master01 service]# kubectl get pods
NAME READY STATUS RESTARTS AGE
my-nginx-nodeport-85c4df8944-g8s85 1/1 Running 0 19s
my-nginx-nodeport-85c4df8944-k6b9f 1/1 Running 0 19s
[root@k8s-master01 service]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 8d
my-nginx-nodeport NodePort 10.110.139.100 <none> 80:30380/TCP 13s
# Create the pods and a NodePort-type service
[root@k8s-master01 service]# ss -lntup |grep 30380
# The port does not show up as a listening socket
[root@k8s-master01 service]# ipvsadm -Ln |head -n10
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 172.17.0.1:30380 rr
-> 10.244.58.249:80 Masq 1 0 0
-> 10.244.85.227:80 Masq 1 0 0
TCP 192.168.1.181:30380 rr
-> 10.244.58.249:80 Masq 1 1 0
-> 10.244.85.227:80 Masq 1 0 1
# Inspect the ipvs forwarding rules instead
[root@k8s-master01 service]# kubectl delete -f pod_nodeport.yaml
deployment.apps "my-nginx-nodeport" deleted
[root@k8s-master01 service]# kubectl delete -f service_nodeport.yaml
service "my-nginx-nodeport" deleted
# Clean up
A client request flows: http://192.168.1.181:30380 -> docker0 virtual NIC 172.17.0.1:30380 -> 10.244.121.36:80, 10.244.102.86:80
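Any machine that can reach the node can exercise this path (the node IP is the one from the transcript above):

```shell
curl http://192.168.1.181:30380
```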
Use case: cross-namespace access (the ExternalName service type)
Requirement: the client service in the default namespace wants to access the nginx-svc service in the nginx-ns namespace
[root@k8s-node01 images]# ctr -n k8s.io images import busybox.tar.gz
[root@k8s-node02 images]# ctr -n k8s.io images import busybox.tar.gz
# Import the image on the worker nodes
[root@k8s-master01 service]# kubectl create ns nginx-ns
namespace/nginx-ns created
[root@k8s-master01 service]# cat server_nginx.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
namespace: nginx-ns
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx
imagePullPolicy: IfNotPresent
[root@k8s-master01 service]# kubectl apply -f server_nginx.yaml
deployment.apps/nginx created
# Deploy a pod running nginx in the nginx-ns namespace
[root@k8s-master01 service]# cat nginx_svc.yaml
apiVersion: v1
kind: Service
metadata:
name: nginx-svc
namespace: nginx-ns
spec:
selector:
app: nginx
ports:
- name: http
protocol: TCP
port: 80
targetPort: 80
[root@k8s-master01 service]# kubectl apply -f nginx_svc.yaml
service/nginx-svc created
# Deploy the nginx service, type ClusterIP
[root@k8s-master01 service]# cat client.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: client
spec:
replicas: 1
selector:
matchLabels:
app: busybox
template:
metadata:
labels:
app: busybox
spec:
containers:
- name: busybox
image: busybox
imagePullPolicy: IfNotPresent
command: ["/bin/sh","-c","sleep 36000"]
[root@k8s-master01 service]# kubectl apply -f client.yaml
deployment.apps/client created
# Deploy the client in the default namespace
[root@k8s-master01 service]# cat client_svc.yaml
apiVersion: v1
kind: Service
metadata:
name: client-svc
spec:
type: ExternalName
externalName: nginx-svc.nginx-ns.svc.cluster.local
ports:
- name: http
port: 80
targetPort: 80
[root@k8s-master01 service]# kubectl apply -f client_svc.yaml
service/client-svc created
# Create an ExternalName-type service
[root@k8s-master01 service]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
client-svc ExternalName <none> nginx-svc.nginx-ns.svc.cluster.local 80/TCP 89s
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 8d
[root@k8s-master01 service]# kubectl get pods
NAME READY STATUS RESTARTS AGE
client-798446484b-wrz86 1/1 Running 0 2m6s
[root@k8s-master01 service]# kubectl exec -it client-798446484b-wrz86 -- /bin/sh
/ # wget -q -O - client-svc
...
/ # wget -q -O - client-svc.default.svc.cluster.local
...
/ # wget -q -O - nginx-svc.nginx-ns.svc.cluster.local
...
# The three requests above return the same result
[root@k8s-master01 service]# kubectl delete -f client_svc.yaml
service "client-svc" deleted
[root@k8s-master01 service]# kubectl delete -f client.yaml
deployment.apps "client" deleted
[root@k8s-master01 service]# kubectl delete -f server_nginx.yaml
deployment.apps "nginx" deleted
# Clean up
K8s best practice: mapping external services
Scenario 1: the k8s cluster consumes an external mysql database
[root@k8s-node02 ~]# yum -y install mariadb-server
[root@k8s-node02 ~]# systemctl enable --now mariadb
[root@k8s-node02 ~]# mysql
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 3
Server version: 5.5.68-MariaDB MariaDB Server
Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> quit
Bye
# Install mariadb-server on node2 as an out-of-cluster service for testing
[root@k8s-master01 service]# cat mysql_service.yaml
apiVersion: v1
kind: Service
metadata:
name: mysql
namespace: default
spec:
type: ClusterIP
ports:
- port: 3306
[root@k8s-master01 service]# kubectl apply -f mysql_service.yaml
service/mysql created
# Create a ClusterIP service; note it defines no selector, so no endpoints are created automatically
[root@k8s-master01 service]# kubectl get svc |grep mysql
mysql ClusterIP 10.106.232.15 <none> 3306/TCP 43s
[root@k8s-master01 service]# kubectl describe svc mysql |grep -i endpoints:
Endpoints: <none>
# The Endpoints: field is empty, so we need to add one manually
[root@k8s-master01 service]# kubectl explain endpoints.
# View help
[root@k8s-master01 service]# cat mysql_endpoints.yaml
apiVersion: v1
kind: Endpoints
metadata:
name: mysql
# Note: this must match the service name, or it won't be found
namespace: default
subsets:
- addresses:
- ip: 192.168.1.192
ports:
- port: 3306
[root@k8s-master01 service]# kubectl apply -f mysql_endpoints.yaml
endpoints/mysql created
# Create the endpoints for mysql
[root@k8s-master01 service]# kubectl describe svc mysql |grep -i endpoints:
Endpoints: 192.168.1.192:3306
# The endpoint is now matched to the service
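As a quick connectivity check from inside the cluster (a sketch; it assumes the busybox build includes the nc applet, and the pod name is arbitrary):

```shell
kubectl run mysql-test --rm -it --image=busybox:1.28 --restart=Never -- \
  nc -w 2 mysql.default.svc.cluster.local 3306
```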
[root@k8s-master01 service]# kubectl delete -f mysql_endpoints.yaml
endpoints "mysql" deleted
[root@k8s-master01 service]# kubectl delete -f mysql_service.yaml
service "mysql" deleted
[root@k8s-node02 ~]# yum -y remove mariadb-server
# Clean up
With this, workloads inside the cluster can reach the external service through the service.
#### The coredns component in detail
CoreDNS is essentially a DNS server. Since DNS is a common means of service discovery, many open-source projects and engineers use CoreDNS to provide service discovery for their clusters, and Kubernetes uses CoreDNS in-cluster to solve service discovery. As a project accepted into the CNCF (Cloud Native Computing Foundation), CoreDNS has a very simple implementation.
Verify coredns
[root@k8s-node01 ~]# ctr -n k8s.io images import dig.tar.gz
[root@k8s-node02 ~]# ctr -n k8s.io images import dig.tar.gz
# Import the image on the worker nodes
[root@k8s-master01 service]# cat dig.yaml
apiVersion: v1
kind: Pod
metadata:
name: dig
namespace: default
spec:
containers:
- name: dig
image: xianchao/dig:latest
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
restartPolicy: Always
[root@k8s-master01 service]# kubectl apply -f dig.yaml
pod/dig created
[root@k8s-master01 service]# kubectl get pods
NAME READY STATUS RESTARTS AGE
dig 1/1 Running 0 114s
# Create the test pod
[root@k8s-master01 service]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 8d
[root@k8s-master01 service]# kubectl exec -it dig -- /bin/bash
bash-4.3# nslookup kubernetes
Server: 10.96.0.10
Address: 10.96.0.10#53
Name: kubernetes.default.svc.cluster.local
Address: 10.96.0.1
# Test in-cluster name resolution
bash-4.3# nslookup baidu.com
Server: 10.96.0.10
Address: 10.96.0.10#53
Non-authoritative answer:
Name: baidu.com
Address: 39.156.66.10
Name: baidu.com
Address: 110.242.68.66
# Test external name resolution
bash-4.3# exit
exit
[root@k8s-master01 service]# kubectl delete -f dig.yaml
pod "dig" deleted
[root@k8s-master01 service]# kubectl delete -f .
# Clean up