Note:
A rolling upgrade is recommended:
On every node, take the master that is about to be upgraded out of kube-lb first.
root@node1:~# vim /etc/kube-lb/conf/kube-lb.conf
user root;
worker_processes 1;
error_log /etc/kube-lb/logs/error.log warn;
events {
    worker_connections 3000;
}
stream {
    upstream backend {
        server 192.168.6.81:6443 max_fails=2 fail_timeout=3s;
        #server 192.168.6.79:6443 max_fails=2 fail_timeout=3s; #comment out the master that is about to be upgraded
        server 192.168.6.80:6443 max_fails=2 fail_timeout=3s;
    }
    server {
        listen 127.0.0.1:6443;
        proxy_connect_timeout 1s;
        proxy_pass backend;
    }
}
root@node1:~# systemctl restart kube-lb.service #restart the service so the new configuration takes effect
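After the restart, a quick sanity check confirms that kube-lb is still proxying the local apiserver endpoint (the ss utility is assumed to be present on the node):
root@node1:~# systemctl is-active kube-lb.service
root@node1:~# ss -tnlp | grep '127.0.0.1:6443'    #kube-lb should still be listening locally for the apiserver proxy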
The upgrade on the master nodes involves the following components:
root@master1:~/1.21.5# ls
kubernetes-client-linux-amd64.tar.gz kubernetes-node-linux-amd64.tar.gz kubernetes-server-linux-amd64.tar.gz kubernetes.tar.gz
root@master1:~/1.21.5# tar xf kubernetes-client-linux-amd64.tar.gz
root@master1:~/1.21.5# tar xf kubernetes-node-linux-amd64.tar.gz
root@master1:~/1.21.5# tar xf kubernetes-server-linux-amd64.tar.gz
root@master1:~/1.21.5# tar xf kubernetes.tar.gz
Location of the binaries required on the server side:
root@master1:~/1.21.5/kubernetes/server/bin# pwd
/root/1.21.5/kubernetes/server/bin
root@master1:~/1.21.5/kubernetes/server/bin# ./kube-apiserver --version
Kubernetes v1.21.5
root@master1:~/1.21.5/kubernetes/server/bin# ll
total 1075596
drwxr-xr-x 2 root root 4096 Sep 16 05:22 ./
drwxr-xr-x 3 root root 66 Sep 16 05:27 ../
-rwxr-xr-x 1 root root 50790400 Sep 16 05:22 apiextensions-apiserver*
-rwxr-xr-x 1 root root 48738304 Sep 16 05:22 kube-aggregator*
-rwxr-xr-x 1 root root 122322944 Sep 16 05:22 kube-apiserver*
-rw-r--r-- 1 root root 8 Sep 16 05:21 kube-apiserver.docker_tag
-rw------- 1 root root 127114240 Sep 16 05:21 kube-apiserver.tar
-rwxr-xr-x 1 root root 116359168 Sep 16 05:22 kube-controller-manager*
-rw-r--r-- 1 root root 8 Sep 16 05:21 kube-controller-manager.docker_tag
-rw------- 1 root root 121150976 Sep 16 05:21 kube-controller-manager.tar
-rwxr-xr-x 1 root root 43360256 Sep 16 05:22 kube-proxy*
-rw-r--r-- 1 root root 8 Sep 16 05:21 kube-proxy.docker_tag
-rw------- 1 root root 105362432 Sep 16 05:21 kube-proxy.tar
-rwxr-xr-x 1 root root 47321088 Sep 16 05:22 kube-scheduler*
-rw-r--r-- 1 root root 8 Sep 16 05:21 kube-scheduler.docker_tag
-rw------- 1 root root 52112384 Sep 16 05:21 kube-scheduler.tar
-rwxr-xr-x 1 root root 44851200 Sep 16 05:22 kubeadm*
-rwxr-xr-x 1 root root 46645248 Sep 16 05:22 kubectl*
-rwxr-xr-x 1 root root 55305384 Sep 16 05:22 kubectl-convert*
-rwxr-xr-x 1 root root 118353264 Sep 16 05:22 kubelet*
-rwxr-xr-x 1 root root 1593344 Sep 16 05:22 mounter*
root@master1:~/1.21.5/kubernetes/server/bin# systemctl stop kube-apiserver kube-proxy.service kube-controller-manager.service kube-scheduler.service kubelet.service
root@master1:~/1.21.5/kubernetes/server/bin# \cp kube-apiserver kube-controller-manager kube-proxy kube-scheduler kubelet kubectl /usr/local/bin/
root@master1:~/1.21.5/kubernetes/server/bin# systemctl start kube-apiserver kube-proxy.service kube-controller-manager.service kube-scheduler.service kubelet.service
root@master1:~/1.21.5/kubernetes/server/bin# kubectl get node -A
NAME STATUS ROLES AGE VERSION
192.168.6.79 Ready,SchedulingDisabled master 6d16h v1.21.5 #now shows the new version
192.168.6.80 Ready,SchedulingDisabled master 6d16h v1.21.0
192.168.6.81 Ready,SchedulingDisabled master 53m v1.21.0
192.168.6.89 Ready node 6d15h v1.21.0
192.168.6.90 Ready node 6d15h v1.21.0
192.168.6.91 Ready node 45m v1.21.0
root@node1:~# vim /etc/kube-lb/conf/kube-lb.conf
user root;
worker_processes 1;
error_log /etc/kube-lb/logs/error.log warn;
events {
    worker_connections 3000;
}
stream {
    upstream backend {
        #server 192.168.6.81:6443 max_fails=2 fail_timeout=3s;
        server 192.168.6.79:6443 max_fails=2 fail_timeout=3s;
        #server 192.168.6.80:6443 max_fails=2 fail_timeout=3s;
    }
    server {
        listen 127.0.0.1:6443;
        proxy_connect_timeout 1s;
        proxy_pass backend;
    }
}
root@node1:~# systemctl restart kube-lb
root@master2:~/1.21.5# tar xf kubernetes-client-linux-amd64.tar.gz
root@master2:~/1.21.5# tar xf kubernetes-node-linux-amd64.tar.gz
root@master2:~/1.21.5# tar xf kubernetes-server-linux-amd64.tar.gz
root@master2:~/1.21.5# tar xf kubernetes.tar.gz
root@master2:~/1.21.5# cd kubernetes/server/bin/
root@master2:~/1.21.5/kubernetes/server/bin# systemctl stop kube-apiserver kube-proxy.service kube-controller-manager.service kube-scheduler.service kubelet.service
root@master2:~/1.21.5/kubernetes/server/bin# \cp kube-apiserver kube-controller-manager kube-proxy kube-scheduler kubelet kubectl /usr/local/bin/
root@master2:~/1.21.5/kubernetes/server/bin# systemctl start kube-apiserver kube-proxy.service kube-controller-manager.service kube-scheduler.service kubelet.service
root@master3:~/1.21.5# tar xf kubernetes-client-linux-amd64.tar.gz
root@master3:~/1.21.5# tar xf kubernetes-node-linux-amd64.tar.gz
root@master3:~/1.21.5# tar xf kubernetes-server-linux-amd64.tar.gz
root@master3:~/1.21.5# tar xf kubernetes.tar.gz
root@master3:~/1.21.5# cd kubernetes/server/bin/
root@master3:~/1.21.5/kubernetes/server/bin# systemctl stop kube-apiserver kube-proxy.service kube-controller-manager.service kube-scheduler.service kubelet.service
root@master3:~/1.21.5/kubernetes/server/bin# \cp kube-apiserver kube-controller-manager kube-proxy kube-scheduler kubelet kubectl /usr/local/bin/
root@master3:~/1.21.5/kubernetes/server/bin# systemctl start kube-apiserver kube-proxy.service kube-controller-manager.service kube-scheduler.service kubelet.service
root@master1:~/1.21.5/kubernetes/server/bin# kubectl get node -A
NAME STATUS ROLES AGE VERSION
192.168.6.79 Ready,SchedulingDisabled master 6d16h v1.21.5
192.168.6.80 Ready,SchedulingDisabled master 6d16h v1.21.5
192.168.6.81 NotReady,SchedulingDisabled master 67m v1.21.5
192.168.6.89 Ready node 6d16h v1.21.0
192.168.6.90 Ready node 6d16h v1.21.0
192.168.6.91 Ready node 60m v1.21.0
root@node1:~# vim /etc/kube-lb/conf/kube-lb.conf
user root;
worker_processes 1;
error_log /etc/kube-lb/logs/error.log warn;
events {
    worker_connections 3000;
}
stream {
    upstream backend {
        server 192.168.6.81:6443 max_fails=2 fail_timeout=3s;
        server 192.168.6.79:6443 max_fails=2 fail_timeout=3s;
        server 192.168.6.80:6443 max_fails=2 fail_timeout=3s;
    }
    server {
        listen 127.0.0.1:6443;
        proxy_connect_timeout 1s;
        proxy_pass backend;
    }
}
root@node1:~# systemctl restart kube-lb
root@node1:~# systemctl stop kubelet kube-proxy
root@master1:~/1.21.5/kubernetes/server/bin# scp kubelet kube-proxy kubectl [email protected]:/usr/local/bin/
root@node1:~# systemctl start kubelet kube-proxy
root@master1:~/1.21.5/kubernetes/server/bin# kubectl get node -A
NAME STATUS ROLES AGE VERSION
192.168.6.79 Ready,SchedulingDisabled master 6d16h v1.21.5
192.168.6.80 Ready,SchedulingDisabled master 6d16h v1.21.5
192.168.6.81 Ready,SchedulingDisabled master 83m v1.21.5
192.168.6.89 Ready node 6d16h v1.21.5
192.168.6.90 Ready node 6d16h v1.21.0
192.168.6.91 Ready node 75m v1.21.0
The YAML files need to be prepared in advance, together with the namespace and anything else the pod needs in order to run.
apiVersion: v1 #API version
kind: Namespace #object type: Namespace
metadata: #object metadata
  name: n56 #name of the namespace
root@master1:~# kubectl apply -f n56-namespace.yaml
namespace/n56 created
root@master1:~# kubectl get ns
NAME STATUS AGE
default Active 6d19h
kube-node-lease Active 6d19h
kube-public Active 6d19h
kube-system Active 6d19h
kubernetes-dashboard Active 6d17h
n56 Active 4s
Online YAML/JSON editor: https://www.bejson.com/validators/yaml_editor/
{
  "人员名单": {
    "张三": {
      "年龄": 18,
      "职业": "Linux运维工程师",
      "爱好": ["看书", "学习", "加班"]
    },
    "李四": {
      "年龄": 20,
      "职业": "java开发工程师",
      "爱好": ["开源技术", "微服务", "分布式存储"]
    }
  }
}
人员名单:
  张三:
    年龄: 18
    职业: Linux运维工程师
    爱好:
      - 看书
      - 学习
      - 加班
  李四:
    年龄: 20
    职业: java开发工程师
    爱好:
      - 开源技术
      - 微服务
      - 分布式存储
YAML files in k8s, like YAML files used in other scenarios, are mostly built from these kinds of structures (mappings and lists).
If there is no template file to start from, the fields can be looked up as follows:
kubectl explain namespace
kubectl explain namespace.metadata
#nginx.yaml
kind: Deployment #object type: a Deployment controller, kubectl explain Deployment
apiVersion: apps/v1 #API version, # kubectl explain Deployment.apiVersion
metadata: #metadata of the Deployment, kubectl explain Deployment.metadata
  labels: #custom labels, # kubectl explain Deployment.metadata.labels
    app: n56-nginx-deployment-label #a label named app with the value n56-nginx-deployment-label; it is referenced later
  name: n56-nginx-deployment #name of the Deployment
  namespace: n56 #namespace of the Deployment, defaults to default
spec: #detailed definition of the containers in this Deployment, kubectl explain Deployment.spec
  replicas: 1 #number of pod replicas to create, i.e. how many pods, defaults to 1
  selector: #label selector
    matchLabels: #labels to match, must be set
      app: n56-nginx-selector #the target label to match
  template: #pod template, mandatory; it describes the pods to be created
    metadata: #template metadata
      labels: #template labels, Deployment.spec.template.metadata.labels
        app: n56-nginx-selector #label, must equal Deployment.spec.selector.matchLabels
    spec:
      containers:
      - name: n56-nginx-container #container name
        image: nginx:1.16.1
        #command: ["/apps/tomcat/bin/run_tomcat.sh"] #command or script executed when the container starts
        #imagePullPolicy: IfNotPresent
        imagePullPolicy: Always #image pull policy
        ports: #list of container ports
        - containerPort: 80 #a port
          protocol: TCP #port protocol
          name: http #port name
        - containerPort: 443 #another port
          protocol: TCP #port protocol
          name: https #port name
        env: #environment variables
        - name: "password" #variable name, must be quoted
          value: "123456" #value of this variable
        - name: "age" #another variable name
          value: "18" #value of the other variable
        resources: #resource requests and limits
          limits: #resource limits, the upper bound
            cpu: 500m #CPU limit in cores; compressible values such as 0.5 or 500m are allowed
            memory: 512Mi #memory limit in Mi/Gi; mapped to docker run --memory
          requests: #resource requests
            cpu: 200m #CPU request, the initial amount available at container start; 0.5 or 500m etc.
            memory: 256Mi #memory request, the initial amount available at container start; used when scheduling the pod
      nodeSelector:
        #group: python57
        project: linux56 #only schedule the container onto nodes carrying the label project=linux56
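A typical way to use the manifest above: because of the nodeSelector, the node label must exist before the pod can be scheduled (the label command also appears in the kubectl reference later in this section):
root@master1:~# kubectl label node 192.168.6.91 project=linux56      #satisfy the nodeSelector
root@master1:~# kubectl apply -f nginx.yaml
root@master1:~# kubectl get pod -n n56 -o wide                       #the pod should land on 192.168.6.91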
#nginx-svc.yaml
kind: Service #object type: Service
apiVersion: v1 #Service API version, service.apiVersion
metadata: #Service metadata, service.metadata
  labels: #custom labels, service.metadata.labels
    app: n56-nginx #label content of the Service
  name: n56-nginx-service #name of the Service; this name is resolvable via DNS
  namespace: n56 #namespace the Service belongs to, i.e. the namespace the Service is created in
spec: #detailed definition of the Service, service.spec
  type: NodePort #Service type, defines how the Service is accessed, defaults to ClusterIP, service.spec.type
  ports: #ports to expose, service.spec.ports
  - name: http #name of this port
    port: 81 #Service port; client traffic -> firewall -> load balancer -> nodePort:30001 -> service port:81 -> target pod:80
    protocol: TCP #protocol
    targetPort: 80 #port of the target pod
    nodePort: 30001 #port exposed on the nodes
  - name: https #TLS port
    port: 1443 #Service port for 443 traffic
    protocol: TCP #port protocol
    targetPort: 443 #target pod port
    nodePort: 30043 #TLS port exposed on the nodes
  selector: #label selector of the Service, selects the target pods
    app: n56-nginx-selector #route traffic to the selected pods; must equal Deployment.spec.selector.matchLabels
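Applying the Service and checking the result (the curl target can be any node IP; 192.168.6.91 is used here purely as an example):
root@master1:~# kubectl apply -f nginx-svc.yaml
root@master1:~# kubectl get svc -n n56 -o wide
root@master1:~# curl -I http://192.168.6.91:30001/          #reaches the nginx pod through the NodePort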
root@etcd1:~# export node_ip='192.168.6.84 192.168.6.85 192.168.6.86'
root@etcd1:~# for i in ${node_ip}; do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${i}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health; done
https://192.168.6.84:2379 is healthy: successfully committed proposal: took = 14.162089ms
https://192.168.6.85:2379 is healthy: successfully committed proposal: took = 16.070919ms
https://192.168.6.86:2379 is healthy: successfully committed proposal: took = 12.748962ms
root@etcd1:~# ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table member list --endpoints=https://192.168.6.84:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem
+------------------+---------+-------------------+---------------------------+---------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+-------------------+---------------------------+---------------------------+------------+
| 308a1368f27ba48a | started | etcd-192.168.6.85 | https://192.168.6.85:2380 | https://192.168.6.85:2379 | false |
| c16e08c8cace2cd3 | started | etcd-192.168.6.86 | https://192.168.6.86:2380 | https://192.168.6.86:2379 | false |
| ffe13c54256e7ab9 | started | etcd-192.168.6.84 | https://192.168.6.84:2380 | https://192.168.6.84:2379 | false |
+------------------+---------+-------------------+---------------------------+---------------------------+------------+
root@etcd1:~# export node_ip='192.168.6.84 192.168.6.85 192.168.6.86'
root@etcd1:~# for i in ${node_ip}; do ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table endpoint status --endpoints=https://${i}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem; done
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.6.84:2379 | ffe13c54256e7ab9 | 3.4.13 | 3.9 MB | true | false | 16 | 102137 | 102137 | |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.6.85:2379 | 308a1368f27ba48a | 3.4.13 | 3.9 MB | false | false | 16 | 102137 | 102137 | |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| ENDPOINT | ID | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
| https://192.168.6.86:2379 | c16e08c8cace2cd3 | 3.4.13 | 3.9 MB | false | false | 16 | 102137 | 102137 | |
+---------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
root@etcd1:~# ETCDCTL_API=3 etcdctl get / --prefix --keys-only #list all keys, path style
Pod data:
root@etcd1:~# ETCDCTL_API=3 etcdctl get / --prefix --keys-only| grep pods
/registry/pods/kube-system/calico-kube-controllers-759545cb9c-jw8c2
/registry/pods/kube-system/calico-node-67bv2
/registry/pods/kube-system/calico-node-hjm5j
/registry/pods/kube-system/calico-node-lkhdp
/registry/pods/kube-system/calico-node-m5nbf
/registry/pods/kube-system/calico-node-n2vxw
/registry/pods/kube-system/calico-node-wpxj4
/registry/pods/kube-system/coredns-69d445fc94-wsp2w
/registry/pods/kubernetes-dashboard/dashboard-metrics-scraper-67c9c47fc7-fcqzq
/registry/pods/kubernetes-dashboard/kubernetes-dashboard-86d88bf65-l2qh5
Namespace data:
root@etcd1:~# ETCDCTL_API=3 etcdctl get / --prefix --keys-only| grep namespaces
/registry/namespaces/default
/registry/namespaces/kube-node-lease
/registry/namespaces/kube-public
/registry/namespaces/kube-system
/registry/namespaces/kubernetes-dashboard
Controller (Deployment) data:
root@etcd1:~# ETCDCTL_API=3 etcdctl get / --prefix --keys-only| grep deployments
/registry/deployments/kube-system/calico-kube-controllers
/registry/deployments/kube-system/coredns
/registry/deployments/kubernetes-dashboard/dashboard-metrics-scraper
/registry/deployments/kubernetes-dashboard/kubernetes-dashboard
Calico data:
root@etcd1:~# ETCDCTL_API=3 etcdctl get / --prefix --keys-only| grep calico
/calico/ipam/v2/assignment/ipv4/block/10.200.147.192-26
/calico/ipam/v2/assignment/ipv4/block/10.200.187.192-26
/calico/ipam/v2/assignment/ipv4/block/10.200.213.128-26
/calico/ipam/v2/assignment/ipv4/block/10.200.255.128-26
/calico/ipam/v2/assignment/ipv4/block/10.200.67.0-26
/calico/ipam/v2/assignment/ipv4/block/10.200.99.64-26
root@etcd1:~# ETCDCTL_API=3 etcdctl get /calico/ipam/v2/assignment/ipv4/block/10.200.147.192-26
/calico/ipam/v2/assignment/ipv4/block/10.200.147.192-26
{"cidr":"10.200.147.192/26","affinity":"host:node3.k8s.local","allocations":[0,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null,null],"unallocated":[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63],"attributes":[{"handle_id":"ipip-tunnel-addr-node3.k8s.local","secondary":{"node":"node3.k8s.local","type":"ipipTunnelAddress"}}],"deleted":false}
root@etcd1:~# ETCDCTL_API=3 etcdctl get --keys-only --prefix /calico | grep local
/calico/ipam/v2/handle/ipip-tunnel-addr-master1.k8s.local
/calico/ipam/v2/handle/ipip-tunnel-addr-master2.k8s.local
/calico/ipam/v2/handle/ipip-tunnel-addr-master3.k8s.local
/calico/ipam/v2/handle/ipip-tunnel-addr-node1.k8s.local
/calico/ipam/v2/handle/ipip-tunnel-addr-node2.k8s.local
/calico/ipam/v2/handle/ipip-tunnel-addr-node3.k8s.local
/calico/ipam/v2/host/master1.k8s.local/ipv4/block/10.200.213.128-26
/calico/ipam/v2/host/master2.k8s.local/ipv4/block/10.200.67.0-26
/calico/ipam/v2/host/master3.k8s.local/ipv4/block/10.200.187.192-26
/calico/ipam/v2/host/node1.k8s.local/ipv4/block/10.200.255.128-26
/calico/ipam/v2/host/node2.k8s.local/ipv4/block/10.200.99.64-26
/calico/ipam/v2/host/node3.k8s.local/ipv4/block/10.200.147.192-26
/calico/resources/v3/projectcalico.org/felixconfigurations/node.master1.k8s.local
/calico/resources/v3/projectcalico.org/felixconfigurations/node.master2.k8s.local
/calico/resources/v3/projectcalico.org/felixconfigurations/node.master3.k8s.local
/calico/resources/v3/projectcalico.org/felixconfigurations/node.node1.k8s.local
/calico/resources/v3/projectcalico.org/felixconfigurations/node.node2.k8s.local
/calico/resources/v3/projectcalico.org/felixconfigurations/node.node3.k8s.local
/calico/resources/v3/projectcalico.org/nodes/master1.k8s.local
/calico/resources/v3/projectcalico.org/nodes/master2.k8s.local
/calico/resources/v3/projectcalico.org/nodes/master3.k8s.local
/calico/resources/v3/projectcalico.org/nodes/node1.k8s.local
/calico/resources/v3/projectcalico.org/nodes/node2.k8s.local
/calico/resources/v3/projectcalico.org/nodes/node3.k8s.local
/calico/resources/v3/projectcalico.org/workloadendpoints/kube-system/node2.k8s.local-k8s-coredns--69d445fc94--wsp2w-eth0
/calico/resources/v3/projectcalico.org/workloadendpoints/kubernetes-dashboard/node1.k8s.local-k8s-dashboard--metrics--scraper--67c9c47fc7--fcqzq-eth0
/calico/resources/v3/projectcalico.org/workloadendpoints/kubernetes-dashboard/node2.k8s.local-k8s-kubernetes--dashboard--86d88bf65--l2qh5-eth0
#add data
root@etcd1:~# ETCDCTL_API=3 /usr/local/bin/etcdctl put /name "tom"
OK
#query data
root@etcd1:~# ETCDCTL_API=3 /usr/local/bin/etcdctl get /name
/name
tom
#update data: putting to an existing key simply overwrites it, which is how an update is done
root@etcd1:~# ETCDCTL_API=3 /usr/local/bin/etcdctl put /name "jack"
OK
#verify the update
root@etcd1:~# ETCDCTL_API=3 /usr/local/bin/etcdctl get /name
/name
jack
#delete data
root@etcd1:~# ETCDCTL_API=3 /usr/local/bin/etcdctl del /name
1
root@etcd1:~# ETCDCTL_API=3 /usr/local/bin/etcdctl get /name
root@etcd1:~#
etcd watches data and proactively notifies clients when it changes. The etcd v3 watch mechanism can watch a single key or a range of keys. Compared with v2, the main changes in v3 are:
- The API is exposed as RPCs over gRPC instead of the v2 HTTP API. The long-lived connections are clearly more efficient; the downside is that it is less convenient to use, especially in scenarios where maintaining long connections is awkward.
- The old directory structure is gone; the store is pure key/value, and directories can be emulated with prefix matching.
- Values are no longer held in memory, so the same amount of memory can store more keys.
- The watch mechanism is more stable; complete data synchronization can essentially be built on top of it.
- Batch operations and a transaction mechanism are provided; the CAS semantics of etcd v2 can be implemented with transactional requests (transactions support if-condition checks).
#Watch a key on etcd node1; the key does not have to exist yet and can be created later
root@etcd1:~# ETCDCTL_API=3 /usr/local/bin/etcdctl watch /name #the terminal stays blocked while watching
root@etcd2:~# ETCDCTL_API=3 /usr/local/bin/etcdctl put /name "v1"
OK
root@etcd2:~# ETCDCTL_API=3 /usr/local/bin/etcdctl put /name "v2"
OK
WAL is short for write-ahead log: a log that is written before the real write operation is performed.
wal: stores the write-ahead logs. Its most important role is recording the complete history of every data change; in etcd, all data modifications must be written to the WAL before they are committed.
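The WAL segments live under the member directory of the etcd data dir; a quick look (assuming the data path /var/lib/etcd mentioned in the restore note below):
root@etcd1:~# ls /var/lib/etcd/member/wal/    #WAL segments; snapshots sit next to them in ../snap/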
#backup
root@master1:/etc/kubeasz# ./ezctl backup k8s-01
root@master1:/etc/kubeasz/clusters/k8s-01/backup# ls
snapshot.db snapshot_202110061653.db
#restore
root@master1:/etc/kubeasz# ./ezctl restore k8s-01 #services are stopped while the restore runs
#manual data backup
root@etcd1:~# ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot save snapshot.db
{"level":"info","ts":1633509687.2031326,"caller":"snapshot/v3_snapshot.go:119","msg":"created temporary db file","path":"snapshot.db.part"}
{"level":"info","ts":"2021-10-06T16:41:27.203+0800","caller":"clientv3/maintenance.go:200","msg":"opened snapshot stream; downloading"}
{"level":"info","ts":1633509687.2040484,"caller":"snapshot/v3_snapshot.go:127","msg":"fetching snapshot","endpoint":"127.0.0.1:2379"}
{"level":"info","ts":"2021-10-06T16:41:27.233+0800","caller":"clientv3/maintenance.go:208","msg":"completed snapshot read; closing"}
{"level":"info","ts":1633509687.2372715,"caller":"snapshot/v3_snapshot.go:142","msg":"fetched snapshot","endpoint":"127.0.0.1:2379","size":"3.9 MB","took":0.034088666}
{"level":"info","ts":1633509687.2373474,"caller":"snapshot/v3_snapshot.go:152","msg":"saved","path":"snapshot.db"}
Snapshot saved at snapshot.db
root@etcd1:~# ls
Scripts snap snapshot.db
#manual data restore
root@etcd1:~# ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot restore snapshot.db --data-dir=/opt/etcd-testdir
#The directory passed to --data-dir must not exist or must be empty, otherwise the restore fails. To restore into /var/lib/etcd/ (the data path from the config file), stop etcd first, rm -rf /var/lib/etcd/, and then run the restore.
{"level":"info","ts":1633510235.7426238,"caller":"snapshot/v3_snapshot.go:296","msg":"restoring snapshot","path":"snapshot.db","wal-dir":"/opt/etcd-testdir/member/wal","data-dir":"/opt/etcd-testdir","snap-dir":"/opt/etcd-testdir/member/snap"}
{"level":"info","ts":1633510235.7613802,"caller":"mvcc/kvstore.go:380","msg":"restored last compact revision","meta-bucket-name":"meta","meta-bucket-name-key":"finishedCompactRev","restored-compact-revision":85382}
{"level":"info","ts":1633510235.767152,"caller":"membership/cluster.go:392","msg":"added member","cluster-id":"cdf818194e3a8c32","local-member-id":"0","added-peer-id":"8e9e05c52164694d","added-peer-peer-urls":["http://localhost:2380"]}
{"level":"info","ts":1633510235.7712433,"caller":"snapshot/v3_snapshot.go:309","msg":"restored snapshot","path":"snapshot.db","wal-dir":"/opt/etcd-testdir/member/wal","data-dir":"/opt/etcd-testdir","snap-dir":"/opt/etcd-testdir/member/snap"}
#automatic data backup script
root@etcd1:~# mkdir /data/etcd-backup-dir/ -p
root@etcd1:~# vim bp-script.sh
#!/bin/bash
source /etc/profile
DATE=`date +%Y-%m-%d_%H-%M-%S`
ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot save /data/etcd-backup-dir/etcd-snapshot-${DATE}.db
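To run the script on a schedule, a cron entry along these lines can be added (the path and time are illustrative; the script is assumed to be saved as /root/bp-script.sh):
root@etcd1:~# chmod +x /root/bp-script.sh
root@etcd1:~# echo '0 2 * * * root /bin/bash /root/bp-script.sh' >> /etc/crontab    #snapshot every day at 02:00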
When more than half of the etcd cluster members are down, the whole cluster becomes unavailable and the data has to be restored. The recovery flow is:
- Restore the server operating systems
- Redeploy the etcd cluster
- Stop kube-apiserver/controller-manager/scheduler/kubelet/kube-proxy
- Stop the etcd cluster
- Restore the same snapshot on every etcd node (a per-node sketch follows this list)
- Start every node and verify the etcd cluster, checking that there is exactly one leader
- Start kube-apiserver/controller-manager/scheduler/kubelet/kube-proxy
- Verify the k8s master state and pod state
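A minimal per-node restore sketch, assuming the snapshot has already been copied to each etcd node and the member names/peer URLs match this environment (see the member list above); every node runs the restore with its own --name and peer URL before etcd is started again:
root@etcd1:~# systemctl stop etcd && rm -rf /var/lib/etcd    #run on every etcd node
root@etcd1:~# ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot restore snapshot.db \
  --name etcd-192.168.6.84 \
  --initial-cluster etcd-192.168.6.84=https://192.168.6.84:2380,etcd-192.168.6.85=https://192.168.6.85:2380,etcd-192.168.6.86=https://192.168.6.86:2380 \
  --initial-advertise-peer-urls https://192.168.6.84:2380 \
  --data-dir /var/lib/etcd
root@etcd1:~# systemctl start etcd    #repeat on etcd2/etcd3 with their own --name and peer URL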
root@master1:/etc/kubeasz# ./ezctl del-etcd
root@master1:/etc/kubeasz# ./ezctl add-etcd
kubectl get service -A -o wide
kubectl get pod -A -o wide
kubectl get nodes -A -o wide
kubectl get deployment -A
kubectl get deployment -n n56 -o wide
kubectl describe pod n56-nginx-deployment-857fc5cb7f-llxmm -n n56 #if the pod does not come up, check describe first, then the logs, then the system log on the node (ubuntu: syslog, centos: messages)
kubectl create -f nginx.yaml #if the yaml file changes later, the object can only be deleted and re-created; if it was originally created with create, even apply will not work afterwards. So apply is normally used, or --save-config is added to the first create.
kubectl apply -f nginx.yaml
kubectl delete -f nginx.yaml
kubectl create -f nginx.yaml --save-config --record
kubectl apply -f nginx.yaml --record #records revision information; recent versions do this without the flag as well
kubectl exec -it n56-nginx-deployment-857fc5cb7f-llxmm -n n56 -- bash
kubectl logs n56-nginx-deployment-857fc5cb7f-llxmm -n n56
kubectl delete pod n56-nginx-deployment-857fc5cb7f-llxmm -n n56
kubectl edit svc n56-nginx-service -n n56 #edit an API object; changes take effect immediately but are not written back to the yaml file
kubectl scale -n n56 deployment n56-nginx-deployment --replicas=2 #change the pod replica count to 2
kubectl label node 192.168.6.91 project=linux56 #add a label to a node; combined with deployment.spec.template.spec.nodeSelector, a yaml file can pin specific containers to nodes carrying a specific label
kubectl label node 192.168.6.91 project- #remove the label
kubectl cluster-info
kubectl top node/pod #show resource usage of nodes or pods; requires the Metrics API to be installed separately
kubectl cordon 192.168.6.89 #mark a node as unschedulable
kubectl uncordon 192.168.6.89
kubectl drain 192.168.6.89 --force --ignore-daemonsets --delete-emptydir-data #after the pods are evicted the node is marked unschedulable
kubectl api-resources #list API resource names, including short names and API versions
kubectl config view #show the current kubeconfig; useful as a reference when generating config files
http://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands
https://kubernetes.io/zh/docs/concepts/workloads/controllers/deployment/
http://docs.kubernetes.org.cn/251.html
https://www.kubernetes.org.cn/kubernetes%e8%ae%be%e8%ae%a1%e7%90%86%e5%bf%b5
Field | Description |
---|---|
apiVersion | The k8s API version used to create the object. |
kind | The type of object to create. |
metadata | Data that uniquely identifies the object, including name and an optional namespace. |
spec | The desired state defined for the object. |
status | Generated automatically by k8s after the pod is created. |
apiVersion: v1
kind: ReplicationController
metadata:
  name: ng-rc
spec:
  replicas: 2
  selector:
    app: ng-rc-80
    #app1: ng-rc-81
  template:
    metadata:
      labels:
        app: ng-rc-80
        #app1: ng-rc-81
    spec:
      containers:
      - name: ng-rc-80
        image: nginx
        ports:
        - containerPort: 80
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: frontend
spec:
  replicas: 2
  selector:
    # matchLabels:
    #   app: ng-rs-80
    matchExpressions:
    - {key: app, operator: In, values: [ng-rs-80,ng-rs-81]} #if pods with both the ng-rs-80 and ng-rs-81 labels exist, one pod is created under each label and the total is still two; the expectation is usually two per label, so this loose matching is rarely used
  template:
    metadata:
      labels:
        app: ng-rs-80
    spec:
      containers:
      - name: ng-rs-80
        image: nginx
        ports:
        - containerPort: 80
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 2
  selector:
    #app: ng-deploy-80 #rc
    matchLabels: #rs or deployment
      app: ng-deploy-80
    # matchExpressions:
    # - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.16.1
        ports:
        - containerPort: 80
When a pod is recreated its IP may change, which breaks access between pods by IP. The service therefore has to be decoupled from the pods behind it by declaring a Service object.
Two kinds of Service are commonly used (ClusterIP and NodePort, shown below):
kube-proxy --watch--> kube-apiserver
kube-proxy watches kube-apiserver; as soon as a Service resource changes (i.e. the Service is modified through the k8s API), kube-proxy regenerates the corresponding load-balancing rules, keeping the Service in its latest state.
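The rules kube-proxy maintains can be inspected directly on a node; which command applies depends on whether kube-proxy runs in ipvs or iptables mode (ipvsadm must be installed for the former):
root@node1:~# ipvsadm -Ln | head                               #ipvs mode: one virtual server per Service port
root@node1:~# iptables -t nat -S | grep KUBE-SERVICES | head   #iptables mode: Service chains in the nat table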
#deploy_pod.yaml
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    #matchLabels: #rs or deployment
    #  app: ng-deploy3-80
    matchExpressions:
    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.16.1
        ports:
        - containerPort: 80
      #nodeSelector:
      #  env: group1
#svc-service.yaml #access from inside the cluster only
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 88
    targetPort: 80
    protocol: TCP
  type: ClusterIP
  selector:
    app: ng-deploy-80
#svc_NodePort.yaml #access from outside the cluster; the rule is active on every node, and an HA load balancer outside the k8s cluster is configured on top of it, which is an efficient way to expose services. If ingress (layer-7 load balancing) is added so that multiple services can be matched by domain name and the like, ingress sits after the nodePort and before the service, so external traffic then passes through both the layer-4 NodePort path and the layer-7 ingress and can become a bottleneck under very heavy traffic. Ingress also supports fewer forwarding rules and is harder to configure.
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 90
    targetPort: 80
    nodePort: 30012
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80
To decouple data from the image and share data between containers, k8s abstracts an object that holds data and acts as storage: the volume.
https://kubernetes.io/zh/docs/concepts/storage/volumes/
When a pod is assigned to a node, the emptyDir volume is created first and exists for as long as that pod runs on that node. As the name says, it is initially empty. The containers in the pod can read and write the same files in the emptyDir volume, even though the volume may be mounted at the same or different paths in each container. When the pod is removed from the node for any reason, the data in the emptyDir volume is deleted permanently.
Path of the volume on the host: /var/lib/kubelet/pods/ID/volumes/kubernetes.io~empty-dir/cache-volume/FILE
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /cache
          name: cache-volume-n56
      volumes:
      - name: cache-volume-n56
        emptyDir: {}
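To see the host path described above in action, a file written inside the pod can be found on the node the pod was scheduled to (the pod name is a placeholder; use the real one from kubectl get pod -o wide):
root@master1:~# kubectl exec nginx-deployment-xxxx -- touch /cache/test.txt
root@node1:~# find /var/lib/kubelet/pods/ -path '*empty-dir*' -name test.txt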
hostPath mounts a file or directory from the node's filesystem into the pod; when the pod is deleted, the volume (the data on the host) is not deleted.
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /data/n56
          name: cache-n56-volume
      volumes:
      - name: cache-n56-volume
        hostPath:
          path: /opt/n56
An nfs volume mounts an existing NFS share into the container. Unlike emptyDir, the contents of an nfs volume are kept when the pod is deleted; the volume is merely unmounted. This means an NFS volume can be pre-populated with data and hand data over between pods, and NFS can be mounted by multiple writers at the same time. In practice the NFS share is mounted on the node and then mapped into the container.
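Before applying the manifest below it is worth confirming that the nodes can actually reach the export (the server address matches the example that follows; on Ubuntu nodes the client tools come from nfs-common):
root@node1:~# apt install -y nfs-common          #NFS client tools
root@node1:~# showmount -e 172.31.1.103          #the export /data/k8sdata should be listed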
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment-site2
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-81
  template:
    metadata:
      labels:
        app: ng-deploy-81
    spec:
      containers:
      - name: ng-deploy-81
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html/mysite
          name: my-nfs-volume
      volumes:
      - name: my-nfs-volume
        nfs:
          server: 172.31.1.103
          path: /data/k8sdata
---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-81
spec:
  ports:
  - name: http
    port: 80
    targetPort: 80
    nodePort: 30017
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-81
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /usr/share/nginx/html/mysite
          name: my-nfs-volume
        - mountPath: /usr/share/nginx/html/js
          name: my-nfs-js
      volumes:
      - name: my-nfs-volume
        nfs:
          server: 172.31.7.109
          path: /data/magedu/n56
      - name: my-nfs-js
        nfs:
          server: 172.31.7.109
          path: /data/magedu/js
---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30016
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80
ConfigMap decouples configuration from the image: the configuration is stored in a ConfigMap object, which is then imported into the pod. Here a ConfigMap object is declared and mounted into the pod as a volume.
Configuration changes:
Passing configuration as environment variables:
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  default: |
    server {
      listen 80;
      server_name www.mysite.com;
      index index.html;
      location / {
        root /data/nginx/html;
        if (!-e $request_filename) {
          rewrite ^/(.*) /index.html last;
        }
      }
    }
---
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-8080
        image: tomcat
        ports:
        - containerPort: 8080
        volumeMounts:
        - name: nginx-config
          mountPath: /data
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /data/nginx/html
          name: nginx-static-dir
        - name: nginx-config
          mountPath: /etc/nginx/conf.d
      volumes:
      - name: nginx-static-dir
        hostPath:
          path: /data/nginx/linux39
      - name: nginx-config
        configMap:
          name: nginx-config
          items:
          - key: default
            path: mysite.conf
---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30019
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80
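After the three objects above are created, the rendered config can be checked inside the nginx container (the pod name is a placeholder; -c selects the nginx container since this pod has two):
root@master1:~# kubectl get pod -l app=ng-deploy-80
root@master1:~# kubectl exec nginx-deployment-xxxx -c ng-deploy-80 -- cat /etc/nginx/conf.d/mysite.conf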
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  username: user1
---
#apiVersion: extensions/v1beta1
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        env:
        - name: "test"
          value: "value"
        - name: MY_USERNAME
          valueFrom:
            configMapKeyRef:
              name: nginx-config
              key: username
        ports:
        - containerPort: 80
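A quick check that the variables made it into the container (the pod name is again a placeholder):
root@master1:~# kubectl exec nginx-deployment-xxxx -- env | grep -E 'MY_USERNAME|test'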
A DaemonSet runs the same pod on every node in the cluster; when a new node joins, the pod is created on it as well, and when a node is removed from the cluster its pod is reclaimed by kubernetes. Deleting the DaemonSet deletes all pods it created.
https://kubernetes.io/zh/docs/concepts/workloads/controllers/daemonset/
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-elasticsearch
  namespace: kube-system
  labels:
    k8s-app: fluentd-logging
spec:
  selector:
    matchLabels:
      name: fluentd-elasticsearch
  template:
    metadata:
      labels:
        name: fluentd-elasticsearch
    spec:
      tolerations:
      # this toleration is to have the daemonset runnable on master nodes
      # remove it if your masters can't run pods
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      containers:
      - name: fluentd-elasticsearch
        image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
#log collection path: with a simple pattern the log file path can be configured as /var/lib/docker/overlay2/*/diff/data/*.log
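After applying the manifest, one fluentd pod should show up per node, masters included because of the toleration above:
root@master1:~# kubectl get daemonset fluentd-elasticsearch -n kube-system
root@master1:~# kubectl get pod -n kube-system -o wide | grep fluentd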
Kubernetes official documentation:
https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
PersistentVolume parameters:
~# kubectl explain persistentvolume.spec
capacity: size of the PV, kubectl explain persistentvolume.spec.capacity
accessModes: access modes, kubectl explain persistentvolume.spec.accessModes
ReadWriteOnce -- the PV can be mounted read-write by a single node, RWO
ReadOnlyMany -- the PV can be mounted by many nodes, but read-only, ROX
ReadWriteMany -- the PV can be mounted read-write by many nodes, RWX
ReadWriteOncePod -- the PV can be mounted read-write by a single pod, requires v1.22+, RWOP
persistentVolumeReclaimPolicy
kubectl explain persistentvolumes.spec.persistentVolumeReclaimPolicy
- Retain: after release the PV is kept as-is; an administrator has to clean it up manually
- Recycle: basic scrub of the volume, i.e. all data on it (including directories and hidden files) is deleted; currently only NFS and hostPath support this
- Delete: the volume and its underlying storage are deleted automatically
volumeMode: volume type, kubectl explain persistentvolumes.spec.volumeMode
defines whether the storage volume is used as a block device or a filesystem; the default is filesystem
mountOptions: list of additional mount options for finer-grained control, e.g. ro.
PersistentVolumeClaim parameters:
~# kubectl explain PersistentVolumeClaim
accessModes: access modes, same as for a PV. kubectl explain PersistentVolumeClaim.spec.accessModes
resources: the amount of storage the PVC requests
https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
selector: label selector used to pick the PV to bind
matchLabels: match by label name
matchExpressions: match by expression (make sure the match is precise so the PVC is not bound to the wrong PV)
volumeName: name of the PV to bind
volumeMode: volume type; defines whether the PVC uses a block device or a filesystem, default is filesystem.
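A minimal sketch tying these parameters together, assuming an NFS export like the ones used earlier (server, path, names and sizes are illustrative):
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv-n56
spec:
  capacity:
    storage: 2Gi                     #capacity
  accessModes:
    - ReadWriteMany                  #RWX
  persistentVolumeReclaimPolicy: Retain
  nfs:
    server: 172.31.1.103             #illustrative, matches the NFS examples above
    path: /data/k8sdata
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-pvc-n56
  namespace: n56
spec:
  accessModes:
    - ReadWriteMany                  #must be compatible with the PV
  resources:
    requests:
      storage: 2Gi
  volumeName: nfs-pv-n56             #bind to this specific PV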