ubuntu20.04升级k8s集群,etcd备份恢复,k8s集群使用

1.kubernetes 集群升级;  2.总结yaml文件  3.etcd客户端使用、数据备份和恢复;  4.kubernetes集群维护常用命令; 5.资源对象    rc/rs/deployment、    service、    volume、      emptyDir、      hostpath、      NFS


1.kubernetes 集群升级

1.master升级:一个一个升级,先在node节点将master从配置文件中删掉,然后重启kube-lb服务,接着将master以替换二进制的方式升级master,最后再修改node节点的配置,将master加入负载均衡节点,重启node节点服务,完成升级。

2.node升级:也是一个node升级完再升级下一个,升级一个node时,需要停掉kubelet和kube-proxy服务,然后将node的二进制替换掉,升级node,最后将kubelet和kube-proxy启动。

master升级:在master1,先下载升级的二进制包,到github上下载:https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.21.md

root@k8s-master1:/etc/kubeasz# cd /usr/local/src/

root@k8s-master1:/usr/local/src# wget https://dl.k8s.io/v1.21.5/kubernetes.tar.gz

root@k8s-master1:/usr/local/src# wget https://dl.k8s.io/v1.21.5/kubernetes-client-linux-amd64.tar.gz

root@k8s-master1:/usr/local/src#wget https://dl.k8s.io/v1.21.5/kubernetes-server-linux-amd64.tar.gz

root@k8s-master1:/usr/local/src#wget https://dl.k8s.io/v1.21.5/kubernetes-node-linux-amd64.tar.gz 

root@k8s-master1:/usr/local/src# tar xf kubernetes-client-linux-amd64.tar.gz
root@k8s-master1:/usr/local/src# tar xf kubernetes-node-linux-amd64.tar.gz
root@k8s-master1:/usr/local/src# tar xf kubernetes-server-linux-amd64.tar.gz
root@k8s-master1:/usr/local/src# tar xf kubernetes.tar.gz

###先升级master1,修改node上的lb配置将master1注掉-->重启lb服务-->拷贝二进制包到master-->放开注释-->重启lb服务

root@k8s-master1:~# for i in {1..3};do ssh k8s-node$i "sed -i 's/server 192.168.241.51:6443/#server 192.168.241.51:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done    #注意:restart 必须放在 ssh 的引号内,在 node 上执行,否则重启的是本机服务

###因为master1就是部署机器,因此,先停止服务,直接拷贝服务就可以了

root@k8s-master1:/usr/local/src/kubernetes# systemctl stop kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service

root@k8s-master1:/usr/local/src/kubernetes# \cp server/bin/{kube-apiserver,kube-controller-manager,kube-proxy,kube-scheduler,kubelet,kubectl} /usr/local/bin/

启动服务

root@k8s-master1:/usr/local/src/kubernetes# systemctl start kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service

#查看master1的版本应该是1.21.5

root@k8s-master1:/usr/local/src/kubernetes# kubectl get node
NAME             STATUS                     ROLES    AGE    VERSION
192.168.241.51   Ready,SchedulingDisabled   master   102m   v1.21.5
192.168.241.52   Ready,SchedulingDisabled   master   102m   v1.21.0
192.168.241.53   Ready,SchedulingDisabled   master   96m    v1.21.0
192.168.241.57   Ready                      node     19m    v1.21.0
192.168.241.58   Ready                      node     101m   v1.21.0
192.168.241.59   Ready                      node     101m   v1.21.0

将node节点的lb上的master重新添加回去,并重启lb的服务

root@k8s-master1:/usr/local/src/kubernetes# for i in {1..3};do ssh k8s-node$i "sed -i 's/#server 192.168.241.51:6443/server 192.168.241.51:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done

依次添加master2

root@k8s-master1:~# for i in {1..3};do ssh k8s-node$i "sed -i 's/server 192.168.241.52:6443/#server 192.168.241.52:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done

root@k8s-master1:~#ssh  k8s-master2  "systemctl stop kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service"

root@k8s-master1:/etc/kubeasz# cd /usr/local/src/kubernetes/
root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kube-apiserver,kube-controller-manager,kube-proxy,kube-scheduler,kubelet,kubectl} k8s-master2:/usr/local/bin/

root@k8s-master1:~#ssh  k8s-master2  "systemctl start kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service"

root@k8s-master1:/usr/local/src/kubernetes# for i in {1..3};do ssh k8s-node$i "sed -i 's/#server 192.168.241.52:6443/server 192.168.241.52:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done

root@k8s-master1:/usr/local/src/kubernetes# kubectl get node
NAME             STATUS                     ROLES    AGE    VERSION
192.168.241.51   Ready,SchedulingDisabled   master   112m   v1.21.5
192.168.241.52   Ready,SchedulingDisabled   master   112m   v1.21.5

升级master3

root@k8s-master1:~# for i in {1..3};do ssh k8s-node$i "sed -i 's/server 192.168.241.53:6443/#server 192.168.241.53:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done

root@k8s-master1:~#ssh  k8s-master3  "systemctl stop kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service"

root@k8s-master1:/etc/kubeasz# cd /usr/local/src/kubernetes/ 

root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kube-apiserver,kube-controller-manager,kube-proxy,kube-scheduler,kubelet,kubectl} k8s-master3:/usr/local/bin/

root@k8s-master1:~#ssh  k8s-master3  "systemctl start kube-apiserver.service kube-controller-manager.service kube-scheduler.service kubelet.service kube-proxy.service"

root@k8s-master1:/usr/local/src/kubernetes# for i in {1..3};do ssh k8s-node$i "sed -i 's/#server 192.168.241.53:6443/server 192.168.241.53:6443/g' /etc/kube-lb/conf/kube-lb.conf && systemctl restart kube-lb.service";done

root@k8s-master1:/usr/local/src/kubernetes# kubectl get node
NAME             STATUS                     ROLES    AGE    VERSION
192.168.241.51   Ready,SchedulingDisabled   master   117m   v1.21.5
192.168.241.52   Ready,SchedulingDisabled   master   117m   v1.21.5
192.168.241.53   Ready,SchedulingDisabled   master   111m   v1.21.5
192.168.241.57   Ready                      node     34m    v1.21.0
192.168.241.58   Ready                      node     116m   v1.21.0
192.168.241.59   Ready                      node     116m   v1.21.0

master升级完毕!


node升级:

##只需要停止kubelet和kube-proxy服务后将二进制文件拷贝到执行目录,然后启动这两服务即可

root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node1 "systemctl stop kubelet kube-proxy"

root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kubelet,kube-proxy,kubectl} k8s-node1:/usr/local/bin

root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node1 "systemctl start kubelet kube-proxy"

root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node2 "systemctl stop kubelet kube-proxy"

root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kubelet,kube-proxy,kubectl} k8s-node2:/usr/local/bin

root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node2 "systemctl start kubelet kube-proxy"

root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node3 "systemctl stop kubelet kube-proxy"
root@k8s-master1:/usr/local/src/kubernetes# scp server/bin/{kubelet,kube-proxy,kubectl} k8s-node3:/usr/local/bin

root@k8s-master1:/usr/local/src/kubernetes# ssh k8s-node3 "systemctl start kubelet kube-proxy"

root@k8s-master1:/usr/local/src/kubernetes# kubectl get node
NAME             STATUS                     ROLES    AGE    VERSION
192.168.241.51   Ready,SchedulingDisabled   master   123m   v1.21.5
192.168.241.52   Ready,SchedulingDisabled   master   123m   v1.21.5
192.168.241.53   Ready,SchedulingDisabled   master   117m   v1.21.5
192.168.241.57   NotReady                   node     40m    v1.21.5
192.168.241.58   Ready                      node     123m   v1.21.5
192.168.241.59   Ready                      node     123m   v1.21.5

至此,master和node升级完毕!

2.总结yaml文件

yaml更适用于配置文件,json更适用于API数据返回,json也可以用作配置文件,json不能使用注释。yaml和json可以互相转换。

yaml格式:

大小写敏感

缩进表示层级关系

缩进不能使用tab(制表符),缩进一般是两个空格,同级缩进应该对齐

可以加注释,# 注释

比json更适用于配置文件

列表用短横线表示  -



3.etcd客户端使用、数据备份和恢复

etcd是kv分布式存储系统

到etcd任意节点,执行以下操作:

##etcd命令客户端工具etcdctl,命令使用帮助

root@k8s-etcd1:~# etcdctl member -h

root@k8s-etcd1:~# etcdctl -h

etcd健康状态查询:

root@k8s-etcd2:~# export NODE_IPS="192.168.241.54 192.168.241.55 192.168.241.56" 

root@k8s-etcd2:~# for ip in ${NODE_IPS} ;do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health;done

显示集群成员信息: member list

root@k8s-etcd1:~# etcdctl member list

root@k8s-etcd1:~# etcdctl --write-out=table member list #etcdctl 3版本以上可以不加证书,但是建议加上

ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://192.168.241.56:2379 --write-out=table member list --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem

以表格方式显示节点详细状态: endpoint status

export NODE_IPS="192.168.241.54 192.168.241.55 192.168.241.56"

for ip in ${NODE_IPS} ;do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --write-out=table endpoint status --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health;done

可以看到leader是etcd3,现在可以停止etcd3的服务,

root@k8s-etcd3:/usr/local/bin# systemctl stop etcd

再到etcd1查看,leader不再是etcd3,会自动重新选举一个新的leader

root@k8s-etcd1:~# for ip in ${NODE_IPS} ;do ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints=https://${ip}:2379 --write-out=table endpoint status --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem endpoint health;done

etcd数据的增删改查

    增,改: put

    查:get

    删: del

写入数据: etcdctl put /huahualin linux  

查看数据: etcdctl get  /huahualin 

修改数据: etcdctl put /huahualin centos

删除数据: etcdctl del  /huahualin 

查看有多少key

/usr/local/bin/etcdctl get / --prefix --keys-only

获取key的值

root@k8s-etcd1:~# /usr/local/bin/etcdctl get /registry/services/endpoints/default/kubernetes

如果想查看多少个pod,但是也不准,可以过滤

/usr/local/bin/etcdctl get / --prefix --keys-only|grep pod |wc -l

etcd获取calico相关

/usr/local/bin/etcdctl get / --prefix --keys-only|grep calico

root@k8s-etcd1:~# /usr/local/bin/etcdctl get /calico/ipam/v2/assignment/ipv4/block/10.200.169.128-26

etcd V3的watch机制:

基于不断监控数据,发生变化就主动发通知客户端,保证数据的快速同步,ETCD V3版本的watch机制支持watch某个固定的key,也支持一个范围 相比如v2,内存只存key,值放在磁盘里,因此对磁盘的io很高了,

watch机制更稳定,基本上可以实现数据完全同步

通过Grpc实现远程调用,长链接效率提升明显

放弃目录结构,纯粹kv

watch的使用:可以实时监控数据的变化,这些都在etcdctl自动实现了,不需要我们去单独watch

    etcdcl watch /huahualin

备份和恢复:使用快照来进行备份和恢复 

    WAL机制:预写日志,可以用来恢复数据,记住数据变化的全部历程

    etcd是镜像集群,在每个节点正常同步数据的情况下,每个节点数据都是一样的,因此备份只备份一份就行,还原也是只还原一份就可以了。集群坏一个不需要恢复数据,极端情况是所有节点都被删了,才恢复。

   etcd数据备份:  ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot save etcd-2021-1014.db

    etcd数据恢复: ETCDCTL_API=3 /usr/local/bin/etcdctl snapshot restore etcd-2021-1014.db --data-dir=/tmp/etcd   ##还原的目标目录不能存在,否则会报错,恢复数据需要恢复到etcd的数据目录,目录一般都是在 /var/lib/etcd ,先停etcd服务,然后把数据目录删除,进行数据恢复,最后启动etcd服务。

使用kubeasz项目对生产环境的etcd数据进行备份和恢复:

    cd /etc/kubeasz/

    ./ezctl backup k8s-01   #其实也是连接到其中一台etcd然后将备份好的文件拷贝到master上的集群下的backup目录下

     恢复:./ezctl restore k8s-01

        假设删除了一个pod:   

             方法一:kubectl delete pod net-test1 -n default

             方法二:也可以到etcd下删除key,   etcdctl del /registry/x/x/net-test1,删除key很快

       数据恢复: 过程也是到etcd先停止服务避免写入,然后删除目录进行恢复

         ./ezctl restore k8s-01


4.kubernetes集群维护常用命令

kubectl get pod -A -o wide   查看所有pod

kubectl get service -A   查看所有service

kubectl get nodes   查看所有node节点(node 不属于任何 namespace,无需 -A)

kubectl describe pod pod_name -n ns_name  如果不是默认namespace,需要指定ns,-n后面就是制定namespace的名称

如果pod创建失败,可以使用kubectl logs pod_name -n ns 查看容器日志

如果还看不到日志,可以到node节点查看syslog,

         cat /var/log/syslog 看有没有报错

         cat /var/log/kern.log

利用yaml创建资源: kubectl create -f file.yaml --save-config --record   这条命令类似于  kubectl apply -f file.yaml   ,

kubectl create命令,是根据yaml文件全量创建新资源,所以要求yaml文件中的配置必须是完整的;资源已存在时再次create会失败,想用同一个yaml更新必须先删除资源再重新创建。kubectl apply命令,根据配置文件里面列出来的内容,升级现有的资源,yaml文件的内容可以只写需要升级的属性,apply只会修改资源变化的部分。

用kubectl  apply -f file.yaml   比较多,

获取token:

root@k8s-master2:~# kubectl get secrets 

NAME TYPE DATA AGE 

default-token-7mcjc kubernetes.io/service-account-token 3 10h

root@k8s-master2:~# kubectl describe secret default-token-7mcjc


5.资源对象    rc/rs/deployment、    service、    volume、      emptyDir、      hostpath、      NFS

1.controller,总有三代:

     Replication Controller:副本控制器RC,只支持 (selector = !=),第一代pod副本控制器,主要控制副本,简称rc,现在很少用了

     Replicaset:服务控制集,除了支持rc的selector,还支持使用正则匹配,比如支持in,not in,匹配的范围更大,第二代pod副本控制器RS,也是控制副本,简称rs

     Deployment:第三代pod副本控制器,其实也是调用的replicaset,优点是有更多的高级功能,除了拥有replicaset的功能外,还有别的功能,比如滚动升级、回滚等。用此方法创建的pod名称有三段组成: deploymentname-Replicasetname-podname



2.service:ipvsadm -Ln 可以查看service的服务映射。service类型分为两类:集群内访问k8s集群内的service,使用ClusterIP;从K8S集群外访问集群内的service,使用NodePort,这样可以通过宿主机端口去访问

ClusterIP:

cat nginx.yaml

# Deployment:创建 1 个 nginx Pod 副本,作为下面 ClusterIP Service 示例的后端
apiVersion: apps/v1

kind: Deployment

metadata:

  name: nginx-deployment

spec:

  replicas: 1

  selector:

    # matchExpressions 比 matchLabels 更灵活,支持 In/NotIn 等操作符
    matchExpressions:

    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}

  template:

    metadata:       

      labels:

        # 该 label 需要被上面的 selector 以及 Service 的 selector 匹配
        app: ng-deploy-80

    spec:

      containers:

        - name:  ng-deploy-80

          image: nginx:1.16.1

          ports:

          - containerPort: 80

执行创建deployment控制器:  kubectl apply -f nginx.yaml

root@k8s-master1:~/yaml/service# cat service.yaml

# ClusterIP Service:仅供集群内部访问
apiVersion: v1

kind: Service

metadata:

  name: ng-deploy-80

spec:

  ports:

  - name: http

    port: 80

    targetPort: 80

    protocol: TCP

  type: ClusterIP

  # 必须有 selector 与 Pod 的 label 匹配,否则 Service 不会生成 endpoints,无法转发流量
  selector:

    app: ng-deploy-80

执行创建service:  kubectl apply -f service.yaml

查看: 在k8s的web页面上查看,登录任意一个节点的30002端口 https://192.168.133.59:30002,找到pods,可以看到对应的pod,

Nodeport:

还是使用上面那个nginx.yaml

root@k8s-master1:~/yaml/service# cat nodePort-svc.yaml

# NodePort Service:在每个 node 宿主机上开放 30012 端口,供集群外访问
apiVersion: v1

kind: Service

metadata:

  name: ng-deploy-80

spec:

  ports:

  - name: http

    # port:Service 的 ClusterIP 端口;targetPort:容器端口;nodePort:宿主机端口(默认范围 30000-32767)
    port: 90

    targetPort: 80

    nodePort: 30012

    protocol: TCP

  type: NodePort

  selector:

    app: ng-deploy-80 

执行创建service:  kubectl  apply -f nodePort-svc.yaml

在浏览器访问任何一个node节点的30012端口  http://192.168.133.59:30012/

通过负载均衡器访问:因为通过node的30012访问不太方便,所以可以在ha1和ha2上面配置多个

root@k8s-ha1:~# vim /etc/haproxy//haproxy.cfg

listen huahualin-nginx-80

  # VIP 的 80 端口,TCP 透传到各 node 的 NodePort 30012
  bind 192.168.241.62:80

  mode tcp

  # check inter 3s:每 3 秒健康检查;fall 3/rise 3:连续 3 次失败/成功才切换后端状态
  server node1 192.168.241.57:30012 check inter 3s fall 3 rise 3

  server node2 192.168.241.58:30012 check inter 3s fall 3 rise 3

  server node3 192.168.241.59:30012 check inter 3s fall 3 rise 3

root@k8s-ha1:~# systemctl restart haproxy.service

浏览器访问服务  http://192.168.241.62:80/  ,也可以把域名解析到本地,访问域名


3.Volume:支持多种类型:比如nfs, hostpath,emptyDir,  cinder,rdb等

举例:hostpath,emptyDir,nfs的挂载方式

emptyDir:本地临时卷,就是个空的目录,还是临时的,容器被删除时,emptyDir中的数据也被删除,挂载的容器里的/cache目录不存在也会自动创建,在这个目录下创建目录,可以在使用kubectl get pods -o wide找到在哪个节点上,去那个节点下的/var/lib/kubelet/pods/目录下查找这个pod的这个volumes就可以看到和cache映射的文件

root@k8s-master1:~/yaml/service# cat nginx.yaml

# emptyDir 示例:容器内 /cache 挂载临时卷;Pod 被删除时 emptyDir 中的数据一并删除
apiVersion: apps/v1

kind: Deployment

metadata:

  name: nginx-deployment

spec:

  replicas: 1

  selector:

    matchExpressions:

    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}

  template:

    metadata:

      labels:

        app: ng-deploy-80

    spec:

      containers:

        - name:  ng-deploy-80

          image: nginx:1.16.1

          ports:

          - containerPort: 80

          # 上文及后面的验证步骤都使用 /cache 的 emptyDir 挂载,原 yaml 漏掉了,这里补上
          volumeMounts:

          - mountPath: /cache

            name: cache-volume

      volumes:

      - name: cache-volume

        emptyDir: {}

kubectl  apply -f nginx.yaml

验证emptyDir挂载:进入容器写入数据,再到pod所在node节点验证(hostPath示例见下文,hostPath在容器删除时数据不会删除)

root@k8s-master1:~/yaml/volume# kubectl exec -it nginx-deployment-98f46f4cc-2kbjd bash

kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.

root@nginx-deployment-98f46f4cc-2kbjd:/# cd  /cache/

root@nginx-deployment-98f46f4cc-2kbjd:/cache# echo 333 >> test

##查看pod在哪个节点上,在59上,可以去59查看这个pod挂载emptyDir下面是否生成了test文件

root@k8s-master1:~/yaml/service# kubectl get pods -o wide

NAME                              READY  STATUS    RESTARTS  AGE  IP              NODE            NOMINATED NODE  READINESS GATES

nginx-deployment-98f46f4cc-2kbjd  1/1    Running  0          11h  10.200.107.195  192.168.241.59           

##去59查看这个pod挂载emptyDir下面是否生成了test文件,

root@k8s-node3# find /var/lib/kubelet/* -name cache*

/var/lib/kubelet/pods/29767367-c535-491d-b1c0-beaaff531849/plugins/kubernetes.io~empty-dir/cache-volume

/var/lib/kubelet/pods/29767367-c535-491d-b1c0-beaaff531849/volumes/kubernetes.io~empty-dir/cache-volume

cd /var/lib/kubelet/pods/29767367-c535-491d-b1c0-beaaff531849/volumes/kubernetes.io~empty-dir/cache-volume

root@k8s-node3:/var/lib/kubelet/pods/29767367-c535-491d-b1c0-beaaff531849/volumes/kubernetes.io~empty-dir/cache-volume# echo ddd > test

现在去容器里查看就会有新的字串了

hostPath:可以持久化,但是不能共享,只能在当前主机使用,主机删除以后可能就会被重新调度,很可能会分配到别的主机,别的主机没有这个hostPath容器再创建后如果调度到别的主机那么自己的数据就看不到了,就丢了

root@k8s-master1:~/yaml/volume# cat hostpath.yaml

# hostPath 示例:将 node 宿主机目录 /tmp/cache 挂载到容器内 /cache;Pod 删除后数据保留在 node 上
apiVersion: apps/v1

kind: Deployment

metadata:

  name: nginx-deployment

spec:

  replicas: 1

  selector:

    matchExpressions:

    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}

  template:

    metadata:       

      labels:

        app: ng-deploy-80

    spec:

      containers:

      - name:  ng-deploy-80

        image: nginx:1.16.1

        ports:

        - containerPort: 80

        volumeMounts:

        - mountPath: /cache

          name: cache-volume

      volumes:

      - name: cache-volume

        # hostPath:使用宿主机本地目录,路径不存在时默认会自动创建
        hostPath:

          path: /tmp/cache

查看创建在哪个node上了,hostPath下的/tmp/cache路径会自动创建,

root@k8s-master1:~/yaml/volume# kubectl get pods -o wide

NAME                                READY  STATUS    RESTARTS  AGE    IP              NODE            NOMINATED NODE  READINESS GATES

nginx-deployment-5cc98d6c56-sdtvc  1/1    Running  0          4m43s  10.200.169.131  192.168.241.58           

然后去58这个node上查看/tmp/cache有没有,有的

root@k8s-node2:~# ls /tmp/cache/

到master1节点进入容器创建文件

root@k8s-master1:~/yaml/volume# kubectl exec -it nginx-deployment-5cc98d6c56-sdtvc  bash

kubectl exec [POD] [COMMAND] is DEPRECATED and will be removed in a future version. Use kubectl exec [POD] -- [COMMAND] instead.

root@nginx-deployment-5cc98d6c56-sdtvc:/# echo 123 > /cache/nginx.log

再到58节点查看,会有文件nginx.log生成

root@k8s-node2:~# ls /tmp/cache/

nginx.log

nfs:  网络文件系统共享存储,多个pod可以同时挂载同一个nfs

##ha01上操作:

  先安装nfs,在ha01上面安装nfs

apt update

apt install nfs-server

mkdir /data/nfs -p

vi /etc/exports

/data/nfs *(rw,no_root_squash)  #共享路径需与上面创建的 /data/nfs 一致;授权主机与括号内的权限选项之间不能有空格

systemctl restart nfs-server.service

systemctl enable nfs-server.service

showmount -e  如果有目录说明可以挂载了

###到master1上,暴露node的30016端口

root@k8s-master1:~/yaml/volume# cat nfs.yaml

# NFS 示例:将 NFS 服务器的 /data/nfs 挂载到容器 /usr/share/nginx/html/mysite,多个 Pod 可共享同一 NFS
apiVersion: apps/v1

kind: Deployment

metadata:

  name: nginx-deployment

spec:

  replicas: 1

  selector:

    matchExpressions:

    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}

  template:

    metadata:       

      labels:

        app: ng-deploy-80

    spec:

      containers:

      - name:  ng-deploy-80

        image: nginx:1.16.1

        ports:

        - containerPort: 80

        volumeMounts:

        - mountPath: /usr/share/nginx/html/mysite

          name: my-nfs-volume

      volumes:

      - name: my-nfs-volume

        # nfs 卷:server 为 NFS 服务端地址(此处是 ha01 的 IP),path 为导出目录
        nfs:

          server: 192.168.241.62               

          path: /data/nfs

---

# NodePort Service:暴露 30016 端口供集群外访问
apiVersion: v1

kind: Service

metadata:

  name: ng-deploy-80

spec:

  ports:

  - name: http

    port: 81

    targetPort: 80

    nodePort: 30016

    protocol: TCP

  type: NodePort

  selector:

    app: ng-deploy-80    

showmount -e  ha_ip  #检查是否可以挂载的共享目录,如果可以看到就可以挂载

##到ha01做负载均衡,修改端口号

root@k8s-ha1:~# cat /etc/haproxy/haproxy.cfg

listen huahualin-nginx-80

  # VIP 的 80 端口,TCP 透传到各 node 的 NodePort 30016(对应 nfs.yaml 的 Service)
  bind 192.168.241.62:80

  mode tcp

  # check inter 3s:每 3 秒健康检查;fall 3/rise 3:连续 3 次失败/成功才切换后端状态
  server node1 192.168.241.57:30016 check inter 3s fall 3 rise 3

  server node2 192.168.241.58:30016 check inter 3s fall 3 rise 3

  server node3 192.168.241.59:30016 check inter 3s fall 3 rise 3

重启服务: systemctl restart haproxy

#在浏览器放问: 192.168.241.62:80

访问dashboard: https://192.168.241.58:30002

  进入刚刚创建的容器,可以看到挂载的目录 /usr/share/nginx/html/mysite

root@nginx-deployment-7964d774d9-ntz6g:/# df -h   

Filesystem                Size  Used Avail Use% Mounted on

overlay                    29G  12G  16G  42% /

tmpfs                      64M    0  64M  0% /dev

tmpfs                    975M    0  975M  0% /sys/fs/cgroup

/dev/sda5                  29G  12G  16G  42% /etc/hosts

shm                        64M    0  64M  0% /dev/shm

192.168.241.62:/data/nfs  29G  9.1G  19G  34% /usr/share/nginx/html/mysite

tmpfs                    975M  12K  975M  1% /run/secrets/kubernetes.io/serviceaccount

tmpfs                    975M    0  975M  0% /proc/acpi

tmpfs                    975M    0  975M  0% /proc/scsi

tmpfs                    975M    0  975M  0% /sys/firmware  

在ha1的/data/nfs放图片flowers1.jpg

访问 192.168.241.62:80/mysite/flowers1.jpg

容器的挂载其实不是挂载到pod中的,容器没有内核,其实是挂载到node节点上的,然后映射给容器,查看pod创建在哪个node上

root@k8s-master1:~/yaml/volume# kubectl get pod -o wide

NAME                                READY  STATUS    RESTARTS  AGE  IP              NODE            NOMINATED NODE  READINESS GATES

nginx-deployment-7964d774d9-ntz6g  1/1    Running  1          47m  10.200.107.201  192.168.241.59           

到192.168.241.59上查看

root@k8s-node3:~# df -Th

192.168.241.62:/data/nfs nfs4      29G  9.1G  19G  34% /var/lib/kubelet/pods/0cdfdfa7-c8e5-4cad-b5a3-747f931a6a59/volumes/kubernetes.io~nfs/my-nfs-volume

如果要挂载多个nfs怎么弄?

  ##到master1上面,添加新的挂载用来挂载js文件,把nfs的/data/nfs/js挂载到/usr/share/nginx/html/js

vi  nfs.yaml

# 多 NFS 卷示例:同一个 Pod 同时挂载 /data/nfs 和 /data/nfs/js 两个共享目录
apiVersion: apps/v1

kind: Deployment

metadata:

  name: nginx-deployment

spec:

  replicas: 1

  selector:

    matchExpressions:

    - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}

  template:

    metadata:

      labels:

        app: ng-deploy-80

    spec:

      containers:

      - name:  ng-deploy-80

        image: nginx:1.16.1

        ports:

        - containerPort: 80

        volumeMounts:

        - mountPath: /usr/share/nginx/html/mysite

          name: my-nfs-volume

        - mountPath: /usr/share/nginx/html/js

          name: my-nfs-js

      # 注意:volumes 键只能出现一次(原文重复写了两次,属于非法 YAML),多个卷写成同一列表下的多个条目
      volumes:

      - name: my-nfs-volume

        nfs:

          server: 192.168.241.62

          path: /data/nfs

      - name: my-nfs-js

        nfs:

          server: 192.168.241.62

          path: /data/nfs/js

# 文档分隔符 --- 只需一个
---

apiVersion: v1

kind: Service

metadata:

  name: ng-deploy-80

spec:

  ports:

  - name: http

    port: 81

    targetPort: 80

    nodePort: 30016

    protocol: TCP

  type: NodePort

  selector:

    app: ng-deploy-80

执行创建:kubectl apply -f nfs.yaml


    到ha01创建  /data/nfs/js目录

    mkdir /data/nfs/js

    任意编写个静态文件,假装是js,  vi 1.js

    到dashboard进入容器查看:两个挂载目录/usr/share/nginx/html/js和/usr/share/nginx/html/mysite

root@nginx-deployment-79454b55b8-jbh4s:/# df -h

Filesystem                  Size  Used Avail Use% Mounted on

overlay                      29G  12G  16G  42% /

tmpfs                        64M    0  64M  0% /dev

tmpfs                        975M    0  975M  0% /sys/fs/cgroup

/dev/sda5                    29G  12G  16G  42% /etc/hosts

shm                          64M    0  64M  0% /dev/shm

192.168.241.62:/data/nfs/js  29G  9.1G  19G  34% /usr/share/nginx/html/js

tmpfs                        975M  12K  975M  1% /run/secrets/kubernetes.io/serviceaccount

192.168.241.62:/data/nfs      29G  9.1G  19G  34% /usr/share/nginx/html/mysite

tmpfs                        975M    0  975M  0% /proc/acpi

tmpfs                        975M    0  975M  0% /proc/scsi

tmpfs                        975M    0  975M  0% /sys/firmware 

到浏览器访问

http://192.168.241.62/js/1.js

你可能感兴趣的:(ubuntu20.04升级k8s集群,etcd备份恢复,k8s集群使用)