Advanced Kubernetes Usage (Part 2)

1. Provide a configuration file for nginx with a ConfigMap and verify it

A configMap volume provides a way to inject configuration data into a Pod. Data stored in a ConfigMap object can be referenced by a volume of type configMap and then consumed by containerized applications running in the Pod.
When referencing a ConfigMap object, you refer to it by name inside the volume. You can customize the path under which a particular ConfigMap entry is exposed.

Notes:

  • You must create the ConfigMap before you can use it.
  • A container that consumes a ConfigMap through a subPath volume mount will not receive updates to that ConfigMap.
  • Text data is exposed as files using UTF-8 character encoding. For any other character encoding, use the binaryData field.
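Besides defining the ConfigMap in YAML as below, the same kind of object can be generated from an existing file with kubectl (a minimal sketch; the local file name default.conf is an assumption for illustration):

#generate a ConfigMap manifest from a local nginx config file (default.conf is a hypothetical file)
kubectl create configmap nginx-config --from-file=default=default.conf --dry-run=client -o yaml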
#Write the ConfigMap YAML file
root@master1:~/yaml/20210925/case-yaml/case6# vim deploy_configmap.yml
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
  default: |
    server {
       listen       80;
       server_name  www.mysite.com;
       index        index.html;

       location / {
           root /data/nginx/html;
           if (!-e $request_filename) {
               rewrite ^/(.*) /index.html last;
           }
       }
    }


---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx 
        ports:
        - containerPort: 80
        volumeMounts:
        - mountPath: /data/nginx/html
          name: nginx-static-dir
        - name: nginx-config
          mountPath:  /etc/nginx/conf.d
      volumes:
      - name: nginx-static-dir
        hostPath:
          path: /data/nginx/linux39
      - name: nginx-config
        configMap:
          name: nginx-config
          items:
             - key: default
               path: mysite.conf

---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 30019
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80

root@master1:~/yaml/20210925/case-yaml/case6# kubectl apply -f deploy_configmap.yml
root@master1:~/yaml/20210925/case-yaml/case6# kubectl get pod
NAME                                READY   STATUS    RESTARTS   AGE
net-test1                           1/1     Running   3          17d
net-test2                           1/1     Running   4          17d
net-test3                           1/1     Running   4          17d
nginx-deployment-6b86dd48c8-hdw2x   1/1     Running   0          6m54s

#Check the configuration file inside the container; it matches the ConfigMap
root@nginx-deployment-6b86dd48c8-hdw2x:/# cat /etc/nginx/conf.d/mysite.conf 
server {
   listen       80;
   server_name  www.mysite.com;
   index        index.html;

   location / {
       root /data/nginx/html;
       if (!-e $request_filename) {
           rewrite ^/(.*) /index.html last;
       }
   }
}

Verify the website

root@master1:~/yaml/20210925/case-yaml/case6# kubectl exec -it deploy/nginx-deployment -- bash
root@nginx-deployment-6b86dd48c8-hdw2x:/# cd /data/nginx/html/
root@nginx-deployment-6b86dd48c8-hdw2x:/data/nginx/html# echo "ceshi"> index.html
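To confirm nginx is serving with the ConfigMap-provided server block, the NodePort can be requested from outside the cluster (a sketch; the node IP 10.0.0.111 is an assumption, port 30019 comes from the Service above, and the Host header must match server_name in mysite.conf):

curl -H 'Host: www.mysite.com' http://10.0.0.111:30019/
#expected to return the test content written above (ceshi)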


2. PV and PVC characteristics

2.1 Using a PV

A PV is a storage resource whose definition mainly covers storage capacity, access modes, storage type, reclaim policy, backend storage type, and other key settings. It is an abstraction over the underlying network storage: the network storage resource is split from one whole into multiple pieces that are then handed out to different workloads.

#For NFS, the nfs-server must first be installed and configured on the corresponding host
root@node1:/data/svc# mkdir /data/k8sdata/test/redis-datadir -p
root@node1:/data/svc# apt install -y nfs-kernel-server
root@node1:~# vim /etc/exports
/data/svc 10.0.0.0/24(rw,no_root_squash,sync)
root@node1:~# systemctl restart nfs-kernel-server.service
root@node1:/data/svc# systemctl enable nfs-kernel-server.service
Synchronizing state of nfs-kernel-server.service with SysV service script with /lib/systemd/systemd-sysv-install.
Executing: /lib/systemd/systemd-sysv-install enable nfs-kernel-server
#Create the PV
root@master1:~/yaml# vim redis-pv.yaml
apiVersion: v1
kind: PersistentVolume 
metadata:
  name: redis-data-pv
  namespace: test
spec:
  capacity: #size of this PV
    storage: 10Gi
  accessModes: #access modes
  - ReadWriteOnce
  nfs:
    path: /data/k8sdata/test/redis-datadir 
    server: 10.0.0.111
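Assuming the NFS export above is reachable from the cluster, applying this manifest should leave the PV waiting for a claim (a sketch of the verification):

kubectl apply -f redis-pv.yaml
kubectl get pv redis-data-pv   #STATUS should show Available until a PVC binds it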

2.2 Using a PVC

A PVC is a user's request for storage, mainly specifying the requested storage size, access modes, PV selection criteria, and storage class. It is a claim on PV resources: just as a Pod consumes node resources, a Pod persists its data to a PV through a PVC, and the PV in turn persists it to the backing storage.

root@master1:~/yaml# vim redis-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: redis-data-pvc
  namespace: test
spec:
  volumeName: redis-data-pv #name of the PV to bind
  accessModes:
  - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
      
root@master1:~/yaml# kubectl apply -f redis-pvc.yaml 
#After binding, the status changes from Available to Bound
root@master1:~/yaml# kubectl get pv
NAME            CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM                 STORAGECLASS   REASON   AGE
redis-data-pv   10Gi       RWO            Retain           Bound       test/redis-data-pvc                           33m
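The claim side can be checked the same way (a sketch; the test namespace comes from the PVC manifest above):

kubectl get pvc -n test   #redis-data-pvc should show STATUS Bound and VOLUME redis-data-pv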

3. k8s hands-on cases

3.1 Case: a zookeeper cluster

Build a zookeeper cluster with PV and PVC as the backend storage.

3.1.1 Download a JDK image

Reference: https://blog.csdn.net/weixin_43719988/article/details/120220768?spm=1001.2014.3001.5501
Or pull it directly:

root@master1:~/yaml# docker pull elevy/slim_java:8
8: Pulling from elevy/slim_java
88286f41530e: Pull complete 
7141511c4dad: Pull complete 
fd529fe251b3: Pull complete 
Digest: sha256:044e42fb89cda51e83701349a9b79e8117300f4841511ed853f73caf7fc98a51
Status: Downloaded newer image for elevy/slim_java:8
docker.io/elevy/slim_java:8
root@master1:~/yaml# docker tag elevy/slim_java:8 10.0.0.104/baseimages/slim_java:8
root@master1:~/yaml# docker push 10.0.0.104/baseimages/slim_java:8
The push refers to repository [10.0.0.104/baseimages/slim_java]
e053edd72ca6: Pushed 
aba783efb1a4: Pushed 
5bef08742407: Pushed 
8: digest: sha256:817d0af5d4f16c29509b8397784f5d4ec3accb1bfde4e474244ed3be7f41a604 size: 952

3.1.2 Build the zookeeper image

zookeeper download address: http://archive.apache.org/dist/zookeeper/
This part is still being tested.

3.2 Run nginx and tomcat from custom images with PV/PVC/NFS

3.2.1 Build the nginx image

Prepare the startup script
root@master1:~/scripts/Dockerfile/nginx# cat run.sh 
#!/bin/bash
service nginx start
Prepare the configuration file
root@master1:~/scripts/Dockerfile/nginx# grep -Ev "#|^$" nginx.conf
worker_processes  1;
events {
    worker_connections  1024;
}
http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile        on;
    keepalive_timeout  65;

    upstream tomcat_server{
        server  tomcat-svc.test.local:80;
    }

    server {
        listen       80;
        server_name  localhost;

        #charset koi8-r;

        #access_log  logs/host.access.log  main;
        location / {
            root   html;
            index  index.html index.htm;
        }
        location /webapp {
            root   html;
            index  index.html index.htm;
        }
        location /myapp {
            proxy_pass http://10.0.0.101:8080;
            proxy_set_header   Host    $host;
            proxy_set_header   X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header   X-Real-IP $remote_addr;
        }
        error_page   500 502 503 504  /50x.html;
        location = /50x.html {
            root   html;
        }
    }
}

#Write the Dockerfile
root@master1:~/scripts/Dockerfile/nginx# cat Dockerfile 
FROM nginx:latest

COPY nginx.conf /usr/local/nginx/conf/
ADD run.sh /scripts/run.sh

EXPOSE 80 443

CMD [ "/scripts/run.sh" ]

#Build and push to the harbor registry
root@master1:~/scripts/Dockerfile/nginx# cat run-nginx.sh 
#!/bin/bash
docker build -t 10.0.0.104/baseimages/nginx-base:v1.8.0 .
sleep 1
docker push 10.0.0.104/baseimages/nginx-base:v1.8.0
root@master1:~/scripts/Dockerfile/nginx# bash run-nginx.sh 

3.2.2 Build the tomcat image

Prepare the startup script
root@master1:~/scripts/Dockerfile/tomcat# cat run.sh 
#!/bin/bash
/usr/local/tomcat/bin/catalina.sh start
tail -f /etc/hosts
#Write the Dockerfile
root@master1:~/scripts/Dockerfile/tomcat# cat Dockerfile 
FROM 10.0.0.104/baseimages/jdk-base:v17
ADD apache-tomcat-9.0.54.tar.gz /usr/local/src
RUN ln -sv /usr/local/src/apache-tomcat-9.0.54 /usr/local/tomcat && mkdir /usr/local/jdk/jre/bin -p && ln -sv /usr/local/jdk/bin/java /usr/local/jdk/jre/bin/java
ADD app.tar.gz /usr/local/tomcat/webapp/test
ADD run.sh /usr/local/tomcat/bin/run.sh

EXPOSE 8080 443

CMD [ "/usr/local/tomcat/bin/run.sh" ]

#Build and push to the harbor registry
root@master1:~/scripts/Dockerfile/tomcat# cat run-tomcat.sh 
#!/bin/bash
docker build -t 10.0.0.104/baseimages/tomcat-base:v9.0.54 .
sleep 1
docker push 10.0.0.104/baseimages/tomcat-base:v9.0.54
#Run the build
root@master1:~/scripts/Dockerfile/tomcat# bash run-tomcat.sh

3.2.3 Run nginx on k8s

#Deploy nginx
root@master1:~/yaml/nginx-tomcat# cat nginx.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name:  nginx
  namespace: test
  labels:
    app:  nginx
spec:
  selector:
    matchLabels:
      app: nginx
  replicas: 1
  template:
    metadata:
      labels:
        app:  nginx
    spec:
      # initContainers:
        # Init containers are exactly like regular containers, except:
          # - Init containers always run to completion.
          # - Each init container must complete successfully before the next one starts.
      containers:
      - name:  nginx
        image:  10.0.0.104/baseimages/nginx-base:v1.8.0
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
          limits:
            cpu: 100m
            memory: 100Mi
        ports:
        - containerPort:  80
          name:  nginx
        volumeMounts:
        - name: localtime
          mountPath: /etc/localtime
        - name: nginx-static
          mountPath: /usr/share/nginx/html/
          readOnly: false
      volumes:
        - name: localtime
          hostPath:
            path: /usr/share/zoneinfo/Asia/Shanghai
        - name: nginx-static  #NFS mount; the NFS server must be installed and configured on the target host before mounting
          nfs:
            server: 10.0.0.111  
            path: /data/nginx/
      restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
  name: nginx-svc
  namespace: test
spec:
  selector:
    app: nginx
  type: NodePort
  ports:
  - name: nginx-svc
    protocol: TCP
    port: 85
    targetPort: 80
    # If you set the `spec.type` field to `NodePort` and you want a specific port number,
    # you can specify a value in the `spec.ports[*].nodePort` field.
    nodePort: 30010
root@master1:~/yaml/nginx-tomcat# kubectl apply -f nginx.yaml 
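A quick sanity check that the Deployment and Service came up in the test namespace (a sketch, output omitted):

kubectl get deploy,svc -n test
kubectl get pod -n test -o wide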

3.2.4 Run tomcat on k8s

root@master1:~/yaml/nginx-tomcat# cat tomcat.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name:  tomcat-deploy
  namespace: test
  labels:
    app:  tomcat-deploy
spec:
  selector:
    matchLabels:
      app: tomcat-deploy
  replicas: 1
  template:
    metadata:
      labels:
        app:  tomcat-deploy
    spec:
      containers:
      - name:  tomcat-deploy
        image:  10.0.0.104/baseimages/tomcat-base:v9.0.54
        ports:
        - containerPort:  8080
          name:  tomcat-svc
        volumeMounts:
        - name: localtime
          mountPath: /etc/localtime
        - name: tomcat-html
          mountPath: /usr/local/tomcat/webapp/
      volumes:
        - name: localtime
          hostPath:
            path: /usr/share/zoneinfo/Asia/Shanghai
        - name: tomcat-html
          nfs:
            server: 10.0.0.111
            path: /data/tomcat
      restartPolicy: Always

---

apiVersion: v1
kind: Service
metadata:
  name: tomcat-svc
  namespace: test
spec:
  selector:
    app: tomcat-deploy
  type: NodePort
  ports:
  - name: tomcat-svc
    protocol: TCP
    port: 8081
    targetPort: 8080
    nodePort: 30004
root@master1:~/yaml/nginx-tomcat# kubectl apply -f tomcat.yaml    
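With both workloads running, the two NodePort services can be checked from outside the cluster (a sketch; the node IP 10.0.0.111 is an assumption, the ports come from the Services above):

curl http://10.0.0.111:30010/    #nginx serving the NFS-backed static content
curl http://10.0.0.111:30004/    #tomcat exposed on NodePort 30004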

5. Data persistence and sharing with k8s and Ceph

5.1 Using RBD with k8s for storage volumes and dynamic storage volumes

To let Pods in k8s use an RBD image from Ceph as a storage device, you need to create the RBD image in Ceph and allow the k8s nodes to authenticate against Ceph.

5.1.1 Create and initialize the RBD pool

#Create the storage pool
root@ceph-deploy:~# ceph osd pool create shijie-rbd-pool1 32 32
pool 'shijie-rbd-pool1' created
root@ceph-deploy:~# ceph osd pool ls
mypool
myrbd1
shijie-rbd-pool1

#Enable the rbd application on the pool
root@ceph-deploy:~# ceph osd pool application enable shijie-rbd-pool1 rbd
enabled application 'rbd' on pool 'shijie-rbd-pool1'

#Initialize the pool for rbd
root@ceph-deploy:~# rbd pool init -p shijie-rbd-pool1
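The pool metadata can be double-checked before creating any image (a sketch):

ceph osd pool application get shijie-rbd-pool1   #should list rbd as the enabled application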

5.1.2 Create the RBD image

root@ceph-deploy:~# rbd create shijie-img-img1 --size 3G --pool shijie-rbd-pool1 --image-format 2 --image-feature layering
root@ceph-deploy:~# rbd ls --pool shijie-rbd-pool1
shijie-img-img1
root@ceph-deploy:~# rbd --image shijie-img-img1 --pool shijie-rbd-pool1 info
rbd image 'shijie-img-img1':
	size 3GiB in 768 objects
	order 22 (4MiB objects)
	block_name_prefix: rbd_data.375c6b8b4567
	format: 2
	features: layering
	flags: 
	create_timestamp: Wed Oct 27 00:40:41 2021

5.1.3 Install ceph-common

Install ceph-common on all masters and nodes.

#Update the apt sources
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
sudo apt-add-repository 'deb https://mirrors.tuna.tsinghua.edu.cn/ceph/debian-pacific/ focal main'
sudo apt update

root@master1:~# apt install ceph-common -y

5.1.4 Create a regular Ceph user and grant permissions

root@ceph-deploy:~# ceph auth get-or-create client.test-shijie mon 'allow r' osd 'allow * pool=shijie-rbd-pool1'
[client.test-shijie]
	key = AQAjCXlhxg6mHRAABELoPtsB0vrQipUGfRLO9g==
Verify the user
root@ceph-deploy:~# ceph auth get client.test-shijie
exported keyring for client.test-shijie
[client.test-shijie]
	key = AQAjCXlhxg6mHRAABELoPtsB0vrQipUGfRLO9g==
	caps mon = "allow r"
	caps osd = "allow * pool=shijie-rbd-pool1"

Export the user credentials to a keyring file
root@ceph-deploy:~/ceph-cluster# ceph auth get client.test-shijie -o ceph.client.test-shijie.keyring

Copy ceph.conf and the keyring file to the masters and nodes
root@ceph-deploy:~/ceph-cluster# scp /etc/ceph/ceph.conf ceph.client.test-shijie.keyring root@10.0.0.101:/etc/ceph/
root@ceph-deploy:~/ceph-cluster# scp /etc/ceph/ceph.conf ceph.client.test-shijie.keyring root@10.0.0.102:/etc/ceph/
root@ceph-deploy:~/ceph-cluster# scp /etc/ceph/ceph.conf ceph.client.test-shijie.keyring root@10.0.0.103:/etc/ceph/
root@ceph-deploy:~/ceph-cluster# scp /etc/ceph/ceph.conf ceph.client.test-shijie.keyring root@10.0.0.111:/etc/ceph/
root@ceph-deploy:~/ceph-cluster# scp /etc/ceph/ceph.conf ceph.client.test-shijie.keyring root@10.0.0.112:/etc/ceph/
root@ceph-deploy:~/ceph-cluster# scp /etc/ceph/ceph.conf ceph.client.test-shijie.keyring root@10.0.0.113:/etc/ceph/

#Verify the user from a node
root@node1:~# ceph --user test-shijie -s
  cluster:
    id:     7e77062f-814b-4782-ba3d-df00c48eafe6
    health: HEALTH_WARN
            application not enabled on 1 pool(s)
 
  services:
    mon: 3 daemons, quorum ceph-node1,ceph-node2,ceph-node3
    mgr: ceph-node1(active), standbys: ceph-node2
    osd: 9 osds: 9 up, 9 in
 
  data:
    pools:   3 pools, 128 pgs
    objects: 11 objects, 670KiB
    usage:   9.07GiB used, 440GiB / 449GiB avail
    pgs:     128 active+clean

#Verify access permissions to the image
root@node1:~# rbd --id test-shijie ls --pool=shijie-rbd-pool1
shijie-img-img1

5.1.5 Configure hostname resolution on the k8s nodes

On all masters and nodes:

root@master1:~# vim /etc/hosts
10.0.0.91 ceph-deploy.example.local ceph-deploy
10.0.0.92 ceph-node1.example.local ceph-node1
10.0.0.93 ceph-node2.example.local ceph-node2
10.0.0.94 ceph-node3.example.local ceph-node3

5.1.6 Mount RBD via a keyring file

Storage volumes backed by Ceph RBD can be provided in two ways: one is to mount the RBD via the keyring file on the host, the other is to define the key from the keyring as a k8s Secret and have the Pod mount the RBD via that Secret.

root@master1:~/yaml/20211010/ceph-case# cat case2-nginx-keyring.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - name: rbd-data1
          mountPath: /data
      volumes:
        - name: rbd-data1
          rbd:
            monitors:
            - '10.0.0.92:6789'
            - '10.0.0.93:6789'
            - '10.0.0.94:6789'
            pool: shijie-rbd-pool1
            image: shijie-img-img1
            fsType: ext4
            readOnly: false
            user: test-shijie
            keyring: /etc/ceph/ceph.client.test-shijie.keyring



root@master1:~/yaml/20211010/ceph-case# kubectl apply -f case2-nginx-keyring.yaml

#Verify the mount
root@master1:~/yaml/20211010/ceph-case# kubectl exec -it deploy/nginx-deployment sh
# df -h|grep rbd
/dev/rbd0                          2.9G  9.1M  2.9G   1% /data
#  echo "123456" > /data/test.txt

5.1.7 Mount RBD via a Secret

#Prepare the secret
root@master1:~# cat /etc/ceph/ceph.client.test-shijie.keyring 
[client.test-shijie]
        key = AQAjCXlhxg6mHRAABELoPtsB0vrQipUGfRLO9g==  #this key will be used
        caps mon = "allow r"
        caps osd = "allow * pool=shijie-rbd-pool1"

#Base64-encode the key
root@master1:~# echo AQAjCXlhxg6mHRAABELoPtsB0vrQipUGfRLO9g== |base64

#Prepare the Secret manifest that secretRef will reference
root@master1:~/yaml/20211010/ceph-case# cat case3-secret-client-shijie.yaml
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-test-shijie
type: "kubernetes.io/rbd"
data:
  key: QVFBakNYbGh4ZzZtSFJBQUJFTG9QdHNCMHZyUWlwVUdmUkxPOWc9PQo= #the base64 value generated above

#Create the Secret
root@master1:~/yaml/20211010/ceph-case# kubectl apply -f case3-secret-client-shijie.yaml

Prepare the Deployment manifest
root@master1:~/yaml/20211010/ceph-case# cat case4-nginx-secret.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - name: rbd-data1
          mountPath: /data
      volumes:
        - name: rbd-data1
          rbd:
            monitors:
            - '10.0.0.92:6789'
            - '10.0.0.93:6789'
            - '10.0.0.94:6789'
            pool: shijie-rbd-pool1
            image: shijie-img-img1
            fsType: ext4
            readOnly: false
            user: test-shijie
            secretRef:
              name: ceph-secret-test-shijie   #the Secret created above

#Verify
root@master1:~/yaml/20211010/ceph-case# kubectl get pod 
NAME                                READY   STATUS    RESTARTS   AGE
net-test1                           1/1     Running   4          22d
net-test3                           1/1     Running   4          22d
nginx-deployment-6f874c88dc-mmlsf   1/1     Running   0          6m11s
# df -h|grep rbd
/dev/rbd0                          2.9G  9.1M  2.9G   1% /data
# ls /data
lost+found  test.txt

5.1.8 Mount RBD via a StorageClass

Get the admin key
root@ceph-deploy:~/ceph-cluster# cat ceph.client.admin.keyring 
[client.admin]
	key = AQDkindhXMU6HxAA2wOghbO8vNRjhF5Z2ZM4Yg==

#Base64-encode it
root@ceph-deploy:~/ceph-cluster# echo AQDkindhXMU6HxAA2wOghbO8vNRjhF5Z2ZM4Yg== | base64
QVFEa2luZGhYTVU2SHhBQTJ3T2doYk84dk5SamhGNVoyWk00WWc9PQo=

#Create the admin Secret
root@master1:~/yaml/20211010/ceph-case# cat case5-secret-admin.yaml 
apiVersion: v1
kind: Secret
metadata:
  name: ceph-secret-admin
type: "kubernetes.io/rbd"
data:
  key: QVFEa2luZGhYTVU2SHhBQTJ3T2doYk84dk5SamhGNVoyWk00WWc9PQo=  #the base64-encoded key

root@master1:~/yaml/20211010/ceph-case# kubectl apply -f case5-secret-admin.yaml 
secret/ceph-secret-admin created

#Verify
root@master1:~/yaml/20211010/ceph-case# kubectl get secret
NAME                  TYPE                                  DATA   AGE
ceph-secret-admin     kubernetes.io/rbd                     1      31s
default-token-bbngc   kubernetes.io/service-account-token   3      22d

#Create the regular user's Secret
root@master1:~/yaml/20211010/ceph-case# kubectl apply -f case3-secret-client-shijie.yaml 
secret/ceph-secret-test-shijie created
root@master1:~/yaml/20211010/ceph-case# kubectl get secret
NAME                      TYPE                                  DATA   AGE
ceph-secret-admin         kubernetes.io/rbd                     1      9m28s
ceph-secret-test-shijie   kubernetes.io/rbd                     1      21s
default-token-bbngc       kubernetes.io/service-account-token   3      22d

#Create the storage class
root@master1:~/yaml/20211010/ceph-case# cat case6-ceph-storage-class.yaml 
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: ceph-storage-class-shijie
  annotations:
    storageclass.kubernetes.io/is-default-class: "true" #set as the default storage class
provisioner: kubernetes.io/rbd
parameters:
  monitors: 10.0.0.92:6789,10.0.0.93:6789,10.0.0.94:6789
  adminId: admin
  adminSecretName: ceph-secret-admin
  adminSecretNamespace: default 
  pool: shijie-rbd-pool1
  userId: test-shijie
  userSecretName: ceph-secret-test-shijie 
root@master1:~/yaml/20211010/ceph-case# kubectl apply -f case6-ceph-storage-class.yaml 
storageclass.storage.k8s.io/ceph-storage-class-shijie created

#Verify
root@master1:~/yaml/20211010/ceph-case# kubectl get storageclass
NAME                                  PROVISIONER         RECLAIMPOLICY   VOLUMEBINDINGMODE   ALLOWVOLUMEEXPANSION   AGE
ceph-storage-class-shijie (default)   kubernetes.io/rbd   Delete          Immediate           false                  4s

#Create a PVC that uses the storage class
root@master1:~/yaml/20211010/ceph-case# cat case7-mysql-pvc.yaml 
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-data-pvc
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: ceph-storage-class-shijie 
  resources:
    requests:
      storage: '5Gi'

root@master1:~/yaml/20211010/ceph-case# kubectl apply -f case7-mysql-pvc.yaml 
persistentvolumeclaim/mysql-data-pvc created

#Verify
root@master1:~/yaml/20211010/ceph-case# kubectl get pvc
NAME             STATUS    VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS                AGE
mysql-data-pvc   Bound     pvc-7704d1ac-d43c-4391-b4b6-25da14cb7d1b   5Gi        RWO            ceph-storage-class-shijie   3m31s


#Start the MySQL service
root@master1:~/yaml/20211010/ceph-case# cat case8-mysql-single.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: 10.0.0.104/baseimages/mysql:5.6.46 
        name: mysql
        env:
          # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: test123456
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-data-pvc 


---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: mysql-service-label 
  name: mysql-service
spec:
  type: NodePort
  ports:
  - name: http
    port: 3306
    protocol: TCP
    targetPort: 3306
    nodePort: 43306
  selector:
    app: mysql

root@master1:~/yaml/20211010/ceph-case# kubectl apply -f case8-mysql-single.yaml 
deployment.apps/mysql created
service/mysql-service unchanged

#Verify
root@master1:~/yaml/20211010/ceph-case# kubectl exec -it  deploy/mysql bash
root@mysql-55f8f7d588-jzm5m:/# df -h
Filesystem                         Size  Used Avail Use% Mounted on
overlay                             19G  7.7G   11G  44% /
tmpfs                               64M     0   64M   0% /dev
tmpfs                              980M     0  980M   0% /sys/fs/cgroup
/dev/mapper/ubuntu--vg-ubuntu--lv   19G  7.7G   11G  44% /etc/hosts
shm                                 64M     0   64M   0% /dev/shm
/dev/rbd0                          4.9G  131M  4.8G   3% /var/lib/mysql   #this line confirms the RBD mount succeeded
tmpfs                              980M   12K  980M   1% /run/secrets/kubernetes.io/serviceaccount
tmpfs                              980M     0  980M   0% /proc/acpi
tmpfs                              980M     0  980M   0% /proc/scsi
tmpfs                              980M     0  980M   0% /sys/firmware
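Beyond the mount point, a quick login confirms MySQL is actually writing to the RBD-backed data directory (a sketch; the password comes from MYSQL_ROOT_PASSWORD above, and it assumes the image ships the mysql client):

kubectl exec -it deploy/mysql -- mysql -uroot -ptest123456 -e 'show databases;'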

5.1.9 Mount CephFS from multiple nodes

Make sure the Ceph admin credentials are present on every node
root@ceph-deploy:~/ceph-cluster# scp ceph.client.admin.keyring 10.0.0.111:/etc/ceph/
root@10.0.0.111's password: 
ceph.client.admin.keyring                                                                                                             100%   63    26.0KB/s   00:00    
root@ceph-deploy:~/ceph-cluster# scp ceph.client.admin.keyring 10.0.0.112:/etc/ceph/
root@10.0.0.112's password: 
ceph.client.admin.keyring                                                                                                             100%   63    59.1KB/s   00:00    
root@ceph-deploy:~/ceph-cluster# scp ceph.client.admin.keyring 10.0.0.113:/etc/ceph/
root@10.0.0.113's password: 
ceph.client.admin.keyring  

root@master1:~/yaml/20211010/ceph-case# cat case9-nginx-cephfs.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 3
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx
        ports:
        - containerPort: 80

        volumeMounts:
        - name: test-staticdata-cephfs 
          mountPath: /usr/share/nginx/html/ 
      volumes:
        - name: test-staticdata-cephfs
          cephfs:
            monitors:
            - '10.0.0.92:6789'
            - '10.0.0.93:6789'
            - '10.0.0.94:6789'
            path: /
            user: admin
            secretRef:
              name: ceph-secret-admin

root@master1:~/yaml/20211010/ceph-case# kubectl apply -f case9-nginx-cephfs.yaml 
deployment.apps/nginx-deployment created

#Verify
root@master1:~/yaml/20211010/ceph-case# kubectl exec -it nginx-deployment-5bf64679b6-2v8tv bash
root@nginx-deployment-5bf64679b6-2v8tv:/# df -h
Filesystem                                      Size  Used Avail Use% Mounted on
10.0.0.92:6789,10.0.0.93:6789,10.0.0.94:6789:/  140G     0  140G   0% /usr/share/nginx/html
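Because all three replicas mount the same CephFS path, a file written from one pod should be readable from the others (a sketch; POD_NAME is a placeholder for another replica listed by kubectl get pod):

kubectl exec deploy/nginx-deployment -- sh -c 'echo cephfs-share-test > /usr/share/nginx/html/index.html'
kubectl exec POD_NAME -- cat /usr/share/nginx/html/index.html   #should print cephfs-share-test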

6. Pod states and probes

6.1 Common Pod states

  • Pending: the Pod creation request has been accepted but scheduling is not complete, because no node currently satisfies the scheduling conditions. A Pod that has been created but has no suitable node to run on is pending; this phase also covers setting up the Pod's network and pulling images.

  • Running: all containers in the Pod have been created, and at least one container is running, starting, or restarting.

  • Succeeded: all containers in the Pod exited successfully, and none of them are being restarted.

  • Failed: all containers in the Pod have exited, and at least one container exited in a failed state.

  • Unknown: the Pod's state cannot be determined. The apiserver learns a Pod's state by talking to the kubelet on the Pod's node; if that kubelet itself fails, the apiserver cannot reach it, receives no information, and the Pod shows as Unknown. The current phase can be queried directly, as shown below.
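A sketch of reading the phase straight from the Pod status (net-test1 is one of the pods already shown running above):

kubectl get pod net-test1 -o jsonpath='{.status.phase}'
kubectl describe pod net-test1   #the Events section explains why a Pod is stuck in Pending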

A probe is a periodic diagnostic performed by the kubelet on a container. To run a diagnostic, the kubelet calls a Handler implemented by the container. There are three types of handlers:

  • ExecAction: executes the specified command inside the container. The diagnostic succeeds if the command exits with return code 0.
  • TCPSocketAction: performs a TCP check against the container's IP address on the specified port. The diagnostic succeeds if the port is open.
  • HTTPGetAction: performs an HTTP GET request against the container's IP address on the specified port and path. The diagnostic succeeds if the response status code is greater than or equal to 200 and less than 400.

6.2 Each probe yields one of three results

Success: the container passed the diagnostic.
Failure: the container failed the diagnostic.
Unknown: the diagnostic itself failed, so no action is taken.

6.3 Pod probes

6.3.1 The two Pod probes: livenessProbe and readinessProbe

  • livenessProbe (liveness probe): indicates whether the container is running. If the liveness probe fails, the kubelet kills the container, and the container is then subject to its restart policy. If the container does not provide a liveness probe, the default state is Success. livenessProbe controls whether a pod is restarted.
  • readinessProbe (readiness probe): indicates whether the container is ready to serve requests. If the readiness probe fails, the endpoints controller removes the Pod's IP address from the endpoints of all Services that match the Pod. The readiness state before the initial delay defaults to Failure. If the container does not provide a readiness probe, the default state is Success. readinessProbe controls whether a pod is added to a Service.

6.3.2 HTTP probe

Using HTTP OPTIONS: this method is rarely used. It retrieves the methods supported by the current URL. If the request succeeds, the response includes an HTTP header named "Allow" whose value lists the supported methods, such as "GET, POST". This makes it possible to detect whether the service supports a method and therefore whether it is alive.

Additional fields can be configured on httpGet:
host: hostname to connect to, defaults to the Pod IP. You can also set the "Host" HTTP header instead.
scheme: scheme used to connect to the host (HTTP or HTTPS). Defaults to HTTP.
path: path to access on the HTTP server.
httpHeaders: custom HTTP headers to set in the request. HTTP header fields may be repeated.
port: port number or name to access on the container. A number must be in the range 1 to 65535.
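A minimal sketch of an httpGet probe that uses these optional fields (the /healthz path and the Host header value are assumptions for illustration, not part of the example that follows):

        livenessProbe:
          httpGet:
            path: /healthz
            port: 80
            scheme: HTTP
            httpHeaders:
            - name: Host
              value: www.mysite.com
          initialDelaySeconds: 5
          periodSeconds: 3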

Example:

root@master1:~/yaml/20211010/n56-yaml-20211010# cat nginx.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
    #matchExpressions:
    #  - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.17.5 
        ports:
        - containerPort: 80
        #readinessProbe:
        livenessProbe:
          httpGet:
            #path: /monitor/monitor.html
            path: /index.html
            port: 80
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3


---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80 
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 40012
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80


root@master1:~/yaml/20211010/n56-yaml-20211010# kubectl apply -f nginx.yaml 
deployment.apps/nginx-deployment created
service/ng-deploy-80 created

6.3.3 TCP probe

If you only need to check whether a TCP connection can be established, you can specify a TCP probe. If the TCP connection is established, the Pod is marked as healthy. A TCP probe can be useful for gRPC or FTP servers, where an HTTP probe is not a good fit.

root@master1:~/yaml/20211010/n56-yaml-20211010# cat tcp.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: ng-deploy-80
    #matchExpressions:
    #  - {key: app, operator: In, values: [ng-deploy-80,ng-rs-81]}
  template:
    metadata:
      labels:
        app: ng-deploy-80
    spec:
      containers:
      - name: ng-deploy-80
        image: nginx:1.17.5 
        ports:
        - containerPort: 80
        livenessProbe:
          tcpSocket:
            port: 80
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3

      
---
apiVersion: v1
kind: Service
metadata:
  name: ng-deploy-80 
spec:
  ports:
  - name: http
    port: 81
    targetPort: 80
    nodePort: 40012
    protocol: TCP
  type: NodePort
  selector:
    app: ng-deploy-80
root@master1:~/yaml/20211010/n56-yaml-20211010# kubectl apply -f tcp.yaml 
deployment.apps/nginx-deployment configured
service/ng-deploy-80 unchanged

6.3.4 ExecAction probe

A specific status check can be run against the pod by executing a specified command.

root@master1:~/yaml/20211010/n56-yaml-20211010# cat redis.yaml 
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis-deployment
spec:
  replicas: 1
  selector:
    matchLabels: #rs or deployment
      app: redis-deploy-6379
    #matchExpressions:
    #  - {key: app, operator: In, values: [redis-deploy-6379,ng-rs-81]}
  template:
    metadata:
      labels:
        app: redis-deploy-6379
    spec:
      containers:
      - name: redis-deploy-6379
        image: redis
        ports:
        - containerPort: 6379
        readinessProbe:
          exec:
            command:
            - /usr/local/bin/redis-cli
            - quit
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3

        livenessProbe:
          exec:
            command:
            - /usr/local/bin/redis-cli
            - quit
          initialDelaySeconds: 5
          periodSeconds: 3
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 3
      
---
apiVersion: v1
kind: Service
metadata:
  name: redis-deploy-6379 
spec:
  ports:
  - name: http
    port: 6379
    targetPort: 6379
    nodePort: 40016
    protocol: TCP
  type: NodePort
  selector:
    app: redis-deploy-6379

root@master1:~/yaml/20211010/n56-yaml-20211010# kubectl apply -f redis.yaml 
deployment.apps/redis-deployment created
service/redis-deploy-6379 created
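Since the readinessProbe decides whether the pod is added to the Service, its effect can be observed on the endpoints object (a sketch):

kubectl get endpoints redis-deploy-6379   #the pod IP appears here only after the readiness probe succeeds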
