Linux Learning - Kubernetes Resource Manifest Definitions

Resources: API objects (the full list of resource types can be enumerated with kubectl api-resources, shown below)
  • Workloads: Pod, ReplicaSet, Deployment, StatefulSet, DaemonSet, Job, CronJob, ...
  • Service discovery and load balancing: Service, Ingress
  • Configuration and storage: Volume, CSI
    • ConfigMap, Secret
    • DownwardAPI
  • Cluster-level resources
    • Namespace, Node, Role, ClusterRole, RoleBinding, ClusterRoleBinding
  • Metadata resources
    • HPA, PodTemplate, LimitRange
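The resource types themselves (with their short names, API group, and whether they are namespaced) can be listed as well; this command is not part of the original session but is standard kubectl:
#List all resource types known to the apiServer
[root@master ~]# kubectl api-resources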
#List the API versions supported by the apiServer
[root@master ~]# kubectl api-versions
admissionregistration.k8s.io/v1
apiextensions.k8s.io/v1
apiregistration.k8s.io/v1
apps/v1
authentication.k8s.io/v1
authorization.k8s.io/v1
autoscaling/v1
autoscaling/v2beta1
autoscaling/v2beta2
batch/v1
batch/v1beta1
certificates.k8s.io/v1
coordination.k8s.io/v1
discovery.k8s.io/v1
discovery.k8s.io/v1beta1
events.k8s.io/v1
events.k8s.io/v1beta1
flowcontrol.apiserver.k8s.io/v1beta1
networking.k8s.io/v1
node.k8s.io/v1
node.k8s.io/v1beta1
policy/v1
policy/v1beta1
rbac.authorization.k8s.io/v1
scheduling.k8s.io/v1
storage.k8s.io/v1
storage.k8s.io/v1beta1
v1
[root@master ~]# kubectl get pods
NAME                            READY   STATUS             RESTARTS         AGE
client                          0/1     Error              0                23h
client-7c75c79d7c-h9994         0/1     CrashLoopBackOff   19 (2m14s ago)   23h
nginx                           1/1     Running            2 (9m13s ago)    37h
nginx-deploy-7c948bcff4-jclqg   1/1     Running            1 (9m11s ago)    24h
nginx-deploy-7c948bcff4-mczxp   1/1     Running            1 (9m13s ago)    24h
#Output as YAML
[root@master ~]# kubectl get pods nginx -o yaml
#apiVersion in the form group/version; resources in the core group have no group prefix, so only the version appears
apiVersion: v1
#kind specifies the resource type
kind: Pod
#Metadata
metadata:
  creationTimestamp: "2021-11-07T00:16:21Z"
  labels:
    run: nginx
  name: nginx
  namespace: default
  resourceVersion: "17833"
  uid: 8f1cd26f-b981-428f-a156-a92b7dee2cfd
#spec defines the desired target state of the resource (user-defined)
spec:
  containers:
#Image used by the container
  - image: nginx:1.14
    imagePullPolicy: IfNotPresent
#Container name
    name: nginx
    ports:
    - containerPort: 80
      protocol: TCP
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-85lz5
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: node2
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: default
  serviceAccountName: default
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  - name: kube-api-access-85lz5
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
#status is the current state of the resource (read-only, maintained by the cluster)
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2021-11-07T00:16:21Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2021-11-08T14:01:53Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2021-11-08T14:01:53Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2021-11-07T00:16:21Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: docker://ef084460d6dc3183abd768dc89dbbbb0d9a0fab5809220d02f97e9230819ddff
    image: nginx:1.14
    imageID: docker-pullable://nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d
    lastState:
      terminated:
        containerID: docker://05be395a286ac086568ec8a80a8ce017c8165a46e0e27e2f7be1e302e1c5c2d2
        exitCode: 255
        finishedAt: "2021-11-08T14:01:16Z"
        reason: Error
        startedAt: "2021-11-07T13:18:48Z"
    name: nginx
    ready: true
    restartCount: 2
    started: true
    state:
      running:
        startedAt: "2021-11-08T14:01:52Z"
  hostIP: 192.168.88.103
  phase: Running
  podIP: 10.244.1.6
  podIPs:
  - ip: 10.244.1.6
  qosClass: BestEffort
  startTime: "2021-11-07T00:16:21Z"

#kubectl proxy makes the API server reachable locally on port 8001
[root@master ~]# kubectl proxy
Starting to serve on 127.0.0.1:8001
#Accessing the local endpoint with curl returns the resource's configuration in JSON
[root@master ~]# curl http://localhost:8001/api/v1/namespaces/default/pods/myapp-7b595df7fc-9d92c
How resources are created:
  • The apiServer only accepts resource definitions in JSON format
  • Manifests are usually written in YAML and automatically converted to JSON before being submitted
Generating a resource manifest
#Generate YAML while creating a resource: --dry-run=client performs a dry run, -o yaml prints the manifest as YAML
[root@k8s-master ~]# kubectl create deployment web --image=nginx --dry-run=client -o yaml
#Obtain the YAML of a resource that already exists
[root@k8s-master ~]# kubectl get deployment nginx -o yaml  >my2.yaml
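In practice the generated manifest is redirected to a file, edited as needed, and then applied (a sketch; the file name web.yaml is arbitrary):
[root@k8s-master ~]# kubectl create deployment web --image=nginx --dry-run=client -o yaml > web.yaml
[root@k8s-master ~]# kubectl apply -f web.yaml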
Fields of a resource manifest (a minimal skeleton combining them is sketched below)
  • apiVersion: [group/version]
  • kind: resource type
  • metadata: metadata
    • name: must be unique within the same resource type
    • namespace: namespace
    • labels: labels
    • annotations: annotations
    • the reference PATH of each resource:
      • /api/GROUP/VERSION/namespaces/NAMESPACE/TYPE/NAME
  • spec: the desired state
  • status: the current state; this field is maintained by the Kubernetes cluster (read-only)
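Putting these fields together, a minimal manifest skeleton looks like the following (the names and the nginx image are placeholders, not taken from the original notes):
apiVersion: v1               #group/version; the core group has no group prefix
kind: Pod                    #resource type
metadata:                    #metadata
  name: demo
  namespace: default
  labels:
    app: demo
spec:                        #desired state, defined by the user
  containers:
  - name: demo
    image: nginx:1.14
#status is the current state and is filled in by the cluster, never written by the user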
#Use kubectl explain to see how the pods resource is defined
[root@master ~]# kubectl explain pods
KIND:     Pod
VERSION:  v1

DESCRIPTION:
     Pod is a collection of containers that can run on a host. This resource is
     created by clients and scheduled onto hosts.
FIELDS:
   ... ...
   metadata	<Object>
     Standard object's metadata. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
   ... ...
#Note: <Object> indicates a nested second-level object made up of key=value pairs; <[]Object> indicates a list of such objects
#View a second-level field definition
[root@master ~]# kubectl explain pods.metadata
KIND:     Pod
VERSION:  v1

RESOURCE: metadata 

DESCRIPTION:
     Standard object's metadata. More info:
     https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata

     ObjectMeta is metadata that all persisted resources must have, which
     includes all objects users must create.

FIELDS:
   annotations	<map[string]string>
     Annotations is an unstructured key value map stored with a resource that
     may be set by external tools to store and retrieve arbitrary metadata. They
     are not queryable and should be preserved when modifying objects. More
     info: http://kubernetes.io/docs/user-guide/annotations

Creating a Pod from YAML
#Note: when writing YAML, remember the space after each field's colon
#Check field types with kubectl explain; list fields (<[]Object>) may be written with [], object fields with {}
#In kubectl explain output, fields marked -required- are mandatory
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: nginx-new
    image: nginx:1.14
  - name: busybox
    image: busybox:latest
    command:
    - "/bin/sh"
    - "-c"
    - "sleep 3600"
#Create the Pod from the YAML file
[root@master manifests]# kubectl create -f nginx.yaml
pod/pod-demo created
[root@master manifests]# kubectl get pods
NAME                            READY   STATUS      RESTARTS         AGE
client                          0/1     Error       0                24h
client-7c75c79d7c-h9994         0/1     Completed   32 (5m25s ago)   24h
nginx                           1/1     Running     2 (73m ago)      38h
nginx-deploy-7c948bcff4-jclqg   1/1     Running     1 (73m ago)      25h
nginx-deploy-7c948bcff4-mczxp   1/1     Running     1 (73m ago)      25h
pod-demo                        2/2     Running     0                8s
#First way to delete the Pod
[root@master manifests]# kubectl delete -f nginx.yaml 
pod "pod-demo" deleted
#Second way to delete the Pod (specify the resource type and name)
[root@master manifests]# kubectl delete pods pod-demo
#View the logs of the busybox container in pod-demo
[root@master manifests]# kubectl logs pod-demo busybox
/bin/sh: can't create /usr/share/nginx/html/index.html: nonexistent directory
#-l / --selector filters by label and lists the resources carrying the given label
[root@master ~]# kubectl get pods --selector app=nginx-deploy
NAME                            READY   STATUS    RESTARTS      AGE
nginx-deploy-7c948bcff4-jclqg   1/1     Running   2 (46m ago)   2d
nginx-deploy-7c948bcff4-mczxp   1/1     Running   2 (46m ago)   2d
#-L adds columns showing the values of the given label keys
[root@master ~]# kubectl get pods -L app,run
NAME                            READY   STATUS             RESTARTS       AGE     APP            RUN
client                          0/1     Error              0              47h                    client
client-7c75c79d7c-h9994         0/1     CrashLoopBackOff   47 (20s ago)   47h     client         
nginx                           1/1     Running            3 (48m ago)    2d14h                  nginx
nginx-deploy-7c948bcff4-jclqg   1/1     Running            2 (48m ago)    2d      nginx-deploy   
nginx-deploy-7c948bcff4-mczxp   1/1     Running            2 (48m ago)    2d      nginx-deploy 
#kubectl label adds a label to the specified resource
[root@master ~]# kubectl label pods nginx release=tye
pod/nginx labeled
# Check that the label was set
[root@master ~]# kubectl get pods -l release --show-labels
NAME    READY   STATUS    RESTARTS      AGE     LABELS
nginx   1/1     Running   3 (52m ago)   2d14h   release=tye,run=nginx
#If the label already has a value, the --overwrite flag is required, otherwise the value cannot be changed
[root@master ~]# kubectl label pods nginx release=edison
error: 'release' already has a value (tye), and --overwrite is false
[root@master ~]# kubectl label pods nginx release=edison --overwrite
pod/nginx labeled
[root@master ~]# kubectl get pods -l release --show-labels
NAME    READY   STATUS    RESTARTS      AGE     LABELS
nginx   1/1     Running   3 (54m ago)   2d14h   release=edison,run=nginx
[root@master ~]# kubectl label pods nginx-deploy-7c948bcff4-jclqg release=tye
pod/nginx-deploy-7c948bcff4-jclqg labeled
[root@master ~]# kubectl get pods -l release
NAME                            READY   STATUS    RESTARTS      AGE
nginx                           1/1     Running   3 (61m ago)   2d14h
nginx-deploy-7c948bcff4-jclqg   1/1     Running   2 (61m ago)   2d
[root@master ~]# kubectl get pods -l release --show-labels
NAME                            READY   STATUS    RESTARTS      AGE     LABELS
nginx                           1/1     Running   3 (61m ago)   2d14h   release=edison,run=nginx
nginx-deploy-7c948bcff4-jclqg   1/1     Running   2 (61m ago)   2d      app=nginx-deploy,pod-template-hash=7c948bcff4,release=tye
[root@master ~]# kubectl get pods -l release,app --show-labels
NAME                            READY   STATUS    RESTARTS      AGE   LABELS
nginx-deploy-7c948bcff4-jclqg   1/1     Running   2 (62m ago)   2d    app=nginx-deploy,pod-template-hash=7c948bcff4,release=tye
[root@master ~]# kubectl get pods -l release=edison,run=nginx --show-labels
NAME    READY   STATUS    RESTARTS      AGE     LABELS
nginx   1/1     Running   3 (62m ago)   2d14h   release=edison,run=nginx
[root@master ~]# kubectl get pods -l release!=edison --show-labels
NAME                            READY   STATUS             RESTARTS         AGE   LABELS
client                          0/1     Error              0                47h   run=client
client-7c75c79d7c-h9994         0/1     CrashLoopBackOff   49 (4m54s ago)   47h   app=client,pod-template-hash=7c75c79d7c
nginx-deploy-7c948bcff4-jclqg   1/1     Running            2 (63m ago)      2d    app=nginx-deploy,pod-template-hash=7c948bcff4,release=tye
nginx-deploy-7c948bcff4-mczxp   1/1     Running            2 (63m ago)      2d    app=nginx-deploy,pod-template-hash=7c948bcff4
#Find Pods whose release label is edison or tye
[root@master ~]# kubectl get pods -l " release in (edison,tye)" --show-labels
NAME                            READY   STATUS    RESTARTS      AGE     LABELS
nginx                           1/1     Running   3 (76m ago)   2d14h   release=edison,run=nginx
nginx-deploy-7c948bcff4-jclqg   1/1     Running   2 (76m ago)   2d      app=nginx-deploy,pod-template-hash=7c948bcff4,release=tye
#Find Pods whose release label is neither edison nor tye
[root@master ~]# kubectl get pods -l " release notin (edison,tye)" --show-labels
NAME                            READY   STATUS             RESTARTS        AGE   LABELS
client                          0/1     Error              0               2d    run=client
client-7c75c79d7c-h9994         0/1     CrashLoopBackOff   52 (4m5s ago)   2d    app=client,pod-template-hash=7c75c79d7c
nginx-deploy-7c948bcff4-mczxp   1/1     Running            2 (77m ago)     2d    app=nginx-deploy,pod-template-hash=7c948bcff4,release=dizzy
#List the nodes with their labels
[root@master ~]# kubectl get node --show-labels
NAME     STATUS   ROLES                  AGE     VERSION   LABELS
master   Ready    control-plane,master   2d23h   v1.22.3   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master,kubernetes.io/os=linux,node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.kubernetes.io/exclude-from-external-load-balancers=
node1    Ready    <none>                 2d23h   v1.22.3   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node1,kubernetes.io/os=linux
node2    Ready    <none>                 2d23h   v1.22.3   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=node2,kubernetes.io/os=linux

Resource manifests
Autonomous (unmanaged) Pod resources

Pod resource:
spec.containers <[]Object>
- name
  image
  imagePullPolicy
    Always: always pull the image, whether or not it exists locally; the default when the image tag is :latest
    Never: never pull; the user must provide the image manually
    IfNotPresent: the default when the tag is not :latest; use the local image if present, otherwise pull from the registry
  command:
labels:
  key = value
  key: letters, digits, _, -, .
  value: may be empty; must begin and end with a letter or digit; may contain letters, digits and underscores in between
Label selectors:
  Equality-based: =, ==, !=
  Set-based:
    key in (value1,value2...)
    key notin (value1,value2...)
    key          (the key exists)
    !key         (the key does not exist)
Many resources embed label selectors in their spec (see the matchExpressions sketch below):
  matchLabels: key/value pairs given directly
  matchExpressions: selectors built from expressions, {key:"KEY", operator:"OPERATOR", values:[value1,value2...]}
    Operators:
      In, NotIn: the values field must be a non-empty list
      Exists, DoesNotExist: the values field must be empty
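As a sketch, the set-based selectors above would appear inside a controller spec roughly like this (an assumed fragment, not from the original session):
  selector:
    matchExpressions:
    - key: release
      operator: In
      values: ["edison","tye"]
    - key: app
      operator: Exists        #values must be omitted (empty) for Exists/DoesNotExist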


[root@master ~]# kubectl explain pods.spec | grep nodeSelector
   nodeSelector	<map[string]string>

Node label selector: run a resource on nodes that carry a given label
nodeSelector
NodeSelector is a selector which must be true for the pod to fit on a node.
Selector which must match a node’s labels for the pod to be scheduled on
that node. More info:
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/

#First, label node1
[root@master yaml]# kubectl label nodes node1 disktype=ssd
node/node1 labeled
#Modify the YAML file and add a nodeSelector field
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  - name: busybox
    image: busybox:latest
    command: ["/bin/sh","-c","sleep 3600"]
  nodeSelector:
    disktype: ssd
#Delete the existing pod-demo Pod and recreate it
[root@master yaml]# kubectl delete pods pod-demo
pod "pod-demo" deleted
[root@master yaml]# kubectl apply -f nginx.yaml 
pod/pod-demo created
#Check where pod-demo runs: the scheduler chose node1
[root@master yaml]# kubectl get pods -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES
client                          1/1     Running   4          47h     10.244.2.20   node1   <none>           <none>
myapp-7b595df7fc-8rchf          1/1     Running   4          47h     10.244.2.19   node1   <none>           <none>
myapp-7b595df7fc-9d92c          1/1     Running   4          47h     10.244.1.21   node2   <none>           <none>
nginx-deploy-6895496777-4d4nd   1/1     Running   5          2d13h   10.244.1.20   node2   <none>           <none>
pod-demo                        2/2     Running   0          30s     10.244.2.21   node1   <none>           <none>
#----------------------------
Running a resource on a specific node
nodeName	<string>
     NodeName is a request to schedule this pod onto a specific node. If it is
     non-empty, the scheduler simply schedules this pod onto that node, assuming
     that it fits resource requirements.
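nodeName is not demonstrated in the original notes; as a minimal sketch (Pod name and node name are placeholders), it is simply set in the Pod spec and bypasses the scheduler:
apiVersion: v1
kind: Pod
metadata:
  name: pod-nodename-demo
  namespace: default
spec:
  nodeName: node1            #run directly on node1; no scheduling decision is made
  containers:
  - name: nginx
    image: nginx:1.14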
annotations:
  Unlike labels, annotations cannot be used to select resource objects; they only attach arbitrary "metadata" to an object
#----------------------------
#Add the following lines under metadata in the YAML file
  annotations:
    tye.com/createby: cluster admin
[root@master manifests]# kubectl describe pods pod-demo
Name:         pod-demo
Namespace:    default
Priority:     0
Node:         node2/192.168.88.103
Start Time:   Tue, 09 Nov 2021 10:07:07 -0500
Labels:       app=myapp
              tier=frontend
Annotations:  tye.com/createby: cluster admin
Pod lifecycle:
Phases:
  • Pending: scheduling has not yet completed
  • Running: the Pod is running
  • Failed: the Pod failed
  • Succeeded: the Pod completed successfully
  • Unknown: the state is unknown
    Note: the master's apiServer learns Pod state by communicating with the kubelet on each node.
    Creating a Pod: the create request is first submitted to the apiServer, which stores the requested target state in etcd; the apiServer then asks the scheduler to place the Pod, and once scheduling succeeds the scheduling result is written back to the Pod's state in etcd; the kubelet on the chosen node sees the scheduling decision, fetches the user-submitted manifest from the master's apiServer, creates the Pod from it, and reports the result (success or failure) back to the apiServer, which updates etcd once more.
Important behaviors in the Pod lifecycle:
  • Init containers
  • Container probes
    • liveness probe: checks whether the main container is still alive
      • failureThreshold: number of consecutive failures before the probe is considered failed
      • exec: command probe
      • httpGet: HTTP probe
      • tcpSocket: TCP probe
      • initialDelaySeconds: delay before the first probe, i.e. how many seconds after the container starts probing begins; by default probing starts as soon as the container starts
      • periodSeconds: how often the probe runs (default 10s, minimum 1s)
      • timeoutSeconds: probe timeout, default 1s
    • readiness probe: checks whether the main process is ready to serve requests
    • post-start hook (postStart) and pre-stop hook (preStop)
restartPolicy: restart policy (a minimal sketch follows this list)
  • Always: always restart
  • OnFailure: restart only when the container exits with an error
  • Never: never restart
  • Defaults to Always:
    https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
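A minimal sketch of setting the restart policy explicitly (an assumed example, not from the original session):
apiVersion: v1
kind: Pod
metadata:
  name: restart-demo
  namespace: default
spec:
  restartPolicy: OnFailure   #restart the container only if it exits with a non-zero code
  containers:
  - name: busybox
    image: busybox:latest
    command: ["/bin/sh","-c","sleep 10"]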
Probe types (exec and httpGet are demonstrated below; a tcpSocket sketch follows the httpGet example)
  • ExecAction
  • TCPSocketAction
  • HTTPGetAction
#Exec-probe YAML: checks whether the file /tmp/healty exists
apiVersion: v1
kind: Pod
metadata:
  name: liveness-exec-pod
  namespace: default
spec:
  containers:
  - name: liveness-exec-container
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    command: ["/bin/sh","-c","touch /tmp/healty;sleep 30;rm -rf /tmp/healty;sleep 3600"]
    livenessProbe:
      exec:
        command: ["test","-e","/tmp/healty"]
      initialDelaySeconds: 2
      periodSeconds: 3
#View the Pod details
[root@master manifests]# kubectl describe pod liveness-exec-pod
Name:         liveness-exec-pod
Namespace:    default
Priority:     0
Node:         node1/192.168.88.102
Start Time:   Wed, 10 Nov 2021 08:51:30 -0500
Labels:       <none>
Annotations:  <none>
Status:       Running
IP:           10.244.3.14
IPs:
  IP:  10.244.3.14
Containers:
  liveness-exec-container:
    Container ID:  docker://09cfbe11120e3ff96c0cacfc5f83569a599dcfad1486581ce3215f3bc36d6343
    Image:         busybox:latest
    Image ID:      docker-pullable://busybox@sha256:139abcf41943b8bcd4bc5c42ee71ddc9402c7ad69ad9e177b0a9bc4541f14924
    Port:          <none>
    Host Port:     <none>
    Command:
      /bin/sh
      -c
      touch /tmp/healty;sleep 30;rm -rf /tmp/healty;sleep 3600
    State:          Running
      Started:      Wed, 10 Nov 2021 08:53:48 -0500
    Last State:     Terminated
      Reason:       Error
      Exit Code:    137
      Started:      Wed, 10 Nov 2021 08:52:39 -0500
      Finished:     Wed, 10 Nov 2021 08:53:48 -0500
    Ready:          True
    Restart Count:  2
    Liveness:       exec [test -e /tmp/healty] delay=2s timeout=1s period=3s #success=1 #failure=3
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-t76fk (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  kube-api-access-t76fk:
    Type:                    Projected (a volume that contains injected data from multiple sources)
    TokenExpirationSeconds:  3607
    ConfigMapName:           kube-root-ca.crt
    ConfigMapOptional:       <nil>
    DownwardAPI:             true
QoS Class:                   BestEffort
Node-Selectors:              <none>
Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason     Age                  From               Message
  ----     ------     ----                 ----               -------
  Normal   Scheduled  2m44s                default-scheduler  Successfully assigned default/liveness-exec-pod to node1
  Warning  Unhealthy  56s (x6 over 2m11s)  kubelet            Liveness probe failed:
  Normal   Killing    56s (x2 over 2m5s)   kubelet            Container liveness-exec-container failed liveness probe, will be restarted
  Normal   Pulled     26s (x3 over 2m44s)  kubelet            Container image "busybox:latest" already present on machine
  Normal   Created    26s (x3 over 2m44s)  kubelet            Created container liveness-exec-container
  Normal   Started    26s (x3 over 2m44s)  kubelet            Started container liveness-exec-container
#------------------- httpGet probe
apiVersion: v1
kind: Pod
metadata:
  name: liveness-httpget-pod
  namespace: default
spec:
  containers:
  - name: liveness-httpget-container
    image: nginx:1.14
    imagePullPolicy: IfNotPresent
    ports:
    - name: http
      containerPort: 80
    livenessProbe:
      httpGet:
        port: http
        path: /index.html
      initialDelaySeconds: 2
      periodSeconds: 3
#Use kubectl exec to open a shell in the container and delete index.html; this trips the httpGet probe and the container is restarted
[root@master ~]# kubectl exec liveness-httpget-pod -it -- /bin/sh
# rm -rf /usr/share/nginx/html/index.html
#View the Pod status
[root@master yaml]# kubectl describe pods liveness-httpget-pod
Name:         liveness-httpget-pod
Namespace:    default
Priority:     0
Node:         node1/192.168.88.102
Start Time:   Tue, 23 Nov 2021 08:32:48 -0500
Labels:       <none>
Annotations:  <none>
Status:       Running
IP:           10.244.2.25
IPs:          <none>
Containers:
  liveness-httpget-container:
    Container ID:   docker://570a4d84b6056fbeb9d06a278c75843f860e7cb76ff5ad9935c5fc74f304c979
    Image:          nginx:1.14
    Image ID:       docker-pullable://nginx@sha256:f7988fb6c02e0ce69257d9bd9cf37ae20a60f1df7563c3a2a6abe24160306b8d
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Tue, 23 Nov 2021 08:34:39 -0500
    Last State:     Terminated
      Reason:       Completed
      Exit Code:    0
      Started:      Tue, 23 Nov 2021 08:33:12 -0500
      Finished:     Tue, 23 Nov 2021 08:34:39 -0500
    Ready:          True
    Restart Count:  1
    Liveness:       http-get http://:http/index.html delay=2s timeout=1s period=3s #success=1 #failure=3
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-hdqt6 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             True 
  ContainersReady   True 
  PodScheduled      True 
Volumes:
  default-token-hdqt6:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-hdqt6
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                 node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason     Age                 From               Message
  ----     ------     ----                ----               -------
  Normal   Scheduled  2m11s               default-scheduler  Successfully assigned default/liveness-httpget-pod to node1
  Normal   Pulling    2m10s               kubelet            Pulling image "nginx:1.14"
  Normal   Pulled     108s                kubelet            Successfully pulled image "nginx:1.14"
  Normal   Created    20s (x2 over 107s)  kubelet            Created container liveness-httpget-container
  Normal   Started    20s (x2 over 107s)  kubelet            Started container liveness-httpget-container
  Warning  Unhealthy  20s (x3 over 26s)   kubelet            Liveness probe failed: **HTTP probe failed with statuscode: 404**
  Normal   Killing    20s                 kubelet            Container liveness-httpget-container failed liveness probe, will be restarted
  Normal   Pulled     20s                 kubelet            Container image "nginx:1.14" already present on machine
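The exec and httpGet examples cover two of the three probe types; tcpSocket is not demonstrated in the original notes. A minimal sketch, assuming the same nginx:1.14 image listening on port 80:
apiVersion: v1
kind: Pod
metadata:
  name: liveness-tcp-pod
  namespace: default
spec:
  containers:
  - name: liveness-tcp-container
    image: nginx:1.14
    imagePullPolicy: IfNotPresent
    ports:
    - name: http
      containerPort: 80
    livenessProbe:
      tcpSocket:
        port: 80             #the probe succeeds if a TCP connection to port 80 can be opened
      initialDelaySeconds: 2
      periodSeconds: 3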

readiness probe
#readiness probe YAML
apiVersion: v1
kind: Pod
metadata:
  name: readiness-httpget-pod
  namespace: default
spec:
  containers:
  - name: readiness-httpget-container
    image: ikubernetes/myapp:v1
    imagePullPolicy: IfNotPresent
    ports:
    - name: http
      containerPort: 80
    readinessProbe:
      httpGet:
        port: http
        path: /index.html
      initialDelaySeconds: 2
      periodSeconds: 3
[root@master yaml]# kubectl apply -f readiness-httpget-pod.yaml 
pod/readiness-httpget-pod created
#Open a shell in the readiness-httpget-pod container and delete index.html
[root@master yaml]# kubectl exec readiness-httpget-pod -it -- /bin/sh 
/ # cd /usr/share/nginx/html/
/usr/share/nginx/html # rm -rf index.html
#List the Pods: readiness-httpget-pod is Running, but the READY column shows it is no longer ready
[root@master yaml]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
client                          1/1     Running   5          3d
liveness-httpget-pod            1/1     Running   1          20m
myapp-7b595df7fc-8rchf          1/1     Running   5          2d23h
myapp-7b595df7fc-9d92c          1/1     Running   5          2d23h
nginx-deploy-6895496777-4d4nd   1/1     Running   6          3d14h
pod-demo                        1/2     Running   2          24h
readiness-httpget-pod           0/1     Running   0          86s
[root@master yaml]# kubectl describe pods readiness-httpget-pod
Name:         readiness-httpget-pod
Namespace:    default
Priority:     0
Node:         node1/192.168.88.102
Start Time:   Tue, 23 Nov 2021 08:51:27 -0500
Labels:       <none>
Annotations:  <none>
Status:       Running
IP:           10.244.2.26
IPs:          <none>
Containers:
  readiness-httpget-container:
    Container ID:   docker://b1441f1ad2006ce217f8020ba216f2c4f20c42a8fcaa3439af6571579c0c803b
    Image:          ikubernetes/myapp:v1
    Image ID:       docker-pullable://ikubernetes/myapp@sha256:9c3dc30b5219788b2b8a4b065f548b922a34479577befb54b03330999d30d513
    Port:           80/TCP
    Host Port:      0/TCP
    State:          Running
      Started:      Tue, 23 Nov 2021 08:51:28 -0500
    Ready:          False
    Restart Count:  0
    Readiness:      http-get http://:http/index.html delay=2s timeout=1s period=3s #success=1 #failure=3
    Environment:    <none>
    Mounts:
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-hdqt6 (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  default-token-hdqt6:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-hdqt6
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  <none>
Tolerations:     node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
                 node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
  Type     Reason     Age                 From               Message
  ----     ------     ----                ----               -------
  Normal   Scheduled  2m35s               default-scheduler  Successfully assigned default/readiness-httpget-pod to node1
  Normal   Pulled     2m35s               kubelet            Container image "ikubernetes/myapp:v1" already present on machine
  Normal   Created    2m35s               kubelet            Created container readiness-httpget-container
  Normal   Started    2m34s               kubelet            Started container readiness-httpget-container
  Warning  Unhealthy  21s (x22 over 84s)  kubelet            Readiness probe failed: HTTP probe failed with statuscode: 404
# Recreate index.html inside the container
/usr/share/nginx/html # echo "Hi kubernetes!!" > index.html
#The Pod is reported as ready again
[root@master yaml]# kubectl get pods
NAME                            READY   STATUS    RESTARTS   AGE
client                          1/1     Running   5          3d
liveness-httpget-pod            1/1     Running   1          22m
myapp-7b595df7fc-8rchf          1/1     Running   5          2d23h
myapp-7b595df7fc-9d92c          1/1     Running   5          2d23h
nginx-deploy-6895496777-4d4nd   1/1     Running   6          3d14h
pod-demo                        2/2     Running   3          24h
readiness-httpget-pod           1/1     Running   0          4m12s
lifecycle hooks (a preStop sketch follows the postStart example below)
  • postStart
  • preStop
#After the container starts, overwrite the index.html page served by nginx
apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-poststart-pod
  namespace: default
spec:
  containers:
  - name: nginx-httpd
    image: nginx:1.14
    imagePullPolicy: IfNotPresent
    lifecycle:
      postStart:
        exec:
          command: ["/bin/sh","-c","echo 'Home Page' > /usr/share/nginx/html/index.html"]
[root@master yaml]# kubectl create -f lifecycle-poststart-pod.yaml 
pod/lifecycle-poststart-pod created
[root@master yaml]# kubectl get pods -o wide
NAME                            READY   STATUS    RESTARTS   AGE     IP            NODE    NOMINATED NODE   READINESS GATES
client                          1/1     Running   5          3d      10.244.2.23   node1   <none>           <none>
lifecycle-poststart-pod         1/1     Running   0          13s     10.244.2.27   node1   <none>           <none>
#Check the result
[root@master yaml]# curl 10.244.2.27
Home Page
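preStop works the same way but runs just before the container is terminated; a sketch (the nginx binary path and the -s quit graceful-shutdown command are assumptions about the nginx:1.14 image):
apiVersion: v1
kind: Pod
metadata:
  name: lifecycle-prestop-pod
  namespace: default
spec:
  containers:
  - name: nginx-httpd
    image: nginx:1.14
    imagePullPolicy: IfNotPresent
    lifecycle:
      preStop:
        exec:
          command: ["/usr/sbin/nginx","-s","quit"]   #shut nginx down gracefully before the container is killed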

Pod controllers

ReplicaSet: manages stateless Pod replicas; it is defined by

  • the number of Pod replicas the user expects
  • a label selector, used to find and manage its Pod resources
  • a Pod template

Other controller types:
    Deployment: manages ReplicaSets
    DaemonSet: runs a copy of a Pod on every (matching) node
    Job: one-off jobs
    CronJob: scheduled jobs
    StatefulSet: stateful applications
    TPR (Third Party Resources, deprecated)
    CRD (Custom Resource Definitions)

Operator:

Helm:

Creating a ReplicaSet from a YAML file
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: myapp
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: myapp
      release: tye
  template:
    metadata:
      name: myapp-pod
      labels:
        app: myapp
        release: tye
        enviroment: qa
    spec:
      containers:
      - name: myapp-container
        image: nginx:1.14
        ports:
        - name: http
          containerPort: 80
          
[root@master manifests]# kubectl create -f rs-demo.yaml 
replicaset.apps/myapp created
#List the ReplicaSets
[root@master manifests]# kubectl get rs
NAME                      DESIRED   CURRENT   READY   AGE
client-7c75c79d7c         1         1         0       3d
myapp                     2         2         2       5s
nginx-deploy-7c948bcff4   2         2         2       3d1h
[root@master manifests]# kubectl get pods
NAME                            READY   STATUS             RESTARTS         AGE
myapp-5v76c                     1/1     Running            0                8m16s
myapp-ftfnd                     1/1     Running            0                8m16s
#Scaling the ReplicaSet
#Run kubectl edit rs [rsname] and change replicas to the desired number (kubectl scale, shown below, does the same thing)
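Alternatively, kubectl scale changes the replica count directly without opening an editor (not part of the original session):
#Scale the myapp ReplicaSet to 3 replicas
[root@master manifests]# kubectl scale rs myapp --replicas=3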
#deploy-pod.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      release: tye
  template:
    metadata:
      labels:
        app: myapp
        release: tye
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v2
        ports:
        - name: http
          containerPort: 80
#apply applies a configuration to a resource: it is created if it does not exist, updated if it does
[root@master manifests]# kubectl apply -f deploy-pod.yaml

#Patching a resource with kubectl patch
[root@master ~]# kubectl patch deploy myapp-deploy -p '{"spec":{"replicas":5}}'
deployment.apps/myapp-deploy patched
[root@master ~]# kubectl get pods
NAME                            READY   STATUS    RESTARTS      AGE
myapp-deploy-545b89888c-6gd2h   1/1     Running   0             11m
myapp-deploy-545b89888c-r49rk   1/1     Running   0             10m
myapp-deploy-545b89888c-svswl   1/1     Running   0             4s
myapp-deploy-545b89888c-vz967   1/1     Running   0             4s
myapp-deploy-545b89888c-zxcr5   1/1     Running   0             10m

[root@master manifests]# kubectl patch deployment myapp-deploy -p '{"spec":{"strategy":{"rollingUpdate":{"maxSurge":1,"maxUnavailable":0}}}}'
deployment.apps/myapp-deploy patched
[root@master manifests]# kubectl describe deploy myapp-deploy
Name:                   myapp-deploy
Namespace:              default
CreationTimestamp:      Thu, 11 Nov 2021 08:57:29 -0500
Labels:                 <none>
Annotations:            deployment.kubernetes.io/revision: 2
Selector:               app=myapp,release=tye
Replicas:               5 desired | 5 updated | 5 total | 5 available | 0 unavailable
StrategyType:           RollingUpdate
MinReadySeconds:        0
RollingUpdateStrategy:  0 max unavailable, 1 max surge
[root@master manifests]# kubectl set image deployment myapp-deploy myapp=ikubernetes/myapp:v3 && kubectl rollout pause deployment myapp-deploy
deployment.apps/myapp-deploy image updated
deployment.apps/myapp-deploy paused


[root@master ~]# kubectl get pods -l app=myapp -w
NAME                            READY   STATUS    RESTARTS   AGE
myapp-deploy-545b89888c-6gd2h   1/1     Running   0          21m
myapp-deploy-545b89888c-r49rk   1/1     Running   0          20m
myapp-deploy-545b89888c-svswl   1/1     Running   0          10m
myapp-deploy-545b89888c-vz967   1/1     Running   0          10m
myapp-deploy-545b89888c-zxcr5   1/1     Running   0          20m
myapp-deploy-fbd4c499b-m9258    0/1     Pending   0          0s
myapp-deploy-fbd4c499b-m9258    0/1     Pending   0          0s
myapp-deploy-fbd4c499b-m9258    0/1     ContainerCreating   0          0s
myapp-deploy-fbd4c499b-m9258    1/1     Running             0          18s
#Resume the paused rollout
[root@master manifests]# kubectl rollout resume deployment myapp-deploy
deployment.apps/myapp-deploy resumed

#Watch the rollout status of the deployment
[root@master manifests]# kubectl rollout status deployment myapp-deploy
Waiting for deployment "myapp-deploy" rollout to finish: 1 out of 5 new replicas have been updated...
[root@master manifests]# kubectl get rs -o wide
NAME                      DESIRED   CURRENT   READY   AGE     CONTAINERS   IMAGES                 SELECTOR
myapp-deploy-545b89888c   0         0         0       28m     myapp        ikubernetes/myapp:v2   app=myapp,pod-template-hash=545b89888c,release=tye
myapp-deploy-7dfc7b4f66   0         0         0       52m     myapp        ikubernetes/myapp:v1   app=myapp,pod-template-hash=7dfc7b4f66,release=tye
myapp-deploy-fbd4c499b    5         5         5       4m24s   myapp        ikubernetes/myapp:v3   app=myapp,pod-template-hash=fbd4c499b,release=tye
#View the rollout (revision) history
[root@master manifests]# kubectl rollout history deployment myapp-deploy
deployment.apps/myapp-deploy 
REVISION  CHANGE-CAUSE
1         <none>
2         <none>
3         <none>
#Roll back to revision 1
[root@master manifests]# kubectl rollout undo deployment myapp-deploy --to-revision=1
deployment.apps/myapp-deploy rolled back
[root@master manifests]# kubectl rollout history deployment myapp-deploy
deployment.apps/myapp-deploy 
REVISION  CHANGE-CAUSE
2         <none>
3         <none>
4         <none>
#Confirm that the active ReplicaSet now runs the v1 image
[root@master manifests]# kubectl get rs -o wide
NAME                      DESIRED   CURRENT   READY   AGE   CONTAINERS   IMAGES                 SELECTOR
myapp-deploy-545b89888c   0         0         0       34m   myapp        ikubernetes/myapp:v2   app=myapp,pod-template-hash=545b89888c,release=tye
myapp-deploy-7dfc7b4f66   5         5         5       58m   myapp        ikubernetes/myapp:v1   app=myapp,pod-template-hash=7dfc7b4f66,release=tye
myapp-deploy-fbd4c499b    0         0         0       10m   myapp        ikubernetes/myapp:v3   app=myapp,pod-template-hash=fbd4c499b,release=tye

DaemonSet
[root@node1 ~]# docker pull ikubernetes/filebeat:5.6.5-alpine
5.6.5-alpine: Pulling from ikubernetes/filebeat
e6faa08065ed: Pull complete 
1956334c4aa9: Pull complete 
f3f537aca9dd: Pull complete 
Digest: sha256:530f31ebf9194b0400303320579cafe21da890bd06446746fcedc2b65875e4eb
Status: Downloaded newer image for ikubernetes/filebeat:5.6.5-alpine
docker.io/ikubernetes/filebeat:5.6.5-alpine
# YAML for the DaemonSet demo: the first document defines a redis Deployment, the second the filebeat DaemonSet
apiVersion: apps/v1
kind: Deployment
metadata:
  name: redis
  namespace: default
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
      role: logstor
  template:
    metadata:
      labels:
        app: redis
        role: logstor
    spec:
      containers:
        - name: redis
          image: redis:4.0-alpine
          ports:
          - name: redis
            containerPort: 6379
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: myapp-ds
  namespace: default
spec:
  selector:
    matchLabels:
      app: filebeat
      release: stable
  template:
    metadata:
      labels:
        app: filebeat
        release: stable
    spec:
      containers:
      - name: myapp
        image: ikubernetes/filebeat:5.6.5-alpine
        env:
        - name: REDIS_HOST
          value: redis.default.svc.cluster.local
        - name: REDIS_LOG_LEVEL
          value: info
[root@master manifests]# kubectl apply -f ds-demo.yaml 
daemonset.apps/myapp-ds created
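To verify, list the DaemonSet and its Pods; with two worker nodes, one filebeat Pod per node is expected (commands only, the output is not reproduced here):
[root@master manifests]# kubectl get ds myapp-ds
[root@master manifests]# kubectl get pods -l app=filebeat -o wide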
