KUBERNETES-1-18: Advanced Scheduling

1. Create a test directory with mkdir schedule, edit the manifest with vim pod-demo.yaml, then review it with cat pod-demo.yaml (the key part is the nodeSelector: field).

[root@master manifests]# mkdir schedule
[root@master manifests]# cd schedule
[root@master schedule]# cp ../pod-demo.yaml .
[root@master schedule]# vim pod-demo.yaml
[root@master schedule]# cat pod-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-demo
  namespace: default
  labels:
    app: myapp
    tier: frontend
  annotations:
    example.com/created-by: "cluster admin"
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  nodeSelector:
    disktype: ssd


2. Declare the resource with kubectl apply -f pod-demo.yaml. Check which node the pod landed on with kubectl get pods -o wide, then verify the match with kubectl get nodes --show-labels | grep ssd, filtering nodes by label content by hand.
[root@master schedule]# kubectl apply -f pod-demo.yaml
pod/pod-demo created

[root@master manifests]# kubectl get pods -o wide
NAME       READY     STATUS    RESTARTS   AGE       IP           NODE
pod-demo   1/1       Running   0          3m        10.244.1.3   node1.example.com
[root@master manifests]# kubectl get nodes --show-labels | grep ssd
node1.example.com    Ready     <none>    5d        v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=ssd,kubernetes.io/hostname=node1.example.com

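As an aside, kubectl's own label-selector flags can replace the grep pipeline above; a minimal sketch using standard kubectl flags (the disktype=ssd label is the one matched above):

kubectl get nodes -l disktype=ssd   # list only nodes carrying the label disktype=ssd
kubectl get nodes -L disktype       # list all nodes with their disktype value as an extra column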

3. Delete the resource with kubectl delete -f pod-demo.yaml and edit the manifest with vim pod-demo.yaml; cat pod-demo.yaml | grep disktype confirms the disktype value was changed. kubectl get nodes --show-labels | grep harddisk shows that no node carries the new label. Re-declare the resource with kubectl apply -f pod-demo.yaml; kubectl get pods -o wide shows the pod stuck in Pending, and kubectl describe pods pod-demo | grep -i events -A4 shows in the events that scheduling failed because no node matched the node selector.

[root@master schedule]# kubectl delete -f pod-demo.yaml
pod "pod-demo" deleted
[root@master schedule]# vim pod-demo.yaml
[root@master schedule]# cat pod-demo.yaml | grep disktype
    disktype: harddisk
[root@master schedule]# kubectl get nodes --show-labels | grep harddisk
[root@master schedule]# kubectl apply -f pod-demo.yaml
pod/pod-demo created
[root@master schedule]# kubectl get pods -o wide
NAME       READY     STATUS    RESTARTS   AGE       IP        NODE
pod-demo   0/1       Pending   0          11s       <none>    <none>
[root@master schedule]# kubectl describe pods pod-demo | grep -i events -A4
Events:
  Type     Reason            Age                From               Message
  ----     ------            ----               ----               -------
  Warning  FailedScheduling  1s (x16 over 48s)  default-scheduler  0/3 nodes are available: 3 node(s) didn't match node selector.


4. Manually label a node with kubectl label nodes node2.example.com disktype=harddisk; kubectl get pods -o wide then shows the pod scheduled successfully. Clean up with kubectl delete -f pod-demo.yaml.

[root@master schedule]# kubectl label nodes node2.example.com disktype=harddisk
node/node2.example.com labeled
[root@master schedule]# kubectl get pods -o wide
NAME       READY     STATUS    RESTARTS   AGE       IP           NODE
pod-demo   1/1       Running   0          3m        10.244.2.3   node2.example.com
[root@master schedule]# kubectl delete -f pod-demo.yaml
pod "pod-demo" deleted

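If the temporary label should be cleaned up afterwards, appending a dash to the key removes it; a minimal sketch against the label set above:

kubectl label nodes node2.example.com disktype-   # remove the disktype label from node2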

5. Edit the manifest with vim pod-nodeaffinity-demo.yaml and review it with cat pod-nodeaffinity-demo.yaml (note that requiredDuringSchedulingIgnoredDuringExecution: is a hard requirement). Declare the resource with kubectl apply -f pod-nodeaffinity-demo.yaml; kubectl get pods -o wide shows the pod Pending because no node matches the rule.

[root@master schedule]# cp pod-demo.yaml pod-nodeaffinity-demo.yaml

[root@master schedule]# vim pod-nodeaffinity-demo.yaml
[root@master schedule]# cat pod-nodeaffinity-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-node-affinity-demo
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: zone
            operator: In
            values:
            - foo
            - bar
[root@master schedule]# kubectl apply -f pod-nodeaffinity-demo.yaml
pod/pod-node-affinity-demo created
[root@master schedule]# kubectl get pods -o wide
NAME                     READY     STATUS    RESTARTS   AGE       IP        NODE
pod-node-affinity-demo   0/1       Pending   0          18s       <none>    <none>

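Besides In, the operator field of matchExpressions also accepts NotIn, Exists, DoesNotExist, Gt and Lt. A hedged variant of the stanza above, rewritten as a pure existence check (same zone key, which no node in this cluster carries yet):

  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: zone
            operator: Exists   # match any node that has a zone label, regardless of its value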

6. Edit the manifest with vim pod-nodeaffinity-demo-2.yaml and review it with cat pod-nodeaffinity-demo-2.yaml (note that preferredDuringSchedulingIgnoredDuringExecution: is only a soft preference). Declare the resource with kubectl apply -f pod-nodeaffinity-demo-2.yaml; kubectl get pods -o wide shows that this pod does get scheduled.

[root@master schedule]# cp pod-nodeaffinity-demo.yaml pod-nodeaffinity-demo-2.yaml

[root@master schedule]# vim pod-nodeaffinity-demo-2.yaml
[root@master schedule]# cat pod-nodeaffinity-demo-2.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-node-affinity-demo-2
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - preference:
          matchExpressions:
          - key: zone
            operator: In
            values:
            - foo
            - bar
        weight: 60
[root@master schedule]# kubectl apply -f pod-nodeaffinity-demo-2.yaml
pod/pod-node-affinity-demo-2 created
[root@master schedule]# kubectl get pods -o wide
NAME                       READY     STATUS    RESTARTS   AGE       IP           NODE
pod-node-affinity-demo     0/1       Pending   0          7m        <none>        <none>
pod-node-affinity-demo-2   1/1       Running   0          11s       10.244.2.4   node2.example.com

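The weight field (1-100) matters once several preferred terms compete: for each candidate node the scheduler adds up the weights of all terms that node satisfies and favors the highest total. A hedged sketch with two preferences, reusing the zone key from above and the disktype label from the earlier steps:

  affinity:
    nodeAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 60
        preference:
          matchExpressions:
          - {key: zone, operator: In, values: ["foo"]}
      - weight: 20
        preference:
          matchExpressions:
          - {key: disktype, operator: In, values: ["ssd"]}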

7. Edit the manifest with vim pod-required-affinity-demo.yaml and review it with cat pod-required-affinity-demo.yaml (note the affinity: stanza: with podAffinity and topologyKey kubernetes.io/hostname, pod-second must be placed on the same node as a pod matching app=myapp). Declare the resource with kubectl apply -f pod-required-affinity-demo.yaml, check placement with kubectl get pods -o wide, inspect the scheduling events with kubectl describe pods pod-second | grep -i event -A6, and clean up with kubectl delete -f pod-required-affinity-demo.yaml.

[root@master schedule]# cp pod-demo.yaml pod-required-affinity-demo.yaml

[root@master schedule]# vim pod-required-affinity-demo.yaml
[root@master schedule]# cat pod-required-affinity-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-first
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-second
  labels:
    app: backend
    tier: db
spec:
  containers:
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    command: ["sh","-c","sleep 3600"]
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - {key: app, operator: In, values: ["myapp"]}
        topologyKey: kubernetes.io/hostname
[root@master schedule]# kubectl apply -f pod-required-affinity-demo.yaml
pod/pod-first created
pod/pod-second created

[root@master schedule]# kubectl get pods -o wide
NAME         READY     STATUS    RESTARTS   AGE       IP            NODE
pod-first    1/1       Running   0          1m        10.244.2.9    node2.example.com
pod-second   1/1       Running   0          1m        10.244.2.10   node2.example.com

[root@master schedule]# kubectl describe pods pod-second | grep -i event -A6
Events:
  Type    Reason     Age   From                        Message
  ----    ------     ----  ----                        -------
  Normal  Scheduled  3m    default-scheduler           Successfully assigned default/pod-second to node2.example.com
  Normal  Pulled     3m    kubelet, node2.example.com  Container image "busybox:latest" already present on machine
  Normal  Created    3m    kubelet, node2.example.com  Created container
  Normal  Started    3m    kubelet, node2.example.com  Started container

[root@master schedule]# kubectl delete -f pod-required-affinity-demo.yaml
pod "pod-first" deleted
pod "pod-second" deleted

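One detail worth noting: the labelSelector above only matches pods in the same namespace as the new pod. A namespaces list inside the affinity term widens the search; a hypothetical fragment (the prod namespace is a placeholder):

  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - {key: app, operator: In, values: ["myapp"]}
        namespaces: ["default", "prod"]   # hypothetical: namespaces searched for matching pods
        topologyKey: kubernetes.io/hostname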

8. Edit the manifest with vim pod-required-anti-affinity-demo.yaml and review it with cat pod-required-anti-affinity-demo.yaml (this time podAntiAffinity: is used). Declare the resource with kubectl apply -f pod-required-anti-affinity-demo.yaml; kubectl get pods -o wide confirms the two pods now land on different nodes.

[root@master schedule]# cp pod-required-affinity-demo.yaml pod-required-anti-affinity-demo.yaml
[root@master schedule]# vim pod-required-anti-affinity-demo.yaml
[root@master schedule]# cat pod-required-anti-affinity-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-first
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-second
  labels:
    app: backend
    tier: db
spec:
  containers:
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    command: ["sh","-c","sleep 3600"]
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - {key: app, operator: In, values: ["myapp"]}
        topologyKey: kubernetes.io/hostname
[root@master schedule]# kubectl apply -f pod-required-anti-affinity-demo.yaml
pod/pod-first created
pod/pod-second created
[root@master schedule]# kubectl get pods -o wide
NAME         READY     STATUS    RESTARTS   AGE       IP            NODE
pod-first    1/1       Running   0          17s       10.244.2.11   node2.example.com
pod-second   1/1       Running   0          17s       10.244.1.9    node1.example.com

[root@master schedule]# kubectl delete -f pod-required-anti-affinity-demo.yaml
pod "pod-first" deleted
pod "pod-second" deleted

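Required anti-affinity refuses to schedule when no node qualifies, as the next step will show. For merely spreading replicas, the preferred form is often safer, since it degrades to co-location instead of leaving pods Pending; a hedged sketch of that variant:

  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          labelSelector:
            matchExpressions:
            - {key: app, operator: In, values: ["myapp"]}
          topologyKey: kubernetes.io/hostname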

9. View node labels with kubectl get nodes --show-labels, then put both nodes into the same zone with kubectl label nodes node1.example.com zone=foo and kubectl label nodes node2.example.com zone=foo. Edit the manifest with vim pod-required-anti-affinity-demo.yaml, review it with cat pod-required-anti-affinity-demo.yaml, and confirm with cat pod-required-anti-affinity-demo.yaml | grep topologyKey that the topologyKey is now zone. Declare the resource with kubectl apply -f pod-required-anti-affinity-demo.yaml; kubectl get pods -o wide shows the second pod Pending, because both nodes now belong to the same zone topology domain and the anti-affinity rule leaves no node it may run on. Clean up with kubectl delete -f pod-required-anti-affinity-demo.yaml.

[root@master schedule]# kubectl get nodes --show-labels
NAME                 STATUS    ROLES     AGE       VERSION   LABELS
master.example.com   Ready     master    6d        v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/hostname=master.example.com,node-role.kubernetes.io/master=
node1.example.com    Ready     <none>    6d        v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=ssd,kubernetes.io/hostname=node1.example.com
node2.example.com    Ready     <none>    6d        v1.11.1   beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,disktype=harddisk,kubernetes.io/hostname=node2.example.com
[root@master schedule]# kubectl label nodes node1.example.com zone=foo
node/node1.example.com labeled
[root@master schedule]# kubectl label nodes node2.example.com zone=foo
node/node2.example.com labeled

[root@master schedule]# vim pod-required-anti-affinity-demo.yaml
[root@master schedule]# cat pod-required-anti-affinity-demo.yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod-first
  labels:
    app: myapp
    tier: frontend
spec:
  containers:
  - name: myapp
    image: ikubernetes/myapp:v1
---
apiVersion: v1
kind: Pod
metadata:
  name: pod-second
  labels:
    app: backend
    tier: db
spec:
  containers:
  - name: busybox
    image: busybox:latest
    imagePullPolicy: IfNotPresent
    command: ["sh","-c","sleep 3600"]
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - {key: app, operator: In, values: ["myapp"]}
        topologyKey: zone
[root@master schedule]# cat pod-required-anti-affinity-demo.yaml | grep topologyKey
        topologyKey: zone

[root@master schedule]# kubectl apply -f pod-required-anti-affinity-demo.yaml
pod/pod-first created
pod/pod-second created

[root@master schedule]# kubectl get pods -o wide
NAME         READY     STATUS    RESTARTS   AGE       IP            NODE
pod-first    1/1       Running   0          6m        10.244.2.12   node2.example.com
pod-second   0/1       Pending   0          6m        <none>        <none>

[root@master schedule]# kubectl delete -f pod-required-anti-affinity-demo.yaml
pod "pod-first" deleted
pod "pod-second" deleted

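To reset the cluster for the following experiments, the zone labels added above can be removed the same way a label is deleted elsewhere in this walkthrough; a minimal sketch:

kubectl label nodes node1.example.com zone-   # remove the zone label from node1
kubectl label nodes node2.example.com zone-   # remove the zone label from node2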

10. View the master node's taints with kubectl describe node master.example.com | grep -i taints. Find the apiserver pod with kubectl get pods -n kube-system -o wide | grep api and check its tolerations with kubectl describe pods kube-apiserver-master.example.com -n kube-system | grep -i tolerations (the empty key in Tolerations: :NoExecute tolerates every NoExecute taint). Find the kube-proxy pods with kubectl get pods -n kube-system -o wide | grep proxy and check one with kubectl describe pods kube-proxy-r4j2h -n kube-system | grep -i tolerations -A8. Finally, dump the master node's taint definition with kubectl get nodes master.example.com -o yaml | grep -i taints -A2.

[root@master schedule]# kubectl describe node master.example.com | grep -i taints
Taints:             node-role.kubernetes.io/master:NoSchedule

[root@master schedule]# kubectl get pods -n kube-system -o wide | grep api
kube-apiserver-master.example.com            1/1       Running   8          6d        172.20.0.128   master.example.com

[root@master schedule]# kubectl describe pods kube-apiserver-master.example.com -n kube-system | grep -i tolerations
Tolerations:       :NoExecute

[root@master schedule]# kubectl get pods -n kube-system -o wide | grep proxy
kube-proxy-56hs9                             1/1       Running   6          6d        172.20.0.129   node1.example.com
kube-proxy-r4j2h                             1/1       Running   11         6d        172.20.0.128   master.example.com
kube-proxy-t985x                             1/1       Running   10         6d        172.20.0.130   node2.example.com
[root@master schedule]# kubectl describe pods kube-proxy-r4j2h -n kube-system | grep -i tolerations -A8
Tolerations:     
                 CriticalAddonsOnly
                 node.kubernetes.io/disk-pressure:NoSchedule
                 node.kubernetes.io/memory-pressure:NoSchedule
                 node.kubernetes.io/not-ready:NoExecute
                 node.kubernetes.io/unreachable:NoExecute
Events:         

[root@master schedule]# kubectl get nodes master.example.com -o yaml | grep -i taints -A2
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master

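To list the taints of every node at once instead of describing them one by one, a jsonpath query works; a minimal sketch using kubectl's standard jsonpath output:

kubectl get nodes -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.taints}{"\n"}{end}'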

11. Taint node1 with kubectl taint node node1.example.com node-type=production:NoSchedule (NoSchedule only blocks new scheduling). Edit the manifest with vim deploy-demo.yaml and review it with cat deploy-demo.yaml (note: it carries no tolerations). Declare the resource with kubectl apply -f deploy-demo.yaml; kubectl get pods -o wide shows all pods running on the other node. Then taint node2 with kubectl taint node node2.example.com node-type=dev:NoExecute (NoExecute also evicts running pods); kubectl get pods -o wide now shows the pods Pending, because no node is left on which they may both be scheduled and keep running.

[root@master schedule]# kubectl taint node node1.example.com node-type=production:NoSchedule
node/node1.example.com tainted

[root@master schedule]# cp ../deploy-demo.yaml .
[root@master schedule]# vim deploy-demo.yaml
[root@master schedule]# cat deploy-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v2
        ports:
        - name: http
          containerPort: 80
[root@master schedule]# kubectl apply -f deploy-demo.yaml
deployment.apps/myapp-deploy created
[root@master schedule]# kubectl get pods -o wide
NAME                            READY     STATUS    RESTARTS   AGE       IP            NODE
myapp-deploy-67f6f6b4dc-dtdgn   1/1       Running   0          9s        10.244.2.14   node2.example.com
myapp-deploy-67f6f6b4dc-fbrr7   1/1       Running   0          9s        10.244.2.13   node2.example.com
myapp-deploy-67f6f6b4dc-r6ccw   1/1       Running   0          9s        10.244.2.15   node2.example.com

[root@master schedule]# kubectl taint node node2.example.com node-type=dev:NoExecute
node/node2.example.com tainted

[root@master schedule]# kubectl get pods -o wide
NAME                            READY     STATUS    RESTARTS   AGE       IP        NODE
myapp-deploy-67f6f6b4dc-2vnxl   0/1       Pending   0          27s       <none>    <none>
myapp-deploy-67f6f6b4dc-7mvtz   0/1       Pending   0          27s       <none>    <none>
myapp-deploy-67f6f6b4dc-ffrrq   0/1       Pending   0          27s       <none>    <none>

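For reference, a taint is removed by appending a dash to key:effect (or to the bare key, which drops every taint with that key); a minimal sketch to undo the two taints set in this step:

kubectl taint node node1.example.com node-type:NoSchedule-   # drop the NoSchedule taint from node1
kubectl taint node node2.example.com node-type:NoExecute-    # drop the NoExecute taint from node2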

12. Edit the manifest with vim deploy-demo.yaml and review it with cat deploy-demo.yaml (a tolerations: section has been added). Declare the resource with kubectl apply -f deploy-demo.yaml; kubectl get pods -o wide shows the pods still Pending, because the toleration's effect NoExecute does not match node1's NoSchedule taint, and its value production does not match node2's node-type=dev:NoExecute taint, so neither node will accept the pods.

[root@master schedule]# vim deploy-demo.yaml
[root@master schedule]# cat deploy-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v2
        ports:
        - name: http
          containerPort: 80
      tolerations:
      - key: "node-type"
        operator: "Equal"
        value: "production"
        effect: "NoExecute"
        tolerationSeconds: 3600
[root@master schedule]# kubectl apply -f deploy-demo.yaml
deployment.apps/myapp-deploy configured
[root@master schedule]# kubectl get pods -o wide
NAME                            READY     STATUS    RESTARTS   AGE       IP        NODE
myapp-deploy-67f6f6b4dc-2vnxl   0/1       Pending   0          6m        <none>    <none>
myapp-deploy-67f6f6b4dc-7mvtz   0/1       Pending   0          6m        <none>    <none>
myapp-deploy-67f6f6b4dc-ffrrq   0/1       Pending   0          6m        <none>    <none>
myapp-deploy-77fb48ff96-xsh5s   0/1       Pending   0          9s        <none>    <none>

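For the pods to come back on node2, the toleration would instead have to match node2's actual taint, node-type=dev:NoExecute; a hypothetical variant (tolerationSeconds is only legal with effect NoExecute and bounds how long a pod may keep running after a matching taint appears):

      tolerations:
      - key: "node-type"
        operator: "Equal"
        value: "dev"
        effect: "NoExecute"
        tolerationSeconds: 3600   # evicted 3600s after a matching taint appears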

13. Edit the manifest with vim deploy-demo.yaml and review it with cat deploy-demo.yaml (the toleration's effect is now NoSchedule, so the pods tolerate node1's taint). Re-declare the resource with kubectl apply -f deploy-demo.yaml; kubectl get pods -o wide shows the pods running, but only on node1, since node2 still carries the NoExecute taint.

[root@master schedule]# vim deploy-demo.yaml
[root@master schedule]# cat deploy-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v2
        ports:
        - name: http
          containerPort: 80
      tolerations:
      - key: "node-type"
        operator: "Equal"
        value: "production"
        effect: "NoSchedule"
[root@master schedule]# kubectl apply -f deploy-demo.yaml
deployment.apps/myapp-deploy configured

[root@master schedule]# kubectl get pods -o wide
NAME                            READY     STATUS    RESTARTS   AGE       IP            NODE
myapp-deploy-65cc47f858-hwsjd   1/1       Running   0          4m        10.244.1.10   node1.example.com
myapp-deploy-65cc47f858-nmdr9   1/1       Running   0          4m        10.244.1.11   node1.example.com
myapp-deploy-65cc47f858-rbtc7   1/1       Running   0          4m        10.244.1.12   node1.example.com

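To double-check that the re-created pods really carry the new toleration, the deployment's pods can be described by label; a minimal sketch:

kubectl describe pods -l app=myapp | grep -i tolerations -A3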

14. Edit the manifest with vim deploy-demo.yaml and review it with cat deploy-demo.yaml (the operator is now Exists, a pure existence check: any taint with key node-type is tolerated, whatever its value or effect). Declare the resource with kubectl apply -f deploy-demo.yaml; kubectl get pods -o wide shows the pods spread across both nodes again.

[root@master schedule]# vim deploy-demo.yaml
[root@master schedule]# cat deploy-demo.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp-deploy
  namespace: default
spec:
  replicas: 3
  selector:
    matchLabels:
      app: myapp
      release: canary
  template:
    metadata:
      labels:
        app: myapp
        release: canary
    spec:
      containers:
      - name: myapp
        image: ikubernetes/myapp:v2
        ports:
        - name: http
          containerPort: 80
      tolerations:
      - key: "node-type"
        operator: "Exists"
        value: ""
        effect: ""
[root@master schedule]# kubectl apply -f deploy-demo.yaml
deployment.apps/myapp-deploy configured

[root@master schedule]# kubectl get pods -o wide
NAME                            READY     STATUS    RESTARTS   AGE       IP            NODE
myapp-deploy-5d9c6985f5-g4cwf   1/1       Running   0          18s       10.244.1.13   node1.example.com
myapp-deploy-5d9c6985f5-jwn56   1/1       Running   0          17s       10.244.2.17   node2.example.com
myapp-deploy-5d9c6985f5-qm7g2   1/1       Running   0          20s       10.244.2.16   node2.example.com

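Taking the existence check one step further: a toleration with operator Exists and no key at all tolerates every taint, which is how some cluster add-ons stay on tainted nodes; a hedged fragment:

      tolerations:
      - operator: "Exists"   # empty key plus Exists tolerates all taints, any effect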