Cloud Native Lesson 3 Homework

1. CoreDNS

1.1 YAML file

root@master1:~/maniftest/coredns# cat coredns.yaml

# __MACHINE_GENERATED_WARNING__

apiVersion: v1

kind: ServiceAccount

metadata:

  name: coredns

  namespace: kube-system

  labels:

      kubernetes.io/cluster-service: "true"

      addonmanager.kubernetes.io/mode: Reconcile

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRole

metadata:

  labels:

    kubernetes.io/bootstrapping: rbac-defaults

    addonmanager.kubernetes.io/mode: Reconcile

  name: system:coredns

rules:

- apiGroups:

  - ""

  resources:

  - endpoints

  - services

  - pods

  - namespaces

  verbs:

  - list

  - watch

- apiGroups:

  - ""

  resources:

  - nodes

  verbs:

  - get

- apiGroups:                          # added afterwards (EndpointSlice permissions needed by newer CoreDNS)

  - discovery.k8s.io                  # added afterwards

  resources:                          # added afterwards

  - endpointslices                    # added afterwards

  verbs:                              # added afterwards

  - list                              # added afterwards

  - watch                             # added afterwards

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding

metadata:

  annotations:

    rbac.authorization.kubernetes.io/autoupdate: "true"

  labels:

    kubernetes.io/bootstrapping: rbac-defaults

    addonmanager.kubernetes.io/mode: EnsureExists

  name: system:coredns

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  name: system:coredns

subjects:

- kind: ServiceAccount

  name: coredns

  namespace: kube-system

---

apiVersion: v1

kind: ConfigMap

metadata:

  name: coredns

  namespace: kube-system

  labels:

      addonmanager.kubernetes.io/mode: EnsureExists

data:

  Corefile: |

    .:53 {

        errors

        health {

            lameduck 5s

        }

        ready

        kubernetes fchiaas.local in-addr.arpa ip6.arpa {         # fchiaas.local is the cluster domain of this k8s cluster

            pods insecure

            fallthrough in-addr.arpa ip6.arpa

            ttl 30

        }

        prometheus :9153

        forward . 10.0.0.20 {              # 10.0.0.20 is the company's internal DNS server

            max_concurrent 1000

        }

        cache 30

        loop

        reload

        loadbalance

    }

---

apiVersion: apps/v1

kind: Deployment

metadata:

  name: coredns

  namespace: kube-system

  labels:

    k8s-app: kube-dns

    kubernetes.io/cluster-service: "true"

    addonmanager.kubernetes.io/mode: Reconcile

    kubernetes.io/name: "CoreDNS"

spec:

  # replicas: not specified here:

  # 1. In order to make Addon Manager do not reconcile this replicas parameter.

  # 2. Default is 1.

  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.

  strategy:

    type: RollingUpdate

    rollingUpdate:

      maxUnavailable: 1

  selector:

    matchLabels:

      k8s-app: kube-dns

  template:

    metadata:

      labels:

        k8s-app: kube-dns

    spec:

      securityContext:

        seccompProfile:

          type: RuntimeDefault

      priorityClassName: system-cluster-critical

      serviceAccountName: coredns

      affinity:

        podAntiAffinity:

          preferredDuringSchedulingIgnoredDuringExecution:

          - weight: 100

            podAffinityTerm:

              labelSelector:

                matchExpressions:

                  - key: k8s-app

                    operator: In

                    values: ["kube-dns"]

              topologyKey: kubernetes.io/hostname

      tolerations:

        - key: "CriticalAddonsOnly"

          operator: "Exists"

      nodeSelector:

        kubernetes.io/os: linux

      containers:

      - name: coredns

        image:  coredns/coredns:v1.8.6

        imagePullPolicy: IfNotPresent

        resources:

          limits:

            memory: 256Mi

          requests:

            cpu: 100m

            memory: 70Mi

        args: [ "-conf", "/etc/coredns/Corefile" ]

        volumeMounts:

        - name: config-volume

          mountPath: /etc/coredns

          readOnly: true

        ports:

        - containerPort: 53

          name: dns

          protocol: UDP

        - containerPort: 53

          name: dns-tcp

          protocol: TCP

        - containerPort: 9153

          name: metrics

          protocol: TCP

        livenessProbe:

          httpGet:

            path: /health

            port: 8080

            scheme: HTTP

          initialDelaySeconds: 60

          timeoutSeconds: 5

          successThreshold: 1

          failureThreshold: 5

        readinessProbe:

          httpGet:

            path: /ready

            port: 8181

            scheme: HTTP

        securityContext:

          allowPrivilegeEscalation: false

          capabilities:

            add:

            - NET_BIND_SERVICE

            drop:

            - all

          readOnlyRootFilesystem: true

      dnsPolicy: Default

      volumes:

        - name: config-volume

          configMap:

            name: coredns

            items:

            - key: Corefile

              path: Corefile

---

apiVersion: v1

kind: Service

metadata:

  name: kube-dns

  namespace: kube-system

  annotations:

    prometheus.io/port: "9153"

    prometheus.io/scrape: "true"

  labels:

    k8s-app: kube-dns

    kubernetes.io/cluster-service: "true"

    addonmanager.kubernetes.io/mode: Reconcile

    kubernetes.io/name: "CoreDNS"

spec:

  selector:

    k8s-app: kube-dns

  clusterIP: 10.100.0.2                 # 10.100.0.2 is the cluster DNS service IP (inside the service CIDR)

  ports:

  - name: dns

    port: 53

    protocol: UDP

  - name: dns-tcp

    port: 53

    protocol: TCP

  - name: metrics

    port: 9153

    protocol: TCP

1.2 Deploy

root@master1:~/maniftest/coredns# kubectl apply -f coredns.yaml

serviceaccount/coredns created

clusterrole.rbac.authorization.k8s.io/system:coredns created

clusterrolebinding.rbac.authorization.k8s.io/system:coredns created

configmap/coredns created

deployment.apps/coredns created

service/kube-dns created

root@master1:~/maniftest/coredns# kubectl get pods -n kube-system -w

NAME                                      READY  STATUS    RESTARTS      AGE

calico-kube-controllers-59df8b6856-qll6h  1/1    Running  4 (144m ago)  11d

calico-node-74687                          1/1    Running  4 (144m ago)  11d

calico-node-f98lx                          1/1    Running  4 (144m ago)  11d

calico-node-j2jhv                          1/1    Running  4 (144m ago)  11d

calico-node-ljnwb                          1/1    Running  7 (5d3h ago)  11d

coredns-69d84cdc49-rtpnd                  1/1    Running  0              5m

coredns-69d84cdc49-xk7nq                  1/1    Running  0              2m36s


root@master1:~/maniftest/coredns# kubectl get svc -n kube-system

NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE

kube-dns   ClusterIP   10.100.0.2   <none>        53/UDP,53/TCP,9153/TCP   13m

root@master1:~/maniftest/coredns# kubectl get svc -A

NAMESPACE     NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE

default       kubernetes   ClusterIP   10.100.0.1   <none>        443/TCP                  11d

kube-system   kube-dns     ClusterIP   10.100.0.2   <none>        53/UDP,53/TCP,9153/TCP   13m


1.3 Test

root@master1:~/maniftest/coredns# kubectl exec -it test -- /bin/sh

sh-4.4# ping kube-dns

ping: kube-dns: Name or service not known

sh-4.4# ping kube-dns.kube-system

PING kube-dns.kube-system.svc.fchiaas.local (10.100.0.2) 56(84) bytes of data.

64 bytes from kube-dns.kube-system.svc.fchiaas.local (10.100.0.2): icmp_seq=1 ttl=64 time=0.036 ms

64 bytes from kube-dns.kube-system.svc.fchiaas.local (10.100.0.2): icmp_seq=2 ttl=64 time=0.045 ms

^C

--- kube-dns.kube-system.svc.fchiaas.local ping statistics ---

2 packets transmitted, 2 received, 0% packet loss, time 1002ms

rtt min/avg/max/mdev = 0.036/0.040/0.045/0.007 ms

sh-4.4# ping kube-dns.kube-system.svc.fchiaas.local

PING kube-dns.kube-system.svc.fchiaas.local (10.100.0.2) 56(84) bytes of data.

64 bytes from kube-dns.kube-system.svc.fchiaas.local (10.100.0.2): icmp_seq=1 ttl=64 time=0.024 ms

64 bytes from kube-dns.kube-system.svc.fchiaas.local (10.100.0.2): icmp_seq=2 ttl=64 time=0.047 ms

^C

--- kube-dns.kube-system.svc.fchiaas.local ping statistics ---

2 packets transmitted, 2 received, 0% packet loss, time 1013ms

rtt min/avg/max/mdev = 0.024/0.035/0.047/0.012 ms
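Service names resolve here because the pod's /etc/resolv.conf points at the cluster DNS IP 10.100.0.2. A quick additional check of external resolution through the forward plugin (a sketch; it assumes the test pod image has nslookup installed, e.g. from bind-utils):

sh-4.4# cat /etc/resolv.conf                           # should list nameserver 10.100.0.2
sh-4.4# nslookup kubernetes.default.svc.fchiaas.local 10.100.0.2
sh-4.4# nslookup www.example.com 10.100.0.2            # forwarded to 10.0.0.20 by the forward plugin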

2. Dashboard

2.1 Download the YAML file

wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml -O dashbord-2.4.0.yaml

Modify the kubernetes-dashboard Service in the file:

spec:

  type: NodePort                       # added afterwards

  ports:

    - port: 443

      targetPort: 8443

      nodePort: 30443               # added afterwards

2.2 Create

root@master1:~/maniftest/dashboard# kubectl apply -f dashbord-2.4.0.yaml

namespace/kubernetes-dashboard created

serviceaccount/kubernetes-dashboard created

service/kubernetes-dashboard created

secret/kubernetes-dashboard-certs created

secret/kubernetes-dashboard-csrf created

secret/kubernetes-dashboard-key-holder created

configmap/kubernetes-dashboard-settings created

role.rbac.authorization.k8s.io/kubernetes-dashboard created

clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created

rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created

clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created

deployment.apps/kubernetes-dashboard created

service/dashboard-metrics-scraper created

deployment.apps/dashboard-metrics-scraper created

root@master1:~/maniftest/dashboard# kubectl get pods -A -o wide -w

NAMESPACE              NAME                                        READY  STATUS    RESTARTS      AGE    IP              NODE        NOMINATED NODE  READINESS GATES

default                deploy-nginx-6899665dbb-2j2d6              1/1    Running  2 (3h6m ago)  5d11h  10.200.104.32    10.0.0.25           

default                deploy-nginx-6899665dbb-8m27p              1/1    Running  2 (3h6m ago)  5d23h  10.200.104.31    10.0.0.25           

default                deploy-nginx-6899665dbb-bpk6x              1/1    Running  2 (3h6m ago)  5d23h  10.200.104.34    10.0.0.25           

default                deploy-nginx-6899665dbb-g7fms              1/1    Running  2 (3h6m ago)  5d23h  10.200.104.33    10.0.0.25           

default                deploy-nginx-6899665dbb-q6btz              1/1    Running  2 (3h6m ago)  5d23h  10.200.104.36    10.0.0.25           

default                deploy-nginx-6899665dbb-tzwbq              1/1    Running  2 (3h6m ago)  5d11h  10.200.104.35    10.0.0.25           

default                test                                        1/1    Running  0              36m    10.200.166.146  10.0.0.24           

kube-system            calico-kube-controllers-59df8b6856-qll6h    1/1    Running  4 (3h6m ago)  11d    10.0.0.25        10.0.0.25           

kube-system            calico-node-74687                          1/1    Running  4 (3h6m ago)  11d    10.0.0.22        10.0.0.22           

kube-system            calico-node-f98lx                          1/1    Running  4 (3h6m ago)  11d    10.0.0.24        10.0.0.24           

kube-system            calico-node-j2jhv                          1/1    Running  4 (3h6m ago)  11d    10.0.0.25        10.0.0.25           

kube-system            calico-node-ljnwb                          1/1    Running  7 (5d4h ago)  11d    10.0.0.21        10.0.0.21           

kube-system            coredns-69d84cdc49-rtpnd                    1/1    Running  0              46m    10.200.104.37    10.0.0.25           

kube-system            coredns-69d84cdc49-xk7nq                    1/1    Running  0              44m    10.200.166.145  10.0.0.24           

kubernetes-dashboard  dashboard-metrics-scraper-c45b7869d-zdt8p  1/1    Running  0              53s    10.200.166.147  10.0.0.24           

kubernetes-dashboard  kubernetes-dashboard-576cb95f94-wl8xb      1/1    Running  0              53s    10.200.166.148  10.0.0.24           

2.3 Create an admin user

apiVersion: v1

kind: ServiceAccount

metadata:

  name: admin-user

  namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding

metadata:

  name: admin-user

roleRef:

  apiGroup: rbac.authorization.k8s.io

  kind: ClusterRole

  name: cluster-admin

subjects:

- kind: ServiceAccount

  name: admin-user

  namespace: kubernetes-dashboard

root@master1:~/maniftest/dashboard# kubectl apply -f admin-user.yaml

serviceaccount/admin-user created

clusterrolebinding.rbac.authorization.k8s.io/admin-user created
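The token secret name (admin-user-token-xxxxx) is generated automatically, so look it up before describing it; a small sketch (the grep is just illustrative):

kubectl -n kubernetes-dashboard get secrets | grep admin-user
# expect something like: admin-user-token-b4c8n   kubernetes.io/service-account-token   3   ...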

root@master1:~/maniftest/dashboard# kubectl describe secret admin-user-token-b4c8n -n kubernetes-dashboard

Name:        admin-user-token-b4c8n

Namespace:    kubernetes-dashboard

Labels:     

Annotations:  kubernetes.io/service-account.name: admin-user

              kubernetes.io/service-account.uid: 2c3de8ea-a919-4b94-aa58-4a99aeef9e2e

Type:  kubernetes.io/service-account-token

Data

====

ca.crt:    1350 bytes

namespace:  20 bytes

token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IkhWb0RMRkFCQUFTTm13Q1VzSFNzaVd3QW5fZjZOeW5QQklqSlBIclptUUEifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJhZG1pbi11c2VyLXRva2VuLWI0YzhuIiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZXJ2aWNlLWFjY291bnQubmFtZSI6ImFkbWluLXVzZXIiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiIyYzNkZThlYS1hOTE5LTRiOTQtYWE1OC00YTk5YWVlZjllMmUiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6a3ViZXJuZXRlcy1kYXNoYm9hcmQ6YWRtaW4tdXNlciJ9.RJuNGpH4ekbO73TOaB7v9w7RuL5ag1lfcgV0q3nq9u9l404Lqt2x4uOVH1uhQo3A6MdluKTYDC_l1xI2f1FKq-Q2G0a8e5XHFhaCOZKPfhG1-bqAYC9CRwvqmB3DUdGsHjznKINsfmcpkGZu4_V91h5DE_iyULT3ym6v466Bv37E_3P4D8aKXRaWWVWKARs7O1bMx_yp19aioAuQOMAYnF5ua7yJtZfFVUDwg81MyQ0C0qJIT6FhQmkTCEp2VbbOnWjoLUwEuqlWF1w_zMo0QJy83fTdrvY30hqvY1bQzs2lJ93UvEylPqdl0iVui2FgmLufJW7pZt45sAZwfMu_jw

2.4 Log in

Open https://<node-ip>:30443 in a browser, choose Token, and paste the token shown above to log in.



3. Common kubectl commands

3.1 kubectl get TYPE NAME -n NAMESPACE        list resources

    -o wide        show additional columns

    -w             watch for changes

3.2 kubectl describe TYPE NAME -n NAMESPACE   show detailed information about a resource

3.3 kubectl explain TYPE                      show the configuration fields of a resource type

3.4 kubectl logs POD                          view a container's logs

3.5 kubectl create / kubectl run              create resource objects

3.6 kubectl apply -f FILE                     create or update objects from a manifest file

3.7 kubectl delete                            delete objects

A short example session using these commands follows.
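This is illustrative only; the pod name is the CoreDNS pod created in section 1.2:

kubectl get pods -n kube-system -o wide
kubectl describe pod coredns-69d84cdc49-rtpnd -n kube-system
kubectl explain deployment.spec.strategy
kubectl logs coredns-69d84cdc49-rtpnd -n kube-system
kubectl apply -f coredns.yaml
kubectl delete -f coredns.yaml        # removes everything the manifest created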

4. etcd

4.1 etcdctl management commands

/usr/bin/etcdctl --write-out=table member list --endpoints=https://10.0.0.31:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem

+------------------+---------+----------------+------------------------+------------------------+------------+

|        ID        | STATUS  |      NAME      |      PEER ADDRS      |      CLIENT ADDRS      | IS LEARNER |

+------------------+---------+----------------+------------------------+------------------------+------------+

| 54e62bec6a0fc430 | started | etcd-10.0.0.29 | https://10.0.0.29:2380 | https://10.0.0.29:2379 |      false |

| 56dc5cf57fee97f0 | started | etcd-10.0.0.31 | https://10.0.0.31:2380 | https://10.0.0.31:2379 |      false |

| 730ef4b6cd5f7961 | started | etcd-10.0.0.30 | https://10.0.0.30:2380 | https://10.0.0.30:2379 |      false |

+------------------+---------+----------------+------------------------+------------------------+------------+

root@etcd03:~# export NODE_IPS="10.0.0.29 10.0.0.30 10.0.0.31"

root@etcd03:~# for ip in ${NODE_IPS}; do /usr/bin/etcdctl --write-out=table endpoint status --endpoints=https://${ip}:2379 --cacert=/etc/kubernetes/ssl/ca.pem --cert=/etc/kubernetes/ssl/etcd.pem --key=/etc/kubernetes/ssl/etcd-key.pem; done

+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

|        ENDPOINT        |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |

+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

| https://10.0.0.29:2379 | 54e62bec6a0fc430 |  3.5.0 |  3.8 MB |    false |      false |        8 |    270378 |            270378 |        |

+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

|        ENDPOINT        |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |

+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

| https://10.0.0.30:2379 | 730ef4b6cd5f7961 |  3.5.0 |  3.9 MB |    false |      false |        8 |    270378 |            270378 |        |

+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

|        ENDPOINT        |        ID        | VERSION | DB SIZE | IS LEADER | IS LEARNER | RAFT TERM | RAFT INDEX | RAFT APPLIED INDEX | ERRORS |

+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+

| https://10.0.0.31:2379 | 56dc5cf57fee97f0 |  3.5.0 |  3.9 MB |      true |      false |        8 |    270378 |            270378 |        |

+------------------------+------------------+---------+---------+-----------+------------+-----------+------------+--------------------+--------+
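The commands below omit the --cacert/--cert/--key flags. One way to avoid repeating them is to export the equivalent environment variables that etcdctl understands (a sketch, assuming the same certificate paths as above):

export ETCDCTL_ENDPOINTS=https://10.0.0.31:2379
export ETCDCTL_CACERT=/etc/kubernetes/ssl/ca.pem
export ETCDCTL_CERT=/etc/kubernetes/ssl/etcd.pem
export ETCDCTL_KEY=/etc/kubernetes/ssl/etcd-key.pem
etcdctl member list --write-out=table        # same output as the fully-flagged command above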

root@etcd03:~# etcdctl get / --prefix --keys-only |more

/calico/ipam/v2/assignment/ipv4/block/10.200.104.0-26

/calico/ipam/v2/assignment/ipv4/block/10.200.166.128-26

/calico/ipam/v2/assignment/ipv4/block/10.200.180.0-26

/calico/ipam/v2/assignment/ipv4/block/10.200.199.64-26

/calico/ipam/v2/handle/ipip-tunnel-addr-master1.fchiaas.lcoal

/calico/ipam/v2/handle/ipip-tunnel-addr-master2

/calico/ipam/v2/handle/ipip-tunnel-addr-node1

/calico/ipam/v2/handle/ipip-tunnel-addr-node2

/calico/ipam/v2/handle/k8s-pod-network.05d2b3a59141b1292e2b766487486c72791e47bf23909fc5a02621b2689c6aa7

/calico/ipam/v2/handle/k8s-pod-network.0bc7559a48f40b946f3e47720539c21d544bee879fce8ac129a99a97a550d1c1

/calico/ipam/v2/handle/k8s-pod-network.15aaeff70b699297ab10df3673b25d2f4a0773f77016d47ea063f980ff09c570

/calico/ipam/v2/handle/k8s-pod-network.1bb56fbf532b66f3879772a7838bd38152773f64ad2b2124c7b01866902dfcbe

/calico/ipam/v2/handle/k8s-pod-network.1d28e1e9a173647705d05cb4d8329b8e852206172437193841dea37743046643

/calico/ipam/v2/handle/k8s-pod-network.3418cd370d8e070f1ddd948f9d8d3afdff5831ad8533611870b01bc19650e89b

/calico/ipam/v2/handle/k8s-pod-network.628b48a4cd51db026e7d0346e2c5336f05f8c85065c76ea7efcde8a264a74362

/calico/ipam/v2/handle/k8s-pod-network.679862357d2a47e326303d8aaf4ef621092d3d3a5e265db7f4d13bf097784210

/calico/ipam/v2/handle/k8s-pod-network.83309a03c182d93bf07ee228850f5089dd60c3b3e3e52847663628fa9df5fdda

/calico/ipam/v2/handle/k8s-pod-network.d52996649ccef1df19aafa4417caf5d70ac37afec20eed4e4066fca133405e80

/calico/ipam/v2/handle/k8s-pod-network.df5f0bcf96df77a8d778fb47f5a1d5ed4217b718a75252d39c64ceffbd930119

/calico/ipam/v2/host/master1.fchiaas.lcoal/ipv4/block/10.200.199.64-26

/calico/ipam/v2/host/master2/ipv4/block/10.200.180.0-26

/calico/ipam/v2/host/node1/ipv4/block/10.200.166.128-26

--More--


root@etcd03:~# etcdctl get /registry/pods/default/test

/registry/pods/default/test

(The value is the Pod object stored by the API server in binary protobuf form, so it is not human-readable; recognizable fragments include the pod name "test", the image centos:latest, the host IP 10.0.0.24 and the pod IP 10.200.166.150.)


root@etcd03:~# etcdctl put /data 1

OK

root@etcd03:~# etcdctl put /data 2

OK

root@etcd03:~# etcdctl del /data 2

341


root@etcd03:~# etcdctl watch /data

PUT

/data

1

PUT

/data

2

DELETE

/data

4.2 Data backup

etcdctl snapshot save BACKUP_FILE

root@etcd03:~# etcdctl snapshot save backup1

{"level":"info","ts":1642224623.165505,"caller":"snapshot/v3_snapshot.go:68","msg":"created temporary db file","path":"backup1.part"}

{"level":"info","ts":1642224623.1690366,"logger":"client","caller":"v3/maintenance.go:211","msg":"opened snapshot stream; downloading"}

{"level":"info","ts":1642224623.1690714,"caller":"snapshot/v3_snapshot.go:76","msg":"fetching snapshot","endpoint":"127.0.0.1:2379"}

{"level":"info","ts":1642224623.2504776,"logger":"client","caller":"v3/maintenance.go:219","msg":"completed snapshot read; closing"}

{"level":"info","ts":1642224623.2565012,"caller":"snapshot/v3_snapshot.go:91","msg":"fetched snapshot","endpoint":"127.0.0.1:2379","size":"3.9 MB","took":"now"}

{"level":"info","ts":1642224623.2565756,"caller":"snapshot/v3_snapshot.go:100","msg":"saved","path":"backup1"}

Snapshot saved at backup1

root@etcd03:~# ls

backup1  snap


4.3 Data restore

etcdctl snapshot restore BACKUP_FILE --data-dir=TARGET_DIR      # the target directory must be empty

root@etcd03:~# etcdctl snapshot restore backup1 --data-dir=/opt/etcd

Deprecated: Use `etcdutl snapshot restore` instead.

2022-01-15T13:34:12+08:00 info snapshot/v3_snapshot.go:251 restoring snapshot {"path": "backup1", "wal-dir": "/opt/etcd/member/wal", "data-dir": "/opt/etcd", "snap-dir": "/opt/etcd/member/snap", "stack": "go.etcd.io/etcd/etcdutl/v3/snapshot.(*v3Manager).Restore\n\t/tmp/etcd-release-3.5.0/etcd/release/etcd/etcdutl/snapshot/v3_snapshot.go:257\ngo.etcd.io/etcd/etcdutl/v3/etcdutl.SnapshotRestoreCommandFunc\n\t/tmp/etcd-release-3.5.0/etcd/release/etcd/etcdutl/etcdutl/snapshot_command.go:147\ngo.etcd.io/etcd/etcdctl/v3/ctlv3/command.snapshotRestoreCommandFunc\n\t/tmp/etcd-release-3.5.0/etcd/release/etcd/etcdctl/ctlv3/command/snapshot_command.go:128\ngithub.com/spf13/cobra.(*Command).execute\n\t/home/remote/sbatsche/.gvm/pkgsets/go1.16.3/global/pkg/mod/github.com/spf13/[email protected]/command.go:856\ngithub.com/spf13/cobra.(*Command).ExecuteC\n\t/home/remote/sbatsche/.gvm/pkgsets/go1.16.3/global/pkg/mod/github.com/spf13/[email protected]/command.go:960\ngithub.com/spf13/cobra.(*Command).Execute\n\t/home/remote/sbatsche/.gvm/pkgsets/go1.16.3/global/pkg/mod/github.com/spf13/[email protected]/command.go:897\ngo.etcd.io/etcd/etcdctl/v3/ctlv3.Start\n\t/tmp/etcd-release-3.5.0/etcd/release/etcd/etcdctl/ctlv3/ctl.go:107\ngo.etcd.io/etcd/etcdctl/v3/ctlv3.MustStart\n\t/tmp/etcd-release-3.5.0/etcd/release/etcd/etcdctl/ctlv3/ctl.go:111\nmain.main\n\t/tmp/etcd-release-3.5.0/etcd/release/etcd/etcdctl/main.go:59\nruntime.main\n\t/home/remote/sbatsche/.gvm/gos/go1.16.3/src/runtime/proc.go:225"}

2022-01-15T13:34:12+08:00 info membership/store.go:119 Trimming membership information from the backend...

2022-01-15T13:34:12+08:00 info membership/cluster.go:393 added member {"cluster-id": "cdf818194e3a8c32", "local-member-id": "0", "added-peer-id": "8e9e05c52164694d", "added-peer-peer-urls": ["http://localhost:2380"]}

2022-01-15T13:34:12+08:00 info snapshot/v3_snapshot.go:272 restored snapshot {"path": "backup1", "wal-dir": "/opt/etcd/member/wal", "data-dir": "/opt/etcd", "snap-dir": "/opt/etcd/member/snap"}

root@etcd03:~# etcdctl snapshot restore backup1 --data-dir=/opt/etcd

Deprecated: Use `etcdutl snapshot restore` instead.

Error: data-dir "/opt/etcd" not empty or could not be read

root@etcd03:~#
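The restore above only writes a fresh data directory; to actually bring an etcd member back from the snapshot, stop etcd and point it at the restored directory. A rough sketch (the data-dir path and unit name are assumptions; a multi-member cluster also needs --name/--initial-cluster/--initial-advertise-peer-urls on restore):

systemctl stop etcd
mv /var/lib/etcd /var/lib/etcd.bak                            # keep the old data just in case
etcdctl snapshot restore backup1 --data-dir=/var/lib/etcd     # restore into the real data directory
systemctl start etcd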

5. Adding master and node nodes

5.1 Current state

root@master1:~/manifests/coredns# kubectl get nodes

NAME        STATUS                    ROLES    AGE    VERSION

10.0.0.21  Ready,SchedulingDisabled  master  11m    v1.22.2

10.0.0.22  Ready,SchedulingDisabled  master  11m    v1.22.2

10.0.0.24  Ready                      node    9m54s  v1.22.2

10.0.0.25  Ready                      node    9m54s  v1.22.2

5.2 Add a master and a node

root@master1:/etc/kubeasz# ./ezctl add-master k8s-cluster1 10.0.0.23

root@master1:/etc/kubeasz# kubectl get nodes

NAME        STATUS                    ROLES    AGE    VERSION

10.0.0.21  Ready,SchedulingDisabled  master  21m    v1.22.2

10.0.0.22  Ready,SchedulingDisabled  master  21m    v1.22.2

10.0.0.23  Ready,SchedulingDisabled  master  114s  v1.22.2

10.0.0.24  Ready                      node    20m    v1.22.2

10.0.0.25  Ready                      node    20m    v1.22.2

root@node2:/etc/kube-lb# cat conf/kube-lb.conf

user root;

worker_processes 1;

error_log  /etc/kube-lb/logs/error.log warn;

events {

    worker_connections  3000;

}

stream {

    upstream backend {

        server 10.0.0.23:6443    max_fails=2 fail_timeout=3s;

        server 10.0.0.21:6443    max_fails=2 fail_timeout=3s;

        server 10.0.0.22:6443    max_fails=2 fail_timeout=3s;

    }

    server {

        listen 127.0.0.1:6443;

        proxy_connect_timeout 1s;

        proxy_pass backend;

    }

}

root@master1:/etc/kubeasz# ./ezctl add-node k8s-cluster1 10.0.0.26

root@master1:/etc/kubeasz# kubectl get nodes

NAME        STATUS                    ROLES    AGE    VERSION

10.0.0.21  Ready,SchedulingDisabled  master  29m    v1.22.2

10.0.0.22  Ready,SchedulingDisabled  master  29m    v1.22.2

10.0.0.23  Ready,SchedulingDisabled  master  10m    v1.22.2

10.0.0.24  Ready                      node    28m    v1.22.2

10.0.0.25  Ready                      node    28m    v1.22.2

10.0.0.26  Ready                      node    2m13s  v1.22.2

5.3 Upgrade

Download the following files (of the version being upgraded to):

kubernetes-client-linux-amd64.tar.gz kubernetes-node-linux-amd64.tar.gz kubernetes-server-linux-amd64.tar.gz kubernetes.tar.gz

Extract them.
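A minimal download-and-extract sketch, assuming the official v1.22.5 release tarball from dl.k8s.io (the server tarball already contains all the binaries copied below):

mkdir -p /opt/soft/kubernetes && cd /opt/soft/kubernetes
wget https://dl.k8s.io/v1.22.5/kubernetes-server-linux-amd64.tar.gz
tar xf kubernetes-server-linux-amd64.tar.gz          # binaries land in kubernetes/server/bin/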

When upgrading, upgrade the master nodes first, then the worker nodes.

Upgrade a master:

root@master1:/opt/soft/kubernetes/kubernetes/server/bin# systemctl stop kube-apiserver.service kube-controller-manager.service kubelet.service kube-proxy.service kube-scheduler.service

root@master1:/opt/soft/kubernetes/kubernetes/server/bin# scp kube-apiserver kube-controller-manager kube-proxy kube-scheduler kubelet 10.0.0.21:/usr/bin/

kube-apiserver                                                                                                                                              100%  119MB 124.8MB/s  00:00   

kube-controller-manager                                                                                                                                    100%  113MB 125.2MB/s  00:00   

kube-proxy                                                                                                                                                  100%  41MB 129.1MB/s  00:00   

kube-scheduler                                                                                                                                              100%  47MB  81.9MB/s  00:00   

kubelet                                                                                                                                                    100%  116MB 121.7MB/s  00:00 


root@master1:/opt/soft/kubernetes/kubernetes/server/bin# systemctl start kube-apiserver.service kube-controller-manager.service kubelet.service kube-proxy.service kube-scheduler.service

root@master1:/opt/soft/kubernetes/kubernetes/server/bin# kubectl get nodes

NAME        STATUS                    ROLES    AGE  VERSION

10.0.0.21  Ready,SchedulingDisabled  master  66m  v1.22.5

10.0.0.22  Ready,SchedulingDisabled  master  66m  v1.22.2

10.0.0.23  Ready,SchedulingDisabled  master  47m  v1.22.2

10.0.0.24  Ready                      node    65m  v1.22.2

10.0.0.25  Ready                      node    65m  v1.22.2

10.0.0.26  Ready                      node    39m  v1.22.2

Upgrade the remaining masters with the same steps:

root@master1:/opt/soft/kubernetes/kubernetes/server/bin# kubectl get nodes

NAME        STATUS                    ROLES    AGE  VERSION

10.0.0.21  Ready,SchedulingDisabled  master  71m  v1.22.5

10.0.0.22  Ready,SchedulingDisabled  master  71m  v1.22.5

10.0.0.23  Ready,SchedulingDisabled  master  51m  v1.22.5

10.0.0.24  Ready                      node    69m  v1.22.2

10.0.0.25  Ready                      node    69m  v1.22.2

10.0.0.26  Ready                      node    43m  v1.22.2

Then upgrade the nodes:

root@node1:~# systemctl stop kubelet.service kube-proxy.service

root@master1:/opt/soft/kubernetes/kubernetes/server/bin# scp kubectl kube-proxy kubelet 10.0.0.24:/usr/bin/

kubectl                                                                                                                                                    100%  45MB 118.5MB/s  00:00   

kube-proxy                                                                                                                                                  100%  41MB 113.4MB/s  00:00   

kubelet                                                                                                                                                    100%  116MB  92.0MB/s  00:01

root@node1:~# systemctl start kubelet.service kube-proxy.service

Follow the same steps to upgrade the remaining nodes:

root@master1:/opt/soft/kubernetes/kubernetes/server/bin# kubectl get nodes

NAME        STATUS                    ROLES    AGE  VERSION

10.0.0.21  Ready,SchedulingDisabled  master  79m  v1.22.5

10.0.0.22  Ready,SchedulingDisabled  master  79m  v1.22.5

10.0.0.23  Ready,SchedulingDisabled  master  59m  v1.22.5

10.0.0.24  Ready                      node    78m  v1.22.5

10.0.0.25  Ready                      node    78m  v1.22.5

10.0.0.26  Ready                      node    51m  v1.22.5

Replace the original binaries used for deployment, so that kubeasz uses the new version from now on:

root@master1:/opt/soft/kubernetes/kubernetes/server/bin# cp kube-apiserver kube-controller-manager kube-proxy kube-scheduler kubelet kubectl /etc/kubeasz/bin/

root@master1:/opt/soft/kubernetes/kubernetes/server/bin# ll /etc/kubeasz/bin/

total 878000

drwxr-xr-x  3 root root      4096 Jan  3 08:10 ./

drwxrwxr-x 12 root root      4096 Jan 15 17:19 ../

-rwxr-xr-x  1 root root  4536104 Feb  5  2021 bridge*

-rwxr-xr-x  1 root root  44978176 Aug 24 22:13 calicoctl*

-rwxr-xr-x  1 root root  10376657 Aug 24 22:12 cfssl*

-rwxr-xr-x  1 root root  6595195 Aug 24 22:13 cfssl-certinfo*

-rwxr-xr-x  1 root root  2277873 Aug 24 22:12 cfssljson*

-rwxr-xr-x  1 root root  1046424 Aug 24 22:12 chronyd*

-rwxr-xr-x  1 root root  36789288 Jan  3 08:09 containerd*

drwxr-xr-x  2 root root      4096 Aug 24 22:13 containerd-bin/

-rwxr-xr-x  1 root root  7172096 Jan  3 08:09 containerd-shim*

-rwxr-xr-x  1 root root  19161064 Jan  3 08:09 ctr*

-rwxr-xr-x  1 root root  61133792 Jan  3 08:09 docker*

-rwxr-xr-x  1 root root  11748168 Aug 24 22:13 docker-compose*

-rwxr-xr-x  1 root root  71555008 Jan  3 08:09 dockerd*

-rwxr-xr-x  1 root root    708616 Jan  3 08:09 docker-init*

-rwxr-xr-x  1 root root  2928566 Jan  3 08:09 docker-proxy*

-rwxr-xr-x  1 root root  23560192 Jun 16  2021 etcd*

-rwxr-xr-x  1 root root  17969152 Jun 16  2021 etcdctl*

-rwxr-xr-x  1 root root  3357992 Feb  5  2021 flannel*

-rwxr-xr-x  1 root root  41603072 Dec  9  2020 helm*

-rwxr-xr-x  1 root root  3565330 Feb  5  2021 host-local*

-rwxr-xr-x  1 root root  1305408 Aug 24 22:12 keepalived*

-rwxr-xr-x  1 root root 124809216 Jan 15 18:52 kube-apiserver*

-rwxr-xr-x  1 root root 118497280 Jan 15 18:52 kube-controller-manager*

-rwxr-xr-x  1 root root  46936064 Jan 15 18:52 kubectl*

-rwxr-xr-x  1 root root 121213080 Jan 15 18:52 kubelet*

-rwxr-xr-x  1 root root  43479040 Jan 15 18:52 kube-proxy*

-rwxr-xr-x  1 root root  49168384 Jan 15 18:52 kube-scheduler*

-rwxr-xr-x  1 root root  3530531 Feb  5  2021 loopback*

-rwxr-xr-x  1 root root  1777808 Aug 24 22:12 nginx*

-rwxr-xr-x  1 root root  3966455 Feb  5  2021 portmap*

-rwxr-xr-x  1 root root  9600824 Jan  3 08:09 runc*

-rwxr-xr-x  1 root root  3668289 Feb  5  2021 tuning*

root@master1:/opt/soft/kubernetes/kubernetes/server/bin# /etc/kubeasz/bin/kube-apiserver --version

6. Create nginx and Tomcat from YAML files and implement dynamic/static separation

6.1 Ubuntu Dockerfile

root@cncf-docker:/opt/dockerfile/system/ubuntu/20.04# cat Dockerfile

FROM ubuntu:20.04

LABEL maintainer="dongxikui"

RUN apt update && apt -y install vim wget tree lrzsz gcc build-essential automake libpcre3 libpcre3-dev zlib1g-dev zlib1g openssl libssl-dev iproute2 net-tools iotop make curl && useradd nginx -u 2001 && groupadd www -g 2022 && useradd www -u 2022 -g www

ADD Shanghai  /usr/share/zoneinfo/Asia/

RUN  rm -fr /etc/localtime && ln -snf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
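Building and pushing the base image so that the later Dockerfiles can reference it (a sketch; the tag matches the FROM line of the nginx image below):

docker build -t reg.fchiaas.local/cncf/ubuntu-20-04:v1 .
docker push reg.fchiaas.local/cncf/ubuntu-20-04:v1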

6.2 nginx Dockerfile and configuration file

root@cncf-docker:/opt/dockerfile/web/nginx# cat Dockerfile

FROM reg.fchiaas.local/cncf/ubuntu-20-04:v1

LABEL maintainer="dongxikui"

ADD nginx-1.18.0.tar.gz /usr/local/src/

RUN cd /usr/local/src/nginx-1.18.0 && ./configure --prefix=/apps/nginx --with-http_sub_module && make && make install

ADD nginx.conf /apps/nginx/conf/nginx.conf

ADD code.tar.gz /data/nginx/html/webapp

ADD run_nginx.sh /apps/nginx/sbin/

RUN mkdir -p /data/nginx/html/webapp/static /data/nginx/html/webapp/images  && chmod a+x /apps/nginx/sbin/run_nginx.sh && chown -R nginx.nginx /apps/* /data/*

EXPOSE 80 443

CMD ["/apps/nginx/sbin/run_nginx.sh"]

root@cncf-docker:/opt/dockerfile/web/nginx# cat nginx.conf

user  nginx nginx;

worker_processes  auto;

#error_log  logs/error.log;

#error_log  logs/error.log  notice;

#error_log  logs/error.log  info;

#pid        logs/nginx.pid;

daemon off;

events {

    worker_connections  1024;

}

http {

    include      mime.types;

    default_type  application/octet-stream;

    #log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '

    #                  '$status $body_bytes_sent "$http_referer" '

    #                  '"$http_user_agent" "$http_x_forwarded_for"';

    #access_log  logs/access.log  main;

    sendfile        on;

    #tcp_nopush    on;

    #keepalive_timeout  0;

    keepalive_timeout  65;

    #gzip  on;

upstream  tomcat_webserver {

        server  magedu-tomcat-app1-service.magedu.svc.fchiaas.local:80;

}

    server {

        listen      80;

        server_name  localhost;

        #charset koi8-r;

        #access_log  logs/host.access.log  main;

        location / {

            root  /data/nginx/html/;

            index  index.html index.htm;

        }

        location /data/nginx/html/webapp {

            root  html;

            index  index.html index.htm;

        }

        location /myapp {

            proxy_pass  http://tomcat_webserver;

            proxy_set_header  Host    $host;

            proxy_set_header  X-Forwarded-For $proxy_add_x_forwarded_for;

            proxy_set_header X-Real-IP $remote_addr;

        }

        #error_page  404              /404.html;

        # redirect server error pages to the static page /50x.html

        #

        error_page  500 502 503 504  /50x.html;

        location = /50x.html {

            root  html;

        }

        # proxy the PHP scripts to Apache listening on 127.0.0.1:80

        #

        #location ~ \.php$ {

        #    proxy_pass  http://127.0.0.1;

        #}

        # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000

        #

        #location ~ \.php$ {

        #    root          html;

        #    fastcgi_pass  127.0.0.1:9000;

        #    fastcgi_index  index.php;

        #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;

        #    include        fastcgi_params;

        #}

        # deny access to .htaccess files, if Apache's document root

        # concurs with nginx's one

        #

        #location ~ /\.ht {

        #    deny  all;

        #}

    }

    # another virtual host using mix of IP-, name-, and port-based configuration

    #

    #server {

    #    listen      8000;

    #    listen      somename:8080;

    #    server_name  somename  alias  another.alias;

    #    location / {

    #        root  html;

    #        index  index.html index.htm;

    #    }

    #}

    # HTTPS server

    #

    #server {

    #    listen      443 ssl;

    #    server_name  localhost;

    #    ssl_certificate      cert.pem;

    #    ssl_certificate_key  cert.key;

    #    ssl_session_cache    shared:SSL:1m;

    #    ssl_session_timeout  5m;

    #    ssl_ciphers  HIGH:!aNULL:!MD5;

    #    ssl_prefer_server_ciphers  on;

    #    location / {

    #        root  html;

    #        index  index.html index.htm;

    #    }

    #}

}

6.3 JDK Dockerfile

root@cncf-docker:/opt/dockerfile/web/jdk/jdk1.18.0# cat Dockerfile

......

unset i

unset -f pathmunge

The added part:

export JAVA_HOME=/usr/local/jdk

export TOMCAT_HOME=/apps/tomcat

export PATH=$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$TOMCAT_HOME/bin:$PATH

export CLASSPATH=.$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib:$JAVA_HOME/lib/tools.jar

6.4 Tomcat Dockerfile

root@cncf-docker:/opt/dockerfile/web/tomcat/tomcat-ubuntu# cat Dockerfile

FROM reg.fchiaas.local/cncf/ubuntu-2004-jdk-1_8_241:v1

LABEL maintainer="dongxikui"

ADD apache-tomcat-8.5.65.tar.gz /apps/

RUN ln -sv /apps/apache-tomcat-8.5.65 /apps/tomcat

ADD run_tomcat.sh /apps/tomcat/bin/run_tomcat.sh

RUN chmod a+x /apps/tomcat/bin/run_tomcat.sh

ADD myapp/* /data/tomcat/webapps/myapp/

ADD server.xml /apps/tomcat/conf/server.xml

RUN chown -R www.www /apps/* /data/*

EXPOSE 8080 8009

CMD ["/apps/tomcat/bin/run_tomcat.sh"]

root@cncf-docker:/opt/dockerfile/web/tomcat/tomcat-ubuntu# cat myapp/index.html

Test myapp1 tomcat page

root@cncf-docker:/opt/dockerfile/web/tomcat/tomcat-ubuntu# cat server.xml

Below is the modified part:
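The content of that modified block is not shown above; a likely form, given that the Dockerfile deploys the app under /data/tomcat/webapps (an assumption, not the original file):

      <Host name="localhost"  appBase="/data/tomcat/webapps"
            unpackWARs="false" autoDeploy="false">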

6.5 nginx YAML file

root@master1:~/manifests/case/dynamic_static# cat nginx.yaml

apiVersion: apps/v1

kind: Deployment

metadata:

  labels:

    app: magedu-nginx-deployment-label

  name: mangedu-nginx-deployment

  namespace: magedu

spec:

  replicas: 1

  selector:

    matchLabels:

      app: magedu-nginx-selector

  template:

    metadata:

      labels:

        app: magedu-nginx-selector

    spec:

      containers:

      - name: magedu-nginx-container

        image: reg.fchiaas.local/cncf/ubuntu-2004-nginx-1_18_0:v1

        imagePullPolicy: Always

        ports:

        - containerPort: 80

          protocol: TCP

          name: http

        - containerPort: 443

          protocol: TCP

          name: https

        env:

        - name: "password"

          value: "123456"

        - name: "age"

          value: "20"

        resources:

          limits:

            cpu: 1

            memory: 2Gi

          requests:

            cpu: 500m

            memory: 1Gi

        volumeMounts:

        - name: magedu-images

          mountPath: /data/nginx/html/webapp/images

          readOnly: false

        - name: magedu-static

          mountPath: /data/nginx/html/webapp/static

          readOnly: false

      volumes:

      - name: magedu-images

        nfs:

          server: 10.0.0.20

          path: /nfs/k8sdata/magedu/images

      - name: magedu-static

        nfs:

          server: 10.0.0.20

          path: /nfs/k8sdata/magedu/static

---

kind: Service

apiVersion: v1

metadata:

  labels:

    app: magedu-nginx-service-label

  name: magedu-nginx-service

  namespace: magedu

spec:

  type: NodePort

  selector:

      app: magedu-nginx-selector

  ports:

  - name: http

    port: 80

    protocol: TCP

    targetPort: 80

    nodePort: 30090

  - name: https

    port: 443

    protocol: TCP

    targetPort: 443

    nodePort: 30091

6.6 Tomcat YAML file

root@master1:~/manifests/case/dynamic_static# cat tomcat.yaml

apiVersion: apps/v1

kind: Deployment

metadata:

  labels:

    app: magedu-tomcat-app1-deployment-label

  name: magedu-tomcat-app1-deployment

  namespace: magedu

spec:

  replicas: 1

  selector:

    matchLabels:

      app: magedu-tomcat-app1-selector

  template:

    metadata:

      labels:

        app: magedu-tomcat-app1-selector

    spec:

      containers:

      - name: magedu-tomcat-app1-container

        image: reg.fchiaas.local/cncf/ubuntu-2004-jdk-1_8_241-tomcat-8_5_65:v1

        imagePullPolicy: Always

        ports:

        - containerPort: 8080

          protocol: TCP

          name: http

        env:

        - name: "password"

          value: "123456"

        - name: "age"

          value: "20"

        resources:

          limits:

            cpu: 1

            memory: 512Mi

          requests:

            cpu: 0.5

            memory: 512Mi

#        volumeMounts:

#        - name: magedu-images

#          mountPath: /data/nginx/html/webapp/images

#          readOnly: false

#        - name: magedu-static

#          mountPath: /data/nginx/html/webapp/static

#          readOnly: false

#      volumes:

#      - name: magedu-images

#        server: 10.0.0.20

#        path: /nfs/k8sdata/magedu/images

#      - name: magedu-static

#        server: 10.0.0.20

#        path: /nfs/k8sdata/magedu/static

#      nodeSelector:

#        project: magedu

#        app: tomcat

---

kind: Service

apiVersion: v1

metadata:

  labels:

    app: magedu-tomcat-app1-service-label

  name: magedu-tomcat-app1-service

  namespace: magedu

spec:

#  type: NodePort

  ports:

  - name: http

    port: 80

    protocol: TCP

    targetPort: 8080

#    nodePort: 30092

  selector:

    app: magedu-tomcat-app1-selector
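Both manifests assume that the magedu namespace already exists and that the NFS server 10.0.0.20 exports the two directories mounted by the nginx pod. A preparation sketch (the export options are assumptions):

kubectl create namespace magedu
# on the NFS server 10.0.0.20:
mkdir -p /nfs/k8sdata/magedu/images /nfs/k8sdata/magedu/static
echo "/nfs/k8sdata *(rw,no_root_squash)" >> /etc/exports && exportfs -r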

6.7 Test

root@master1:~/manifests/case/dynamic_static# kubectl apply -f tomcat.yaml

deployment.apps/magedu-tomcat-app1-deployment created

service/magedu-tomcat-app1-service created

root@master1:~/manifests/case/dynamic_static# kubectl apply -f nginx.yaml

deployment.apps/mangedu-nginx-deployment created

service/magedu-nginx-service created

root@master1:~/manifests/case/dynamic_static# kubectl get pod -A

NAMESPACE              NAME                                            READY  STATUS    RESTARTS        AGE

kube-system            calico-kube-controllers-59df8b6856-44lmg        1/1    Running  8 (3h30m ago)    10d

kube-system            calico-node-4sk4x                                1/1    Running  9 (3h31m ago)    11d

kube-system            calico-node-7hkv2                                1/1    Running  9 (3h30m ago)    11d

kube-system            calico-node-7mh6m                                1/1    Running  9 (3h31m ago)    11d

kube-system            calico-node-qqndf                                1/1    Running  10 (3h30m ago)  11d

kube-system            calico-node-szrbs                                1/1    Running  9 (3h30m ago)    11d

kube-system            calico-node-xrdlz                                1/1    Running  11 (46h ago)    11d

kube-system            coredns-69d84cdc49-d2lcq                        1/1    Running  8 (3h30m ago)    10d

kube-system            coredns-69d84cdc49-ztgv4                        1/1    Running  10 (3h30m ago)  11d

kubernetes-dashboard  dashboard-metrics-scraper-c45b7869d-rnm7t        1/1    Running  7 (3h30m ago)    7d22h

kubernetes-dashboard  kubernetes-dashboard-576cb95f94-flnsj            1/1    Running  6 (3h30m ago)    6d3h

magedu                magedu-tomcat-app1-deployment-5dcbbd96b4-htfwg  1/1    Running  0                15s

magedu                mangedu-nginx-deployment-775c8d479b-mxwxt        1/1    Running  0                5s

root@master1:~/manifests/case/dynamic_static# kubectl get svc -A

NAMESPACE              NAME                         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE

default                kubernetes                   ClusterIP   10.100.0.1       <none>        443/TCP                      11d

default                service-nfs-probe            NodePort    10.100.130.39    <none>        81:30081/TCP                 4d5h

kube-system            kube-dns                     ClusterIP   10.100.0.2       <none>        53/UDP,53/TCP,9153/TCP       11d

kubernetes-dashboard   dashboard-metrics-scraper    ClusterIP   10.100.21.70     <none>        8000/TCP                     7d22h

kubernetes-dashboard   kubernetes-dashboard         NodePort    10.100.89.237    <none>        443:30443/TCP                7d22h

magedu                 magedu-nginx-service         NodePort    10.100.205.204   <none>        80:30090/TCP,443:30091/TCP   36s

magedu                 magedu-tomcat-app1-service   ClusterIP   10.100.120.246   <none>        80/TCP                       46s
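A quick check of the dynamic/static separation from outside the cluster (a sketch; any node IP works, and the root path serves whatever was placed under /data/nginx/html):

curl http://10.0.0.24:30090/                        # static content served directly by nginx
curl http://10.0.0.24:30090/myapp/index.html        # proxied to the Tomcat service; should return "Test myapp1 tomcat page"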
