CoreDNS Installation and Cluster Verification

Contents

  • Overview
  • Installation
  • Test 1
  • Test 2: CoreDNS

Overview

Up to this point, all of the cluster's core components have been installed.

The cluster still needs the CoreDNS component to provide DNS inside the cluster.

Installation

CoreDNS runs as Pods inside the k8s cluster.

Create the following YAML file:

[root@node01 work]# cd /opt/k8s/work/
[root@node01 work]# cat coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
  labels:
      kubernetes.io/cluster-service: "true"
      addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: Reconcile
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
    addonmanager.kubernetes.io/mode: EnsureExists
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
  labels:
      addonmanager.kubernetes.io/mode: EnsureExists
data:
  Corefile: |
    .:53 {
        errors
        health
        kubernetes cluster.local in-addr.arpa ip6.arpa {
            pods insecure
            upstream
            fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  # The replica count is set to 2 here; it will be tuned automatically if
  # DNS horizontal autoscaling is enabled.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns:1.4.0
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: host-time
          mountPath: /etc/localtime
          readOnly: true
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
      dnsPolicy: Default
      volumes:
        - name: host-time
          hostPath:
            path: /etc/localtime
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
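
One thing worth checking before applying the manifest: the clusterIP of the kube-dns Service above (10.254.0.2) must match the clusterDNS address the kubelets were configured with, otherwise Pods will point at a DNS server that does not exist. A quick cross-check on any node (the file paths below are assumptions based on a typical binary installation; adjust them to wherever your kubelet configuration actually lives):

# Show the DNS address the kubelet injects into Pods' /etc/resolv.conf;
# it must equal the clusterIP of the kube-dns Service (10.254.0.2 here).
grep -Ei 'cluster-?dns' /etc/kubernetes/kubelet-config.yaml /etc/systemd/system/kubelet.service 2>/dev/null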

Apply coredns.yaml:

[root@node01 work]# kubectl apply -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created

Check the result:

[root@node01 work]# kubectl get pods -n kube-system -o wide
NAME                       READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
coredns-5c6c9cf6c8-gbhvd   1/1     Running   0          41s   172.30.160.2   node02   <none>           <none>
coredns-5c6c9cf6c8-rtrc5   1/1     Running   0          41s   172.30.48.2    node04   <none>           <none>
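
Both CoreDNS Pods are Running. It is also worth confirming that they are registered behind the kube-dns Service; the endpoints should list the two Pod IPs (172.30.160.2 and 172.30.48.2 above) on ports 53/UDP, 53/TCP and 9153/TCP:

kubectl get svc -n kube-system kube-dns
kubectl get endpoints -n kube-system kube-dns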

Test 1

Create a test YAML file

cd /opt/k8s/work
cat > nginx-ds.yml <<EOF
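# NOTE: the body of this heredoc was lost when the original post was archived.
# The manifest below is a reconstruction, not the author's exact file: it
# assumes the usual nginx DaemonSet plus a NodePort Service on port 80,
# matching the service/nginx-ds and daemonset.extensions/nginx-ds objects
# created in the next step. The nginx:1.13.0 tag is inferred from the Server
# header returned by curl further down.
apiVersion: v1
kind: Service
metadata:
  name: nginx-ds
  labels:
    app: nginx-ds
spec:
  type: NodePort
  selector:
    app: nginx-ds
  ports:
  - name: http
    port: 80
    targetPort: 80
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: nginx-ds
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  template:
    metadata:
      labels:
        app: nginx-ds
    spec:
      containers:
      - name: my-nginx
        image: nginx:1.13.0
        ports:
        - containerPort: 80
EOF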

Apply it:

[root@node01 work]# kubectl apply -f nginx-ds.yml
service/nginx-ds created
daemonset.extensions/nginx-ds created
[root@node01 work]# kubectl get pods -o wide
NAME             READY   STATUS    RESTARTS   AGE   IP             NODE     NOMINATED NODE   READINESS GATES
nginx-ds-4cdb6   1/1     Running   0          28s   172.30.160.3   node02   <none>           <none>
nginx-ds-4l8pv   1/1     Running   0          28s   172.30.80.2    node03   <none>           <none>
nginx-ds-jfz8l   1/1     Running   0          28s   172.30.48.3    node04   <none>           <none>
nginx-ds-pmhw7   1/1     Running   0          28s   172.30.224.2   node01   <none>           <none>
[root@node01 work]# kubectl get svc
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.254.0.1     <none>        443/TCP        7h1m
nginx-ds     NodePort    10.254.7.236   <none>        80:22415/TCP   33s

Test access:

[root@node01 work]# curl -I 10.0.20.11:22415
HTTP/1.1 200 OK
Server: nginx/1.13.0
Date: Thu, 05 Dec 2019 13:31:18 GMT
Content-Type: text/html
Content-Length: 612
Last-Modified: Wed, 10 May 2017 21:50:27 GMT
Connection: keep-alive
ETag: "59138b23-264"
Accept-Ranges: bytes

As you can see, the NodePort Service is now reachable.

Test 2: CoreDNS

Create a busybox Pod and run DNS lookups from inside it:

cat <<EOF | kubectl apply -f -
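# NOTE: the body of this heredoc was also lost; the Pod spec below is a
# reconstruction. busybox:1.28.3 is an assumption; a 1.28.x image is commonly
# used for DNS tests because nslookup in newer busybox releases behaves
# differently.
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  restartPolicy: Always
  containers:
  - name: busybox
    image: busybox:1.28.3
    imagePullPolicy: IfNotPresent
    command:
    - sleep
    - "3600"
EOF

Check that the Pod is running:
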
[root@node01 work]# kubectl get pods
NAME             READY   STATUS    RESTARTS   AGE
busybox          1/1     Running   0          18s

Test resolution of a cluster-internal name:

[root@node01 work]# kubectl exec -it busybox -- nslookup kubernetes
Server:    10.254.0.2
Address 1: 10.254.0.2 kube-dns.kube-system.svc.cluster.local

Name:      kubernetes
Address 1: 10.254.0.1 kubernetes.default.svc.cluster.local

Test resolution of the nginx Service created earlier:

[root@node01 work]# kubectl exec -it busybox -- nslookup nginx-ds.default.svc.cluster.local.
Server:    10.254.0.2
Address 1: 10.254.0.2 kube-dns.kube-system.svc.cluster.local

Name:      nginx-ds.default.svc.cluster.local.
Address 1: 10.254.7.236 nginx-ds.default.svc.cluster.local
[root@node01 work]# kubectl get svc
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
kubernetes   ClusterIP   10.254.0.1     <none>        443/TCP        7h5m
nginx-ds     NodePort    10.254.7.236   <none>        80:22415/TCP   4m7s
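
Both in-cluster names resolve to the expected ClusterIPs. Because the Corefile forwards everything outside cluster.local to the resolvers in /etc/resolv.conf (the forward plugin), an external name makes a useful final check; any public domain will do:

kubectl exec -it busybox -- nslookup kubernetes.io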

This completes the cluster installation.
