DNS Service Deployment

Note: CoreDNS is used here.

CoreDNS official site

Reference blog

1. Download the CoreDNS deployment files

Two files are needed: coredns.yaml.sed and deploy.sh.

wget https://github.com/coredns/deployment/raw/master/kubernetes/coredns.yaml.sed
wget https://github.com/coredns/deployment/raw/master/kubernetes/deploy.sh
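
Since deploy.sh is fetched with wget, it is probably not executable yet; a small assumed housekeeping step before running it later (the script also shells out to kubectl, and to jq when translating an existing kube-dns ConfigMap):

chmod +x deploy.sh
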
[root@k8s-node1 coredns]# cat coredns.yaml.sed 
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health
        ready
        kubernetes CLUSTER_DOMAIN REVERSE_CIDRS {
          pods insecure
          fallthrough in-addr.arpa ip6.arpa
        }FEDERATIONS
        prometheus :9153
        forward . UPSTREAMNAMESERVER
        cache 30
        loop
        reload
        loadbalance
    }STUBDOMAINS
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns:1.6.2
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: CLUSTER_DNS_IP
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
[root@k8s-node1 coredns]# cat deploy.sh 
#!/bin/bash

# Deploys CoreDNS to a cluster currently running Kube-DNS.

show_help () {
cat << USAGE
usage: $0 [ -r REVERSE-CIDR ] [ -i DNS-IP ] [ -d CLUSTER-DOMAIN ] [ -t YAML-TEMPLATE ]

    -r : Define a reverse zone for the given CIDR. You may specify this option more
         than once to add multiple reverse zones. If no reverse CIDRs are defined,
         then the default is to handle all reverse zones (i.e. in-addr.arpa and ip6.arpa)
    -i : Specify the cluster DNS IP address. If not specified, the IP address of
         the existing "kube-dns" service is used, if present.
    -s : Skips the translation of kube-dns configmap to the corresponding CoreDNS Corefile configuration.

USAGE
exit 0
}

# Simple Defaults
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
CLUSTER_DOMAIN=cluster.local
YAML_TEMPLATE="$DIR/coredns.yaml.sed"
STUBDOMAINS=""
UPSTREAM=\\/etc\\/resolv\.conf
FEDERATIONS=""

# Translates the kube-dns ConfigMap to equivalent CoreDNS Configuration.
function translate-kube-dns-configmap {
    kube-dns-federation-to-coredns
    kube-dns-upstreamnameserver-to-coredns
    kube-dns-stubdomains-to-coredns
}

function kube-dns-federation-to-coredns {
  fed=$(kubectl -n kube-system get configmap kube-dns  -ojsonpath='{.data.federations}' 2> /dev/null | jq . | tr -d '":,')
  if [[ ! -z ${fed} ]]; then
  FEDERATIONS=$(sed -e '1s/^/federation /' -e 's/^/        /' -e '1i\\' <<< "${fed}") # add federation to the stanza
  fi
}

function kube-dns-upstreamnameserver-to-coredns {
  up=$(kubectl -n kube-system get configmap kube-dns  -ojsonpath='{.data.upstreamNameservers}' 2> /dev/null | tr -d '[",]')
  if [[ ! -z ${up} ]]; then
    UPSTREAM=${up}
  fi
}

function kube-dns-stubdomains-to-coredns {
  STUBDOMAIN_TEMPLATE='
    SD_DOMAIN:53 {
      errors
      cache 30
      loop
      forward . SD_DESTINATION
    }'

  function dequote {
    str=${1#\"} # delete leading quote
    str=${str%\"} # delete trailing quote
    echo ${str}
  }

  function parse_stub_domains() {
    sd=$1

  # get keys - each key is a domain
  sd_keys=$(echo -n $sd | jq keys[])

  # For each domain ...
  for dom in $sd_keys; do
    dst=$(echo -n $sd | jq '.['$dom'][0]') # get the destination

    dom=$(dequote $dom)
    dst=$(dequote $dst)

    sd_stanza=${STUBDOMAIN_TEMPLATE/SD_DOMAIN/$dom} # replace SD_DOMAIN
    sd_stanza=${sd_stanza/SD_DESTINATION/$dst} # replace SD_DESTINATION
    echo "$sd_stanza"
  done
}

  sd=$(kubectl -n kube-system get configmap kube-dns  -ojsonpath='{.data.stubDomains}' 2> /dev/null)
  STUBDOMAINS=$(parse_stub_domains "$sd")
}

# Get Opts
while getopts "hsr:i:d:t:k:" opt; do
    case "$opt" in
    h)  show_help
        ;;
    s)  SKIP=1
        ;;
    r)  REVERSE_CIDRS="$REVERSE_CIDRS $OPTARG"
        ;;
    i)  CLUSTER_DNS_IP=$OPTARG
        ;;
    d)  CLUSTER_DOMAIN=$OPTARG
        ;;
    t)  YAML_TEMPLATE=$OPTARG
        ;;
    esac
done

# Conditional Defaults
if [[ -z $REVERSE_CIDRS ]]; then
  REVERSE_CIDRS="in-addr.arpa ip6.arpa"
fi
if [[ -z $CLUSTER_DNS_IP ]]; then
  # Default IP to kube-dns IP
  CLUSTER_DNS_IP=$(kubectl get service --namespace kube-system kube-dns -o jsonpath="{.spec.clusterIP}")
  if [ $? -ne 0 ]; then
      >&2 echo "Error! The IP address for DNS service couldn't be determined automatically. Please specify the DNS-IP with the '-i' option."
      exit 2
  fi
fi

if [[ "${SKIP}" -ne 1 ]] ; then
    translate-kube-dns-configmap
fi

orig=$'\n'
replace=$'\\\n'
sed -e "s/CLUSTER_DNS_IP/$CLUSTER_DNS_IP/g" \
    -e "s/CLUSTER_DOMAIN/$CLUSTER_DOMAIN/g" \
    -e "s?REVERSE_CIDRS?$REVERSE_CIDRS?g" \
    -e "s@STUBDOMAINS@${STUBDOMAINS//$orig/$replace}@g" \
    -e "s@FEDERATIONS@${FEDERATIONS//$orig/$replace}@g" \
    -e "s/UPSTREAMNAMESERVER/$UPSTREAM/g" \
    "${YAML_TEMPLATE}"
[root@k8s-node1 coredns]# 

2. Version compatibility

Kubernetes and CoreDNS version compatibility matrix

According to the matrix, a Kubernetes v1.15 cluster pairs with CoreDNS v1.3.1.

Oddly, CoreDNS v1.3.1 did not work with my k8s v1.15.5 cluster, while CoreDNS v1.6.2 works fine.
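
Once CoreDNS is deployed (section 3 below), you can confirm which image version the Deployment actually runs; a quick check, assuming the Deployment keeps the default name coredns:

kubectl -n kube-system get deployment coredns -o jsonpath='{.spec.template.spec.containers[0].image}'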

3. Deployment

3.1 Migrating from an existing kube-dns

If kube-dns was previously deployed, run the migration command (note that coredns.yaml.sed and deploy.sh must be in the same directory, otherwise the command below will fail):

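A sketch of the typical migration flow, following the coredns/deployment repository README (adjust to your cluster): generate the manifest from the running kube-dns configuration, apply it, then remove the old kube-dns Deployment.

./deploy.sh | kubectl apply -f -
kubectl delete --namespace=kube-system deployment kube-dns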

3.2 Fresh deployment (no existing kube-dns)

If kube-dns was never deployed, do a fresh deployment.

coredns.yaml.sed relies on the placeholders CLUSTER_DOMAIN, REVERSE_CIDRS, and CLUSTER_DNS_IP.

My understanding (the cluster variables are defined in /opt/k8s/bin/environment.sh):

REVERSE_CIDRS maps to the cluster variable SERVICE_CIDR="10.254.0.0/16", and CLUSTER_DOMAIN is the cluster domain cluster.local.

CLUSTER_DNS_IP maps to CLUSTER_DNS_SVC_IP="10.254.0.2".
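
For reference, a minimal sketch of how those variables might look in /opt/k8s/bin/environment.sh (the file itself is not shown in this post; the values are taken from above, and the CLUSTER_DNS_DOMAIN name is an assumption):

# service network CIDR; passed to deploy.sh as REVERSE_CIDRS (-r)
SERVICE_CIDR="10.254.0.0/16"
# cluster DNS service IP; passed to deploy.sh as CLUSTER_DNS_IP (-i), must fall inside SERVICE_CIDR
CLUSTER_DNS_SVC_IP="10.254.0.2"
# cluster domain; passed to deploy.sh as CLUSTER_DOMAIN (-d)
CLUSTER_DNS_DOMAIN="cluster.local"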

Run the installation command:

[root@k8s-node1 coredns]# ./deploy.sh -r 10.254.0.0/16 -i 10.254.0.2  -d cluster.local -t coredns.yaml.sed -s >coredns.yaml
[root@k8s-node1 coredns]# ls
coredns.yaml  coredns.yaml.sed  deploy.sh
[root@k8s-node1 coredns]# cat coredns.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health
        ready
        kubernetes cluster.local  10.254.0.0/16 {
          pods insecure
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns:1.6.2
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.254.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
[root@k8s-node1 coredns]# kubectl apply -f coredns.yaml
serviceaccount/coredns created
clusterrole.rbac.authorization.k8s.io/system:coredns created
clusterrolebinding.rbac.authorization.k8s.io/system:coredns created
configmap/coredns created
deployment.apps/coredns created
service/kube-dns created
[root@k8s-node1 coredns]#
[root@k8s-node1 coredns]# kubectl get svc,pod -n kube-system -o wide
NAME               TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE     SELECTOR
service/kube-dns   ClusterIP   10.254.0.2   <none>        53/UDP,53/TCP,9153/TCP   2m20s   k8s-app=kube-dns

NAME                         READY   STATUS    RESTARTS   AGE     IP            NODE        NOMINATED NODE   READINESS GATES
pod/coredns-5fb99965-brwjq   1/1     Running   0          2m20s   172.30.78.3   k8s-node1   <none>           <none>
pod/coredns-5fb99965-svpvn   1/1     Running   0          2m20s   172.30.86.4   k8s-node3   <none>           <none>
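
To confirm the rollout finished before moving on, an optional quick check:

kubectl -n kube-system rollout status deployment coredns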

4. Testing

Verify the version

[root@k8s-node1 coredns]# docker ps
CONTAINER ID        IMAGE                                                        COMMAND                  CREATED             STATUS              PORTS               NAMES
1652d0865a39        bf261d157914                                                 "/coredns -conf /etc…"   18 minutes ago      Up 18 minutes                           k8s_coredns_coredns-5fb99965-brwjq_kube-system_6b041d84-1347-416e-a965-97b33426eb1d_0
e4940bfd1ad8        registry.access.redhat.com/rhel7/pod-infrastructure:latest   "/usr/bin/pod"           18 minutes ago      Up 18 minutes                           k8s_POD_coredns-5fb99965-brwjq_kube-system_6b041d84-1347-416e-a965-97b33426eb1d_0
4968ba4cce04        httpd                                                        "httpd-foreground"       5 hours ago         Up 5 hours                              k8s_httpd-app_httpd-app-764bb697c5-l9x4p_default_953f4979-7064-4dfb-ba52-43f69de379d8_2
de5088ec2d26        registry.access.redhat.com/rhel7/pod-infrastructure:latest   "/usr/bin/pod"           5 hours ago         Up 5 hours                              k8s_POD_httpd-app-764bb697c5-l9x4p_default_953f4979-7064-4dfb-ba52-43f69de379d8_2
[root@k8s-node1 coredns]# docker logs 1652d0865a39
.:53
2019-11-06T06:32:25.055Z [INFO] plugin/reload: Running configuration MD5 = 2e454467f9f65cbbbb3cfe344eaf952f
2019-11-06T06:32:25.056Z [INFO] CoreDNS-1.6.2
2019-11-06T06:32:25.056Z [INFO] linux/amd64, go1.12.8, 795a3eb
CoreDNS-1.6.2
linux/amd64, go1.12.8, 795a3eb
[root@k8s-node1 coredns]#
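
The same version banner can also be read without going through docker, via kubectl (using the pod name from the listing above):

kubectl -n kube-system logs coredns-5fb99965-brwjq | head -n 5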

Configure DNS resolution for pods

Edit /etc/systemd/system/kubelet.service on every node (the file shown below) and add the following kubelet flags:

--cluster-dns=10.254.0.2 
--cluster-domain=cluster.local.
--resolv-conf=/etc/resolv.conf
[root@k8s-node1 coredns]# cat /etc/systemd/system/kubelet.service 
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
After=docker.service
Requires=docker.service
[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/opt/k8s/bin/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
  --cert-dir=/etc/kubernetes/cert \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --config=/etc/kubernetes/kubelet.config.json \
  --hostname-override=k8s-node1 \
  --pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2 \
  --cluster-dns=10.254.0.2 \
  --cluster-domain=cluster.local. \
  --resolv-conf=/etc/resolv.conf
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
[root@k8s-node1 coredns]# 

Restart the kubelet service

[root@k8s-node1 coredns]# systemctl daemon-reload && systemctl restart kubelet
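
To make sure the restarted kubelet really picked up the new flags, an optional sanity check on each node:

ps -ef | grep kubelet | grep cluster-dns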

Start a busybox pod for testing. Mind the busybox version: use busybox:1.28.4 (or an earlier image), otherwise nslookup misbehaves.

[root@k8s-node1 test]# cat busybox.yaml 
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - image: busybox:1.28.4
    command:
      - sleep
      - "3600"
    imagePullPolicy: IfNotPresent
    name: busybox
  restartPolicy: Always
[root@k8s-node1 test]# kubectl apply -f busybox.yaml 
pod/busybox created
[root@k8s-node1 test]# kubectl get pod
NAME                         READY   STATUS    RESTARTS   AGE
busybox                      1/1     Running   0          56s
httpd-app-764bb697c5-5qxfr   1/1     Running   2          22h
httpd-app-764bb697c5-g7z8v   1/1     Running   3          22h
httpd-app-764bb697c5-l9nrd   1/1     Running   2          22h
httpd-app-764bb697c5-l9x4p   1/1     Running   3          22h
httpd-app-764bb697c5-qfhbd   1/1     Running   3          22h

Services currently running in the cluster:

[root@k8s-node1 test]# kubectl get svc
NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)       AGE
httpd-svc    NodePort    10.254.82.41   <none>        80:8936/TCP   22h
kubernetes   ClusterIP   10.254.0.1     <none>        443/TCP       46h
[root@k8s-node1 test]# kubectl exec -it busybox /bin/sh
/ # nslookup httpd-svc
Server:    10.254.0.2
Address 1: 10.254.0.2 kube-dns.kube-system.svc.cluster.local
Name:      httpd-svc
Address 1: 10.254.82.41 httpd-svc.default.svc.cluster.local
/ # nslookup kubernetes
Server:    10.254.0.2
Address 1: 10.254.0.2 kube-dns.kube-system.svc.cluster.local
Name:      kubernetes
Address 1: 10.254.0.1 kubernetes.default.svc.cluster.local
/ # ping httpd-svc
PING httpd-svc (10.254.82.41): 56 data bytes
64 bytes from 10.254.82.41: seq=0 ttl=64 time=0.090 ms
64 bytes from 10.254.82.41: seq=1 ttl=64 time=0.079 ms
64 bytes from 10.254.82.41: seq=2 ttl=64 time=0.076 ms
^C
--- httpd-svc ping statistics ---
3 packets transmitted, 3 packets received, 0% packet loss
round-trip min/avg/max = 0.076/0.081/0.090 ms

Important: to resolve a Service in a different namespace you must append the namespace to the name, otherwise resolution fails. See below.

Success:

# nslookup kube-dns.kube-system
Server:    10.254.0.2
Address 1: 10.254.0.2 kube-dns.kube-system.svc.cluster.local
Name:      kube-dns.kube-system
Address 1: 10.254.0.2 kube-dns.kube-system.svc.cluster.local

Failure:

# nslookup kube-dns
Server:    10.254.0.2
Address 1: 10.254.0.2 kube-dns.kube-system.svc.cluster.local
nslookup: can't resolve 'kube-dns'
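
This behavior comes from the search domains the kubelet writes into each pod's /etc/resolv.conf. Inside the busybox pod it typically looks roughly like the following (assumed output; the exact contents depend on the pod's namespace and the --cluster-domain setting):

/ # cat /etc/resolv.conf
nameserver 10.254.0.2
search default.svc.cluster.local svc.cluster.local cluster.local
options ndots:5

A bare name such as httpd-svc is expanded through default.svc.cluster.local, which is why services in other namespaces must be queried as name.namespace, or by their full name, e.g. kube-dns.kube-system.svc.cluster.local.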