1、查看k8s本身的DNS(coredns),发现本身是有DNS的(kubeadm默认安装时有coredns)
[root@kubernetes bak1]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-6694fb884c-mgn79 1/1 Running 2 3d3h
coredns-6694fb884c-ncqh6 1/1 Running 2 3d3h
etcd-kubernetes 1/1 Running 13 52d
kube-apiserver-kubernetes 1/1 Running 12 52d
kube-controller-manager-kubernetes 1/1 Running 8 52d
kube-flannel-ds-amd64-5cv9n 1/1 Running 8 52d
kube-flannel-ds-amd64-6tzvm 1/1 Running 7 52d
kube-flannel-ds-amd64-827f9 1/1 Running 8 52d
kube-proxy-h6kwp 1/1 Running 2 3d
kube-proxy-kbxcr 1/1 Running 2 3d
kube-proxy-s86dr 1/1 Running 2 3d
kube-scheduler-kubernetes 1/1 Running 8 52d
2、测试coredns被删除后是否可以自动恢复(测试coredns部署是否正常)
若是正常,会恢复如下,则可以进行dns解析测试,见下文
[root@kubernetes bak1]# kubectl get pods -n kube-system|grep coredns
coredns-6694fb884c-8tklp 1/1 Running 0 11s
coredns-6694fb884c-qfcnq 1/1 Running 0 11s
若是如下,不能恢复,则说明coredns部署有问题,需要重新部署
[root@kubernetes bak1]# kubectl get pods -n kube-system|grep coredns
coredns-6694fb884c-8tklp 0/1 Running 0 11s
coredns-6694fb884c-qfcnq 0/1 Running 0 11s
3、部署coredns
# 10.96.0.10 为kubeadm默认的kube-dns服务地址(必须位于集群service网段内)
mkdir coredns && cd coredns
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/deploy.sh
chmod +x deploy.sh
#可能会报这两个错误,可以不用理会 ./deploy.sh:行37: jq: 未找到命令,./deploy.sh:行69: jq: 未找到命令
./deploy.sh -i 10.96.0.10 > coredns.yml
#可能会报错,10.96.0.10超出了IP范围,需要将coredns.yml中IP改成提示IP范围内的IP
kubectl apply -f coredns.yml
#coredns.yaml.sed,deploy.sh,coredns.yml三个文件的内容请看文档最后
4、测试coredns被删除后是否可以自动恢复(测试coredns部署是否正常)
[root@kubernetes bak1]# kubectl get pods -n kube-system|grep coredns
coredns-6694fb884c-8tklp 1/1 Running 0 11s
coredns-6694fb884c-qfcnq 1/1 Running 0 11s
5、测试dns是否可以正常解析主机名等
查看集群内的服务,下面的测试会用到
[root@kubernetes coredns]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1
mysqlserver ClusterIP 10.102.10.64
zabbixserver ClusterIP 10.96.0.3
zabbixweb NodePort 10.104.8.6
创建一个busybox容器进行测试
[root@kubernetes coredns]# cat busybox.yaml
apiVersion: v1
kind: Pod
metadata:
name: busybox
namespace: default
spec:
containers:
- name: busybox
image: busybox:1.28
command:
- sleep
- "3600"
imagePullPolicy: IfNotPresent
restartPolicy: Always
[root@kubernetes coredns]# kubectl create -f busybox.yaml
[root@kubernetes coredns]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
busybox 1/1 Running 7 3d3h 10.244.2.35 kubernetes-node2
mysqlserver-884cdffb8-jdmn5 1/1 Running 0 112m 10.244.1.48 kubernetes-node1
zabbixserver-5f785f9cbf-2xzg5 1/1 Running 0 112m 10.244.1.49 kubernetes-node1
zabbixweb-78757f97df-8hv5n 1/1 Running 0 112m 10.244.2.39 kubernetes-node2
从上面的信息可以看见busybox的容器在kubernetes-node2节点上,可以到该节点上进入容器测试
[root@kubernetes-node2 ~]# docker ps| grep busybox
761d5ca8348c 8c811b4aec35 "sleep 3600" 27 minutes ago Up 27 minutes k8s_busybox_busybox_default_e0be6e6e-03b8-11ea-aef5-000c296c2dec_7
22c2e26d20b6 k8s.gcr.io/pause:3.1 "/pause" 2 hours ago Up 2 hours k8s_POD_busybox_default_e0be6e6e-03b8-11ea-aef5-000c296c2dec_8
[root@kubernetes-node2 ~]# docker exec -it 761 /bin/sh
/ # ping www.baidu.com
PING www.baidu.com (14.215.177.39): 56 data bytes
64 bytes from 14.215.177.39: seq=0 ttl=127 time=14.071 ms
64 bytes from 14.215.177.39: seq=1 ttl=127 time=10.249 ms
64 bytes from 14.215.177.39: seq=2 ttl=127 time=12.100 ms
64 bytes from 14.215.177.39: seq=3 ttl=127 time=9.546 ms
64 bytes from 14.215.177.39: seq=4 ttl=127 time=9.362 ms
^C
--- www.baidu.com ping statistics ---
5 packets transmitted, 5 packets received, 0% packet loss
round-trip min/avg/max = 9.362/11.065/14.071 ms
/ #
上面测试显示可以出外网
/ # ping zabbixserver
PING zabbixserver (10.96.0.3): 56 data bytes
64 bytes from 10.96.0.3: seq=0 ttl=64 time=0.041 ms
64 bytes from 10.96.0.3: seq=1 ttl=64 time=0.072 ms
64 bytes from 10.96.0.3: seq=2 ttl=64 time=0.067 ms
64 bytes from 10.96.0.3: seq=3 ttl=64 time=0.095 ms
64 bytes from 10.96.0.3: seq=4 ttl=64 time=0.078 ms
^C
--- zabbixserver ping statistics ---
5 packets transmitted, 5 packets received, 0% packet loss
round-trip min/avg/max = 0.041/0.070/0.095 ms
/ #
上面测试显示可以解析集群内的服务(zabbixserver是自己在集群内搭建的服务)
/ # ping kubernetes
PING kubernetes (10.96.0.1): 56 data bytes
64 bytes from 10.96.0.1: seq=0 ttl=64 time=0.141 ms
64 bytes from 10.96.0.1: seq=1 ttl=64 time=0.068 ms
64 bytes from 10.96.0.1: seq=2 ttl=64 time=0.064 ms
64 bytes from 10.96.0.1: seq=3 ttl=64 time=0.064 ms
^C
--- kubernetes ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 0.064/0.084/0.141 ms
/ #
/ # ping 10.96.0.3
PING 10.96.0.3 (10.96.0.3): 56 data bytes
64 bytes from 10.96.0.3: seq=0 ttl=64 time=0.079 ms
64 bytes from 10.96.0.3: seq=1 ttl=64 time=0.092 ms
64 bytes from 10.96.0.3: seq=2 ttl=64 time=0.063 ms
64 bytes from 10.96.0.3: seq=3 ttl=64 time=0.072 ms
^C
--- 10.96.0.3 ping statistics ---
4 packets transmitted, 4 packets received, 0% packet loss
round-trip min/avg/max = 0.063/0.076/0.092 ms
/ #
上面测试显示可以测试集群的ClusterIP
到此,k8s的dns(coredns)集群搭建完成
上面提到的三个配置文件如下
coredns.yaml.sed文件内容
[root@kubernetes coredns]# cat coredns.yaml.sed
# CoreDNS deployment template (from the upstream coredns/deployment repo).
# Upper-case placeholders (CLUSTER_DNS_IP, CLUSTER_DOMAIN, REVERSE_CIDRS,
# UPSTREAMNAMESERVER, FEDERATIONS, STUBDOMAINS) are substituted by deploy.sh.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health
        ready
        kubernetes CLUSTER_DOMAIN REVERSE_CIDRS {
          pods insecure
          fallthrough in-addr.arpa ip6.arpa
        }FEDERATIONS
        prometheus :9153
        forward . UPSTREAMNAMESERVER
        cache 30
        loop
        reload
        loadbalance
    }STUBDOMAINS
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns:1.6.2
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: CLUSTER_DNS_IP
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
deploy.sh文件内容
[root@kubernetes coredns]# cat deploy.sh
#!/bin/bash
# Deploys CoreDNS to a cluster currently running Kube-DNS.
# Print usage information on stdout and exit 0.
# Fix: -d and -t are accepted by getopts below but were missing from the
# option descriptions; document them here.
show_help () {
cat << USAGE
usage: $0 [ -r REVERSE-CIDR ] [ -i DNS-IP ] [ -d CLUSTER-DOMAIN ] [ -t YAML-TEMPLATE ]
-r : Define a reverse zone for the given CIDR. You may specify this option more
than once to add multiple reverse zones. If no reverse CIDRs are defined,
then the default is to handle all reverse zones (i.e. in-addr.arpa and ip6.arpa)
-i : Specify the cluster DNS IP address. If not specified, the IP address of
the existing "kube-dns" service is used, if present.
-d : Specify the cluster domain used in the rendered Corefile (default: cluster.local).
-t : Specify the YAML template to render (default: coredns.yaml.sed next to this script).
-s : Skips the translation of kube-dns configmap to the corresponding CoreDNS Corefile configuration.
USAGE
exit 0
}
# Simple Defaults
# Absolute directory of this script; used to locate the bundled YAML template.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
CLUSTER_DOMAIN=cluster.local
YAML_TEMPLATE="$DIR/coredns.yaml.sed"
STUBDOMAINS=""
# Default upstream resolver; the slashes are backslash-escaped because the
# value is later substituted via a sed "s/UPSTREAMNAMESERVER/.../" expression.
UPSTREAM=\\/etc\\/resolv\.conf
FEDERATIONS=""
# Translates the kube-dns ConfigMap to equivalent CoreDNS Configuration.
function translate-kube-dns-configmap {
# Each helper reads the legacy kube-dns ConfigMap (when present) and fills
# in FEDERATIONS, UPSTREAM and STUBDOMAINS for the template substitution.
kube-dns-federation-to-coredns
kube-dns-upstreamnameserver-to-coredns
kube-dns-stubdomains-to-coredns
}
# Convert the kube-dns "federations" ConfigMap entry (if any) into a CoreDNS
# "federation" stanza stored in $FEDERATIONS. Requires jq (see the note in
# the document about harmless "jq: command not found" errors when absent).
function kube-dns-federation-to-coredns {
fed=$(kubectl -n kube-system get configmap kube-dns -ojsonpath='{.data.federations}' 2> /dev/null | jq . | tr -d '":,')
if [[ ! -z ${fed} ]]; then
# sed: prefix the first line with "federation ", indent every line, and
# prepend an escaped blank line so the stanza splices into the Corefile.
FEDERATIONS=$(sed -e '1s/^/federation /' -e 's/^/ /' -e '1i\\' <<< "${fed}") # add federation to the stanza
fi
}
# Override the default upstream resolver with the "upstreamNameservers"
# value from the legacy kube-dns ConfigMap, when one is configured.
# tr strips the JSON list punctuation, leaving space-separated addresses.
kube-dns-upstreamnameserver-to-coredns() {
  local servers
  servers=$(kubectl -n kube-system get configmap kube-dns -ojsonpath='{.data.upstreamNameservers}' 2> /dev/null | tr -d '[",]')
  if [[ -n "${servers}" ]]; then
    UPSTREAM=${servers}
  fi
}
# Convert the kube-dns "stubDomains" ConfigMap entry (a JSON map of
# domain -> [nameserver, ...]) into one CoreDNS server block per domain,
# stored in $STUBDOMAINS. Requires jq.
function kube-dns-stubdomains-to-coredns {
STUBDOMAIN_TEMPLATE='
SD_DOMAIN:53 {
errors
cache 30
loop
forward . SD_DESTINATION
}'
# Strip one leading and one trailing double quote from $1 (jq output).
function dequote {
str=${1#\"} # delete leading quote
str=${str%\"} # delete trailing quote
echo ${str}
}
# Render one stub-domain stanza (from the template above) per key of the
# JSON map passed in $1; only the first nameserver per domain is used.
function parse_stub_domains() {
sd=$1
# get keys - each key is a domain
sd_keys=$(echo -n $sd | jq keys[])
# For each domain ...
for dom in $sd_keys; do
dst=$(echo -n $sd | jq '.['$dom'][0]') # get the destination
dom=$(dequote $dom)
dst=$(dequote $dst)
sd_stanza=${STUBDOMAIN_TEMPLATE/SD_DOMAIN/$dom} # replace SD_DOMAIN
sd_stanza=${sd_stanza/SD_DESTINATION/$dst} # replace SD_DESTINATION
echo "$sd_stanza"
done
}
sd=$(kubectl -n kube-system get configmap kube-dns -ojsonpath='{.data.stubDomains}' 2> /dev/null)
STUBDOMAINS=$(parse_stub_domains "$sd")
}
# Get Opts
# -h help, -s skip configmap translation, -r reverse CIDR (repeatable),
# -i cluster DNS IP, -d cluster domain, -t template path.
# NOTE(review): "k:" is declared in the optstring but has no case arm —
# presumably a leftover; confirm against upstream before relying on it.
while getopts "hsr:i:d:t:k:" opt; do
case "$opt" in
h) show_help
;;
s) SKIP=1
;;
r) REVERSE_CIDRS="$REVERSE_CIDRS $OPTARG"
;;
i) CLUSTER_DNS_IP=$OPTARG
;;
d) CLUSTER_DOMAIN=$OPTARG
;;
t) YAML_TEMPLATE=$OPTARG
;;
esac
done
# Conditional Defaults
if [[ -z $REVERSE_CIDRS ]]; then
REVERSE_CIDRS="in-addr.arpa ip6.arpa"
fi
if [[ -z $CLUSTER_DNS_IP ]]; then
# Default IP to kube-dns IP
CLUSTER_DNS_IP=$(kubectl get service --namespace kube-system kube-dns -o jsonpath="{.spec.clusterIP}")
if [ $? -ne 0 ]; then
>&2 echo "Error! The IP address for DNS service couldn't be determined automatically. Please specify the DNS-IP with the '-i' option."
exit 2
fi
fi
# Unless -s was given, fold the legacy kube-dns ConfigMap settings into the
# substitution variables used below.
if [[ "${SKIP}" -ne 1 ]] ; then
translate-kube-dns-configmap
fi
# Multi-line stanza values must have every embedded newline escaped with a
# backslash so sed accepts them inside a single replacement.
orig=$'\n'
replace=$'\\\n'
# Render the template to stdout. '?' and '@' are used as sed delimiters for
# the substitutions whose replacement text may itself contain '/'.
sed -e "s/CLUSTER_DNS_IP/$CLUSTER_DNS_IP/g" \
-e "s/CLUSTER_DOMAIN/$CLUSTER_DOMAIN/g" \
-e "s?REVERSE_CIDRS?$REVERSE_CIDRS?g" \
-e "s@STUBDOMAINS@${STUBDOMAINS//$orig/$replace}@g" \
-e "s@FEDERATIONS@${FEDERATIONS//$orig/$replace}@g" \
-e "s/UPSTREAMNAMESERVER/$UPSTREAM/g" \
"${YAML_TEMPLATE}"
coredns.yml文件内容
[root@kubernetes coredns]# cat coredns.yml
# Rendered CoreDNS manifest (output of deploy.sh -i 10.96.0.10, applied with
# kubectl apply -f coredns.yml).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
- apiGroups:
  - ""
  resources:
  - endpoints
  - services
  - pods
  - namespaces
  verbs:
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          pods insecure
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf
        #forward . 10.96.0.100
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  replicas: 2
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        beta.kubernetes.io/os: linux
      containers:
      - name: coredns
        image: coredns/coredns:1.6.2
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  # The clusterIP below must fall inside the cluster's service CIDR.
  clusterIP: 10.96.0.10
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP