从 Kubernetes v1.3 开始,DNS 是由插件管理器(addon manager)作为集群附加组件(add-on)自动启动的内置服务。
正在运行的 Kubernetes DNS pod 包含 3 个容器:
kubedns:kubedns 进程监视 Kubernetes 主服务器上服务和端点的变化,并维护内存中的查找结构来响应 DNS 请求。
dnsmasq:dnsmasq 容器提供 DNS 缓存以提高性能。
healthz:healthz 容器对 dnsmasq 和 kubedns 执行双重健康检查,并提供单一的健康检查端点。
DNS pod 作为具有静态 IP 的 Kubernetes 服务对外公开。IP 一旦分配,kubelet 就会把通过 --cluster-dns= 标志配置的这个 DNS 地址传递给每个容器。
DNS 名称也需要域名。本地域名可以在 kubelet 中通过 --cluster-domain= 标志配置。
Kubernetes集群DNS服务器基于 SkyDNS库。它支持正向查找(A记录),服务查找(SRV记录)和反向IP地址查找(PTR记录)。
一、准备
1、镜像
andyshinn/dnsmasq:2.72
gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1
gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
2、创建文件夹
/etc/dnsmasq.d
/etc/dnsmasq.d-available
/etc/kubernetes/addons/dnsmasq/
/etc/kubernetes/addons/kube-dns/
二、配置文件
1、/etc/dnsmasq.d-available/01-kube-dns.conf
2、/etc/dnsmasq.d/01-kube-dns.conf(两个文件内容相同,可以用软链接将后者指向前者)
# dnsmasq configuration shared by /etc/dnsmasq.d-available and /etc/dnsmasq.d.
# NOTE(review): original comment said "Listen on localhost", but
# listen-address=0.0.0.0 makes dnsmasq answer on all interfaces.
bind-interfaces
listen-address=0.0.0.0
# Also serve entries from the host's /etc/hosts.
addn-hosts=/etc/hosts
# Query upstream servers strictly in the order they appear below.
strict-order
# Forward k8s domain to kube-dns (clusterIP of the kube-dns Service).
server=/cluster.local/10.233.0.3
# Reply NXDOMAIN to bogus domains requests like com.cluster.local.cluster.local
# (search-path expansion artifacts), instead of forwarding them upstream.
local=/cluster.local.default.svc.cluster.local./default.svc.cluster.local.default.svc.cluster.local./com.default.svc.cluster.local./cluster.local.svc.cluster.local./svc.cluster.local.svc.cluster.local./com.svc.cluster.local./
# Set upstream dns servers for everything else.
server=192.168.1.1
# Ignore /etc/resolv.conf; use only the server= lines above.
no-resolv
# Answer reverse lookups for private address ranges locally (never forward).
bogus-priv
# Do not cache negative (NXDOMAIN) replies.
no-negcache
cache-size=1000
# Maximum number of concurrent forwarded queries.
dns-forward-max=150
# Keep TTLs short so cluster DNS changes propagate quickly:
# cap cached-entry TTL at 10s and the TTL handed to clients at 20s.
max-cache-ttl=10
max-ttl=20
# "-" sends the log to stderr (container-friendly).
log-facility=-
三、部署dnsmasq
1、/etc/kubernetes/addons/dnsmasq/dnsmasq-clusterrolebinding.yml
---
# Grants the dnsmasq ServiceAccount cluster-wide privileges.
# NOTE(review): cluster-admin is far broader than dnsmasq needs;
# consider a narrowly scoped Role instead.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: dnsmasq
  # NOTE(review): ClusterRoleBindings are cluster-scoped; this namespace
  # field is ignored by the API server.
  namespace: "kube-system"
subjects:
  - kind: ServiceAccount
    name: dnsmasq
    namespace: "kube-system"
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
2、/etc/kubernetes/addons/dnsmasq/dnsmasq-serviceaccount.yml
---
# ServiceAccount used by the dnsmasq and dnsmasq-autoscaler Deployments.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dnsmasq
  namespace: "kube-system"
  labels:
    kubernetes.io/cluster-service: "true"
3、/etc/kubernetes/addons/dnsmasq/dnsmasq-deploy.yml
---
# dnsmasq caching layer in front of kube-dns; reads its config from the
# host directories /etc/dnsmasq.d and /etc/dnsmasq.d-available.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: dnsmasq
  namespace: "kube-system"
  labels:
    k8s-app: dnsmasq
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    matchLabels:
      k8s-app: dnsmasq
  strategy:
    type: "Recreate"
  template:
    metadata:
      labels:
        k8s-app: dnsmasq
        kubernetes.io/cluster-service: "true"
    spec:
      tolerations:
        - effect: NoSchedule
          operator: Exists
      containers:
        - name: dnsmasq
          image: "andyshinn/dnsmasq:2.72"
          imagePullPolicy: IfNotPresent
          command:
            - dnsmasq
          args:
            - -k  # keep in foreground so the container doesn't exit
            - -C
            - /etc/dnsmasq.d/01-kube-dns.conf
          securityContext:
            capabilities:
              add:
                - NET_ADMIN  # required to bind privileged port 53
          resources:
            limits:
              cpu: 100m
              memory: 170Mi
            requests:
              cpu: 40m
              memory: 50Mi
          ports:
            - name: dns
              containerPort: 53
              protocol: UDP
            - name: dns-tcp
              containerPort: 53
              protocol: TCP
          volumeMounts:
            - name: etcdnsmasqd
              mountPath: /etc/dnsmasq.d
            - name: etcdnsmasqdavailable
              mountPath: /etc/dnsmasq.d-available
      volumes:
        - name: etcdnsmasqd
          hostPath:
            path: /etc/dnsmasq.d
        - name: etcdnsmasqdavailable
          hostPath:
            path: /etc/dnsmasq.d-available
      dnsPolicy: Default  # Don't use cluster DNS.
4、/etc/kubernetes/addons/dnsmasq/dnsmasq-svc.yml
---
# Exposes the dnsmasq cache at the fixed clusterIP 10.233.0.2
# (the address handed to kubelets via --cluster-dns).
apiVersion: v1
kind: Service
metadata:
  labels:
    kubernetes.io/cluster-service: "true"
    k8s-app: dnsmasq
  name: dnsmasq
  namespace: kube-system
spec:
  ports:
    - port: 53
      name: dns-tcp
      targetPort: 53
      protocol: TCP
    - port: 53
      name: dns
      targetPort: 53
      protocol: UDP
  type: ClusterIP
  clusterIP: 10.233.0.2
  selector:
    k8s-app: dnsmasq
5、/etc/kubernetes/addons/dnsmasq/dnsmasq-autoscaler.yml
---
# Scales the dnsmasq Deployment proportionally to cluster size
# (one replica per 10 nodes, never a single point of failure).
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: dnsmasq-autoscaler
  namespace: kube-system
  labels:
    k8s-app: dnsmasq-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  template:
    metadata:
      labels:
        k8s-app: dnsmasq-autoscaler
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
    spec:
      serviceAccountName: dnsmasq
      tolerations:
        - effect: NoSchedule
          operator: Exists
      containers:
        - name: autoscaler
          image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1
          resources:
            requests:
              cpu: "20m"
              memory: "10Mi"
          command:
            - /cluster-proportional-autoscaler
            - --namespace=kube-system
            - --configmap=dnsmasq-autoscaler
            - --target=Deployment/dnsmasq
            # When cluster is using large nodes (with more cores), "coresPerReplica" should dominate.
            # If using small nodes, "nodesPerReplica" should dominate.
            - --default-params={"linear":{"nodesPerReplica":10,"preventSinglePointFailure":true}}
            - --logtostderr=true
            - --v=2
6、创建
kubectl create -f /etc/kubernetes/addons/dnsmasq/
四、部署kube-dns
1、/etc/kubernetes/addons/kube-dns/kubedns-autoscaler-clusterrole.yml
---
# Minimal permissions the cluster-proportional-autoscaler needs:
# count nodes, scale its target workload, and persist its params ConfigMap.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: cluster-proportional-autoscaler
  # NOTE(review): ClusterRoles are cluster-scoped; this namespace
  # field is ignored by the API server.
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["list"]
  - apiGroups: [""]
    resources: ["replicationcontrollers/scale"]
    verbs: ["get", "update"]
  - apiGroups: ["extensions"]
    resources: ["deployments/scale", "replicasets/scale"]
    verbs: ["get", "update"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "create"]
2、/etc/kubernetes/addons/kube-dns/kubedns-autoscaler-clusterrolebinding.yml
---
# Binds the autoscaler ServiceAccount to its scoped ClusterRole.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: cluster-proportional-autoscaler
  # NOTE(review): ClusterRoleBindings are cluster-scoped; this namespace
  # field is ignored by the API server.
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: cluster-proportional-autoscaler
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: cluster-proportional-autoscaler
  apiGroup: rbac.authorization.k8s.io
3、/etc/kubernetes/addons/kube-dns/kubedns-autoscaler-sa.yml
---
# ServiceAccount used by the kubedns-autoscaler Deployment.
kind: ServiceAccount
apiVersion: v1
metadata:
  name: cluster-proportional-autoscaler
  namespace: kube-system
4、/etc/kubernetes/addons/kube-dns/kubedns-autoscaler.yml
---
# Scales the kube-dns Deployment proportionally to cluster size
# (one replica per 10 nodes, minimum of 2).
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kubedns-autoscaler
  namespace: kube-system
  labels:
    k8s-app: kubedns-autoscaler
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  template:
    metadata:
      labels:
        k8s-app: kubedns-autoscaler
    spec:
      tolerations:
        - effect: NoSchedule
          operator: Exists
      containers:
        - name: autoscaler
          image: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.1"
          resources:
            requests:
              cpu: "20m"
              memory: "10Mi"
          command:
            - /cluster-proportional-autoscaler
            - --namespace=kube-system
            - --configmap=kubedns-autoscaler
            # Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
            - --target=Deployment/kube-dns
            - --default-params={"linear":{"nodesPerReplica":10,"min":2}}
            - --logtostderr=true
            - --v=2
      serviceAccountName: cluster-proportional-autoscaler
5、/etc/kubernetes/addons/kube-dns/kubedns-deploy.yml
---
# kube-dns Deployment: kubedns (API watcher + resolver on 10053),
# dnsmasq nanny (cache on 53, forwards cluster zones to kubedns),
# and sidecar (health probes + metrics on 10054).
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-dns
  namespace: "kube-system"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  # replicas: not specified here:
  # 1. In order to make Addon Manager do not reconcile this replicas parameter.
  # 2. Default is 1.
  # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    rollingUpdate:
      maxSurge: 10%
      maxUnavailable: 0
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
        - effect: NoSchedule
          operator: Exists
      volumes:
        - name: kube-dns-config
          configMap:
            name: kube-dns
            optional: true  # pod still starts if the ConfigMap is absent
      containers:
        - name: kubedns
          image: "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7"
          imagePullPolicy: IfNotPresent
          resources:
            # TODO: Set memory limits when we've profiled the container for large
            # clusters, then set request = limit to keep this container in
            # guaranteed class. Currently, this container falls into the
            # "burstable" category so the kubelet doesn't backoff from restarting it.
            limits:
              memory: 170Mi
            requests:
              cpu: 40m
              memory: 50Mi
          livenessProbe:
            httpGet:
              path: /healthcheck/kubedns
              port: 10054
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 5
          readinessProbe:
            httpGet:
              path: /readiness
              port: 8081
              scheme: HTTP
            # we poll on pod startup for the Kubernetes master service and
            # only setup the /readiness HTTP server once that's available.
            initialDelaySeconds: 3
            timeoutSeconds: 5
          args:
            - --domain=cluster.local.
            - --dns-port=10053
            - --config-dir=/kube-dns-config
            - --v=2
          env:
            - name: PROMETHEUS_PORT
              value: "10055"
          ports:
            - containerPort: 10053
              name: dns-local
              protocol: UDP
            - containerPort: 10053
              name: dns-tcp-local
              protocol: TCP
            - containerPort: 10055
              name: metrics
              protocol: TCP
          volumeMounts:
            - name: kube-dns-config
              mountPath: /kube-dns-config
        - name: dnsmasq
          image: "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7"
          imagePullPolicy: IfNotPresent
          livenessProbe:
            httpGet:
              path: /healthcheck/dnsmasq
              port: 10054
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 5
          args:
            # nanny flags; everything after "--" is passed to dnsmasq itself
            - -v=2
            - -logtostderr
            - -configDir=/etc/k8s/dns/dnsmasq-nanny
            - -restartDnsmasq=true
            - --
            - -k
            - --cache-size=1000
            - --log-facility=-
            - --server=/cluster.local/127.0.0.1#10053
            - --server=/in-addr.arpa/127.0.0.1#10053
            - --server=/ip6.arpa/127.0.0.1#10053
          ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
          # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
          resources:
            requests:
              cpu: 150m
              memory: 20Mi
          volumeMounts:
            - name: kube-dns-config
              mountPath: /etc/k8s/dns/dnsmasq-nanny
        - name: sidecar
          image: "gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7"
          livenessProbe:
            httpGet:
              path: /metrics
              port: 10054
              scheme: HTTP
            initialDelaySeconds: 60
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 5
          args:
            - --v=2
            - --logtostderr
            # Periodically resolve a well-known name through each DNS path.
            - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
            - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
          ports:
            - containerPort: 10054
              name: metrics
              protocol: TCP
          resources:
            requests:
              memory: 20Mi
              cpu: 10m
      dnsPolicy: Default # Don't use cluster DNS.
      serviceAccountName: kube-dns
6、/etc/kubernetes/addons/kube-dns/kube-dns-sa.yml
---
# ServiceAccount used by the kube-dns Deployment.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
---
# Exposes kube-dns at the fixed clusterIP 10.233.0.3
# (the upstream the dnsmasq config forwards cluster.local to).
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.233.0.3
  ports:
    - name: dns
      port: 53
      protocol: UDP
    - name: dns-tcp
      port: 53
      protocol: TCP
7、创建
kubectl create -f /etc/kubernetes/addons/kube-dns/