Deploying Kubernetes with kubeadm

Kubernetes Cluster Deployment


This document walks through deploying a Kubernetes cluster automatically with kubeadm.

System Environment

OS: CentOS 7.3
Node roles: 172.17.1.52  k8s-master
            172.17.1.53  k8s-slave

Initialize the System Environment

# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld

# Adjust kernel parameters
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# Apply the settings with: sysctl -p /etc/sysctl.d/k8s.conf (or sysctl --system)

# If you see errors like these:
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-ip6tables: No such file or directory
sysctl: cannot stat /proc/sys/net/bridge/bridge-nf-call-iptables: No such file or directory
# Fix: install bridge-utils, then load the bridge and br_netfilter modules
yum install -y bridge-utils.x86_64
modprobe bridge
modprobe br_netfilter

# Disable SELinux
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
setenforce 0

# Disable swap
swapoff -a
echo "vm.swappiness=0" >> /etc/sysctl.d/k8s.conf

Configure Package Repositories


#Configure the Kubernetes repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
        http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
#Configure the Docker repository
cat <<EOF > /etc/yum.repos.d/docker.repo
[dockerrepo]
name=Docker
baseurl=https://yum.dockerproject.org/repo/main/centos/7
enabled=1
gpgcheck=0
EOF
#Configure the local CentOS 7.3 repository
cat <<EOF > /etc/yum.repos.d/local.repo
[local]
name=local
baseurl=http://172.17.1.80:8090/centos73
gpgcheck=0
enabled=1
EOF
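
With the three repo files in place, refresh the metadata and confirm yum can see all of them:

yum clean all
yum repolist    # should list the kubernetes, dockerrepo, and local repos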

Install Docker 1.12.6

docker-engine-1.12.6-1.el7.centos.x86_64.rpm

docker-engine-selinux-1.12.6-1.el7.centos.noarch.rpm
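
Assuming the two RPMs above have been downloaded to the current directory (for example from the local repository configured earlier), yum can install them with dependency resolution; a sketch of that step:

yum localinstall -y docker-engine-selinux-1.12.6-1.el7.centos.noarch.rpm \
                    docker-engine-1.12.6-1.el7.centos.x86_64.rpm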


# Start Docker and enable it at boot
systemctl enable docker && systemctl start docker
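
It is worth confirming the Docker version and its cgroup driver, since the kubelet's --cgroup-driver flag (configured in the next section) must match it:

docker version
docker info 2>/dev/null | grep -i 'cgroup driver'   # expect: cgroupfs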

Install Kubernetes


#List available package versions
yum list --showduplicates | grep 'kubeadm\|kubectl\|kubelet'
#Install the packages
yum install -y kubelet kubeadm kubectl kubernetes-cni
#Switch the kubelet cgroup driver to match Docker (cgroupfs)
sed -i 's/KUBELET_CGROUP_ARGS=--cgroup-driver=systemd/KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs/' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
#Reload systemd units, then start the service and enable it at boot
systemctl daemon-reload
systemctl start kubelet && systemctl enable kubelet
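
The bare yum install above pulls the newest packages from the mirror. To pin the tools to the v1.7.5 cluster deployed in this document (a sketch; the exact release strings available depend on the mirror), install with explicit versions instead:

yum install -y kubelet-1.7.5 kubeadm-1.7.5 kubectl-1.7.5 kubernetes-cni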

Download the Required Images

  • (Images can be pulled from a domestic mirror site and then retagged to the gcr.io names below.)

Image Name                                                 v1.7 release branch   v1.8 release branch
gcr.io/google_containers/kube-apiserver-${ARCH}            v1.7.x                v1.8.x
gcr.io/google_containers/kube-controller-manager-${ARCH}   v1.7.x                v1.8.x
gcr.io/google_containers/kube-scheduler-${ARCH}            v1.7.x                v1.8.x
gcr.io/google_containers/kube-proxy-${ARCH}                v1.7.x                v1.8.x
gcr.io/google_containers/etcd-${ARCH}                      3.0.17                3.0.17
gcr.io/google_containers/pause-${ARCH}                     3.0                   3.0
gcr.io/google_containers/k8s-dns-sidecar-${ARCH}           1.14.4                1.14.4
gcr.io/google_containers/k8s-dns-kube-dns-${ARCH}          1.14.4                1.14.4
gcr.io/google_containers/k8s-dns-dnsmasq-nanny-${ARCH}     1.14.4                1.14.4
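
A minimal pull-and-retag sketch for the v1.7 images above. The mirror namespace shown here is only an illustration, not a verified source; substitute whichever mirror actually hosts these images:

# MIRROR is a hypothetical example namespace; replace it with a real mirror
MIRROR=registry.cn-hangzhou.aliyuncs.com/google_containers
ARCH=amd64
for image in kube-apiserver-${ARCH}:v1.7.5 kube-controller-manager-${ARCH}:v1.7.5 \
             kube-scheduler-${ARCH}:v1.7.5 kube-proxy-${ARCH}:v1.7.5 \
             etcd-${ARCH}:3.0.17 pause-${ARCH}:3.0 \
             k8s-dns-sidecar-${ARCH}:1.14.4 k8s-dns-kube-dns-${ARCH}:1.14.4 \
             k8s-dns-dnsmasq-nanny-${ARCH}:1.14.4; do
    docker pull ${MIRROR}/${image}
    # Retag to the gcr.io name that kubeadm expects
    docker tag ${MIRROR}/${image} gcr.io/google_containers/${image}
done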

Initialize the Kubernetes Cluster


kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=172.17.1.52 --skip-preflight-checks --kubernetes-version=v1.7.5
#Reset the node state (only if you need to start over)
kubeadm reset
#Set up kubectl access
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
#Mark the master as unschedulable
kubectl taint nodes k8s-master node-role.kubernetes.io/master=:NoSchedule
#Undo that taint so pods can be scheduled on the master
kubectl taint nodes --all node-role.kubernetes.io/master-

Output of a successful initialization:


[root@k8s-master manifests]# kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=172.17.1.52 --skip-preflight-checks --kubernetes-version=v1.7.5
[kubeadm] WARNING: kubeadm is in beta, please do not use it for production clusters.
[init] Using Kubernetes version: v1.7.11
[init] Using Authorization modes: [Node RBAC]
[preflight] Skipping pre-flight checks
[kubeadm] WARNING: starting in 1.8, tokens expire after 24 hours by default (if you require a non-expiring token use --token-ttl 0)
[certificates] Generated CA certificate and key.
[certificates] Generated API server certificate and key.
[certificates] API Server serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 172.17.1.52]
[certificates] Generated API server kubelet client certificate and key.
[certificates] Generated service account token signing key and public key.
[certificates] Generated front-proxy CA certificate and key.
[certificates] Generated front-proxy client certificate and key.
[certificates] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/scheduler.conf"
[apiclient] Created API client, waiting for the control plane to become ready
[apiclient] All control plane components are healthy after 205.500957 seconds
[token] Using token: 468fb4.3e3da54eae58c115
[apiconfig] Created RBAC rules
[addons] Applied essential addon: kube-proxy
[addons] Applied essential addon: kube-dns

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run (as a regular user):

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  http://kubernetes.io/docs/admin/addons/

You can now join any number of machines by running the following on each node
as root:

  kubeadm join --token 468fb4.3e3da54eae58c115 172.17.1.52:6443
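
Before installing a network plugin, it is worth confirming the control-plane pods are up; kube-dns will stay in Pending/ContainerCreating until a CNI plugin is installed:

kubectl get pods -n kube-system -o wide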


View cluster node information:


[root@k8s-master ~]# kubectl get nodes
NAME         STATUS    AGE       VERSION
k8s-master   Ready     34m       v1.7.5

Initialization errors and fixes:

Problem 1:


[root@k8s-master ~]# kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address=172.17.1.52 --skip-preflight-checks --kubernetes-version=stable-1.7.5
[kubeadm] WARNING: kubeadm is in beta, please do not use it for production clusters.
unable to fetch release information. URL: "https://storage.googleapis.com/kubernetes-release/release/stable-1.7.5.txt" Status: 404 Not Found
#Fix:
Pass an explicit version ("--kubernetes-version=v1.7.5"), run kubeadm reset, then run kubeadm init again.

Problem 2:


Dec 05 18:49:21 k8s-master kubelet[106548]: W1205 18:49:21.323220  106548 cni.go:189] Unable to update cni config: No networks found in /etc/cni/net.d
Dec 05 18:49:21 k8s-master kubelet[106548]: E1205 18:49:21.323313  106548 kubelet.go:2136] Container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized
#Fix:
Edit /etc/systemd/system/kubelet.service.d/10-kubeadm.conf and set:
Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/ --cni-bin-dir=/opt/cni/bin"
(See also the corrected --cni-conf-dir=/etc/cni/net.d setting in the "Add Nodes" section below.)

Problem 3:


Dec 05 18:48:49 k8s-master kubelet[106548]: E1205 18:48:49.825136  106548 reflector.go:190] k8s.io/kubernetes/pkg/kubelet/config/apiserver.go:46: Failed to list *v1.Pod: Get https://172.17.1.52:6443/api/v1/pods?fieldSelector=spec.nodeName%3Dk8s-master&resourceVersion=0: dial tcp 172.17.1.52:6443: getsockopt: connection refused
Dec 05 18:48:49 k8s-master kubelet[106548]: E1205 18:48:49.827944  106548 reflector.go:190] k8s.io/kubernetes/pkg/kubelet/kubelet.go:400: Failed to list *v1.Service: Get https://172.17.1.52:6443/api/v1/services?resourceVersion=0: dial tcp 172.17.1.52:6443: getsockopt: connection refused
Dec 05 18:48:49 k8s-master kubelet[106548]: E1205 18:48:49.839976  106548 reflector.go:190] k8s.io/kubernetes/pkg/kubelet/kubelet.go:408: Failed to list *v1.Node: Get https://172.17.1.52:6443/api/v1/nodes?fieldSelector=metadata.name%3Dk8s-master&resourceVersion=0: dial tcp 172.17.1.52:6443: getsockopt: connection refused
Dec 05 18:48:50 k8s-master kubelet[106548]: E1205 18:48:50.293031  106548 event.go:209] Unable to write event: 'Post https://172.17.1.52:6443/api/v1/namespaces/kube-system/events: dial tcp 172.17.1.52:6443: getsockopt: connection refused' (may retry after sleeping)
Dec 05 18:48:50 k8s-master kubelet[106548]: W1205 18:48:50.677732  106548 status_manager.go:431] Failed to get status for pod "etcd-k8s-master_kube-system(5802ae0664772d031dee332b3c63498e)": Get https://172.17.1.52:6443/api/v1/namespaces/kube-system/pods/etcd-k8s-master: dial tcp 172.17.1.52:6443: getsockopt: connection refused
#Fix:
Start the firewall:
systemctl start firewalld
Add the required firewall rules:
firewall-cmd --zone=public --add-port=80/tcp --permanent
firewall-cmd --zone=public --add-port=6443/tcp --permanent
firewall-cmd --zone=public --add-port=2379-2380/tcp --permanent
firewall-cmd --zone=public --add-port=10250-10255/tcp --permanent
firewall-cmd --zone=public --add-port=30000-32767/tcp --permanent
firewall-cmd --reload
firewall-cmd --zone=public --list-ports

Problem 4:


[root@localhost kubernetes-1.9.1]# kubectl get node
Unable to connect to the server: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "kubernetes")

#Fix:
[root@localhost kubernetes-1.9.1]# mv $HOME/.kube $HOME/.kube.bak
[root@localhost kubernetes-1.9.1]# mkdir -p $HOME/.kube
[root@localhost kubernetes-1.9.1]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@localhost kubernetes-1.9.1]# sudo chown $(id -u):$(id -g) $HOME/.kube/config

Install a network plugin:

1. Weave network mode


[root@k8s-master k8s.images]# kubectl apply -f weave-daemonset-k8s-1.6.yaml
serviceaccount "weave-net" created
clusterrole "weave-net" created
clusterrolebinding "weave-net" created
role "weave-net-kube-peer" created
rolebinding "weave-net-kube-peer" created
daemonset "weave-net" created

# Contents of weave-daemonset-k8s-1.6.yaml
apiVersion: v1
kind: List
items:
  - apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: weave-net
      labels:
        name: weave-net
      namespace: kube-system
  - apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: ClusterRole
    metadata:
      name: weave-net
      labels:
        name: weave-net
    rules:
      - apiGroups:
          - ''
        resources:
          - pods
          - namespaces
          - nodes
        verbs:
          - get
          - list
          - watch
      - apiGroups:
          - extensions
        resources:
          - networkpolicies
        verbs:
          - get
          - list
          - watch
  - apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: ClusterRoleBinding
    metadata:
      name: weave-net
      labels:
        name: weave-net
    roleRef:
      kind: ClusterRole
      name: weave-net
      apiGroup: rbac.authorization.k8s.io
    subjects:
      - kind: ServiceAccount
        name: weave-net
        namespace: kube-system
  - apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: Role
    metadata:
      name: weave-net-kube-peer
      namespace: kube-system
      labels:
        name: weave-net-kube-peer
    rules:
      - apiGroups:
          - ''
        resources:
          - configmaps
        resourceNames:
          - weave-net
        verbs:
          - get
          - update
      - apiGroups:
          - ''
        resources:
          - configmaps
        verbs:
          - create
  - apiVersion: rbac.authorization.k8s.io/v1beta1
    kind: RoleBinding
    metadata:
      name: weave-net-kube-peer
      namespace: kube-system
      labels:
        name: weave-net-kube-peer
    roleRef:
      kind: Role
      name: weave-net-kube-peer
      apiGroup: rbac.authorization.k8s.io
    subjects:
      - kind: ServiceAccount
        name: weave-net
        namespace: kube-system
  - apiVersion: extensions/v1beta1
    kind: DaemonSet
    metadata:
      name: weave-net
      labels:
        name: weave-net
      namespace: kube-system
    spec:
      template:
        metadata:
          labels:
            name: weave-net
        spec:
          containers:
            - name: weave
              command:
                - /home/weave/launch.sh
              env:
                - name: HOSTNAME
                  valueFrom:
                    fieldRef:
                      apiVersion: v1
                      fieldPath: spec.nodeName
              image: 'weaveworks/weave-kube:2.1.3'
              livenessProbe:
                httpGet:
                  host: 127.0.0.1
                  path: /status
                  port: 6784
                initialDelaySeconds: 30
              resources:
                requests:
                  cpu: 10m
              securityContext:
                privileged: true
              volumeMounts:
                - name: weavedb
                  mountPath: /weavedb
                - name: cni-bin
                  mountPath: /host/opt
                - name: cni-bin2
                  mountPath: /host/home
                - name: cni-conf
                  mountPath: /host/etc
                - name: dbus
                  mountPath: /host/var/lib/dbus
                - name: lib-modules
                  mountPath: /lib/modules
            - name: weave-npc
              env:
                - name: HOSTNAME
                  valueFrom:
                    fieldRef:
                      apiVersion: v1
                      fieldPath: spec.nodeName
              image: 'weaveworks/weave-npc:2.1.3'
              resources:
                requests:
                  cpu: 10m
              securityContext:
                privileged: true
          hostNetwork: true
          hostPID: true
          restartPolicy: Always
          securityContext:
            seLinuxOptions: {}
          serviceAccountName: weave-net
          tolerations:
            - effect: NoSchedule
              operator: Exists
          volumes:
            - name: weavedb
              hostPath:
                path: /var/lib/weave
            - name: cni-bin
              hostPath:
                path: /opt
            - name: cni-bin2
              hostPath:
                path: /home
            - name: cni-conf
              hostPath:
                path: /etc
            - name: dbus
              hostPath:
                path: /var/lib/dbus
            - name: lib-modules
              hostPath:
                path: /lib/modules
      updateStrategy:
        type: RollingUpdate
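
Once the manifest is applied, the weave-net DaemonSet should start one pod per node, and nodes flip to Ready as soon as the CNI is functional. A quick check:

kubectl get pods -n kube-system -l name=weave-net -o wide
kubectl get nodes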

2. Flannel network mode


[root@k8s-master k8s.images]# kubectl apply -f kube-flannel.yml
clusterrole "flannel" created
clusterrolebinding "flannel" created
serviceaccount "flannel" created
configmap "kube-flannel-cfg" created
daemonset "kube-flannel-ds" created

# Contents of kube-flannel.yml
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
rules:
  - apiGroups:
      - ""
    resources:
      - pods
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes/status
    verbs:
      - patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: flannel
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: flannel
subjects:
- kind: ServiceAccount
  name: flannel
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: flannel
  namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: kube-flannel-cfg
  namespace: kube-system
  labels:
    tier: node
    app: flannel
data:
  cni-conf.json: |
    {
      "name": "cbr0",
      "type": "flannel",
      "delegate": {
        "isDefaultGateway": true
      }
    }
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: kube-flannel-ds
  namespace: kube-system
  labels:
    tier: node
    app: flannel
spec:
  template:
    metadata:
      labels:
        tier: node
        app: flannel
    spec:
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/arch: amd64
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      serviceAccountName: flannel
      initContainers:
      - name: install-cni
        image: quay.io/coreos/flannel:v0.9.0-amd64
        command:
        - cp
        args:
        - -f
        - /etc/kube-flannel/cni-conf.json
        - /etc/cni/net.d/10-flannel.conf
        volumeMounts:
        - name: cni
          mountPath: /etc/cni/net.d
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.9.0-amd64
        command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
        securityContext:
          privileged: true
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        volumeMounts:
        - name: run
          mountPath: /run
        - name: flannel-cfg
          mountPath: /etc/kube-flannel/
      volumes:
        - name: run
          hostPath:
            path: /run
        - name: cni
          hostPath:
            path: /etc/cni/net.d
        - name: flannel-cfg
          configMap:
            name: kube-flannel-cfg
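
Note that the Network value in net-conf.json (10.244.0.0/16) must match the --pod-network-cidr passed to kubeadm init. After applying the manifest, a quick check that the DaemonSet is running:

kubectl get ds kube-flannel-ds -n kube-system
kubectl get pods -n kube-system -o wide | grep flannel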

Add Nodes


kubeadm join --token 468fb4.3e3da54eae58c115 172.17.1.52:6443
#Configure kubectl access on the slave (see the sketch below)
Copy admin.conf from the master into /root/.kube/ on the slave (named "config"),
then run: chown $(id -u):$(id -g) $HOME/.kube/config
#Install the flannel network
kubectl apply -f kube-flannel.yml
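
A sketch of the admin.conf copy step, assuming root SSH access from the slave to the master:

mkdir -p /root/.kube
scp root@172.17.1.52:/etc/kubernetes/admin.conf /root/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
# The new node should then appear in the cluster:
kubectl get nodes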

If the join fails, check the following file and modify it as shown:


[root@k8s-slave kubernetes]# cat /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
[Service]
Environment="KUBELET_KUBECONFIG_ARGS=--kubeconfig=/etc/kubernetes/kubelet.conf --require-kubeconfig=true"
Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true"
#Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/ --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin"
Environment="KUBELET_DNS_ARGS=--cluster-dns=10.96.0.10 --cluster-domain=cluster.local"
Environment="KUBELET_AUTHZ_ARGS=--authorization-mode=Webhook --client-ca-file=/etc/kubernetes/pki/ca.crt"
Environment="KUBELET_CADVISOR_ARGS=--cadvisor-port=0"
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs"
ExecStart=
ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_SYSTEM_PODS_ARGS $KUBELET_NETWORK_ARGS $KUBELET_DNS_ARGS $KUBELET_AUTHZ_ARGS $KUBELET_CADVISOR_ARGS $KUBELET_CGROUP_ARGS $KUBELET_EXTRA_ARGS

# After editing, restart Docker and the kubelet
systemctl restart docker
systemctl daemon-reload
systemctl start kubelet

Install the Dashboard


# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.6 (RBAC enabled).
#
# Example usage: kubectl create -f 

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
  labels:
    k8s-app: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.8.1
        ports:
        - containerPort: 9090
          protocol: TCP
        args:
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          # - --apiserver-host=http://my-address:port
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
      serviceAccountName: kubernetes-dashboard
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 9090
  selector:
    k8s-app: kubernetes-dashboard
  • Expose the dashboard service as a NodePort


kubectl expose deployment kubernetes-dashboard -n kube-system --type=NodePort --name=dashboard-test --port=9090
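
A --type=NodePort service gets a random high port mapped to container port 9090; look it up and open the UI in a browser (the assigned port differs per cluster):

kubectl get svc dashboard-test -n kube-system
# Then browse to http://<any-node-ip>:<nodeport>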


Install the Heapster Add-on

Heapster supplies the resource-monitoring charts shown on the dashboard.


#Prepare the image (pull from a mirror, then retag)
images=heapster-amd64-v1.4.0:v1.4.0
docker pull docker.io/wanghkkk/$images
docker tag docker.io/wanghkkk/$images k8s.gcr.io/heapster-amd64:v1.4.2

#Contents of heapster-controller.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: heapster
  namespace: kube-system
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: heapster
  namespace: kube-system
spec:
  replicas: 1
  template:
    metadata:
      labels:
        task: monitoring
        k8s-app: heapster
    spec:
      serviceAccountName: heapster
      containers:
      - name: heapster
        image: k8s.gcr.io/heapster-amd64:v1.4.2
        imagePullPolicy: IfNotPresent
        command:
        - /heapster
        - --source=kubernetes:https://192.168.8.251:32189
---
apiVersion: v1
kind: Service
metadata:
  labels:
    task: monitoring
    # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)
    # If you are NOT using this as an addon, you should comment out this line.
    kubernetes.io/cluster-service: 'true'
    kubernetes.io/name: Heapster
  name: heapster
  namespace: kube-system
spec:
  ports:
  - port: 80
    targetPort: 8082
  selector:
    k8s-app: heapster
    
#Create the resources
kubectl create -f heapster-controller.yaml
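
Heapster needs a few minutes to collect its first metrics; afterwards the dashboard charts populate and kubectl top starts returning data. A quick check:

kubectl get pods -n kube-system | grep heapster
kubectl top node    # returns per-node CPU/memory once Heapster is reporting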

