Deploying Kubernetes with kubeadm in China - Ubuntu 16.04

Original article: https://blog.csdn.net/u010827484/article/details/83025404

System information

root@ubuntu:~# cat /etc/*release
DISTRIB_ID=Ubuntu
DISTRIB_RELEASE=16.04
DISTRIB_CODENAME=xenial
DISTRIB_DESCRIPTION="Ubuntu 16.04.2 LTS"
NAME="Ubuntu"
VERSION="16.04.2 LTS (Xenial Xerus)"
ID=ubuntu
ID_LIKE=debian
PRETTY_NAME="Ubuntu 16.04.2 LTS"
VERSION_ID="16.04"
HOME_URL="http://www.ubuntu.com/"
SUPPORT_URL="http://help.ubuntu.com/"
BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"
VERSION_CODENAME=xenial
UBUNTU_CODENAME=xenial

Install docker-ce

Create directories for Docker's configuration and data

root@ubuntu:~# mkdir /etc/docker
root@ubuntu:~# mkdir -p /data/docker

Add the USTC mirror as the Docker apt repository

root@ubuntu:~# apt-get update
root@ubuntu:~# apt-get install -y apt-transport-https ca-certificates curl software-properties-common
root@ubuntu:~# curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -    (fetch the GPG public key; "OK" means it was added successfully)
OK
root@ubuntu:~# add-apt-repository \
>     "deb [arch=amd64] https://mirrors.ustc.edu.cn/docker-ce/linux/$(. /etc/os-release; echo "$ID") \
>     $(lsb_release -cs) \
>     stable"
root@ubuntu:~#

List the available versions from the repository, grep out the 18.06 version string, and install docker-ce 18.06

root@ubuntu:~# apt-get update
Get:1 https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial InRelease [66.2 kB]
Get:2 https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages [4,684 B]                                      
Get:3 http://security.ubuntu.com/ubuntu xenial-security InRelease [107 kB]                                                                  
Hit:4 http://us.archive.ubuntu.com/ubuntu xenial InRelease                                                
Get:5 http://us.archive.ubuntu.com/ubuntu xenial-updates InRelease [109 kB]        
Get:6 http://us.archive.ubuntu.com/ubuntu xenial-backports InRelease [107 kB]
Fetched 394 kB in 5s (72.2 kB/s)   
Reading package lists... Done
root@ubuntu:~# 
root@ubuntu:~# apt-cache madison docker-ce
 docker-ce | 18.06.1~ce~3-0~ubuntu | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 18.06.0~ce~3-0~ubuntu | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 18.03.1~ce-0~ubuntu | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 18.03.0~ce-0~ubuntu | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.12.1~ce-0~ubuntu | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.12.0~ce-0~ubuntu | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.09.1~ce-0~ubuntu | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.09.0~ce-0~ubuntu | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.06.2~ce-0~ubuntu | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.06.1~ce-0~ubuntu | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.06.0~ce-0~ubuntu | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.03.3~ce-0~ubuntu-xenial | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.03.2~ce-0~ubuntu-xenial | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.03.1~ce-0~ubuntu-xenial | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
 docker-ce | 17.03.0~ce-0~ubuntu-xenial | https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial/stable amd64 Packages
root@ubuntu:~# apt-get update && apt-get install -y docker-ce=$(apt-cache madison docker-ce | grep 18.06 | head -1 | awk '{print $3}')
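
Optionally, to keep a later apt upgrade from replacing this specific Docker version, the package can be held (an extra step, not part of the original transcript):

apt-mark hold docker-ce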

Verify that Docker was installed successfully

root@ubuntu:~# docker version
Client:
 Version:           18.06.1-ce
 API version:       1.38
 Go version:        go1.10.3
 Git commit:        e68fc7a
 Built:             Tue Aug 21 17:24:56 2018
 OS/Arch:           linux/amd64
 Experimental:      false

Server:
 Engine:
  Version:          18.06.1-ce
  API version:      1.38 (minimum version 1.12)
  Go version:       go1.10.3
  Git commit:       e68fc7a
  Built:            Tue Aug 21 17:23:21 2018
  OS/Arch:          linux/amd64
  Experimental:     false
root@ubuntu:~# 
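
The directories created at the beginning can now be put to use. A minimal /etc/docker/daemon.json is sketched below: it moves Docker's data directory to /data/docker and adds a registry mirror (the USTC Docker Hub mirror URL here is an assumption; substitute whichever mirror you use), then enables and restarts the service:

# Point Docker's data directory at /data/docker and use a registry mirror
cat > /etc/docker/daemon.json <<EOF
{
  "data-root": "/data/docker",
  "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]
}
EOF
# Start Docker on boot and pick up the new configuration
systemctl enable docker
systemctl restart docker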

Add the user to the docker group

root@ubuntu:~# groupadd docker
groupadd: group 'docker' already exists
root@ubuntu:~# gpasswd -a root docker
Adding user root to group docker
root@ubuntu:~# 

Install kubelet, kubeadm, and kubectl on the master node

1. Add the GPG public key (China-accessible source):

Download kube_apt_key.gpg from https://raw.githubusercontent.com/EagleChen/kubernetes_init/master/kube_apt_key.gpg into the current working directory, then add it with the following commands

root@ubuntu:~# wget https://raw.githubusercontent.com/EagleChen/kubernetes_init/master/kube_apt_key.gpg
--2018-10-10 07:05:40--  https://raw.githubusercontent.com/EagleChen/kubernetes_init/master/kube_apt_key.gpg
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.108.133
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.108.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 663 [application/octet-stream]
Saving to: ‘kube_apt_key.gpg’

kube_apt_key.gpg  100%[============>]     663  --.-KB/s    in 0s      

2018-10-10 07:05:42 (175 MB/s) - ‘kube_apt_key.gpg’ saved [663/663]

root@ubuntu:~# cat kube_apt_key.gpg | sudo apt-key add -
OK
root@ubuntu:~# 

"OK" indicates the key was added successfully

2. Add the package mirror

USTC mirror

root@ubuntu:~# echo "deb [arch=amd64] https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-$(lsb_release -cs) main" | sudo tee -a /etc/apt/sources.list
deb [arch=amd64] https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial main
root@ubuntu:~# 
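
Appending to /etc/apt/sources.list works, but the same entry can also live in its own file, which is easier to update or remove later; an equivalent sketch:

# Keep the Kubernetes repository in a dedicated apt source file
echo "deb [arch=amd64] https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-$(lsb_release -cs) main" \
  > /etc/apt/sources.list.d/kubernetes.list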

Install the packages

root@ubuntu:~# apt-get update
Hit:1 https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu xenial InRelease
Get:2 https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial InRelease [8,993 B]
Ign:2 https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial InRelease
Ign:3 https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 Packages
Get:3 https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 Packages [20.2 kB]
Hit:4 http://us.archive.ubuntu.com/ubuntu xenial InRelease            
Get:5 http://us.archive.ubuntu.com/ubuntu xenial-updates InRelease [109 kB]
Get:6 http://us.archive.ubuntu.com/ubuntu xenial-backports InRelease [107 kB]
Get:7 http://security.ubuntu.com/ubuntu xenial-security InRelease [107 kB]
Fetched 352 kB in 12s (27.6 kB/s)                                     
Reading package lists... Done
W: GPG error: https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial InRelease: The following signatures couldn't be verified because the public key is not available: NO_PUBKEY 6A030B21BA07F4FB
W: The repository 'https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial InRelease' is not signed.
N: Data from such a repository can't be authenticated and is therefore potentially dangerous to use.
N: See apt-secure(8) manpage for repository creation and user configuration details.
root@ubuntu:~# apt-get install -y kubelet kubeadm kubectl --allow-unauthenticated
Reading package lists... Done
Building dependency tree       
Reading state information... Done
The following additional packages will be installed:
  cri-tools ebtables kubernetes-cni socat
The following NEW packages will be installed:
  cri-tools ebtables kubeadm kubectl kubelet kubernetes-cni socat
0 upgraded, 7 newly installed, 0 to remove and 581 not upgraded.
Need to get 54.9 MB of archives.
After this operation, 364 MB of additional disk space will be used.
WARNING: The following packages cannot be authenticated!
  cri-tools kubernetes-cni kubelet kubectl kubeadm
Authentication warning overridden.
Get:1 https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 cri-tools amd64 1.12.0-00 [5,343 kB]
Get:2 http://us.archive.ubuntu.com/ubuntu xenial-updates/main amd64 ebtables amd64 2.0.10.4-3.4ubuntu2.16.04.2 [79.9 kB]
Get:3 https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 kubernetes-cni amd64 0.6.0-00 [5,910 kB]
Get:4 https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 kubelet amd64 1.12.1-00 [24.7 MB]
Get:5 http://us.archive.ubuntu.com/ubuntu xenial/universe amd64 socat amd64 1.7.3.1-1 [321 kB]
Get:6 https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 kubectl amd64 1.12.1-00 [9,594 kB]
Get:7 https://mirrors.ustc.edu.cn/kubernetes/apt kubernetes-xenial/main amd64 kubeadm amd64 1.12.1-00 [8,987 kB]
Fetched 54.9 MB in 30s (1,822 kB/s)                                   
Selecting previously unselected package cri-tools.
(Reading database ... 177444 files and directories currently installed.)
Preparing to unpack .../cri-tools_1.12.0-00_amd64.deb ...
Unpacking cri-tools (1.12.0-00) ...
Selecting previously unselected package ebtables.
Preparing to unpack .../ebtables_2.0.10.4-3.4ubuntu2.16.04.2_amd64.deb ...
Unpacking ebtables (2.0.10.4-3.4ubuntu2.16.04.2) ...
Selecting previously unselected package kubernetes-cni.
Preparing to unpack .../kubernetes-cni_0.6.0-00_amd64.deb ...
Unpacking kubernetes-cni (0.6.0-00) ...
Selecting previously unselected package socat.
Preparing to unpack .../socat_1.7.3.1-1_amd64.deb ...
Unpacking socat (1.7.3.1-1) ...
Selecting previously unselected package kubelet.
Preparing to unpack .../kubelet_1.12.1-00_amd64.deb ...
Unpacking kubelet (1.12.1-00) ...
Selecting previously unselected package kubectl.
Preparing to unpack .../kubectl_1.12.1-00_amd64.deb ...
Unpacking kubectl (1.12.1-00) ...
Selecting previously unselected package kubeadm.
Preparing to unpack .../kubeadm_1.12.1-00_amd64.deb ...
Unpacking kubeadm (1.12.1-00) ...
Processing triggers for man-db (2.7.5-1) ...
Processing triggers for systemd (229-4ubuntu16) ...
Processing triggers for ureadahead (0.100.0-19) ...
Processing triggers for doc-base (0.10.7) ...
Processing 1 added doc-base file...
Setting up cri-tools (1.12.0-00) ...
Setting up ebtables (2.0.10.4-3.4ubuntu2.16.04.2) ...
update-rc.d: warning: start and stop actions are no longer supported; falling back to defaults
Setting up kubernetes-cni (0.6.0-00) ...
Setting up socat (1.7.3.1-1) ...
Setting up kubelet (1.12.1-00) ...
Setting up kubectl (1.12.1-00) ...
Setting up kubeadm (1.12.1-00) ...
Processing triggers for systemd (229-4ubuntu16) ...
Processing triggers for ureadahead (0.100.0-19) ...

Restart kubelet

Some users on RHEL/CentOS 7 have reported issues with traffic being routed incorrectly due to iptables being bypassed. You should ensure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl config, e.g.

root@ubuntu:~# sysctl net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-iptables = 1
root@ubuntu:~# sed -i "s,ExecStart=$,Environment=\"KUBELET_EXTRA_ARGS=--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1\"\nExecStart=,g" /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
root@ubuntu:~# systemctl daemon-reload
root@ubuntu:~# systemctl restart kubelet
root@ubuntu:~# 
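
The sysctl call above only lasts until reboot. To make the bridge setting persistent, it can be written to a sysctl drop-in file, roughly like this (the file name is arbitrary):

# Persist the bridge-netfilter setting across reboots
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system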

Check the kubeadm version and the images it requires

root@ubuntu:~# kubeadm version
kubeadm version: &version.Info{Major:"1", Minor:"12", GitVersion:"v1.12.1", GitCommit:"4ed3216f3ec431b140b1d899130a69fc671678f4", GitTreeState:"clean", BuildDate:"2018-10-05T16:43:08Z", GoVersion:"go1.10.4", Compiler:"gc", Platform:"linux/amd64"}
root@ubuntu:~# kubeadm config images list --kubernetes-version=v1.12.1
k8s.gcr.io/kube-apiserver:v1.12.1
k8s.gcr.io/kube-controller-manager:v1.12.1
k8s.gcr.io/kube-scheduler:v1.12.1
k8s.gcr.io/kube-proxy:v1.12.1
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.2.24
k8s.gcr.io/coredns:1.2.2
root@ubuntu:~# 

Start the services

Create the configuration file

root@ubuntu:~# cat kubeadm.conf 
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
api:
  advertiseAddress: 0.0.0.0
networking:
  podSubnet: 10.244.0.0/16
etcd:
  image: registry.cn-hangzhou.aliyuncs.com/google_containers/etcd-amd64:3.2.24
kubernetesVersion: v1.12.1
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
root@ubuntu:~#
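
The init output further down suggests pre-pulling the images; assuming your kubeadm version accepts --config for this subcommand, the images from the aliyun imageRepository configured above can be fetched ahead of time:

# Pre-pull the control plane images defined in kubeadm.conf
kubeadm config images pull --config kubeadm.conf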

Begin the deployment

Run kubeadm init with the configuration file to start the deployment

root@ubuntu:~# kubeadm init --config kubeadm.conf 
[init] using Kubernetes version: v1.12.1
[preflight] running pre-flight checks
[preflight] Some fatal errors occurred:
	[ERROR Swap]: running with swap on is not supported. Please disable swap
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
root@ubuntu:~# 

An error occurs. The preflight message shows it is caused by swap being enabled. Clean up the environment, turn swap off (swapoff -a), and deploy again.
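
Note that swapoff -a only disables swap until the next reboot. A common way to keep it off permanently is to comment out the swap entry in /etc/fstab (check your fstab first; the pattern below is a broad match):

swapoff -a
# Comment out every fstab line that mounts swap
sed -i '/ swap / s/^/#/' /etc/fstab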

root@ubuntu:~# kubeadm reset 
[reset] WARNING: changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] are you sure you want to proceed? [y/N]: y
[preflight] running pre-flight checks
[reset] stopping the kubelet service
[reset] unmounting mounted directories in "/var/lib/kubelet"
[reset] no etcd manifest found in "/etc/kubernetes/manifests/etcd.yaml". Assuming external etcd
[reset] please manually reset etcd to prevent further issues
[reset] deleting contents of stateful directories: [/var/lib/kubelet /etc/cni/net.d /var/lib/dockershim /var/run/kubernetes]
[reset] deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
root@ubuntu:~# swapoff -a
root@ubuntu:~# kubeadm init --config kubeadm.conf 
[init] using Kubernetes version: v1.12.1
[preflight] running pre-flight checks
[preflight/images] Pulling images required for setting up a Kubernetes cluster
[preflight/images] This might take a minute or two, depending on the speed of your internet connection
[preflight/images] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[preflight] Activating the kubelet service
[certificates] Generated ca certificate and key.
[certificates] Generated apiserver certificate and key.
[certificates] apiserver serving cert is signed for DNS names [ubuntu kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.52.134]
[certificates] Generated apiserver-kubelet-client certificate and key.
[certificates] Generated etcd/ca certificate and key.
[certificates] Generated etcd/server certificate and key.
[certificates] etcd/server serving cert is signed for DNS names [ubuntu localhost] and IPs [127.0.0.1 ::1]
[certificates] Generated etcd/peer certificate and key.
[certificates] etcd/peer serving cert is signed for DNS names [ubuntu localhost] and IPs [192.168.52.134 127.0.0.1 ::1]
[certificates] Generated etcd/healthcheck-client certificate and key.
[certificates] Generated apiserver-etcd-client certificate and key.
[certificates] Generated front-proxy-ca certificate and key.
[certificates] Generated front-proxy-client certificate and key.
[certificates] valid certificates and keys now exist in "/etc/kubernetes/pki"
[certificates] Generated sa key and public key.
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/scheduler.conf"
[controlplane] wrote Static Pod manifest for component kube-apiserver to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[controlplane] wrote Static Pod manifest for component kube-controller-manager to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[controlplane] wrote Static Pod manifest for component kube-scheduler to "/etc/kubernetes/manifests/kube-scheduler.yaml"
[etcd] Wrote Static Pod manifest for a local etcd instance to "/etc/kubernetes/manifests/etcd.yaml"
[init] waiting for the kubelet to boot up the control plane as Static Pods from directory "/etc/kubernetes/manifests" 
[init] this might take a minute or longer if the control plane images have to be pulled

Unfortunately, an error has occurred:
	timed out waiting for the condition

This error is likely caused by:
	- The kubelet is not running
	- The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)

If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
	- 'systemctl status kubelet'
	- 'journalctl -xeu kubelet'

Additionally, a control plane component may have crashed or exited when started by the container runtime.
To troubleshoot, list all containers using your preferred container runtimes CLI, e.g. docker.
Here is one example how you may list all Kubernetes containers running in docker:
	- 'docker ps -a | grep kube | grep -v pause'
	Once you have found the failing container, you can inspect its logs with:
	- 'docker logs CONTAINERID'
couldn't initialize a Kubernetes cluster
root@ubuntu:~# 

Another failure. Analyzing the error hints and checking the kubelet logs with journalctl -xeu kubelet shows that pulling the "k8s.gcr.io/pause:3.1" image failed. Check the local Docker images:

root@ubuntu:~# docker image list 
REPOSITORY                                                                    TAG                 IMAGE ID            CREATED             SIZE
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy                v1.12.1             61afff57f010        4 days ago          96.6MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver            v1.12.1             dcb029b5e3ad        4 days ago          194MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager   v1.12.1             aa2dd57c7329        4 days ago          164MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler            v1.12.1             d773ad20fd80        4 days ago          58.3MB
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd                      3.2.24              3cab8e1b9802        2 weeks ago         220MB
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns                   1.2.2               367cdc8433a4        6 weeks ago         39.2MB
registry.cn-hangzhou.aliyuncs.com/google_containers/pause                     3.1                 da86e6ba6ca1        9 months ago        742kB
root@ubuntu:~#

The local images are all tagged registry.cn-hangzhou.aliyuncs.com/..., while "kubeadm config images list --kubernetes-version=v1.12.1" shows that kubeadm expects them under the k8s.gcr.io prefix. kubeadm therefore tries to pull those images again, and since this environment has no access to k8s.gcr.io the pull fails and the deployment aborts. Retag all of the images:

root@ubuntu:~# docker image list 
REPOSITORY                                                                    TAG                 IMAGE ID            CREATED             SIZE
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy                v1.12.1             61afff57f010        4 days ago          96.6MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver            v1.12.1             dcb029b5e3ad        4 days ago          194MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager   v1.12.1             aa2dd57c7329        4 days ago          164MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler            v1.12.1             d773ad20fd80        4 days ago          58.3MB
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd                      3.2.24              3cab8e1b9802        2 weeks ago         220MB
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns                   1.2.2               367cdc8433a4        6 weeks ago         39.2MB
registry.cn-hangzhou.aliyuncs.com/google_containers/pause                     3.1                 da86e6ba6ca1        9 months ago        742kB
root@ubuntu:~# kubeadm config images list --kubernetes-version=v1.12.1
k8s.gcr.io/kube-apiserver:v1.12.1
k8s.gcr.io/kube-controller-manager:v1.12.1
k8s.gcr.io/kube-scheduler:v1.12.1
k8s.gcr.io/kube-proxy:v1.12.1
k8s.gcr.io/pause:3.1
k8s.gcr.io/etcd:3.2.24
k8s.gcr.io/coredns:1.2.2
root@ubuntu:~# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.12.1 k8s.gcr.io/kube-proxy:v1.12.1
root@ubuntu:~# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.12.1 k8s.gcr.io/kube-apiserver:v1.12.1
root@ubuntu:~# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.12.1 k8s.gcr.io/kube-controller-manager:v1.12.1
root@ubuntu:~# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.12.1 k8s.gcr.io/kube-scheduler:v1.12.1
root@ubuntu:~# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.2.24 k8s.gcr.io/etcd:3.2.24
root@ubuntu:~# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.2.2 k8s.gcr.io/coredns:1.2.2
root@ubuntu:~# docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.1 k8s.gcr.io/pause:3.1
root@ubuntu:~# docker image list 
REPOSITORY                                                                    TAG                 IMAGE ID            CREATED             SIZE
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy                v1.12.1             61afff57f010        4 days ago          96.6MB
k8s.gcr.io/kube-proxy                                                         v1.12.1             61afff57f010        4 days ago          96.6MB
k8s.gcr.io/kube-scheduler                                                     v1.12.1             d773ad20fd80        4 days ago          58.3MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler            v1.12.1             d773ad20fd80        4 days ago          58.3MB
k8s.gcr.io/kube-apiserver                                                     v1.12.1             dcb029b5e3ad        4 days ago          194MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver            v1.12.1             dcb029b5e3ad        4 days ago          194MB
k8s.gcr.io/kube-controller-manager                                            v1.12.1             aa2dd57c7329        4 days ago          164MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager   v1.12.1             aa2dd57c7329        4 days ago          164MB
k8s.gcr.io/etcd                                                               3.2.24              3cab8e1b9802        2 weeks ago         220MB
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd                      3.2.24              3cab8e1b9802        2 weeks ago         220MB
k8s.gcr.io/coredns                                                            1.2.2               367cdc8433a4        6 weeks ago         39.2MB
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns                   1.2.2               367cdc8433a4        6 weeks ago         39.2MB
k8s.gcr.io/pause                                                              3.1                 da86e6ba6ca1        9 months ago        742kB
registry.cn-hangzhou.aliyuncs.com/google_containers/pause                     3.1                 da86e6ba6ca1        9 months ago        742kB
root@ubuntu:~# 
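
The manual docker tag commands above can be automated. The loop below is a sketch that pulls each image kubeadm expects from the aliyun mirror and retags it with the k8s.gcr.io name, assuming the mirror hosts every image under the same base name (which the listing above suggests):

for img in $(kubeadm config images list --kubernetes-version=v1.12.1); do
  # e.g. k8s.gcr.io/kube-apiserver:v1.12.1 -> kube-apiserver:v1.12.1
  name=${img##*/}
  mirror=registry.cn-hangzhou.aliyuncs.com/google_containers/$name
  docker pull "$mirror"
  docker tag "$mirror" "$img"
done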

Redeploy

root@ubuntu:~# kubeadm reset 
[reset] WARNING: changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted.
[reset] are you sure you want to proceed? [y/N]: y
[preflight] running pre-flight checks
[reset] stopping the kubelet service
[reset] unmounting mounted directories in "/var/lib/kubelet"
[reset] deleting contents of stateful directories: [/var/lib/kubelet /etc/cni/net.d /var/lib/dockershim /var/run/kubernetes /var/lib/etcd]
[reset] deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki]
[reset] deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf]
root@ubuntu:~#
root@ubuntu:~#
root@ubuntu:~# kubeadm init --config kubeadm.conf 
[init] using Kubernetes version: v1.12.1
[preflight] running pre-flight checks
[preflight/images] Pulling images required for setting up a Kubernetes cluster
[preflight/images] This might take a minute or two, depending on the speed of your internet connection
[preflight/images] You can also perform this action in beforehand using 'kubeadm config images pull'
[kubelet] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[preflight] Activating the kubelet service
[certificates] Generated front-proxy-ca certificate and key.
[certificates] Generated front-proxy-client certificate and key.
[certificates] Generated etcd/ca certificate and key.
[certificates] Generated etcd/server certificate and key.
[certificates] etcd/server serving cert is signed for DNS names [ubuntu localhost] and IPs [127.0.0.1 ::1]
[certificates] Generated apiserver-etcd-client certificate and key.
[certificates] Generated etcd/peer certificate and key.
[certificates] etcd/peer serving cert is signed for DNS names [ubuntu localhost] and IPs [192.168.52.134 127.0.0.1 ::1]
[certificates] Generated etcd/healthcheck-client certificate and key.
[certificates] Generated ca certificate and key.
[certificates] Generated apiserver certificate and key.
[certificates] apiserver serving cert is signed for DNS names [ubuntu kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.52.134]
[certificates] Generated apiserver-kubelet-client certificate and key.
[certificates] valid certificates and keys now exist in "/etc/kubernetes/pki"
[certificates] Generated sa key and public key.
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/scheduler.conf"
[controlplane] wrote Static Pod manifest for component kube-apiserver to "/etc/kubernetes/manifests/kube-apiserver.yaml"
[controlplane] wrote Static Pod manifest for component kube-controller-manager to "/etc/kubernetes/manifests/kube-controller-manager.yaml"
[controlplane] wrote Static Pod manifest for component kube-scheduler to "/etc/kubernetes/manifests/kube-scheduler.yaml"
[etcd] Wrote Static Pod manifest for a local etcd instance to "/etc/kubernetes/manifests/etcd.yaml"
[init] waiting for the kubelet to boot up the control plane as Static Pods from directory "/etc/kubernetes/manifests" 
[init] this might take a minute or longer if the control plane images have to be pulled
[apiclient] All control plane components are healthy after 22.540011 seconds
[uploadconfig] storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.12" in namespace kube-system with the configuration for the kubelets in the cluster
[markmaster] Marking the node ubuntu as master by adding the label "node-role.kubernetes.io/master=''"
[markmaster] Marking the node ubuntu as master by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "ubuntu" as an annotation
[bootstraptoken] using token: 1vq6rr.l1aipybd91l9vlu2
[bootstraptoken] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstraptoken] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstraptoken] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstraptoken] creating the "cluster-info" ConfigMap in the "kube-public" namespace
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node
as root:

  kubeadm join 192.168.52.134:6443 --token 1vq6rr.l1aipybd91l9vlu2 --discovery-token-ca-cert-hash sha256:304e1303bfeb08f9024d7e6a80f5f386ff1f2e05941e833246cf86b0ab82fac8

root@ubuntu:~# 

Deployment succeeded

Check the Kubernetes pods

root@ubuntu:~# kubectl get pod --all-namespaces
The connection to the server localhost:8080 was refused - did you specify the right host or port?

The error is reported because kubectl has no kubeconfig yet and falls back to localhost:8080. Run the following commands from the init output to fix it:

root@ubuntu:~# mkdir -p $HOME/.kube
root@ubuntu:~# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
root@ubuntu:~# sudo chown $(id -u):$(id -g) $HOME/.kube/config
root@ubuntu:~# kubectl get pod --all-namespaces
NAMESPACE     NAME                             READY   STATUS              RESTARTS   AGE
kube-system   coredns-6c66ffc55b-55pd5         0/1     ContainerCreating   0          13m
kube-system   coredns-6c66ffc55b-bxwzn         0/1     ContainerCreating   0          13m
kube-system   etcd-ubuntu                      1/1     Running             0          13m
kube-system   kube-apiserver-ubuntu            1/1     Running             0          13m
kube-system   kube-controller-manager-ubuntu   1/1     Running             0          13m
kube-system   kube-proxy-xpqdk                 1/1     Running             0          13m
kube-system   kube-scheduler-ubuntu            1/1     Running             0          13m
root@ubuntu:~# 
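
Alternatively, when working as root you can point kubectl straight at admin.conf instead of copying it; this only lasts for the current shell:

export KUBECONFIG=/etc/kubernetes/admin.conf
kubectl get pod --all-namespaces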

"coredns-6c66ffc55b-55pd5" is not READY because no network plugin has been loaded yet

root@ubuntu:~# kubectl describe pod coredns-6c66ffc55b-55pd5 -n kube-system
Name:               coredns-6c66ffc55b-55pd5
Namespace:          kube-system
Priority:           0
PriorityClassName:  
Node:               ubuntu/192.168.52.134
Start Time:         Wed, 10 Oct 2018 07:48:43 -0700
Labels:             k8s-app=kube-dns
                    pod-template-hash=6c66ffc55b
Annotations:        
Status:             Pending
IP:                 
Controlled By:      ReplicaSet/coredns-6c66ffc55b
Containers:
  coredns:
    Container ID:  
    Image:         registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.2.2
    Image ID:      
    Ports:         53/UDP, 53/TCP, 9153/TCP
    Host Ports:    0/UDP, 0/TCP, 0/TCP
    Args:
      -conf
      /etc/coredns/Corefile
    State:          Waiting
      Reason:       ContainerCreating
    Ready:          False
    Restart Count:  0
    Limits:
      memory:  170Mi
    Requests:
      cpu:        100m
      memory:     70Mi
    Liveness:     http-get http://:8080/health delay=60s timeout=5s period=10s #success=1 #failure=5
    Environment:  
    Mounts:
      /etc/coredns from config-volume (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from coredns-token-mxvzj (ro)
Conditions:
  Type              Status
  Initialized       True 
  Ready             False 
  ContainersReady   False 
  PodScheduled      True 
Volumes:
  config-volume:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      coredns
    Optional:  false
  coredns-token-mxvzj:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  coredns-token-mxvzj
    Optional:    false
QoS Class:       Burstable
Node-Selectors:  
Tolerations:     CriticalAddonsOnly
                 node-role.kubernetes.io/master:NoSchedule
                 node.kubernetes.io/not-ready:NoExecute for 300s
                 node.kubernetes.io/unreachable:NoExecute for 300s
Events:
  Type     Reason           Age                From               Message
  ----     ------           ----               ----               -------
  Normal   Scheduled        15m                default-scheduler  Successfully assigned kube-system/coredns-6c66ffc55b-55pd5 to ubuntu
  Warning  NetworkNotReady  4s (x71 over 15m)  kubelet, ubuntu    network is not ready: [runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:docker: network plugin is not ready: cni config uninitialized]
root@ubuntu:~# 

Install the network plugin

root@ubuntu:~# kubectl apply -f https://git.io/weave-kube-1.6
serviceaccount/weave-net created
clusterrole.rbac.authorization.k8s.io/weave-net created
clusterrolebinding.rbac.authorization.k8s.io/weave-net created
role.rbac.authorization.k8s.io/weave-net created
rolebinding.rbac.authorization.k8s.io/weave-net created
daemonset.extensions/weave-net created
root@ubuntu:~#
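
Once the weave-net pods are running, the CoreDNS pods should leave ContainerCreating and the node should report Ready; a quick check:

kubectl get pods -n kube-system -o wide | grep weave
kubectl get nodes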

Join a node to the Kubernetes cluster

Bring up the node machine and, following the master deployment above, prepare the node in this order:

1. Install docker-ce
2. Add the user to the docker group
3. Install kubelet, kubeadm, and kubectl
4. Restart kubelet

Then run the join command printed at the end of the master deployment to add the node to the cluster

root@ubuntu:~# kubeadm join 192.168.52.134:6443 --token 828lqz.vortjhiv3qki0re9 --discovery-token-ca-cert-hash sha256:60f875f524fd0ed1ad40ef1b2dad37e41a752692f33048bb7fde095aff88d4ca
[preflight] running pre-flight checks
[discovery] Trying to connect to API Server "192.168.52.134:6443"
[discovery] Created cluster-info discovery client, requesting info from "https://192.168.52.134:6443"
[discovery] Requesting info from "https://192.168.52.134:6443" again to validate TLS against the pinned public key
[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "192.168.52.134:6443"
[discovery] Successfully established connection with API Server "192.168.52.134:6443"
[kubelet] Downloading configuration for the kubelet from the "kubelet-config-1.12" ConfigMap in the kube-system namespace
[kubelet] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[preflight] Activating the kubelet service
[tlsbootstrap] Waiting for the kubelet to perform the TLS Bootstrap...
[patchnode] Uploading the CRI Socket information "/var/run/dockershim.sock" to the Node API object "ubuntu" as an annotation

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the master to see this node join the cluster.

root@ubuntu:~#
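
The bootstrap token printed by kubeadm init expires after 24 hours by default. If it has expired by the time a node joins, a fresh join command can be generated on the master:

kubeadm token create --print-join-command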

Check the node status from the master

root@ubuntu:~# kubectl get pod -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
coredns-6c66ffc55b-l6782         1/1     Running   2          2m29s
coredns-6c66ffc55b-vr7x6         1/1     Running   2          2m29s
etcd-ubuntu                      1/1     Running   0          105s
kube-apiserver-ubuntu            1/1     Running   0          103s
kube-controller-manager-ubuntu   1/1     Running   0          2m
kube-proxy-jflpz                 1/1     Running   0          2m29s
kube-scheduler-ubuntu            1/1     Running   0          95s
weave-net-sd2ld                  2/2     Running   0          65s
root@ubuntu:~# kubectl get nodes 
NAME     STATUS   ROLES    AGE    VERSION
ubuntu   Ready    master   4m9s   v1.12.1
root@ubuntu:~# kubectl get pod -n kube-system
NAME                       READY   STATUS              RESTARTS   AGE
coredns-6c66ffc55b-l6782   0/1     ContainerCreating   2          4m3s
coredns-6c66ffc55b-vr7x6   0/1     ContainerCreating   3          4m3s
etcd-ubuntu                0/1     Pending             0          0s
kube-apiserver-ubuntu      0/1     Pending             0          0s
kube-proxy-jflpz           1/1     Running             0          4m3s
weave-net-sd2ld            2/2     Running             0          2m39s
root@ubuntu:~# kubectl get nodes 
NAME     STATUS   ROLES    AGE     VERSION
ubuntu   Ready    master   4m27s   v1.12.1
root@ubuntu:~# kubectl get pod -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
coredns-6c66ffc55b-l6782         1/1     Running   0          4m10s
coredns-6c66ffc55b-vr7x6         1/1     Running   0          4m10s
kube-controller-manager-ubuntu   0/1     Pending   0          1s
kube-proxy-jflpz                 1/1     Running   0          4m10s
weave-net-sd2ld                  2/2     Running   0          2m46s
root@ubuntu:~# kubectl get pod -n kube-system
NAME                             READY   STATUS             RESTARTS   AGE
coredns-6c66ffc55b-l6782         0/1     CrashLoopBackOff   4          4m30s
coredns-6c66ffc55b-vr7x6         1/1     Running            2          4m30s
kube-apiserver-ubuntu            0/1     Pending            0          1s
kube-controller-manager-ubuntu   0/1     Pending            0          1s
kube-proxy-jflpz                 1/1     Running            0          4m30s
kube-scheduler-ubuntu            0/1     Pending            0          1s
weave-net-sd2ld                  2/2     Running            0          3m6s
root@ubuntu:~# kubectl get pod -n kube-system
NAME                             READY   STATUS    RESTARTS   AGE
coredns-6c66ffc55b-l6782         0/1     Error     5          5m56s
coredns-6c66ffc55b-vr7x6         0/1     Error     5          5m56s
etcd-ubuntu                      1/1     Running   0          1s
kube-apiserver-ubuntu            0/1     Pending   0          0s
kube-controller-manager-ubuntu   1/1     Running   0          1s
kube-proxy-jflpz                 1/1     Running   0          5m56s
kube-scheduler-ubuntu            0/1     Pending   0          1s
weave-net-sd2ld                  2/2     Running   1          4m32s
root@ubuntu:~# 

The master never shows the joined node, and the master's own system components keep getting recreated. Check the kubelet logs via journalctl, which retrieves the systemd journal (journalctl -exu kubelet):

Oct 15 06:32:14 ubuntu kubelet[21032]: E1015 06:32:14.665343   21032 azure_dd.go:147] failed to get azure cloud in GetVolumeLimits, plugin.host: ubuntu
Oct 15 06:32:14 ubuntu kubelet[21032]: W1015 06:32:14.675132   21032 kubelet.go:1622] Deleting mirror pod "etcd-ubuntu_kube-system(b88890b9-d07e-11e8-aa0c-000c29b23b73)" because it is outdated
Oct 15 06:32:14 ubuntu kubelet[21032]: W1015 06:32:14.675908   21032 kubelet.go:1622] Deleting mirror pod "kube-apiserver-ubuntu_kube-system(b8a72b35-d07e-11e8-aa0c-000c29b23b73)" because it is outdated
Oct 15 06:32:14 ubuntu kubelet[21032]: W1015 06:32:14.771417   21032 kubelet.go:1622] Deleting mirror pod "kube-controller-manager-ubuntu_kube-system(b906e6ad-d07e-11e8-aa0c-000c29b23b73)" because it is outdated
Oct 15 06:32:15 ubuntu kubelet[21032]: W1015 06:32:15.682986   21032 kubelet.go:1622] Deleting mirror pod "kube-apiserver-ubuntu_kube-system(b9bde629-d07e-11e8-aa0c-000c29b23b73)" because it is outdated
Oct 15 06:32:15 ubuntu kubelet[21032]: W1015 06:32:15.683031   21032 kubelet.go:1622] Deleting mirror pod "kube-controller-manager-ubuntu_kube-system(b906e6ad-d07e-11e8-aa0c-000c29b23b73)" because it is outdated
Oct 15 06:32:15 ubuntu kubelet[21032]: W1015 06:32:15.683091   21032 kubelet.go:1622] Deleting mirror pod "kube-scheduler-ubuntu_kube-system(b9211840-d07e-11e8-aa0c-000c29b23b73)" because it is outdated
Oct 15 06:32:15 ubuntu kubelet[21032]: W1015 06:32:15.683158   21032 kubelet.go:1622] Deleting mirror pod "etcd-ubuntu_kube-system(b9be1b4f-d07e-11e8-aa0c-000c29b23b73)" because it is outdated
Oct 15 06:32:16 ubuntu kubelet[21032]: W1015 06:32:16.831308   21032 status_manager.go:501] Failed to update status for pod "kube-apiserver-ubuntu_kube-system(bacc5a0d-d07e-11e8-aa0c-000c29b23b73)": failed to patch status "{\"status\":{\"conditions\":[{\"lastProbeTime\":null,\"lastTransitionTime\":\"2018-10-15T13:29:34Z\",\"status\":\"True\",\"type\":\"Initialized\"},{\"lastProbeTime\":null,\"lastTransitionTime\":\"2018-10-15T13:29:36Z\",\"status\":\"True\",\"type\":\"Ready\"},{\"lastProbeTime\":null,\"lastTransitionTime\":\"2018-10-15T13:29:36Z\",\"status\":\"True\",\"type\":\"ContainersReady\"},{\"lastProbeTime\":null,\"lastTransitionTime\":\"2018-10-15T13:29:34Z\",\"status\":\"True\",\"type\":\"PodScheduled\"}],\"containerStatuses\":[{\"containerID\":\"docker://232c447bf98969bc21e01e41bcf2a3c70f96dbb12cd91beeb5e618b31eee46ff\",\"image\":\"k8s.gcr.io/kube-apiserver:v1.12.1\",\"imageID\":\"docker-pullable://registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver@sha256:0a45517b1eeab5bd036f9ecd24e33ccd1e62542d3899cea917ab55248dc2a7d6\",\"lastState\":{},\"name\":\"kube-apiserver\",\"ready\":true,\"restartCount\":0,\"state\":{\"running\":{\"startedAt\":\"2018-10-15T13:29:35Z\"}}}],\"hostIP\":\"192.168.52.135\",\"phase\":\"Running\",\"podIP\":\"192.168.52.134\",\"startTime\":\"2018-10-15T13:29:34Z\"}}" for pod "kube-system"/"kube-apiserver-ubuntu": pods "kube-apiserver-ubuntu" not found
Oct 15 06:32:17 ubuntu kubelet[21032]: W1015 06:32:17.692972   21032 kubelet.go:1622] Deleting mirror pod "kube-controller-manager-ubuntu_kube-system(baadf46e-d07e-11e8-aa0c-000c29b23b73)" because it is outdated
Oct 15 06:32:18 ubuntu kubelet[21032]: W1015 06:32:18.698033   21032 kubelet.go:1622] Deleting mirror pod "kube-scheduler-ubuntu_kube-system(bbfd837e-d07e-11e8-aa0c-000c29b23b73)" because it is outdated
Oct 15 06:32:18 ubuntu kubelet[21032]: W1015 06:32:18.698169   21032 kubelet.go:1622] Deleting mirror pod "etcd-ubuntu_kube-system(bc1c04fa-d07e-11e8-aa0c-000c29b23b73)" because it is outdated
Oct 15 06:32:18 ubuntu kubelet[21032]: W1015 06:32:18.698250   21032 kubelet.go:1622] Deleting mirror pod "kube-apiserver-ubuntu_kube-system(bbc082ee-d07e-11e8-aa0c-000c29b23b73)" because it is outdated
Oct 15 06:32:20 ubuntu kubelet[21032]: W1015 06:32:20.008839   21032 kubelet.go:1622] Deleting mirror pod "kube-controller-manager-ubuntu_kube-system(bc961a05-d07e-11e8-aa0c-000c29b23b73)" because it is outdated
Oct 15 06:32:20 ubuntu kubelet[21032]: W1015 06:32:20.737155   21032 kubelet.go:1622] Deleting mirror pod "etcd-ubuntu_kube-system(bc1c04fa-d07e-11e8-aa0c-000c29b23b73)" because it is outdated

The kubelet keeps recreating its system components, and the logs say the mirror pods are being rebuilt because they are outdated...

Some searching reveals that this happens because the master and the node share the same hostname. Renaming the master to master and the node to node-1 fixes it (see the sketch after this list):

/etc/hostname: set the hostname
/etc/hosts: update the hostname-to-IP mapping
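
A sketch of the rename for this environment (IP addresses taken from the outputs above); after changing the node's hostname, run kubeadm reset on it and join again:

# On the master
hostnamectl set-hostname master
# On the node
hostnamectl set-hostname node-1
# On both machines, map the new names (IPs from this setup)
cat >> /etc/hosts <<EOF
192.168.52.134 master
192.168.52.135 node-1
EOF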

After the rename, rejoin the node to the master. Both node and master showing Ready means it worked.

root@master:~# kubectl get pod --all-namespaces -o wide
NAMESPACE     NAME                             READY   STATUS    RESTARTS   AGE     IP               NODE     NOMINATED NODE
kube-system   coredns-6c66ffc55b-6ljpk         1/1     Running   0          109s    10.32.0.4        master   
kube-system   coredns-6c66ffc55b-df85w         1/1     Running   0          2m3s    10.32.0.3        master   
kube-system   etcd-master                      1/1     Running   0          3m22s   192.168.52.134   master   
kube-system   kube-apiserver-master            1/1     Running   0          3m49s   192.168.52.134   master   
kube-system   kube-controller-manager-master   1/1     Running   0          3m22s   192.168.52.134   master   
kube-system   kube-proxy-48wxn                 1/1     Running   0          4m12s   192.168.52.134   master   
kube-system   kube-proxy-6g6m6                 1/1     Running   0          77s     192.168.52.135   node-1   
kube-system   kube-scheduler-master            1/1     Running   0          3m33s   192.168.52.134   master   
kube-system   weave-net-qfqdm                  2/2     Running   1          77s     192.168.52.135   node-1   
kube-system   weave-net-x9mxl                  2/2     Running   0          4m5s    192.168.52.134   master   
root@master:~# kubectl get nodes
NAME     STATUS   ROLES    AGE     VERSION
master   Ready    master   4m35s   v1.12.1
node-1   Ready    <none>   79s     v1.12.1
root@master:~# 

kubelet fails to start:

[root@develop ~]# systemctl status kubelet 
● kubelet.service - kubelet: The Kubernetes Node Agent
   Loaded: loaded (/etc/systemd/system/kubelet.service; enabled; vendor preset: disabled)
  Drop-In: /etc/systemd/system/kubelet.service.d
           └─10-kubeadm.conf
   Active: active (running) since Fri 2019-02-01 00:40:39 CST; 11s ago
     Docs: https://kubernetes.io/docs/
 Main PID: 25128 (kubelet)
    Tasks: 59
   CGroup: /system.slice/kubelet.service
           └─25128 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --config=/var/lib/kubelet/config.yaml --cgroup-driver=sys...

Feb 01 00:40:51 develop kubelet[25128]: E0131 16:40:51.063238   25128 kubelet.go:2266] node "develop" not found
Feb 01 00:40:51 develop kubelet[25128]: E0131 16:40:51.150205   25128 reflector.go:134] k8s.io/kubernetes/pkg/kubelet/kubelet.go:444: Failed to list *v1.Service: Get https://10.66.240....ction refused
Feb 01 00:40:51 develop kubelet[25128]: E0131 16:40:51.163434   25128 kubelet.go:2266] node "develop" not found
Feb 01 00:40:51 develop kubelet[25128]: E0131 16:40:51.263638   25128 kubelet.go:2266] node "develop" not found
Feb 01 00:40:51 develop kubelet[25128]: E0131 16:40:51.363840   25128 kubelet.go:2266] node "develop" not found
Feb 01 00:40:51 develop kubelet[25128]: E0131 16:40:51.464061   25128 kubelet.go:2266] node "develop" not found
Feb 01 00:40:51 develop kubelet[25128]: E0131 16:40:51.551324   25128 reflector.go:134] k8s.io/kubernetes/pkg/kubelet/config/apiserver.go:47: Failed to list *v1.Pod: Get https://10.66....ction refused
Feb 01 00:40:51 develop kubelet[25128]: E0131 16:40:51.564244   25128 kubelet.go:2266] node "develop" not found
Feb 01 00:40:51 develop kubelet[25128]: E0131 16:40:51.664408   25128 kubelet.go:2266] node "develop" not found
Feb 01 00:40:51 develop kubelet[25128]: E0131 16:40:51.764609   25128 kubelet.go:2266] node "develop" not found
Hint: Some lines were ellipsized, use -l to show in full.
[root@develop ~]# 

The logs complain that node "develop" cannot be found. Check the state of the Docker containers:

[root@develop ~]# docker ps -a
CONTAINER ID        IMAGE                                               COMMAND                  CREATED              STATUS                          PORTS               NAMES
e91ac8b06cde        b9027a78d94c                                        "kube-controller-m..."   About a minute ago   Exited (1) 59 seconds ago                           k8s_kube-controller-manager_kube-controller-manager-develop_kube-system_450c90d90ecdd385f4dd73c165741a85_4
2b1278b4cb30        3cab8e1b9802                                        "etcd --advertise-..."   About a minute ago   Exited (1) About a minute ago                       k8s_etcd_etcd-develop_kube-system_cb6a672b3cc07b22cb17e78fcb4402f2_4
89911daa6631        177db4b8e93a                                        "kube-apiserver --..."   About a minute ago   Exited (1) About a minute ago                       k8s_kube-apiserver_kube-apiserver-develop_kube-system_e3baede458ce45b952471adb98fe1a09_4
87e350205cce        3193be46e0b3                                        "kube-scheduler --..."   2 minutes ago        Up 2 minutes                                        k8s_kube-scheduler_kube-scheduler-develop_kube-system_15771bb86175c846e989776d98b76eb2_0
34719c83c53b        registry.aliyuncs.com/google_containers/pause:3.1   "/pause"                 2 minutes ago        Up 2 minutes                                        k8s_POD_etcd-develop_kube-system_cb6a672b3cc07b22cb17e78fcb4402f2_0
f58abe8eb5f7        registry.aliyuncs.com/google_containers/pause:3.1   "/pause"                 2 minutes ago        Up 2 minutes                                        k8s_POD_kube-scheduler-develop_kube-system_15771bb86175c846e989776d98b76eb2_0
3e810e0b9d56        registry.aliyuncs.com/google_containers/pause:3.1   "/pause"                 2 minutes ago        Up 2 minutes                                        k8s_POD_kube-controller-manager-develop_kube-system_450c90d90ecdd385f4dd73c165741a85_0
157ca028b209        registry.aliyuncs.com/google_containers/pause:3.1   "/pause"                 2 minutes ago        Up 2 minutes                                        k8s_POD_kube-apiserver-develop_kube-system_e3baede458ce45b952471adb98fe1a09_0
[root@develop ~]# 

Three of the containers are not running; keep digging into why they fail to start:

[root@develop ~]# docker ps -a
CONTAINER ID        IMAGE                                               COMMAND                  CREATED              STATUS                          PORTS               NAMES
54fb1c4e2505        b9027a78d94c                                        "kube-controller-m..."   38 seconds ago       Exited (1) 36 seconds ago                           k8s_kube-controller-manager_kube-controller-manager-develop_kube-system_450c90d90ecdd385f4dd73c165741a85_5
954e5d43afe1        3cab8e1b9802                                        "etcd --advertise-..."   About a minute ago   Exited (1) About a minute ago                       k8s_etcd_etcd-develop_kube-system_cb6a672b3cc07b22cb17e78fcb4402f2_5
bb783d8d40f8        177db4b8e93a                                        "kube-apiserver --..."   About a minute ago   Exited (1) About a minute ago                       k8s_kube-apiserver_kube-apiserver-develop_kube-system_e3baede458ce45b952471adb98fe1a09_5
87e350205cce        3193be46e0b3                                        "kube-scheduler --..."   3 minutes ago        Up 3 minutes                                        k8s_kube-scheduler_kube-scheduler-develop_kube-system_15771bb86175c846e989776d98b76eb2_0
34719c83c53b        registry.aliyuncs.com/google_containers/pause:3.1   "/pause"                 3 minutes ago        Up 3 minutes                                        k8s_POD_etcd-develop_kube-system_cb6a672b3cc07b22cb17e78fcb4402f2_0
f58abe8eb5f7        registry.aliyuncs.com/google_containers/pause:3.1   "/pause"                 3 minutes ago        Up 3 minutes                                        k8s_POD_kube-scheduler-develop_kube-system_15771bb86175c846e989776d98b76eb2_0
3e810e0b9d56        registry.aliyuncs.com/google_containers/pause:3.1   "/pause"                 3 minutes ago        Up 3 minutes                                        k8s_POD_kube-controller-manager-develop_kube-system_450c90d90ecdd385f4dd73c165741a85_0
157ca028b209        registry.aliyuncs.com/google_containers/pause:3.1   "/pause"                 3 minutes ago        Up 3 minutes                                        k8s_POD_kube-apiserver-develop_kube-system_e3baede458ce45b952471adb98fe1a09_0
[root@develop ~]# 
[root@develop ~]# 
[root@develop ~]# docker logs 54fb1c4e2505
Flag --address has been deprecated, see --bind-address instead.
I0131 16:43:57.905073       1 serving.go:318] Generated self-signed cert in-memory
unable to load client CA file: unable to load client CA file: open /etc/kubernetes/pki/ca.crt: permission denied
[root@develop ~]# 

They all fail because the /etc/kubernetes/pki/ directory cannot be read. After digging through a lot of material... the root cause turns out to be that SELinux is not disabled.

[root@develop ~]# setenforce 0      # disable temporarily
[root@develop ~]# getenforce
Permissive
[root@develop ~]# vim /etc/sysconfig/selinux      # disable permanently
Change SELINUX=enforcing to SELINUX=disabled.
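
The permanent change can also be made non-interactively; on CentOS /etc/sysconfig/selinux is normally a symlink to /etc/selinux/config, so editing the target directly avoids replacing the symlink:

# Disable SELinux permanently (takes effect after reboot)
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config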
