I. Environment overview:

Reference articles:

Offline deployment of Kubernetes 1.9 with kubeadm
Installing Kubernetes 1.9 with kubeadm
Offline deployment of Kubernetes v1.9.0 with kubeadm

1. Operating system:

ESXi 6.7 virtual machines running CentOS 7.3

2. Hostnames

node1 192.168.123.123
node2 192.168.123.122
node3 192.168.123.121

II. Configure the base environment

1. Set up SSH trust from node3 to the other nodes

[root@node3 ~]# ssh-keygen -t rsa
[root@node3 ~]# ssh-copy-id node1
[root@node3 ~]# ssh-copy-id node2
[root@node3 ~]# ssh-copy-id node3
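
To confirm the trust works, a quick check (assuming the hostnames above resolve on node3) is that a remote command runs without a password prompt:

# each command should print the remote hostname without asking for a password
for h in node1 node2 node3; do ssh $h hostname; done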

2. Download the offline package from the network drive, extract it, and copy it to every node (see the sketch below the link)

Link: https://pan.baidu.com/s/13AyMVUBn2Cr8sb6apUnzrg  Password: loyc
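
The archive can be pushed out from node3 with scp; a minimal sketch, assuming k8s.tar.gz was downloaded to /root on node3:

# copy the offline package to the other nodes
for h in node1 node2; do scp /root/k8s.tar.gz $h:/root/; done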

3. Install the Docker packages (all nodes)

# Install required utilities
yum install -y vim ntp

# Extract the downloaded k8s.tar.gz into /root/
tar zxf k8s.tar.gz -C /root/

# Install Docker CE 17.03 from the local RPMs
cd ~/k8s/rpm/

yum localinstall -y docker-ce-selinux-17.03.2.ce-1.el7.centos.noarch.rpm docker-ce-17.03.2.ce-1.el7.centos.x86_64.rpm 

# Start Docker and enable it at boot
systemctl start docker && systemctl enable docker
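
As a quick sanity check, the running Docker version and cgroup driver can be confirmed, for example:

docker version --format '{{.Server.Version}}'      # should report 17.03.2-ce
docker info 2>/dev/null | grep -i 'cgroup driver'  # note the driver for step 8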

4. Adjust CentOS for Kubernetes (all nodes)

# Disable the firewall
systemctl disable firewalld && systemctl stop firewalld

# Disable SELinux
setenforce 0
sed -i 's#enforcing#disabled#g' /etc/selinux/config

# Turn off swap
swapoff -a && sysctl -w vm.swappiness=0
vim /etc/fstab   # comment out the swap line

# Synchronize the clock
ntpdate cn.pool.ntp.org
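
Since the ntp package was installed in step 3, the ntpd service can optionally be enabled as well so the clocks stay in sync across reboots:

systemctl enable ntpd && systemctl start ntpd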

5. Kernel bridge/routing parameters, to avoid kubeadm routing warnings (all nodes)

echo "
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
" >> /etc/sysctl.conf

sysctl -p
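
If sysctl -p reports that the net.bridge keys do not exist, the br_netfilter kernel module is likely not loaded yet; loading it first and re-running sysctl usually fixes this:

modprobe br_netfilter
sysctl -p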

6. Import the Docker images (all nodes)

[root@node3 k8s]# cd /root/k8s/
[root@node3 k8s]# find . -name "*.tar" -exec docker image load -i {} \;
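
The imported images can then be listed to confirm the Kubernetes and Calico images are all present, e.g.:

docker images | grep -E 'gcr.io|calico'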

7. Install the Kubernetes RPMs (all nodes)

rpm -ivh socat-1.7.3.2-2.el7.x86_64.rpm
rpm -ivh kubernetes-cni-0.6.0-0.x86_64.rpm  kubelet-1.9.0-0.x86_64.rpm  kubectl-1.9.0-0.x86_64.rpm
rpm -ivh kubeadm-1.9.0-0.x86_64.rpm

8. Switch the kubelet to Docker's default cgroup driver (all nodes)

sed -i 's#systemd#cgroupfs#g' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
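
The driver set here must match the one Docker reports; a quick way to compare the two and reload systemd so the change is picked up:

docker info 2>/dev/null | grep -i 'cgroup driver'
grep cgroup-driver /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
systemctl daemon-reload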

9. Enable kubectl command completion on node3

[root@node3 k8s]# echo "source <(kubectl completion bash)" >> ~/.bashrc

III. Build the Kubernetes cluster with kubeadm

1. Start the kubelet service on node3

[root@node3 rpm]# systemctl enable kubelet && systemctl start kubelet

2. Initialize the Kubernetes cluster on node3

[root@node3 rpm]# kubeadm init --kubernetes-version=v1.9.0 --pod-network-cidr=10.244.0.0/16 --token-ttl=0 --ignore-preflight-errors=all

To start using your cluster, you need to run the following as a regular user:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of machines by running the following on each node
as root:
# The kubeadm join command below is important: record and save it, since it is needed to add nodes and cannot be recovered if lost.
kubeadm join --token bb3b01.42f41afdd2623461 192.168.123.123:6443 --discovery-token-ca-cert-hash sha256:8b7188df2ec8f3a1617c4eb2482712bbbcc97320422a4c5ecad0c52fa16905b0

Pay attention to the output above; the following needs to be run on the master:

mkdir -p $HOME/.kube

cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

chown $(id -u):$(id -g) $HOME/.kube/config
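
Once the kubeconfig is in place, the control plane can be verified from node3, for example:

kubectl get componentstatuses    # scheduler, controller-manager and etcd should be Healthy
kubectl cluster-info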

If kubeadm init (or a later kubeadm join) fails, the node can be reset with kubeadm reset:

kubeadm reset

Cleanup commands after a failed kubeadm init, to run before re-initializing the master:

systemctl stop kubelet
# NOTE: the next command removes ALL running Docker containers.
# Before resetting, make sure every running container can safely be removed (no impact on other workloads);
# otherwise, manually delete only the containers created by kubeadm (the gcr.io ones).
docker rm -f -v $(docker ps -q)
find /var/lib/kubelet | xargs -n 1 findmnt -n -t tmpfs -o TARGET -T | uniq | xargs -r umount -v
rm -r -f /etc/kubernetes /var/lib/kubelet /var/lib/etcd

3. Join the remaining nodes and verify the cluster

On node1 and node2, synchronize the clock and then join the cluster with kubeadm join:

ntpdate cn.pool.ntp.org

kubeadm join --token bb3b01.42f41afdd2623461 192.168.123.123:6443 --discovery-token-ca-cert-hash sha256:8b7188df2ec8f3a1617c4eb2482712bbbcc97320422a4c5ecad0c52fa16905b0
[root@node3 ~]# kubectl get node
NAME      STATUS     ROLES     AGE       VERSION
node1     NotReady   <none>    56m       v1.9.0
node2     NotReady   <none>    56m       v1.9.0
node3     NotReady   master    58m       v1.9.0

4. Deploy the Calico plugin, changing its CIDR to the subnet used during initialization:

...
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"   # set to the --pod-network-cidr used at init
...
[root@node3 k8s]# kubectl create -f calico.yaml
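
The Calico pods can take a few minutes to start; progress can be watched until everything in kube-system is Running:

kubectl get pod --all-namespaces -w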

[root@node3 k8s]# kubectl get node
NAME      STATUS    ROLES     AGE       VERSION
node1     Ready     <none>    1h        v1.9.0
node2     Ready     <none>    1h        v1.9.0
node3     Ready     master    1h        v1.9.0

[root@node3 k8s]# kubectl get pod --all-namespaces
NAMESPACE     NAME                                       READY     STATUS    RESTARTS   AGE
kube-system   calico-etcd-tkmpj                          1/1       Running   0          7m
kube-system   calico-kube-controllers-559b575f97-2zngp   1/1       Running   10         11m
kube-system   calico-node-6gxx8                          2/2       Running   10         11m
kube-system   calico-node-rmd7h                          2/2       Running   11         11m
kube-system   calico-node-tjwct                          2/2       Running   11         11m
kube-system   etcd-node3                                 1/1       Running   1          1h
kube-system   kube-apiserver-node3                       1/1       Running   1          1h
kube-system   kube-controller-manager-node3              1/1       Running   1          1h
kube-system   kube-dns-6f4fd4bdf-5vw7d                   3/3       Running   3          1h
kube-system   kube-proxy-d994x                           1/1       Running   1          1h
kube-system   kube-proxy-k2bdk                           1/1       Running   1          1h
kube-system   kube-proxy-ksp6b                           1/1       Running   1          1h
kube-system   kube-scheduler-node3                       1/1       Running   1          1h

IV. Adding services and other content

Reference article:
Installing and deploying a Kubernetes cluster
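
As a quick smoke test of the finished cluster (not taken from the referenced article), a minimal nginx Deployment exposed through a NodePort service might look like this, assuming an nginx image is reachable or pre-loaded on the nodes:

kubectl run nginx --image=nginx:1.13 --replicas=2
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get svc nginx    # note the NodePort, then curl http://<node-ip>:<NodePort>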