目录
一、集群规划
二、基础安装(所有机器)
1、SSH主机免密
2、host文件解析
3、安装依赖包
4、关闭防火墙
5、关闭swap以及selinux
6、调整内核参数
7、设置时区
8、降低环境资源,关闭邮件服务
三、环境部署(所有节点)
1、Kube-proxy开启ipvs的前置条件
2、部署k8s
3、部署calico网络
主机名 | IP | 备注 |
node1 | 192.168.100.1 | master |
node2 | 192.168.100.2 | node1 |
node3 | 192.168.100.3 | node2 |
[root@node1 ~]# ssh-keygen -t rsa
Generating public/private rsa key pair.
Enter file in which to save the key (/root/.ssh/id_rsa):
Enter passphrase (empty for no passphrase):
Enter same passphrase again:
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
SHA256:/mKLWNjKS7Dsuyp7O7FlZ7yWE+Z/92LAEBNaQ/Tl4nk root@node1
The key's randomart image is:
+---[RSA 2048]----+
| o*. . |
| ooo o |
| . oo . |
| .. o |
| . . Soo E |
| ..ooo*. o. |
| o=o=o+. . |
|..+o +*.o.. + |
|++===o.+o+.o o. |
+----[SHA256]-----+
##拷贝公钥(需要给本机以及其他机器也发一份):
[root@node1 ~]# ssh-copy-id -i ~/.ssh/id_rsa.pub node1
##验证:
[root@node1 ~]# ssh node3
Last login: Thu Jun 10 22:58:16 2021 from 192.168.100.254
vim /etc/hosts
192.168.100.1 node1
192.168.100.2 node2
192.168.100.3 node3
[root@node1 ~]# yum -y install conntrack ntpdate ntp ipset jq iptables curl sysstat libseccomp wget git vim net-tools
[root@node1 ~]# systemctl stop firewalld
[root@node1 ~]# systemctl disable firewalld
[root@node1 ~]# vim /etc/fstab
#/dev/mapper/centos-swap swap swap defaults 0 0
[root@node1 ~]# swapoff -a //临时关闭
[root@node1 ~]# vim /etc/selinux/config //将enabled改成disabled,需要重启服务器
SELINUX=disabled
[root@node1 ~]# modprobe br_netfilter
[root@node1 ~]#vim /etc/sysctl.d/kubernetes.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.ipv4.tcp_tw_recycle = 0
vm.swappiness = 0
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_instances = 8192
fs.inotify.max_user_watches = 524288
fs.file-max = 52706963
fs.nr_open = 52706963
net.ipv6.conf.all.disable_ipv6 = 1
##生效
[root@node1 ~]# sysctl -p /etc/sysctl.d/kubernetes.conf
[root@node1 ~]# ntpdate time.windows.com
[root@node1 ~]# systemctl stop postfix && systemctl disable postfix
[root@node1 ~]# cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
[root@node1 ~]# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
[root@node1 ~]# yum -y install yum-utils device-mapper-persistent-data lvm2
2.1、下载阿里源(docker)
[root@node1 ~]# yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
2.2、安装docker
[root@node1 ~]# yum -y install docker-ce-19.03.15 docker-ce-cli-19.03.15 containerd.io
##启动并设置成开机自启
[root@node1 ~]# systemctl start docker && systemctl enable docker
##配置daemon
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": { "max-size": "100m" }
}
EOF
[root@node1 ~]# systemctl daemon-reload && systemctl restart docker
2.3、安装Kubeadm(主从配置)
##Kubernetes yum源配置
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
##下载包
[root@node1 ~]#yum install -y kubelet-1.18.19 kubeadm-1.18.19 kubectl-1.18.19
##kubelet开机自启
[root@node1 ~]# systemctl enable kubelet.service
##修改kubelet的cgroups也为systemd
[root@node1 ~]# vim /usr/lib/systemd/system/kubelet.service.d/10-kubeadm.conf
Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml --cgroup-driver=systemd"
##重启服务
[root@node1 ~]# systemctl daemon-reload
2.4、初始化主节点
##生成kubeadm-config.yaml
[root@node1 ~]# kubeadm config print init-defaults > kubeadm-config.yaml
[root@node1 ~]# vim kubeadm-config.yaml
advertiseAddress: 192.168.100.1 //自己机器的ip
kubernetesVersion: v1.18.19 //改成当前版本
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
networking: //添加一行内容
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16" //添加此行
在页尾添加: //使用IPvs网络模式进行调度
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
SupportIPVSProxyMode: true
mode: ipvs
##可以提前下载镜像,节约时间(下载镜像方式1)
kubeadm config images pull --config /root/new.yaml
##因为需要下载很多镜像提前下载(下载镜像方式2)
[root@node1 ~]# kubeadm config images list //查看需要什么镜像
k8s.gcr.io/kube-apiserver:v1.18.19
k8s.gcr.io/kube-controller-manager:v1.18.19
k8s.gcr.io/kube-scheduler:v1.18.19
k8s.gcr.io/kube-proxy:v1.18.19
k8s.gcr.io/pause:3.2
k8s.gcr.io/etcd:3.4.3-0
k8s.gcr.io/coredns:1.6.7
##下载镜像
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.19
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.18.19
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.18.19
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.19
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7
##修改tag
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.18.19 k8s.gcr.io/kube-apiserver:v1.18.19
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.18.19 k8s.gcr.io/kube-controller-manager:v1.18.19
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.18.19 k8s.gcr.io/kube-scheduler:v1.18.19
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.18.19 k8s.gcr.io/kube-proxy:v1.18.19
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2 k8s.gcr.io/pause:3.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.4.3-0 k8s.gcr.io/etcd:3.4.3-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:1.6.7 k8s.gcr.io/coredns:1.6.7
[root@node1 ~]# docker images //确认包是否拉取下来
##初始化
##如果kubelet、kubeadm、kubectl版本过高,且想用kubeadm得方式初始化得话,可以使用下面命令来更新config初始化文件
kubeadm config migrate --old-config kubeadm-config.yaml --new-config new.yaml
##如果初始化失败了,可以使用以下命令清掉相关配置,重新初始化
kubeadm reset
[root@node1 ~]# kubeadm init --config kubeadm-config.yaml
##执行成功结果:
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.100.1:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:18cd7c97516d56a2a739935b73d9cebf3daae1e9615d83180d9fb568853a8c97
##根据提示创建对应目录并赋权
[root@node1 ~]# mkdir -p $HOME/.kube
[root@node1 ~]# sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@node1 ~]# sudo chown $(id -u):$(id -g) $HOME/.kube/config
2.5、Node2与Node3加入集群(node2跟node3都需要执行)
[root@node2 ~]# kubeadm join 192.168.100.1:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:18cd7c97516d56a2a739935b73d9cebf3daae1e9615d83180d9fb568853a8c97
2.6、查看是否成功加入集群
[root@node1 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
node1 NotReady master 106s v1.18.19
node2 NotReady <none> 14s v1.18.19
node3 NotReady <none> 4s v1.18.19
[root@node1 ~]# wget https://docs.projectcalico.org/manifests/calico.yaml
[root@node1 ~]# kubectl apply -f calico.yaml
##报错:##如果执行calico.yaml报错:
error: unable to recognize "calico.yaml": no matches for kind "PodDisruptionBudget" in version "policy/v1"
##处理:
原因是版本不支持,去官网下载对应的calico.yaml重新上传
[root@node1 ~]# curl https://docs.projectcalico.org/archive/v3.20/manifests/calico.yaml -O
[root@node1 ~]# kubectl apply -f calico.yaml
##验证:
[root@node1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
node1 Ready master 6h37m v1.18.19
node2 Ready <none> 6h36m v1.18.19
node3 Ready <none> 6h36m v1.18.19