# --- System preparation (run on ALL nodes) ---

# Disable the firewall.
systemctl stop firewalld
systemctl disable firewalld

# Disable SELinux: permanently via the config file, immediately via setenforce.
sed -i 's/enforcing/disabled/' /etc/selinux/config
setenforce 0

# Disable swap (kubelet refuses to start with swap enabled):
# turn it off now, and comment out the swap entry in fstab for reboots.
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab

# Set the hostname (name each host according to its role).
# BUG FIX: original had "Hostnamectl" — command names are case-sensitive.
hostnamectl set-hostname k8s-node1
bash

# Add every node to /etc/hosts; the last entry is the virtual IP (VIP).
# (The original put this note inline after the command, which is invalid shell.)
vim /etc/hosts
# --- Pass bridged IPv4 traffic to iptables chains (required by CNI plugins) ---
cat << EOF >> /etc/sysctl.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
# The br_netfilter module must be loaded BEFORE sysctl -p, otherwise the
# net.bridge.* keys do not exist and applying them fails.
modprobe br_netfilter
sysctl -p
# --- keepalived: provides the floating VIP for the apiserver (all masters) ---
yum install -y keepalived

# BUG FIX: the original line was a truncated here-doc ("cat > ... <") with no
# delimiter and no body.
# NOTE(review): config body reconstructed from context (interface ens33 per the
# check below; VIP 192.168.200.154 per the kubeadm certSANs). Adjust `state`
# (MASTER/BACKUP) and `priority` per node — TODO confirm against original doc.
cat > /etc/keepalived/keepalived.conf << EOF
global_defs {
    router_id k8s
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.200.154
    }
}
EOF

systemctl start keepalived
systemctl enable keepalived

# Verify the VIP is now bound to the interface.
ip a s ens33
# --- haproxy: identical config on every master node. It proxies all backend
# master apiservers and listens on 16443, so port 16443 is the cluster's
# entry point. ---
yum install -y haproxy
cat > /etc/haproxy/haproxy.cfg << EOF
#-------------------------------
# Global settings
#-------------------------------
global
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
stats socket /var/lib/haproxy/stats
#--------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#--------------------------------
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 3000
#--------------------------------
# kubernetes apiserver frontend which proxys to the backends
#--------------------------------
frontend kubernetes-apiserver
mode tcp
bind *:16443
option tcplog
default_backend kubernetes-apiserver
#---------------------------------
#round robin balancing between the various backends
#---------------------------------
backend kubernetes-apiserver
mode tcp
balance roundrobin
server master1.k8s.io 192.168.77.14:6443 check
server master2.k8s.io 192.168.77.15:6443 check
server master3.k8s.io 192.168.77.16:6443 check
#---------------------------------
# collection haproxy statistics message
#---------------------------------
listen stats
bind *:1080
stats auth admin:awesomePassword
stats refresh 5s
stats realm HAProxy\ Statistics
stats uri /admin?stats
EOF

# Run on ALL master nodes, then confirm haproxy is listening on 16443.
systemctl start haproxy
systemctl enable haproxy
netstat -lntup|grep haproxy
# --- Install Docker on every host (container runtime required by Kubernetes) ---
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum clean all && yum makecache fast
yum -y install docker-ce
systemctl start docker
systemctl enable docker

# --- Configure a registry mirror for faster image pulls ---
# FIX: ensure the directory exists before writing the daemon config.
mkdir -p /etc/docker
cat << END > /etc/docker/daemon.json
{
"registry-mirrors":[ "https://nyakyfun.mirror.aliyuncs.com" ]
}
END
systemctl daemon-reload
systemctl restart docker
# --- Kubernetes yum repo: use the Aliyun mirror (run on ALL hosts) ---
# BUG FIX: original used "cat < /etc/yum.repos.d/kubernetes.repo" (a READ with a
# dangling EOF). It must be a here-doc that WRITES the repo file.
cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Install a pinned 1.20.0 toolchain and enable kubelet (kubeadm starts it later).
yum install -y kubelet-1.20.0 kubeadm-1.20.0 kubectl-1.20.0
systemctl enable kubelet
# --- Create kubeadm-config.yaml (run on the master currently holding the VIP,
# here k8s-master1) ---
# FIXES vs original:
#   * restored the YAML nesting (the original was flattened and not valid
#     kubeadm config);
#   * apiVersion bumped v1beta1 -> v1beta2 (v1beta1 is not served by
#     kubeadm 1.20);
#   * controlPlaneEndpoint now targets port 16443, the haproxy entry point
#     declared earlier — 6443 would bypass the load balancer.
cat > kubeadm-config.yaml << EOF
apiServer:
  certSANs:
    - k8s-master1
    - k8s-master2
    - k8s-master3
    - master.k8s.io
    - 192.168.77.14
    - 192.168.77.15
    - 192.168.77.16
    - 192.168.200.154
    - 127.0.0.1
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "master.k8s.io:16443"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.20.0
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.1.0.0/16
scheduler: {}
EOF
# --- List, then pre-pull, the images kubeadm needs ---
kubeadm config images list --config kubeadm-config.yaml
kubeadm config images pull --config kubeadm-config.yaml

# Make sure bridged traffic is passed to iptables before init.
echo "1" >/proc/sys/net/bridge/bridge-nf-call-iptables

# --- Initialize the cluster ---
kubeadm init --config kubeadm-config.yaml

# Per the init output: set up kubectl access for the current user.
mkdir -p "$HOME/.kube"
sudo cp -i /etc/kubernetes/admin.conf "$HOME/.kube/config"
sudo chown "$(id -u):$(id -g)" "$HOME/.kube/config"

# Check component status.
kubectl get cs
# If scheduler / controller-manager show as unhealthy, it is because their
# static-pod manifests under /etc/kubernetes/manifests/ set a default port of 0.
# Comment out the offending port line in each file below; the components
# restart automatically and report ok after a short wait.
vim /etc/kubernetes/manifests/kube-controller-manager.yaml
vim /etc/kubernetes/manifests/kube-scheduler.yaml
# Pods also take a little while to come up.
kubectl get pods -n kube-system
# --- Flannel CNI (run on ALL nodes). Download the CNI plugin archive, the
# flannel image tarball, and kube-flannel.yml beforehand. ---
tar xf cni-plugins-linux-amd64-v0.8.6.tgz
# FIX: make sure the CNI bin directory exists before copying into it.
mkdir -p /opt/cni/bin
cp flannel /opt/cni/bin/
docker load < flannel_v0.12.0-amd64.tar

# Apply the flannel manifest on the master node only.
kubectl apply -f kube-flannel.yml