1.区别于单master的二进制集群,多master集群对master做了一个高可用,如果master1宕机,Load Balance就会将VIP转移到master2,这样就保证了master的可靠性。
2.多节点的核心点就是需要指向一个核心的地址,我们之前在做单节点的时候已经将vip地址定义过写入k8s-cert.sh脚本文件中(192.168.18.100),vip开启apiserver,多master开启端口接受node节点的apiserver请求,此时若有新的节点加入,不是直接找master节点,而是直接找到vip进行apiserver的请求,然后vip再进行调度,分发到某一个master中进行执行,此时master收到请求之后就会给该node节点颁发证书。
3.建立负载均衡缓解了nodes对master的请求压力,减轻了master资源使用
服务器 | ip | 组件 |
---|---|---|
master1 | 192.168.88.11 | kube-apiserver kube-controller-manager kube-scheduler etcd |
master2 | 192.168.88.12 | kube-apiserver kube-controller-manager kube-scheduler |
node1 | 192.168.88.13 | kubelet kube-proxy docker flannel etcd |
node2 | 192.168.88.14 | kubelet kube-proxy docker flannel etcd |
nginx_lbm | 192.168.88.15 | nginx keepalived |
nginx_lbb | 192.168.88.16 | nginx keepalived |
VIP | 192.168.88.17 | (漂移地址,由keepalived管理) |
[root@localhost ~]# systemctl stop firewalld.service
[root@localhost ~]# setenforce 0
[root@localhost ~]# systemctl stop NetworkManager
[root@localhost ~]# systemctl disable NetworkManager
Removed symlink /etc/systemd/system/multi-user.target.wants/NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.nm-dispatcher.service.
[root@localhost ~]# iptables -F
//复制/opt/kubernetes和/opt/etcd下的所有文件到master2中
[root@localhost ~]# scp -r /opt/kubernetes/ [email protected]:/opt/
//复制etcd的证书,否则apiserver无法启动
[root@localhost ~]# scp -r /opt/etcd/ [email protected]:/opt/
//复制执行脚本到master2中
[root@localhost ~]# scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service [email protected]:/usr/lib/systemd/system/
[root@localhost ~]# cd /opt/kubernetes/cfg/
[root@localhost cfg]# ls
kube-apiserver kube-controller-manager kube-scheduler token.csv
//修改2处本地地址
[root@localhost cfg]# vim kube-apiserver
--bind-address=192.168.88.12 \
--advertise-address=192.168.88.12 \
//开启apiserver服务
[root@localhost cfg]# systemctl start kube-apiserver.service
[root@localhost cfg]# systemctl enable kube-apiserver.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.
//开启控制器服务
[root@localhost cfg]# systemctl start kube-controller-manager.service
[root@localhost cfg]# systemctl enable kube-controller-manager.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
//开启调度器服务
[root@localhost cfg]# systemctl start kube-scheduler.service
[root@localhost cfg]# systemctl enable kube-scheduler.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
[root@localhost cfg]# echo "export PATH=$PATH:/opt/kubernetes/bin/" >> /etc/profile
[root@localhost cfg]# source /etc/profile
[root@localhost cfg]# kubectl get node
NAME STATUS ROLES AGE VERSION
192.168.88.13 Ready 81m v1.12.3
192.168.88.14 Ready 63m v1.12.3
##### 1.关闭防火墙,安全功能
[root@localhost ~]# systemctl stop firewalld.service
[root@localhost ~]# setenforce 0
[root@localhost ~]# systemctl stop NetworkManager
[root@localhost ~]# systemctl disable NetworkManager
Removed symlink /etc/systemd/system/multi-user.target.wants/NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.nm-dispatcher.service.
[root@localhost ~]# cat keepalived.conf
! Configuration File for keepalived
global_defs {
# 接收邮件地址(keepalived 配置注释须用 # 或 !,不能用 //)
notification_email {
[email protected]
[email protected]
[email protected]
}
# 邮件发送地址
notification_email_from [email protected]
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id NGINX_MASTER
}
vrrp_script check_nginx {
script "/etc/nginx/check_nginx.sh"
}
vrrp_instance VI_1 {
state MASTER
interface ens33
virtual_router_id 51 # VRRP 路由 ID实例,每个实例是唯一的
priority 100 # 优先级,备服务器设置 90
advert_int 1 # 指定VRRP 心跳包通告间隔时间,默认1秒
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.88.17/24
}
track_script {
check_nginx
}
}
[root@localhost ~]# vim /etc/yum.repos.d/nginx.repo
[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0
//重新加载yum仓库
[root@localhost ~]# yum list
//安装nginx
[root@localhost ~]# yum install nginx -y
[root@localhost ~]# vi /etc/nginx/nginx.conf
//12行添加
stream {
log_format main '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
access_log /var/log/nginx/k8s-access.log main; # 日志目录(nginx 配置注释须用 #,不能用 //)
upstream k8s-apiserver {
server 192.168.88.11:6443; # master1地址
server 192.168.88.12:6443; # master2地址
}
server {
listen 6443;
proxy_pass k8s-apiserver;
}
}
//验证语法
[root@localhost ~]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
//编辑主页面,以区别master和backup
[root@localhost ~]# cd /usr/share/nginx/html/
[root@localhost html]# ls
50x.html index.html
[root@localhost html]# vi index.html
//开启nginx
[root@localhost html]# systemctl start nginx
[root@localhost html]# netstat -natp | grep nginx
tcp 0 0 0.0.0.0:6443 0.0.0.0:* LISTEN 3744/nginx: master
tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 3744/nginx: master
[root@localhost html]# yum install keepalived -y
//覆盖原配置文件
[root@localhost ~]# cp keepalived.conf /etc/keepalived/keepalived.conf
cp:是否覆盖"/etc/keepalived/keepalived.conf"? yes
[root@localhost ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
# 接收邮件地址(keepalived 配置注释须用 # 或 !,不能用 //)
notification_email {
[email protected]
[email protected]
[email protected]
}
# 邮件发送地址
notification_email_from [email protected]
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id NGINX_MASTER
}
vrrp_script check_nginx {
script "/etc/nginx/check_nginx.sh"
}
vrrp_instance VI_1 {
state MASTER # 第二台改为BACKUP
interface ens33
virtual_router_id 51 # VRRP 路由 ID实例,每个实例是唯一的
priority 100 # 优先级,备服务器设置 90
advert_int 1 # 指定VRRP 心跳包通告间隔时间,默认1秒
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.88.17/24
}
track_script {
check_nginx
}
}
//创建keepalived服务关闭脚本
[root@localhost ~]# vi /etc/nginx/check_nginx.sh
#!/bin/bash
# keepalived health-check script (invoked by the vrrp_script block):
# when no nginx process is running, stop keepalived so the VIP fails
# over to the backup load balancer.
#
# Fix: the original `ps -ef | grep nginx | egrep -cv "grep|$$"` is fragile —
# `$$` inside the pattern excludes ANY ps line that happens to contain this
# shell's PID digits, and the loose `grep nginx` also counts unrelated
# processes whose command line merely contains "nginx".
# `pgrep -x nginx` counts only processes whose name is exactly "nginx"
# (master and workers) and never matches this script itself.
count=$(pgrep -c -x nginx)
if [ "$count" -eq 0 ]; then
systemctl stop keepalived
fi
//添加权限,开服务
[root@localhost ~]# chmod +x /etc/nginx/check_nginx.sh
[root@localhost ~]# systemctl start keepalived.service
[root@localhost ~]# ip addr
2: ens33: mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 00:0c:29:a1:3f:b1 brd ff:ff:ff:ff:ff:ff
inet 192.168.88.15/24 brd 192.168.88.255 scope global ens33
valid_lft forever preferred_lft forever
inet 192.168.88.17/24 scope global secondary ens33
valid_lft forever preferred_lft forever
inet6 fe80::b644:3703:17de:c90/64 scope link
valid_lft forever preferred_lft forever
//关闭nginx
[root@localhost ~]# pkill nginx
[root@localhost ~]# systemctl status keepalived
//恢复
[root@localhost ~]# systemctl start nginx
[root@localhost ~]# systemctl start keepalived
[root@localhost ~]# cd /opt/kubernetes/cfg/
[root@localhost cfg]# vi bootstrap.kubeconfig
server: https://192.168.88.17:6443
[root@localhost cfg]# vi kubelet.kubeconfig
server: https://192.168.88.17:6443
[root@localhost cfg]# vi kube-proxy.kubeconfig
server: https://192.168.88.17:6443
[root@localhost cfg]# grep 17 *
bootstrap.kubeconfig: server: https://192.168.88.17:6443
kubelet.kubeconfig: server: https://192.168.88.17:6443
kube-proxy.kubeconfig: server: https://192.168.88.17:6443
[root@localhost cfg]# systemctl restart kubelet.service
[root@localhost cfg]# systemctl restart kube-proxy.service
[root@localhost ~]# cd /var/log/nginx/
[root@localhost nginx]# ls
access.log error.log k8s-access.log
[root@localhost nginx]# tail -f k8s-access.log
192.168.88.13 192.168.88.11:6443 - [06/May/2020:03:44:16 +0800] 200 1119
192.168.88.13 192.168.88.12:6443 - [06/May/2020:03:44:16 +0800] 200 1118
192.168.88.14 192.168.88.12:6443 - [06/May/2020:03:45:53 +0800] 200 1120
192.168.88.14 192.168.88.11:6443 - [06/May/2020:03:45:53 +0800] 200 1119
//创建pod
[root@localhost ~]# kubectl run nginx --image=nginx
kubectl run --generator=deployment/apps.v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl create instead.
deployment.apps/nginx created
//查看pod状态
[root@localhost ~]# kubectl get pods
NAME READY STATUS RESTARTS AGE
nginx-dbddb74b8-864v5 1/1 Running 0 30s
//绑定群集中的匿名用户赋予管理员权限(解决日志不可看问题)
[root@localhost ~]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
clusterrolebinding.rbac.authorization.k8s.io/cluster-system-anonymous created
//查看pod的网络,包括pod处在的node和pod中容器的地址
[root@localhost ~]# kubectl get pods -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE
nginx-dbddb74b8-864v5 1/1 Running 0 2m41s 172.17.94.2 192.168.88.13
//查看pod日志
[root@localhost cfg]# curl 172.17.94.2 //node1上查看
[root@localhost ~]# kubectl logs nginx-dbddb74b8-864v5 //master上查看
172.17.94.1 - - [05/May/2020:19:53:09 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"