K8S---多节点部署---基于单节点(5)

K8S—多节点部署—基于单节点(5)

一.多节点的优势

1.区别于单master的二进制集群,多master集群对master做了一个高可用,如果master1宕机,Load Balance就会将VIP转移到master2,这样就保证了master的可靠性。

2.多节点的核心点就是需要指向一个核心的地址,我们之前在做单节点的时候已经将vip地址定义过写入k8s-cert.sh脚本文件中(192.168.18.100),vip开启apiserver,多master开启端口接受node节点的apiserver请求,此时若有新的节点加入,不是直接找master节点,而是直接找到vip进行apiserver的请求,然后vip再进行调度,分发到某一个master中进行执行,此时master收到请求之后就会给该node节点颁发证书。

3.建立负载均衡缓解了nodes对master的请求压力,减轻了master资源使用

二.部署多节点

基于单master基础之上操作,添加一个master2(可参考前篇博客)
利用nginx做负载均衡,利用keepalived做负载均衡器的高可用
利用keepalived给master提供的虚拟IP地址,给node访问连接apiserver
服务器 ip 组件
master1 192.168.88.11 kube-apiserver kube-controller-manager kube-scheduler etcd
master2 192.168.88.12 kube-apiserver kube-controller-manager kube-scheduler
node1 192.168.88.13 kubelet kube-proxy docker flannel etcd
node2 192.168.88.14 kubelet kube-proxy docker flannel etcd
nginx_lbm 192.168.88.15 nginx keepalived
nginx_lbb 192.168.88.16 nginx keepalived
VIP 192.168.88.17
1.修改IP,关闭防火墙,关闭网络管理
[root@localhost ~]# systemctl stop firewalld.service 
[root@localhost ~]# setenforce 0
[root@localhost ~]# systemctl stop NetworkManager
[root@localhost ~]# systemctl disable NetworkManager
Removed symlink /etc/systemd/system/multi-user.target.wants/NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.nm-dispatcher.service.
[root@localhost ~]# iptables -F
2.把master1上的文件和脚本传给master2
//复制/opt/kubernetes和/opt/etcd下的所有文件到master2中
[root@localhost ~]# scp -r /opt/kubernetes/ [email protected]:/opt/

//复制etcd的证书,否则apiserver无法启动
[root@localhost ~]# scp -r /opt/etcd/ [email protected]:/opt/

//复制执行脚本到master2中
[root@localhost ~]# scp /usr/lib/systemd/system/{kube-apiserver,kube-controller-manager,kube-scheduler}.service [email protected]:/usr/lib/systemd/system/
3.master2中修改kube-apiserver中的IP地址
[root@localhost ~]# cd /opt/kubernetes/cfg/
[root@localhost cfg]# ls
kube-apiserver  kube-controller-manager  kube-scheduler  token.csv

//修改2处本地地址
[root@localhost cfg]# vim kube-apiserver 
--bind-address=192.168.88.12 \
--advertise-address=192.168.88.12 \
4.开服务并验证
//开启apiserver服务

[root@localhost cfg]# systemctl start kube-apiserver.service 
[root@localhost cfg]# systemctl enable kube-apiserver.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-apiserver.service to /usr/lib/systemd/system/kube-apiserver.service.

//开启控制器服务

[root@localhost cfg]# systemctl start kube-controller-manager.service
[root@localhost cfg]# systemctl enable kube-controller-manager.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.

//开启调度器服务

[root@localhost cfg]# systemctl start kube-scheduler.service
[root@localhost cfg]# systemctl enable kube-scheduler.service
Created symlink from /etc/systemd/system/multi-user.target.wants/kube-scheduler.service to /usr/lib/systemd/system/kube-scheduler.service.
5.添加执行脚本到全局变量
[root@localhost cfg]# echo "export PATH=$PATH:/opt/kubernetes/bin/" >> /etc/profile
[root@localhost cfg]# source /etc/profile
6.查看群集节点
[root@localhost cfg]# kubectl get node
NAME            STATUS   ROLES    AGE   VERSION
192.168.88.13   Ready    <none>   81m   v1.12.3
192.168.88.14   Ready    <none>   63m   v1.12.3

三.部署负载均衡

##### 1.关闭防火墙,安全功能

[root@localhost ~]# systemctl stop firewalld.service
[root@localhost ~]# setenforce 0
[root@localhost ~]# systemctl stop NetworkManager
[root@localhost ~]# systemctl disable NetworkManager
Removed symlink /etc/systemd/system/multi-user.target.wants/NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.NetworkManager.service.
Removed symlink /etc/systemd/system/dbus-org.freedesktop.nm-dispatcher.service.
2.编辑keepalived的配置文件模板
[root@localhost ~]# cat keepalived.conf 
! Configuration File for keepalived 

global_defs { 

//接收邮件地址 

   notification_email { 
     [email protected] 
     [email protected] 
     [email protected] 
   } 

//邮件发送地址 

   notification_email_from [email protected]  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_MASTER 
} 

vrrp_script check_nginx {
    script "/etc/nginx/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state MASTER 
    interface ens33
    virtual_router_id 51 # VRRP 路由 ID实例,每个实例是唯一的 
    priority 100    # 优先级,备服务器设置 90 
    advert_int 1    # 指定VRRP 心跳包通告间隔时间,默认1秒 
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    virtual_ipaddress { 
        10.0.0.188/24 
    } 
    track_script {
        check_nginx
    } 
}
3.编辑nginx的源,并且安装nginx
[root@localhost ~]# vim /etc/yum.repos.d/nginx.repo

[nginx]
name=nginx repo
baseurl=http://nginx.org/packages/centos/7/$basearch/
gpgcheck=0

//重新加载yum仓库

[root@localhost ~]# yum list

//安装nginx

[root@localhost ~]# yum install nginx -y
4.编辑nginx配置文件,添加负载均衡,开服务
[root@localhost ~]# vi /etc/nginx/nginx.conf

//12行添加

stream {
        log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
        access_log  /var/log/nginx/k8s-access.log  main;	//日志目录
        upstream k8s-apiserver {
                server 192.168.88.11:6443;	//master1地址
                server 192.168.88.12:6443;	//master2地址
        }
        server {
                listen 6443;
                proxy_pass k8s-apiserver;
        }
}

//验证语法

[root@localhost ~]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful

//编辑主页面,以区别master和backup

[root@localhost ~]# cd /usr/share/nginx/html/
[root@localhost html]# ls
50x.html  index.html
[root@localhost html]# vi index.html 

//开启nginx

[root@localhost html]# systemctl start nginx
[root@localhost html]# netstat -natp | grep nginx
tcp        0      0 0.0.0.0:6443            0.0.0.0:*               LISTEN      3744/nginx: master  
tcp        0      0 0.0.0.0:80              0.0.0.0:*               LISTEN      3744/nginx: master 
5.安装keepalived
[root@localhost html]# yum install keepalived -y

//覆盖原配置文件

[root@localhost ~]# cp keepalived.conf /etc/keepalived/keepalived.conf
cp:是否覆盖"/etc/keepalived/keepalived.conf"? yes
6.配置keepalived(backup,需要修改优先级)
[root@localhost ~]# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived 

global_defs { 

//接收邮件地址 

   notification_email { 
     [email protected] 
     [email protected] 
     [email protected] 
   } 

//邮件发送地址 

   notification_email_from [email protected]  
   smtp_server 127.0.0.1 
   smtp_connect_timeout 30 
   router_id NGINX_MASTER 
} 

vrrp_script check_nginx {
    script "/etc/nginx/check_nginx.sh"
}

vrrp_instance VI_1 { 
    state MASTER 	//第二台改为BACKUP
    interface ens33
    virtual_router_id 51 # VRRP 路由 ID实例,每个实例是唯一的 
    priority 100    # 优先级,备服务器设置 90 
    advert_int 1    # 指定VRRP 心跳包通告间隔时间,默认1秒 
    authentication { 
        auth_type PASS      
        auth_pass 1111 
    }  
    virtual_ipaddress { 
        192.168.88.17/24 
    } 
    track_script {
        check_nginx
    } 
}

//创建keepalived服务关闭脚本

[root@localhost ~]# vi /etc/nginx/check_nginx.sh

#!/bin/bash
# Health check run by keepalived's vrrp_script: if nginx has died on this
# node, stop keepalived so the VIP fails over to the backup LB.
# NOTE: keepalived execs this file directly, so the shebang (plus the
# chmod +x done at install time) is required.

# pgrep -x matches the exact process name "nginx" (master and workers),
# so this never counts the check script itself — no grep/$$ exclusion needed.
count=$(pgrep -c -x nginx)

if [ "$count" -eq 0 ]; then
    systemctl stop keepalived
fi

//添加权限,开服务

[root@localhost ~]# chmod +x /etc/nginx/check_nginx.sh
[root@localhost ~]# systemctl start keepalived.service
7.查看VIP(backup中没有)
[root@localhost ~]# ip addr
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 00:0c:29:a1:3f:b1 brd ff:ff:ff:ff:ff:ff
    inet 192.168.88.15/24 brd 192.168.88.255 scope global ens33
       valid_lft forever preferred_lft forever
    inet 192.168.88.17/24 scope global secondary ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::b644:3703:17de:c90/64 scope link 
       valid_lft forever preferred_lft forever
8.验证负载均衡中的漂移地址
//关闭nginx

[root@localhost ~]# pkill nginx

[root@localhost ~]# systemctl status keepalived

//恢复

[root@localhost ~]# systemctl start nginx

[root@localhost ~]# systemctl start keepalived
9.node节点指向LB高可用群集(2个node都做)
修改两个node节点配置文件,server ip 地址为统一的VIP地址
[root@localhost ~]# cd /opt/kubernetes/cfg/
[root@localhost cfg]# vi bootstrap.kubeconfig
server: https://192.168.88.17:6443

[root@localhost cfg]# vi kubelet.kubeconfig
server: https://192.168.88.17:6443

[root@localhost cfg]#  vi kube-proxy.kubeconfig
server: https://192.168.88.17:6443
10.自检
[root@localhost cfg]# grep 17 *
bootstrap.kubeconfig:    server: https://192.168.88.17:6443
kubelet.kubeconfig:    server: https://192.168.88.17:6443
kube-proxy.kubeconfig:    server: https://192.168.88.17:6443
11.重启服务
[root@localhost cfg]# systemctl restart kubelet.service
[root@localhost cfg]# systemctl restart kube-proxy.service
12.查看nginx的日志,看是否有node访问vip
[root@localhost ~]# cd /var/log/nginx/
[root@localhost nginx]# ls
access.log  error.log  k8s-access.log
[root@localhost nginx]# tail -f k8s-access.log
192.168.88.13 192.168.88.11:6443 - [06/May/2020:03:44:16 +0800] 200 1119
192.168.88.13 192.168.88.12:6443 - [06/May/2020:03:44:16 +0800] 200 1118
192.168.88.14 192.168.88.12:6443 - [06/May/2020:03:45:53 +0800] 200 1120
192.168.88.14 192.168.88.11:6443 - [06/May/2020:03:45:53 +0800] 200 1119
13.在master上创建pod测试
//创建pod
[root@localhost ~]# kubectl run nginx --image=nginx
kubectl run --generator=deployment/apps.v1beta1 is DEPRECATED and will be removed in a future version. Use kubectl create instead.
deployment.apps/nginx created

//查看pod状态

[root@localhost ~]# kubectl get pods
NAME                    READY   STATUS    RESTARTS   AGE
nginx-dbddb74b8-864v5   1/1     Running   0          30s

//绑定群集中的匿名用户赋予管理员权限(解决日志不可看问题)
[root@localhost ~]# kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=system:anonymous
clusterrolebinding.rbac.authorization.k8s.io/cluster-system-anonymous created

//查看pod的网络,包括pod处在的node和pod中容器的地址
[root@localhost ~]# kubectl get pods -o wide
NAME                    READY   STATUS    RESTARTS   AGE     IP            NODE            NOMINATED NODE
nginx-dbddb74b8-864v5   1/1     Running   0          2m41s   172.17.94.2   192.168.88.13   

//查看pod日志
[root@localhost cfg]# curl 172.17.94.2		//node1上查看

[root@localhost ~]# kubectl logs nginx-dbddb74b8-864v5	//master上查看
172.17.94.1 - - [05/May/2020:19:53:09 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"

你可能感兴趣的:(K8S)