The components running on a Kubernetes master node:
kube-apiserver-k8s-master01
etcd-k8s-master01
kube-controller-manager-k8s-master01
kube-scheduler-k8s-master01
kubelet
kube-proxy-lcgr4
Principle: master-node high availability is mainly about making the API server highly available (keepalived/heartbeat + nginx/haproxy).
Solution: the Wise2C (睿云) Breeze approach, keepalived + haproxy running as containers.
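The resulting traffic path is: clients (kubectl, kubelets) → VIP 192.168.43.100:6444, held by whichever HA node keepalived elects master → the local HAProxy → one of the kube-apiservers on :6443. Once the cluster is up, a quick sanity check is to hit the apiserver health endpoint through the VIP (a sketch; -k skips TLS verification):
[root@k8s-ha-master01 ~]# curl -k https://192.168.43.100:6444/healthz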
Five scripts and configuration files are involved. First, the HAProxy configuration, /data/lb/etc/haproxy.cfg:
global
    log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    maxconn 4096
    #chroot /usr/share/haproxy
    #user haproxy
    #group haproxy
    daemon

defaults
    log global
    mode http
    option httplog
    option dontlognull
    retries 3
    option redispatch
    timeout connect 5000
    timeout client 50000
    timeout server 50000

frontend stats-front
    bind *:8081
    mode http
    default_backend stats-back

# keypoint_01: the load-balancer entry point for the apiservers, bind *:6444
frontend fe_k8s_6444
    bind *:6444
    mode tcp
    timeout client 1h
    log global
    option tcplog
    default_backend be_k8s_6443
    acl is_websocket hdr(Upgrade) -i WebSocket
    acl is_websocket hdr_beg(Host) -i ws

backend stats-back
    mode http
    balance roundrobin
    stats uri /haproxy/stats
    stats auth pxcstats:secret

backend be_k8s_6443
    mode tcp
    timeout queue 1h
    timeout server 1h
    timeout connect 1h
    log global
    balance roundrobin
    # keypoint_02: only the IP of the first node to be started; the others stay
    # commented out until they have joined the cluster
    server rancher01 192.168.43.110:6443
    # server rancher02 192.168.43.120:6443
    # server rancher03 192.168.43.130:6443
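Before starting the container, the configuration can be syntax-checked with HAProxy's -c flag (a sketch; haproxy:1.9 is just an arbitrary stock image used for the check, not part of this setup):
[root@k8s-ha-master01 lb]# docker run --rm -v /data/lb/etc/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg \
  haproxy:1.9 haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg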
Edit the HAProxy container startup script:
[root@k8s-ha-master01 lb]# vim /data/lb/start-haproxy.sh
#!/bin/bash
MasterIP1=192.168.43.110
MasterIP2=192.168.43.120
MasterIP3=192.168.43.130
MasterPort=6443
docker run -d --restart=always --name HAProxy-K8S -p 6444:6444 \
-e MasterIP1=$MasterIP1 \
-e MasterIP2=$MasterIP2 \
-e MasterIP3=$MasterIP3 \
-e MasterPort=$MasterPort \
-v /data/lb/etc/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg \
wise2c/haproxy-k8s
Run the HAProxy container startup script:
[root@k8s-ha-master01 lb]# ./start-haproxy.sh
[root@k8s-ha-master01 lb]# netstat -antpu | grep 6444
tcp6 0 0 :::6444 :::* LISTEN 1795/docker-proxy
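Note that start-haproxy.sh only publishes port 6444, so the stats frontend on 8081 defined in haproxy.cfg is not reachable from the host. If the stats page is wanted, add a second port mapping to the docker run line (an optional tweak, not part of the original script) and query it with the credentials from the config:
# add to the docker run options in start-haproxy.sh:  -p 8081:8081
curl -u pxcstats:secret http://127.0.0.1:8081/haproxy/stats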
The keepalived container startup script, /data/lb/start-keepalived.sh:
#!/bin/bash
# keypoint_01: the VIP and the interface it will be assigned to
VIRTUAL_IP=192.168.43.100
INTERFACE=ens34
NETMASK_BIT=24
CHECK_PORT=6444
RID=10
VRID=160
MCAST_GROUP=224.0.0.18
docker run -itd --restart=always --name=Keepalived-K8S \
--net=host --cap-add=NET_ADMIN \
-e VIRTUAL_IP=$VIRTUAL_IP \
-e INTERFACE=$INTERFACE \
-e CHECK_PORT=$CHECK_PORT \
-e RID=$RID \
-e VRID=$VRID \
-e NETMASK_BIT=$NETMASK_BIT \
-e MCAST_GROUP=$MCAST_GROUP \
wise2c/keepalived-k8s
Start it:
[root@k8s-ha-master01 lb]# ./start-keepalived.sh
[root@k8s-ha-master01 lb]# ip addr show
3: ens34: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500
link/ether 00:50:56:2c:0b:2f brd ff:ff:ff:ff:ff:ff
inet 192.168.43.110/24 brd 192.168.43.255 scope global noprefixroute ens34
valid_lft forever preferred_lft forever
inet 192.168.43.100/24 scope global secondary ens34
valid_lft forever preferred_lft forever
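A simple failover test (assuming all three HA nodes run the same two containers): stop keepalived on the node holding the VIP, watch the address move to another node, then start it again.
[root@k8s-ha-master01 lb]# docker stop Keepalived-K8S      # release the VIP on this node
[root@k8s-ha-master02 lb]# ip addr show ens34 | grep 192.168.43.100   # should appear here within seconds
[root@k8s-ha-master01 lb]# docker start Keepalived-K8S     # restore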
The kubeadm initialization configuration, kubeadm-config.yaml:
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  # keypoint_01: IP of the current host
  advertiseAddress: 192.168.43.110
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  # keypoint_02: node name of the current host
  name: k8s-ha-master01
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
# keypoint_03: the VIP and HAProxy port, shared by all masters as the control plane endpoint
controlPlaneEndpoint: "192.168.43.100:6444"
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
# keypoint_04: the Kubernetes version in use
kubernetesVersion: v1.15.1
networking:
  dnsDomain: cluster.local
  # keypoint_05: the default Pod CIDR expected by the flannel network plugin
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
# keypoint_06: switch kube-proxy to IPVS forwarding
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
featureGates:
  SupportIPVSProxyMode: true
mode: ipvs
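With the config in place, initialize the first master from it. A sketch of the command for v1.15 (--upload-certs replaced --experimental-upload-certs in 1.15; tee just saves the join commands printed at the end):
[root@k8s-ha-master01 k8s-install]# kubeadm init --config=kubeadm-config.yaml --upload-certs | tee kubeadm-init.log
[root@k8s-ha-master01 k8s-install]# mkdir -p $HOME/.kube
[root@k8s-ha-master01 k8s-install]# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
[root@k8s-ha-master01 k8s-install]# chown $(id -u):$(id -g) $HOME/.kube/config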
After initialization, kubeadm stores the cluster configuration in the kubeadm-config ConfigMap, which can be inspected (or edited) at any time:
[root@k8s-ha-master01 ~]# kubectl edit configmaps -n kube-system kubeadm-config
[root@k8s-ha-master01 k8s-install]# kubectl get node
[root@k8s-ha-master01 k8s-install]# kubectl get pod -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-5c98db65d4-5x5hj 0/1 Pending 0 3m7s
coredns-5c98db65d4-b6mzd 0/1 Pending 0 3m7s
etcd-k8s-ha-master01 1/1 Running 0 2m4s
kube-apiserver-k8s-ha-master01 1/1 Running 0 2m22s
kube-controller-manager-k8s-ha-master01 1/1 Running 0 2m20s
kube-proxy-n2knf 1/1 Running 0 3m7s
kube-scheduler-k8s-ha-master01 1/1 Running 0 2m16s
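The coredns pods stay Pending because no CNI network plugin is installed yet; they will start once flannel is deployed below. This can be confirmed from the pod's events (pod name taken from the listing above):
[root@k8s-ha-master01 k8s-install]# kubectl -n kube-system describe pod coredns-5c98db65d4-5x5hj | tail -n 5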
[root@k8s-ha-master01 k8s-install]# kubectl get svc -n kube-system
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kube-dns ClusterIP 10.96.0.10
[root@k8s-ha-master01 k8s-install]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1
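The remaining masters join with the control-plane join command printed by kubeadm init (saved in kubeadm-init.log if tee was used above). Its shape is roughly the following; the CA hash and certificate key are placeholders, the real values come from the init output:
kubeadm join 192.168.43.100:6444 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:<hash-from-init-output> \
    --control-plane --certificate-key <key-from-init-output>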
After joining, each additional master has its own admin kubeconfig; on k8s-ha-master02 the apiserver address in it is adjusted:
[root@k8s-ha-master02 k8s-install]# vim $HOME/.kube/config
server: https://192.168.43.130:6443
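This points kubectl on that node directly at one apiserver instance rather than through the VIP; which endpoint kubectl is actually using can be checked with:
[root@k8s-ha-master02 k8s-install]# kubectl cluster-info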
With all masters up, enable all three apiserver backends in the HAProxy configuration (the lines to change are the previously commented-out server entries):
[root@k8s-ha-master01 k8s-install]# vim /data/lb/etc/haproxy.cfg
*********
server rancher01 192.168.43.110:6443
server rancher02 192.168.43.120:6443
server rancher03 192.168.43.130:6443
[root@k8s-ha-master01 etc]# docker rm -f HAProxy-K8S && bash /data/lb/start-haproxy.sh
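The same edit-and-restart must be done on every node that runs the HAProxy-K8S container; a loop over the three nodes (a sketch, assuming root SSH access between them):
for h in 192.168.43.110 192.168.43.120 192.168.43.130; do
    ssh root@$h 'docker rm -f HAProxy-K8S && bash /data/lb/start-haproxy.sh'
done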
Perform the following on any one of the master nodes.
Find the image version referenced by the manifest and pull the required images:
[root@k8s-ha-master01 k8s-install]# cat kube-flannel.yml | grep image
[root@k8s-ha-master01 k8s-install]# docker pull quay.io/coreos/flannel:v0.11.0-amd64
[root@k8s-ha-master01 k8s_install]# kubectl apply -f kube-flannel.yml
[root@k8s-ha-master01 k8s_install]# kubectl get pod -n kube-system
[root@k8s-master01 k8s_documents]# kubectl get node
NAME STATUS ROLES AGE VERSION
k8s-ha-master01 Ready master 50m v1.15.1
[root@k8s-master01 k8s_documents]# ifconfig
flannel.1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1450
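The flannel daemonset should now have one pod per node; kube-flannel.yml labels its pods app=flannel, so they can be listed with:
[root@k8s-master01 k8s_documents]# kubectl get pod -n kube-system -l app=flannel -o wide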
Verify control-plane HA. The controller-manager and scheduler record their elected leader in an annotation on their endpoints objects, and etcd health can be queried through any member:
[root@k8s-ha-master02 ~]# kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml
[root@k8s-ha-master02 ~]# kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml
[root@k8s-ha-master02 ~]# kubectl -n kube-system exec etcd-k8s-ha-master03 -- etcdctl --endpoints=https://192.168.43.110:2379 --ca-file=/etc/kubernetes/pki/etcd/ca.crt --cert-file=/etc/kubernetes/pki/etcd/server.crt --key-file=/etc/kubernetes/pki/etcd/server.key cluster-health
member 10260bfec73117a0 is healthy: got healthy result from https://192.168.43.110:2379
member 258562861e8b997b is healthy: got healthy result from https://192.168.43.130:2379
member 8b0e262770f25357 is healthy: got healthy result from https://192.168.43.120:2379
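The --ca-file/--cert-file/--key-file flags above belong to the etcdctl v2 API, which the etcd image defaults to. The equivalent check with the v3 API (a sketch; the flag names change to --cacert/--cert/--key):
[root@k8s-ha-master02 ~]# kubectl -n kube-system exec etcd-k8s-ha-master03 -- sh -c \
  "ETCDCTL_API=3 etcdctl --endpoints=https://192.168.43.110:2379 \
   --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/server.crt \
   --key=/etc/kubernetes/pki/etcd/server.key endpoint health"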
Finally, confirm that all three masters are registered as API endpoints in the kubeadm-config ConfigMap:
[root@k8s-ha-master03 ~]# kubectl edit configmaps -n kube-system kubeadm-config
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
data:
  ClusterConfiguration: |
    apiServer:
      extraArgs:
        authorization-mode: Node,RBAC
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    # keypoint_01: the shared control plane endpoint (VIP:6444)
    controlPlaneEndpoint: 192.168.43.100:6444
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: k8s.gcr.io
    kind: ClusterConfiguration
    kubernetesVersion: v1.15.1
    networking:
      dnsDomain: cluster.local
      # keypoint_02: the flannel Pod CIDR
      podSubnet: 10.244.0.0/16
      serviceSubnet: 10.96.0.0/12
    scheduler: {}
  ClusterStatus: |
    apiEndpoints:
      # keypoint_03: all three masters are registered
      k8s-ha-master01:
        advertiseAddress: 192.168.43.110
        bindPort: 6443
      k8s-ha-master02:
        advertiseAddress: 192.168.43.120
        bindPort: 6443
      k8s-ha-master03:
        advertiseAddress: 192.168.43.130
        bindPort: 6443
    apiVersion: kubeadm.k8s.io/v1beta2
    kind: ClusterStatus
kind: ConfigMap
metadata:
  creationTimestamp: "2020-03-11T07:38:32Z"
  name: kubeadm-config
  namespace: kube-system
  resourceVersion: "4496"
  selfLink: /api/v1/namespaces/kube-system/configmaps/kubeadm-config
  uid: c75e3ed8-c857-408c-96c8-0b7d176ddb2b
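If all three masters appear under apiEndpoints, the control plane is fully formed. A last sanity check is that every master reports Ready and the apiserver stays reachable through the VIP even when one node is taken down:
[root@k8s-ha-master01 ~]# kubectl get nodes
[root@k8s-ha-master01 ~]# curl -k https://192.168.43.100:6444/version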