Environment and deployment layout:
Server IP | Server specs | Deployed components |
---|---|---|
192.168.1.11 | 2-core CPU + 2 GB RAM + 20 GB disk | master node (etcd, kubernetes-master, flannel) |
192.168.1.12 | 2-core CPU + 2 GB RAM + 20 GB disk | node 1 (etcd, kubernetes-node, docker, flannel) |
192.168.1.13 | 2-core CPU + 2 GB RAM + 20 GB disk | node 2 (etcd, kubernetes-node, docker, flannel) |
Install the etcd service on all three servers (master, node1, and node2):
[root@master ~]# yum install -y etcd
Configure etcd:
[root@master ~]# vim /etc/etcd/etcd.conf
# Settings to modify below; note the IP addresses, ports, and etcd member name
ETCD_DATA_DIR="/var/lib/etcd/etcd1.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.1.11:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.1.11:2379,http://127.0.0.1:2379"
ETCD_NAME="etcd1"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.1.11:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.1.11:2379"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.1.11:2380,etcd2=http://192.168.1.12:2380,etcd3=http://192.168.1.13:2380"
Enable and start the etcd service (with a three-member initial cluster, the first member will not report healthy until at least one other member has started):
[root@master ~]# systemctl enable etcd
[root@master ~]# systemctl start etcd
[root@master ~]# systemctl status etcd
[root@node1 ~]# yum install -y etcd
Configure etcd:
[root@node1 ~]# vim /etc/etcd/etcd.conf
# Settings to modify below; note the IP addresses, ports, and etcd member name
ETCD_DATA_DIR="/var/lib/etcd/etcd2.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.1.12:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.1.12:2379,http://127.0.0.1:2379"
ETCD_NAME="etcd2"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.1.12:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.1.12:2379"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.1.11:2380,etcd2=http://192.168.1.12:2380,etcd3=http://192.168.1.13:2380"
Enable and start the etcd service:
[root@node1 ~]# systemctl enable etcd
[root@node1 ~]# systemctl start etcd
[root@node1 ~]# systemctl status etcd
[root@node2 ~]# yum install -y etcd
Configure etcd:
[root@node2 ~]# vim /etc/etcd/etcd.conf
# Settings to modify below; note the IP addresses, ports, and etcd member name
ETCD_DATA_DIR="/var/lib/etcd/etcd3.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.1.13:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.1.13:2379,http://127.0.0.1:2379"
ETCD_NAME="etcd3"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.1.13:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.1.13:2379"
ETCD_INITIAL_CLUSTER="etcd1=http://192.168.1.11:2380,etcd2=http://192.168.1.12:2380,etcd3=http://192.168.1.13:2380"
Enable and start the etcd service:
[root@node2 ~]# systemctl enable etcd
[root@node2 ~]# systemctl start etcd
[root@node2 ~]# systemctl status etcd
Explanation of the etcd configuration options:
[member]
ETCD_NAME: the name of this etcd member
ETCD_DATA_DIR: the directory where etcd stores its data
ETCD_SNAPSHOT_COUNT: the number of committed transactions that triggers a snapshot
ETCD_HEARTBEAT_INTERVAL: the interval between heartbeats among etcd members, in milliseconds
ETCD_ELECTION_TIMEOUT: the maximum time this member waits before starting a leader election, in milliseconds
ETCD_LISTEN_PEER_URLS: the list of addresses this member listens on for peer traffic; multiple addresses are comma-separated, each in the form scheme://IP:PORT, where scheme can be http or https
ETCD_LISTEN_CLIENT_URLS: the list of addresses this member listens on for client traffic
[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS: the peer addresses this member advertises to the rest of the cluster, used for replicating cluster data; they must therefore be reachable by every other member
ETCD_INITIAL_CLUSTER: the addresses of all members of the initial cluster, in the form ETCD_NAME=ETCD_INITIAL_ADVERTISE_PEER_URLS, comma-separated when there are multiple members
ETCD_ADVERTISE_CLIENT_URLS: the client addresses this member advertises to the rest of the cluster
Check the health of the cluster from the master:
[root@master ~]# etcdctl cluster-health
member 108d28f155454496 is healthy: got healthy result from http://192.168.1.13:2379
member caf8261ac09ba8aa is healthy: got healthy result from http://192.168.1.12:2379
member ff3afdb92f5c1db7 is healthy: got healthy result from http://192.168.1.11:2379
cluster is healthy
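Besides cluster-health, a quick way to confirm replication is to write a key on one member and read it back from another (the /test/msg key below is just a throwaway example):
[root@master ~]# etcdctl set /test/msg "hello"
hello
[root@node1 ~]# etcdctl get /test/msg
hello
[root@node1 ~]# etcdctl rm /test/msg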
Install kubernetes-master and flannel on the master, and docker-ce, kubernetes-node, and flannel on the nodes.
[root@master ~]# yum install -y kubernetes-master flannel
Configure the API server:
[root@master ~]# vim /etc/kubernetes/apiserver
# API server settings below; note the IP addresses and ports
KUBE_API_ADDRESS="--address=0.0.0.0"
KUBE_API_PORT="--port=8080"
KUBELET_PORT="--kubelet-port=10250"
KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.1.11:2379,http://192.168.1.12:2379,http://192.168.1.13:2379"
KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
KUBE_API_ARGS=""
Enable and start the core components on the master:
[root@master ~]# for SERVER in kube-apiserver kube-controller-manager kube-scheduler
do
systemctl enable $SERVER &&
systemctl restart $SERVER &&
systemctl status $SERVER
done
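To verify that the control plane came up, probe the API server's insecure port directly and ask kubectl (which talks to http://localhost:8080 by default in this era of Kubernetes) for the component statuses; each component should report Healthy:
[root@master ~]# curl http://192.168.1.11:8080/healthz
ok
[root@master ~]# kubectl get componentstatuses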
[root@node1 ~]# yum install -y docker-ce kubernetes-node flannel
Configure the shared config file:
[root@node1 ~]# vim /etc/kubernetes/config
# Shared settings below; note the master IP and port
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.1.11:8080"
Configure the kubelet:
[root@node1 ~]# vim /etc/kubernetes/kubelet
# Kubelet settings below; note the IP addresses and ports
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname-override=192.168.1.12"
KUBELET_API_SERVER="--api-servers=http://192.168.1.11:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
Enable and start the core components on node1:
[root@node1 ~]# for SERVER in kubelet kube-proxy
do
systemctl enable $SERVER &&
systemctl restart $SERVER &&
systemctl status $SERVER
done
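To double-check that the kubelet is actually serving, probe its local healthz endpoint (10248 is the kubelet's default healthz port); if anything fails to start, journalctl -u kubelet is the place to look:
[root@node1 ~]# curl http://127.0.0.1:10248/healthz
ok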
[root@node2 ~]# yum install -y docker-ce kubernetes-node flannel
Configure the shared config file:
[root@node2 ~]# vim /etc/kubernetes/config
# Shared settings below; note the master IP and port
KUBE_LOGTOSTDERR="--logtostderr=true"
KUBE_LOG_LEVEL="--v=0"
KUBE_ALLOW_PRIV="--allow-privileged=false"
KUBE_MASTER="--master=http://192.168.1.11:8080"
Configure the kubelet:
[root@node2 ~]# vim /etc/kubernetes/kubelet
# Kubelet settings below; note the IP addresses and ports
KUBELET_ADDRESS="--address=0.0.0.0"
KUBELET_HOSTNAME="--hostname-override=192.168.1.13"
KUBELET_API_SERVER="--api-servers=http://192.168.1.11:8080"
KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
KUBELET_ARGS=""
Enable and start the core components on node2:
[root@node2 ~]# for SERVER in kubelet kube-proxy
do
systemctl enable $SERVER &&
systemctl restart $SERVER &&
systemctl status $SERVER
done
The flannel network component was already installed along with the cluster packages above; next, configure the network environment.
[root@master ~]# vim /etc/sysconfig/flanneld
# flannel settings below; note the IP, port, and etcd key
FLANNEL_ETCD_ENDPOINTS="http://192.168.1.11:2379"
FLANNEL_ETCD_KEY="/coreos.com/network"
[root@node1 ~]# vim /etc/sysconfig/flanneld
# flannel settings below; note the IP, port, and etcd key
FLANNEL_ETCD_ENDPOINTS="http://192.168.1.11:2379"
FLANNEL_ETCD_KEY="/coreos.com/network"
[root@node2 ~]# vim /etc/sysconfig/flanneld
# flannel settings below; note the IP, port, and etcd key
FLANNEL_ETCD_ENDPOINTS="http://192.168.1.11:2379"
FLANNEL_ETCD_KEY="/coreos.com/network"
Write the flannel network configuration into etcd (run once, on the master):
[root@master ~]# etcdctl mk /coreos.com/network/config '{"Network": "10.1.0.0/16"}'
Note:
To recreate the network, the existing one must be deleted first:
[root@master ~]# etcdctl rm /coreos.com/network/ --recursive
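The value stored under /coreos.com/network/config is flannel's network configuration in JSON. Besides Network, flannel also understands optional SubnetLen and Backend fields; the vxlan variant below is an example of a richer config, not what this walkthrough uses (flannel's default backend is udp):
[root@master ~]# etcdctl mk /coreos.com/network/config '{"Network": "10.1.0.0/16", "SubnetLen": 24, "Backend": {"Type": "vxlan"}}'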
Reset the docker0 bridge configuration.
Delete the docker0 bridge that Docker creates by default at startup. When flannel starts, it obtains a subnet and assigns docker0 an IP address from it to serve as that subnet's gateway; if docker0 already has an IP address configured at that point, flannel will fail to start.
[root@node1 ~]# ip link del docker0
[root@node2 ~]# ip link del docker0
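With docker0 out of the way, enable and start flanneld on all three machines, then restart docker on the nodes. flannel writes its leased subnet to /run/flannel/subnet.env, and the CentOS flannel package typically ships a docker systemd drop-in that feeds these values to the daemon; with docker-ce you may need to pass them to dockerd yourself, so treat the commands below as a sketch:
[root@master ~]# systemctl enable flanneld && systemctl restart flanneld
[root@node1 ~]# systemctl enable flanneld && systemctl restart flanneld
[root@node1 ~]# systemctl restart docker
[root@node2 ~]# systemctl enable flanneld && systemctl restart flanneld
[root@node2 ~]# systemctl restart docker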
Finally, verify the cluster from the master:
[root@master ~]# kubectl get nodes
NAME STATUS AGE
192.168.1.12 Ready 3d
192.168.1.13 Ready 3d
[root@master ~]# etcdctl member list
108d28f155454496: name=etcd3 peerURLs=http://192.168.1.13:2380 clientURLs=http://192.168.1.13:2379 isLeader=false
caf8261ac09ba8aa: name=etcd2 peerURLs=http://192.168.1.12:2380 clientURLs=http://192.168.1.12:2379 isLeader=true
ff3afdb92f5c1db7: name=etcd1 peerURLs=http://192.168.1.11:2380 clientURLs=http://192.168.1.11:2379 isLeader=false
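As a final look at the overlay network, flannel records each node's leased subnet under the etcd key configured earlier (the subnet values shown here are illustrative; yours will differ):
[root@master ~]# etcdctl ls /coreos.com/network/subnets
/coreos.com/network/subnets/10.1.15.0-24
/coreos.com/network/subnets/10.1.73.0-24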