etcd is a critical component of a Kubernetes cluster. It stores the cluster's entire network configuration and the state of all objects: every piece of persistent state in Kubernetes is kept in etcd as key-value pairs, and etcd also provides the distributed coordination service. The other Kubernetes components can be described as stateless precisely because they keep all of their data in etcd.
Since etcd supports clustering, in this lab we deploy etcd on all three hosts.
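To make the key-value claim concrete: once the API server is running (installed later in this series), Kubernetes objects are visible as keys under the /registry prefix. A sketch, assuming the endpoint and certificate paths set up in the rest of this section and that the API server uses the etcd v3 storage backend (the default in recent Kubernetes releases):
# List Kubernetes object keys stored in etcd (v3 API); run only after the
# whole control plane is up.
ETCDCTL_API=3 /opt/kubernetes/bin/etcdctl \
  --endpoints=https://192.168.219.135:2379 \
  --cacert=/opt/kubernetes/ssl/ca.pem \
  --cert=/opt/kubernetes/ssl/etcd.pem \
  --key=/opt/kubernetes/ssl/etcd-key.pem \
  get /registry --prefix --keys-only | head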
[root@linux-node1 src]# pwd
/usr/local/src
[root@linux-node1 src]# wget https://github.com/coreos/etcd/releases/download/v3.2.18/etcd-v3.2.18-linux-amd64.tar.gz
[root@linux-node1 src]# tar zxf etcd-v3.2.18-linux-amd64.tar.gz # unpack etcd
[root@linux-node1 src]# cd etcd-v3.2.18-linux-amd64 # we need two binaries from here: etcd and etcdctl (etcdctl is the CLI used to operate etcd)
[root@linux-node1 etcd-v3.2.18-linux-amd64]# cp etcd etcdctl /opt/kubernetes/bin/
[root@linux-node1 etcd-v3.2.18-linux-amd64]# scp etcd etcdctl 192.168.219.136:/opt/kubernetes/bin/
[root@linux-node1 etcd-v3.2.18-linux-amd64]# scp etcd etcdctl 192.168.219.137:/opt/kubernetes/bin/
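A quick sanity check that the binary landed on each of the three nodes and is the expected version (assuming /opt/kubernetes/bin is the path used throughout this lab):
# Run on every node; should report version 3.2.18
/opt/kubernetes/bin/etcd --version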
[root@linux-node1 ~]# cd /usr/local/src/ssl
Create the certificate signing request for etcd. The IP addresses in "hosts" must list every node of the etcd cluster (JSON does not allow comments, so keep notes like this outside the file).
[root@linux-node1 ssl]# vim etcd-csr.json
{
  "CN": "etcd",
  "hosts": [
    "127.0.0.1",
    "192.168.219.135",
    "192.168.219.136",
    "192.168.219.137"
  ],
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "ShangHai",
      "L": "ShangHai",
      "O": "k8s",
      "OU": "System"
    }
  ]
}
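A quick way to catch a stray comma or an accidental comment before feeding the file to cfssl (assuming Python is available on the host, as it is on a default CentOS install):
# Validate the CSR JSON; prints the parsed document or an error
python -m json.tool etcd-csr.json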
[root@linux-node1 ssl]# cfssl gencert -ca=/opt/kubernetes/ssl/ca.pem \
-ca-key=/opt/kubernetes/ssl/ca-key.pem \
-config=/opt/kubernetes/ssl/ca-config.json \
-profile=kubernetes etcd-csr.json | cfssljson -bare etcd
[root@linux-node1 ssl]# ls -l etcd*
-rw-r--r--. 1 root root 1066 May 8 09:45 etcd.csr
-rw-r--r--. 1 root root 307 May 8 10:35 etcd-csr.json
-rw-------. 1 root root 1675 May 8 09:45 etcd-key.pem
-rw-r--r--. 1 root root 1440 May 8 09:45 etcd.pem
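It is worth confirming that the generated certificate really carries all three node IPs in its Subject Alternative Name, since a missing IP is the most common cause of TLS errors later:
# Inspect the SANs embedded in the new etcd certificate
openssl x509 -in etcd.pem -noout -text | grep -A1 "Subject Alternative Name"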
[root@linux-node1 ssl]# cp etcd*.pem /opt/kubernetes/ssl
[root@linux-node1 ssl]# scp etcd*.pem 192.168.219.136:/opt/kubernetes/ssl
[root@linux-node1 ssl]# scp etcd*.pem 192.168.219.137:/opt/kubernetes/ssl
etcd listens on two ports: 2379 for client (external) communication and 2380 for peer (internal) communication between cluster members.
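If firewalld happens to be enabled on these hosts (this lab normally turns it off), both ports must be opened on every node; a sketch:
# Only needed when firewalld is running
firewall-cmd --permanent --add-port=2379-2380/tcp
firewall-cmd --reload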
[root@linux-node1 ~]# vim /opt/kubernetes/cfg/etcd.conf
#[member]
# Node name; ETCD_NAME must be different on every node
ETCD_NAME="etcd-node1"
# etcd data directory
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
# Peer listen URL; change to this node's own IP on every node
ETCD_LISTEN_PEER_URLS="https://192.168.219.135:2380"
# Client listen URL; change to this node's own IP on every node
ETCD_LISTEN_CLIENT_URLS="https://192.168.219.135:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.219.135:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
# Full cluster membership list; identical on all three nodes
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.219.135:2380,etcd-node2=https://192.168.219.136:2380,etcd-node3=https://192.168.219.137:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.219.135:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
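The node2 and node3 files differ from node1's only in ETCD_NAME and the four URLs that carry the node's own IP. Instead of editing each file by hand (as shown next), the per-node values can also be patched with sed; a sketch for linux-node2, assuming the same paths and SSH access used above (repeat with etcd-node3 / 192.168.219.137 for the third node):
# Derive node2's config from node1's and copy it into place
sed -e 's|^ETCD_NAME=.*|ETCD_NAME="etcd-node2"|' \
    -e 's|^ETCD_LISTEN_PEER_URLS=.*|ETCD_LISTEN_PEER_URLS="https://192.168.219.136:2380"|' \
    -e 's|^ETCD_LISTEN_CLIENT_URLS=.*|ETCD_LISTEN_CLIENT_URLS="https://192.168.219.136:2379,https://127.0.0.1:2379"|' \
    -e 's|^ETCD_INITIAL_ADVERTISE_PEER_URLS=.*|ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.219.136:2380"|' \
    -e 's|^ETCD_ADVERTISE_CLIENT_URLS=.*|ETCD_ADVERTISE_CLIENT_URLS="https://192.168.219.136:2379"|' \
    /opt/kubernetes/cfg/etcd.conf > /tmp/etcd.conf.node2
scp /tmp/etcd.conf.node2 192.168.219.136:/opt/kubernetes/cfg/etcd.conf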
[root@linux-node2 ~]# vim /opt/kubernetes/cfg/etcd.conf
#[member]
# Node name; ETCD_NAME must be different on every node
ETCD_NAME="etcd-node2"
# etcd data directory
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
# Peer listen URL; change to this node's own IP on every node
ETCD_LISTEN_PEER_URLS="https://192.168.219.136:2380"
# Client listen URL; change to this node's own IP on every node
ETCD_LISTEN_CLIENT_URLS="https://192.168.219.136:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.219.136:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
# Full cluster membership list; identical on all three nodes
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.219.135:2380,etcd-node2=https://192.168.219.136:2380,etcd-node3=https://192.168.219.137:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.219.136:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
[root@linux-node3 ~]# vim /opt/kubernetes/cfg/etcd.conf
#[member]
# Node name; ETCD_NAME must be different on every node
ETCD_NAME="etcd-node3"
# etcd data directory
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
# Peer listen URL; change to this node's own IP on every node
ETCD_LISTEN_PEER_URLS="https://192.168.219.137:2380"
# Client listen URL; change to this node's own IP on every node
ETCD_LISTEN_CLIENT_URLS="https://192.168.219.137:2379,https://127.0.0.1:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#[cluster]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.219.137:2380"
# if you use different ETCD_NAME (e.g. test),
# set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
# Full cluster membership list; identical on all three nodes
ETCD_INITIAL_CLUSTER="etcd-node1=https://192.168.219.135:2380,etcd-node2=https://192.168.219.136:2380,etcd-node3=https://192.168.219.137:2380"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.219.137:2379"
#[security]
CLIENT_CERT_AUTH="true"
ETCD_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
PEER_CLIENT_CERT_AUTH="true"
ETCD_PEER_CA_FILE="/opt/kubernetes/ssl/ca.pem"
ETCD_PEER_CERT_FILE="/opt/kubernetes/ssl/etcd.pem"
ETCD_PEER_KEY_FILE="/opt/kubernetes/ssl/etcd-key.pem"
[root@linux-node1 ~]# vim /etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd
EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
# set GOMAXPROCS to number of processors
ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /opt/kubernetes/bin/etcd"
[Install]
WantedBy=multi-user.target
[root@linux-node1 ~]# systemctl daemon-reload
[root@linux-node1 ~]# systemctl enable etcd
[root@linux-node1 ~]# mkdir /var/lib/etcd
[root@linux-node1 ~]# systemctl start etcd
[root@linux-node1 ~]# systemctl status etcd
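Note that systemctl start etcd on the first node may appear to hang: with ETCD_INITIAL_CLUSTER_STATE="new", a three-member cluster cannot elect a leader until at least a second member is up, and the notify-type service only reports ready once etcd is serving. Also, the unit file created above so far exists only on linux-node1, so it has to be distributed before starting etcd on the other two nodes; a sketch, assuming the same SSH access used earlier:
# Copy the unit file to the other two nodes and register it there
for ip in 192.168.219.136 192.168.219.137; do
  scp /etc/systemd/system/etcd.service ${ip}:/etc/systemd/system/
  ssh ${ip} "systemctl daemon-reload && systemctl enable etcd"
done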
[root@linux-node2 ~]# mkdir /var/lib/etcd
[root@linux-node2 ~]# systemctl start etcd
[root@linux-node2 ~]# systemctl status etcd
[root@linux-node3 ~]# mkdir /var/lib/etcd
[root@linux-node3 ~]# systemctl start etcd
[root@linux-node3 ~]# systemctl status etcd
# Check on each node that ports 2379 and 2380 are listening
#linux-node1
[root@linux-node1 ~]# ss -tulnp | grep etcd
tcp LISTEN 0 128 192.168.219.135:2379 *:* users:(("etcd",pid=24791,fd=7))
tcp LISTEN 0 128 127.0.0.1:2379 *:* users:(("etcd",pid=24791,fd=6))
tcp LISTEN 0 128 192.168.219.135:2380 *:* users:(("etcd",pid=24791,fd=5))
#linux-node2
[root@localhost ~]# ss -tulnp | grep etcd
tcp LISTEN 0 128 192.168.219.136:2379 *:* users:(("etcd",pid=21073,fd=7))
tcp LISTEN 0 128 127.0.0.1:2379 *:* users:(("etcd",pid=21073,fd=6))
tcp LISTEN 0 128 192.168.219.136:2380 *:* users:(("etcd",pid=21073,fd=5))
#linux-node3
[root@localhost ~]# ss -tulnp |grep etcd
tcp LISTEN 0 128 192.168.219.137:2379 *:* users:(("etcd",pid=9336,fd=7))
tcp LISTEN 0 128 127.0.0.1:2379 *:* users:(("etcd",pid=9336,fd=6))
tcp LISTEN 0 128 192.168.219.137:2380 *:* users:(("etcd",pid=9336,fd=5))
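Listening ports only prove the processes are up; to confirm that TLS is wired correctly between hosts, each node's /health endpoint can be queried from another node (a sketch, reusing the etcd certificate as the client certificate):
# Query node2's health endpoint from node1 over mutual TLS
curl --cacert /opt/kubernetes/ssl/ca.pem \
     --cert /opt/kubernetes/ssl/etcd.pem \
     --key /opt/kubernetes/ssl/etcd-key.pem \
     https://192.168.219.136:2379/health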
[root@linux-node1 ~]# etcdctl --endpoints=https://192.168.219.135:2379 \
> --ca-file=/opt/kubernetes/ssl/ca.pem \
> --cert-file=/opt/kubernetes/ssl/etcd.pem \
> --key-file=/opt/kubernetes/ssl/etcd-key.pem cluster-health
member 252a9cd8ec6cef2d is healthy: got healthy result from https://192.168.219.137:2379
member 2de3a11ecb96f087 is healthy: got healthy result from https://192.168.219.136:2379
member a60cce210fbb41a1 is healthy: got healthy result from https://192.168.219.135:2379
cluster is healthy
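Beyond health, the cluster membership and current leader can be inspected the same way, with the same TLS flags (shown here against the local endpoint):
# List cluster members; the output marks the current leader with isLeader=true
etcdctl --endpoints=https://192.168.219.135:2379 \
  --ca-file=/opt/kubernetes/ssl/ca.pem \
  --cert-file=/opt/kubernetes/ssl/etcd.pem \
  --key-file=/opt/kubernetes/ssl/etcd-key.pem member list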
References: 《每天5分钟玩转Kubernetes》 ("Master Kubernetes in 5 Minutes a Day"), https://www.cnblogs.com/linuxk/