#!/bin/bash
# Back up one etcd member's data with `etcdctl snapshot save`.
# A snapshot from a single healthy member is enough to restore the whole
# cluster, but schedule backups on several members in case the node running
# the cron job fails and no backup gets produced.
IP=123.123.123.123
BACKUP_DIR=/alauda/etcd_bak/
mkdir -p "$BACKUP_DIR"
export ETCDCTL_API=3
# BUG FIX: original used undefined $BACKUP instead of $BACKUP_DIR, so the
# snapshot was written to "/snap-<timestamp>.db" at the filesystem root.
etcdctl --endpoints=http://$IP:2379 snapshot save "$BACKUP_DIR/snap-$(date +%Y%m%d%H%M).db"
#!/bin/bash
# Generate a restored data directory for EVERY member from one snapshot,
# using `etcdctl snapshot restore`.
# Key flags:
#   --data-dir                       must be the data dir etcd actually uses at runtime
#   --name / --initial-advertise-peer-urls   per-node values
#   --initial-cluster / --initial-cluster-token   must match the original cluster
ETCD_1=10.1.0.5
ETCD_2=10.1.0.6
ETCD_3=10.1.0.7
export ETCDCTL_API=3
# BUG FIX: the original iterated over the variable NAMES (`for i in ETCD_1 ...`),
# producing members named "ETCD_1" with peer URL http://ETCD_1:2380 and output
# dir etcd_ETCD_1. Iterate over the IP values instead, matching the note below.
for i in "$ETCD_1" "$ETCD_2" "$ETCD_3"
do
etcdctl snapshot restore snapshot.db \
--data-dir=/var/lib/etcd \
--name "$i" \
--initial-cluster ${ETCD_1}=http://${ETCD_1}:2380,${ETCD_2}=http://${ETCD_2}:2380,${ETCD_3}=http://${ETCD_3}:2380 \
--initial-cluster-token k8s_etcd_token \
--initial-advertise-peer-urls "http://$i:2380" && \
mv /var/lib/etcd/ "etcd_$i"
done
# Copy etcd_10.1.0.5 onto node 10.1.0.5, overwriting /var/lib/etcd (the
# --data-dir path); repeat likewise for the other nodes.
#!/bin/bash
# Restore from a data-dir copy (not a proper `snapshot save` file), so the
# integrity hash is absent and must be skipped with --skip-hash-check.
# Every node still needs its own restored data directory -- see the previous
# snippet; the ONLY difference here is the extra --skip-hash-check flag.
# This path is not guaranteed 100% recoverable: keep real snapshot backups too.
# After restoring, compaction and defragmentation are usually needed
# (see the corresponding section).
export ETCDCTL_API=3
NODE_IP=10.1.0.5
CLUSTER="10.1.0.5=http://10.1.0.5:2380,10.1.0.6=http://10.1.0.6:2380,10.1.0.7=http://10.1.0.7:2380"
etcdctl snapshot restore snapshot.db \
    --skip-hash-check \
    --data-dir=/var/lib/etcd \
    --name "$NODE_IP" \
    --initial-cluster "$CLUSTER" \
    --initial-cluster-token k8s_etcd_token \
    --initial-advertise-peer-urls "http://$NODE_IP:2380"
#!/bin/bash
# Register the re-created members with the cluster using the v2 API form
# `member add <name> <peer-url>`. etcd prints the environment the new member
# should start with, e.g.:
# ETCD_NAME="etcd_10.1.0.6"
# ETCD_INITIAL_CLUSTER="10.1.0.6=http://10.1.0.6:2380,10.1.0.5=http://10.1.0.5:2380"
# ETCD_INITIAL_CLUSTER_STATE="existing"
export ETCDCTL_API=2
for node in 10.1.0.6 10.1.0.7; do
    etcdctl --endpoints="http://${node}:2379" member add "$node" "http://${node}:2380"
done
#!/bin/bash
# Start the restored / newly-added member 10.1.0.6.
# --initial-cluster: name=peer_url pairs for all members this node knows about.
# --initial-cluster-state existing: join an already-running cluster instead of
#   bootstrapping a new one.
# BUG FIX: the original had no line continuations (each flag line would run as
# a separate shell command) and misspelled "existing" as "exsiting".
/usr/local/bin/etcd \
  --data-dir=/data.etcd \
  --name 10.1.0.6 \
  --initial-advertise-peer-urls http://10.1.0.6:2380 \
  --listen-peer-urls http://10.1.0.6:2380 \
  --advertise-client-urls http://10.1.0.6:2379 \
  --listen-client-urls http://10.1.0.6:2379 \
  --initial-cluster 10.1.0.6=http://10.1.0.6:2380,10.1.0.5=http://10.1.0.5:2380 \
  --initial-cluster-state existing \
  --initial-cluster-token k8s_etcd_token
# Install the cfssl and cfssljson binaries, then move into the etcd PKI dir.
for tool in cfssl cfssljson; do
    curl -s -L -o "/usr/bin/${tool}" "https://pkg.cfssl.org/R1.2/${tool}_linux-amd64"
done
chmod +x /usr/bin/{cfssl,cfssljson}
cd /etc/kubernetes/pki/etcd
# cat ca-config.json
{
"signing": {
"default": {
"expiry": "100000h"
},
"profiles": {
"server": {
"usages": ["signing", "key encipherment", "server auth", "client auth"],
"expiry": "100000h"
},
"client": {
"usages": ["signing", "key encipherment", "server auth", "client auth"],
"expiry": "100000h"
}
}
}
}
# cat ca-csr.json
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 4096
},
"names": [
{
"C": "CN",
"L": "Beijing",
"O": "Alauda",
"OU": "PaaS",
"ST": "Beijing"
}
]
}
# cat server-csr.json
{
"CN": "etcd-server",
"hosts": [
"localhost",
"0.0.0.0",
"127.0.0.1",
"<master-node-1-ip>",
"<master-node-2-ip>",
"<master-node-3-ip>"
],
"key": {
"algo": "rsa",
"size": 4096
},
"names": [
{
"C": "CN",
"L": "Beijing",
"O": "Alauda",
"OU": "PaaS",
"ST": "Beijing"
}
]
}
# cat client-csr.json
{
"CN": "etcd-client",
"hosts": [
""
],
"key": {
"algo": "rsa",
"size": 4096
},
"names": [
{
"C": "CN",
"L": "Beijing",
"O": "Alauda",
"OU": "PaaS",
"ST": "Beijing"
}
]
}
# Generate the CA, then sign one server and one client certificate with it.
cd /etc/kubernetes/pki/etcd
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
for profile in server client; do
    cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json \
        -profile="$profile" "${profile}-csr.json" | cfssljson -bare "$profile"
done
# Switch each member's advertised peer URL from http to https.
export ETCDCTL_API=3
# List the members to obtain their IDs (x.x.x.x is a placeholder endpoint).
etcdctl --endpoints=http://x.x.x.x:2379 member list
# 1111111111 ..........
# 2222222222 ..........
# 3333333333 ..........
# Update one member's peer URL by its ID.
etcdctl --endpoints=http://172.30.0.123:2379 member update 1111111111 --peer-urls=https://x.x.x.x:2380
# Run this three times so all three members' peer-urls become https.
# vim /etc/kubernetes/main*/etcd.yaml
# In the etcd command section: change http to https and set the startup
# cluster state to "existing".
- --advertise-client-urls=https://x.x.x.x:2379
- --initial-advertise-peer-urls=https://x.x.x.x:2380
- --initial-cluster=xxx=https://x.x.x.x:2380,xxx=https://x.x.x.x:2380,xxx=https://x.x.x.x:2380
- --listen-client-urls=https://x.x.x.x:2379
- --listen-peer-urls=https://x.x.x.x:2380
- --initial-cluster-state=existing
# Insert into the etcd command section (TLS for both client and peer traffic):
- --cert-file=/etc/kubernetes/pki/etcd/server.pem
- --key-file=/etc/kubernetes/pki/etcd/server-key.pem
- --peer-cert-file=/etc/kubernetes/pki/etcd/server.pem
- --peer-key-file=/etc/kubernetes/pki/etcd/server-key.pem
- --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.pem
- --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.pem
- --peer-client-cert-auth=true
- --client-cert-auth=true
# Search for hostPath and insert after it (mounts the cert dir into the pod):
- hostPath:
path: /etc/kubernetes/pki/etcd
type: DirectoryOrCreate
name: etcd-certs
# Search for mountPath and insert after it:
- mountPath: /etc/kubernetes/pki/etcd
name: etcd-certs
# vim /etc/kubernetes/main*/kube-apiserver.yaml
# Insert into the apiserver command section; change http to https:
- --etcd-cafile=/etc/kubernetes/pki/etcd/ca.pem
- --etcd-certfile=/etc/kubernetes/pki/etcd/client.pem
- --etcd-keyfile=/etc/kubernetes/pki/etcd/client-key.pem
- --etcd-servers=https://x.x.x.x:2379,https://x.x.x.x:2379,https://x.x.x.x:2379
2018-02-06 12:41:06.905234 I | embed: rejected connection from "127.0.0.1:35574" (error "EOF", ServerName "")
#!/bin/bash
# Print cluster status as a table. If the cluster does NOT use TLS, simply
# drop the --cert/--key/--cacert flags.
# `endpoint status` prints one line per URL listed in --endpoints.
export ETCDCTL_API=3
etcdctl \
    --cacert=/etc/kubernetes/pki/etcd/ca.pem \
    --cert=/etc/kubernetes/pki/etcd/client.pem \
    --key=/etc/kubernetes/pki/etcd/client-key.pem \
    --endpoints=https://x.x.x.x:2379 \
    endpoint status -w table
# Quick health checks (xxxx = placeholder endpoint list).
etcdctl --endpoints=xxxx endpoint health
etcdctl --endpoints=xxxx member list
kubectl get cs
# Deleting data:
ETCDCTL_API=2 etcdctl rm --recursive # the v2 API can delete a "directory" this way
ETCDCTL_API=3 etcdctl --endpoints=xxx del /xxxxx --prefix # v3 equivalent
# With TLS, add --cert --key --cacert as in the previous snippet.
# Defragment and compact (e.g. after restores or heavy deletes).
ETCDCTL_API=3 etcdctl --endpoints=xx:xx,xx:xx,xx:xx defrag
ETCDCTL_API=3 etcdctl --endpoints=xx:xx,xx:xx,xx:xx endpoint status # check data size
# NOTE(review): `compact` normally takes a revision argument -- confirm usage.
ETCDCTL_API=3 etcdctl --endpoints=xx:xx,xx:xx,xx:xx compact
# In an etcd cluster used only by K8s this matters less; I may just not have
# hit the scenario where it helps.
# See also:
# https://www.cnblogs.com/davygeek/p/8524477.html
# Running it anyway does no harm.
etcd --auto-compaction-retention=1
# This flag makes etcd compact itself automatically at runtime.
etcd 对时间很依赖,所以集群里的节点时间一定要同步
磁盘空间不足,如果磁盘是被 etcd 自己吃完了,就需要考虑压缩和删数据啦
加证书后所有请求就都要带证书了,要不会提示 context deadline exceeded
做各个操作时 etcd 启动参数里标明节点状态的要小心,否则需要重新做一遍前面的步骤很麻烦
# Redirect etcd's output to a log file (flags are placeholders).
etcd --xxxx --xxxx > /var/log/etcd.log
# Pair with logrotate to rotate the log.
# Or mount the log path into the host via a volume.
# Trace writes of PID 1 (etcd inside the container) to debug logging output:
strace -e trace=write -s 200 -f -p 1
# Inspect the kubeadm etcd configuration stored in the etcdcfg ConfigMap.
kubectl get cm etcdcfg -n kube-system -o yaml
# Example content. NOTE(review): indentation below appears flattened by
# copy/paste -- kubeadm expects local/serverCertSANs/extraArgs nested under
# etcd; confirm against the actual ConfigMap.
etcd:
local:
serverCertSANs:
- "192.168.8.21"
peerCertSANs:
- "192.168.8.21"
extraArgs:
initial-cluster: 192.168.8.21=https://192.168.8.21:2380,192.168.8.22=https://192.168.8.22:2380,192.168.8.20=https://192.168.8.20:2380
initial-cluster-state: new
name: 192.168.8.21
listen-peer-urls: https://192.168.8.21:2380
listen-client-urls: https://192.168.8.21:2379
advertise-client-urls: https://192.168.8.21:2379
initial-advertise-peer-urls: https://192.168.8.21:2380