192.168.137.11 CentOS 7.7.1908, 2 CPU cores, 2 GB RAM, 50 GB disk
192.168.137.12 CentOS 7.7.1908, 2 CPU cores, 2 GB RAM, 50 GB disk
192.168.137.13 CentOS 7.7.1908, 2 CPU cores, 2 GB RAM, 50 GB disk
192.168.137.14 CentOS 7.7.1908, 2 CPU cores, 2 GB RAM, 50 GB disk
Run on the corresponding host:
hostnamectl set-hostname k8s-m1
hostnamectl set-hostname k8s-m2
hostnamectl set-hostname k8s-m3
hostnamectl set-hostname habor
Run on all hosts:
echo '192.168.137.11 k8s-m1' >> /etc/hosts
echo '192.168.137.12 k8s-m2' >> /etc/hosts
echo '192.168.137.13 k8s-m3' >> /etc/hosts
echo '192.168.137.14 habor' >> /etc/hosts
yum install -y conntrack ntpdate ntp ipvsadm ipset jq iptables curl sysstat libseccomp wget vim net-tools git
systemctl stop firewalld
systemctl disable firewalld
yum -y install iptables-services
systemctl start iptables
systemctl enable iptables
iptables -F             # flush all rules
service iptables save   # save the (now empty) rule set
swapoff -a && sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
cat > /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# minimize swap usage; only swap when the system is about to OOM
vm.swappiness=0
# do not check whether enough physical memory is available (allow memory overcommit)
vm.overcommit_memory=1
# do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.d/kubernetes.conf
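A quick sanity check that the parameters were applied (plain sysctl reads; the values should match the file above):
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward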
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack_ipv4
timedatectl set-timezone Asia/Shanghai   # set the system timezone
timedatectl set-local-rtc 0              # keep the hardware clock in UTC
# restart services that depend on the system time
systemctl restart rsyslog && systemctl restart crond
systemctl stop postfix && systemctl disable postfix
mkdir /var/log/journal          # directory for persistent journal storage
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# persist logs to disk
Storage=persistent
# compress archived logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# cap total disk usage at 10G
SystemMaxUse=10G
# cap a single journal file at 200M
SystemMaxFileSize=200M
# keep logs for 2 weeks
MaxRetentionSec=2week
# do not forward logs to syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
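To confirm journald is now persisting logs under /var/log/journal, check its disk usage (standard journalctl option):
journalctl --disk-usage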
echo '192.168.137.11 etcd01' >> /etc/hosts
echo '192.168.137.12 etcd02' >> /etc/hosts
echo '192.168.137.13 etcd03' >> /etc/hosts
export ETCD_version=v3.3.12
export ETCD_SSL_DIR=/etc/etcd/ssl
export SYSTEM_SERVICE_DIR=/usr/lib/systemd/system
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson
wget https://github.com/etcd-io/etcd/releases/download/${ETCD_version}/etcd-${ETCD_version}-linux-amd64.tar.gz
tar -zxvf etcd-${ETCD_version}-linux-amd64.tar.gz && cd etcd-${ETCD_version}-linux-amd64
for node in etcd0{1,2,3};do scp etcd* root@$node:/usr/local/bin/;done
mkdir -p ${ETCD_SSL_DIR} && cd ${ETCD_SSL_DIR}
cat > ca-config.json <<EOF
{"signing":{"default":{"expiry":"87600h"},"profiles":{"kubernetes":{"usages":["signing","key encipherment","server auth","client auth"],"expiry":"87600h"}}}}
EOF
cat > etcd-ca-csr.json <<EOF
{"CN":"etcd","key":{"algo":"rsa","size":2048},"names":[{"C":"CN","ST":"BeiJing","L":"BeiJing","O":"etcd","OU":"etcd"}]}
EOF
cfssl gencert -initca etcd-ca-csr.json | cfssljson -bare etcd-ca
cfssl gencert -ca=etcd-ca.pem -ca-key=etcd-ca-key.pem -config=ca-config.json -hostname=127.0.0.1,192.168.137.11,192.168.137.12,192.168.137.13 -profile=kubernetes etcd-ca-csr.json | cfssljson -bare etcd
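Optionally, before cleaning up the CSR/JSON files, verify that the issued certificate contains the expected SANs (plain openssl, nothing cfssl-specific):
openssl x509 -in etcd.pem -noout -text | grep -A1 'Subject Alternative Name'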
rm -rf *.json *.csr
for node in etcd0{2,3};do ssh root@$node mkdir -p ${ETCD_SSL_DIR} /var/lib/etcd;scp * root@$node:${ETCD_SSL_DIR};done
cat /etc/etcd/config
#[Member]
ETCD_NAME="etcd01" #多个节点的话,名字不一样;如etcd02
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.137.11:2380" #对应实例的IP地址;如:https://192.168.137.12:2380
ETCD_LISTEN_CLIENT_URLS="https://192.168.137.11:2379" #对应实例的IP地址;如:https://192.168.137.12:2379
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.137.11:2380" #对应实例的IP地址;如:https://192.168.137.12:2380
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.137.11:2379" #对应实例的IP地址;如:https://192.168.137.12:2379
ETCD_INITIAL_CLUSTER="etcd01=https://192.168.137.11:2380,etcd02=https://192.168.137.12:2380,etcd03=https://192.168.137.13:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
cat /usr/lib/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
# The EnvironmentFile is optional: you can drop it and replace the ${...} variables below with the literal values from /etc/etcd/config
EnvironmentFile=-/etc/etcd/config
ExecStart=/usr/local/bin/etcd \
--name=${ETCD_NAME} \
--data-dir=${ETCD_DATA_DIR} \
--listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--peer-cert-file=/etc/etcd/ssl/etcd.pem \
--peer-key-file=/etc/etcd/ssl/etcd-key.pem \
--trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem \
--peer-trusted-ca-file=/etc/etcd/ssl/etcd-ca.pem
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
scp /etc/etcd/config 192.168.137.12:/etc/etcd/    # adjust ETCD_NAME and the IP addresses in config on that node
scp /etc/etcd/config 192.168.137.13:/etc/etcd/    # adjust ETCD_NAME and the IP addresses in config on that node
scp /usr/lib/systemd/system/etcd.service 192.168.137.12:/usr/lib/systemd/system/
scp /usr/lib/systemd/system/etcd.service 192.168.137.13:/usr/lib/systemd/system/
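Since a new unit file was just copied into place, reload systemd on each node before enabling the service (standard systemd practice):
systemctl daemon-reload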
# after adjusting the config on each node, start etcd on all three master nodes
systemctl enable --now etcd
etcdctl \
--ca-file=/etc/etcd/ssl/etcd-ca.pem \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem \
--endpoints="https://192.168.137.11:2379,https://192.168.137.12:2379,https://192.168.137.13:2379" cluster-health
or
etcdctl --ca-file=/etc/etcd/ssl/etcd-ca.pem --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem cluster-health
or
export ETCDCTL_API=3
etcdctl --endpoints="https://192.168.137.11:2379,https://192.168.137.12:2379,https://192.168.137.13:2379" --cert="/etc/etcd/ssl/etcd.pem" --key="/etc/etcd/ssl/etcd-key.pem" --cacert="/etc/etcd/ssl/etcd-ca.pem" endpoint health
Remove an etcd member
./etcdctl --ca-file=/etc/etcd/ssl/etcd-ca.pem --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem member remove <member_id>
systemctl stop etcd               # stop the etcd service on the removed node
yum remove -y etcd-xxxx           # uninstall etcd (only if it was installed from a yum package)
./etcdctl --ca-file=/etc/etcd/ssl/etcd-ca.pem --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem member list   # list members again to confirm the removal
Add an etcd member
./etcdctl --ca-file=/etc/etcd/ssl/etcd-ca.pem --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem member add <etcd_name> https://<etcd_node_address>:2380
# on the new node, set ETCD_INITIAL_CLUSTER_STATE="existing" in /etc/etcd/config before starting
systemctl start etcd
Update an etcd member
export ETCDCTL_API=3
etcdctl member update <member-ID> --peer-urls=https://<etcd_node_address_ip>:2380
List the cluster members
export ETCDCTL_API=3
etcdctl --endpoints="https://192.168.137.11:2379,https://192.168.137.12:2379,https://192.168.137.13:2379" --cert="/etc/etcd/ssl/etcd.pem" --key="/etc/etcd/ssl/etcd-key.pem" --cacert="/etc/etcd/ssl/etcd-ca.pem" member list --write-out=table
Check endpoint status (shows which node is the leader)
export ETCDCTL_API=3
etcdctl --endpoints="https://192.168.137.11:2379,https://192.168.137.12:2379,https://192.168.137.13:2379" --cert="/etc/etcd/ssl/etcd.pem" --key="/etc/etcd/ssl/etcd-key.pem" --cacert="/etc/etcd/ssl/etcd-ca.pem" endpoint status --write-out=table
Take a snapshot backup
export ETCDCTL_API=3
etcdctl --cert="/etc/etcd/ssl/etcd.pem" --key="/etc/etcd/ssl/etcd-key.pem" --cacert="/etc/etcd/ssl/etcd-ca.pem" --endpoints="https://192.168.137.11:2379,https://192.168.137.12:2379,https://192.168.137.13:2379" snapshot save snapshot.db
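To restore from such a snapshot, a minimal sketch (stop etcd first; the name, data dir and cluster flags must match your own /etc/etcd/config, and the target --data-dir must not already exist):
ETCDCTL_API=3 etcdctl snapshot restore snapshot.db \
  --name etcd01 \
  --data-dir /var/lib/etcd/default.etcd \
  --initial-cluster "etcd01=https://192.168.137.11:2380,etcd02=https://192.168.137.12:2380,etcd03=https://192.168.137.13:2380" \
  --initial-cluster-token etcd-cluster \
  --initial-advertise-peer-urls https://192.168.137.11:2380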
Python must be version 2.7 or later
The Docker engine must be version 1.10 or later
Docker Compose must be version 1.6.0 or later
Official Harbor releases: https://github.com/vmware/harbor/releases
https://github.com/vmware/harbor/releases/download/v1.2.0/harbor-offline-installer-v1.2.0.tgz
or
wget https://storage.googleapis.com/harbor-releases/harbor-offline-installer-v1.5.0.tgz
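A quick way to confirm the prerequisites above on the Harbor host (standard version checks):
python --version
docker --version
docker-compose --version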
hostname: the target host name or fully qualified domain name
ui_url_protocol: http or https. Defaults to http; if you change it to https you must also complete step 3 below, "create a self-signed certificate".
db_password: the root password of the MySQL database used for db_auth. Change this password for any production use.
max_job_workers: (default 3) the maximum number of replication workers in the job service.
For each image replication job, a worker synchronizes all tags of a repository to the remote target. Raising this number allows more concurrent replication jobs, but since every worker consumes a certain amount of network/CPU/IO resources, choose the value carefully based on the host's hardware.
customize_crt: (on or off, default on) when on, the prepare script creates a private key and root certificate for generating and verifying the registry token
ssl_cert: path to the SSL certificate; only used when the protocol is set to https
ssl_cert_key: path to the SSL key; only used when the protocol is set to https
secretkey_path: path of the key used to encrypt or decrypt the password of a remote registry in a replication policy
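Putting the parameters above together, an HTTPS harbor.cfg might contain a fragment like the following (illustrative values only: the certificate paths assume the self-signed certificate created in the next step, and db_password must be replaced with your own):
hostname = www.myhabor.com
ui_url_protocol = https
ssl_cert = /data/cert/harbor01.crt
ssl_cert_key = /data/cert/harbor01.key
db_password = <your-db-root-password>
max_job_workers = 3
customize_crt = on
secretkey_path = /data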
mkdir -p /data/cert
cd /data/cert
openssl req -newkey rsa:4096 -nodes -sha256 -keyout ca.key -x509 -days 365 -out ca.crt
# Prompts: country name CN ---> province BJ ---> city BJ ---> company name Myhabor (customizable) ---> organization name myhabor (customizable) ---> host name or domain www.myhabor.com (should match the hostname in harbor.cfg) ---> an email address ---> press Enter to finish
openssl req -newkey rsa:4096 -nodes -sha256 -keyout harbor01.key -out harbor01.csr
# Same prompts as above: country name CN ---> province BJ ---> city BJ ---> company name Myhabor (customizable) ---> organization name myhabor (customizable) ---> host name or domain www.myhabor.com (should match the hostname in harbor.cfg) ---> an email address ---> then press Enter twice to skip the extra attributes
openssl x509 -req -days 365 -in harbor01.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out harbor01.crt
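Before running the installer, unpack the offline tarball downloaded above and edit harbor.cfg inside it (the v1.5.0 offline installer unpacks into a directory named harbor that contains install.sh and harbor.cfg):
tar -zxvf harbor-offline-installer-v1.5.0.tgz && cd harbor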
chmod +x install.sh
./install.sh
Browse to https://www.myhabor.com for the admin portal (change reg.yourdomain.com in harbor.cfg to your own host name). Note that the default administrator username/password is admin / Harbor12345.
cat /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {"max-size": "100m"},
"registry-mirrors": ["http://7eb68c46.m.daocloud.io"],
"insecure-registries": ["https://www.myhabor.com","https://pee6w651.mirror.aliyuncs.com"]
}
docker pull centos
docker tag centos:latest www.myhabor.com/library/centos:v1
docker login http://www.myhabor.com
docker push www.myhabor.com/library/centos:v1
On any other Docker client that needs to pull from Harbor:
cat /etc/docker/daemon.json
{
"insecure-registries": ["serverip"]
}
docker pull www.myhabor.com/library/centos:v1
Harbor is an enterprise-class Docker Registry project open-sourced by VMware; the project lives at https://github.com/vmware/harbor. Its goal is to help users quickly stand up an enterprise-grade Docker registry service. It builds on Docker's open-source registry and adds the features enterprise users need, such as a management UI, role-based access control (RBAC), AD/LDAP integration and audit logging, and it also has native Chinese-language support. Every Harbor component is built as a Docker container, and Harbor is deployed with Docker Compose. The Docker Compose template used to deploy Harbor is located at /Deployer/docker-compose.yml and consists of five containers, which are connected with Docker links and reach each other by container name. Only the proxy (i.e.
Nginx) service port needs to be exposed to end users.
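After install.sh completes, you can list those containers from the unpacked installer directory (standard Docker Compose usage; the directory name assumes the offline installer was extracted as shown earlier):
cd harbor && docker-compose ps    # all Harbor containers should show state Up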
The operations in steps 1, 3 and 4 are the same as those in Part I, "Environment preparation".
192.168.137.11 CentOS 7.7.1908, 2 CPU cores, 2 GB RAM, 50 GB disk
192.168.137.12 CentOS 7.7.1908, 2 CPU cores, 2 GB RAM, 50 GB disk
192.168.137.13 CentOS 7.7.1908, 2 CPU cores, 2 GB RAM, 50 GB disk
Host overview
k8s-vip:192.168.137.100
k8s-m1:192.168.137.11
k8s-m2:192.168.137.12
k8s-m3:192.168.137.13
Run on the corresponding host:
hostnamectl set-hostname k8s-m1
hostnamectl set-hostname k8s-m2
hostnamectl set-hostname k8s-m3
hostnamectl set-hostname habor
Run on all hosts:
echo '192.168.137.11 k8s-m1' >> /etc/hosts
echo '192.168.137.12 k8s-m2' >> /etc/hosts
echo '192.168.137.13 k8s-m3' >> /etc/hosts
ssh-keygen
ssh-copy-id [email protected]
ssh-copy-id [email protected]
ssh-copy-id [email protected]
cat init_env.sh
#!/bin/bash
# disable the firewall
systemctl stop firewalld && systemctl disable firewalld
# disable SELinux
setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
# disable swap
swapoff -a && sysctl -w vm.swappiness=0
sed -i 's/.*swap.*/#&/g' /etc/fstab
# set the kernel parameters required by Docker/Kubernetes
cat > /etc/sysctl.d/kubernetes.conf <<EOF
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
# minimize swap usage; only swap when the system is about to OOM
vm.swappiness=0
# do not check whether enough physical memory is available (allow memory overcommit)
vm.overcommit_memory=1
# do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.d/kubernetes.conf
# load the ip_vs kernel modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
yum -y install yum-utils device-mapper-persistent-data lvm2 wget epel-release ipvsadm vim ntpdate
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install -y docker-ce-18.06.1.ce-3.el7
cat > /etc/docker/daemon.json <<EOF
{
"exec-opts": ["native.cgroupdriver=systemd"],
"registry-mirrors": ["https://gco4rcsp.mirror.aliyuncs.com"],
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m",
"max-file": "3"
}
}
EOF
systemctl enable docker
systemctl daemon-reload
systemctl restart docker
cat > /etc/yum.repos.d/kubernetes.repo <<EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
yum -y install kubelet-1.15.0 kubeadm-1.15.0 kubectl-1.15.0 --disableexcludes=kubernetes
0 * * * * ntpdate 202.112.10.36    # crontab entry: sync the time hourly
or
rm -rf /etc/localtime
ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
Refer to Part II, "Setting up the etcd cluster".
kubeadm config images list    # list the container images that need to be pulled
images=(kube-apiserver:v1.15.0 kube-controller-manager:v1.15.0 kube-scheduler:v1.15.0 kube-proxy:v1.15.0 pause:3.1 etcd:3.3.10 coredns:1.3.1)    # the set of images to pull (pull them on all three master nodes)
# pull and retag the images in a for loop
for image in ${images[@]}
do
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/${image}
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/${image} k8s.gcr.io/${image}
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/${image}
done
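A quick check that every image was pulled and retagged under k8s.gcr.io (plain docker CLI):
docker images | grep k8s.gcr.io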
cat kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
kubernetesVersion: v1.15.0
controlPlaneEndpoint: "192.168.137.100:8443"
etcd:
  external:
    endpoints:
    - https://192.168.137.11:2379
    - https://192.168.137.12:2379
    - https://192.168.137.13:2379
    caFile: /etc/etcd/ssl/etcd-ca.pem
    certFile: /etc/etcd/ssl/etcd.pem
    keyFile: /etc/etcd/ssl/etcd-key.pem
networking:
  podSubnet: 10.244.0.0/16
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
kubeadm init --config=kubeadm-config.yaml --upload-certs
kubeadm join 192.168.137.100:8443 \
--token vqisuo.iedkkpt9rp8mc8sj \
--discovery-token-ca-cert-hash sha256:c58d0ba7dc6249b031fb37be2e02000ee1ccc3c423f7dc24a94389fd72400cce \
--experimental-control-plane \
--certificate-key 3729cfcb665558ab3adb007cde46b43399e7a1674493ba36b66e3c1ba9f88a3c
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/62e44c867a2846fefb68bd5f178daf4da3095ccb/Documentation/kube-flannel.yml
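Once the flannel pods are running, the control-plane nodes should report Ready (standard kubectl checks):
kubectl get pods -n kube-system
kubectl get nodes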
netstat -tanlp | grep kube-proxy        # find the local ports kube-proxy is listening on
curl http://127.0.0.1:10249/proxyMode   # confirm kube-proxy is running in ipvs mode
kubectl run -it --rm busybox --image=busybox:1.28   # then run 'nslookup kubernetes' inside the container to test cluster DNS
cat /root/.kube/config
kubectl describe node k8s-m1    # show the node's details
kubectl taint node k8s-m1 node-role.kubernetes.io/master-              # remove the master taint so k8s-m1 can also schedule pods (act as a worker)
kubectl taint nodes k8s-m1 node-role.kubernetes.io/master=:NoSchedule  # re-add the taint so k8s-m1 runs no regular pods and acts only as a control-plane node
ipvsadm -Ln   # inspect the ipvs rules
Shut down one master node; if the cluster is still available, high availability is working.
kubectl get endpoints kube-controller-manager --namespace=kube-system -o yaml   # shows which node currently holds the kube-controller-manager leader lease; the other two instances are standing by
kubectl get endpoints kube-scheduler --namespace=kube-system -o yaml            # shows which node currently holds the kube-scheduler leader lease; the other two instances are standing by
cd /etc/kubernetes/
[root@node2 kubernetes]# tree
.
├── admin.conf
├── controller-manager.conf
├── kubelet.conf
├── manifests
│   ├── etcd.yaml
│   ├── kube-apiserver.yaml
│   ├── kube-controller-manager.yaml
│   └── kube-scheduler.yaml
├── pki
│   ├── apiserver.crt
│   ├── apiserver-etcd-client.crt
│   ├── apiserver-etcd-client.key
│   ├── apiserver.key
│   ├── apiserver-kubelet-client.crt
│   ├── apiserver-kubelet-client.key
│   ├── ca.crt          # Kubernetes cluster root CA
│   ├── ca.key          # Kubernetes cluster root CA key
│   ├── etcd
│   │   ├── ca.crt
│   │   ├── ca.key
│   │   ├── healthcheck-client.crt
│   │   ├── healthcheck-client.key
│   │   ├── peer.crt
│   │   ├── peer.key
│   │   ├── server.crt
│   │   └── server.key
│   ├── front-proxy-ca.crt
│   ├── front-proxy-ca.key
│   ├── front-proxy-client.crt
│   ├── front-proxy-client.key
│   ├── sa.key
│   └── sa.pub
└── scheduler.conf
Certificate grouping
Kubernetes stores its certificates in two directories:
/etc/kubernetes/pki
/etc/kubernetes/pki/etcd
Kubernetes cluster root certificate
The Kubernetes cluster root CA (the authority that signs the certificates of the Kubernetes cluster components):
/etc/kubernetes/pki/ca.crt
/etc/kubernetes/pki/ca.key
This pair is the root certificate used to sign the certificates of the other Kubernetes components and can be regarded as one of the certificate authorities within the cluster.
Certificates issued by this root CA include:
the server certificate held by the kube-apiserver component
/etc/kubernetes/pki/apiserver.crt
/etc/kubernetes/pki/apiserver.key
the client certificate used for client authentication when kube-apiserver actively initiates requests to the kubelet
/etc/kubernetes/pki/apiserver-kubelet-client.crt
/etc/kubernetes/pki/apiserver-kubelet-client.key
Note: interactions between Kubernetes components are bidirectional. The kubelet needs to call kube-apiserver, and kube-apiserver also needs to initiate requests to the kubelet, so each side needs its own root CA plus server and client certificates signed by it. kube-apiserver is usually configured with an explicit server certificate for HTTPS and a client certificate whose CN carries the user name. The kubelet's startup configuration, by contrast, usually specifies only the CA root certificate and no explicit HTTPS server certificate. The reason is that issuing a server certificate requires knowing the server's address or hostname in advance: kube-apiserver changes relatively rarely, so its IP or hostname/domain can be planned when the cluster is created, whereas the kubelets on node machines change frequently as the cluster scales, and all of their IPs cannot be known ahead of time. The kubelet therefore normally specifies only the CA root certificate and generates its own server certificate from local host information, storing it in the configured cert-dir directory.
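To see these signing relationships on a control-plane node, you can inspect the subject and issuer of the certificates with plain openssl (a quick check, not specific to kubeadm):
openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -subject -issuer
openssl x509 -in /etc/kubernetes/pki/apiserver-kubelet-client.crt -noout -subject -issuer
openssl verify -CAfile /etc/kubernetes/pki/ca.crt /etc/kubernetes/pki/apiserver.crt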