目录
一、系统初始化
二、部署master组件etcd(master节点操作)
三、kubernetes 软件包下载及安装(master节点操作)
四、部署master组件apiserver(master节点操作)
五、部署kubectl工具(master执行)
六、部署controller-manager(master操作)
七、部署master组件scheduler服务(master操作)
八、部署安装docker(工作节点node安装操作)
九、node节点kubelet组件安装
十、node节点kube-proxy组件安装
十一、网络组件flannel安装
十二、coredns插件安装(master节点操作)
| 主机IP | 主机名 | 角色 | 软件列表 |
| --- | --- | --- | --- |
| 192.168.20.110 | k8s-master | master | etcd kube-apiserver kube-controller-manager kube-scheduler |
| 192.168.20.111 | k8s-node1 | worker | kubelet kube-proxy docker |
| 网络名称 | 网段 |
| --- | --- |
| Service网络 | 10.40.0.0/16 |
| Pod网络 | 172.40.0.0/16 |
# ---- Environment initialization (run on all nodes) ----
# Add static host entries for the cluster nodes.
cat >> /etc/hosts << EOF
192.168.20.110 k8s-master
192.168.20.111 k8s-node1
EOF
# Disable firewalld and SELinux (node-to-node traffic must be open).
systemctl stop firewalld && systemctl disable firewalld && setenforce 0 && sed -ri 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# Disable swap now and at boot (kubelet refuses to start with swap on by default).
swapoff -a && sed -ri 's/.*swap.*/#&/' /etc/fstab
# Raise resource limits.
# FIX: original used "cat <> /etc/security/limits.conf" (broken redirection);
# it must append a here-document.  Also the hard nofile limit (131072) was
# LOWER than the soft limit (655360), which is invalid -- soft may never
# exceed hard -- so hard nofile is raised to match.
cat >> /etc/security/limits.conf << EOF
* soft nofile 655360
* hard nofile 655360
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
# Upgrade the kernel via the ELRepo mainline package
# (ipvs/overlay features used below work best on a recent kernel).
rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org
yum -y install https://www.elrepo.org/elrepo-release-7.0-4.el7.elrepo.noarch.rpm
yum --enablerepo="elrepo-kernel" -y install kernel-ml.x86_64
# Boot entry 0 is the newly installed kernel; make it the default.
grub2-set-default 0
grub2-mkconfig -o /boot/grub2/grub.cfg
reboot # reboot for the new kernel to take effect
# FIX: the original line fused two here-documents into one broken command
# ("cat >.../ipvs.conf < .../containerd.conf << EOF"), so neither file was
# written correctly.  Reconstructed as two separate here-docs.
# Kernel modules for kube-proxy ipvs mode (standard list -- verify against
# your kernel; nf_conntrack_ipv4 on kernels < 4.19).
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
# Modules required by the container runtime / bridge traffic filtering.
cat > /etc/modules-load.d/containerd.conf << EOF
overlay
br_netfilter
EOF
# Load them now so the sysctl keys below (bridge-nf-call-*) exist.
modprobe overlay
modprobe br_netfilter
# FIX: original "cat < /etc/sysctl.d/k8s.conf" READ the file instead of
# writing it -- the sysctl lines below would have executed as shell commands.
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
fs.may_detach_mounts = 1
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.netfilter.nf_conntrack_max=2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl =15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
# FIX: removed the duplicated tcp_max_syn_backlog line and the obsolete
# net.ipv4.ip_conntrack_max key (superseded by net.netfilter.nf_conntrack_max).
# Apply the settings immediately (also applied automatically at boot).
sysctl --system
# Create the working directory (-p: re-running the script must not fail here).
mkdir -p /opt/packages
# Fetch the cfssl toolchain (downloads from outside the mirror can be slow).
cd /opt/packages/
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
# Make the binaries executable.  Explicit names instead of the "cf*" glob,
# which could match unrelated files in the directory.
chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 cfssl-certinfo_linux-amd64
# Install the tools into /usr/local/bin under their conventional names.
cp ./cfssl_linux-amd64 /usr/local/bin/cfssl
cp ./cfssljson_linux-amd64 /usr/local/bin/cfssljson
cp ./cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
# Sanity check the installation.
cfssl version
# ---- Deploy etcd (run on the master node) ----
# FIX: mkdir -p so the script can be re-run without failing.
mkdir -p /opt/packages/cert
cd /opt/packages/cert
# CA certificate request (CSR) file.
cat > ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubemsb",
"OU": "CN"
}
],
"ca": {
"expiry": "87600h"
}
}
EOF
# Create the self-signed CA.
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
# CA signing policy: a single "kubernetes" profile valid for both
# server auth and client auth, 10-year expiry.
cat > ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "87600h"
}
}
}
}
EOF
# etcd certificate request file (adjust the host IPs for your environment).
cat > etcd-csr.json << EOF
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"192.168.20.110",
"192.168.20.111"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubemsb",
"OU": "CN"
}]
}
EOF
# Generate the etcd certificate signed by the CA.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes etcd-csr.json | cfssljson -bare etcd
# Download and install the etcd binaries.
cd /opt/packages
wget https://github.com/etcd-io/etcd/releases/download/v3.5.2/etcd-v3.5.2-linux-amd64.tar.gz
tar xf etcd-v3.5.2-linux-amd64.tar.gz
cp ./etcd-v3.5.2-linux-amd64/etcd* /usr/local/bin/
# Sanity check the installed version.
etcdctl version
# Create the etcd config directory and its data directory.
mkdir /opt/kubernetes/etcd/default.etcd -p
mkdir /opt/kubernetes/ssl -p
# etcd configuration file (adjust the IPs for your environment).
# Single-member cluster: only etcd1 on the master node.
cat > /opt/kubernetes/etcd/etcd.conf << EOF
# 成员信息
#[Member]
ETCD_NAME="etcd1"
ETCD_DATA_DIR="/opt/kubernetes/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.20.110:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.20.110:2379,http://127.0.0.1:2379"
# 集群信息
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.20.110:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.20.110:2379"
ETCD_INITIAL_CLUSTER="etcd1=https://192.168.20.110:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
ETCD_ENABLE_V2=true
EOF
# Copy the CA and etcd certificates into the directory the systemd unit references.
cp /opt/packages/cert/ca*.pem /opt/kubernetes/ssl
cp /opt/packages/cert/etcd*.pem /opt/kubernetes/ssl
# Create the etcd systemd unit file.
# FIX: the delimiter is quoted ('EOF') so the shell keeps the
# backslash-newline continuations literally instead of joining ExecStart
# onto one long line (in an unquoted here-doc, \<newline> is removed and
# any $ would be expanded).
cat > /usr/lib/systemd/system/etcd.service << 'EOF'
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
EnvironmentFile=/opt/kubernetes/etcd/etcd.conf
WorkingDirectory=/opt/kubernetes/etcd/
ExecStart=/usr/local/bin/etcd \
--cert-file=/opt/kubernetes/ssl/etcd.pem \
--key-file=/opt/kubernetes/ssl/etcd-key.pem \
--trusted-ca-file=/opt/kubernetes/ssl/ca.pem \
--peer-cert-file=/opt/kubernetes/ssl/etcd.pem \
--peer-key-file=/opt/kubernetes/ssl/etcd-key.pem \
--peer-trusted-ca-file=/opt/kubernetes/ssl/ca.pem \
--peer-client-cert-auth --client-cert-auth
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Start etcd and enable it at boot.
systemctl daemon-reload && systemctl enable --now etcd && systemctl status etcd
# Verify cluster health over TLS.
ETCDCTL_API=3 /usr/local/bin/etcdctl --write-out=table \
--cacert=/opt/kubernetes/ssl/ca.pem --cert=/opt/kubernetes/ssl/etcd.pem \
--key=/opt/kubernetes/ssl/etcd-key.pem \
--endpoints=https://192.168.20.110:2379 endpoint status
# ---- Download and install kubernetes binaries (run on the master) ----
# Download the v1.21.10 server tarball (can also be fetched from the official site).
wget https://dl.k8s.io/v1.21.10/kubernetes-server-linux-amd64.tar.gz
# Unpack it.
tar -xf kubernetes-server-linux-amd64.tar.gz
# Install the control-plane binaries on the master node.
cd /opt/packages/kubernetes/server/bin
cp kube-apiserver kube-controller-manager kubectl kube-scheduler /usr/local/bin/
# Ship the worker binaries to the node.
scp kubelet kube-proxy 192.168.20.111:/usr/local/bin/
# Create the apiserver CSR.  Adjust the hosts list as needed; a few spare
# node IPs are included for future expansion, plus the first service-CIDR
# address (10.40.0.1) and the in-cluster DNS names of the apiserver.
cd /opt/packages/cert
cat > /opt/packages/cert/kube-apiserver-csr.json << "EOF"
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"192.168.20.110",
"192.168.20.111",
"192.168.20.112",
"192.168.20.113",
"10.40.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubemsb",
"OU": "CN"
}
]
}
EOF
# Generate the apiserver certificate and the bootstrap token file.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-apiserver-csr.json | cfssljson -bare kube-apiserver
# Unquoted delimiter on purpose: the $(...) must expand when the here-doc
# is written, producing a random 16-byte hex bootstrap token.
cat > token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:kubelet-bootstrap"
EOF
# kube-apiserver configuration (adjust the IPs and certificate paths as needed).
# The "EOF" delimiter is quoted, so $KUBE_APISERVER_OPTS below is written to
# the file literally rather than being expanded by the shell.
# NOTE(review): flags like --insecure-port/--enable-swagger-ui are
# deprecated/removed in newer kubernetes releases -- this set targets v1.21.
mkdir /opt/kubernetes/conf -p
mkdir /opt/kubernetes/logs/api-server/ -p
cat > /opt/kubernetes/conf/kube-apiserver.conf << "EOF"
KUBE_APISERVER_OPTS="--enable-admission-plugins=NamespaceLifecycle,NodeRestriction,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota \
--anonymous-auth=false \
--bind-address=192.168.20.110 \
--secure-port=6443 \
--advertise-address=192.168.20.110 \
--insecure-port=0 \
--authorization-mode=Node,RBAC \
--runtime-config=api/all=true \
--enable-bootstrap-token-auth \
--service-cluster-ip-range=10.40.0.0/16 \
--token-auth-file=/opt/kubernetes/ssl/token.csv \
--service-node-port-range=30000-50000 \
--tls-cert-file=/opt/kubernetes/ssl/kube-apiserver.pem \
--tls-private-key-file=/opt/kubernetes/ssl/kube-apiserver-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--kubelet-client-certificate=/opt/kubernetes/ssl/kube-apiserver.pem \
--kubelet-client-key=/opt/kubernetes/ssl/kube-apiserver-key.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
--service-account-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
--service-account-issuer=api --etcd-cafile=/opt/kubernetes/ssl/ca.pem \
--etcd-certfile=/opt/kubernetes/ssl/etcd.pem \
--etcd-keyfile=/opt/kubernetes/ssl/etcd-key.pem \
--etcd-servers=https://192.168.20.110:2379 \
--enable-swagger-ui=true \
--allow-privileged=true \
--apiserver-count=2 \
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/api-server/kube-apiserver-audit.log \
--event-ttl=1h \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/opt/kubernetes/logs/api-server \
--v=4"
EOF
# Create the kube-apiserver systemd service unit.
# FIX: the original unit had "After=kube-apiserver.service" -- a dependency
# on itself.  The apiserver must be ordered after the network and etcd.
cat > /usr/lib/systemd/system/kube-apiserver.service << "EOF"
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target etcd.service
[Service]
EnvironmentFile=/opt/kubernetes/conf/kube-apiserver.conf
ExecStart=/usr/local/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
RestartSec=5
Type=notify
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Copy the apiserver certificates and the token file to the ssl directory
# referenced by the configuration above.
cp kube-apiserver*.pem /opt/kubernetes/ssl/
cp token.csv /opt/kubernetes/ssl/
# Start and verify the apiserver service.
systemctl daemon-reload && systemctl enable --now kube-apiserver && systemctl status kube-apiserver
# ---- Deploy kubectl (run on the master) ----
# Create the admin (kubectl) certificate request file.
# O=system:masters maps this client cert to the cluster-admin group.
cd /opt/packages/cert
cat > admin-csr.json << "EOF"
{
"CN": "admin",
"hosts": [],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:masters",
"OU": "system"
}
]
}
EOF
# Generate the certificate files.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes admin-csr.json | cfssljson -bare admin
# Copy them to the shared ssl directory.
cp admin*pem /opt/kubernetes/ssl/
# Configure the kubeconfig used by kubectl.
# Point it at the cluster, embedding the CA certificate (note the IP).
kubectl config set-cluster kubernetes --certificate-authority=/opt/kubernetes/ssl/ca.pem --embed-certs=true --server=https://192.168.20.110:6443 --kubeconfig=/root/.kube/config
# Register the admin credentials.
kubectl config set-credentials admin --client-certificate=/opt/kubernetes/ssl/admin.pem --client-key=/opt/kubernetes/ssl/admin-key.pem --embed-certs=true --kubeconfig=/root/.kube/config
# Create the context.
kubectl config set-context kubernetes --cluster=kubernetes --user=admin --kubeconfig=/root/.kube/config
# Switch to it.
kubectl config use-context kubernetes --kubeconfig=/root/.kube/config
# Allow the apiserver (its client cert CN is "kubernetes") to call kubelet APIs.
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kubernetes --kubeconfig=/root/.kube/config
# Add this environment variable to /etc/profile as well so it persists.
export KUBECONFIG=$HOME/.kube/config
# Check control-plane component status.
kubectl get componentstatuses
kubectl cluster-info
# List resource objects across all namespaces.
kubectl get all --all-namespaces
# ---- Deploy kube-controller-manager (run on the master) ----
# Create its certificate request file.
cd /opt/packages/cert
cat > kube-controller-manager-csr.json << "EOF"
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"hosts": [
"127.0.0.1",
"192.168.20.110",
"192.168.20.111",
"192.168.20.112",
"192.168.20.113"
],
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-controller-manager",
"OU": "system"
}
]
}
EOF
# Generate the certificate.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
# Copy it to the ssl directory.
cp kube-controller-manager-key.pem kube-controller-manager.pem /opt/kubernetes/ssl/
# Build its dedicated kubeconfig: cluster, credentials, context, then select it.
kubectl config set-cluster kubernetes --certificate-authority=/opt/kubernetes/ssl/ca.pem --embed-certs=true --server=https://192.168.20.110:6443 --kubeconfig=/opt/kubernetes/conf/kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager --client-certificate=/opt/kubernetes/ssl/kube-controller-manager.pem --client-key=/opt/kubernetes/ssl/kube-controller-manager-key.pem --embed-certs=true --kubeconfig=/opt/kubernetes/conf/kube-controller-manager.kubeconfig
kubectl config set-context system:kube-controller-manager --cluster=kubernetes --user=system:kube-controller-manager --kubeconfig=/opt/kubernetes/conf/kube-controller-manager.kubeconfig
kubectl config use-context system:kube-controller-manager --kubeconfig=/opt/kubernetes/conf/kube-controller-manager.kubeconfig
# Create the kube-controller-manager configuration (adjust IPs/CIDRs as needed).
# FIX: the log directory referenced by --log-dir below was never created
# anywhere in the guide (the scheduler section creates its own); create it here.
mkdir -p /opt/kubernetes/logs/control-manager
# Quoted "EOF": $KUBE_CONTROLLER_MANAGER_OPTS is written literally.
# NOTE(review): --horizontal-pod-autoscaler-use-rest-clients and --port are
# deprecated; this flag set targets kubernetes v1.21.
cat > /opt/kubernetes/conf/kube-controller-manager.conf << "EOF"
KUBE_CONTROLLER_MANAGER_OPTS="--port=10252 \
--secure-port=10257 \
--bind-address=127.0.0.1 \
--kubeconfig=/opt/kubernetes/conf/kube-controller-manager.kubeconfig \
--service-cluster-ip-range=10.40.0.0/16 \
--cluster-name=kubernetes \
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
--allocate-node-cidrs=true \
--cluster-cidr=172.40.0.0/16 \
--experimental-cluster-signing-duration=87600h \
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
--leader-elect=true \
--feature-gates=RotateKubeletServerCertificate=true \
--controllers=*,bootstrapsigner,tokencleaner \
--horizontal-pod-autoscaler-use-rest-clients=true \
--horizontal-pod-autoscaler-sync-period=10s \
--tls-cert-file=/opt/kubernetes/ssl/kube-controller-manager.pem \
--tls-private-key-file=/opt/kubernetes/ssl/kube-controller-manager-key.pem \
--use-service-account-credentials=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/opt/kubernetes/logs/control-manager \
--v=2"
EOF
# Create the kube-controller-manager systemd service unit.
cat > /usr/lib/systemd/system/kube-controller-manager.service << "EOF"
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/conf/kube-controller-manager.conf
ExecStart=/usr/local/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# Start and verify.
systemctl daemon-reload && systemctl enable --now kube-controller-manager && systemctl status kube-controller-manager
kubectl get componentstatuses
# ---- Deploy kube-scheduler (run on the master) ----
# Create the kube-scheduler certificate request file.
cd /opt/packages/cert
cat > kube-scheduler-csr.json << "EOF"
{
"CN": "system:kube-scheduler",
"hosts": [
"127.0.0.1",
"192.168.20.110",
"192.168.20.111",
"192.168.20.112",
"192.168.20.113"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "system:kube-scheduler",
"OU": "system"
}
]
}
EOF
# Generate the certificate and install it.
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-scheduler-csr.json | cfssljson -bare kube-scheduler
cp kube-scheduler.pem kube-scheduler-key.pem /opt/kubernetes/ssl/
# Build its dedicated kubeconfig: cluster, credentials, context, then select it.
kubectl config set-cluster kubernetes --certificate-authority=/opt/kubernetes/ssl/ca.pem --embed-certs=true --server=https://192.168.20.110:6443 --kubeconfig=/opt/kubernetes/conf/kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler --client-certificate=/opt/kubernetes/ssl/kube-scheduler.pem --client-key=/opt/kubernetes/ssl/kube-scheduler-key.pem --embed-certs=true --kubeconfig=/opt/kubernetes/conf/kube-scheduler.kubeconfig
kubectl config set-context system:kube-scheduler --cluster=kubernetes --user=system:kube-scheduler --kubeconfig=/opt/kubernetes/conf/kube-scheduler.kubeconfig
kubectl config use-context system:kube-scheduler --kubeconfig=/opt/kubernetes/conf/kube-scheduler.kubeconfig
# Create the scheduler configuration file (quoted "EOF": no shell expansion).
mkdir /opt/kubernetes/logs/scheduler -p
cat > /opt/kubernetes/conf/kube-scheduler.conf << "EOF"
KUBE_SCHEDULER_OPTS="--address=127.0.0.1 \
--kubeconfig=/opt/kubernetes/conf/kube-scheduler.kubeconfig \
--leader-elect=true \
--alsologtostderr=true \
--logtostderr=false \
--log-dir=/opt/kubernetes/logs/scheduler \
--v=2"
EOF
# Create the kube-scheduler systemd service unit.
cat > /usr/lib/systemd/system/kube-scheduler.service << "EOF"
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/conf/kube-scheduler.conf
ExecStart=/usr/local/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
# Start and verify.
systemctl daemon-reload && systemctl enable --now kube-scheduler && systemctl status kube-scheduler
kubectl get componentstatuses
# ---- Install docker (run on worker nodes) ----
wget -P /etc/yum.repos.d/ https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install docker-ce-19.03.*
# Create the docker data directory.
mkdir -p /data/docker
# Configure docker's daemon.json.
# FIX: the original here-doc was truncated ("cat >/etc/docker/daemon.json <").
# Reconstructed content -- TODO confirm against the original article:
# docker must use the systemd cgroup driver to match the kubelet
# configuration (kubelet-config.yml sets cgroupDriver: systemd), and the
# data directory created above is used as data-root.
mkdir -p /etc/docker
cat > /etc/docker/daemon.json << EOF
{
  "data-root": "/data/docker",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {"max-size": "100m"}
}
EOF
# Start and enable the docker service.
systemctl daemon-reload && systemctl enable --now docker
# Create the working directories on the node (run on the node).
mkdir /opt/kubernetes/{ssl,logs,conf} -p
# Create kubelet-bootstrap.kubeconfig (run on the master).
# The token is the first comma-separated field of token.csv generated earlier.
BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /opt/kubernetes/ssl/token.csv)
kubectl config set-cluster kubernetes --certificate-authority=/opt/kubernetes/ssl/ca.pem --embed-certs=true --server=https://192.168.20.110:6443 \
--kubeconfig=/opt/kubernetes/conf/kubelet-bootstrap.kubeconfig
# Register the bootstrap token credentials and context (run on the master).
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=/opt/kubernetes/conf/kubelet-bootstrap.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=/opt/kubernetes/conf/kubelet-bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=/opt/kubernetes/conf/kubelet-bootstrap.kubeconfig
# RBAC so the bootstrap user may create/submit CSRs.
# NOTE(review): binding cluster-admin to kubelet-bootstrap is far broader
# than needed -- system:node-bootstrapper alone is the usual practice.
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubelet-bootstrap
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap --kubeconfig=/opt/kubernetes/conf/kubelet-bootstrap.kubeconfig
# Ship the CA certs and the bootstrap kubeconfig to the node (run on the master).
scp -r /opt/kubernetes/ssl/ca* 192.168.20.111:/opt/kubernetes/ssl
scp /opt/kubernetes/conf/kubelet-bootstrap.kubeconfig 192.168.20.111:/opt/kubernetes/conf/
# Create the kubelet configuration file (run on the node).
# NOTE(review): address and clusterDNS are hard-coded for this node/cluster;
# adjust per node.  cgroupDriver: systemd must match docker's daemon.json.
# NOTE(review): the YAML below appears to have LOST its indentation in this
# paste (e.g. "anonymous:" should be nested under "authentication:") --
# restore proper nesting before using this file.
cat > /opt/kubernetes/conf/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: 192.168.20.111
port: 10250
readOnlyPort: 10255
cgroupDriver: systemd
clusterDNS:
- 10.40.0.2
clusterDomain: cluster.local
failSwapOn: false
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 2m0s
enabled: true
x509:
clientCAFile: /opt/kubernetes/ssl/ca.pem
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 5m0s
cacheUnauthorizedTTL: 30s
evictionHard:
imagefs.available: 15%
memory.available: 100Mi
nodefs.available: 10%
nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
# Create the kubelet systemd unit (run on the node).  If --network-plugin=cni
# is omitted here, the CNI plugin install later is not needed (flannel then
# drives the docker bridge instead).
mkdir /opt/kubernetes/logs/kubelet -p
# FIX: delimiter quoted ('EOF') so the backslash-newline continuations are
# written to the unit file literally instead of being joined by the shell.
cat > /usr/lib/systemd/system/kubelet.service << 'EOF'
[Unit]
Description=Kubernetes Kubelet
After=docker.service
[Service]
ExecStart=/usr/local/bin/kubelet \
--alsologtostderr=true --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs/kubelet \
--hostname-override=k8s-node1 --network-plugin=cni \
--kubeconfig=/opt/kubernetes/conf/kubelet.kubeconfig \
--bootstrap-kubeconfig=/opt/kubernetes/conf/kubelet-bootstrap.kubeconfig \
--config=/opt/kubernetes/conf/kubelet-config.yml \
--cert-dir=/opt/kubernetes/ssl \
--pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.2
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Start kubelet and verify.
systemctl daemon-reload && systemctl enable --now kubelet && systemctl status kubelet
# On the master: a Pending CSR should appear once the kubelet starts.
kubectl get csr
# FIX: no CSR auto-approval is configured in this guide, so the bootstrap
# CSR must be approved manually or the node never registers (master):
kubectl certificate approve $(kubectl get csr -o name)
kubectl get node
# ---- Install kube-proxy on the node ----
# Create the kube-proxy certificate request file (run on the master).
cd /opt/packages/cert
cat > kube-proxy-csr.json << EOF
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Beijing",
"L": "Beijing",
"O": "kubemsb",
"OU": "CN"
}
]
}
EOF
# Generate the certificate and build its kubeconfig (run on the master).
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
kubectl config set-cluster kubernetes --certificate-authority=/opt/kubernetes/ssl/ca.pem --embed-certs=true --server=https://192.168.20.110:6443 --kubeconfig=/opt/kubernetes/conf/kube-proxy.kubeconfig
cp kube-proxy-key.pem kube-proxy.pem /opt/kubernetes/ssl/
kubectl config set-credentials kube-proxy --client-certificate=/opt/kubernetes/ssl/kube-proxy.pem --client-key=/opt/kubernetes/ssl/kube-proxy-key.pem --embed-certs=true --kubeconfig=/opt/kubernetes/conf/kube-proxy.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=/opt/kubernetes/conf/kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=/opt/kubernetes/conf/kube-proxy.kubeconfig
# Ship the certs and kubeconfig to the node (run on the master).
scp /opt/kubernetes/ssl/kube-proxy* 192.168.20.111:/opt/kubernetes/ssl
scp /opt/kubernetes/conf/kube-proxy.kubeconfig 192.168.20.111:/opt/kubernetes/conf/
# Create the kube-proxy configuration file (run on the node).
# NOTE(review): YAML indentation appears lost in this paste -- "kubeconfig:"
# must be nested under "clientConnection:"; restore nesting before use.
cat > /opt/kubernetes/conf/kube-proxy.yaml << "EOF"
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.20.111
clientConnection:
kubeconfig: /opt/kubernetes/conf/kube-proxy.kubeconfig
clusterCIDR: 172.40.0.0/16
healthzBindAddress: 192.168.20.111:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.20.111:10249
mode: "ipvs"
EOF
# Create the kube-proxy systemd unit (run on the node).
mkdir /opt/kubernetes/logs/kube-proxy -p
cat > /usr/lib/systemd/system/kube-proxy.service << "EOF"
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
WorkingDirectory=/opt/kubernetes
ExecStart=/usr/local/bin/kube-proxy \
--config=/opt/kubernetes/conf/kube-proxy.yaml \
--alsologtostderr=true --logtostderr=false \
--log-dir=/opt/kubernetes/logs/kube-proxy \
--v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
# Start it.
# Install conntrack first, otherwise kube-proxy reports errors on restart.
yum -y install conntrack
systemctl daemon-reload && systemctl enable --now kube-proxy && systemctl status kube-proxy.service
# ---- Install the flannel network component (run on worker nodes) ----
# Nodes stay NotReady until a network plugin is installed.
wget https://github.com/coreos/flannel/releases/download/v0.11.0/flannel-v0.11.0-linux-amd64.tar.gz
mkdir /usr/local/flannel -p
tar xf flannel-v0.11.0-linux-amd64.tar.gz -C /usr/local/flannel
# Copy the etcd client certificates to the node (run on the master).
scp /opt/kubernetes/ssl/etcd* 192.168.20.111:/opt/kubernetes/ssl
# Create the flanneld systemd unit (flanneld writes its subnet info to
# /run/flannel/subnet.env on startup).
# FIX: delimiter quoted ('EOF') so the backslash-newline continuations
# reach the unit file intact instead of being joined by the shell.
cat > /usr/lib/systemd/system/flanneld.service << 'EOF'
[Unit]
Description=Flanneld overlay address etcd agent
After=network-online.target network.target
Before=docker.service
[Service]
Type=notify
ExecStart=/usr/local/flannel/flanneld --ip-masq \
-etcd-endpoints=https://192.168.20.110:2379 \
-etcd-cafile=/opt/kubernetes/ssl/ca.pem \
-etcd-certfile=/opt/kubernetes/ssl/etcd.pem \
-etcd-keyfile=/opt/kubernetes/ssl/etcd-key.pem \
-etcd-prefix=/coreos.com/network
#生成/run/flannel/docker如果用的docker网络(kubelet不加cni参数)就需要配置到docker服务启动
ExecStartPost=/usr/local/flannel/mk-docker-opts.sh -k DOCKER_NETWORK_OPTIONS -d /run/flannel/docker
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
# Store flannel's network config in etcd (run on the master).
# Must use the etcd v2 API -- flannel v0.11 cannot read keys written via v3.
ETCDCTL_API=2 etcdctl \
--endpoints https://192.168.20.110:2379 \
--ca-file /opt/kubernetes/ssl/ca.pem \
--cert-file /opt/kubernetes/ssl/etcd.pem \
--key-file /opt/kubernetes/ssl/etcd-key.pem \
set /coreos.com/network/config '{"Network":"172.40.0.0/16","Backend":{"Type":"vxlan"}}'
# Restart flanneld (this generates /run/flannel/docker and /run/flannel/subnet.env).
systemctl daemon-reload && systemctl enable --now flanneld
# Manual step: if kubelet does NOT use --network-plugin=cni, docker must pick
# up flannel's subnet -- add the two lines below to the docker unit's [Service]
# section (after restart, docker0 moves into the /run/flannel/docker subnet).
# NOTE(review): the next three lines are EDITING INSTRUCTIONS, not commands
# to execute as part of a script.
vim /usr/lib/systemd/system/docker.service
EnvironmentFile=/run/flannel/docker
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock $DOCKER_NETWORK_OPTIONS
systemctl daemon-reload && systemctl restart docker
# The remainder applies only when kubelet runs WITH --network-plugin=cni.
# Symptom without the CNI plugins installed: kubelet logs
# 'Unable to update cni config" err="no networks found in /etc/cni/net.d'
# and "kubectl get node" keeps showing NotReady.
mkdir -p /etc/cni/net.d
mkdir -p /opt/cni/bin
wget https://github.com/containernetworking/plugins/releases/download/v0.9.0/cni-plugins-linux-amd64-v0.9.0.tgz
# Unpack the CNI plugin binaries into the directory kubelet searches.
tar xf cni-plugins-linux-amd64-v0.9.0.tgz -C /opt/cni/bin
# Create the CNI network configuration delegating to flannel.
cat > /etc/cni/net.d/10-flannel.conflist << EOF
{
"cniVersion": "0.4.0",
"name": "cbr0",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
EOF
# Restart kubelet: the node should go Ready and a cni0 interface should
# appear (its subnet must match FLANNEL_SUBNET in /run/flannel/subnet.env,
# e.g. 172.40.4.1).
systemctl restart kubelet.service
systemctl restart flanneld
# ---- coredns plugin installation (run on the master) ----
# Template:
# https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/dns/coredns/coredns.yaml.base
# Copy the template contents into a local file:
#   vim coredns.yaml
# Then make the following substitutions inside coredns.yaml:
# 1. Cluster domain placeholder __DNS__DOMAIN__ (usually cluster.local):
#      kubernetes __DNS__DOMAIN__ in-addr.arpa ip6.arpa {  -->  kubernetes cluster.local in-addr.arpa ip6.arpa {
# 2. Replace the Google registry image with the Docker Hub one (easier to pull):
#      image: k8s.gcr.io/coredns/coredns:v1.8.6  -->  image: coredns/coredns:1.8.6
# 3. Pod memory limit (300Mi is sufficient):
#      memory: __DNS__MEMORY__LIMIT__  -->  memory: 300Mi
# 4. coredns service IP -- conventionally the second address of the service
#    CIDR (10.40.0.2); the first (10.40.0.1) belongs to the apiserver service:
#      clusterIP: __DNS__SERVER__  -->  clusterIP: 10.40.0.2
# Deploy coredns (the clusterIP must match clusterDNS in kubelet-config.yml).
kubectl apply -f coredns.yaml