IP address | Hostname | Role | Software
---|---|---|---
192.168.100.201 | m-1 | master | kube-apiserver、kube-controller-manager、kube-scheduler |
192.168.100.202 | m-2 | master | kube-apiserver、kube-controller-manager、kube-scheduler |
192.168.100.203 | m-3 | master | kube-apiserver、kube-controller-manager、kube-scheduler |
192.168.100.211 | n-1 | node | kubelet、kube-proxy |
192.168.100.212 | n-2 | node | kubelet、kube-proxy |
192.168.100.200 | / | vip | / |
Software | Version
---|---
debian | 11
kubernetes | 1.22.1
etcd | 3.5.0
calico | 3.20
coredns | 1.8.4
docker | 20.10.8
Name | CIDR
---|---
host | 192.168.100.0/24 |
service | 20.0.0.0/16 |
pod | 10.0.0.0/16 |
#m-1
hostnamectl --static set-hostname m-1
#m-2
hostnamectl --static set-hostname m-2
#m-3
hostnamectl --static set-hostname m-3
#n-1
hostnamectl --static set-hostname n-1
#n-2
hostnamectl --static set-hostname n-2
#m-1 m-2 m-3 n-1 n-2
cp /etc/hosts{,.bak}
cat >> /etc/hosts << EOF
192.168.100.201 m-1
192.168.100.202 m-2
192.168.100.203 m-3
192.168.100.211 n-1
192.168.100.212 n-2
EOF
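#quick sanity check that every entry resolves and answers
for h in m-1 m-2 m-3 n-1 n-2; do ping -c 1 $h; done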
#m-1 m-2 m-3 n-1 n-2
mkdir -p /opt/script
cat > /opt/script/swap-off.sh << EOF
#!/bin/bash
set -ex
case \$1 in
    start)
        swapoff -a
        ;;
    stop)
        swapon -a
        ;;
    *)
        swapoff -a
        ;;
esac
EOF
chmod +x /opt/script/swap-off.sh
cat > /usr/lib/systemd/system/swap-off.service << EOF
[Unit]
Description=swap off
After=networking.service
[Service]
Type=oneshot
RemainAfterExit=true
ExecStart=/opt/script/swap-off.sh start
ExecStop=/opt/script/swap-off.sh stop
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable --now swap-off.service
systemctl status swap-off.service
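#verify: swapon prints nothing and free shows 0 swap
swapon --show
free -h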
#m-1 m-2 m-3 n-1 n-2
apt install -y chrony
#m-1 m-2 m-3
cp /etc/chrony/chrony.conf{,.bak}
sed -i 's/^pool .*/pool ntp.aliyun.com iburst/' /etc/chrony/chrony.conf
sed -i '/aliyun/a allow 192.168.100.0\/24' /etc/chrony/chrony.conf
sed -i '/^allow/a local stratum 10' /etc/chrony/chrony.conf
#n-1 n-2
cp /etc/chrony/chrony.conf{,.bak}
sed -i 's/^pool .*/pool m-1 iburst/' /etc/chrony/chrony.conf
sed -i '/pool m-1/a pool m-2 iburst' /etc/chrony/chrony.conf
sed -i '/pool m-2/a pool m-3 iburst' /etc/chrony/chrony.conf
#m-1 m-2 m-3 n-1 n-2
systemctl restart chrony.service
systemctl enable --now chrony.service
systemctl status chrony.service
#m-1 m-2 m-3 n-1 n-2
chronyc sources -v
#m-1 m-2 m-3 n-1 n-2
cat >> /etc/security/limits.conf << EOF
root soft nofile 655350
root hard nofile 655350
root soft nproc 655350
root hard nproc 655350
root soft memlock unlimited
root hard memlock unlimited
* soft nofile 655350
* hard nofile 655350
* soft nproc 655350
* hard nproc 655350
* soft memlock unlimited
* hard memlock unlimited
EOF
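#the limits apply to new login sessions; after re-login, verify with:
ulimit -n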
#m-1 m-2 m-3 n-1 n-2
apt install -y ipvsadm ipset sysstat conntrack
#m-1 m-2 m-3 n-1 n-2
#modules-load.d files list one module name per line
cat > /etc/modules-load.d/ipvs.conf << EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_fo
ip_vs_nq
ip_vs_sed
ip_vs_ftp
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
ipip
nf_conntrack
br_netfilter
EOF
systemctl restart systemd-modules-load.service
systemctl enable --now systemd-modules-load.service
lsmod | grep -e ip_vs -e nf_conntrack
#m-1 m-2 m-3 n-1 n-2
cat > /etc/sysctl.d/k8s.conf << EOF
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
vm.overcommit_memory = 1
vm.panic_on_oom = 0
fs.inotify.max_user_watches = 89100
fs.file-max = 52706963
fs.nr_open = 52706963
net.netfilter.nf_conntrack_max = 2310720
net.ipv4.tcp_keepalive_time = 600
net.ipv4.tcp_keepalive_probes = 3
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_max_tw_buckets = 36000
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_max_orphans = 327680
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 16384
net.ipv4.tcp_timestamps = 0
net.core.somaxconn = 16384
EOF
sysctl -p /etc/sysctl.d/k8s.conf
sysctl --system
lsmod | grep --color=auto -e ip_vs -e nf_conntrack
#m-1 m-2 m-3 n-1 n-2
ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa
apt install -y sshpass
user="root"
password="debian"
for ip in m-1 m-2 m-3 n-1 n-2; do
sshpass -p $password ssh-copy-id -i ~/.ssh/id_rsa.pub -o StrictHostKeyChecking=no $user@$ip;
done
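#quick check that key-based login works without a password prompt
for ip in m-1 m-2 m-3 n-1 n-2; do ssh -o BatchMode=yes $ip hostname; done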
#m-1 m-2 m-3 n-1 n-2
mv /etc/apt/sources.list{,.bak}
cat > /etc/apt/sources.list <<EOF
deb http://mirrors.aliyun.com/debian/ bullseye main
deb-src http://mirrors.aliyun.com/debian/ bullseye main
deb http://mirrors.aliyun.com/debian-security/ bullseye-security main
deb-src http://mirrors.aliyun.com/debian-security/ bullseye-security main
deb http://mirrors.aliyun.com/debian/ bullseye-updates main
deb-src http://mirrors.aliyun.com/debian/ bullseye-updates main
EOF
apt update
apt upgrade -y
#m-1 m-2 m-3
apt install -y haproxy
cat > /etc/haproxy/haproxy.cfg << EOF
global
    log /dev/log local2
    chroot /var/lib/haproxy
    pidfile /var/run/haproxy.pid
    maxconn 4000
    user haproxy
    group haproxy
    daemon

defaults
    log global
    mode http
    option httplog
    timeout connect 5000
    timeout client 50000
    timeout server 50000
    timeout http-request 15s
    timeout http-keep-alive 15s

frontend k8s-api
    bind *:16443
    mode tcp
    option tcplog
    default_backend k8s-api

backend k8s-api
    mode tcp
    option tcp-check
    balance roundrobin
    default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    server master1 192.168.100.201:6443 check
    server master2 192.168.100.202:6443 check
    server master3 192.168.100.203:6443 check

listen stats
    bind *:9000
    mode http
    stats enable
    stats hide-version
    stats uri /stats
    stats refresh 30s
    stats realm Haproxy\ Statistics
    stats auth admin:password
EOF
systemctl restart haproxy
systemctl enable --now haproxy
systemctl status haproxy
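#haproxy should now listen on 16443 (apiserver frontend) and 9000 (stats)
ss -lntp | grep -E ':16443|:9000'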
#m-1 m-2 m-3
apt install -y keepalived
#health-check script (the quoted 'EOF' keeps $(...) from expanding when the file is written)
cat > /etc/keepalived/check_haproxy.sh << 'EOF'
#!/bin/sh
if [ $(ps -C haproxy --no-headers | wc -l) -eq 0 ]; then
    systemctl restart haproxy
fi
sleep 2
if [ $(ps -C haproxy --no-headers | wc -l) -eq 0 ]; then
    killall keepalived
fi
EOF
chmod +x /etc/keepalived/check_haproxy.sh
#m-1
cat << EOF > /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_1
    script_user root
    enable_script_security
}
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 5
    weight -10
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 100
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s
    }
    virtual_ipaddress {
        192.168.100.200/24
    }
    track_script {
        check_haproxy
    }
}
EOF
#m-2
cat << EOF > /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_2
    script_user root
    enable_script_security
}
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 5
    weight -10
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 100
    priority 95
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s
    }
    virtual_ipaddress {
        192.168.100.200/24
    }
    track_script {
        check_haproxy
    }
}
EOF
#m-3
cat << EOF > /etc/keepalived/keepalived.conf
global_defs {
    router_id LVS_3
    script_user root
    enable_script_security
}
vrrp_script check_haproxy {
    script "/etc/keepalived/check_haproxy.sh"
    interval 5
    weight -10
    fall 2
    rise 1
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 100
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass k8s
    }
    virtual_ipaddress {
        192.168.100.200/24
    }
    track_script {
        check_haproxy
    }
}
EOF
systemctl restart keepalived
systemctl enable --now keepalived
systemctl status keepalived
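#the vip should be bound on the active MASTER (m-1, priority 100); assumes the ens33 interface from the configs above
ip addr show ens33 | grep 192.168.100.200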
#m-1 m-2 m-3 n-1 n-2
#configure the docker-ce apt repo (the suite must match the host release: bullseye on Debian 11)
cat > /etc/apt/sources.list.d/docker.list << EOF
deb [arch=amd64] http://mirrors.aliyun.com/docker-ce/linux/debian bullseye stable
EOF
#add the repo GPG key
apt install -y gpg
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 7EA0A9C3F273FCD8
apt update
apt install -y docker-ce
#registry mirror and systemd cgroup driver
cat > /etc/docker/daemon.json << EOF
{
    "exec-opts": ["native.cgroupdriver=systemd"],
    "registry-mirrors": ["http://hub-mirror.c.163.com"]
}
EOF
#reload and restart
systemctl daemon-reload
systemctl restart docker
systemctl enable --now docker
docker info
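#kubelet is configured for the systemd cgroup driver below; confirm docker picked it up
docker info --format '{{.CgroupDriver}}'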
#m-1 m-2 m-3 n-1 n-2
echo 'export PATH=$PATH:/opt/etcd/bin:/opt/kubernetes/bin' >> /etc/profile
source /etc/profile
#m-1 m-2 m-3
mkdir -p /opt/etcd/{bin,cfssl,pki,ssl,cfg,default.etcd}
#m-1 m-2 m-3 n-1 n-2
mkdir -p /opt/kubernetes/{bin,cfssl,pki,ssl,cfg,logs}
#https://github.com/cloudflare/cfssl
#m-1
wget -P soft/ https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl_1.6.1_linux_amd64
wget -P soft/ https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssljson_1.6.1_linux_amd64
wget -P soft/ https://github.com/cloudflare/cfssl/releases/download/v1.6.1/cfssl-certinfo_1.6.1_linux_amd64
root@m-1:~/k8s# tree
.
└── soft
    ├── cfssl_1.6.1_linux_amd64
    ├── cfssl-certinfo_1.6.1_linux_amd64
    └── cfssljson_1.6.1_linux_amd64
cp soft/cfssl_1.6.1_linux_amd64 /usr/local/bin/cfssl
cp soft/cfssl-certinfo_1.6.1_linux_amd64 /usr/local/bin/cfssl-certinfo
cp soft/cfssljson_1.6.1_linux_amd64 /usr/local/bin/cfssljson
chmod +x /usr/local/bin/cfssl*
#https://github.com/etcd-io/etcd
#m-1
wget -P soft/ https://github.com/etcd-io/etcd/releases/download/v3.5.0/etcd-v3.5.0-linux-amd64.tar.gz
root@m-1:~/k8s# tree
.
└── soft
    ├── cfssl_1.6.1_linux_amd64
    ├── cfssl-certinfo_1.6.1_linux_amd64
    ├── cfssljson_1.6.1_linux_amd64
    └── etcd-v3.5.0-linux-amd64.tar.gz
tar -xvf soft/etcd-v3.5.0-linux-amd64.tar.gz -C /opt/etcd/bin/ --strip-components 1 etcd-v3.5.0-linux-amd64/etcd etcd-v3.5.0-linux-amd64/etcdctl
for m in m-2 m-3; do
scp /opt/etcd/bin/* $m:/opt/etcd/bin/;
done
cat > /opt/etcd/cfssl/ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "876000h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "876000h"
}
}
}
}
EOF
cat > /opt/etcd/cfssl/etcd-ca-csr.json << EOF
{
"CN": "etcd",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Chongqing",
"L": "Chongqing",
"O": "etcd",
"OU": "Etcd Security"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
cat > /opt/etcd/cfssl/etcd-csr.json << EOF
{
"CN": "etcd",
"hosts": [
"192.168.100.201",
"192.168.100.202",
"192.168.100.203"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Chongqing",
"L": "Chongqing",
"O": "etcd",
"OU": "Etcd Security"
}
]
}
EOF
#m-1
cfssl gencert \
-initca /opt/etcd/cfssl/etcd-ca-csr.json | cfssljson \
-bare /opt/etcd/pki/ca
cfssl gencert \
-ca=/opt/etcd/pki/ca.pem \
-ca-key=/opt/etcd/pki/ca-key.pem \
-config=/opt/etcd/cfssl/ca-config.json \
-profile="kubernetes" /opt/etcd/cfssl/etcd-csr.json | cfssljson \
-bare /opt/etcd/ssl/etcd
for m in m-2 m-3; do
scp /opt/etcd/pki/* $m:/opt/etcd/pki/;
scp /opt/etcd/ssl/* $m:/opt/etcd/ssl;
done
#m-1 m-2 m-3
lip=$(hostname -i)
lip1="192.168.100.201"
lip2="192.168.100.202"
lip3="192.168.100.203"
lnm=$(hostname)
lnm1="m-1"
lnm2="m-2"
lnm3="m-3"
cat > /opt/etcd/cfg/etcd.conf << EOF
#[Member]
ETCD_NAME="$lnm"
ETCD_DATA_DIR="/opt/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://$lip:2380"
ETCD_LISTEN_CLIENT_URLS="https://$lip:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://$lip:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://$lip:2379"
ETCD_INITIAL_CLUSTER="$lnm1=https://$lip1:2380,$lnm2=https://$lip2:2380,$lnm3=https://$lip3:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
EOF
#m-1 m-2 m-3
cat > /usr/lib/systemd/system/etcd.service << EOF
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf
ExecStart=/opt/etcd/bin/etcd \\
--cert-file=/opt/etcd/ssl/etcd.pem \\
--key-file=/opt/etcd/ssl/etcd-key.pem \\
--trusted-ca-file=/opt/etcd/pki/ca.pem \\
--peer-cert-file=/opt/etcd/ssl/etcd.pem \\
--peer-key-file=/opt/etcd/ssl/etcd-key.pem \\
--peer-trusted-ca-file=/opt/etcd/pki/ca.pem \\
--peer-client-cert-auth \\
--client-cert-auth
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
#m-1 m-2 m-3
systemctl daemon-reload
systemctl enable --now etcd
systemctl status etcd
lip1="192.168.100.201"
lip2="192.168.100.202"
lip3="192.168.100.203"
ETCDCTL_API=3 etcdctl --write-out=table --cacert=/opt/etcd/pki/ca.pem --cert=/opt/etcd/ssl/etcd.pem --key=/opt/etcd/ssl/etcd-key.pem --endpoints=https://$lip1:2379,https://$lip2:2379,https://$lip3:2379 endpoint health
ETCDCTL_API=3 etcdctl --write-out=table --cacert=/opt/etcd/pki/ca.pem --cert=/opt/etcd/ssl/etcd.pem --key=/opt/etcd/ssl/etcd-key.pem --endpoints=https://$lip1:2379,https://$lip2:2379,https://$lip3:2379 member list
ETCDCTL_API=3 etcdctl --write-out=table --cacert=/opt/etcd/pki/ca.pem --cert=/opt/etcd/ssl/etcd.pem --key=/opt/etcd/ssl/etcd-key.pem --endpoints=https://$lip1:2379,https://$lip2:2379,https://$lip3:2379 endpoint status
#m-1
wget -P soft/ https://storage.googleapis.com/kubernetes-release/release/v1.22.1/kubernetes-server-linux-amd64.tar.gz
root@m-1:~/k8s# tree
.
└── soft
    ├── cfssl_1.6.1_linux_amd64
    ├── cfssl-certinfo_1.6.1_linux_amd64
    ├── cfssljson_1.6.1_linux_amd64
    ├── etcd-v3.5.0-linux-amd64.tar.gz
    └── kubernetes-server-linux-amd64.tar.gz
tar -xvf soft/kubernetes-server-linux-amd64.tar.gz -C /opt/kubernetes/bin/ --strip-components 3 kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kubectl,kubelet,kube-proxy,kube-scheduler}
for m in m-2 m-3; do
scp /opt/kubernetes/bin/{kube-apiserver,kube-controller-manager,kubectl,kube-scheduler} $m:/opt/kubernetes/bin/;
done
for m in n-1 n-2; do
scp /opt/kubernetes/bin/{kubelet,kube-proxy} $m:/opt/kubernetes/bin/;
done
#m-1
#ca
cat > /opt/kubernetes/cfssl/ca-config.json << EOF
{
"signing": {
"default": {
"expiry": "876000h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "876000h"
}
}
}
}
EOF
cat > /opt/kubernetes/cfssl/ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Chongqing",
"L": "Chongqing",
"O": "k8s",
"OU": "System"
}
],
"ca": {
"expiry": "876000h"
}
}
EOF
#front-proxy
cat > /opt/kubernetes/cfssl/front-proxy-ca-csr.json << EOF
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
#m-1
#ca
cfssl gencert \
-initca /opt/kubernetes/cfssl/ca-csr.json | cfssljson \
-bare /opt/kubernetes/pki/ca
#front-proxy
cfssl gencert \
-initca /opt/kubernetes/cfssl/front-proxy-ca-csr.json | cfssljson \
-bare /opt/kubernetes/pki/front-proxy-ca
for m in m-2 m-3 n-1 n-2; do
scp /opt/kubernetes/pki/ca* $m:/opt/kubernetes/pki/;
done
#m-1
cat > /opt/kubernetes/cfssl/apiserver-csr.json << EOF
{
"CN": "kube-apiserver",
"hosts": [
"127.0.0.1",
"20.0.0.1",
"192.168.100.200",
"192.168.100.201",
"192.168.100.202",
"192.168.100.203",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Chongqing",
"L": "Chongqing",
"O": "k8s",
"OU": "System"
}
]
}
EOF
#front-proxy-client
cat > /opt/kubernetes/cfssl/front-proxy-client-csr.json << EOF
{
"CN": "front-proxy-client",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
#apiserver
cfssl gencert \
-ca=/opt/kubernetes/pki/ca.pem \
-ca-key=/opt/kubernetes/pki/ca-key.pem \
-config=/opt/kubernetes/cfssl/ca-config.json \
-profile=kubernetes /opt/kubernetes/cfssl/apiserver-csr.json | cfssljson \
-bare /opt/kubernetes/ssl/kube-apiserver
#front-proxy-client
cfssl gencert \
-ca=/opt/kubernetes/pki/front-proxy-ca.pem \
-ca-key=/opt/kubernetes/pki/front-proxy-ca-key.pem \
-config=/opt/kubernetes/cfssl/ca-config.json \
-profile=kubernetes /opt/kubernetes/cfssl/front-proxy-client-csr.json | cfssljson \
-bare /opt/kubernetes/ssl/front-proxy-client
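#optional: inspect a generated cert, e.g. confirm the apiserver SANs cover the vip and the first service IP
cfssl-certinfo -cert /opt/kubernetes/ssl/kube-apiserver.pem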
#sa key
openssl genrsa -out /opt/kubernetes/ssl/sa.key 2048
openssl rsa -in /opt/kubernetes/ssl/sa.key -pubout -out /opt/kubernetes/ssl/sa.pub
#token
cat > /opt/kubernetes/cfg/token.csv << EOF
$(head -c 16 /dev/urandom | od -An -t x | tr -d ' '),kubelet-bootstrap,10001,"system:node-bootstrapper"
EOF
for m in m-2 m-3; do
scp /opt/kubernetes/ssl/kube-apiserver* $m:/opt/kubernetes/ssl/;
scp /opt/kubernetes/ssl/sa* $m:/opt/kubernetes/ssl/;
scp /opt/kubernetes/cfg/token.csv $m:/opt/kubernetes/cfg/;
done
for m in m-2 m-3 n-1 n-2; do
scp /opt/kubernetes/pki/front-proxy* $m:/opt/kubernetes/pki/;
done
for m in m-2 m-3; do
scp /opt/kubernetes/ssl/front-proxy-client* $m:/opt/kubernetes/ssl/;
done
#m-1 m-2 m-3
lip=$(hostname -i)
cat > /opt/kubernetes/cfg/kube-apiserver.conf << EOF
KUBE_APISERVER_OPTS="\\
--advertise-address=$lip \\
--allow-privileged=true \\
--authorization-mode=Node,RBAC \\
--client-ca-file=/opt/kubernetes/pki/ca.pem \\
--enable-admission-plugins=NodeRestriction \\
--enable-bootstrap-token-auth=true \\
--token-auth-file=/opt/kubernetes/cfg/token.csv \\
--enable-aggregator-routing=true \\
--etcd-cafile=/opt/etcd/pki/ca.pem \\
--etcd-certfile=/opt/etcd/ssl/etcd.pem \\
--etcd-keyfile=/opt/etcd/ssl/etcd-key.pem \\
--etcd-servers=https://192.168.100.201:2379,https://192.168.100.202:2379,https://192.168.100.203:2379 \\
--kubelet-client-certificate=/opt/kubernetes/ssl/kube-apiserver.pem \\
--kubelet-client-key=/opt/kubernetes/ssl/kube-apiserver-key.pem \\
--proxy-client-cert-file=/opt/kubernetes/ssl/front-proxy-client.pem \\
--proxy-client-key-file=/opt/kubernetes/ssl/front-proxy-client-key.pem \\
--requestheader-allowed-names=front-proxy-client \\
--requestheader-client-ca-file=/opt/kubernetes/pki/front-proxy-ca.pem \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-username-headers=X-Remote-User \\
--secure-port=6443 \\
--service-account-issuer=https://kubernetes.default.svc.cluster.local \\
--service-account-key-file=/opt/kubernetes/ssl/sa.pub \\
--service-account-signing-key-file=/opt/kubernetes/ssl/sa.key \\
--service-cluster-ip-range=20.0.0.0/16 \\
--tls-cert-file=/opt/kubernetes/ssl/kube-apiserver.pem \\
--tls-private-key-file=/opt/kubernetes/ssl/kube-apiserver-key.pem \\
--alsologtostderr=true \\
--logtostderr=false \\
--log-dir=/opt/kubernetes/logs/"
EOF
#m-1 m-2 m-3
cat > /usr/lib/systemd/system/kube-apiserver.service << EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver \$KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable --now kube-apiserver
systemctl status kube-apiserver
cat > /opt/kubernetes/cfssl/admin-csr.json << EOF
{
"CN": "admin",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Chongqing",
"L": "Chongqing",
"O": "system:masters",
"OU": "system"
}
]
}
EOF
cfssl gencert \
-ca=/opt/kubernetes/pki/ca.pem \
-ca-key=/opt/kubernetes/pki/ca-key.pem \
-config=/opt/kubernetes/cfssl/ca-config.json \
-profile=kubernetes /opt/kubernetes/cfssl/admin-csr.json | cfssljson \
-bare /opt/kubernetes/ssl/admin
#admin.kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.100.200:16443 \
--kubeconfig=/opt/kubernetes/cfg/admin.kubeconfig
kubectl config set-credentials admin \
--client-certificate=/opt/kubernetes/ssl/admin.pem \
--client-key=/opt/kubernetes/ssl/admin-key.pem \
--embed-certs=true \
--kubeconfig=/opt/kubernetes/cfg/admin.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=admin \
--kubeconfig=/opt/kubernetes/cfg/admin.kubeconfig
kubectl config use-context default \
--kubeconfig=/opt/kubernetes/cfg/admin.kubeconfig
mkdir -p ~/.kube
cp /opt/kubernetes/cfg/admin.kubeconfig ~/.kube/config
#grant kube-apiserver access to the kubelet API; without this binding, kubectl logs/exec are denied
kubectl create clusterrolebinding kube-apiserver:kubelet-apis --clusterrole=system:kubelet-api-admin --user kube-apiserver --kubeconfig ~/.kube/config
apt install -y bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
kubectl completion bash > ~/.kube/completion.bash.inc
source '/root/.kube/completion.bash.inc'
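#sanity check: kubectl should reach kube-apiserver through the vip
kubectl cluster-info
kubectl get --raw /healthz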
cat > /opt/kubernetes/cfssl/kube-controller-manager-csr.json << EOF
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Chongqing",
"L": "Chongqing",
"O": "system:kube-controller-manager",
"OU": "system"
}
]
}
EOF
cfssl gencert \
-ca=/opt/kubernetes/pki/ca.pem \
-ca-key=/opt/kubernetes/pki/ca-key.pem \
-config=/opt/kubernetes/cfssl/ca-config.json \
-profile=kubernetes /opt/kubernetes/cfssl/kube-controller-manager-csr.json | cfssljson \
-bare /opt/kubernetes/ssl/kube-controller-manager
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.100.200:16443 \
--kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=/opt/kubernetes/ssl/kube-controller-manager.pem \
--client-key=/opt/kubernetes/ssl/kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig
kubectl config set-context system:kube-controller-manager \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig
kubectl config use-context system:kube-controller-manager \
--kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig
for m in m-2 m-3; do
scp /opt/kubernetes/ssl/kube-controller-manager* $m:/opt/kubernetes/ssl;
scp /opt/kubernetes/cfg/kube-controller-manager.kubeconfig $m:/opt/kubernetes/cfg/;
done
cat > /opt/kubernetes/cfg/kube-controller-manager.conf << EOF
KUBE_CONTROLLER_MANAGER_OPTS=" \\
--allocate-node-cidrs=true \\
--authentication-kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--authorization-kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--bind-address=127.0.0.1 \\
--client-ca-file=/opt/kubernetes/pki/ca.pem \\
--cluster-cidr=10.0.0.0/16 \\
--cluster-name=kubernetes \\
--cluster-signing-cert-file=/opt/kubernetes/pki/ca.pem \\
--cluster-signing-key-file=/opt/kubernetes/pki/ca-key.pem \\
--controllers=*,bootstrapsigner,tokencleaner \\
--kubeconfig=/opt/kubernetes/cfg/kube-controller-manager.kubeconfig \\
--leader-elect=true \\
--requestheader-client-ca-file=/opt/kubernetes/pki/front-proxy-ca.pem \\
--root-ca-file=/opt/kubernetes/pki/ca.pem \\
--service-account-private-key-file=/opt/kubernetes/ssl/sa.key \\
--service-cluster-ip-range=20.0.0.0/16 \\
--use-service-account-credentials=true \\
--alsologtostderr=true \\
--logtostderr=false \\
--log-dir=/opt/kubernetes/logs"
EOF
cat > /usr/lib/systemd/system/kube-controller-manager.service << EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-controller-manager.conf
ExecStart=/opt/kubernetes/bin/kube-controller-manager \$KUBE_CONTROLLER_MANAGER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
#start and verify
systemctl daemon-reload
systemctl enable --now kube-controller-manager
systemctl status kube-controller-manager
cat > /opt/kubernetes/cfssl/kube-scheduler-csr.json << EOF
{
"CN": "system:kube-scheduler",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Chongqing",
"L": "Chongqing",
"O": "system:kube-scheduler",
"OU": "system"
}
]
}
EOF
cfssl gencert \
-ca=/opt/kubernetes/pki/ca.pem \
-ca-key=/opt/kubernetes/pki/ca-key.pem \
-config=/opt/kubernetes/cfssl/ca-config.json \
-profile=kubernetes /opt/kubernetes/cfssl/kube-scheduler-csr.json | cfssljson \
-bare /opt/kubernetes/ssl/kube-scheduler
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.100.200:16443 \
--kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
--client-certificate=/opt/kubernetes/ssl/kube-scheduler.pem \
--client-key=/opt/kubernetes/ssl/kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig
kubectl config set-context system:kube-scheduler \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig
kubectl config use-context system:kube-scheduler \
--kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig
for m in m-2 m-3; do
scp /opt/kubernetes/ssl/kube-scheduler* $m:/opt/kubernetes/ssl;
scp /opt/kubernetes/cfg/kube-scheduler.kubeconfig $m:/opt/kubernetes/cfg/;
done
cat > /opt/kubernetes/cfg/kube-scheduler.conf << EOF
KUBE_SCHEDULER_OPTS=" \\
--bind-address=127.0.0.1 \\
--leader-elect=true \\
--authentication-kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig \\
--authorization-kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig \\
--kubeconfig=/opt/kubernetes/cfg/kube-scheduler.kubeconfig \\
--alsologtostderr=true \\
--logtostderr=false \\
--log-dir=/opt/kubernetes/logs"
EOF
cat > /usr/lib/systemd/system/kube-scheduler.service << EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-scheduler.conf
ExecStart=/opt/kubernetes/bin/kube-scheduler \$KUBE_SCHEDULER_OPTS
Restart=on-failure
RestartSec=5
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable --now kube-scheduler
systemctl status kube-scheduler
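#m-1: all three control-plane components should now report Healthy (componentstatuses is deprecated but still served in v1.22)
kubectl get cs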
#m-1
TOKEN=$(awk -F "," '{print $1}' /opt/kubernetes/cfg/token.csv)
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.100.200:16443 \
--kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig
kubectl config set-credentials "kubelet-bootstrap" \
--token=${TOKEN} \
--kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user="kubelet-bootstrap" \
--kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
for m in n-1 n-2; do
scp /opt/kubernetes/cfg/bootstrap.kubeconfig $m:/opt/kubernetes/cfg/;
done
#n-1 n-2
cat > /opt/kubernetes/cfg/kubelet.yml << EOF
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /opt/kubernetes/pki/ca.pem
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
  - 20.0.0.10
clusterDomain: cluster.local
healthzBindAddress: 127.0.0.1
healthzPort: 10248
EOF
cat > /opt/kubernetes/cfg/kubelet.conf << EOF
KUBELET_OPTS="\\
--network-plugin=cni \\
--kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \\
--config=/opt/kubernetes/cfg/kubelet.yml \\
--cert-dir=/opt/kubernetes/ssl/ \\
--pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.5 \\
--log-dir=/opt/kubernetes/logs/ \\
--alsologtostderr=true \\
--logtostderr=false"
EOF
cat > /usr/lib/systemd/system/kubelet.service << EOF
[Unit]
Description=Kubernetes Kubelet
Wants=network-online.target
After=network-online.target
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kubelet.conf
ExecStart=/opt/kubernetes/bin/kubelet \$KUBELET_OPTS
Restart=always
StartLimitInterval=0
RestartSec=10
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable --now kubelet
systemctl status kubelet
#m-1
kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve
kubectl get nodes
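#nodes will report NotReady until the CNI plugin (calico, deployed below) is running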
#m-1
cat > /opt/kubernetes/cfssl/kube-proxy-csr.json << "EOF"
{
"CN": "system:kube-proxy",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "Chongqing",
"L": "Chongqing",
"O": "k8s",
"OU": "system"
}
]
}
EOF
cfssl gencert \
-ca=/opt/kubernetes/pki/ca.pem \
-ca-key=/opt/kubernetes/pki/ca-key.pem \
-config=/opt/kubernetes/cfssl/ca-config.json \
-profile=kubernetes /opt/kubernetes/cfssl/kube-proxy-csr.json | cfssljson \
-bare /opt/kubernetes/ssl/kube-proxy
kubectl config set-cluster kubernetes \
--certificate-authority=/opt/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://192.168.100.200:16443 \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy \
--client-certificate=/opt/kubernetes/ssl/kube-proxy.pem \
--client-key=/opt/kubernetes/ssl/kube-proxy-key.pem \
--embed-certs=true \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=kube-proxy \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig
kubectl config use-context default \
--kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig
for m in n-1 n-2; do
scp /opt/kubernetes/ssl/kube-proxy* $m:/opt/kubernetes/ssl;
scp /opt/kubernetes/cfg/kube-proxy.kubeconfig $m:/opt/kubernetes/cfg/;
done
#n-1 n-2
lip=$(hostname -i)
cat > /opt/kubernetes/cfg/kube-proxy.yml << EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
bindAddress: $lip
clientConnection:
  kubeconfig: /opt/kubernetes/cfg/kube-proxy.kubeconfig
clusterCIDR: 10.0.0.0/16
healthzBindAddress: 127.0.0.1:10256
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
EOF
lhn=$(hostname)
cat > /opt/kubernetes/cfg/kube-proxy.conf << EOF
KUBE_PROXY_OPTS="--v=2 \\
--config=/opt/kubernetes/cfg/kube-proxy.yml \\
--hostname-override=$lhn \\
--alsologtostderr=true \\
--logtostderr=false \\
--log-dir=/opt/kubernetes/logs/"
EOF
cat > /usr/lib/systemd/system/kube-proxy.service << EOF
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-proxy.conf
ExecStart=/opt/kubernetes/bin/kube-proxy \$KUBE_PROXY_OPTS
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable --now kube-proxy
systemctl status kube-proxy
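#verify kube-proxy wrote ipvs rules; the kubernetes service (20.0.0.1:443) should appear
ipvsadm -Ln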
wget https://docs.projectcalico.org/v3.20/manifests/calico.yaml
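#note: the manifest defaults CALICO_IPV4POOL_CIDR to 192.168.0.0/16; uncomment it and set 10.0.0.0/16 to match the pod network before applying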
kubectl apply -f calico.yaml
wget https://raw.githubusercontent.com/coredns/deployment/master/kubernetes/coredns.yaml.sed
#edit the downloaded manifest: set the Service clusterIP to the clusterDNS address from kubelet.yml
clusterIP: 20.0.0.10
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
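#apply the edited manifest (assuming the modified coredns.yaml.sed was saved as coredns.yaml)
kubectl apply -f coredns.yaml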
kubectl -n kube-system get pod | awk '{print $1}' | grep -E 'canal|calico|flannel' | xargs kubectl -n kube-system delete pod
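#optional end-to-end DNS check; busybox:1.28 is just one image that ships nslookup
kubectl run -it --rm dns-test --image=busybox:1.28 --restart=Never -- nslookup kubernetes.default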