1. Environment Preparation
[Run on all nodes]
1.1 Disable Unneeded Services
- Disable SELinux
# sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
# sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
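The sed edits only take effect after a reboot; to stop SELinux enforcement in the current session as well:
# setenforce 0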
- Disable swap
# sed -i "/swap/{s/^/#/g}" /etc/fstab
# swapoff -a
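Verify that swap is off; the Swap line should read all zeros:
# free -m | grep -i swap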
- Disable the firewall
# systemctl stop firewalld
# systemctl disable firewalld
- Disable other unneeded services
# systemctl disable auditd
# systemctl disable postfix
# systemctl disable irqbalance
# systemctl disable remote-fs
# systemctl disable tuned
# systemctl disable rhel-configure
1.2 Environment and Network
- hostname
# hostname xxxx
# vim /etc/hostname
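On CentOS 7 both steps can also be done at once with systemd (the hostname below is an example):
# hostnamectl set-hostname t-master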
- hosts
# cat >> /etc/hosts << EOF
10.10.xxx.47 t-master
10.10.xxx.46 t-node-01
10.10.xxx.45 t-node-02
10.10.xxx.44 t-node-03
EOF
- Enable IP forwarding and bridge filtering
# cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
# modprobe br_netfilter
# sysctl -p /etc/sysctl.d/k8s.conf
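Verify the module is loaded and the parameters are active:
# lsmod | grep br_netfilter
# sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward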
1.3 Install Dependencies
1.3.1 Set Up a Proxy (optional)
The intranet servers cannot reach the public internet, but one machine is available that can act as a proxy.
- Set the wget proxy (the address below is the proxy host reused for containerd in section 2.3; substitute your own)
# cat >> /etc/wgetrc << EOF
use_proxy=on
http_proxy=http://10.10.222.191:808/
https_proxy=http://10.10.222.191:808/
EOF
- Set the yum proxy
# cat >> /etc/yum.conf << EOF
proxy=http://10.10.222.191:808/
EOF
1.3.2 Add yum Repositories
# mkdir /etc/yum.repos.d/bak && cp -rf /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak
# wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos7_base.repo
# wget -O /etc/yum.repos.d/epel.repo http://mirrors.cloud.tencent.com/repo/epel-7.repo
# yum clean all && yum makecache
- Add the Kubernetes repo (the Aliyun mirror shown here is one common choice):
# cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=0
repo_gpgcheck=0
EOF
1.3.3 Install Dependencies and Update the System
# yum -y install vim-enhanced wget curl net-tools conntrack-tools bind-utils socat ipvsadm ipset
# yum -y update
- ip_vs modules (a minimal set; section 1.4 loads the full list)
# cat > /etc/sysconfig/modules/ipvs.modules << EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
Check the result:
# lsmod | grep -e ip_vs -e nf_conntrack_ipv4
nf_conntrack_ipv4 15053 0
nf_defrag_ipv4 12729 1 nf_conntrack_ipv4
ip_vs_sh 12688 0
ip_vs_wrr 12697 0
ip_vs_rr 12600 0
ip_vs 145458 6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack 139264 2 ip_vs,nf_conntrack_ipv4
libcrc32c 12644 3 xfs,ip_vs,nf_conntrack
1.4 System Tuning
- Kernel parameters (the values below are examples; tune them for your environment)
# cat >> /etc/sysctl.conf << EOF
vm.swappiness = 0
fs.file-max = 655350
EOF
# sysctl -p
- File descriptor limit
# ulimit -n 655350
To make the limit permanent, change the following two files:
# cat >> /etc/security/limits.conf << EOF
* soft nofile 655350
* hard nofile 655350
EOF
# vim /etc/systemd/system.conf
DefaultLimitNOFILE=655350
Or:
# echo "ulimit -n 655350" >> /etc/profile
- Load kernel modules
# cat << EOF > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack_ipv4"
for kernel_module in \${ipvs_modules}; do
/sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
if [ \$? -eq 0 ]; then
/sbin/modprobe \${kernel_module}
fi
done
EOF
# chmod +x /etc/sysconfig/modules/ipvs.modules
# bash /etc/sysconfig/modules/ipvs.modules
2. Install Containerd
[Install on all nodes]
2.1 Installation
# yum install -y yum-utils device-mapper-persistent-data lvm2
# yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
# yum list | grep containerd
# yum install containerd.io-1.4.4 -y
2.2 Edit the Configuration File
- Generate the default configuration
# mkdir -p /etc/containerd
# containerd config default > /etc/containerd/config.toml
- Point image references at the Aliyun mirrors
# sed -i "s#k8s.gcr.io#registry.cn-hangzhou.aliyuncs.com/google_containers#g" /etc/containerd/config.toml
# sed -i "s#https://registry-1.docker.io#https://registry.cn-hangzhou.aliyuncs.com#g" /etc/containerd/config.toml
- Add SystemdCgroup = true
# sed -i '/containerd.runtimes.runc.options/a\ \ \ \ \ \ \ \ \ \ \ \ SystemdCgroup = true' /etc/containerd/config.toml
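Confirm the option landed inside the runc options block:
# grep SystemdCgroup /etc/containerd/config.toml
SystemdCgroup = true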
- Configure the crictl runtime endpoint
If you skip this, crictl keeps printing warnings later on.
# cat > /etc/crictl.yaml << EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
image-endpoint: unix:///run/containerd/containerd.sock
timeout: 0
debug: false
EOF
Or:
# crictl config runtime-endpoint unix:///run/containerd/containerd.sock
2.3 Set Up a Proxy (optional)
[root@t-master ~]# mkdir /etc/systemd/system/containerd.service.d
[root@t-master ~]# cat > /etc/systemd/system/containerd.service.d/http_proxy.conf << EOF
[Service]
Environment="HTTP_PROXY=http://10.10.222.191:808/"
EOF
# systemctl daemon-reload && systemctl restart containerd
2.4 Start the Service
# systemctl daemon-reload
# systemctl enable containerd
# systemctl restart containerd
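Quick sanity check that the daemon is running and the client can reach it:
# systemctl is-active containerd
# ctr version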
2.5 Testing
2.5.1 The ctr command
- Pull an image
# ctr images pull docker.io/library/nginx:alpine
- List the downloaded images
[root@t-master ~]# ctr images ls
REF TYPE DIGEST SIZE PLATFORMS LABELS
docker.io/library/nginx:alpine application/vnd.docker.distribution.manifest.list.v2+json sha256:d8da873105d3eb0d1e59f188b90ec412409ac213c63c0652d287fc2e9f9b6178 9.4 MiB linux/386,linux/amd64,linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/ppc64le,linux/s390x -
2.5.2 The crictl command
[root@k8s-master ~]# crictl pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.10
Image is up to date for sha256:9ca5fafbe8dc1fc9fd5d7d62c4a80924795d8012acef12b3f9c4267731fe0473
[root@k8s-master ~]# crictl images
IMAGE TAG IMAGE ID SIZE
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver v1.23.10 9ca5fafbe8dc1 32.6MB
3. Installing the k8s Cluster
3.1 Install the Packages
[Run on all nodes]
- Install the packages
Install kubeadm, kubelet, and kubectl.
Note: make sure the Kubernetes yum repo from section 1.3.2 has been added.
yum install -y kubelet-1.23.10 kubeadm-1.23.10 kubectl-1.23.10
- Start the service
systemctl daemon-reload
systemctl enable kubelet && systemctl start kubelet
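Note: until kubeadm init/join has run, kubelet has no cluster configuration and will keep restarting in a short crash loop; this is expected and clears up on its own. You can watch it with:
# systemctl status kubelet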
3.2 Master Initialization
[Run on the master]
3.2.1 The Initialization File
- Generate a default configuration
# kubeadm config print init-defaults > kubeadm.yaml
Make the following changes:
- imageRepository: switch to a domestic mirror (e.g. Aliyun).
- criSocket: point at the containerd socket configured earlier.
- serviceSubnet: 10.1.0.0/16 defines the subnet for Services.
- podSubnet: 10.244.0.0/16 must match the flannel network configured later, otherwise DNS will not come up.
- Set the kube-proxy mode to ipvs.
- Because containerd is the runtime, set cgroupDriver to systemd.
- The edited file looks as follows (you can also take this file, adapt it, and initialize from it directly):
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 10.10.239.47
bindPort: 6443
nodeRegistration:
criSocket: /run/containerd/containerd.sock
name: t-master
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
kind: ClusterConfiguration
kubernetesVersion: v1.23.10
networking:
dnsDomain: cluster.local
serviceSubnet: 10.1.0.0/16
podSubnet: 10.244.0.0/16
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
cgroupDriver: systemd
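Before the real run, the file can be sanity-checked with kubeadm's dry-run mode, which applies no changes:
# kubeadm init --config=kubeadm.yaml --dry-run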
3.2.2 Pre-pull Images (optional)
- Pull the images ahead of time
# kubeadm config images pull --image-repository registry.cn-hangzhou.aliyuncs.com/google_containers
- Output:
W0918 16:47:28.097399 1773 version.go:104] falling back to the local client version: v1.23.10
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.10
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.10
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.10
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.10
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0
[config/images] Pulled registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6
- List the downloaded images
[root@k8s-master ~]# crictl images
IMAGE TAG IMAGE ID SIZE
registry.cn-hangzhou.aliyuncs.com/google_containers/coredns v1.8.6 a4ca41631cc7a 13.6MB
registry.cn-hangzhou.aliyuncs.com/google_containers/etcd 3.5.1-0 25f8c7f3da61c 98.9MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver v1.23.10 9ca5fafbe8dc1 32.6MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager v1.23.10 91a4a0d5de4e9 30.2MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy v1.23.10 71b9bf9750e1f 39.3MB
registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler v1.23.10 d5c0efb802d95 15.1MB
registry.cn-hangzhou.aliyuncs.com/google_containers/pause 3.6 6270bb605e12e 302kB
3.2.3 Initialization
# kubeadm init --config=kubeadm.yaml
- Output:
[init] Using Kubernetes version: v1.23.10
[preflight] Running pre-flight checks
[WARNING Service-Kubelet]: kubelet service is not enabled, please run 'systemctl enable kubelet.service'
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.10.181.32]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [k8s-master localhost] and IPs [10.10.181.32 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [k8s-master localhost] and IPs [10.10.181.32 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests". This can take up to 4m0s
[apiclient] All control plane components are healthy after 16.502464 seconds
[upload-config] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[kubelet] Creating a ConfigMap "kubelet-config-1.23" in namespace kube-system with the configuration for the kubelets in the cluster
NOTE: The "kubelet-config-1.23" naming of the kubelet ConfigMap is deprecated. Once the UnversionedKubeletConfigMap feature gate graduates to Beta the default name will become just "kubelet-config". Kubeadm upgrade will handle this transition transparently.
[upload-certs] Skipping phase. Please see --upload-certs
[mark-control-plane] Marking the node k8s-master as control-plane by adding the labels: [node-role.kubernetes.io/master(deprecated) node-role.kubernetes.io/control-plane node.kubernetes.io/exclude-from-external-load-balancers]
[mark-control-plane] Marking the node k8s-master as control-plane by adding the taints [node-role.kubernetes.io/master:NoSchedule]
[bootstrap-token] Using token: 014px5.1tqdvt4dp7s0akc4
[bootstrap-token] Configuring bootstrap tokens, cluster-info ConfigMap, RBAC Roles
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to get nodes
[bootstrap-token] configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[bootstrap-token] configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[bootstrap-token] configured RBAC rules to allow certificate rotation for all node client certificates in the cluster
[bootstrap-token] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[kubelet-finalize] Updating "/etc/kubernetes/kubelet.conf" to point to a rotatable kubelet client certificate and key
[addons] Applied essential addon: CoreDNS
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 10.10.181.32:6443 --token 014px5.1tqdvt4dp7s0akc4 \
--discovery-token-ca-cert-hash sha256:9a26373788a4e6805da511094bde4dce05240f76c8ef75830046a44b9bf22f43
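- Note: the bootstrap token in the join command expires after 24 hours (the ttl in kubeadm.yaml). To join nodes later, print a fresh join command on the master:
# kubeadm token create --print-join-command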
- FAQ
Initialization failed with the following error:
[root@t-master ~]# kubeadm init --config=kubeadm.yaml
[init] Using Kubernetes version: v1.20.5
[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables does not exist
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher
[Fix]
# modprobe br_netfilter
# echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables
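On CentOS 7 the br_netfilter module is not loaded automatically at boot, so this error can come back after a reboot. To load it persistently (the standard systemd modules-load mechanism):
# echo br_netfilter > /etc/modules-load.d/br_netfilter.conf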
3.2.4 First Steps
- Configure kubectl
# mkdir -p $HOME/.kube
# cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
# chown $(id -u):$(id -g) $HOME/.kube/config
If you are root, you can instead follow the hint in the output and set the variable: export KUBECONFIG=/etc/kubernetes/admin.conf
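- Verify the kubeconfig works
# kubectl cluster-info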
- Check the nodes
As shown below, there is one master node; it is NotReady because no network add-on has been deployed yet.
[root@t-master ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
t-master NotReady control-plane,master 3h10m v1.20.5
3.3 Joining Nodes and Installing the Network
There are no more pitfalls from here on; follow the earlier 1.20 installation document: 《k8s1.20 搭建-抛弃docker使用containerd》. A typical flannel deployment is shown below for reference.
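The flannel manifest's default Network (10.244.0.0/16) matches the podSubnet configured above; the exact manifest URL depends on the flannel release you use:
# kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml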