Deploying Kubernetes with kubespray (containerd + cilium)

Reference: https://kubespray.io/#/

Create the virtual machines

# Install the VM management tool
$ brew install multipass
# Create the virtual nodes
$ multipass launch -n kubespray -m 1G -c 1 -d 40G
$ multipass launch -n cilium-node1 -m 4G -c 4 -d 40G
$ multipass launch -n cilium-node2 -m 4G -c 4 -d 40G
$ multipass launch -n cilium-node3 -m 4G -c 4 -d 40G
# List the nodes
$ multipass list
Name                    State             IPv4             Image
kubespray               Running           192.168.64.7     Ubuntu 22.04 LTS
cilium-node1            Running           192.168.64.8     Ubuntu 22.04 LTS
cilium-node2            Running           192.168.64.9     Ubuntu 22.04 LTS
cilium-node3            Running           192.168.64.10    Ubuntu 22.04 LTS

Make sure root login is enabled on every server (root is used throughout; a non-root user also works but is left for you to explore)

sudo sed -i '/PermitRootLogin /c PermitRootLogin yes' /etc/ssh/sshd_config
sudo systemctl restart sshd
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
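
Instead of logging in to each VM one by one, the same sshd change can be pushed from the macOS host with multipass exec (a minimal sketch, assuming the node names created above; the ssh-keygen line above is run on the kubespray node as root, since that is where ssh-copy-id is run next):

# Run on the macOS host: enable root login on all four VMs
for node in kubespray cilium-node1 cilium-node2 cilium-node3; do
  multipass exec "$node" -- sudo sed -i '/PermitRootLogin /c PermitRootLogin yes' /etc/ssh/sshd_config
  multipass exec "$node" -- sudo systemctl restart sshd
done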

Install the containerd container runtime (this step and everything below run only on the kubespray server)

# nerdctl's CLI is largely compatible with docker's; if the download fails, fetch the release tarball from GitHub manually
root@kubespray:~# wget https://github.com/containerd/nerdctl/releases/download/v1.4.0/nerdctl-full-1.4.0-linux-amd64.tar.gz
root@kubespray:~# tar -zxvf nerdctl-full-1.4.0-linux-amd64.tar.gz -C /usr/local
root@kubespray:~# systemctl enable --now containerd buildkit
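
A quick sanity check that the container runtime is up:

root@kubespray:~# nerdctl version
root@kubespray:~# systemctl is-active containerd buildkit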

Configure passwordless SSH login

root@kubespray:~# ssh-copy-id [email protected]
root@kubespray:~# ssh-copy-id [email protected]
root@kubespray:~# ssh-copy-id [email protected]
# If you get a permission error, manually copy the contents of /root/.ssh/id_rsa.pub from the kubespray node and append it (on a new line) to /root/.ssh/authorized_keys on each of the other three nodes
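
To confirm that passwordless login works before continuing, a short loop from the kubespray node (BatchMode makes ssh fail instead of prompting for a password):

root@kubespray:~# for ip in 192.168.64.8 192.168.64.9 192.168.64.10; do ssh -o BatchMode=yes root@$ip hostname; done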

Install kubespray

# Clone the source code
root@kubespray:~# git clone https://github.com/kubernetes-sigs/kubespray.git
root@kubespray:~# cd kubespray
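
The container image used below is v2.20.0, so checking out the matching release tag keeps the sample inventory consistent with the image (assuming the project's usual vX.Y.Z tag naming):

root@kubespray:~# git checkout v2.20.0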

Pull the kubespray image

root@kubespray:~# nerdctl pull quay.io/kubespray/kubespray:v2.20.0
# Wait for the pull to finish; it can take a while
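
Once the pull finishes, the image should show up locally:

root@kubespray:~# nerdctl images | grep kubespray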

Modify the configuration

root@kubespray# cp -rfp inventory/sample inventory/mycluster
root@kubespray# declare -a IPS=(192.168.64.8 192.168.64.9 192.168.64.10)
root@kubespray# CONFIG_FILE=inventory/mycluster/hosts.yaml python3 contrib/inventory_builder/inventory.py ${IPS[@]}
# If SSH connections to the other three nodes fail, connect to each one manually once
root@kubespray# ssh [email protected] # type yes when prompted
root@kubespray# ssh [email protected] # type yes when prompted
root@kubespray# ssh [email protected] # type yes when prompted

# Plan the cluster node names, roles, and groups
root@kubespray# vim inventory/mycluster/hosts.yaml
all:
  hosts:
    node1:
      ansible_host: 192.168.64.8
      ip: 192.168.64.8
      access_ip: 192.168.64.8
    node2:
      ansible_host: 192.168.64.9
      ip: 192.168.64.9
      access_ip: 192.168.64.9
    node3:
      ansible_host: 192.168.64.10
      ip: 192.168.64.10
      access_ip: 192.168.64.10
  children:
    kube_control_plane:
      hosts:
        node1:
        node2:
    kube_node:
      hosts:
        node1:
        node2:
        node3:
    etcd:
      hosts:
        node1:
        node2:
        node3:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
    calico_rr:
      hosts: {}
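
Before editing anything else, it is worth confirming that Ansible can reach all three nodes with this inventory (this assumes ansible is installed on the kubespray node, as it is for the ad-hoc commands further below; the same check can also be run later from inside the kubespray container):

root@kubespray# ansible all -i inventory/mycluster/hosts.yaml -m ping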

# The key to a successful install from mainland China: point everything at domestic mirrors
root@kubespray# cp inventory/mycluster/group_vars/all/offline.yml inventory/mycluster/group_vars/all/mirror.yml
root@kubespray# sed -i -E '/# .*\{\{ files_repo/s/^# //g' inventory/mycluster/group_vars/all/mirror.yml
root@kubespray# tee -a inventory/mycluster/group_vars/all/mirror.yml <<EOF
gcr_image_repo: "gcr.m.daocloud.io"
kube_image_repo: "k8s.m.daocloud.io"
docker_image_repo: "docker.m.daocloud.io"
quay_image_repo: "quay.m.daocloud.io"
github_image_repo: "ghcr.m.daocloud.io"
files_repo: "https://files.m.daocloud.io"
EOF
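
To verify that the files_repo lines were uncommented and the mirror entries were appended:

root@kubespray# grep -E 'files_repo|_image_repo' inventory/mycluster/group_vars/all/mirror.yml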

# Configure the cluster version, network plugin, container runtime, etc.
root@kubespray# vim inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

# Choose the network plugin; cilium, calico, weave and flannel are supported
# cilium is used here
kube_network_plugin: cilium
 
# If your IPs differ from mine, make sure these two CIDRs do not conflict with your node network
# Service CIDR
kube_service_addresses: 10.233.0.0/18
 
# Pod CIDR
kube_pods_subnet: 10.233.64.0/18
 
# docker, crio and containerd are supported; containerd is recommended.
container_manager: containerd
 
# Whether to enable Kata Containers
kata_containers_enabled: false
 
# Cluster name; the default contains a '.', so setting your own name is recommended
cluster_name: k8s-cilium

# To configure other cluster add-ons
root@kubespray# vim inventory/mycluster/group_vars/k8s_cluster/addons.yml
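
For example, two add-ons that are commonly switched on there (a sketch; see the sample addons.yml for the full list of options):

# inventory/mycluster/group_vars/k8s_cluster/addons.yml
helm_enabled: true
metrics_server_enabled: true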

# Verify that the changes took effect
root@kubespray# cat inventory/mycluster/group_vars/all/all.yml
root@kubespray# cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml

# All nodes can be controlled from the kubespray node; stop the firewall (optional)
root@kubespray:~# ansible all -i inventory/mycluster/hosts.yaml -m shell -a "systemctl stop firewalld && systemctl disable firewalld"

# Enable IPv4 forwarding on all nodes (optional)
root@kubespray:~# ansible all -i inventory/mycluster/hosts.yaml -m shell -a "echo 'net.ipv4.ip_forward=1' | tee -a /etc/sysctl.conf && sysctl -p"

# Disable the swap partition on all nodes (optional)
root@kubespray:~# ansible all -i inventory/mycluster/hosts.yaml -m shell -a "sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab && swapoff -a"

Install Kubernetes

# Enter the kubespray container
root@kubespray:~# nerdctl run --rm -it --mount type=bind,source="$(pwd)"/inventory/mycluster,dst=/kubespray/inventory/mycluster   --mount type=bind,source="${HOME}"/.ssh/id_rsa,dst=/root/.ssh/id_rsa   quay.io/kubespray/kubespray:v2.20.0 bash

# --become-user specifies the passwordless-login user on the cluster nodes
root@abba3b870324:/kubespray# ansible-playbook -i inventory/mycluster/hosts.yaml  --become --become-user=root cluster.yml
 
# A different user can be specified too, as long as passwordless login is configured for it
root@abba3b870324:/kubespray# ansible-playbook -i inventory/mycluster/hosts.yaml  --become --become-user=ubuntu --private-key /root/.ssh/id_rsa cluster.yml # wait for the cluster install to finish

# If the install fails at: TASK [container-engine/runc : runc | Download runc binary] *******
# See: https://github.com/kubernetes-sigs/kubespray/issues/9206
# Use a customized configuration
root@abba3b870324:/kubespray# rm -rf /opt/kubespray/inventory/mycluster/group_vars
root@abba3b870324:/kubespray# mkdir -p /opt/kubespray/inventory/mycluster/group_vars/all
root@abba3b870324:/kubespray# curl https://gist.githubusercontent.com/yankay/a863cf2e300bff6f9040ab1c6c58fbae/raw/ab96867375325c6f43dc1e00ddd4f3d60e472487/customize.yml > /opt/kubespray/inventory/mycluster/group_vars/all/customize.yml 
root@abba3b870324:/kubespray# mkdir -p /opt/kubespray/inventory/mycluster/group_vars/k8s_cluster
root@abba3b870324:/kubespray# curl https://gist.githubusercontent.com/yankay/a863cf2e300bff6f9040ab1c6c58fbae/raw/ab96867375325c6f43dc1e00ddd4f3d60e472487/k8s-customize.yml > /opt/kubespray/inventory/mycluster/group_vars/k8s_cluster/k8s-customize.yml
# Run the playbook
root@abba3b870324:/kubespray# cd /opt/kubespray/
root@abba3b870324:/kubespray# export ANSIBLE_CONFIG=/opt/kubespray/ansible.cfg
root@abba3b870324:/kubespray# ansible-playbook -i inventory/mycluster/hosts.yaml  --become --become-user=root cluster.yml

Verify the cluster

root@cilium-node1:~# kubectl get node
NAME    STATUS   ROLES           AGE     VERSION
node1   Ready    control-plane   3h23m   v1.24.6
node2   Ready    control-plane   3h21m   v1.24.6
node3   Ready    <none>          3h17m   v1.24.6
root@cilium-node1:~# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE                         ERROR
etcd-0               Healthy   {"health":"true","reason":""}   
etcd-2               Healthy   {"health":"true","reason":""}   
etcd-1               Healthy   {"health":"true","reason":""}   
controller-manager   Healthy   ok                              
scheduler            Healthy   ok                              
root@cilium-node1:~# kubectl get pod -A
NAMESPACE     NAME                              READY   STATUS    RESTARTS      AGE
kube-system   cilium-operator-f6648bc78-5qqn8   1/1     Running   2 (17m ago)   67m
kube-system   cilium-operator-f6648bc78-v44zf   1/1     Running   1 (17m ago)   67m
kube-system   cilium-qwkkf                      1/1     Running   0             67m
kube-system   cilium-s4rdr                      1/1     Running   0             67m
kube-system   cilium-v2dnw                      1/1     Running   1 (58m ago)   67m
kube-system   coredns-665c4cc98d-k47qm          1/1     Running   0             56m
kube-system   coredns-665c4cc98d-l7mwz          1/1     Running   0             54m
kube-system   dns-autoscaler-6567c8b74f-ql5bw   1/1     Running   0             62m
kube-system   kube-apiserver-node1              1/1     Running   1             3h23m
kube-system   kube-apiserver-node2              1/1     Running   1             3h21m
kube-system   kube-controller-manager-node1     1/1     Running   6 (77m ago)   3h23m
kube-system   kube-controller-manager-node2     1/1     Running   7 (45m ago)   3h21m
kube-system   kube-proxy-2dhd6                  1/1     Running   0             69m
kube-system   kube-proxy-92vgt                  1/1     Running   0             69m
kube-system   kube-proxy-n9vjn                  1/1     Running   0             69m
kube-system   kube-scheduler-node1              1/1     Running   5 (66m ago)   3h23m
kube-system   kube-scheduler-node2              1/1     Running   8 (45m ago)   3h21m
kube-system   nginx-proxy-node3                 1/1     Running   0             3h17m
kube-system   nodelocaldns-jwvjq                1/1     Running   0             61m
kube-system   nodelocaldns-l5plk                1/1     Running   0             61m
kube-system   nodelocaldns-sjjwk                1/1     Running   0             61m
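
Since the cluster runs Cilium, its own health check is also worth a look (a sketch executed inside one of the cilium agent pods; requires a kubectl version that accepts ds/<name> with exec):

root@cilium-node1:~# kubectl -n kube-system exec ds/cilium -- cilium status --brief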

Add a node

root@abba3b870324:/kubespray# vim inventory/mycluster/hosts.yaml
all:
  hosts:
    node1:
      ansible_host: 192.168.64.8
      ip: 192.168.64.8
      access_ip: 192.168.64.8
    node2:
      ansible_host: 192.168.64.9
      ip: 192.168.64.9
      access_ip: 192.168.64.9
    node3:
      ansible_host: 192.168.64.10
      ip: 192.168.64.10
      access_ip: 192.168.64.10
    node4: # new node
      ansible_host: 192.168.64.11
      ip: 192.168.64.11
      access_ip: 192.168.64.11
  children:
    kube_control_plane:
      hosts:
        node1:
        node2:
    kube_node:
      hosts:
        node1:
        node2:
        node3:
        node4: # new node
    etcd:
      hosts:
        node1:
        node2:
        node3:
    k8s_cluster:
      children:
        kube_control_plane:
        kube_node:
    calico_rr:
      hosts: {}
root@kubespray:~# ansible-playbook -i inventory/mycluster/hosts.yaml  --become --become-user=root scale.yml -v -b
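
When only node4 is new, the run can optionally be limited to that host so the existing nodes are not touched (kubespray's scale.yml supports --limit):

root@kubespray:~# ansible-playbook -i inventory/mycluster/hosts.yaml  --become --become-user=root scale.yml -v -b --limit=node4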

Remove a node

root@kubespray:~# ansible-playbook -i inventory/mycluster/hosts.yaml  --become --become-user=root remove-node.yml -v -b --extra-vars "node=node3" # removes node3 here; note that the node names are the ones auto-assigned by the kubespray inventory builder

Tear down the whole cluster with one command

root@kubespray:~# ansible-playbook -i inventory/mycluster/hosts.yaml  --become --become-user=root reset.yml 
