Ubuntu 22.04 && K8s 1.27.2

1. Server configuration

IP             Server     MEM
192.168.56.11  k8smaster  6G
192.168.56.16  k8snode1   4G
192.168.56.17  k8snode2   4G

2. Add the package repository

$ sudo apt-get update
$ sudo apt-get install -y apt-transport-https ca-certificates curl
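
# On some installs /etc/apt/keyrings may not exist yet; creating it first is a safe extra step (not in the original guide)
$ sudo mkdir -p -m 755 /etc/apt/keyrings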

# packages.cloud.google.com must be reachable (through a proxy if necessary), otherwise many problems will show up later
$ curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg

$ echo "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list

$ sudo apt-get update
$ sudo apt-get install -y kubelet kubeadm kubectl
$ sudo apt-mark hold kubelet kubeadm kubectl
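
# Sanity check: confirm the installed versions are the expected v1.27.x
$ kubeadm version -o short
$ kubectl version --client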

3. Apply the required system settings

$ cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

$ sudo modprobe overlay
$ sudo modprobe br_netfilter

# sysctl params required by setup, params persist across reboots
$ cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF

# Apply sysctl params without reboot
$ sudo sysctl --system

# Verify that the br_netfilter, overlay modules are loaded by running the following commands:
$ lsmod | grep br_netfilter
$ lsmod | grep overlay
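
# Verify the sysctl values took effect
$ sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward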

4. Install a container runtime

Container runtimes
Installing containerd
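
# The tarball is assumed to have been fetched from the containerd releases page first, e.g.:
$ wget https://github.com/containerd/containerd/releases/download/v1.7.1/containerd-1.7.1-linux-amd64.tar.gz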

$ sudo tar Cxzvf /usr/local containerd-1.7.1-linux-amd64.tar.gz 
bin/
bin/containerd-stress
bin/ctr
bin/containerd-shim-runc-v2
bin/containerd
bin/containerd-shim
bin/containerd-shim-runc-v1
$ wget https://raw.githubusercontent.com/containerd/containerd/main/containerd.service
$ sudo mkdir -p /usr/local/lib/systemd/system
$ sudo mv containerd.service /usr/local/lib/systemd/system/containerd.service
$ sudo systemctl daemon-reload
$ sudo systemctl enable --now containerd
Removed /etc/systemd/system/multi-user.target.wants/containerd.service.
Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /usr/local/lib/systemd/system/containerd.service.
$ sudo systemctl status containerd
● containerd.service - containerd container runtime
     Loaded: loaded (/usr/local/lib/systemd/system/containerd.service; enabled; vendor preset: enabled)
     Active: active (running) since Fri 2023-06-02 17:35:04 CST; 29min ago
       Docs: https://containerd.io
   Main PID: 625 (containerd)
      Tasks: 8
     Memory: 53.5M
        CPU: 658ms
     CGroup: /system.slice/containerd.service
             └─625 /usr/bin/containerd

Installing runc
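
# runc.amd64 is assumed to come from the opencontainers/runc releases page; the version here is an assumption, pick the current one
$ wget https://github.com/opencontainers/runc/releases/download/v1.1.7/runc.amd64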

$ sudo install -m 755 runc.amd64 /usr/local/sbin/runc

Installing CNI plugins
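
# The plugin archive matching the version below is assumed to come from the containernetworking/plugins releases page, e.g.:
$ wget https://github.com/containernetworking/plugins/releases/download/v1.3.0/cni-plugins-linux-amd64-v1.3.0.tgz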

$ sudo mkdir -p /opt/cni/bin
$ sudo tar Cxzvf /opt/cni/bin cni-plugins-linux-amd64-v1.3.0.tgz
./
./loopback
./bandwidth
./ptp
./vlan
./host-device
./tuning
./vrf
./sbr
./tap
./dhcp
./static
./firewall
./macvlan
./dummy
./bridge
./ipvlan
./portmap
./host-local

5. Disable swap

$ sudo swapoff -a
# Comment out the swap entry in /etc/fstab so swap stays off after reboot
$ sudo sed -i '/swap/ s%/swap%#/swap%g' /etc/fstab
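
# Verify: no output from swapon means no swap is active
$ swapon --show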

6. Start the kubelet

$ sudo systemctl enable --now kubelet
$ systemctl status kubelet
● kubelet.service - kubelet: The Kubernetes Node Agent
     Loaded: loaded (/lib/systemd/system/kubelet.service; enabled; vendor preset: enabled)
    Drop-In: /etc/systemd/system/kubelet.service.d
             └─10-kubeadm.conf
     Active: activating (auto-restart) (Result: exit-code) since Fri 2023-06-02 18:19:33 CST; 9s ago
       Docs: https://kubernetes.io/docs/home/
    Process: 3321 ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARG>
   Main PID: 3321 (code=exited, status=1/FAILURE)
        CPU: 52ms

Jun 02 18:19:33 K8sNode1 systemd[1]: kubelet.service: Main process exited, code=exited, status=1/FAILURE
Jun 02 18:19:33 K8sNode1 systemd[1]: kubelet.service: Failed with result 'exit-code'.

$ sudo vim /lib/systemd/system/kubelet.service 
[Service]
# Add this Environment line, otherwise kubelet loops in Active: activating (auto-restart)
Environment="KUBELET_KUBECONFIG_ARGS=--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf --cgroup-driver=cgroupfs"
ExecStart=/usr/bin/kubelet
...

$ sudo systemctl restart kubelet && sudo systemctl status kubelet
Warning: The unit file, source configuration file or drop-ins of kubelet.service changed on disk. Run 'systemctl daemon-reload' to reload units.
● kubelet.service - kubelet: The Kubernetes Node Agent
     Loaded: loaded (/lib/systemd/system/kubelet.service; enabled; vendor preset: enabled)
    Drop-In: /etc/systemd/system/kubelet.service.d
             └─10-kubeadm.conf
     Active: active (running) since Fri 2023-06-02 18:20:58 CST; 15ms ago
       Docs: https://kubernetes.io/docs/home/
   Main PID: 3417 (kubelet)
      Tasks: 6 (limit: 4572)
     Memory: 3.0M
        CPU: 13ms
     CGroup: /system.slice/kubelet.service
             └─3417 /usr/bin/kubelet --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kub>

Jun 02 18:20:58 K8sNode1 systemd[1]: Started kubelet: The Kubernetes Node Agent.
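
# The warnings above are what systemd prints when a unit file changed without a reload; the clean sequence after editing is:
$ sudo systemctl daemon-reload && sudo systemctl restart kubelet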

7. Configure the dependencies kubeadm needs to start

$ cat /etc/containerd/config.toml 
#   Copyright 2018-2022 Docker Inc.

#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at

#       http://www.apache.org/licenses/LICENSE-2.0

#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

disabled_plugins = ["cri"]

#root = "/var/lib/containerd"
#state = "/run/containerd"
#subreaper = true
#oom_score = 0

#[grpc]
#  address = "/run/containerd/containerd.sock"
#  uid = 0
#  gid = 0

#[debug]
#  address = "/run/containerd/debug.sock"
#  uid = 0
#  gid = 0
#  level = "info"

Modify /etc/containerd/config.toml

$ ls -l /etc/containerd/config.toml
-rw-r--r-- 1 root root 8247 Jun  2 18:34 /etc/containerd/config.toml

# Change two settings in the generated config
# Root privileges are needed for the redirection, so pipe through sudo tee
$ containerd config default | sudo tee /etc/containerd/config.toml > /dev/null
$ vim /etc/containerd/config.toml
[plugins."io.containerd.grpc.v1.cri"]
...
  sandbox_image = "registry.k8s.io/pause:3.9"
  
...
[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
...
  SystemdCgroup = true


$ sudo systemctl daemon-reload
$ sudo systemctl restart containerd
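# Optional: pre-pull the control-plane images to surface registry or runtime problems before init
$ sudo kubeadm config images pull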
$ sudo kubeadm init --apiserver-advertise-address=192.168.56.11 --pod-network-cidr=192.168.0.0/16 --service-cidr=10.1.0.0/16 --control-plane-endpoint=k8smaster

8. Troubleshooting

tester@k8smaster:~$ kubectl get nodes
E0602 19:44:17.370235    2866 memcache.go:265] couldn't get current server API group list: Get "https://k8smaster:6443/api?timeout=32s": tls: failed to verify certificate: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "kubernetes")
E0602 19:44:17.376209    2866 memcache.go:265] couldn't get current server API group list: Get "https://k8smaster:6443/api?timeout=32s": tls: failed to verify certificate: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "kubernetes")
E0602 19:44:17.379734    2866 memcache.go:265] couldn't get current server API group list: Get "https://k8smaster:6443/api?timeout=32s": tls: failed to verify certificate: x509: certificate signed by unknown authority (possibly because of "crypto/rsa: verification error" while trying to verify candidate authority certificate "kubernetes")

# If the errors above appear, remove the stale kubeconfig before re-running kubeadm init
$ rm $HOME/.kube/config
$ sudo kubeadm init --apiserver-advertise-address=192.168.56.11 --pod-network-cidr=192.168.0.0/16 --service-cidr=10.1.0.0/16 --control-plane-endpoint=k8smaster
$ mkdir -p $HOME/.kube
$ sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
$ sudo chown $(id -u):$(id -g) $HOME/.kube/config
tester@k8smaster:~/.kube$ kubectl get nodes
NAME        STATUS     ROLES           AGE   VERSION
k8smaster   NotReady   control-plane   69s   v1.27.2

# Download Calico
$ wget https://raw.githubusercontent.com/projectcalico/calico/v3.26.0/manifests/calico.yaml
$ grep -A1 CALICO_IPV4POOL_CIDR calico.yaml 
            # - name: CALICO_IPV4POOL_CIDR
            #   value: "192.168.0.0/16"
# After editing
$ grep -A1 CALICO_IPV4POOL_CIDR calico.yaml
            - name: CALICO_IPV4POOL_CIDR
              value: "192.168.0.0/16"
# Apply it
$ kubectl apply -f calico.yaml
poddisruptionbudget.policy/calico-kube-controllers created
serviceaccount/calico-kube-controllers created
serviceaccount/calico-node created
configmap/calico-config created
customresourcedefinition.apiextensions.k8s.io/bgpconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/bgppeers.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/blockaffinities.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/caliconodestatuses.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/clusterinformations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/felixconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/globalnetworksets.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/hostendpoints.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamblocks.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamconfigs.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipamhandles.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ippools.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/ipreservations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/kubecontrollersconfigurations.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networkpolicies.crd.projectcalico.org created
customresourcedefinition.apiextensions.k8s.io/networksets.crd.projectcalico.org created
clusterrole.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrole.rbac.authorization.k8s.io/calico-node created
clusterrolebinding.rbac.authorization.k8s.io/calico-kube-controllers created
clusterrolebinding.rbac.authorization.k8s.io/calico-node created
daemonset.apps/calico-node created
deployment.apps/calico-kube-controllers created

# It may take a while for STATUS to become Ready
$ kubectl get nodes
NAME        STATUS   ROLES           AGE   VERSION
k8smaster   Ready    control-plane   14m   v1.27.2
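
# A quick way to check on the Calico pods created by calico.yaml:
$ kubectl get pods -n kube-system | grep calico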

Join the worker nodes to the cluster

$ sudo kubeadm join k8smaster:6443 --token whri61.tg76jbabwrnb057o --discovery-token-ca-cert-hash sha256:91de49a3a1566acf990ccad9674b9356af78c20cfc0c03d102d0a2c2b681b91f
[preflight] Running pre-flight checks
[preflight] Reading configuration from the cluster...
[preflight] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -o yaml'
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Starting the kubelet
[kubelet-start] Waiting for the kubelet to perform the TLS Bootstrap...

This node has joined the cluster:
* Certificate signing request was sent to apiserver and a response was received.
* The Kubelet was informed of the new secure connection details.

Run 'kubectl get nodes' on the control-plane to see this node join the cluster.
$ journalctl -u kubelet -f
Jun 02 20:33:53 K8sNode1 kubelet[6629]: E0602 20:33:53.182577    6629 dns.go:158] "Nameserver limits exceeded" err="Nameserver limits were exceeded, some nameservers have been omitted, the applied nameserver line is: 8.8.8.8 192.168.56.2 10.16.0.1"
Jun 02 20:34:31 K8sNode1 kubelet[6629]: E0602 20:34:31.183620    6629 dns.go:158] "Nameserver limits exceeded" err="Nameserver limits were exceeded, some nameservers have been omitted, the applied nameserver line is: 8.8.8.8 192.168.56.2 10.16.0.1"
Jun 02 20:35:03 K8sNode1 kubelet[6629]: E0602 20:35:03.183107    6629 dns.go:158] "Nameserver limits exceeded" err="Nameserver limits were exceeded, some nameservers have been omitted, the applied nameserver line is: 8.8.8.8 192.168.56.2 10.16.0.1"
Jun 02 20:35:58 K8sNode1 kubelet[6629]: E0602 20:35:58.182340    6629 dns.go:158] "Nameserver limits exceeded" err="Nameserver limits were exceeded, some nameservers have been omitted, the applied nameserver line is: 8.8.8.8 192.168.56.2 10.16.0.1"
Jun 02 20:36:21 K8sNode1 kubelet[6629]: E0602 20:36:21.189171    6629 dns.go:158] "Nameserver limits exceeded" err="Nameserver limits were exceeded, some nameservers have been omitted, the applied nameserver line is: 8.8.8.8 192.168.56.2 10.16.0.1"

# Remove the surplus nameservers,
# e.g. the extra entries configured on the network interface
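# kubelet warns because a pod's resolv.conf may list at most three nameservers (the glibc limit).
# One way to find where the extras come from on Ubuntu 22.04 (paths are typical, adjust to your setup):
$ resolvectl status | grep -A2 'DNS Servers'
$ grep -ri nameservers /etc/netplan/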

Get or refresh a token

$ kubeadm token list
$ kubeadm token create
tester@k8smaster:~$ kubeadm token list
TOKEN                     TTL         EXPIRES                USAGES                   DESCRIPTION                                                EXTRA GROUPS
5e2bxn.rpr4gsqy4bwy8nbo   23h         2023-06-07T08:10:16Z   authentication,signing   <none>                                                     system:bootstrappers:kubeadm:default-node-token
$ openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855
# Pass the value to kubeadm join as --discovery-token-ca-cert-hash sha256:<hash>
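
# Simpler alternative: print a complete join command, token and CA cert hash included
$ kubeadm token create --print-join-command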

Deploy a test nginx service

$ kubectl create deployment nginx --image=nginx
$ kubectl expose deployment nginx --port=80 --type=NodePort
$ kubectl get pod,svc
NAME                         READY   STATUS              RESTARTS   AGE
pod/nginx-77b4fdf86c-dq2jj   0/1     ContainerCreating   0          38s

NAME                 TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
service/kubernetes   ClusterIP   10.96.0.1      <none>        443/TCP        3d17h
service/nginx        NodePort    10.111.246.9   <none>        80:30985/TCP   17s
$ kubectl get pod,svc
NAME                         READY   STATUS    RESTARTS   AGE
pod/nginx-77b4fdf86c-dq2jj   1/1     Running   0          4m44s

NAME                 TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
service/kubernetes   ClusterIP   10.96.0.1      <none>        443/TCP        3d18h
service/nginx        NodePort    10.111.246.9   <none>        80:30985/TCP   4m23s
# curl k8smaster:30985
$ curl 192.168.56.11:30985
<!DOCTYPE html>
<html>
<head>
<title>Welcome to nginx!</title>
<style>
html { color-scheme: light dark; }
body { width: 35em; margin: 0 auto;
font-family: Tahoma, Verdana, Arial, sans-serif; }
</style>
</head>
<body>
<h1>Welcome to nginx!</h1>
<p>If you see this page, the nginx web server is successfully installed and
working. Further configuration is required.</p>

<p>For online documentation and support please refer to
<a href="http://nginx.org/">nginx.org</a>.<br/>
Commercial support is available at
<a href="http://nginx.com/">nginx.com</a>.</p>

<p><em>Thank you for using nginx.</em></p>
</body>
</html>
# Node1
$ curl 192.168.56.16:30985
(same nginx welcome page as above)
# Node2
$ curl 192.168.56.17:30985
(same nginx welcome page as above)

$ kubectl scale deploy --replicas=3 nginx
deployment.apps/nginx scaled
$ kubectl get po
NAME                     READY   STATUS              RESTARTS   AGE
nginx-77b4fdf86c-dq2jj   1/1     Running             0          8m3s
nginx-77b4fdf86c-q4jb4   0/1     ContainerCreating   0          2s
nginx-77b4fdf86c-stdqt   0/1     ContainerCreating   0          2s
# Fix 'kubectl get po' on the worker nodes: the master's /etc/kubernetes/admin.conf contains
# 'server: https://k8smaster:6443', but the nodes don't have this file, so running kubectl there fails as below
$ kubectl get po
E0606 17:07:48.849098    3356 memcache.go:265] couldn't get current server API group list: Get "http://localhost:8080/api?timeout=32s": dial tcp 127.0.0.1:8080: connect: connection refused

# copy the master's /etc/kubernetes/admin.conf to each node's /etc/kubernetes
$ scp /etc/kubernetes/admin.conf tester@k8snode1:/etc/kubernetes
$ scp /etc/kubernetes/admin.conf tester@k8snode2:/etc/kubernetes
# the steps above may fail because of root-only permissions; if so, scp to a $HOME/temp folder on the node first, then move the file into place

$ echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> ~/.bash_profile
$ source ~/.bash_profile
# Run kubectl on one of the nodes; the localhost:8080 error no longer appears
$ kubectl get po
NAME                     READY   STATUS    RESTARTS   AGE
nginx-77b4fdf86c-dq2jj   1/1     Running   0          28m
# Optional: strip the docker.io/ registry prefix from the Calico images
#$ sed -i 's#docker.io/##g' calico.yaml
# nginx-demo.yaml
apiVersion: v1
# Resource type; could also be Deployment, StatefulSet, etc.
kind: Pod
# Metadata describing the Pod
metadata:
  name: nginx-demo
  labels:
    # Custom label: key 'type', value 'app'
    type: app
    # Custom label: key 'version', value '2.0.0'
    version: 2.0.0
  # Namespace the Pod is created in
  namespace: 'default'
# Desired state the Pod should be created with
spec:
  # Containers that run in the Pod
  containers:
  - name: nginx
    image: nginx:1.18
    # Image pull policy: use the local image if present, otherwise pull it
    imagePullPolicy: IfNotPresent
    # Command executed when the container starts
    # (same as: nginx -g 'daemon off;')
    command:
    - nginx
    - -g
    - 'daemon off;'
    # Working directory after the container starts
    workingDir: /usr/share/nginx/html
    ports:
    - name: http
      # Exposed container port
      containerPort: 80
      protocol: TCP
    # Environment variables
    env:
    - name: NG_OPTS_1
      value: 'vvv-test'
    resources:
      # Minimum resources the container requests
      requests:
        cpu: 100m
        memory: 128M
      # Maximum resources the container may use
      limits:
        cpu: 200m
        memory: 256M
  # Restart policy: restart only on failure
  restartPolicy: OnFailure
  #imagePullSecrets:
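
# Apply the manifest and check the Pod (standard kubectl usage):
$ kubectl apply -f nginx-demo.yaml
$ kubectl get pod nginx-demo -o wide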
