Contents

I. Deploy containerd
1. Install and configure containerd
2. Generate and modify the config file
II. Deploy runc
III. Deploy kubelet
1. Create kubelet-bootstrap.kubeconfig
2. Create the kubelet config file
3. Start the service
IV. Deploy kube-proxy
1. Create the certificate signing request file
2. Generate the certificate files
3. Create the kubeconfig file
4. Create the config file
5. Distribute the files and start the service
V. Deploy the network plugin: calico
1. Download the plugin
2. Modify the file
3. Verify
VI. Deploy coredns
1. Create the config file
2. Deploy
I. Deploy containerd

1. Install and configure containerd

##Download the package from GitHub
[root@master k8s-work]# wget https://github.com/containerd/containerd/releases/download/v1.6.19/cri-containerd-cni-1.6.19-linux-amd64.tar.gz
##Extract the package; install on both master and node1
[root@master k8s-work]# tar xf cri-containerd-cni-1.6.19-linux-amd64.tar.gz -C /
2. Generate and modify the config file

1. Create the config directory ##needed on all nodes
[root@node1 ~]# mkdir /etc/containerd

2. Generate the default config file
[root@master containerd]# containerd config default > /etc/containerd/config.toml

3. Replace the cgroup driver and the sandbox image source
These changes are already applied in the complete file below, so they can be skipped; run them only when modifying the default config.
sed -i 's@systemd_cgroup = false@systemd_cgroup = true@' /etc/containerd/config.toml
sed -i 's@k8s.gcr.io/pause:3.6@registry.aliyuncs.com/google_containers/pause:3.6@' /etc/containerd/config.toml
The complete file is the default config.toml generated above with the two changes already applied:
cat > /etc/containerd/config.toml << "EOF"
...
EOF
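Whichever route is taken, a quick sanity check (not part of the original steps) confirms both settings now hold the new values:

##Both lines should show systemd_cgroup = true and the aliyuncs pause image
grep -nE 'systemd_cgroup|sandbox_image' /etc/containerd/config.toml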
II. Deploy runc

Download the runc binary (runc.amd64) from the releases page:
https://github.com/opencontainers/runc
[root@master k8s-work]# chmod +x runc.amd64
[root@master k8s-work]# which runc    ##the cri-containerd-cni bundle already placed a runc here; replace it
/usr/local/sbin/runc
[root@master k8s-work]# mv runc.amd64 /usr/local/sbin/runc
mv: overwrite '/usr/local/sbin/runc'? y
[root@master k8s-work]# runc -v
runc version 1.1.0
commit: v1.1.0-0-g067aaf85
spec: 1.0.2-dev
go: go1.17.6
libseccomp: 2.5.3
[root@master k8s-work]# scp /usr/local/sbin/runc @node1:/usr/local/sbin/runc    ##send it to node1 as well
[root@master k8s-work]# systemctl enable --now containerd    ##start now and enable at boot
containerd fills the role here that docker would otherwise play as the container runtime.
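With runc replaced and containerd enabled, it is worth confirming the runtime answers on its socket before building anything on top of it. A minimal check, assuming the crictl binary that ships in the cri-containerd-cni bundle extracted earlier:

##Both commands should report client and server versions without errors
ctr version
crictl --runtime-endpoint unix:///run/containerd/containerd.sock version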
III. Deploy kubelet

1. Create kubelet-bootstrap.kubeconfig

1) Extract the bootstrap token (the first comma-separated field of token.csv)
[root@master k8s-work]# BOOTSTRAP_TOKEN=$(awk -F "," '{print $1}' /etc/kubernetes/ssl/token.csv)
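token.csv was generated earlier during the master setup; before building the kubeconfig it is worth confirming the variable actually holds the token:

##Should print a non-empty token string
[root@master k8s-work]# echo ${BOOTSTRAP_TOKEN}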
2) Create the kubeconfig and role bindings
kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.1.110:6443 --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config set-credentials kubelet-bootstrap --token=${BOOTSTRAP_TOKEN} --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl config use-context default --kubeconfig=kubelet-bootstrap.kubeconfig
kubectl create clusterrolebinding cluster-system-anonymous --clusterrole=cluster-admin --user=kubelet-bootstrap
kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap --kubeconfig=kubelet-bootstrap.kubeconfig
##Verify
kubectl describe clusterrolebinding cluster-system-anonymous
kubectl describe clusterrolebinding kubelet-bootstrap
2. Create the kubelet config file

##The "address" below must match the IP of the host the kubelet runs on
cat > kubelet.json << "EOF"
{
  "kind": "KubeletConfiguration",
  "apiVersion": "kubelet.config.k8s.io/v1beta1",
  "authentication": {
    "x509": {
      "clientCAFile": "/etc/kubernetes/ssl/ca.pem"
    },
    "webhook": {
      "enabled": true,
      "cacheTTL": "2m0s"
    },
    "anonymous": {
      "enabled": false
    }
  },
  "authorization": {
    "mode": "Webhook",
    "webhook": {
      "cacheAuthorizedTTL": "5m0s",
      "cacheUnauthorizedTTL": "30s"
    }
  },
  "address": "192.168.1.110",
  "port": 10250,
  "readOnlyPort": 10255,
  "cgroupDriver": "systemd",
  "hairpinMode": "promiscuous-bridge",
  "serializeImagePulls": false,
  "clusterDomain": "cluster.local.",
  "clusterDNS": ["10.96.0.2"]
}
EOF
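kubelet.json is per-node, so after it is copied to node1 (step 2 below) the address has to be rewritten there. A one-line sketch, assuming node1's IP is 192.168.1.111 (an example value; substitute the real one):

##Run on node1 after the file is copied over; 192.168.1.111 is a placeholder IP
sed -i 's/192.168.1.110/192.168.1.111/' /etc/kubernetes/kubelet.json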
cat > kubelet.service << "EOF"
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=containerd.service
Requires=containerd.service

[Service]
WorkingDirectory=/var/lib/kubelet
ExecStart=/usr/local/bin/kubelet \
  --bootstrap-kubeconfig=/etc/kubernetes/kubelet-bootstrap.kubeconfig \
  --cert-dir=/etc/kubernetes/ssl \
  --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
  --config=/etc/kubernetes/kubelet.json \
  --cni-bin-dir=/opt/cni/bin \
  --cni-conf-dir=/etc/cni/net.d \
  --container-runtime=remote \
  --container-runtime-endpoint=unix:///run/containerd/containerd.sock \
  --network-plugin=cni \
  --rotate-certificates \
  --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.2 \
  --root-dir=/var/lib/kubelet \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
EOF
3. Start the service

1. Copy the config files into place on the master
[root@master k8s-work]# cp kubelet-bootstrap.kubeconfig /etc/kubernetes/
[root@master k8s-work]# cp kubelet.json /etc/kubernetes/
[root@master k8s-work]# cp kubelet.service /usr/lib/systemd/system/

2. Send the files to node1
[root@master k8s-work]# scp kubelet-bootstrap.kubeconfig kubelet.json @node1:/etc/kubernetes/
kubelet-bootstrap.kubeconfig                  100% 2135     2.4MB/s   00:00
kubelet.json                                  100%  801     1.1MB/s   00:00
[root@master k8s-work]# scp kubelet.service @node1:/usr/lib/systemd/system/
kubelet.service                               100%  854   931.9KB/s   00:00
[root@master k8s-work]# scp ca.pem @node1:/etc/kubernetes/ssl
ca.pem                                        100% 1273     1.3MB/s   00:00
##In kubelet.json, change "address" to the current host's IP (see the sed sketch above)

3. Create the working directories (on every node)
[root@node1 ~]# mkdir -p /var/lib/kubelet
[root@node1 ~]# mkdir -p /var/log/kubernetes

4. Start the service
systemctl daemon-reload
systemctl enable --now kubelet
systemctl status kubelet

5. Verify
kubectl get nodes
kubectl get csr
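If kubectl get csr shows requests stuck in Pending, they can be approved by hand; the CSR name below is a placeholder for whatever the previous command printed:

##Approve a pending kubelet bootstrap CSR
kubectl certificate approve <csr-name>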
IV. Deploy kube-proxy

1. Create the certificate signing request file

cat > kube-proxy-csr.json << "EOF"
{
  "CN": "system:kube-proxy",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "CN",
      "ST": "nanjing",
      "L": "nanjing",
      "O": "kubemsb",
      "OU": "CN"
    }
  ]
}
EOF
2. Generate the certificate files

cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
3. Create the kubeconfig file

kubectl config set-cluster kubernetes --certificate-authority=ca.pem --embed-certs=true --server=https://192.168.1.110:6443 --kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kube-proxy --client-certificate=kube-proxy.pem --client-key=kube-proxy-key.pem --embed-certs=true --kubeconfig=kube-proxy.kubeconfig
kubectl config set-context default --cluster=kubernetes --user=kube-proxy --kubeconfig=kube-proxy.kubeconfig
kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
4. Create the config file

cat > kube-proxy.yaml << "EOF"
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 192.168.1.110
clientConnection:
  kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
clusterCIDR: 10.244.0.0/16
healthzBindAddress: 192.168.1.110:10256
kind: KubeProxyConfiguration
metricsBindAddress: 192.168.1.110:10249
mode: "ipvs"
EOF
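mode: "ipvs" depends on the ip_vs kernel modules being loadable. A hedged pre-flight check before starting the service (these are the standard ipvs module names):

##Load and verify the ipvs modules
modprobe -a ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack
lsmod | grep -e ip_vs -e nf_conntrack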
cat > kube-proxy.service << "EOF"
[Unit]
Description=Kubernetes Kube-Proxy Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target

[Service]
WorkingDirectory=/var/lib/kube-proxy
ExecStart=/usr/local/bin/kube-proxy \
  --config=/etc/kubernetes/kube-proxy.yaml \
  --alsologtostderr=true \
  --logtostderr=false \
  --log-dir=/var/log/kubernetes \
  --v=2
Restart=on-failure
RestartSec=5
LimitNOFILE=65536

[Install]
WantedBy=multi-user.target
EOF
5. Distribute the files and start the service

cp kube-proxy*.pem /etc/kubernetes/ssl/
cp kube-proxy.kubeconfig kube-proxy.yaml /etc/kubernetes/
cp kube-proxy.service /usr/lib/systemd/system/

##Send these to node1 as well; the IPs in kube-proxy.yaml must be changed to that host's IP
scp kube-proxy*.pem @node1:/etc/kubernetes/ssl/
scp kube-proxy.kubeconfig kube-proxy.yaml @node1:/etc/kubernetes/
scp kube-proxy.service @node1:/usr/lib/systemd/system/

Create the working directory
mkdir -p /var/lib/kube-proxy

Start the service
systemctl daemon-reload
systemctl enable --now kube-proxy
systemctl status kube-proxy
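Once kube-proxy is running in ipvs mode, the virtual-server table should already contain the kubernetes service VIP. This check assumes the ipvsadm package is installed:

##Expect at least one virtual server entry for the cluster service IP
ipvsadm -Ln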
V. Deploy the network plugin: calico

1. Download the plugin

wget https://docs.projectcalico.org/v3.19/manifests/calico.yaml
This only needs to be done on the master node.
2. Modify the file

##Modify these two lines (the numbers are line numbers inside calico.yaml); keep the YAML indentation aligned
3683             - name: CALICO_IPV4POOL_CIDR
3684               value: "10.244.0.0/16"
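With the pool CIDR set (it must match the clusterCIDR in kube-proxy.yaml above), apply the manifest on the master; the pod listing in the next step assumes this has been done:

[root@master ~]# kubectl apply -f calico.yaml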
3. Verify

[root@master ~]# kubectl get pod -A
NAMESPACE     NAME                                       READY   STATUS    RESTARTS   AGE
kube-system   calico-kube-controllers-848c5d445f-b4jc2   1/1     Running   0          4m31s
kube-system   calico-node-gtmdv                          1/1     Running   0          4m31s
kube-system   calico-node-xngj9                          0/1     Running   0          4m31s
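calico-node-xngj9 above is still 0/1 READY, which usually just means its readiness probe has not passed yet. If it does not settle within a few minutes, inspect it (the pod name is taken from the output above; -c names the main container in the calico daemonset):

kubectl describe pod calico-node-xngj9 -n kube-system
kubectl logs -n kube-system calico-node-xngj9 -c calico-node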
VI. Deploy coredns

1. Create the config file

cat > coredns.yaml << "EOF"
apiVersion: v1
kind: ServiceAccount
metadata:
  name: coredns
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
rules:
  - apiGroups:
    - ""
    resources:
    - endpoints
    - services
    - pods
    - namespaces
    verbs:
    - list
    - watch
  - apiGroups:
    - discovery.k8s.io
    resources:
    - endpointslices
    verbs:
    - list
    - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: system:coredns
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: system:coredns
subjects:
- kind: ServiceAccount
  name: coredns
  namespace: kube-system
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: coredns
  namespace: kube-system
data:
  Corefile: |
    .:53 {
        errors
        health {
          lameduck 5s
        }
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
          fallthrough in-addr.arpa ip6.arpa
        }
        prometheus :9153
        forward . /etc/resolv.conf {
          max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: coredns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/name: "CoreDNS"
spec:
  # replicas: not specified here:
  # 1. Default is 1.
  # 2. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 1
  selector:
    matchLabels:
      k8s-app: kube-dns
  template:
    metadata:
      labels:
        k8s-app: kube-dns
    spec:
      priorityClassName: system-cluster-critical
      serviceAccountName: coredns
      tolerations:
        - key: "CriticalAddonsOnly"
          operator: "Exists"
      nodeSelector:
        kubernetes.io/os: linux
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                  - key: k8s-app
                    operator: In
                    values: ["kube-dns"]
              topologyKey: kubernetes.io/hostname
      containers:
      - name: coredns
        image: coredns/coredns:1.8.4
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            memory: 170Mi
          requests:
            cpu: 100m
            memory: 70Mi
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        - containerPort: 9153
          name: metrics
          protocol: TCP
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_BIND_SERVICE
            drop:
            - all
          readOnlyRootFilesystem: true
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /ready
            port: 8181
            scheme: HTTP
      dnsPolicy: Default
      volumes:
        - name: config-volume
          configMap:
            name: coredns
            items:
            - key: Corefile
              path: Corefile
---
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  annotations:
    prometheus.io/port: "9153"
    prometheus.io/scrape: "true"
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "CoreDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.96.0.2
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
  - name: metrics
    port: 9153
    protocol: TCP
EOF
2. Deploy

[root@master k8s-work]# kubectl apply -f coredns.yaml
##Verify
[root@master k8s-work]# kubectl get svc -n kube-system
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE
kube-dns   ClusterIP   10.96.0.2    <none>        53/UDP,53/TCP,9153/TCP   51s
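As a final end-to-end check, resolve a name straight through the kube-dns ClusterIP from a node. This assumes dig is available on the host (the bind-utils package); any resolvable name will do:

##An ANSWER SECTION here means kube-proxy, calico and coredns are working together
dig -t a www.baidu.com @10.96.0.2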