Deploying Kubernetes 1.17 on CentOS 7.8

Contents

  • I. Basic environment
  • II. Basic configuration
    • 1. Disable the firewall and SELinux
    • 2. Configure DNS
    • 3. Configure the yum repositories
    • 4. Install basic utility packages
    • 5. Set the hostname and hosts file (on all four machines)
    • 6. Pass bridged IPv4 traffic to the iptables chains
  • III. Install Docker
    • 1. Install prerequisite tools and add the Docker repository
    • 2. Install a specific Docker version
  • IV. Install Kubernetes 1.17
    • 1. Add the Kubernetes yum repository and install
    • 2. Initialize the cluster (run on k8s-master01)
      • 2.1 Run kubeadm init
      • 2.2 Run the follow-up commands from the init output
      • 2.3 Check the images
      • 2.4 Check node status
      • 2.5 Install flannel
    • 3. Add a worker node
      • 3.1 Check node status from the master
    • 4. Add another master node
  • V. Install the dashboard
  • VI. Install kube-prometheus monitoring

I. Basic environment

Environment:
Kubernetes 1.17.7
Docker 18.06.0

IP               Node
192.168.31.171   k8s-master01
192.168.31.172   k8s-master02
192.168.31.173   k8s-node01
192.168.31.174   k8s-node02

II. Basic configuration

1. Disable the firewall and SELinux

setenforce 0	# disable SELinux for the current session
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config    # disable SELinux permanently
systemctl disable --now firewalld    # stop firewalld and disable it at boot

2. Configure DNS

echo "nameserver 114.114.114.114">/etc/resolv.conf

3. Configure the yum repositories

[root@localhost yum.repos.d]# cat /etc/yum.repos.d/Centos-Base.repo
[base]
name=CentOS-$releasever - Base
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os&infra=$infra
#baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

#released updates
[updates]
name=CentOS-$releasever - Updates
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=updates&infra=$infra
#baseurl=http://mirror.centos.org/centos/$releasever/updates/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

#additional packages that may be useful
[extras]
name=CentOS-$releasever - Extras
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=extras&infra=$infra
#baseurl=http://mirror.centos.org/centos/$releasever/extras/$basearch/
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

#additional packages that extend functionality of existing packages
[centosplus]
name=CentOS-$releasever - Plus
mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=centosplus&infra=$infra
#baseurl=http://mirror.centos.org/centos/$releasever/centosplus/$basearch/
gpgcheck=1
enabled=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-7

[root@localhost yum.repos.d]# yum makecache

4. Install basic utility packages

yum -y install vim wget telnet curl net-tools

5. Set the hostname and hosts file (on all four machines)

hostnamectl set-hostname k8s-master01    # use the matching hostname on each machine
cat /etc/hosts
192.168.31.171  k8s-master01
192.168.31.172  k8s-master02
192.168.31.173  k8s-node01
192.168.31.174  k8s-node02
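The same entries must exist on all four machines. A quick way to append them on each host (run once per machine, after setting that machine's own hostname):

cat >> /etc/hosts << EOF
192.168.31.171  k8s-master01
192.168.31.172  k8s-master02
192.168.31.173  k8s-node01
192.168.31.174  k8s-node02
EOF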

6. Pass bridged IPv4 traffic to the iptables chains

cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF

# load the bridge-filter and kernel forwarding settings
sysctl --system
sysctl -p /etc/sysctl.d/k8s.conf
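On a fresh CentOS 7 system the br_netfilter module may not be loaded, in which case the bridge-nf-call settings above fail to apply. kubeadm also refuses to run while swap is enabled by default. A minimal sketch covering both, assuming swap should stay off permanently:

modprobe br_netfilter                                        # load the bridge netfilter module now
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf    # reload it on boot
swapoff -a                                                   # turn off swap for the current session
sed -i '/ swap / s/^/#/' /etc/fstab                          # comment out swap entries so it stays off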

III. Install Docker

1. Install prerequisite tools and add the Docker repository

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum list docker-ce.x86_64 --showduplicates | sort -r

2. Install a specific Docker version

yum -y install docker-ce-18.06.0.ce-3.el7
systemctl enable --now docker
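kubeadm warns during init when Docker uses the cgroupfs cgroup driver, since the Kubernetes docs recommend systemd on systemd-based distros. A hedged sketch to switch it before initializing (kubeadm should detect the driver on its own at init time, so this is optional):

mkdir -p /etc/docker
cat > /etc/docker/daemon.json << EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
systemctl restart docker
docker info | grep -i cgroup    # should now report the systemd cgroup driver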

IV. Install Kubernetes 1.17

1. Add the Kubernetes yum repository and install

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

yum makecache -y
yum install -y kubelet-1.17.7-0.x86_64 kubeadm-1.17.7-0.x86_64 kubectl-1.17.7-0.x86_64
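The packages do not enable kubelet automatically. Enable it now so systemd manages it across reboots; it will crash-loop until kubeadm init or join provides a configuration, which is expected:

systemctl enable --now kubelet
kubeadm version    # sanity-check that the expected 1.17.7 binaries were installed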

2. Initialize the cluster (run on k8s-master01)

2.1 Run kubeadm init

# --apiserver-advertise-address is the master node's IP address
kubeadm init --apiserver-advertise-address=192.168.31.171 --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.17.7 --service-cidr=10.1.0.0/16 --pod-network-cidr=10.244.0.0/16

# Output like the following means initialization succeeded
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.31.171:6443 --token 7fu3bw.loyl212crr2dohv1 \
    --discovery-token-ca-cert-hash sha256:6d35cddca3ed61e4f828e891c05a689848f4fa6481a5c56c71366ac308235968
    
# The token in the join command is valid for 24 hours; to add nodes later, generate a new one on the master
[root@k8s-master01 ~]# kubeadm token create --print-join-command
W0401 15:47:34.545584   18518 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0401 15:47:34.545622   18518 validation.go:28] Cannot validate kubelet config - no validator is available
kubeadm join 192.168.31.171:6443 --token bvcvdj.hcuu4a1167ecxcli     --discovery-token-ca-cert-hash sha256:6d35cddca3ed61e4f828e891c05a689848f4fa6481a5c56c71366ac308235968

If init fails and you need to re-run it, execute kubeadm reset first to clear the old configuration, then run init again.
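To inspect tokens that already exist (and their remaining lifetimes) rather than minting a new one each time:

kubeadm token list    # lists each token with its TTL, expiry, and allowed usages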

2.2 Run the follow-up commands from the init output

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
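If working as root, exporting the admin kubeconfig for the current session also works (an alternative to the copy above, not a replacement for it on multi-user machines):

export KUBECONFIG=/etc/kubernetes/admin.conf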

2.3 Check the images

[root@k8s-master01 ~]# docker images
REPOSITORY                                                        TAG                 IMAGE ID            CREATED             SIZE
registry.aliyuncs.com/google_containers/kube-proxy                v1.17.7             36c0070f736a        2 years ago         117MB
registry.aliyuncs.com/google_containers/kube-apiserver            v1.17.7             6892a236f2e6        2 years ago         171MB
registry.aliyuncs.com/google_containers/kube-controller-manager   v1.17.7             36fdf6408a20        2 years ago         161MB
registry.aliyuncs.com/google_containers/kube-scheduler            v1.17.7             642f6b26e0e3        2 years ago         94.4MB
registry.aliyuncs.com/google_containers/coredns                   1.6.5               70f311871ae1        3 years ago         41.6MB
registry.aliyuncs.com/google_containers/etcd                      3.4.3-0             303ce5db0e90        3 years ago         288MB
registry.aliyuncs.com/google_containers/pause                     3.1                 da86e6ba6ca1        5 years ago         742kB

2.4 Check node status

[root@k8s-master01 ~]# kubectl get nodes
NAME           STATUS     ROLES    AGE     VERSION
k8s-master01   NotReady   master   5m24s   v1.17.7
The node shows NotReady because no network plugin is installed yet.
Install one, such as calico or flannel.
This guide uses flannel.

2.5 Install flannel

# download the flannel manifest
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
# point the image references at a domestic mirror of quay.io
sed -i 's@quay.io@quay.azk8s.cn@g' kube-flannel.yml
kubectl apply -f kube-flannel.yml
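The flannel DaemonSet can take a minute or two to roll out. A quick way to watch it and confirm the master flips to Ready once the CNI is up:

kubectl get pods -n kube-system -w    # wait for the kube-flannel-ds pods to reach Running
kubectl get nodes                     # the master should now report Ready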

3. Add a worker node

# run on each node to be added (the node needs Docker, kubelet, and kubeadm installed first, as above)
kubeadm join 192.168.31.171:6443 --token bvcvdj.hcuu4a1167ecxcli     --discovery-token-ca-cert-hash sha256:6d35cddca3ed61e4f828e891c05a689848f4fa6481a5c56c71366ac308235968

3.1 Check node status from the master

[root@k8s-master01 ~]# kubectl get nodes -o wide
NAME           STATUS   ROLES    AGE   VERSION   INTERNAL-IP      EXTERNAL-IP   OS-IMAGE                KERNEL-VERSION           CONTAINER-RUNTIME
k8s-master01   Ready    master   34m   v1.17.7   192.168.31.171   <none>        CentOS Linux 7 (Core)   3.10.0-1127.el7.x86_64   docker://18.6.0
k8s-node01     Ready    <none>   96s   v1.17.7   192.168.31.173   <none>        CentOS Linux 7 (Core)   3.10.0-1127.el7.x86_64   docker://18.6.0
k8s-node02     Ready    <none>   91s   v1.17.7   192.168.31.174   <none>        CentOS Linux 7 (Core)   3.10.0-1127.el7.x86_64   docker://18.6.0

4. Add another master node

# get the certificate key
[root@k8s-master01 ~]# kubeadm init phase upload-certs --upload-certs
I0401 16:04:49.014078   24856 version.go:251] remote version is much newer: v1.26.3; falling back to: stable-1.17
W0401 16:04:49.899688   24856 validation.go:28] Cannot validate kube-proxy config - no validator is available
W0401 16:04:49.899705   24856 validation.go:28] Cannot validate kubelet config - no validator is available
[upload-certs] Storing the certificates in Secret "kubeadm-certs" in the "kube-system" Namespace
[upload-certs] Using certificate key:
86e447d75ccfc706f071fc0712f64e485cb4d3bd0ad03e68787146d1a67d9312

# create a token
[root@k8s-master01 ~]# kubeadm token create --print-join-command
W0401 16:05:29.147153   25105 validation.go:28] Cannot validate kubelet config - no validator is available
W0401 16:05:29.147246   25105 validation.go:28] Cannot validate kube-proxy config - no validator is available
kubeadm join 192.168.31.171:6443 --token f3r23y.e9o8dfdr7rcxe47d     --discovery-token-ca-cert-hash sha256:6d35cddca3ed61e4f828e891c05a689848f4fa6481a5c56c71366ac308235968 

The final command to join the new master node is:
kubeadm join 192.168.31.171:6443 \
    --token f3r23y.e9o8dfdr7rcxe47d \
    --discovery-token-ca-cert-hash sha256:6d35cddca3ed61e4f828e891c05a689848f4fa6481a5c56c71366ac308235968 \
    --control-plane \
    --certificate-key 86e447d75ccfc706f071fc0712f64e485cb4d3bd0ad03e68787146d1a67d9312 \
    --apiserver-advertise-address=192.168.31.172

--token and --discovery-token-ca-cert-hash are the values generated above.
--control-plane adds the node to the control plane:
    specifically, it tells kubeadm join that the new node is joining as a control-plane member.
    With this flag, the node is added to the existing control plane, improving the cluster's availability.
    In this mode, kubeadm join adds the new node to the etcd cluster, runs the API server,
    controller manager, and scheduler on it, and configures the Kubernetes components' runtime environment there.
--certificate-key is the key generated on the existing master.
--apiserver-advertise-address is the IP of the master node being added.



Joining the master node may fail with the following error:
error execution phase preflight: 
One or more conditions for hosting a new control plane instance is not satisfied.

unable to add a new control plane instance a cluster that doesn't have a stable controlPlaneEndpoint address

Please ensure that:
* The cluster has a stable controlPlaneEndpoint address.
* The certificates that must be shared among control plane instances are provided.


To see the stack trace of this error execute with --v=5 or higher


Fix:
# on master02, clear the configuration
kubeadm reset

# then on master01:
# inspect the cluster configuration stored in the kubeadm-config ConfigMap
kubectl -n kube-system get cm kubeadm-config -oyaml
# there is no controlPlaneEndpoint set; add one
kubectl -n kube-system edit cm kubeadm-config
# it belongs roughly here:
kind: ClusterConfiguration
kubernetesVersion: v1.17.0
controlPlaneEndpoint: 192.168.31.171:6443
# then re-run the kubeadm join command on the node that is to become a master
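After the join succeeds, run the kubeconfig steps from section 2.2 on the new master so kubectl works there. From either master you can then confirm the control plane has two members:

kubectl get nodes                                      # both masters should show the master role
kubectl -n kube-system get pods -o wide | grep etcd    # expect one etcd pod per control-plane node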

V. Install the dashboard

kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml
[root@k8s-master01 ~]# kubectl get pods --namespace kubernetes-dashboard -owide
NAME                                        READY   STATUS    RESTARTS   AGE   IP           NODE         NOMINATED NODE   READINESS GATES
dashboard-metrics-scraper-894c58c65-bfhlg   1/1     Running   0          45s   10.244.1.3   k8s-node01   <none>           <none>
kubernetes-dashboard-555f8cc76f-9sk88       1/1     Running   0          45s   10.244.2.3   k8s-node02   <none>           <none>


[root@k8s-master01 ~]# kubectl edit svc kubernetes-dashboard  -n kubernetes-dashboard
# Please edit the object below. Lines beginning with a '#' will be ignored,
# and an empty file will abort the edit. If an error occurs while saving this file will be
# reopened with the relevant failures.
#
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: "2023-04-01T08:45:25Z"
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
  resourceVersion: "11520"
  selfLink: /api/v1/namespaces/kubernetes-dashboard/services/kubernetes-dashboard
  uid: a8763b26-7d2a-459c-b2f7-0e0aa79ac4a0
spec:
  clusterIP: 10.1.91.245
  ports:
  - port: 443
    nodePort: 30001 # added
    protocol: TCP
    targetPort: 8443
  selector:
    k8s-app: kubernetes-dashboard
  sessionAffinity: None
  type: NodePort  # changed
status:
  loadBalancer: {}
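If you prefer not to edit the Service interactively, a one-line patch achieves the same result (a sketch; it assumes the same nodePort 30001 as above):

kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard \
  -p '{"spec":{"type":"NodePort","ports":[{"port":443,"targetPort":8443,"nodePort":30001}]}}'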
  
  
  
Get the login token:
[root@k8s-master01 ~]# kubectl  get sa,secrets -n kubernetes-dashboard
NAME                                  SECRETS   AGE
serviceaccount/default                1         21m
serviceaccount/kubernetes-dashboard   1         21m

NAME                                      TYPE                                  DATA   AGE
secret/default-token-bclgm                kubernetes.io/service-account-token   3      21m
secret/kubernetes-dashboard-certs         Opaque                                0      21m
secret/kubernetes-dashboard-csrf          Opaque                                1      21m
secret/kubernetes-dashboard-key-holder    Opaque                                2      21m
secret/kubernetes-dashboard-token-646w5   kubernetes.io/service-account-token   3      21m
[root@k8s-master01 ~]# kubectl describe secrets kubernetes-dashboard-token-646w5 -n kubernetes-dashboard
Name:         kubernetes-dashboard-token-646w5
Namespace:    kubernetes-dashboard
Labels:       <none>
Annotations:  kubernetes.io/service-account.name: kubernetes-dashboard
              kubernetes.io/service-account.uid: 08c1a03e-a3e8-4ff9-b004-3918ea67f6a9

Type:  kubernetes.io/service-account-token

Data
====
ca.crt:     1025 bytes
namespace:  20 bytes
token:      eyJhbGciOiJSUzI1NiIsImtpZCI6IjFYMUtiRVltaV9Jc1puaXpNQjlIeEljNXp2ZVpwWFVmUEM4M2VGUF9vQ2MifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VjcmV0Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZC10b2tlbi02NDZ3NSIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJrdWJlcm5ldGVzLWRhc2hib2FyZCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjA4YzFhMDNlLWEzZTgtNGZmOS1iMDA0LTM5MThlYTY3ZjZhOSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDprdWJlcm5ldGVzLWRhc2hib2FyZDprdWJlcm5ldGVzLWRhc2hib2FyZCJ9.igfDrJnHaxYzJ0bd-8TsJBOfsM7hvttACAD-T8L-djoDo8L4N0RsadFxaOy_fOftFDcueBM-zb-x-P9hu99jkIPCvOmj6eBdtpXt9__adfKoQTHnRKDksgSfXgr_owwAkdVRALrsM4bqWEP2YOnhMF1xJt9zHgK40SBFeEjHSHqO4bf4HoSTFR-0AFbGAAJ_MPOZB1nxNNLWejhNy-XaBeOVlgsepA11YwfTBeC6k5BXPGk8Qyn9Ios6b01vljPG0GlKegaZUV4hKjrQg_3ArbpK5nOzsaiKPTVNNgtkHWuJl2sUI7phlMZANgv0kZZqYeO_yB8Tte7jlH788bAAdQ
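Note that the built-in kubernetes-dashboard ServiceAccount has very limited RBAC permissions, so the dashboard may display almost nothing after login. A hedged sketch that creates a dedicated admin account instead (dashboard-admin is a name chosen here; cluster-admin is extremely broad, so treat this as lab-only):

kubectl -n kubernetes-dashboard create serviceaccount dashboard-admin
kubectl create clusterrolebinding dashboard-admin \
  --clusterrole=cluster-admin \
  --serviceaccount=kubernetes-dashboard:dashboard-admin
# then describe the new dashboard-admin token secret, as above, and log in with that token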

Open https://192.168.31.171:30001 in a browser (any node IP works, since the Service is now a NodePort).

VI. Install kube-prometheus monitoring

[root@k8s-master01 opt]# wget https://github.com/prometheus-operator/kube-prometheus/archive/refs/tags/v0.4.0.tar.gz
[root@k8s-master01 opt]# tar -zxf v0.4.0.tar.gz
[root@k8s-master01 opt]# cd kube-prometheus-0.4.0/manifests

# create directories for each component
mkdir -p node-exporter alertmanager grafana kube-state-metrics prometheus serviceMonitor adapter

# sort the yaml files into those directories (the order below matters: the serviceMonitor and adapter globs must run before the catch-all prometheus-* glob)
mv *-serviceMonitor* serviceMonitor/
mv grafana-* grafana/
mv kube-state-metrics-* kube-state-metrics/
mv alertmanager-* alertmanager/
mv node-exporter-* node-exporter/
mv prometheus-adapter* adapter/
mv prometheus-* prometheus/
# edit prometheus-service.yaml
[root@k8s-master01 manifests]# vim prometheus/prometheus-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    prometheus: k8s
  name: prometheus-k8s
  namespace: monitoring
spec:
  type: NodePort  # added
  ports:
  - name: web
    nodePort: 32101
    port: 9090
    targetPort: web
  selector:
    app: prometheus
    prometheus: k8s
  sessionAffinity: ClientIP

Edit grafana-service.yaml:
[root@k8s-master01 manifests]# vim grafana/grafana-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    app: grafana
  name: grafana
  namespace: monitoring
spec:
  type: NodePort  # added
  ports:
  - name: http
    port: 3000
    nodePort: 32102  # added
    targetPort: http
  selector:
    app: grafana

# install the Operator
[root@k8s-master01 opt]# cd kube-prometheus-0.4.0/manifests
[root@k8s-master01 manifests]# kubectl apply -f setup/
[root@k8s-master01 manifests]# kubectl get pods -n monitoring
NAME                                  READY   STATUS    RESTARTS   AGE
prometheus-operator-99dccdc56-n2g7l   1/1     Running   0          7m1s

Install the remaining components:
kubectl apply -f adapter/
kubectl apply -f alertmanager/
kubectl apply -f node-exporter/
kubectl apply -f kube-state-metrics/
kubectl apply -f grafana/
kubectl apply -f prometheus/
kubectl apply -f serviceMonitor/


# Verify: Grafana on NodePort 32102 (Prometheus is on 32101); any node IP works
http://192.168.31.173:32102
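If the pages do not load, confirm that everything in the monitoring namespace is Running and that the NodePorts took effect. Grafana's default login is admin/admin, and it prompts for a new password on first use:

kubectl get pods -n monitoring                                      # all pods should be Running
kubectl get svc -n monitoring | grep -E 'grafana|prometheus-k8s'    # confirm NodePorts 32102 / 32101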
