Kubernetes Installation

Installing a single-master Kubernetes cluster

I. Environment Preparation

1. Prepare the machines

Kubernetes runs on either physical servers or virtual machines. The following deployment requires three physical or virtual machines running CentOS 7.

# Run on the master node
hostnamectl set-hostname master
# Run on the worker1 node
hostnamectl set-hostname worker1
# Run on the worker2 node
hostnamectl set-hostname worker2

Configure the hostname-to-IP mapping by appending to the /etc/hosts file:

cat <<EOF >>/etc/hosts
192.168.114.104 master
192.168.114.105 worker1
192.168.114.106 worker2
EOF
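
To confirm that name resolution works from each host, a quick check (a minimal sketch; adjust the hostnames if yours differ):

# Verify that each hostname resolves and responds
for h in master worker1 worker2; do ping -c 1 "$h"; done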

The following operations must be performed on every host.

Configure a China-based yum mirror (skip this step if your servers are located outside China):

curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
rm -rf /var/cache/yum && yum makecache && yum -y update && yum -y autoremove
# Install dependencies
yum install -y epel-release conntrack ipvsadm ipset jq sysstat curl iptables libseccomp
# Stop and disable the firewall
systemctl disable firewalld && systemctl stop firewalld
# Flush iptables rules and set the FORWARD chain policy to ACCEPT
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat && iptables -P FORWARD ACCEPT
# Disable SELinux
sed -i "s/^SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
# Load kernel modules
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
modprobe -- br_netfilter
EOF

# Make the module script executable and load the modules
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules
# Configure kernel parameters so that bridged IPv4 traffic is passed to the iptables chains
cat << EOF | tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
fs.inotify.max_user_watches=89100
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF

sysctl -p /etc/sysctl.d/k8s.conf
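
To confirm the modules and kernel parameters took effect, a verification sketch. The swapoff lines are an addition based on kubeadm's usual preflight requirement that swap be disabled; they are not part of the steps above:

# Confirm the IPVS and netfilter modules are loaded
lsmod | grep -e ip_vs -e nf_conntrack_ipv4 -e br_netfilter
# Confirm the bridge and forwarding settings are active
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward
# kubeadm's preflight checks normally require swap to be off
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab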

2. Install Docker


# Step 1: Install some required system tools
sudo yum install -y yum-utils device-mapper-persistent-data lvm2

# Step 2: Add the Docker repository
sudo yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# Step 3: Update the package cache and install Docker CE
sudo yum makecache fast
sudo yum -y install docker-ce

# Step 4: Start and enable the Docker service
sudo systemctl start docker
sudo systemctl enable docker
## After installation, add a post-start command to the service unit; otherwise Docker sets the default policy of the iptables FORWARD chain to DROP

sed -i "13i ExecStartPost=/usr/sbin/iptables -P FORWARD ACCEPT" /usr/lib/systemd/system/docker.service
## k8s.gcr.io is generally unreachable from inside China, so configure a domestic registry mirror
## kubeadm recommends the "systemd" cgroup driver, while Docker's default is "cgroupfs"; switch Docker to "systemd" so kubelet and Docker agree

tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": ["https://bk6kzfqm.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF

Restart the Docker service:

systemctl daemon-reload
systemctl restart docker
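
To verify that Docker picked up the systemd cgroup driver and the registry mirror, a quick check might look like this:

# The cgroup driver should report "systemd"
docker info --format '{{.CgroupDriver}}'
# The mirror should be listed under "Registry Mirrors"
docker info | grep -A1 -i 'registry mirrors'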

II. Installing Kubernetes

1. Configure the Kubernetes yum repository

Run the following commands on all nodes.

# Configure the yum repository (China mirror); choose whichever repository fits your environment

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF
# Configure the yum repository (official, outside China); choose whichever repository fits your environment
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-\$basearch
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kubelet kubeadm kubectl
EOF
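
Before installing, it can help to confirm the repository is usable and see which versions are available, for example:

# Rebuild the cache and list the available package versions
yum clean all && yum makecache
yum list kubelet kubeadm kubectl --showduplicates --disableexcludes=kubernetes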

2. Install the packages

Run the following commands on the master node.

## To install a specific version:
yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0 --disableexcludes=kubernetes

# Install kubeadm, kubelet, and kubectl
yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes
# Enable and start the kubelet service
systemctl start kubelet && systemctl enable kubelet
## Check the installed version
kubelet --version

3. Configure the master

Run the following command on the master node:

kubeadm init \
--kubernetes-version=1.18.0 \
--apiserver-advertise-address=192.168.114.104 \
--image-repository registry.aliyuncs.com/google_containers \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16

Explanation of the options:

  • --pod-network-cidr: defines the pod network CIDR, 10.244.0.0/16
  • --apiserver-advertise-address: the master host's internal IP address
  • --image-repository: the registry to pull control-plane images from. kubeadm pulls its images from k8s.gcr.io by default, which is unreachable from inside China, so --image-repository points it at the Alibaba Cloud mirror instead (hosts outside China can omit this). The required images can be listed and pre-pulled beforehand, as shown in the sketch after this list.
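
Before running kubeadm init, a sketch for listing and pre-pulling the images to confirm the mirror is reachable (the version should match the kubeadm/kubelet version installed above):

# List and pre-pull the control-plane images from the mirror
kubeadm config images list --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.18.0
kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.18.0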

A successful cluster initialization ends with output like the following:

Your Kubernetes control-plane has initialized successfully!

4. Configure the kubectl tool

mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

Once configured, check the node status with the following command. The master reports NotReady at this point because no pod network add-on has been deployed yet (Flannel is installed in the next step).

[root@master ~]# kubectl get node 
NAME     STATUS     ROLES    AGE   VERSION
master   NotReady   master   42m   v1.18.0

5. Deploy the Flannel network

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

If this file cannot be fetched directly from the master, download it from a machine that can reach the URL, copy it over, and apply it locally:
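
A download sketch (run it wherever raw.githubusercontent.com is reachable, then copy the file to the master):

# Fetch the Flannel manifest for local use
curl -fLo kube-flannel.yml https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml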

kubectl apply -f kube-flannel.yml

Check all pods across namespaces; the Flannel pods should now be running:

[root@master home]# kubectl get pods --all-namespaces
NAMESPACE     NAME                             READY   STATUS    RESTARTS   AGE
kube-system   coredns-7ff77c879f-g2qx9         1/1     Running   0          18h
kube-system   coredns-7ff77c879f-p559z         1/1     Running   0          18h
kube-system   etcd-master                      1/1     Running   1          18h
kube-system   kube-apiserver-master            1/1     Running   1          18h
kube-system   kube-controller-manager-master   1/1     Running   1          18h
kube-system   kube-flannel-ds-amd64-5kp8c      1/1     Running   1          18h
kube-system   kube-flannel-ds-amd64-gdprq      1/1     Running   0          143m
kube-system   kube-flannel-ds-amd64-xxfft      1/1     Running   0          14m
kube-system   kube-proxy-6f977                 1/1     Running   0          14m
kube-system   kube-proxy-dvmvd                 1/1     Running   1          18h
kube-system   kube-proxy-md8cf                 1/1     Running   0          143m
kube-system   kube-scheduler-master            1/1     Running   1          18h

6. Set up the worker nodes

Run the following commands on each worker node.

## Install the specified version
yum install -y kubelet-1.18.0 kubeadm-1.18.0 --disableexcludes=kubernetes

## Enable and start the kubelet service
systemctl start kubelet && systemctl enable kubelet

7. Join the worker nodes to the cluster

## Run on the master node only: print the command for joining the cluster
kubeadm token create --print-join-command
## Run the resulting command on every worker node
kubeadm join 192.168.114.104:6443 --token u3hx52.8rzncjujua07kijo     --discovery-token-ca-cert-hash sha256:bbb4c4cc5dc2422959a1055800f18ccab83f6141cf4b5fd8e016ec08b9068290

8. Check the cluster status

## Check node status
kubectl get node

## List all pods
kubectl get pods --all-namespaces

III. Installing the Dashboard

Run the following commands on the master node.

1. Create certificates

mkdir dashboard-certs
cd dashboard-certs/
# Create the namespace
kubectl create namespace kubernetes-dashboard
# Generate the private key
openssl genrsa -out dashboard.key 2048
# Create a certificate signing request
openssl req -days 36000 -new -out dashboard.csr -key dashboard.key -subj '/CN=dashboard-cert'
# Self-sign the certificate
openssl x509 -req -in dashboard.csr -signkey dashboard.key -out dashboard.crt
# Create the kubernetes-dashboard-certs secret
kubectl create secret generic kubernetes-dashboard-certs --from-file=dashboard.key --from-file=dashboard.crt -n kubernetes-dashboard
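
To confirm the certificate and the secret were created as expected, a quick inspection sketch:

# Inspect the self-signed certificate's subject and validity period
openssl x509 -in dashboard.crt -noout -subject -dates
# Confirm the secret exists in the kubernetes-dashboard namespace
kubectl -n kubernetes-dashboard get secret kubernetes-dashboard-certs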

2. Create an administrator account

kubectl apply -f https://kuboard.cn/install-script/k8s-dashboard/auth.yaml
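
If kuboard.cn is not reachable, an equivalent manifest can be applied inline. This is a sketch based on the assumption that auth.yaml creates a ServiceAccount named admin-user bound to the cluster-admin ClusterRole (the token lookup in step 5 below expects that account name):

cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: admin-user
    namespace: kubernetes-dashboard
EOF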

3. Install the dashboard

Download recommended.yaml and modify it to fit our needs: expose the dashboard Service as a NodePort. The full modified manifest is listed below. Then apply it:

kubectl create -f  ~/recommended.yaml

# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# apiVersion: v1
# kind: Namespace
# metadata:
#   name: kubernetes-dashboard

---

apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard

---

# kind: Service
# apiVersion: v1
# metadata:
#   labels:
#     k8s-app: kubernetes-dashboard
#   name: kubernetes-dashboard
#   namespace: kubernetes-dashboard
# spec:
#   ports:
#     - port: 443
#       targetPort: 8443
#   selector:
#     k8s-app: kubernetes-dashboard

---

# apiVersion: v1
# kind: Secret
# metadata:
#   labels:
#     k8s-app: kubernetes-dashboard
#   name: kubernetes-dashboard-certs
#   namespace: kubernetes-dashboard
# type: Opaque

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  type: NodePort
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 30008
  selector:
    k8s-app: kubernetes-dashboard

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-csrf
  namespace: kubernetes-dashboard
type: Opaque
data:
  csrf: ""

---

apiVersion: v1
kind: Secret
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-key-holder
  namespace: kubernetes-dashboard
type: Opaque

---

kind: ConfigMap
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard-settings
  namespace: kubernetes-dashboard

---

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
rules:
  # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
  - apiGroups: [""]
    resources: ["secrets"]
    resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
    verbs: ["get", "update", "delete"]
    # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
  - apiGroups: [""]
    resources: ["configmaps"]
    resourceNames: ["kubernetes-dashboard-settings"]
    verbs: ["get", "update"]
    # Allow Dashboard to get metrics.
  - apiGroups: [""]
    resources: ["services"]
    resourceNames: ["heapster", "dashboard-metrics-scraper"]
    verbs: ["proxy"]
  - apiGroups: [""]
    resources: ["services/proxy"]
    resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
    verbs: ["get"]

---

kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
rules:
  # Allow Metrics Scraper to get metrics from the Metrics server
  - apiGroups: ["metrics.k8s.io"]
    resources: ["pods", "nodes"]
    verbs: ["get", "list", "watch"]

---

apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: kubernetes-dashboard
subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kubernetes-dashboard

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: kubernetes-dashboard
  template:
    metadata:
      labels:
        k8s-app: kubernetes-dashboard
    spec:
      containers:
        - name: kubernetes-dashboard
          image: kubernetesui/dashboard:v2.0.0-beta5
          imagePullPolicy: Always
          ports:
            - containerPort: 8443
              protocol: TCP
          args:
            - --auto-generate-certificates
            - --namespace=kubernetes-dashboard
            # Uncomment the following line to manually specify Kubernetes API server Host
            # If not specified, Dashboard will attempt to auto discover the API server and connect
            # to it. Uncomment only if the default does not work.
            # - --apiserver-host=http://my-address:port
          volumeMounts:
            - name: kubernetes-dashboard-certs
              mountPath: /certs
              # Create on-disk volume to store exec logs
            - mountPath: /tmp
              name: tmp-volume
          livenessProbe:
            httpGet:
              scheme: HTTPS
              path: /
              port: 8443
            initialDelaySeconds: 30
            timeoutSeconds: 30
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      volumes:
        - name: kubernetes-dashboard-certs
          secret:
            secretName: kubernetes-dashboard-certs
        - name: tmp-volume
          emptyDir: {}
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule

---

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 8000
      targetPort: 8000
  selector:
    k8s-app: dashboard-metrics-scraper

---

kind: Deployment
apiVersion: apps/v1
metadata:
  labels:
    k8s-app: dashboard-metrics-scraper
  name: dashboard-metrics-scraper
  namespace: kubernetes-dashboard
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: dashboard-metrics-scraper
  template:
    metadata:
      labels:
        k8s-app: dashboard-metrics-scraper
      annotations:
        seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
    spec:
      containers:
        - name: dashboard-metrics-scraper
          image: kubernetesui/metrics-scraper:v1.0.1
          ports:
            - containerPort: 8000
              protocol: TCP
          livenessProbe:
            httpGet:
              scheme: HTTP
              path: /
              port: 8000
            initialDelaySeconds: 30
            timeoutSeconds: 30
          volumeMounts:
          - mountPath: /tmp
            name: tmp-volume
          securityContext:
            allowPrivilegeEscalation: false
            readOnlyRootFilesystem: true
            runAsUser: 1001
            runAsGroup: 2001
      serviceAccountName: kubernetes-dashboard
      nodeSelector:
        "beta.kubernetes.io/os": linux
      # Comment the following tolerations if Dashboard must not be deployed on master
      tolerations:
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      volumes:
        - name: tmp-volume
          emptyDir: {}

4. Check the results

# List all pods
kubectl get pods -A  -o wide

## Check the dashboard Services
[root@master k8s]# kubectl get service -n kubernetes-dashboard  -o wide
NAME                        TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)         AGE   SELECTOR
dashboard-metrics-scraper   ClusterIP   10.1.87.1     <none>        8000/TCP        20m   k8s-app=dashboard-metrics-scraper
kubernetes-dashboard        NodePort    10.1.166.43   <none>        443:30008/TCP   20m   k8s-app=kubernetes-dashboard

## View pod details
kubectl describe pod -n kubernetes-dashboard
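
The NodePort can also be checked directly from the master with curl (-k skips verification of the self-signed certificate); an HTML response indicates the dashboard is serving:

# Expect the dashboard's HTML index page in the response
curl -k https://192.168.114.104:30008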

5. Access the dashboard

Obtain a Bearer Token by running the following command on the master node:

kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep admin-user | awk '{print $1}')

Then open https://master-node-ip:30008 in a browser (replace master-node-ip with the master's address) and log in with the Bearer Token obtained above.
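
If the NodePort is not reachable from your workstation, kubectl port-forward is an alternative way to reach the Service (a sketch, run from any machine with kubectl access to the cluster):

# Forward local port 8443 to the dashboard Service's port 443
kubectl port-forward -n kubernetes-dashboard svc/kubernetes-dashboard 8443:443
# Then browse to https://localhost:8443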
