Note: three nodes are generally enough to start with; once the cluster is up, more nodes can be added as needed.
# 1 Configure a static IP address
ssh <user>@<server-ip>
sudo su
cp /etc/netplan/00-installer-config.yaml /etc/netplan/00-installer-config.yaml.bak
vim /etc/netplan/00-installer-config.yaml   # netplan layout for Ubuntu 22.04
network:
ethernets:
ens33:
dhcp4: false
addresses: [192.168.48.210/24]
optional: true
routes:
- to: default
via: 192.168.48.2
nameservers:
addresses: [192.168.48.2]
version: 2
netplan apply
# 2 Allow root login over SSH
sudo su
passwd root
vim /etc/ssh/sshd_config
...
PermitRootLogin yes
sudo service ssh restart
# 3 Set the hostname and /etc/hosts
hostnamectl set-hostname xxx
vim /etc/hosts
...
192.168.48.210 kmaster
192.168.48.211 knode1
192.168.48.212 knode2
192.168.48.213 knode3 ## added later; sync this file to the other hosts
192.168.48.214 knode4 ## added later; sync this file to the other hosts
# Enable IPv4 forwarding and let iptables see bridged traffic
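The heredoc commands here follow the standard snippet from the Kubernetes documentation for loading br_netfilter and enabling IPv4 forwarding; verify it against the docs for your Kubernetes version:
cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

sudo modprobe overlay
sudo modprobe br_netfilter

# sysctl settings required by Kubernetes; they persist across reboots
cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF

# apply the sysctl settings without rebooting
sudo sysctl --system

# verify the module is loaded and forwarding is on
lsmod | grep br_netfilter
sysctl net.ipv4.ip_forward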
There are two ways to configure the cgroup driver, cgroupfs and systemd; the Kubernetes documentation recommends systemd.
To use systemd as the cgroup driver, edit the cgroupDriver field of the KubeletConfiguration and set it to systemd. For example:
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
...
cgroupDriver: systemd
Note: starting with v1.22, when creating a cluster with kubeadm, if cgroupDriver is not set in the KubeletConfiguration, kubeadm defaults it to systemd.
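Once kubeadm has brought a node up, the driver the kubelet actually uses can be checked in the kubelet config file that kubeadm writes (path assumed to be the kubeadm default; adjust if yours differs):
grep cgroupDriver /var/lib/kubelet/config.yaml
# expected on a systemd-driver node:
# cgroupDriver: systemd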
Goal: install the CNI plugins
Download page: https://github.com/containernetworking/plugins/releases
# if the download fails, pick another mirror or release from the page above
wget -c https://github.com/containernetworking/plugins/releases/download/v1.3.0/cni-plugins-linux-amd64-v1.3.0.tgz
# following the upstream install steps, create the directory that holds the CNI plugins
mkdir -p /opt/cni/bin
tar -xzvf cni-plugins-linux-amd64-v1.3.0.tgz -C /opt/cni/bin/
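A quick sanity check that the plugins landed in place (the exact file list depends on the release, but bridge, loopback, portmap and friends should be there):
ls /opt/cni/bin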
# Remove existing Docker-related packages and data
for pkg in docker.io docker-doc docker-compose docker-compose-v2 podman-docker containerd runc; do sudo apt-get remove $pkg; done
sudo apt-get purge docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin docker-ce-rootless-extras
sudo rm -rf /var/lib/docker
sudo rm -rf /var/lib/containerd
# Add Docker's official GPG key:
# https://docs.docker.com/engine/install/ubuntu/
sudo apt-get update
sudo apt-get install ca-certificates curl gnupg
sudo install -m 0755 -d /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
sudo chmod a+r /etc/apt/keyrings/docker.gpg
# Add the repository to Apt sources:
echo \
"deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
"$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
apt-cache madison containerd.io
apt install containerd.io=1.6.24-1
containerd --version
containerd containerd.io 1.6.24 61f9fd88f79f081d64d6fa3bb1a0dc71ec870523
systemctl daemon-reload
systemctl enable --now containerd
containerd config default >/etc/containerd/config.toml
vim /etc/containerd/config.toml
...
sandbox_image = "registry.k8s.io/pause:3.6"   # change the value to registry.aliyuncs.com/google_containers/pause:3.9
...
SystemdCgroup = true   # set the cgroup driver to systemd; this covers the case where the KubeletConfiguration step above was skipped
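If you would rather not edit the file by hand, the same two changes can be made with sed against the default config generated above (a sketch; re-check the lines afterwards):
sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.9"#' /etc/containerd/config.toml
sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml
grep -E 'sandbox_image|SystemdCgroup' /etc/containerd/config.toml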
systemctl daemon-reload
systemctl restart containerd.service
# Install runc
# runc is the low-level container runtime; it implements init, run, create, ps and the other commands needed to actually run containers
#https://github.com/opencontainers/runc/releases/
curl -LO https://github.com/opencontainers/runc/releases/download/v1.1.10/runc.amd64
install -m 755 runc.amd64 /usr/local/sbin/runc
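Confirm the binary is installed and on PATH:
runc --version
# should print something like: runc version 1.1.10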
# Make sure the MAC address and product_uuid are unique on every node
cat /sys/class/dmi/id/product_uuid
ip link | grep brd
# Disable swap
swapoff -a
vim /etc/fstab
...
#/swap.img
free -h
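To comment out the swap entry without opening vim, a sed one-liner like the following can be used instead (a sketch; it comments any non-comment fstab line that mounts swap):
sed -ri 's/^([^#].*[[:space:]]swap[[:space:]])/#\1/' /etc/fstab
swapoff -a
free -h   # the Swap line should now read 0B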
# Fix the time zone and synchronize the clock
# check the current time zone and time
tzselect
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
date
# install chrony for network time sync (optional; usually fine without it, and it was skipped here)
apt install chrony -y && systemctl enable --now chrony
# Extend the LVM volume (skip if LVM was not enabled at install time; the goal is to use all available disk space)
lvextend -r -l +100%FREE /dev/mapper/ubuntu--vg-ubuntu--lv
# Disable ufw / firewalld
systemctl stop ufw
systemctl disable ufw
# Disable SELinux (not enabled on Ubuntu; CentOS enables it by default, so keep this in mind)
# Ubuntu does not ship SELinux by default; if the selinux commands and config file are absent, it is not installed and the steps below can be skipped
sed -ri 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
# Install the three Kubernetes tools (kubeadm, kubelet, kubectl)
apt-get update
apt-get install -y apt-transport-https
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt-get update
apt install apt-file -y
apt-file update
# list the package; this shows every installable version
apt list kubeadm -a
apt install kubeadm=1.28.0-00 kubectl=1.28.0-00 kubelet=1.28.0-00 -y
systemctl enable kubelet
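Optionally pin the three packages so a routine apt upgrade does not move the node to a different Kubernetes version (this mirrors the hold step in the official install guide):
apt-mark hold kubelet kubeadm kubectl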
# Export the default kubeadm configuration
kubeadm config print init-defaults > Kubernetes-cluster.yaml
# the config below can also be copied straight from this document
vim Kubernetes-cluster.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: abcdef.0123456789abcdef
ttl: 24h0m0s
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.48.210 ## change to your own control-plane IP
bindPort: 6443
nodeRegistration:
  criSocket: unix:///run/containerd/containerd.sock # change it to this containerd socket
imagePullPolicy: IfNotPresent
  name: kmaster ## change to your own hostname
taints: null
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers # switch to the Aliyun mirror to speed up image pulls
kind: ClusterConfiguration
kubernetesVersion: 1.28.0
networking:
dnsDomain: cluster.local
podSubnet: 10.244.0.0/16
serviceSubnet: 10.96.0.0/12
scheduler: {}
Note the pause image version: if kubeadm warns about it, change it to the version the warning suggests; the value is read from /etc/containerd/config.toml.
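Optionally, the control-plane images can be listed and pulled before running init, so image-download problems show up separately from cluster bootstrap problems:
kubeadm config images list --config Kubernetes-cluster.yaml
kubeadm config images pull --config Kubernetes-cluster.yaml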
# run the init
kubeadm init --config Kubernetes-cluster.yaml
# on success, save the following output
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.48.210:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:121e29b7f40844aca55b1b57d3115910d62c48906fa9e56c98e9f204027f60b5
#run the following as a regular user
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
# at this point the nodes are still NotReady; a pod network is required (flannel is used here), so keep going
# if a node stays NotReady, check that the kubelet is running (start it if needed) and that a network plugin such as flannel is installed
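Some checks that help while a node is still NotReady:
kubectl get nodes -owide
kubectl get pods -n kube-system -owide
systemctl status kubelet          # run on the affected node
journalctl -u kubelet -f          # kubelet logs on the affected node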
# Set up the flannel network
# download the flannel manifest
wget https://github.com/flannel-io/flannel/releases/latest/download/kube-flannel.yml
cat kube-flannel.yml ## the Network CIDR must match the podSubnet given to kubeadm init
apiVersion: v1
kind: Namespace
metadata:
labels:
k8s-app: flannel
pod-security.kubernetes.io/enforce: privileged
name: kube-flannel
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: flannel
name: flannel
namespace: kube-flannel
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
k8s-app: flannel
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
- apiGroups:
- networking.k8s.io
resources:
- clustercidrs
verbs:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
k8s-app: flannel
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-flannel
---
apiVersion: v1
data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "10.244.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
kind: ConfigMap
metadata:
labels:
app: flannel
k8s-app: flannel
tier: node
name: kube-flannel-cfg
namespace: kube-flannel
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
app: flannel
k8s-app: flannel
tier: node
name: kube-flannel-ds
namespace: kube-flannel
spec:
selector:
matchLabels:
app: flannel
k8s-app: flannel
template:
metadata:
labels:
app: flannel
k8s-app: flannel
tier: node
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/os
operator: In
values:
- linux
containers:
- args:
- --ip-masq
- --kube-subnet-mgr
command:
- /opt/bin/flanneld
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: EVENT_QUEUE_DEPTH
value: "5000"
image: docker.io/flannel/flannel:v0.23.0
name: kube-flannel
resources:
requests:
cpu: 100m
memory: 50Mi
securityContext:
capabilities:
add:
- NET_ADMIN
- NET_RAW
privileged: false
volumeMounts:
- mountPath: /run/flannel
name: run
- mountPath: /etc/kube-flannel/
name: flannel-cfg
- mountPath: /run/xtables.lock
name: xtables-lock
hostNetwork: true
initContainers:
- args:
- -f
- /flannel
- /opt/cni/bin/flannel
command:
- cp
image: docker.io/flannel/flannel-cni-plugin:v1.2.0
name: install-cni-plugin
volumeMounts:
- mountPath: /opt/cni/bin
name: cni-plugin
- args:
- -f
- /etc/kube-flannel/cni-conf.json
- /etc/cni/net.d/10-flannel.conflist
command:
- cp
image: docker.io/flannel/flannel:v0.23.0
name: install-cni
volumeMounts:
- mountPath: /etc/cni/net.d
name: cni
- mountPath: /etc/kube-flannel/
name: flannel-cfg
priorityClassName: system-node-critical
serviceAccountName: flannel
tolerations:
- effect: NoSchedule
operator: Exists
volumes:
- hostPath:
path: /run/flannel
name: run
- hostPath:
path: /opt/cni/bin
name: cni-plugin
- hostPath:
path: /etc/cni/net.d
name: cni
- configMap:
name: kube-flannel-cfg
name: flannel-cfg
- hostPath:
path: /run/xtables.lock
type: FileOrCreate
name: xtables-lock
# apply the flannel network manifest
kubectl apply -f kube-flannel.yml
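After applying the manifest, the flannel DaemonSet pods should reach Running and the nodes should switch to Ready:
kubectl get pods -n kube-flannel -owide
kubectl get nodes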
# boot the other node VMs and run the join command on each
kubeadm join 192.168.48.210:6443 --token abcdef.0123456789abcdef \
--discovery-token-ca-cert-hash sha256:121e29b7f40844aca55b1b57d3115910d62c48906fa9e56c98e9f204027f60b5
# Reference
https://blog.csdn.net/qq_45654671/article/details/128238921
# Regenerate a token on the master (the default token TTL is 24h; any node joining after it expires needs a fresh one, generated as follows)
kubeadm token generate # generate a token string
# the newly generated token is printed
cn4y74.ngzahx3ul01og1et
kubeadm token create cn4y74.ngzahx3ul01og1et --print-join-command --ttl=0
# use the token above to produce the join command
# the new join command is printed
kubeadm join 192.168.48.210:6443 --token cn4y74.ngzahx3ul01og1et --discovery-token-ca-cert-hash sha256:121e29b7f40844aca55b1b57d3115910d62c48906fa9e56c98e9f204027f60b5
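Existing bootstrap tokens and their expiry can be listed, and a single command can also create a token and print the join line in one go:
kubeadm token list
kubeadm token create --print-join-command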
root@kmaster:~# kubectl get nodes
NAME      STATUS   ROLES           AGE     VERSION
kmaster   Ready    control-plane   5d17h   v1.28.0
knode1    Ready    <none>          5d17h   v1.28.0
knode2    Ready    <none>          5d17h   v1.28.0
knode3    Ready    <none>          2m40s   v1.28.0
knode4    Ready    <none>          2m30s   v1.28.0
The following setup is generally only needed on the master node.
# Set up kubectl command completion
apt install bash-completion -y
echo "source <(kubectl completion bash)" >> ~/.bashrc
source ~/.bashrc
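Optionally add a short alias and hook it into the same completion, as described in the kubectl documentation:
echo "alias k=kubectl" >> ~/.bashrc
echo "complete -o default -F __start_kubectl k" >> ~/.bashrc
source ~/.bashrc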
Note: check the release list on GitHub; this walkthrough uses dashboard v2.7.0.
https://github.com/kubernetes/dashboard (General-purpose web UI for Kubernetes clusters)
mkdir -p /root/app/dashboard
cd /root/app/dashboard
# if GitHub is reachable, just apply the manifest below directly (skip this step if the network is unreliable)
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml
# if GitHub is not reachable, create recommended.yaml locally with the following content
vim recommended.yaml
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.7.0
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.8
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
# apply recommended.yaml
kubectl apply -f recommended.yaml
# the apply produces output like this
root@kmaster:~/app/dashboard# kubectl apply -f recommended.yaml
namespace/kubernetes-dashboard created
serviceaccount/kubernetes-dashboard created
service/kubernetes-dashboard created
secret/kubernetes-dashboard-certs created
secret/kubernetes-dashboard-csrf created
secret/kubernetes-dashboard-key-holder created
configmap/kubernetes-dashboard-settings created
role.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrole.rbac.authorization.k8s.io/kubernetes-dashboard created
rolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
clusterrolebinding.rbac.authorization.k8s.io/kubernetes-dashboard created
deployment.apps/kubernetes-dashboard created
service/dashboard-metrics-scraper created
deployment.apps/dashboard-metrics-scraper created
# check the installation
root@kmaster:~/app/dashboard# kubectl get pods --all-namespaces -owide | grep dashboard
kubernetes-dashboard   dashboard-metrics-scraper-5657497c4c-cqp7b   1/1   Running   0   6m45s   10.244.3.2   knode3   <none>   <none>
kubernetes-dashboard   kubernetes-dashboard-78f87ddfc-l8k9g         1/1   Running   0   6m45s   10.244.1.6   knode1   <none>   <none>
# check the services
root@kmaster:~/app/dashboard# kubectl get svc --all-namespaces -owide
NAMESPACE              NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE     SELECTOR
default                kubernetes                  ClusterIP   10.96.0.1       <none>        443/TCP                  8d      <none>
kube-system            kube-dns                    ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP,9153/TCP   8d      k8s-app=kube-dns
kubernetes-dashboard   dashboard-metrics-scraper   ClusterIP   10.103.119.96   <none>        8000/TCP                 8m33s   k8s-app=dashboard-metrics-scraper
kubernetes-dashboard   kubernetes-dashboard        ClusterIP   10.96.109.150   <none>        443/TCP                  8m33s   k8s-app=kubernetes-dashboard
Note: the default Service must be deleted here and recreated as a NodePort instead of a ClusterIP.
# delete the Service that was created by default
kubectl delete service kubernetes-dashboard --namespace=kubernetes-dashboard
# create a custom Service manifest
vim dashboard_svc.yaml
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort
ports:
- port: 443
targetPort: 8443
selector:
k8s-app: kubernetes-dashboard
# the full sequence looks like this
root@kmaster:~/app/dashboard# kubectl get svc --all-namespaces -owide
NAMESPACE              NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE     SELECTOR
default                kubernetes                  ClusterIP   10.96.0.1       <none>        443/TCP                  8d      <none>
kube-system            kube-dns                    ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP,9153/TCP   8d      k8s-app=kube-dns
kubernetes-dashboard   dashboard-metrics-scraper   ClusterIP   10.103.119.96   <none>        8000/TCP                 8m33s   k8s-app=dashboard-metrics-scraper
kubernetes-dashboard   kubernetes-dashboard        ClusterIP   10.96.109.150   <none>        443/TCP                  8m33s   k8s-app=kubernetes-dashboard
root@kmaster:~/app/dashboard# kubectl delete service kubernetes-dashboard --namespace=kubernetes-dashboard
service "kubernetes-dashboard" deleted
root@kmaster:~/app/dashboard# vim dashboard_svc.yaml
root@kmaster:~/app/dashboard# kubectl get svc --all-namespaces -owide
NAMESPACE              NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE   SELECTOR
default                kubernetes                  ClusterIP   10.96.0.1       <none>        443/TCP                  8d    <none>
kube-system            kube-dns                    ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP,9153/TCP   8d    k8s-app=kube-dns
kubernetes-dashboard   dashboard-metrics-scraper   ClusterIP   10.103.119.96   <none>        8000/TCP                 28m   k8s-app=dashboard-metrics-scraper
root@kmaster:~/app/dashboard# kubectl apply -f dashboard_svc.yaml
service/kubernetes-dashboard created
root@kmaster:~/app/dashboard# kubectl get svc --all-namespaces -owide
NAMESPACE              NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE   SELECTOR
default                kubernetes                  ClusterIP   10.96.0.1       <none>        443/TCP                  8d    <none>
kube-system            kube-dns                    ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP,9153/TCP   8d    k8s-app=kube-dns
kubernetes-dashboard   dashboard-metrics-scraper   ClusterIP   10.103.119.96   <none>        8000/TCP                 29m   k8s-app=dashboard-metrics-scraper
kubernetes-dashboard   kubernetes-dashboard        NodePort    10.103.5.118    <none>        443:31184/TCP            25s   k8s-app=kubernetes-dashboard
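Instead of deleting and recreating the Service, the existing one can also be switched to NodePort with a one-line patch; the end result is the same, so this is purely a matter of preference:
kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{"spec":{"type":"NodePort"}}'
kubectl -n kubernetes-dashboard get svc kubernetes-dashboard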
# create the admin user manifest
vim admin-user.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kubernetes-dashboard
# apply the file
kubectl apply -f admin-user.yaml
# bind the admin user to the cluster-admin role
vim admin-user-role-binding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kubernetes-dashboard
# apply the file
kubectl apply -f admin-user-role-binding.yaml
# check the Service and its NodePort
kubectl get svc --all-namespaces -owide
NAMESPACE              NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)                  AGE   SELECTOR
default                kubernetes                  ClusterIP   10.96.0.1       <none>        443/TCP                  21d   <none>
kube-system            kube-dns                    ClusterIP   10.96.0.10      <none>        53/UDP,53/TCP,9153/TCP   21d   k8s-app=kube-dns
kubernetes-dashboard   dashboard-metrics-scraper   ClusterIP   10.111.53.171   <none>        8000/TCP                 22m   k8s-app=dashboard-metrics-scraper
kubernetes-dashboard   kubernetes-dashboard        NodePort    10.106.152.11   <none>        443:32078/TCP            10m   k8s-app=kubernetes-dashboard
## key step: create the login token; the command output is the token itself
kubectl -n kubernetes-dashboard create token admin-user
## open https://<node-ip>:<NodePort> in a browser
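The token printed above expires after a short time; for a long-lived token, the dashboard documentation describes binding a service-account token Secret to admin-user (a sketch of that approach):
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Secret
metadata:
  name: admin-user
  namespace: kubernetes-dashboard
  annotations:
    kubernetes.io/service-account.name: "admin-user"
type: kubernetes.io/service-account-token
EOF
kubectl -n kubernetes-dashboard get secret admin-user -o jsonpath="{.data.token}" | base64 -d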
# Cleanup (once the admin user is no longer needed):
kubectl -n kubernetes-dashboard delete serviceaccount admin-user
kubectl -n kubernetes-dashboard delete clusterrolebinding admin-user