Contents
1 Prepare 3 virtual machines (CentOS 7.9)
2 Update yum packages and set the time on every VM
3 Disable the firewall
4 Add host entries for the three servers
5 Set up passwordless SSH between the three servers
6 Install the dependencies KubeSphere needs on every node, or the installer fails with: socat not found in system path
7 Install nfs-server
Configure nfs-client (optional)
Configure the default storage (sc.yaml)
8 Download the k8s installation script, only on the master node (node1)
9 Cluster configuration: create the config file config-sample.yaml
10 Edit config-sample.yaml
11 Start the installer with the config file
12 Wait for the installation to finish; all worker nodes are joined to the master (roughly 5-10 minutes)
13 Check the logs
14 Check node status
15 Delete the cluster and reinstall
1 Prepare 3 virtual machines (CentOS 7.9)
Give each VM a static IP (.211/.212/.213 below). Edit the NIC configuration; ens33 is the interface name in this setup, so adjust it to match yours:
cd /etc/sysconfig/network-scripts
vim ifcfg-ens33
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.1.211
GATEWAY=192.168.1.1
NETMASK=255.255.255.0
DNS1=114.114.114.114
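After saving, restart the network service and confirm the address was applied (again assuming the interface is ens33):
systemctl restart network
ip addr show ens33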
2 Update yum packages and set the time (on every VM)
yum -y update
yum install -y conntrack
yum makecache fast
yum install -y ntpdate
ntpdate time.windows.com
ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
date
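ntpdate only syncs the clock once. If you want the clocks to stay in sync, one option is a cron entry; a sketch that keeps time.windows.com as the time source:
(crontab -l 2>/dev/null; echo "0 * * * * /usr/sbin/ntpdate time.windows.com") | crontab -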
3 Disable the firewall (on every VM)
systemctl stop firewalld
systemctl disable firewalld
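Confirm the firewall is actually off before continuing:
systemctl is-active firewalld   # should print: inactive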
4 Add host entries for the three servers (on every VM)
vim /etc/hosts
192.168.1.211 node1
192.168.1.212 node2
192.168.1.213 node3
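A quick check that every hostname resolves from every machine:
for h in node1 node2 node3; do ping -c 1 $h; done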
5 Set up passwordless SSH between the three servers
1. First, generate a key pair on every server. Note: the OpenSSH shipped with CentOS 7.9 rejects DSA keys by default, so use RSA:
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
2. Then, on every server, append the public key to its own authorized_keys so each machine can log in to itself without a password:
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
3. Next, send each server's id_rsa.pub public key to the /tmp directory of the other machines. For example, on the master:
scp ~/.ssh/id_rsa.pub node2:/tmp/
scp ~/.ssh/id_rsa.pub node3:/tmp/
4. On the other machines, append the received public key to their own authorized_keys:
cat /tmp/id_rsa.pub >> ~/.ssh/authorized_keys
5. Likewise, send the public keys from the other machines to the remaining servers and append them to authorized_keys there.
6. Finally, test the passwordless connection:
ssh node1
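Steps 2-5 can also be collapsed into a single loop per machine using ssh-copy-id, which appends your key to the remote authorized_keys for you (it prompts for each host's password once):
for h in node1 node2 node3; do ssh-copy-id -i ~/.ssh/id_rsa.pub $h; done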
6 Install the dependencies KubeSphere needs (on every machine, otherwise the installer fails with "socat not found in system path")
yum install -y socat conntrack ebtables ipset
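You can verify the required binaries are now on the PATH:
command -v socat conntrack ebtables ipset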
7 Install nfs-server
# on every machine
yum install -y nfs-utils
# on the master: create the shared directory
mkdir -p /nfs/data
# on the master: export it to all clients
echo "/nfs/data/ *(insecure,rw,sync,no_root_squash)" > /etc/exports
# on the master: enable and start the NFS services
systemctl enable rpcbind
systemctl enable nfs-server
systemctl start rpcbind
systemctl start nfs-server
# reload the export table
exportfs -r
# check that the export took effect
exportfs
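exportfs should list the share, roughly like:
/nfs/data       <world>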
Configure nfs-client (optional)
# run only on the worker nodes; replace the IP with your own master's IP
showmount -e 192.168.1.211
mkdir -p /nfs/data
mount -t nfs 192.168.1.211:/nfs/data /nfs/data
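This mount does not survive a reboot; to make it persistent you can add an fstab entry (same master IP assumed):
echo "192.168.1.211:/nfs/data /nfs/data nfs defaults 0 0" >> /etc/fstab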
Configure the default storage (sc.yaml)
Save the following as sc.yaml:
## creates a StorageClass backed by the NFS provisioner
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-storage
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: k8s-sigs.io/nfs-subdir-external-provisioner
parameters:
  archiveOnDelete: "true"  ## whether to archive the PV contents when the PV is deleted
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: registry.cn-hangzhou.aliyuncs.com/lfy_k8s_images/nfs-subdir-external-provisioner:v4.0.2
          # resources:
          #   limits:
          #     cpu: 10m
          #   requests:
          #     cpu: 10m
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: k8s-sigs.io/nfs-subdir-external-provisioner
            - name: NFS_SERVER
              value: 192.168.1.211  ## your own NFS server address
            - name: NFS_PATH
              value: /nfs/data  ## the directory shared by the NFS server
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.1.211
            path: /nfs/data
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
# apply the manifest
kubectl apply -f sc.yaml
# confirm it took effect; nfs-storage should be marked (default)
kubectl get sc
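To confirm dynamic provisioning really works end to end, you can create a throwaway claim (test-pvc below is just an illustrative name) and check that it binds:
# test-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 200Mi
# apply it; the STATUS should turn Bound within seconds
kubectl apply -f test-pvc.yaml
kubectl get pvc test-pvc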
8 Download the KubeKey installer (only on the master node, node1)
# KKZONE=cn downloads from the CN mirror, which is much faster inside China
export KKZONE=cn
curl -sfL https://get-kk.kubesphere.io | VERSION=v2.2.1 sh -
chmod +x kk
9 Create the cluster configuration file (this writes config-sample.yaml to the current directory)
./kk create config
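10 Edit config-sample.yaml. The parts that must match your machines are spec.hosts and spec.roleGroups; a sketch for the three servers above ("yourpassword" is a placeholder; since step 5 set up key-based SSH, you can instead replace the password field with privateKeyPath: "~/.ssh/id_rsa"):
spec:
  hosts:
  - {name: node1, address: 192.168.1.211, internalAddress: 192.168.1.211, user: root, password: "yourpassword"}
  - {name: node2, address: 192.168.1.212, internalAddress: 192.168.1.212, user: root, password: "yourpassword"}
  - {name: node3, address: 192.168.1.213, internalAddress: 192.168.1.213, user: root, password: "yourpassword"}
  roleGroups:
    etcd:
    - node1
    control-plane:
    - node1
    worker:
    - node2
    - node3
The rest of the generated file can be left at its defaults.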
11 Start the installer with the config file
./kk create cluster --with-kubernetes v1.23.7 --with-kubesphere v3.3.0 -f config-sample.yaml -y
12 Wait patiently for the installation to finish; all worker nodes are joined to the master automatically (roughly 5-10 minutes).
13 Check the logs
kubectl logs -n kubesphere-system $(kubectl get pod -n kubesphere-system -l app=ks-installer -o jsonpath='{.items[0].metadata.name}') -f
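On success, the tail of this log prints the console address and the default credentials, roughly like:
#####################################################
###              Welcome to KubeSphere!           ###
#####################################################
Console: http://192.168.1.211:30880
Account: admin
Password: P@88w0rd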
14 Check node status
kubectl get nodes
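All three nodes should report Ready, along these lines (ages and exact roles will vary):
NAME    STATUS   ROLES                  AGE   VERSION
node1   Ready    control-plane,master   10m   v1.23.7
node2   Ready    worker                 9m    v1.23.7
node3   Ready    worker                 9m    v1.23.7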
15 Delete the cluster and reinstall
./kk delete cluster
Advanced mode: delete using the same config file
./kk delete cluster [-f config-sample.yaml]