Download the latest Helm v3 release from GitHub:
https://github.com/helm/helm/releases
Extract the archive (tar -zxvf helm-v3.0.0-linux-386.tar.gz)
Find the helm binary in the extracted directory and move it onto your PATH (mv linux-386/helm /usr/local/bin/helm)
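Verify the installation:
helm version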
# Add the Bitnami (official) repository
helm repo add bitnami https://charts.bitnami.com/bitnami
# Add the Microsoft (Azure China) mirror of the stable repository
helm repo add stable http://mirror.azure.cn/kubernetes/charts
# Add the Alibaba Cloud repository
helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
# Update the local chart index
helm repo update
# List the configured repositories
helm repo list
# Search for charts in the stable repository
helm search repo stable
# Remove a repository
helm repo remove aliyun
# If adding a repository fails with an invalid-certificate error, add --insecure-skip-tls-verify:
helm repo add --insecure-skip-tls-verify jetstack https://charts.jetstack.io/
# Install and configure an NFS server to back dynamic provisioning
yum install -y nfs-utils
vi /etc/exports
/home/nfs *(rw,no_root_squash,insecure,async)
systemctl enable nfs-server
systemctl start nfs-server
systemctl status nfs-server
After editing and saving /etc/exports, reload the export configuration with exportfs:
sudo exportfs -arv
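To confirm the directory is actually exported, use showmount (installed with nfs-utils) on the NFS server:
showmount -e localhost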
# From a client node, verify the export can be mounted; /mnt/nfs-test is an arbitrary local mount point (create it first)
mount -t nfs -o nolock zxhy-nacos:/home/nfs /mnt/nfs-test
# rbac.yaml: the only field that normally needs changing is the namespace; set it to match your environment
apiVersion: v1
kind: ServiceAccount # service account the NFS provisioner runs as inside the cluster
metadata:
  name: nfs-client-provisioner
  namespace: default
---
kind: ClusterRole # cluster-wide role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner # role name
rules: # permissions granted by the role
  - apiGroups: [""]
    resources: ["persistentvolumes"] # resources the role operates on
    verbs: ["get", "list", "watch", "create", "delete"] # allowed actions on those resources
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding # bind the cluster role to the service account
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects: # who the role is bound to
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef: # which role to bind
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role # namespaced role used for leader election
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner # role name
  namespace: default # a Role is namespaced; a ClusterRole is not
rules: # permissions granted by the role
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding # bind the role to the service account
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
subjects: # who the role is bound to
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef: # which role to bind
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
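Apply the RBAC manifest and confirm the service account exists:
kubectl apply -f rbac.yaml
kubectl get serviceaccount nfs-client-provisioner -n default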
apiVersion: storage.k8s.io/v1
kind: StorageClass # StorageClass backed by the NFS provisioner
metadata:
  name: managed-nfs-storage
provisioner: zxhy-nfs-storage # must match the PROVISIONER_NAME environment variable in the provisioner Deployment
parameters:
  archiveOnDelete: "false"
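Apply the StorageClass (saved here as class.yaml; the file name is only an example):
kubectl apply -f class.yaml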
kubectl patch storageclass managed-nfs-storage -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
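The patch above marks managed-nfs-storage as the cluster's default StorageClass; it should now show the (default) marker:
kubectl get storageclass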
apiVersion: apps/v1
kind: Deployment # deploys the nfs-client-provisioner
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  namespace: default # must match the namespace used in the RBAC manifest
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner # run as the service account created in rbac.yaml
      containers:
        - name: nfs-client-provisioner
          image: hub.kaikeba.com/java12/nfs-client-provisioner:v1 # image location
          volumeMounts: # mount the NFS volume into the container
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME # provisioner name
              value: zxhy-nfs-storage # must match the provisioner field of the StorageClass
            - name: NFS_SERVER # NFS server to use
              value: zxhy-nacos
            - name: NFS_PATH # exported directory on the NFS server
              value: /home/nfs
      volumes: # declare the NFS volume
        - name: nfs-client-root
          nfs:
            server: zxhy-nacos
            path: /home/nfs
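Apply the provisioner Deployment (deployment.yaml is an example file name) and check that the pod starts:
kubectl apply -f deployment.yaml
kubectl get pods -l app=nfs-client-provisioner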
# Search for available Redis charts
helm search repo redis
helm install zxhy-redis bitnami/redis-cluster --version 9.0.13 \
  --set global.storageClass=managed-nfs-storage \
  --set usePassword=false \
  --set cluster.nodes=6
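Check the release and wait for the Redis cluster pods to reach Running state:
helm status zxhy-redis
kubectl get pods -l app.kubernetes.io/instance=zxhy-redis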
# Verify that IPVS is in use
yum install ipvsadm ipset -y
# Show the IPVS virtual servers and routes programmed by kube-proxy
ipvsadm -Ln
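If no IPVS entries show up, kube-proxy may be running in iptables mode; on kubeadm clusters the mode can be checked in the kube-proxy ConfigMap:
kubectl get configmap kube-proxy -n kube-system -o yaml | grep mode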
Expose the Redis cluster with a NodePort Service so it can be reached for testing:
apiVersion: v1
kind: Service
metadata:
  name: redis-service
spec:
  type: NodePort # NodePort makes the service reachable from outside the cluster
  ports:
    - port: 6379 # port used by other services inside the cluster
      targetPort: 6379 # container port; must match the port exposed in the image
  selector:
    app.kubernetes.io/instance: zxhy-redis
    app.kubernetes.io/name: redis
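Apply the Service (the file name is only an example), look up the assigned NodePort, and test from outside the cluster with redis-cli, where <node-ip> is the address of any cluster node:
kubectl apply -f redis-service.yaml
kubectl get svc redis-service
redis-cli -h <node-ip> -p <nodePort> ping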
# Deploy Nacos from the official nacos-k8s manifests
git clone https://github.com/nacos-group/nacos-k8s.git
Before applying, edit the MySQL connection settings in nacos-k8s/deploy/nacos/nacos-pvc-nfs.yaml so Nacos can reach the database:
data:
  mysql.host: "zxhy-mysql"
  mysql.db.name: "ldt_config"
  mysql.port: "3306"
  mysql.user: "root"
  mysql.password: "111111"
kubectl apply -f nacos-k8s/deploy/nacos/nacos-pvc-nfs.yaml
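Wait for the Nacos pods to reach Running state:
kubectl get pods -l app=nacos -w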
Expose the Nacos console with a NodePort Service for external test access:
apiVersion: v1
kind: Service
metadata:
  name: nacos-service
spec:
  type: NodePort # NodePort makes the service reachable from outside the cluster
  ports:
    - port: 8848 # port used by other services inside the cluster
      targetPort: 8848 # container port; must match the port exposed in the image
  selector:
    app: nacos
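Apply the Service (file name is an example), find the assigned NodePort, and open the console at http://<node-ip>:<nodePort>/nacos:
kubectl apply -f nacos-service.yaml
kubectl get svc nacos-service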
# Install RocketMQ with Helm; this assumes a chart repository providing the rocketmq chart has already been added under the name rocketmq-repo
helm upgrade --install rocketmq \
--set broker.size.master="3" \
--set broker.size.replica="1" \
--set broker.master.jvmMemory="-Xms2g -Xmx2g -Xmn1g" \
--set broker.master.resources.requests.memory="4Gi" \
--set nameserver.replicaCount="3" \
--set dashboard.enabled="true" \
--set dashboard.ingress.enabled="true" \
--set dashboard.ingress.hosts[0].host="rocketmq-ha.example.com" \
rocketmq-repo/rocketmq
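Confirm the release and watch the nameserver, broker, and dashboard pods start:
helm status rocketmq
kubectl get pods -l app.kubernetes.io/instance=rocketmq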
Expose the RocketMQ dashboard with a NodePort Service for external test access:
apiVersion: v1
kind: Service
metadata:
  name: rocketmq-dashboard-service
spec:
  type: NodePort # NodePort makes the service reachable from outside the cluster
  ports:
    - port: 8080 # port used by other services inside the cluster
      targetPort: 8080 # container port; must match the port exposed in the image
  selector:
    app.kubernetes.io/instance: rocketmq
    app.kubernetes.io/name: rocketmq
    component: dashboard
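Apply the Service (file name is an example) and browse to the dashboard at http://<node-ip>:<nodePort>:
kubectl apply -f rocketmq-dashboard-service.yaml
kubectl get svc rocketmq-dashboard-service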