1. Set up the NFS server
[root@work03 ~]# yum install nfs-utils rpcbind -y
[root@work03 ~]# systemctl start nfs
[root@work03 ~]# systemctl start rpcbind
[root@work03 ~]# systemctl enable nfs
[root@work03 ~]# systemctl enable rpcbind
[root@work03 ~]# mkdir -p /data/mongodb/
[root@work03 ~]# vim /etc/exports
/data/mongodb/ *(rw,sync,no_root_squash,no_all_squash)
[root@work03 ~]# systemctl restart rpcbind
[root@work03 ~]# systemctl restart nfs
[root@work03 ~]# showmount -e localhost
Export list for localhost:
/data/mongodb *
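Optionally, verify the export from another node before moving on (work01 here is just an example client host; 192.168.0.108 is the NFS server address used later by the provisioner):
[root@work01 ~]# yum install nfs-utils -y
[root@work01 ~]# mkdir -p /mnt/nfs-test
[root@work01 ~]# mount -t nfs 192.168.0.108:/data/mongodb /mnt/nfs-test
[root@work01 ~]# touch /mnt/nfs-test/test-file && ls /mnt/nfs-test
[root@work01 ~]# umount /mnt/nfs-test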
2. Create the dynamic volume provisioner
(1) Create the ServiceAccount
vim serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-provisioner
kubectl apply -f serviceaccount.yaml
(2) Create the RBAC resources
vim service-rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  # replace with namespace where provisioner is deployed
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    # replace with namespace where provisioner is deployed
    namespace: kube-system
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
kubectl apply -f service-rbac.yaml
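A quick sanity check that the ServiceAccount and RBAC objects exist (names as defined above):
kubectl -n kube-system get serviceaccount nfs-client-provisioner
kubectl get clusterrole/nfs-client-provisioner-runner clusterrolebinding/run-nfs-client-provisioner
kubectl -n kube-system get role/leader-locking-nfs-client-provisioner rolebinding/leader-locking-nfs-client-provisioner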
(3) Create the nfs-provisioner Deployment
vim nfs-provisioner-deploy.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: mongodb-nfs
  namespace: kube-system
spec:
  selector:
    matchLabels:
      app: mongodb-nfs
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mongodb-nfs
    spec:
      serviceAccountName: nfs-client-provisioner
      imagePullSecrets:
        - name: regcred
      containers:
        - name: mongodb-nfs
          image: 192.168.0.107:80/heosun/nfs-client-provisioner:v1.0
          volumeMounts:
            - name: mongodb-nfs-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: asd
            - name: NFS_SERVER
              value: 192.168.0.108
            - name: NFS_PATH
              value: /data/mongodb
      volumes:
        - name: mongodb-nfs-root
          nfs:
            server: 192.168.0.108
            path: /data/mongodb
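Apply the manifest, then confirm the provisioner pod is running and its logs show no NFS or permission errors (the label selector matches the Deployment above):
kubectl apply -f nfs-provisioner-deploy.yaml
kubectl -n kube-system get pods -l app=mongodb-nfs
kubectl -n kube-system logs deployment/mongodb-nfs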
(4) Create the StorageClass
vim storageclass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: mongodb-nfs
provisioner: asd
kubectl apply -f storageclass.yaml
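The provisioner value asd must match the PROVISIONER_NAME environment variable set in the Deployment above. Dynamic provisioning can then be smoke-tested with a throwaway claim (test-claim is a hypothetical name used only for this check):
vim test-claim.yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: mongodb-nfs
spec:
  accessModes: [ "ReadWriteOnce" ]
  resources:
    requests:
      storage: 1Mi
kubectl apply -f test-claim.yaml
kubectl get pvc test-claim
kubectl delete -f test-claim.yaml
The claim should reach the Bound state, and a matching directory should appear under /data/mongodb on the NFS server.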
1. Build the MongoDB image
openssl rand -base64 741 > mongodb-keyfile
vim Dockerfile
FROM mongo:3.0.6
ADD mongodb-keyfile /data/config/mongodb-keyfile
RUN chown mongodb:mongodb /data/config/mongodb-keyfile && chmod 600 /data/config/mongodb-keyfile
Build the image:
docker build .
Sending build context to Docker daemon 31.74kB
Step 1/3 : FROM mongo:3.0.6
---> 03e046b4ffb9
Step 2/3 : ADD mongodb-keyfile /data/config/mongodb-keyfile
---> daf64af24565
Step 3/3 : RUN chown mongodb:mongodb /data/config/mongodb-keyfile && chmod 600 /data/config/mongodb-keyfile
---> Running in d00d1db35458
Removing intermediate container d00d1db35458
---> ee1e3a6e5d0f
Successfully built ee1e3a6e5d0f
docker tag ee1e3a6e5d0f 192.168.0.107:80/heosun/mongo:3.0.6
docker push 192.168.0.107:80/heosun/mongo:3.0.6
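Two Secrets are referenced by the manifests but not created elsewhere in this walkthrough: regcred, used by the provisioner Deployment in kube-system to pull from the private registry, and shared-bootstrap-data, mounted by the StatefulSet below to hold the keyfile. If they do not already exist, they can be created roughly as follows (registry credentials are placeholders):
kubectl -n kube-system create secret docker-registry regcred \
  --docker-server=192.168.0.107:80 \
  --docker-username=<registry-user> \
  --docker-password=<registry-password>
kubectl create secret generic shared-bootstrap-data \
  --from-file=internal-auth-mongodb-keyfile=mongodb-keyfile
The key name internal-auth-mongodb-keyfile must match the --keyFile path used by mongod in the StatefulSet (/etc/secrets-volume/internal-auth-mongodb-keyfile).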
1. Create the mongo Services
vim mongo-service.yaml
apiVersion: v1
kind: Service
metadata:
  name: mongo
  labels:
    app: mongo
spec:
  ports:
    - name: mongo
      port: 27017
      targetPort: 27017
  clusterIP: None
  selector:
    role: mongo
---
apiVersion: v1
kind: Service
metadata:
  name: mongo-service
  labels:
    app: mongo
spec:
  ports:
    - name: mongo-http
      port: 27017
      targetPort: 27017
      # 27017 is outside the default NodePort range (30000-32767);
      # extend the apiserver's service-node-port-range or use a port within the range
      nodePort: 27017
  selector:
    role: mongo
  type: NodePort
kubectl apply -f mongo-service.yaml
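A quick check of both Services: mongo is headless (clusterIP None) and provides the stable per-pod DNS names, while mongo-service exposes the replica set on a NodePort:
kubectl get svc mongo mongo-service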
2. Create the mongo StatefulSet
vim mongo-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mongo
spec:
  serviceName: mongo
  replicas: 3
  selector:
    matchLabels:
      role: mongo
      environment: test
      replicaset: MainRepSet
  template:
    metadata:
      labels:
        role: mongo
        environment: test
        replicaset: MainRepSet
    spec:
      containers:
        - name: mongod-container
          image: 192.168.0.107:80/heosun/mongo:3.0.6
          command:
            - "mongod"
            - "--bind_ip"
            - "0.0.0.0"
            - "--replSet"
            - "MainRepSet"
            - "--clusterAuthMode"
            - "keyFile"
            - "--keyFile"
            - "/etc/secrets-volume/internal-auth-mongodb-keyfile"
          resources:
            requests:
              cpu: 0.2
              memory: 200Mi
          ports:
            - containerPort: 27017
          volumeMounts:
            - name: secrets-volume
              readOnly: true
              mountPath: /etc/secrets-volume
            - name: mongodb-persistent-storage-claim
              mountPath: /data/db
      volumes:
        - name: secrets-volume
          secret:
            secretName: shared-bootstrap-data
            defaultMode: 256
  volumeClaimTemplates:
    - metadata:
        name: mongodb-persistent-storage-claim
        annotations:
          volume.beta.kubernetes.io/storage-class: mongodb-nfs
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 15Gi
kubectl apply -f mongo-statefulset.yaml
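The pods are created one at a time; each should end up Running with a Bound PVC provisioned by the mongodb-nfs StorageClass:
kubectl get pods -l role=mongo -w
kubectl get pvc        # expect mongodb-persistent-storage-claim-mongo-0/1/2, all Bound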
3. Initialize the replica set
(1) Get the pods' hostnames
kubectl run busybox -it --image=datica/busybox-dig --restart=Never --rm -- sh
nslookup mongo.default.svc.cluster.local
(2) Initialize the cluster
kubectl exec -it mongo-0 -- /bin/bash
mongo
rs.initiate({_id: "MainRepSet", version: 1, members: [
{ _id: 0, host : "mongo-0.mongo.default.svc.cluster.local:27017" },
{ _id: 1, host : "mongo-1.mongo.default.svc.cluster.local:27017" },
{ _id: 2, host : "mongo-2.mongo.default.svc.cluster.local:27017" }
]});
(3) Create users
use admin;
db.createUser({user:'admin', pwd:'heosun@2020', roles:[{ role: "userAdminAnyDatabase", db: "admin"}]});
Grant cluster administration rights:
db.auth("admin","heosun@2020");
db.grantRolesToUser("admin", ["clusterAdmin"]);
Check the replica set status:
rs.status();
After connecting from outside the cluster, first run:
rs.slaveOk()
db.createUser({user:'heosun',pwd:'heosun@2020',roles: [{role:'readWrite',db:'heosun'}]})
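Finally, the deployment can be tested from outside the cluster through the NodePort service; a sketch, assuming a reachable node IP and the NodePort actually assigned to mongo-service (check with kubectl get svc mongo-service):
mongo --host <node-ip> --port <node-port> -u admin -p 'heosun@2020' --authenticationDatabase admin
The heosun user can be used the same way once created; its authentication database is whichever database was selected when db.createUser() was run.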