Preface:
I only got around to writing this up long after the deployment, so please forgive any omissions. I ran into plenty of pitfalls along the way, some of which I still haven't figured out; pointers from anyone more experienced are very welcome.
I. Environment Preparation
Host | IP
---|---
k8s-master01 | 3.127.10.209
k8s-master02 | 3.127.10.95
k8s-master03 | 3.127.10.66
k8s-node01 | 3.127.10.233
k8s-node02 | 3.127.33.173
harbor | 3.127.33.174
1. Deploy NFS on every k8s node
The export directory is /home/k8s/elasticsearch/storage, as sketched below.
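A minimal sketch of that setup, assuming CentOS-style hosts and that 3.127.10.95 serves the export (that is the NFS_SERVER the provisioner below points at); the export options are illustrative:

# On the NFS server (3.127.10.95)
yum install -y nfs-utils rpcbind
mkdir -p /home/k8s/elasticsearch/storage
echo '/home/k8s/elasticsearch/storage *(rw,sync,no_root_squash)' >> /etc/exports
systemctl enable --now rpcbind nfs-server
exportfs -rv

# On the other nodes, install the client and check the export is visible
yum install -y nfs-utils
showmount -e 3.127.10.95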
2. Install the Provisioner
The upstream image is quay.io/external_storage/nfs-client-provisioner:latest.
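The deployment below pulls this image from the private Harbor registry at 3.127.33.174:8443 instead, so it has to be mirrored there first; roughly (assuming you are logged in to Harbor and the kubernetes project already exists):

docker pull quay.io/external_storage/nfs-client-provisioner:latest
docker tag quay.io/external_storage/nfs-client-provisioner:latest \
  3.127.33.174:8443/kubernetes/nfs-client-provisioner:latest
docker push 3.127.33.174:8443/kubernetes/nfs-client-provisioner:latest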
# cat rbac.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: default
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: default
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
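Apply it and make sure the service account and roles exist:

kubectl apply -f rbac.yaml
kubectl get sa nfs-client-provisioner -n default
kubectl get clusterrole,clusterrolebinding | grep nfs-client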
# cat deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner
  labels:
    app: nfs-client-provisioner
  namespace: default
spec:
  replicas: 1
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: 3.127.33.174:8443/kubernetes/nfs-client-provisioner:latest
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 3.127.10.95
            - name: NFS_PATH
              value: /home/k8s/elasticsearch/storage
      volumes:
        - name: nfs-client-root
          nfs:
            server: 3.127.10.95
            path: /home/k8s/elasticsearch/storage
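Apply the deployment and wait for the provisioner pod to come up:

kubectl apply -f deployment.yaml
kubectl get pods -n default -l app=nfs-client-provisioner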
# cat es-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: managed-nfs-storage
provisioner: fuseim.pri/ifs  # or choose another name; must match the deployment's PROVISIONER_NAME env var
parameters:
  archiveOnDelete: "true"
reclaimPolicy: Retain
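Apply it, and dynamic provisioning can then be smoke-tested with a throwaway PVC (test-claim is just an illustrative name):

kubectl apply -f es-storageclass.yaml

kubectl apply -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: managed-nfs-storage
  resources:
    requests:
      storage: 1Mi
EOF

# The claim should reach Bound, and a backing directory should appear under the NFS export
kubectl get pvc test-claim
kubectl delete pvc test-claim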
3. Deploy the ES Cluster
# cat es-cluster-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: es-svc
  namespace: elk
  labels:
    app: es-cluster-svc
spec:
  selector:
    app: es
  type: ClusterIP
  clusterIP: None
  sessionAffinity: None
  ports:
    - name: outer-port
      port: 9200
      protocol: TCP
      targetPort: 9200
    - name: cluster-port
      port: 9300
      protocol: TCP
      targetPort: 9300
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-cluster
  namespace: elk
  labels:
    app: es-cluster
spec:
  podManagementPolicy: OrderedReady
  replicas: 3
  serviceName: es-svc
  selector:
    matchLabels:
      app: es
  template:
    metadata:
      labels:
        app: es
      namespace: elk
    spec:
      containers:
        - name: es-cluster
          image: 3.127.33.174:8443/elk/elasticsearch:8.1.0
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              memory: "16Gi"
              cpu: "200m"
          ports:
            - name: outer-port
              containerPort: 9200
              protocol: TCP
            - name: cluster-port
              containerPort: 9300
              protocol: TCP
          env:
            - name: cluster.name
              value: "es-cluster"
            - name: node.name
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            # - name: discovery.zen.ping.unicast.hosts
            - name: discovery.seed_hosts
              value: "es-cluster-0.es-svc,es-cluster-1.es-svc,es-cluster-2.es-svc"
            # - name: discovery.zen.minimum_master_nodes
            #   value: "2"
            - name: cluster.initial_master_nodes
              value: "es-cluster-0"
            - name: ES_JAVA_OPTS
              value: "-Xms1024m -Xmx1024m"
            - name: xpack.security.enabled
              value: "false"
          volumeMounts:
            - name: es-volume
              mountPath: /usr/share/elasticsearch/data
      initContainers:
        - name: fix-permissions
          image: 3.127.33.174:8443/elk/busybox:latest
          imagePullPolicy: IfNotPresent
          # the elasticsearch uid and gid are both 1000
          command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
          securityContext:
            privileged: true
          volumeMounts:
            - name: es-volume
              mountPath: /usr/share/elasticsearch/data
        - name: increase-vm-max-map
          image: 3.127.33.174:8443/elk/busybox:latest
          imagePullPolicy: IfNotPresent
          command: ["sysctl", "-w", "vm.max_map_count=655360"]
          securityContext:
            privileged: true
        - name: increase-ulimit
          image: 3.127.33.174:8443/elk/busybox:latest
          imagePullPolicy: IfNotPresent
          command: ["sh", "-c", "ulimit -n 65536"]
          securityContext:
            privileged: true
  volumeClaimTemplates:
    - metadata:
        name: es-volume
        namespace: elk
      spec:
        accessModes:
          - ReadWriteMany
        resources:
          requests:
            storage: "150Gi"
        storageClassName: managed-nfs-storage
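The manifests above target the elk namespace, so create it first if it does not exist, then apply the file:

kubectl create namespace elk
kubectl apply -f es-cluster-svc.yaml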
kubectl get pods -n elk -o wide
Once all three es-cluster pods show Running, the ES cluster is deployed.
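Then confirm the cluster actually formed. A quick check from inside a pod (the official Elasticsearch 8.x images ship curl; security is disabled above, so plain HTTP works):

kubectl -n elk exec es-cluster-0 -- curl -s http://localhost:9200/_cluster/health?pretty
# All three members should be listed, with one elected master
kubectl -n elk exec es-cluster-0 -- curl -s http://localhost:9200/_cat/nodes?v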
4. Deploy Kibana
# cat kibana.yaml
apiVersion: v1
kind: Service
metadata:
  name: kibana-svc
  namespace: elk
  labels:
    app: kibana-svc
spec:
  selector:
    app: kibana-8.1.0
  ports:
    - name: kibana-port
      port: 5601
      protocol: TCP
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana-deployment
  namespace: elk
  labels:
    app: kibana-dep
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana-8.1.0
  template:
    metadata:
      name: kibana
      labels:
        app: kibana-8.1.0
    spec:
      containers:
        - name: kibana
          image: 3.127.33.174:8443/elk/kibana:8.1.0
          imagePullPolicy: IfNotPresent
          resources:
            limits:
              cpu: "1000m"
            requests:
              cpu: "200m"
          ports:
            - name: kibana-web
              containerPort: 5601
              protocol: TCP
          env:
            - name: ELASTICSEARCH_HOSTS
              value: http://es-svc:9200
          readinessProbe:
            initialDelaySeconds: 10
            periodSeconds: 10
            timeoutSeconds: 100
            httpGet:
              port: 5601
---
# Deploy an Ingress so Kibana can be reached via a domain name
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana-ingress
  namespace: elk
  labels:
    app: kibana-ingress
spec:
  ingressClassName: nginx
  defaultBackend:
    service:
      name: kibana-svc
      port:
        name: kibana-port
  rules:
    - host: jszw.kibana.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: kibana-svc
                port:
                  name: kibana-port
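Apply the manifest, check that the Ingress got an address, and then hit Kibana through the domain from a test machine (the controller IP below is a placeholder for whatever your nginx ingress controller exposes):

kubectl apply -f kibana.yaml
kubectl get ingress -n elk

# On the client machine; replace <ingress-controller-ip> with your controller's address
echo '<ingress-controller-ip> jszw.kibana.com' >> /etc/hosts
curl -I http://jszw.kibana.com/

Once Kibana has connected to ES, the request should return a redirect into the Kibana app, and the UI is reachable at http://jszw.kibana.com in a browser.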