Install a ZooKeeper Cluster on OpenShift 3.9

1 Grant the required privileges to the default service account.

[root@oskm1 zookeeper]# oc adm policy add-scc-to-user anyuid system:serviceaccount:bigdata:default
scc "anyuid" added to: ["system:serviceaccount:bigdata:default"]
[root@oskm1 zookeeper]# oc adm policy add-scc-to-user privileged system:serviceaccount:bigdata:default
scc "privileged" added to: ["system:serviceaccount:bigdata:default"]
[root@oskm1 zookeeper]# 
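
To confirm the grants took effect, you can check that the service account now shows up in each SCC's user list (an optional sanity check; the grep is just a convenience):

# The service account should be listed under "users" in both SCCs.
oc get scc anyuid -o yaml | grep system:serviceaccount:bigdata:default
oc get scc privileged -o yaml | grep system:serviceaccount:bigdata:default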

2 Prepare the installation YAML.

Based on the default manifest from the Kubernetes contrib repository:
https://github.com/kubernetes/contrib/tree/master/statefulsets/zookeeper

# vi zookeeper.yaml
---
apiVersion: v1
kind: Service
metadata:
  name: zk-svc
  namespace: bigdata   # the namespace for the ZooKeeper cluster
  labels:
    app: zk-svc
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: zk-cm
  namespace: bigdata
data:
  jvm.heap: "1G"
  tick: "2000"
  init: "10"
  sync: "5"
  client.cnxns: "60"
  snap.retain: "3"
  purge.interval: "0"
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
  namespace: bigdata
spec:
  selector:
    matchLabels:
      app: zk
  minAvailable: 2
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: zk
  namespace: bigdata
spec:
  serviceName: zk-svc
  replicas: 3
  template:
    metadata:
      labels:
        app: zk
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values: 
                    - zk
              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: k8szk
        imagePullPolicy: Always
        #image: gcr.io/google_samples/k8szk:v3 # replaced with the image from our private registry below.
        image: hub.docker.gemii.cc:7443/google_samples/k8szk:v3
        resources:
          requests:
            memory: "2Gi"
            cpu: "500m"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        env:
        - name : ZK_REPLICAS
          value: "3"
        - name : ZK_HEAP_SIZE
          valueFrom:
            configMapKeyRef:
                name: zk-cm
                key: jvm.heap
        - name : ZK_TICK_TIME
          valueFrom:
            configMapKeyRef:
                name: zk-cm
                key: tick
        - name : ZK_INIT_LIMIT
          valueFrom:
            configMapKeyRef:
                name: zk-cm
                key: init
        - name : ZK_SYNC_LIMIT
          valueFrom:
            configMapKeyRef:
                name: zk-cm
                key: sync   # note: the upstream manifest mistakenly read the "tick" key here
        - name : ZK_MAX_CLIENT_CNXNS
          valueFrom:
            configMapKeyRef:
                name: zk-cm
                key: client.cnxns
        - name: ZK_SNAP_RETAIN_COUNT
          valueFrom:
            configMapKeyRef:
                name: zk-cm
                key: snap.retain
        - name: ZK_PURGE_INTERVAL
          valueFrom:
            configMapKeyRef:
                name: zk-cm
                key: purge.interval
        - name: ZK_CLIENT_PORT
          value: "2181"
        - name: ZK_SERVER_PORT
          value: "2888"
        - name: ZK_ELECTION_PORT
          value: "3888"
        command:
        - sh
        - -c
        - zkGenConfig.sh && zkServer.sh start-foreground
        readinessProbe:
          exec:
            command:
            - "zkOk.sh"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - "zkOk.sh"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
      imagePullSecrets:  # pull secret for our private registry.
      - name: bd-sec
      securityContext: {}  # we run as the default user for this installation
#        runAsUser: 1000
#        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      storageClassName: glusterfs-storage # the storage class for the persistent volume claim.
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 10Gi
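
Inside the k8szk image, zkGenConfig.sh renders the ConfigMap values and environment variables above into ZooKeeper's zoo.cfg before starting the server. The result looks roughly like this (an illustrative sketch, not the script's exact output; the server.N hostnames come from the headless zk-svc service):

# Illustrative zoo.cfg derived from the values above.
clientPort=2181
dataDir=/var/lib/zookeeper/data
dataLogDir=/var/lib/zookeeper/log
tickTime=2000
initLimit=10
syncLimit=5
maxClientCnxns=60
autopurge.snapRetainCount=3
autopurge.purgeInterval=0
server.1=zk-0.zk-svc.bigdata.svc.cluster.local:2888:3888
server.2=zk-1.zk-svc.bigdata.svc.cluster.local:2888:3888
server.3=zk-2.zk-svc.bigdata.svc.cluster.local:2888:3888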

3 Install ZooKeeper.

[root@oskm1 zookeeper]# oc create -f zookeeper.yaml 
service "zk-svc" created
configmap "zk-cm" created
poddisruptionbudget "zk-pdb" created
statefulset "zk" created
[root@oskm1 zookeeper]# 
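
StatefulSets start their pods one at a time in ordinal order (zk-0, then zk-1, then zk-2), each waiting for the previous pod to be Running and Ready. You can watch the rollout and confirm that each replica gets its own PersistentVolumeClaim:

# Watch the pods start sequentially; press Ctrl+C to stop.
oc get pods -n bigdata -w

# Each replica should have a PVC bound from the glusterfs-storage class.
oc get pvc -n bigdata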

After a few minutes, check the pod status.

[root@oskm1 zookeeper]# oc get pods -n bigdata
NAME      READY     STATUS    RESTARTS   AGE
zk-0      1/1       Running   0          8m
zk-1      1/1       Running   0          7m
zk-2      1/1       Running   0          6m
[root@oskm1 zookeeper]# 

Now the ZooKeeper cluster is ready!
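
As a final smoke test, you can check each server's role and verify replication by writing a znode on one member and reading it back from another (a minimal sketch; the /smoke-test path and payload are arbitrary):

# Each pod should report "leader" or "follower" mode.
for i in 0 1 2; do oc exec zk-$i -n bigdata -- zkServer.sh status; done

# Write a znode via zk-0 and read it back via zk-1.
oc exec zk-0 -n bigdata -- zkCli.sh create /smoke-test hello
oc exec zk-1 -n bigdata -- zkCli.sh get /smoke-test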
