K8S 1.23.1 Deployment & ElasticSearch/Kafka/MySQL Deployment

k8s & Docker installation

# Set SELinux to permissive mode (effectively disabling it)
sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
# Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
# Disable swap
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
# Configure kernel parameters:
cat <<EOF >/etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
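# The bridge-nf sysctls above only take effect once the br_netfilter module is loaded; a minimal follow-up the snippet above assumes:
modprobe br_netfilter
echo br_netfilter > /etc/modules-load.d/br_netfilter.conf
sysctl --system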

# For offline installation, download the RPMs first with: yum reinstall --downloadonly --downloaddir=~
#wget -O /etc/yum.repos.d/docker-ce.repo https://repo.huaweicloud.com/docker-ce/linux/centos/docker-ce.repo
#sudo sed -i 's+download.docker.com+repo.huaweicloud.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo
wget -O /etc/yum.repos.d/aliyun-docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum makecache fast

sudo yum remove docker docker-common docker-selinux docker-engine
sudo yum install -y yum-utils device-mapper-persistent-data lvm2

# Install Docker; to list the versions available in the yum repo first: yum list docker-ce --showduplicates | sort -r
sudo yum install -y  docker-ce
sudo systemctl enable docker 
sudo systemctl start docker
sudo systemctl status docker
docker --version

# Cgroup driver configuration (overwrite rather than append, so the file stays valid JSON)
cat <<EOF > /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
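# Restart Docker so the cgroup driver change takes effect, then verify; docker info should report "Cgroup Driver: systemd"
sudo systemctl restart docker
docker info | grep -i "cgroup driver"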


# Stop all containers (only if you want a clean slate)
docker stop $(docker ps -q)
# Remove all containers
docker rm $(docker ps -aq)
# Remove all images
docker rmi $(docker images -q)
# Prerequisites for enabling IPVS in kube-proxy
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
# Load the modules
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
# Install the ipset package
sudo yum install ipset -y
# Install the ipvsadm management tool
sudo yum install ipvsadm -y
---------------------------------------------------------------------------------------------------
# Add the Kubernetes yum repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# If you hit "signature could not be verified for kubernetes", the GPG check is failing; set gpgcheck=0 and repo_gpgcheck=0 above to skip it

sudo yum list kubelet  --showduplicates |sort -r
sudo yum install kubelet-1.23.1-0 kubeadm-1.23.1-0 kubectl-1.23.1-0 -y

sudo systemctl enable --now kubelet
---------------------------------------------------------------------------------------------------
# List the images kubeadm needs
kubeadm config images list

# Pull the images via a script (from the Aliyun mirror, useful for offline prep)
#!/bin/bash
for i in k8s.gcr.io/kube-apiserver:v1.23.1 k8s.gcr.io/kube-controller-manager:v1.23.1 k8s.gcr.io/kube-scheduler:v1.23.1 k8s.gcr.io/kube-proxy:v1.23.1 k8s.gcr.io/pause:3.6 k8s.gcr.io/etcd:3.5.1-0 k8s.gcr.io/coredns:v1.8.6; do
  temp=${i#k8s.gcr.io/}
  docker pull registry.aliyuncs.com/google_containers/${temp}
  docker tag registry.aliyuncs.com/google_containers/${temp} k8s.gcr.io/${temp}
  docker rmi registry.aliyuncs.com/google_containers/${temp}
done
# kubeadm 1.23 expects coredns under a nested path (k8s.gcr.io/coredns/coredns), so retag it once more
docker tag k8s.gcr.io/coredns:v1.8.6 k8s.gcr.io/coredns/coredns:v1.8.6
docker rmi k8s.gcr.io/coredns:v1.8.6

# Or configure a registry mirror for the Docker daemon (legacy /etc/sysconfig/docker style)
vim /etc/sysconfig/docker 
OPTIONS='--selinux-enabled --log-driver=journald --registry-mirror=http://xxxx.mirror.aliyuncs.com'



#!/bin/bash
images=(kube-apiserver:v1.23.1 kube-controller-manager:v1.23.1 kube-scheduler:v1.23.1 kube-proxy:v1.23.1 pause:3.6 etcd:3.5.1-0 coredns:v1.8.6)
for imageName in ${images[@]} ; do
  docker pull keveon/$imageName
  docker tag keveon/$imageName k8s.gcr.io/$imageName
  docker rmi keveon/$imageName
done
# Docker Hub repos cannot nest, so coredns is pulled as coredns:v1.8.6 and retagged to the nested name kubeadm expects
docker tag k8s.gcr.io/coredns:v1.8.6 k8s.gcr.io/coredns/coredns:v1.8.6
docker rmi k8s.gcr.io/coredns:v1.8.6

# Manual pull and tag
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.1  k8s.gcr.io/kube-apiserver:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.1  k8s.gcr.io/kube-controller-manager:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.1  k8s.gcr.io/kube-scheduler:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.1  k8s.gcr.io/kube-proxy:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6  k8s.gcr.io/pause:3.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0  k8s.gcr.io/etcd:3.5.1-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6  k8s.gcr.io/coredns/coredns:v1.8.6



# Optionally drop the mirror-tagged copies after retagging, e.g.
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6

# The same pull-and-retag pattern works for other images (assuming the mirror hosts them), e.g. Elasticsearch:
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/elasticsearch:7.16.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/elasticsearch:7.16.2 elasticsearch:7.16.2
# Set the kubelet cgroup driver and kube-proxy mode (daemon.json was already set to systemd above)
cat <<EOF>> /etc/sysconfig/kubelet
KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"
EOF
sudo systemctl restart docker
# On the master, run kubeadm init: pod network 10.244.0.0/16, service network 10.1.0.0/16 (both cluster-internal); the API server advertises the master node's IP
kubeadm init \
--kubernetes-version=1.23.1 \
--apiserver-advertise-address=10.0.20.1 \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16
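# On success kubeadm prints follow-up steps; typically you copy the admin kubeconfig so kubectl works as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config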

---------------------------------------------------------------------------------------------------
# Flannel network add-on
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

# Or the Calico network add-on
kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
wget https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml

vim calico.yaml
# disable IPIP tunneling
- name: CALICO_IPV4POOL_IPIP
  value: "off"
# pick the right host interface
- name: IP_AUTODETECTION_METHOD
  value: "interface=ens.*"

  replicas: 1
  revisionHistoryLimit: 2
# must match the pod CIDR passed to kubeadm init
- name: CALICO_IPV4POOL_CIDR
  value: "10.244.0.0/16"

kubectl apply -f calico.yaml
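# Sanity check: the calico-node DaemonSet should show one Running pod per node
kubectl get pods -n kube-system -l k8s-app=calico-node -o wide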
---------------------------------------------------------------------------------------------------
# Join worker nodes to the cluster with kubeadm join
kubeadm join 10.0.20.1:6443 --token kjgine.g0fafdff1ro505wj \
        --discovery-token-ca-cert-hash sha256:da5a7952ef25b8a4eb77d46aa4765009fd5d9a4f1ced493d5698af361ba5d07d
# If you lose the join command later, regenerate it
kubeadm token create --print-join-command
# To remove a node
kubectl delete node demo01
# Then on the removed node, run
kubeadm reset -f 

# If kubectl reports "The connection to the server localhost:8080 was refused - did you specify the right host or port?"
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile
source /etc/profile
# Check node status
kubectl get nodes
# Check pod status
kubectl get pods --all-namespaces
kubectl get ns
# Check cluster component status
kubectl get cs
kubectl get pods -n kube-system

# Assign node roles
kubectl label node <node-name> node-role.kubernetes.io/worker=worker
kubectl label node --all node-role.kubernetes.io/worker=worker

# To schedule workloads on the master node as well, remove its taint
kubectl taint nodes <node-name> node-role.kubernetes.io/master-

Kuboard dashboard deployment

kubectl apply -f https://addons.kuboard.cn/kuboard/kuboard-v3.yaml
# Alternatively, the manifest below is identical except that it serves the Kuboard images from Huawei Cloud's registry instead of Docker Hub
# kubectl apply -f https://addons.kuboard.cn/kuboard/kuboard-v3-swr.yaml

watch kubectl get pods -n kuboard

Open http://your-node-ip-address:30080 in a browser and log in with the initial credentials:
username: admin
password: Kuboard123


# To uninstall
kubectl delete -f https://addons.kuboard.cn/kuboard/kuboard-v3.yaml
# Run on the master node and on every node labeled k8s.kuboard.cn/role=etcd
rm -rf /usr/share/kuboard

KubeSphere dashboard deployment

kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/kubesphere-installer.yaml
kubectl apply -f https://github.com/kubesphere/ks-installer/releases/download/v3.2.1/cluster-configuration.yaml
# Wait until all pods in the installer namespace are ready: watch kubectl get pods -n kubesphere-system
# Then access the web console via NodePort (IP:30880) with the default account and password (admin/P@88w0rd).

# To install kubernetes_v1.23.1 with KuboardSpray instead
docker run -d \
  --restart=unless-stopped \
  --name=kuboard-spray \
  -p 80:80/tcp \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v ~/kuboard-spray-data:/data \
  eipwork/kuboard-spray:latest-amd64
  # If this image cannot be pulled, try the mirror:
  # swr.cn-east-2.myhuaweicloud.com/kuboard/kuboard-spray:latest-amd64

Kubernetes Dashboard

kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta5/aio/deploy/recommended.yaml
# or the copy mirrored by kuboard.cn:
kubectl apply -f https://kuboard.cn/install-script/k8s-dashboard/v2.0.0-beta5.yaml

ES StatefulSet local deployment

# An NFS share is preferable; here a local path is used
#mkdir -p /data/share/pv/es
mkdir -p /data/es
# Create the namespace
cat <<EOF>> elastic.namespace.yaml 
---
apiVersion: v1
kind: Namespace
metadata:
   name: elasticsearch
---
EOF
kubectl apply -f elastic.namespace.yaml

---------------------------------------------------------------------------------------------------
# StorageClass for the PVCs; kubernetes.io/no-provisioner means no dynamic provisioning, so PVs bind when the first consumer pod is scheduled
cat <<EOF>> sc.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer 
EOF


---------------------------------------------------------------------------------------------------
cat <<EOF>> pv.yaml 
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-storage-pv-1
  namespace: elasticsearch
  labels:
    name: local-storage-pv-1
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /data/es
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - demo01
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-storage-pv-2
  namespace: elasticsearch
  labels:
    name: local-storage-pv-2
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /data/es
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - demo02
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-storage-pv-3
  namespace: elasticsearch
  labels:
    name: local-storage-pv-3
spec:
  capacity:
    storage: 10Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /data/es
  nodeAffinity:
    required:
      nodeSelectorTerms:
      - matchExpressions:
        - key: kubernetes.io/hostname
          operator: In
          values:
          - demo03
#---
#apiVersion: v1
#kind: PersistentVolume
#metadata:
#  name: local-storage-pv-4
#  namespace: elasticsearch
#  labels:
#    name: local-storage-pv-4
#spec:
#  capacity:
#    storage: 1Gi
#  accessModes:
#  - ReadWriteOnce
#  persistentVolumeReclaimPolicy: Retain
#  storageClassName: local-storage
#  local:
#    path: /data/es
#  nodeAffinity:
#    required:
#      nodeSelectorTerms:
#      - matchExpressions:
#        - key: kubernetes.io/hostname
#          operator: In
#          values:
#          - node1
#---
#apiVersion: v1
#kind: PersistentVolume
#metadata:
#  name: local-storage-pv-5
#  namespace: elasticsearch
#  labels:
#    name: local-storage-pv-5
#spec:
#  capacity:
#    storage: 1Gi
#  accessModes:
#  - ReadWriteOnce
#  persistentVolumeReclaimPolicy: Retain
#  storageClassName: local-storage
#  local:
#    path: /data/es
#  nodeAffinity:
#    required:
#      nodeSelectorTerms:
#      - matchExpressions:
#        - key: kubernetes.io/hostname
#          operator: In
#          values:
#          - node2
EOF
# Three PVs (five with the commented-out extras), each pinned to a k8s node via nodeSelectorTerms.

---------------------------------------------------------------------------------------------------
# Create the StatefulSet; ES is a database-like stateful workload, which is what StatefulSet is for
cat <<EOF>> sts.yaml 
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es7-cluster
  namespace: elasticsearch
spec:
  serviceName: elasticsearch7
  replicas: 3
  selector:
    matchLabels:
      app: elasticsearch7
  template:
    metadata:
      labels:
        app: elasticsearch7
    spec:
      containers:
      - name: elasticsearch7
        image: elasticsearch:7.16.2
        resources:
            limits:
              cpu: 1000m
            requests:
              cpu: 100m
        ports:
        - containerPort: 9200
          name: rest
          protocol: TCP
        - containerPort: 9300
          name: inter-node
          protocol: TCP
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
        env:
          - name: cluster.name
            value: k8s-logs
          - name: node.name
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
          - name: discovery.zen.minimum_master_nodes
            value: "2"
          - name: discovery.seed_hosts
            value: "es7-cluster-0.elasticsearch7,es7-cluster-1.elasticsearch7,es7-cluster-2.elasticsearch7"
          - name: cluster.initial_master_nodes
            value: "es7-cluster-0,es7-cluster-1,es7-cluster-2"
          - name: ES_JAVA_OPTS
            value: "-Xms1g -Xmx1g"
      initContainers:
      - name: fix-permissions
        image: busybox:1.35.0
        command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
        securityContext:
          privileged: true
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
      - name: increase-vm-max-map
        image: busybox:1.35.0
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        securityContext:
          privileged: true
      - name: increase-fd-ulimit
        image: busybox:1.35.0
        command: ["sh", "-c", "ulimit -n 65536"]
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "local-storage"
      resources:
        requests:
          storage: 10Gi
EOF
# The ES cluster binds its PVs automatically via volumeClaimTemplates and the storageClass.

---------------------------------------------------------------------------------------------------
# Create a NodePort Service to expose the ES cluster
cat <<EOF>> svc.yaml 
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch7
  namespace: elasticsearch
spec:
  selector:
    app: elasticsearch7
  type: NodePort
  ports:
  - port: 9200
    nodePort: 30002
    targetPort: 9200
EOF

---------------------------------------------------------------------------------------------------
kubectl apply -f sc.yaml
kubectl apply -f pv.yaml
kubectl apply -f sts.yaml
kubectl apply -f svc.yaml
---------------------------------------------------------------------------------------------------
# Current resource state
kubectl get sc
[root@demo01 ~]# kubectl get sc
NAME            PROVISIONER                    RECLAIMPOLICY   VOLUMEBINDINGMODE      ALLOWVOLUMEEXPANSION   AGE
local-storage   kubernetes.io/no-provisioner   Delete          WaitForFirstConsumer   false                  4m45s
#PV
[root@demo01 ~]# kubectl get pv
NAME                 CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS      CLAIM   STORAGECLASS    REASON   AGE
local-storage-pv-1   10Gi       RWO            Retain           Available           local-storage            3m28s
local-storage-pv-2   10Gi       RWO            Retain           Available           local-storage            3m28s
local-storage-pv-3   10Gi       RWO            Retain           Available           local-storage            3m28s
#StatefulSet
[root@demo01 ~]# kubectl get statefulset -n elasticsearch
NAME          READY   AGE
es7-cluster   3/3     57m
[root@master1 tmp]# watch kubectl get pod -n elasticsearch
NAME            READY   STATUS    RESTARTS   AGE
es7-cluster-0   1/1     Running   0          18m
es7-cluster-1   1/1     Running   0          18m
es7-cluster-2   1/1     Running   0          54m
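# Smoke-test the cluster through the NodePort from svc.yaml (<node-ip> is any node's address):
curl http://<node-ip>:30002/_cluster/health?pretty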

NFS deployment

yum -y install nfs-utils rpcbind
mkdir -p /data/k8s
chmod 755 /data/k8s
vim /etc/exports
/data/k8s  10.0.0.0/8(rw,sync,no_root_squash)
systemctl start rpcbind.service
systemctl start nfs.service
journalctl -xlu nfs

# Install the NFS client on every node that mounts the share
yum -y install nfs-utils
systemctl start nfs && systemctl enable nfs
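# From a client node, confirm the export is visible before pointing Kubernetes at it (10.0.21.1 is the NFS server address used in the manifests below):
showmount -e 10.0.21.1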

ElasticSearch NFS deployment

# Create the namespace
cat <<EOF>> elastic.namespace.yaml 
apiVersion: v1
kind: Namespace
metadata:
   name: elasticsearch-ns
EOF
kubectl apply -f elastic.namespace.yaml
kubectl get ns
---------------------------------------------------------------------------------------------------
#svc
# The commented headless Service below is for in-cluster-only access
cat <<EOF>> elasticsearch-svc.yaml
#kind: Service
#apiVersion: v1
#metadata:
#  name: elasticsearch
#  namespace: elasticsearch-ns
#  labels:
#    app: elasticsearch
#spec:
#  selector:
#    app: elasticsearch
#  clusterIP: None
#  ports:
#    - port: 9200
#      name: rest
#    - port: 9300
#      name: inter-node

# NodePort variant to expose the cluster externally
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  namespace: elasticsearch-ns
  labels:
    app: elasticsearch
spec:
  selector:
    app: elasticsearch
  type: NodePort
  ports:
  - port: 9200
    name: rest
    targetPort: 9200
    nodePort: 31200
  - port: 9300
    targetPort: 9300
    nodePort: 31300
    name: inter-node
    
EOF
kubectl apply -f elasticsearch-svc.yaml
kubectl get svc -n elasticsearch-ns
kubectl edit svc elasticsearch  -n elasticsearch-ns
---------------------------------------------------------------------------------------------------
# StatefulSet manifest
cat <<EOF>> elasticsearch-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es
  namespace: elasticsearch-ns
spec:
  serviceName: elasticsearch
  replicas: 3
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels: 
        app: elasticsearch
    spec:
      nodeSelector:
        es: log
      initContainers:
      - name: increase-vm-max-map
        image: busybox:1.35.0
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        securityContext:
          privileged: true
      - name: increase-fd-ulimit
        image: busybox:1.35.0
        command: ["sh", "-c", "ulimit -n 65536"]
        securityContext:
          privileged: true
      containers:
      - name: elasticsearch
        image: docker.elastic.co/elasticsearch/elasticsearch:7.16.2
        ports:
        - name: rest
          containerPort: 9200
        - name: inter
          containerPort: 9300
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 1000m
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
        env:
        - name: cluster.name
          value: k8s-logs
        - name: node.name
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: cluster.initial_master_nodes
          value: "es-0,es-1,es-2"
        - name: discovery.zen.minimum_master_nodes
          value: "2"
        - name: discovery.seed_hosts
          value: "elasticsearch"
        - name: ES_JAVA_OPTS
          value: "-Xms512m -Xmx512m"
        - name: network.host
          value: "0.0.0.0"
  volumeClaimTemplates:
  - metadata:
      name: data
      labels:
        app: elasticsearch
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: es-data-db
      resources:
        requests:
          storage: 50Gi
EOF
# Note the nodeSelector: label every target node with es=log or the ES pods will not schedule; run the command below.
# Alternatively, change es: log to an existing label such as kubernetes.io/worker=worker
kubectl label nodes <node-name> es=log
kubectl get nodes --show-labels
---------------------------------------------------------------------------------------------------
# Create the provisioner, using the nfs-client automatic provisioner
#kubectl explain DaemonSet.apiVersion
cat <<EOF>> nfs-client.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nfs-client-provisioner
  #namespace: elasticsearch-ns
spec:
  replicas: 1
  selector: 
    matchLabels: 
        app: nfs-client-provisioner
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          # The old quay.io image relies on SelfLink (removed in newer Kubernetes); replaced with nfs-subdir-external-provisioner
          #image: quay.io/external_storage/nfs-client-provisioner:latest
          image: registry.cn-shenzhen.aliyuncs.com/shuhui/nfs-subdir-external-provisioner:v4.0.2
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: fuseim.pri/ifs
            - name: NFS_SERVER
              value: 10.0.21.1
            - name: NFS_PATH
              value: /data/k8s
      volumes:
        - name: nfs-client-root
          nfs:
            server: 10.0.21.1
            path: /data/k8s
EOF
---------------------------------------------------------------------------------------------------
# Create the ServiceAccount and bind the required RBAC permissions
cat <<EOF>>  nfs-client-sa.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nfs-client-provisioner
  #namespace: elasticsearch-ns

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
EOF
---------------------------------------------------------------------------------------------------
# Create the StorageClass
cat <<EOF>> elasticsearch-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: es-data-db
  #namespace: elasticsearch-ns
provisioner: fuseim.pri/ifs
EOF
---------------------------------------------------------------------------------------------------
# Deploy
kubectl create -f nfs-client.yaml
kubectl create -f nfs-client-sa.yaml
kubectl create -f elasticsearch-storageclass.yaml 
kubectl get po -n elasticsearch-ns
kubectl describe pvc data-es-0  -n elasticsearch-ns
kubectl logs nfs-client-provisioner-5c66746f46-hrlqm
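# Optional: verify dynamic provisioning with a throwaway PVC (the name test-claim is arbitrary)
cat <<EOF > test-claim.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-claim
spec:
  storageClassName: es-data-db
  accessModes: [ "ReadWriteOnce" ]
  resources:
    requests:
      storage: 1Mi
EOF
kubectl apply -f test-claim.yaml
kubectl get pvc test-claim   # should turn Bound once the provisioner creates a PV
kubectl delete -f test-claim.yaml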
---------------------------------------------------------------------------------------------------
# Now deploy the elasticsearch StatefulSet with kubectl
kubectl create -f elasticsearch-statefulset.yaml
kubectl get sts -n elasticsearch-ns
kubectl get po -n elasticsearch-ns
kubectl get pv
kubectl get pods
kubectl describe pods
kubectl describe pod nfs-client-provisioner-5c66746f46-w277s
# For remote access, forward local port 9200 to an Elasticsearch pod (e.g. es-0)
kubectl port-forward es-0 9200:9200 --namespace=elasticsearch-ns
# Test
curl http://localhost:9200/
curl http://localhost:9200/_cluster/state?pretty
#docker pull quay.io/external_storage/nfs-client-provisioner:latest --registry-mirror=https://docker.mirrors.ustc.edu.cn
---------------------------------------------------------------------------------------------------

kubectl apply -f elastic.namespace.yaml
kubectl apply -f elasticsearch-svc.yaml
kubectl create -f nfs-client.yaml
kubectl create -f nfs-client-sa.yaml
kubectl create -f elasticsearch-storageclass.yaml 
kubectl create -f elasticsearch-statefulset.yaml


kubectl delete -f elasticsearch-statefulset.yaml
kubectl delete -f elasticsearch-storageclass.yaml 
kubectl delete -f nfs-client-sa.yaml
kubectl delete -f nfs-client.yaml
kubectl delete -f elasticsearch-svc.yaml
kubectl delete -f elastic.namespace.yaml

Kibana deployment

cat <<EOF>> kibana.yaml 
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: elasticsearch
  labels:
    app: kibana
spec:
  ports:
  - port: 5601
    targetPort: 5601
    nodePort: 30001
  type: NodePort
  selector:
    app: kibana
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: elasticsearch
  labels:
    app: kibana
spec:
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      nodeSelector:
        node: node2
      containers:
      - name: kibana
        image: kibana:7.16.2
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 1000m
        env:
        - name: ELASTICSEARCH_HOSTS
          value: http://elasticsearch7:9200
        - name: SERVER_PUBLICBASEURL
          value: "http://<node-ip>:30001"
        - name: I18N_LOCALE
          value: zh-CN
        ports: 
        - containerPort: 5601
EOF

kubectl apply -f kibana.yaml 
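# Wait for the pod, then browse to the NodePort defined above (http://<node-ip>:30001):
kubectl get pods -n elasticsearch -l app=kibana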

ZooKeeper & Kafka deployment (leolee32 images)

# Create the namespace
cat <<EOF>> zk-kafka.namespace.yaml 
apiVersion: v1
kind: Namespace
metadata:
   name: zk-kafka
   labels:
     name: zk-kafka
EOF
kubectl apply -f zk-kafka.namespace.yaml

---------------------------------------------------------------------------------------------------
# PVs
cat <<EOF>>  zk_pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  namespace: zk-kafka
  name: zk-data1
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.21.1
    path: /data/k8s/zk/data1
---
apiVersion: v1
kind: PersistentVolume
metadata:
  namespace: zk-kafka
  name: zk-data2
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.21.1
    path: /data/k8s/zk/data2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  namespace: zk-kafka
  name: zk-data3
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.21.1
    path: /data/k8s/zk/data3
EOF
mkdir -p /data/k8s/zk/{data1,data2,data3}
kubectl apply -f zk_pv.yaml

---------------------------------------------------------------------------------------------------
cat <<EOF > zk.yaml
apiVersion: v1
kind: Service
metadata:
  namespace: zk-kafka
  name: zk-hs
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  namespace: zk-kafka
  name: zk-cs
  labels:
    app: zk
spec:
  type: NodePort
  ports:
  - port: 2181
    targetPort: 2181
    name: client
    nodePort: 32181
  selector:
    app: zk
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  namespace: zk-kafka
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: zk-kafka
  name: zok
spec:
  serviceName: zk-hs
  replicas: 3
  selector:
    matchLabels:
      app: zk
  template:
    metadata:
      labels:
        app: zk
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                    - zk
              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: Always
        image: leolee32/kubernetes-library:kubernetes-zookeeper1.0-3.4.10
        resources:
          requests:
            memory: "1Gi"
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 10Gi

EOF
kubectl apply -f zk.yaml
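# Each replica should report a distinct myid (1-3); this mirrors the check from the upstream ZooKeeper StatefulSet tutorial
for i in 0 1 2; do kubectl exec zok-$i -n zk-kafka -- cat /var/lib/zookeeper/data/myid; done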

---------------------------------------------------------------------------------------------------
mkdir -p /data/k8s/kafka/{data1,data2,data3}
cat <<EOF>> kafka_pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  namespace: zk-kafka
  name: kafka-data1
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.21.1
    path: /data/k8s/kafka/data1
---
apiVersion: v1
kind: PersistentVolume
metadata:
  namespace: zk-kafka
  name: kafka-data2
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.21.1
    path: /data/k8s/kafka/data2
---
apiVersion: v1
kind: PersistentVolume
metadata:
  namespace: zk-kafka
  name: kafka-data3
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.21.1
    path: /data/k8s/kafka/data3
EOF
kubectl apply -f kafka_pv.yaml
---------------------------------------------------------------------------------------------------
cat <<EOF>> kafka.yaml
apiVersion: v1
kind: Service
metadata:
  namespace: zk-kafka
  name: kafka-hs
  labels:
    app: kafka
spec:
  ports:
  - port: 1099
    name: jmx
  clusterIP: None
  selector:
    app: kafka
---
apiVersion: v1
kind: Service
metadata:
  namespace: zk-kafka
  name: kafka-cs
  labels:
    app: kafka
spec:
  type: NodePort
  ports:
  - port: 9092
    targetPort: 9092
    name: client
    nodePort: 30092 # must fall within the NodePort range 30000-32767
  selector:
    app: kafka
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  namespace: zk-kafka
  name: kafka-pdb
spec:
  selector:
    matchLabels:
      app: kafka
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  namespace: zk-kafka
  name: kafoka
spec:
  serviceName: kafka-hs
  replicas: 3
  selector:
    matchLabels:
      app: kafka
  template:
    metadata:
      labels:
        app: kafka
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                    - kafka
              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: k8skafka
        imagePullPolicy: Always
        image: leey18/k8skafka
        resources:
          requests:
            memory: "1Gi"
            cpu: "0.5"
        ports:
        - containerPort: 9092
          name: client
        - containerPort: 1099
          name: jmx
        command:
        - sh
        - -c
        - "exec kafka-server-start.sh /opt/kafka/config/server.properties --override broker.id=${HOSTNAME##*-} \
          --override listeners=PLAINTEXT://:9092 \
          --override zookeeper.connect=zok-0.zk-hs.zk-kafka.svc.cluster.local:2181,zok-1.zk-hs.zk-kafka.svc.cluster.local:2181,zok-2.zk-hs.zk-kafka.svc.cluster.local:2181 \
          --override log.dirs=/var/lib/kafka \
          --override auto.create.topics.enable=true \
          --override auto.leader.rebalance.enable=true \
          --override background.threads=10 \
          --override compression.type=producer \
          --override delete.topic.enable=false \
          --override leader.imbalance.check.interval.seconds=300 \
          --override leader.imbalance.per.broker.percentage=10 \
          --override log.flush.interval.messages=9223372036854775807 \
          --override log.flush.offset.checkpoint.interval.ms=60000 \
          --override log.flush.scheduler.interval.ms=9223372036854775807 \
          --override log.retention.bytes=-1 \
          --override log.retention.hours=168 \
          --override log.roll.hours=168 \
          --override log.roll.jitter.hours=0 \
          --override log.segment.bytes=1073741824 \
          --override log.segment.delete.delay.ms=60000 \
          --override message.max.bytes=1000012 \
          --override min.insync.replicas=1 \
          --override num.io.threads=8 \
          --override num.network.threads=3 \
          --override num.recovery.threads.per.data.dir=1 \
          --override num.replica.fetchers=1 \
          --override offset.metadata.max.bytes=4096 \
          --override offsets.commit.required.acks=-1 \
          --override offsets.commit.timeout.ms=5000 \
          --override offsets.load.buffer.size=5242880 \
          --override offsets.retention.check.interval.ms=600000 \
          --override offsets.retention.minutes=1440 \
          --override offsets.topic.compression.codec=0 \
          --override offsets.topic.num.partitions=50 \
          --override offsets.topic.replication.factor=3 \
          --override offsets.topic.segment.bytes=104857600 \
          --override queued.max.requests=500 \
          --override quota.consumer.default=9223372036854775807 \
          --override quota.producer.default=9223372036854775807 \
          --override replica.fetch.min.bytes=1 \
          --override replica.fetch.wait.max.ms=500 \
          --override replica.high.watermark.checkpoint.interval.ms=5000 \
          --override replica.lag.time.max.ms=10000 \
          --override replica.socket.receive.buffer.bytes=65536 \
          --override replica.socket.timeout.ms=30000 \
          --override request.timeout.ms=30000 \
          --override socket.receive.buffer.bytes=102400 \
          --override socket.request.max.bytes=104857600 \
          --override socket.send.buffer.bytes=102400 \
          --override unclean.leader.election.enable=true \
          --override zookeeper.session.timeout.ms=6000 \
          --override zookeeper.set.acl=false \
          --override broker.id.generation.enable=true \
          --override connections.max.idle.ms=600000 \
          --override controlled.shutdown.enable=true \
          --override controlled.shutdown.max.retries=3 \
          --override controlled.shutdown.retry.backoff.ms=5000 \
          --override controller.socket.timeout.ms=30000 \
          --override default.replication.factor=1 \
          --override fetch.purgatory.purge.interval.requests=1000 \
          --override group.max.session.timeout.ms=300000 \
          --override group.min.session.timeout.ms=6000 \
          --override inter.broker.protocol.version=0.10.2-IV0 \
          --override log.cleaner.backoff.ms=15000 \
          --override log.cleaner.dedupe.buffer.size=134217728 \
          --override log.cleaner.delete.retention.ms=86400000 \
          --override log.cleaner.enable=true \
          --override log.cleaner.io.buffer.load.factor=0.9 \
          --override log.cleaner.io.buffer.size=524288 \
          --override log.cleaner.io.max.bytes.per.second=1.7976931348623157E308 \
          --override log.cleaner.min.cleanable.ratio=0.5 \
          --override log.cleaner.min.compaction.lag.ms=0 \
          --override log.cleaner.threads=1 \
          --override log.cleanup.policy=delete \
          --override log.index.interval.bytes=4096 \
          --override log.index.size.max.bytes=10485760 \
          --override log.message.timestamp.difference.max.ms=9223372036854775807 \
          --override log.message.timestamp.type=CreateTime \
          --override log.preallocate=false \
          --override log.retention.check.interval.ms=300000 \
          --override max.connections.per.ip=2147483647 \
          --override num.partitions=1 \
          --override producer.purgatory.purge.interval.requests=1000 \
          --override replica.fetch.backoff.ms=1000 \
          --override replica.fetch.max.bytes=1048576 \
          --override replica.fetch.response.max.bytes=10485760 \
          --override reserved.broker.max.id=1000 "
        env:
        - name: KAFKA_HEAP_OPTS
          value : "-Xmx512M -Xms512M"
        - name: KAFKA_OPTS
          value: "-Dlogging.level=INFO"
        volumeMounts:
        - name: kafkadatadir
          mountPath: /var/lib/kafka
        readinessProbe:
          exec:
           command:
            - sh
            - -c
            - "/opt/kafka/bin/kafka-broker-api-versions.sh --bootstrap-server=localhost:9092"
  volumeClaimTemplates:
  - metadata:
      name: kafkadatadir
    spec:
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 10Gi
EOF
kubectl apply -f kafka.yaml
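# Smoke test from inside a broker once the pods are Ready (the image ships Kafka under /opt/kafka, as the readinessProbe above assumes; the topic name "smoke" is arbitrary):
kubectl exec kafoka-0 -n zk-kafka -- /opt/kafka/bin/kafka-topics.sh --create --zookeeper zok-0.zk-hs.zk-kafka.svc.cluster.local:2181 --topic smoke --partitions 1 --replication-factor 1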
---------------------------------------------------------------------------------------------------

ZooKeeper & Kafka deployment (Deployment-based)

# Create the namespace
cat <<EOF>> zookeeper.namespace.yaml 
apiVersion: v1
kind: Namespace
metadata:
   name: zk-kafka
   labels:
     name: zk-kafka
EOF
kubectl apply -f zookeeper.namespace.yaml

---------------------------------------------------------------------------------------------------
cat <<EOF>> zookeeper-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: zoo1
  namespace: zk-kafka
  labels:
    app: zookeeper-1
spec:
  ports:
  - name: client
    port: 2181
    protocol: TCP
  - name: follower
    port: 2888
    protocol: TCP
  - name: leader
    port: 3888
    protocol: TCP
  selector:
    app: zookeeper-1
---
apiVersion: v1
kind: Service
metadata:
  name: zoo2
  namespace: zk-kafka
  labels:
    app: zookeeper-2
spec:
  ports:
  - name: client
    port: 2181
    protocol: TCP
  - name: follower
    port: 2888
    protocol: TCP
  - name: leader
    port: 3888
    protocol: TCP
  selector:
    app: zookeeper-2
---
apiVersion: v1
kind: Service
metadata:
  name: zoo3
  namespace: zk-kafka
  labels:
    app: zookeeper-3
spec:
  ports:
  - name: client
    port: 2181
    protocol: TCP
  - name: follower
    port: 2888
    protocol: TCP
  - name: leader
    port: 3888
    protocol: TCP
  selector:
    app: zookeeper-3
EOF
kubectl apply -f zookeeper-svc.yaml


---------------------------------------------------------------------------------------------------
cat > zookeeper-sts.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: zookeeper-deployment-1
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper-1
      name: zookeeper-1
  template:
    metadata:
      labels:
        app: zookeeper-1
        name: zookeeper-1
    spec:
      containers:
      - name: zoo1
        image: zookeeper:3.7.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 2181
        env:
        - name: ZOO_MY_ID
          value: "1"
        - name: ZOO_SERVERS
          value: "server.1=0.0.0.0:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: zookeeper-deployment-2
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper-2
      name: zookeeper-2
  template:
    metadata:
      labels:
        app: zookeeper-2
        name: zookeeper-2
    spec:
      containers:
      - name: zoo2
        image: zookeeper:3.7.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 2181
        env:
        - name: ZOO_MY_ID
          value: "2"
        - name: ZOO_SERVERS
          value: "server.1=zoo1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zoo3:2888:3888;2181"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: zookeeper-deployment-3
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: zookeeper-3
      name: zookeeper-3
  template:
    metadata:
      labels:
        app: zookeeper-3
        name: zookeeper-3
    spec:
      containers:
      - name: zoo3
        image: zookeeper:3.7.0
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 2181
        env:
        - name: ZOO_MY_ID
          value: "3"
        - name: ZOO_SERVERS
          value: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181"
EOF

kubectl apply -f zookeeper-sts.yaml
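# The zookeeper:3.7.0 image ships zkServer.sh on the PATH; one pod should report Mode: leader and the other two Mode: follower
for d in 1 2 3; do kubectl exec deploy/zookeeper-deployment-$d -n zk-kafka -- zkServer.sh status; done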

---------------------------------------------------------------------------------------------------
cat <<EOF>> kafka-svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-service-1
  namespace: zk-kafka
  labels:
    app: kafka-service-1
spec:
  type: NodePort
  ports:
  - port: 9092
    name: kafka-service-1
    targetPort: 9092
    nodePort: 30901
    protocol: TCP
  selector:
    app: kafka-service-1
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-service-2
  namespace: zk-kafka
  labels:
    app: kafka-service-2
spec:
  type: NodePort
  ports:
  - port: 9092
    name: kafka-service-2
    targetPort: 9092
    nodePort: 30902
    protocol: TCP
  selector:
    app: kafka-service-2
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-service-3
  namespace: zk-kafka
  labels:
    app: kafka-service-3
spec:
  type: NodePort
  ports:
  - port: 9092
    name: kafka-service-3
    targetPort: 9092
    nodePort: 30903
    protocol: TCP
  selector:
    app: kafka-service-3
EOF

---------------------------------------------------------------------------------------------------
cat <<EOF>> kafka-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-deployment-1
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kafka-service-1
  template:
    metadata:
      labels:
        name: kafka-service-1
        app: kafka-service-1
    spec:
      containers:
      - name: kafka-1
        image: wurstmeister/kafka:2.12-2.4.1
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9092
        env:
        - name: KAFKA_ADVERTISED_PORT
          value: "9092"
        - name: KAFKA_ADVERTISED_HOST_NAME
          value: <kafka-svc1-CLUSTER-IP>
        - name: KAFKA_ZOOKEEPER_CONNECT
          value: zoo1:2181,zoo2:2181,zoo3:2181
        - name: KAFKA_BROKER_ID
          value: "1"
        - name: KAFKA_CREATE_TOPICS
          value: mytopic:2:1
        - name: KAFKA_ADVERTISED_LISTENERS
          value: PLAINTEXT://10.0.22.1:30901
          #value: PLAINTEXT://:30903
        - name: KAFKA_LISTENERS
          value: PLAINTEXT://0.0.0.0:9092
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-deployment-2
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kafka-service-2
  template:
    metadata:
      labels:
        name: kafka-service-2
        app: kafka-service-2
    spec:
      containers:
      - name: kafka-2
        image: wurstmeister/kafka:2.12-2.4.1
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9092
        env:
        - name: KAFKA_ADVERTISED_PORT
          value: "9092"
        - name: KAFKA_ADVERTISED_HOST_NAME
          value: <kafka-svc2-CLUSTER-IP>
        - name: KAFKA_ZOOKEEPER_CONNECT
          value: zoo1:2181,zoo2:2181,zoo3:2181
        - name: KAFKA_BROKER_ID
          value: "2"
        - name: KAFKA_ADVERTISED_LISTENERS
          value: PLAINTEXT://10.0.20.2:30902
          #value: PLAINTEXT://:30903
        - name: KAFKA_LISTENERS
          value: PLAINTEXT://0.0.0.0:9092
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-deployment-3
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      name: kafka-service-3
  template:
    metadata:
      labels:
        name: kafka-service-3
        app: kafka-service-3
    spec:
      containers:
      - name: kafka-3
        image: wurstmeister/kafka:2.12-2.4.1
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 9092
        env:
        - name: KAFKA_ADVERTISED_PORT
          value: "9092"
        - name: KAFKA_ADVERTISED_HOST_NAME
          value: <kafka-svc3-CLUSTER-IP>
        - name: KAFKA_ZOOKEEPER_CONNECT
          value: zoo1:2181,zoo2:2181,zoo3:2181
        - name: KAFKA_BROKER_ID
          value: "3"
        - name: KAFKA_ADVERTISED_LISTENERS
          #value: PLAINTEXT://:30903
          value: PLAINTEXT://10.0.20.3:30903
        - name: KAFKA_LISTENERS
          value: PLAINTEXT://0.0.0.0:9092
EOF

kubectl apply -f kafka-svc.yaml
kubectl apply -f kafka-deployment.yaml

---------------------------------------------------------------------------------------------------
kubectl api-versions
kubectl get pods
kubectl get service
kubectl get pv
kubectl get pvc -n zk-kafka
kubectl describe pvc datadir-zk-0 -n zk-kafka
# Force-remove a stuck PV
kubectl patch pv zk-data3 -p '{"metadata":{"finalizers":null}}'

# Test
kubectl exec -it kafka-deployment-1-xxxxxxxxxxx -n zk-kafka -- /bin/bash
cd /opt/kafka
 
# List topics
bin/kafka-topics.sh --list --zookeeper <any-zookeeper-svc-clusterIP>:2181
# Create a topic manually
bin/kafka-topics.sh --create --zookeeper <zookeeper-svc1-clusterIP>:2181,<zookeeper-svc2-clusterIP>:2181,<zookeeper-svc3-clusterIP>:2181 --topic test --partitions 3 --replication-factor 1
# Produce (Ctrl+D to finish)
bin/kafka-console-producer.sh --broker-list <kafka-svc1-clusterIP>:9092,<kafka-svc2-clusterIP>:9092,<kafka-svc3-clusterIP>:9092 --topic test
# Consume (Ctrl+C to stop)
bin/kafka-console-consumer.sh --bootstrap-server <any-kafka-svc-clusterIP>:9092 --topic test --from-beginning

ZooKeeper & Kafka NFS deployment

# Create the namespace
cat <<EOF>> zookeeper.namespace.yaml 
apiVersion: v1
kind: Namespace
metadata:
   name: zk-kafka
   labels:
     name: zk-kafka
EOF
kubectl apply -f zookeeper.namespace.yaml

---------------------------------------------------------------------------------------------------
cat <<EOF > zookeeper-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk01
  namespace: zk-kafka
  labels:
    app: zk
  annotations:
    volume.beta.kubernetes.io/storage-class: "anything"
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.21.1
    path: "/data/k8s/zk/data1"
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk02
  namespace: zk-kafka
  labels:
    app: zk
  annotations:
    volume.beta.kubernetes.io/storage-class: "anything"
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.21.1
    path: "/data/k8s/zk/data2"
  persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: k8s-pv-zk03
  namespace: zk-kafka
  labels:
    app: zk
  annotations:
    volume.beta.kubernetes.io/storage-class: "anything"
spec:
  capacity:
    storage: 1Gi
  accessModes:
    - ReadWriteOnce
  nfs:
    server: 10.0.21.1
    path: "/data/k8s/zk/data3"
  persistentVolumeReclaimPolicy: Recycle
EOF

---------------------------------------------------------------------------------------------------
cat <<EOF > zookeeper.yaml
apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  namespace: zk-kafka
  labels:
    app: zk
spec:
  selector:
    app: zk
  clusterIP: None
  ports:
    - name: server
      port: 2888
    - name: leader-election
      port: 3888
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  namespace: zk-kafka
  labels:
    app: zk
spec:
  selector:
    app: zk
  type: NodePort
  ports:
    - name: client
      port: 2181
      nodePort: 31811
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
  namespace: zk-kafka
spec:
  serviceName: "zk-hs"
  replicas: 3 # by default is 1
  selector:
    matchLabels:
      app: zk # has to match .spec.template.metadata.labels
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: zk # has to match .spec.selector.matchLabels
    spec:
      containers:
        - name: zk
          imagePullPolicy: Always
          image: guglecontainers/kubernetes-zookeeper:1.0-3.4.10
          ports:
            - containerPort: 2181
              name: client
            - containerPort: 2888
              name: server
            - containerPort: 3888
              name: leader-election
          command:
            - sh
            - -c
            - "start-zookeeper \
        --servers=3 \
        --data_dir=/var/lib/zookeeper/data \
        --data_log_dir=/var/lib/zookeeper/data/log \
        --conf_dir=/opt/zookeeper/conf \
        --client_port=2181 \
        --election_port=3888 \
        --server_port=2888 \
        --tick_time=2000 \
        --init_limit=10 \
        --sync_limit=5 \
        --heap=4G \
        --max_client_cnxns=60 \
        --snap_retain_count=3 \
        --purge_interval=12 \
        --max_session_timeout=40000 \
        --min_session_timeout=4000 \
        --log_level=INFO"
          readinessProbe:
            exec:
              command:
                - sh
                - -c
                - "zookeeper-ready 2181"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          livenessProbe:
            exec:
              command:
                - sh
                - -c
                - "zookeeper-ready 2181"
            initialDelaySeconds: 10
            timeoutSeconds: 5
          volumeMounts:
            - name: datadir
              mountPath: /var/lib/zookeeper
  volumeClaimTemplates:
    - metadata:
        name: datadir
        annotations:
          volume.beta.kubernetes.io/storage-class: "anything"
      spec:
        accessModes: [ "ReadWriteOnce" ]
        resources:
          requests:
            storage: 1Gi
#apiVersion: v1
#kind: Service
#metadata:
#  name: zk-hs
#  labels:
#    app: zk
#spec:
#  ports:
#  - port: 2888
#    name: server
#  - port: 3888
#    name: leader-election
#  clusterIP: None
#  selector:
#    app: zk
#---
#apiVersion: v1
#kind: Service
#metadata:
#  name: zk-cs
#  labels:
#    app: zk
#spec:
#  ports:
#  - port: 2181
#    name: client
#  selector:
#    app: zk
#---
#apiVersion: policy/v1beta1
#kind: PodDisruptionBudget
#metadata:
#  name: zk-pdb
#spec:
#  selector:
#    matchLabels:
#      app: zk
#  maxUnavailable: 1
#---
#apiVersion: apps/v1
#kind: StatefulSet
#metadata:
#  name: zk
#spec:
#  selector:
#    matchLabels:
#      app: zk
#  serviceName: zk-hs
#  replicas: 3
#  updateStrategy:
#    type: RollingUpdate
#  podManagementPolicy: OrderedReady
#  template:
#    metadata:
#      labels:
#        app: zk
#    spec:
#      affinity:
#        podAntiAffinity:
#          requiredDuringSchedulingIgnoredDuringExecution:
#            - labelSelector:
#                matchExpressions:
#                  - key: "app"
#                    operator: In
#                    values:
#                    - zk
#              topologyKey: "kubernetes.io/hostname"
#      containers:
#      - name: kubernetes-zookeeper
#        #imagePullPolicy: Always
#        imagePullPolicy: IfNotPresent
#        image: "registry.k8s.com/test/zookeeper:1.0-3.4.10"
#        resources:
#          requests:
#            memory: "1Gi"
#            cpu: "0.5"
#        ports:
#        - containerPort: 2181
#          name: client
#        - containerPort: 2888
#          name: server
#        - containerPort: 3888
#          name: leader-election
#        command:
#        - sh
#        - -c
#        - "start-zookeeper \
#          --servers=3 \
#          --data_dir=/var/lib/zookeeper/data \
#          --data_log_dir=/var/lib/zookeeper/data/log \
#          --conf_dir=/opt/zookeeper/conf \
#          --client_port=2181 \
#          --election_port=3888 \
#          --server_port=2888 \
#          --tick_time=2000 \
#          --init_limit=10 \
#          --sync_limit=5 \
#          --heap=512M \
#          --max_client_cnxns=60 \
#          --snap_retain_count=3 \
#          --purge_interval=12 \
#          --max_session_timeout=40000 \
#          --min_session_timeout=4000 \
#          --log_level=INFO"
#        readinessProbe:
#          exec:
#            command:
#            - sh
#            - -c
#            - "zookeeper-ready 2181"
#          initialDelaySeconds: 10
#          timeoutSeconds: 5
#        livenessProbe:
#          exec:
#            command:
#            - sh
#            - -c
#            - "zookeeper-ready 2181"
#          initialDelaySeconds: 10
#          timeoutSeconds: 5
#        volumeMounts:
#        - name: datadir
#          mountPath: /var/lib/zookeeper
#      securityContext:
#        # runAsUser: 1000
#        fsGroup: 1000
#  volumeClaimTemplates:
#  - metadata:
#      name: datadir
#    spec:
#      accessModes: [ "ReadWriteOnce" ]
#      resources:
#        requests:
#          storage: 5Gi
EOF
---------------------------------------------------------------------------------------------------
kubectl apply -f zookeeper-pv.yaml
kubectl apply -f zookeeper.yaml
kubectl get pods
kubectl get service

---------------------------------------------------------------------------------------------------
cat <<EOF>> kafka.yaml
apiVersion: v1
kind: Service
metadata:
  name: kafka-service-1
  namespace: zk-kafka
  labels:
    app: kafka-service-1
spec:
  type: NodePort
  ports:
    - port: 9092
      name: kafka-service-1
      targetPort: 9092
      nodePort: 30901
      protocol: TCP
  selector:
    app: kafka-1
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-service-2
  namespace: zk-kafka
  labels:
    app: kafka-service-2
spec:
  type: NodePort
  ports:
    - port: 9092
      name: kafka-service-2
      targetPort: 9092
      nodePort: 30902
      protocol: TCP
  selector:
    app: kafka-2
---
apiVersion: v1
kind: Service
metadata:
  name: kafka-service-3
  namespace: zk-kafka
  labels:
    app: kafka-service-3
spec:
  type: NodePort
  ports:
    - port: 9092
      name: kafka-service-3
      targetPort: 9092
      nodePort: 30903
      protocol: TCP
  selector:
    app: kafka-3
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-deployment-1
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka-1
  template:
    metadata:
      labels:
        app: kafka-1
    spec:
      containers:
        - name: kafka-1
          image: wurstmeister/kafka:2.12-2.4.1
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9092
          env:
            - name: KAFKA_ZOOKEEPER_CONNECT
              value: zk-0.zk-hs.zk-kafka.svc.cluster.local:2181,zk-1.zk-hs.zk-kafka.svc.cluster.local:2181,zk-2.zk-hs.zk-kafka.svc.cluster.local:2181
            - name: KAFKA_BROKER_ID
              value: "1"
            - name: KAFKA_CREATE_TOPICS
              value: mytopic:2:1   # format is topic:partitions:replicas
            - name: KAFKA_LISTENERS
              value: PLAINTEXT://0.0.0.0:9092
            - name: KAFKA_ADVERTISED_PORT
              value: "30901"   # matches kafka-service-1's nodePort, so the broker advertises hostIP:30901
            - name: KAFKA_ADVERTISED_HOST_NAME
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          volumeMounts:
            - name: datadir
              mountPath: /var/lib/kafka
      volumes:
        - name: datadir
          nfs:
            server: 10.0.21.1
            path: "/data/k8s/kafka/pv1"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-deployment-2
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka-2
  template:
    metadata:
      labels:
        app: kafka-2
    spec:
      containers:
        - name: kafka-2
          image: wurstmeister/kafka:2.12-2.4.1
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9092
          env:
            - name: KAFKA_ZOOKEEPER_CONNECT
              value: zk-0.zk-hs.zk-kafka.svc.cluster.local:2181,zk-1.zk-hs.zk-kafka.svc.cluster.local:2181,zk-2.zk-hs.zk-kafka.svc.cluster.local:2181
            - name: KAFKA_BROKER_ID
              value: "2"
            - name: KAFKA_LISTENERS
              value: PLAINTEXT://0.0.0.0:9092
            - name: KAFKA_ADVERTISED_PORT
              value: "30902"
            - name: KAFKA_ADVERTISED_HOST_NAME
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          volumeMounts:
            - name: datadir
              mountPath: /var/lib/kafka
      volumes:
        - name: datadir
          nfs:
            server: 10.0.21.1
            path: "/data/k8s/kafka/pv2"
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-deployment-3
  namespace: zk-kafka
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kafka-3
  template:
    metadata:
      labels:
        app: kafka-3
    spec:
      containers:
        - name: kafka-3
          image: wurstmeister/kafka:2.12-2.4.1
          imagePullPolicy: IfNotPresent
          ports:
            - containerPort: 9092
          env:
            - name: KAFKA_ZOOKEEPER_CONNECT
              value: zk-0.zk-hs.zk-kafka.svc.cluster.local:2181,zk-1.zk-hs.zk-kafka.svc.cluster.local:2181,zk-2.zk-hs.zk-kafka.svc.cluster.local:2181
            - name: KAFKA_BROKER_ID
              value: "3"
            - name: KAFKA_LISTENERS
              value: PLAINTEXT://0.0.0.0:9092
            - name: KAFKA_ADVERTISED_PORT
              value: "30903"
            - name: KAFKA_ADVERTISED_HOST_NAME
              valueFrom:
                fieldRef:
                  fieldPath: status.hostIP
          volumeMounts:
            - name: datadir
              mountPath: /var/lib/kafka
      volumes:
        - name: datadir
          nfs:
            server: 10.0.21.1
            path: "/data/k8s/kafka/pv3"
EOF

---------------------------------------------------------------------------------------------------
# On the NFS server (10.0.21.1): create the broker data directories mounted by the Deployments
mkdir -p /data/k8s/kafka/{pv1,pv2,pv3}
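# Those directories must also be exported over NFS before the pods can mount them.
# A minimal sketch for the NFS host; the client subnet and export options are
# assumptions - match them to your cluster nodes.
cat <<EOF >> /etc/exports
/data/k8s/kafka/pv1 10.0.0.0/8(rw,sync,no_root_squash)
/data/k8s/kafka/pv2 10.0.0.0/8(rw,sync,no_root_squash)
/data/k8s/kafka/pv3 10.0.0.0/8(rw,sync,no_root_squash)
EOF
exportfs -ra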
kubectl apply -f kafka.yaml
kubectl get pods
kubectl get service
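# Smoke test: KAFKA_CREATE_TOPICS should have auto-created mytopic (2 partitions,
# 1 replica). A sketch, assuming the standard Kafka CLI scripts in the
# wurstmeister/kafka image are on PATH.
kubectl -n zk-kafka exec deploy/kafka-deployment-1 -- \
  kafka-topics.sh --zookeeper zk-0.zk-hs.zk-kafka.svc.cluster.local:2181 --list
# produce one message, then read it back
kubectl -n zk-kafka exec deploy/kafka-deployment-1 -- sh -c \
  'echo hello | kafka-console-producer.sh --broker-list localhost:9092 --topic mytopic'
kubectl -n zk-kafka exec deploy/kafka-deployment-1 -- \
  kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic mytopic --from-beginning --max-messages 1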

---------------------------------------------------------------------------------------------------

MySQL

# Create the namespace
cat <<EOF>> mysql.namespace.yaml 
apiVersion: v1
kind: Namespace
metadata:
  name: mysql
  labels:
    name: mysql
EOF

---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-pv.yaml
#apiVersion: v1
#kind: PersistentVolume
#metadata:
#  name: model-db-pv
#spec:
#  storageClassName: ml-pv1
#  accessModes:
#  - ReadWriteOnce
#  capacity:
#    storage: 5Gi
#  hostPath:
#    path: /home/work/share/model-db
#  persistentVolumeReclaimPolicy: Retain
#  volumeMode: Filesystem

apiVersion: v1
kind: PersistentVolume
metadata:
  name: model-db-pv   # PersistentVolumes are cluster-scoped; a namespace field is ignored
spec:
  storageClassName: ml-pv1
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 5Gi
  persistentVolumeReclaimPolicy: Retain
  nfs:
    path: /data/k8s/mysql
    server: 10.0.21.1
  volumeMode: Filesystem
EOF
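
# As with the Kafka volumes, this path must exist and be exported on the NFS
# server (10.0.21.1) before the pod can mount it:
mkdir -p /data/k8s/mysql
echo '/data/k8s/mysql 10.0.0.0/8(rw,sync,no_root_squash)' >> /etc/exports   # subnet is an assumption
exportfs -ra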

---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: model-db-pv-claim
  namespace: mysql
spec:
  storageClassName: ml-pv1
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi
EOF

---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-configMap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: model-db-config
  namespace: mysql
  labels:
    app: model-db
data:
  my.cnf: |-
    [client]
    default-character-set=utf8mb4
    [mysql]
    default-character-set=utf8mb4
    [mysqld]
    max_connections = 2000
    secure_file_priv=/var/lib/mysql
    sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
EOF

---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: model-db
  namespace: mysql
spec:
  replicas: 1
  selector:
    matchLabels:
      app: model-mysql
  template:
    metadata:
      labels:
        app: model-mysql
        namespace: mysql
    spec:
      containers:
      - args:
        - --datadir
        - /var/lib/mysql/datadir
        env:
          - name: MYSQL_ROOT_PASSWORD
            value: root
          - name: MYSQL_USER
            value: user
          - name: MYSQL_PASSWORD
            value: user
        image: mysql:8.0.27
        name: model-db-container
        ports:
        - containerPort: 3306
          name: dbapi
        volumeMounts:
        - mountPath: /var/lib/mysql
          name: model-db-storage
        - name: config
          mountPath: /etc/mysql/conf.d/my.cnf
          subPath: my.cnf
        - name: localtime          # was declared under volumes but never mounted
          mountPath: /etc/localtime
          readOnly: true
      volumes:
      - name: model-db-storage
        persistentVolumeClaim:
          claimName: model-db-pv-claim
      - name: config
        configMap:
          name: model-db-config
      - name: localtime
        hostPath:
          type: File
          path: /etc/localtime
EOF
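
# Plain-text passwords in the Deployment are fine for a test bench; the idiomatic
# alternative is a Secret. A sketch (the secret name mysql-root-pass is hypothetical):
kubectl -n mysql create secret generic mysql-root-pass --from-literal=password=root
# ...then reference it from the container env instead of a literal value:
#          - name: MYSQL_ROOT_PASSWORD
#            valueFrom:
#              secretKeyRef:
#                name: mysql-root-pass
#                key: password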

---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-svc.yaml
#ClusterIP: only reachable from inside the cluster
#apiVersion: v1
#kind: Service
#metadata:
#  labels:
#    app: model-mysql
#  name: model-db-svc
#  namespace: mysql
#spec:
#  type: ClusterIP
#  ports:
#  - port: 3306
#    protocol: TCP
#    targetPort: 3306
#  selector:
#    app: model-mysql

apiVersion: v1
kind: Service
metadata:
  labels:
    app: model-mysql
  name: model-db-svc
  namespace: mysql
spec:
  type: NodePort
  ports:
  - name: mysql
    port: 3306
    nodePort: 30336
    protocol: TCP
    targetPort: 3306
  selector:
    app: model-mysql
EOF
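
# If you don't want 3306 exposed on every node, kubectl port-forward is a lighter
# alternative to the NodePort for ad-hoc access:
kubectl -n mysql port-forward svc/model-db-svc 3306:3306 &
mysql -h 127.0.0.1 -P 3306 -uroot -proot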

---------------------------------------------------------------------------------------------------
kubectl apply -f mysql.namespace.yaml
kubectl apply -f mysql-pv.yaml    # PVs are cluster-scoped, so no -n is needed
kubectl get pv
kubectl apply -f mysql-pvc.yaml -n mysql
kubectl get pvc -n mysql
kubectl apply -f mysql-configMap.yaml -n mysql
kubectl apply -f mysql-deployment.yaml -n mysql
kubectl apply -f mysql-svc.yaml -n mysql
kubectl describe pvc model-db-pv-claim -n mysql
---------------------------------------------------------------------------------------------------
kubectl get pods -n mysql
kubectl exec -it -n mysql model-db-569b698fb8-qc62f -- bash
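
# Quick check that the my.cnf from the ConfigMap took effect (root credentials
# come from the Deployment above):
kubectl -n mysql exec deploy/model-db -- \
  mysql -uroot -proot -e "SHOW VARIABLES LIKE 'max_connections';"   # expect 2000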

# After creating the test_db database in MySQL and updating the application's environment variables, restarting test_db failed with "Access denied for user 'root'@'172.17.0.1' (using password: NO)" - yet Navicat could still connect to MySQL directly.
# Anyone familiar with MySQL should find this easy to pin down: since MySQL 8.0 the default authentication plugin is caching_sha2_password (see "MySQL 8.0.4: New Default Authentication Plugin: caching_sha2_password"), which older clients and drivers do not support.
# Option 1: fix the application's datasource config (spring.datasource.password: ***)
# Option 2: switch the account back to mysql_native_password:
mysql -uroot -proot
USE mysql; 
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY 'root';
FLUSH PRIVILEGES;
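
# With the plugin switched, an external NodePort connection should now work
# (<node-ip> is any cluster node; 30336 comes from mysql-svc above):
mysql -h <node-ip> -P 30336 -uroot -proot -e "SELECT VERSION();"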

FROM kamisamak.com
