k8s & Docker Installation
sudo setenforce 0
sudo sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
systemctl stop firewalld
systemctl disable firewalld
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab
cat <<EOF >/etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
modprobe br_netfilter
sysctl --system
wget -O /etc/yum.repos.d/aliyun-docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
sudo yum makecache fast
sudo yum remove docker docker-common docker-selinux docker-engine
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum install -y docker-ce
sudo systemctl enable docker
sudo systemctl start docker
sudo systemctl status docker
docker --version
cat <<EOF > /etc/docker/daemon.json
{
"exec-opts": ["native.cgroupdriver=systemd"]
}
EOF
docker stop $(docker ps -q)
docker rm $(docker ps -aq)
docker rmi $(docker images -q)
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
sudo yum install ipset -y
sudo yum install ipvsadm -y
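Note: on kernels 4.19 and newer the nf_conntrack_ipv4 module was merged into nf_conntrack, so the modprobe above fails there; a minimal adjustment, assuming such a kernel:
modprobe -- nf_conntrack
lsmod | grep -e ip_vs -e nf_conntrack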
---------------------------------------------------------------------------------------------------
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
sudo yum list kubelet --showduplicates |sort -r
sudo yum install kubelet-1.23.1-0 kubeadm-1.23.1-0 kubectl-1.23.1-0 -y
sudo systemctl enable --now kubelet
---------------------------------------------------------------------------------------------------
kubeadm config images list
for i in k8s.gcr.io/kube-apiserver:v1.23.1 k8s.gcr.io/kube-controller-manager:v1.23.1 k8s.gcr.io/kube-scheduler:v1.23.1 k8s.gcr.io/kube-proxy:v1.23.1 k8s.gcr.io/pause:3.6 k8s.gcr.io/etcd:3.5.1-0 k8s.gcr.io/coredns:v1.8.6; do
temp=${i#k8s.gcr.io/}
docker pull registry.aliyuncs.com/google_containers/${temp}
docker tag registry.aliyuncs.com/google_containers/${temp} k8s.gcr.io/${temp}
docker rmi registry.aliyuncs.com/google_containers/${temp};
done;
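Note: kubeadm 1.23 lists the CoreDNS image as k8s.gcr.io/coredns/coredns:v1.8.6, while the Aliyun mirror serves it flat as google_containers/coredns:v1.8.6, so the loop above tags it under the wrong name; a small fix-up, assuming the loop already ran:
# re-tag the flat coredns image under the nested path kubeadm expects
docker tag k8s.gcr.io/coredns:v1.8.6 k8s.gcr.io/coredns/coredns:v1.8.6
docker rmi k8s.gcr.io/coredns:v1.8.6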
vim /etc/sysconfig/docker
OPTIONS='--selinux-enabled --log-driver=journald --registry-mirror=http://xxxx.mirror.aliyuncs.com'
images=(kube-apiserver:v1.23.1 kube-controller-manager:v1.23.1 kube-scheduler:v1.23.1 kube-proxy:v1.23.1 pause:3.6 etcd:3.5.1-0 coredns/coredns:v1.8.6)
for imageName in ${images[@]} ; do
docker pull keveon/$imageName
docker tag keveon/$imageName k8s.gcr.io/$imageName
docker rmi keveon/$imageName
done
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.1
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-apiserver:v1.23.1 k8s.gcr.io/kube-apiserver:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-controller-manager:v1.23.1 k8s.gcr.io/kube-controller-manager:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-scheduler:v1.23.1 k8s.gcr.io/kube-scheduler:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/kube-proxy:v1.23.1 k8s.gcr.io/kube-proxy:v1.23.1
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6 k8s.gcr.io/pause:3.6
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/etcd:3.5.1-0 k8s.gcr.io/etcd:3.5.1-0
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/coredns:v1.8.6 k8s.gcr.io/coredns/coredns:v1.8.6
docker rmi registry.cn-hangzhou.aliyuncs.com/google_containers/pause:3.6
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/elasticsearch:7.16.2
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/elasticsearch:7.16.2 elasticsearch:7.16.2
cat <<EOF>> /etc/sysconfig/kubelet
KUBELET_CGROUP_ARGS="--cgroup-driver=systemd"
KUBE_PROXY_MODE="ipvs"
EOF
sudo systemctl restart docker
kubeadm init \
--kubernetes-version=1.23.1 \
--apiserver-advertise-address=10.0.20.1 \
--service-cidr=10.1.0.0/16 \
--pod-network-cidr=10.244.0.0/16
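If the init succeeds, kubeadm prints the usual kubeconfig setup; roughly:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config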
---------------------------------------------------------------------------------------------------
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
kubectl apply -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml
wget https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
vim calico.yaml
- name: CALICO_IPV4POOL_IPIP
value: "off"
- name: IP_AUTODETECTION_METHOD
value: "interface=ens.*"
replicas: 1
revisionHistoryLimit: 2
- name: CALICO_IPV4POOL_CIDR
value: "10.244.0.0/16"
kubectl apply -f calico.yaml
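A quick check that the CNI is up before joining workers (the k8s-app=calico-node label is assumed from the stock calico manifest):
kubectl get pods -n kube-system -l k8s-app=calico-node -o wide
kubectl get nodes -o wide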
---------------------------------------------------------------------------------------------------
kubeadm join 10.0.20.1:6443 --token kjgine.g0fafdff1ro505wj \
--discovery-token-ca-cert-hash sha256:da5a7952ef25b8a4eb77d46aa4765009fd5d9a4f1ced493d5698af361ba5d07d
kubeadm token create --print-join-command
kubectl delete node demo01
kubeadm reset -f
echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile
source /etc/profile
kubectl get nodes
kubectl get pods --all-namespaces
kubectl get ns
kubectl get cs
kubectl get pods -nkube-system
kubectl label node <node-name> node-role.kubernetes.io/worker=worker
kubectl label node --all node-role.kubernetes.io/worker=worker
kubectl taint nodes <node-name> node-role.kubernetes.io/master-
Kuboard Dashboard Deployment
kubectl apply -f https://addons.kuboard.cn/kuboard/kuboard-v3.yaml
watch kubectl get pods -n kuboard
Open http://your-node-ip-address:30080 in a browser
Log in with the initial username and password
Username: admin
Password: Kuboard123
kubectl delete -f https://addons.kuboard.cn/kuboard/kuboard-v3.yaml
rm -rf /usr/share/kuboard
Kuboard-Spray Deployment
docker run -d \
--restart=unless-stopped \
--name=kuboard-spray \
-p 80:80/tcp \
-v /var/run/docker.sock:/var/run/docker.sock \
-v ~/kuboard-spray-data:/data \
eipwork/kuboard-spray:latest-amd64
Kubernetes Dashboard
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta5/aio/deploy/recommended.yaml
kubectl apply -f https://kuboard.cn/install-script/k8s-dashboard/v2.0.0-beta5.yaml
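To log in to the Dashboard, a cluster-admin ServiceAccount token is usually created first; a minimal sketch, assuming the manifest's default kubernetes-dashboard namespace:
kubectl create serviceaccount dashboard-admin -n kubernetes-dashboard
kubectl create clusterrolebinding dashboard-admin --clusterrole=cluster-admin --serviceaccount=kubernetes-dashboard:dashboard-admin
kubectl -n kubernetes-dashboard describe secret $(kubectl -n kubernetes-dashboard get secret | grep dashboard-admin | awk '{print $1}')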
ES StatefulSet Local Deployment
mkdir -p /data/es
cat <<EOF>> elastic.namespace.yaml
---
apiVersion: v1
kind: Namespace
metadata:
name: elasticsearch
---
EOF
kubectl apply -f elastic.namespace.yaml
---------------------------------------------------------------------------------------------------
cat <<EOF>> sc.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: local-storage
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: local-storage-pv-1
namespace: elasticsearch
labels:
name: local-storage-pv-1
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
local:
path: /data/es
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- demo01
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: local-storage-pv-2
namespace: elasticsearch
labels:
name: local-storage-pv-2
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
local:
path: /data/es
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- demo02
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: local-storage-pv-3
namespace: elasticsearch
labels:
name: local-storage-pv-3
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: local-storage
local:
path: /data/es
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- demo03
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> sts.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: es7-cluster
namespace: elasticsearch
spec:
serviceName: elasticsearch7
replicas: 3
selector:
matchLabels:
app: elasticsearch7
template:
metadata:
labels:
app: elasticsearch7
spec:
containers:
- name: elasticsearch7
image: elasticsearch:7.16.2
resources:
limits:
cpu: 1000m
requests:
cpu: 100m
ports:
- containerPort: 9200
name: rest
protocol: TCP
- containerPort: 9300
name: inter-node
protocol: TCP
volumeMounts:
- name: data
mountPath: /usr/share/elasticsearch/data
env:
- name: cluster.name
value: k8s-logs
- name: node.name
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: discovery.zen.minimum_master_nodes
value: "2"
- name: discovery.seed_hosts
value: "es7-cluster-0.elasticsearch7,es7-cluster-1.elasticsearch7,es7-cluster-2.elasticsearch7"
- name: cluster.initial_master_nodes
value: "es7-cluster-0,es7-cluster-1,es7-cluster-2"
- name: ES_JAVA_OPTS
value: "-Xms1g -Xmx1g"
initContainers:
- name: fix-permissions
image: busybox:1.35.0
command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
securityContext:
privileged: true
volumeMounts:
- name: data
mountPath: /usr/share/elasticsearch/data
- name: increase-vm-max-map
image: busybox:1.35.0
command: ["sysctl", "-w", "vm.max_map_count=262144"]
securityContext:
privileged: true
- name: increase-fd-ulimit
image: busybox:1.35.0
command: ["sh", "-c", "ulimit -n 65536"]
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: "local-storage"
resources:
requests:
storage: 10Gi
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> svc.yaml
apiVersion: v1
kind: Service
metadata:
name: elasticsearch7
namespace: elasticsearch
spec:
selector:
app: elasticsearch7
type: NodePort
ports:
- port: 9200
nodePort: 30002
targetPort: 9200
EOF
---------------------------------------------------------------------------------------------------
kubectl apply -f sc.yaml
kubectl apply -f pv.yaml
kubectl apply -f sts.yaml
kubectl apply -f svc.yaml
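Once the pods are Running, the cluster can be checked through the NodePort from svc.yaml (sketch; <node-ip> is a placeholder):
curl http://<node-ip>:30002
curl http://<node-ip>:30002/_cluster/health?pretty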
---------------------------------------------------------------------------------------------------
kubectl get sc
[root@demo01 ~]# kubectl get sc
NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
local-storage kubernetes.io/no-provisioner Delete WaitForFirstConsumer false 4m45s
[root@demo01 ~]# kubectl get pv
NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE
local-storage-pv-1 1Gi RWO Retain Available local-storage 3m28s
local-storage-pv-2 1Gi RWO Retain Available local-storage 3m28s
local-storage-pv-3 1Gi RWO Retain Available local-storage 3m28s
local-storage-pv-4 1Gi RWO Retain Available local-storage 3m28s
local-storage-pv-5 1Gi RWO Retain Available local-storage 3m28s
[root@demo01 ~]# kubectl get sts -n elasticsearch
NAME READY AGE
es7-cluster 3/3 57m
[root@master1 tmp]# kubectl get pods -n elasticsearch
NAME READY STATUS RESTARTS AGE
es7-cluster-0 1/1 Running 0 18m
es7-cluster-1 1/1 Running 0 18m
es7-cluster-2 1/1 Running 0 54m
NFS Deployment
# NFS server
yum -y install nfs-utils rpcbind
mkdir -p /data/k8s
chmod 755 /data/k8s
vim /etc/exports
/data/k8s 10.0.0.0/8(rw,sync,no_root_squash)
systemctl start rpcbind.service
systemctl start nfs.service
journalctl -xlu nfs
# NFS client (every k8s node)
yum -y install nfs-utils
systemctl start nfs && systemctl enable nfs
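To reload exports after editing /etc/exports and verify from a client (sketch; 10.0.21.1 is the server address used by the manifests below):
# on the NFS server
exportfs -rv
# on a client: list exports and do a test mount
showmount -e 10.0.21.1
mount -t nfs 10.0.21.1:/data/k8s /mnt && umount /mnt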
Elasticsearch NFS Deployment
cat <<EOF>> elastic.namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: elasticsearch-ns
EOF
kubectl apply -f elastic.namespace.yaml
kubectl get ns
---------------------------------------------------------------------------------------------------
cat <<EOF>> elasticsearch-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: elasticsearch
namespace: elasticsearch-ns
labels:
app: elasticsearch
spec:
selector:
app: elasticsearch
type: NodePort
ports:
- port: 9200
name: rest
targetPort: 9200
nodePort: 31200
- port: 9300
targetPort: 9300
nodePort: 31300
name: inter-node
EOF
kubectl apply -f elasticsearch-svc.yaml
kubectl get svc -n elasticsearch-ns
kubectl edit svc elasticsearch -n elasticsearch-ns
---------------------------------------------------------------------------------------------------
cat <<EOF>> elasticsearch-statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: es
namespace: elasticsearch-ns
spec:
serviceName: elasticsearch
replicas: 3
selector:
matchLabels:
app: elasticsearch
template:
metadata:
labels:
app: elasticsearch
spec:
nodeSelector:
es: log
initContainers:
- name: increase-vm-max-map
image: busybox:1.35.0
command: ["sysctl", "-w", "vm.max_map_count=262144"]
securityContext:
privileged: true
- name: increase-fd-ulimit
image: busybox:1.35.0
command: ["sh", "-c", "ulimit -n 65536"]
securityContext:
privileged: true
containers:
- name: elasticsearch
image: docker.elastic.co/elasticsearch/elasticsearch:7.16.2
ports:
- name: rest
containerPort: 9200
- name: inter
containerPort: 9300
resources:
limits:
cpu: 1000m
requests:
cpu: 1000m
volumeMounts:
- name: data
mountPath: /usr/share/elasticsearch/data
env:
- name: cluster.name
value: k8s-logs
- name: node.name
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: cluster.initial_master_nodes
value: "es-0,es-1,es-2"
- name: discovery.zen.minimum_master_nodes
value: "2"
- name: discovery.seed_hosts
value: "elasticsearch"
- name: ES_JAVA_OPTS
value: "-Xms512m -Xmx512m"
- name: network.host
value: "0.0.0.0"
volumeClaimTemplates:
- metadata:
name: data
labels:
app: elasticsearch
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: es-data-db
resources:
requests:
storage: 50Gi
EOF
kubectl label nodes <node-name> es=log
kubectl get nodes --show-labels
---------------------------------------------------------------------------------------------------
cat <<EOF>> nfs-client.yaml
kind: Deployment
apiVersion: apps/v1
metadata:
name: nfs-client-provisioner
spec:
replicas: 1
selector:
matchLabels:
app: nfs-client-provisioner
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-client-provisioner
spec:
serviceAccountName: nfs-client-provisioner
containers:
- name: nfs-client-provisioner
image: registry.cn-shenzhen.aliyuncs.com/shuhui/nfs-subdir-external-provisioner:v4.0.2
volumeMounts:
- name: nfs-client-root
mountPath: /persistentvolumes
env:
- name: PROVISIONER_NAME
value: fuseim.pri/ifs
- name: NFS_SERVER
value: 10.0.21.1
- name: NFS_PATH
value: /data/k8s
volumes:
- name: nfs-client-root
nfs:
server: 10.0.21.1
path: /data/k8s
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> nfs-client-sa.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: nfs-client-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: nfs-client-provisioner-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["create", "delete", "get", "list", "watch", "patch", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: run-nfs-client-provisioner
subjects:
- kind: ServiceAccount
name: nfs-client-provisioner
namespace: default
roleRef:
kind: ClusterRole
name: nfs-client-provisioner-runner
apiGroup: rbac.authorization.k8s.io
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> elasticsearch-storageclass.yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: es-data-db
provisioner: fuseim.pri/ifs
EOF
---------------------------------------------------------------------------------------------------
kubectl create -f nfs-client.yaml
kubectl create -f nfs-client-sa.yaml
kubectl create -f elasticsearch-storageclass.yaml
kubectl get po -n elasticsearch-ns
kubectl describe pvc data-es-0 -n elasticsearch-ns
kubectl logs nfs-client-provisioner-5c66746f46-hrlqm
---------------------------------------------------------------------------------------------------
kubectl create -f elasticsearch-statefulset.yaml
kubectl get sts -n elasticsearch-ns
kubectl get po -n elasticsearch-ns
kubectl get pv
kubectl get pods
kubectl describe pods
kubectl describe pod nfs-client-provisioner-5c66746f46-w277s
kubectl port-forward es-0 9200:9200 --namespace=elasticsearch-ns
curl http://localhost:9200/
curl http://localhost:9200/_cluster/state?pretty
---------------------------------------------------------------------------------------------------
kubectl apply -f elastic.namespace.yaml
kubectl apply -f elasticsearch-svc.yaml
kubectl create -f nfs-client.yaml
kubectl create -f nfs-client-sa.yaml
kubectl create -f elasticsearch-storageclass.yaml
kubectl create -f elasticsearch-statefulset.yaml
kubectl delete -f elasticsearch-statefulset.yaml
kubectl delete -f elasticsearch-storageclass.yaml
kubectl delete -f nfs-client-sa.yaml
kubectl delete -f nfs-client.yaml
kubectl delete -f elasticsearch-svc.yaml
kubectl delete -f elastic.namespace.yaml
Kibana Deployment
cat <<EOF>> kibana.yaml
apiVersion: v1
kind: Service
metadata:
name: kibana
namespace: elasticsearch
labels:
app: kibana
spec:
ports:
- port: 5601
targetPort: 5601
nodePort: 30001
type: NodePort
selector:
app: kibana
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana
namespace: elasticsearch
labels:
app: kibana
spec:
selector:
matchLabels:
app: kibana
template:
metadata:
labels:
app: kibana
spec:
nodeSelector:
node: node2
containers:
- name: kibana
image: kibana:7.16.2
resources:
limits:
cpu: 1000m
requests:
cpu: 1000m
env:
- name: ELASTICSEARCH_HOSTS
value: http://elasticsearch7:9200
- name: SERVER_PUBLICBASEURL
value: "http://<node-ip>:30001"
- name: I18N_LOCALE
value: zh-CN
ports:
- containerPort: 5601
EOF
kubectl apply -f kibana.yaml
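After the apply, Kibana should come up on NodePort 30001 once the pod is Ready (sketch; <node-ip> is a placeholder):
kubectl get pods -n elasticsearch -l app=kibana
kubectl logs -n elasticsearch deploy/kibana
# then browse to http://<node-ip>:30001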
ZooKeeper & Kafka Deployment (leolee32 images)
cat <<EOF>> zk-kafka.namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: zk-kafka
labels:
name: zk-kafka
EOF
kubectl apply -f zk-kafka.namespace.yaml
---------------------------------------------------------------------------------------------------
cat <<EOF>> zk_pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: zk-kafka
name: zk-data1
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
nfs:
server: 10.0.21.1
path: /data/k8s/zk/data1
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: zk-kafka
name: zk-data2
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
nfs:
server: 10.0.21.1
path: /data/k8s/zk/data2
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: zk-kafka
name: zk-data3
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
nfs:
server: 10.0.21.1
path: /data/k8s/zk/data3
EOF
mkdir /data/k8s/zk/{data1,data2,data3}
kubectl apply -f zk_pv.yaml
---------------------------------------------------------------------------------------------------
cat <<EOF>> zk.yaml
apiVersion: v1
kind: Service
metadata:
namespace: zk-kafka
name: zk-hs
labels:
app: zk
spec:
ports:
- port: 2888
name: server
- port: 3888
name: leader-election
clusterIP: None
selector:
app: zk
---
apiVersion: v1
kind: Service
metadata:
namespace: zk-kafka
name: zk-cs
labels:
app: zk
spec:
type: NodePort
ports:
- port: 2181
targetPort: 2181
name: client
nodePort: 32181
selector:
app: zk
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
namespace: zk-kafka
name: zk-pdb
spec:
selector:
matchLabels:
app: zk
maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
namespace: zk-kafka
name: zok
spec:
serviceName: zk-hs
replicas: 3
selector:
matchLabels:
app: zk
template:
metadata:
labels:
app: zk
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- zk
topologyKey: "kubernetes.io/hostname"
containers:
- name: kubernetes-zookeeper
imagePullPolicy: Always
image: leolee32/kubernetes-library:kubernetes-zookeeper1.0-3.4.10
resources:
requests:
memory: "1Gi"
cpu: "0.5"
ports:
- containerPort: 2181
name: client
- containerPort: 2888
name: server
- containerPort: 3888
name: leader-election
command:
- sh
- -c
- "start-zookeeper \
--servers=3 \
--data_dir=/var/lib/zookeeper/data \
--data_log_dir=/var/lib/zookeeper/data/log \
--conf_dir=/opt/zookeeper/conf \
--client_port=2181 \
--election_port=3888 \
--server_port=2888 \
--tick_time=2000 \
--init_limit=10 \
--sync_limit=5 \
--heap=512M \
--max_client_cnxns=60 \
--snap_retain_count=3 \
--purge_interval=12 \
--max_session_timeout=40000 \
--min_session_timeout=4000 \
--log_level=INFO"
readinessProbe:
exec:
command:
- sh
- -c
- "zookeeper-ready 2181"
initialDelaySeconds: 10
timeoutSeconds: 5
livenessProbe:
exec:
command:
- sh
- -c
- "zookeeper-ready 2181"
initialDelaySeconds: 10
timeoutSeconds: 5
volumeMounts:
- name: datadir
mountPath: /var/lib/zookeeper
volumeClaimTemplates:
- metadata:
name: datadir
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 10Gi
EOF
kubectl apply -f zk.yaml
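A hedged sanity check for the ensemble, assuming the upstream kubernetes-zookeeper image keeps zkServer.sh on its PATH (pod names follow the StatefulSet name zok):
for i in 0 1 2; do
  kubectl exec zok-$i -n zk-kafka -- hostname -f
  kubectl exec zok-$i -n zk-kafka -- zkServer.sh status
done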
---------------------------------------------------------------------------------------------------
mkdir -p /data/k8s/kafka/{data1,data2,data3}
cat <<EOF>> kafka_pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: zk-kafka
name: kafka-data1
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
nfs:
server: 10.0.21.1
path: /data/k8s/kafka/data1
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: zk-kafka
name: kafka-data2
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
nfs:
server: 10.0.21.1
path: /data/k8s/kafka/data2
---
apiVersion: v1
kind: PersistentVolume
metadata:
namespace: zk-kafka
name: kafka-data3
spec:
capacity:
storage: 10Gi
accessModes:
- ReadWriteOnce
nfs:
server: 10.0.21.1
path: /data/k8s/kafka/data3
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> kafka.yaml
apiVersion: v1
kind: Service
metadata:
namespace: zk-kafka
name: kafka-hs
labels:
app: kafka
spec:
ports:
- port: 1099
name: jmx
clusterIP: None
selector:
app: kafka
---
apiVersion: v1
kind: Service
metadata:
namespace: zk-kafka
name: kafka-cs
labels:
app: kafka
spec:
type: NodePort
ports:
- port: 9092
targetPort: 9092
name: client
nodePort: 30092
selector:
app: kafka
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
namespace: zk-kafka
name: kafka-pdb
spec:
selector:
matchLabels:
app: kafka
maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
namespace: zk-kafka
name: kafoka
spec:
serviceName: kafka-hs
replicas: 3
selector:
matchLabels:
app: kafka
template:
metadata:
labels:
app: kafka
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- kafka
topologyKey: "kubernetes.io/hostname"
containers:
- name: k8skafka
imagePullPolicy: Always
image: leey18/k8skafka
resources:
requests:
memory: "1Gi"
cpu: "0.5"
ports:
- containerPort: 9092
name: client
- containerPort: 1099
name: jmx
command:
- sh
- -c
- "exec kafka-server-start.sh /opt/kafka/config/server.properties --override broker.id=${HOSTNAME##*-} \
--override listeners=PLAINTEXT://:9092 \
--override zookeeper.connect=zok-0.zk-hs.zk-kafka.svc.cluster.local:2181,zok-1.zk-hs.zk-kafka.svc.cluster.local:2181,zok-2.zk-hs.zk-kafka.svc.cluster.local:2181 \
--override log.dirs=/var/lib/kafka \
--override auto.create.topics.enable=true \
--override auto.leader.rebalance.enable=true \
--override background.threads=10 \
--override compression.type=producer \
--override delete.topic.enable=false \
--override leader.imbalance.check.interval.seconds=300 \
--override leader.imbalance.per.broker.percentage=10 \
--override log.flush.interval.messages=9223372036854775807 \
--override log.flush.offset.checkpoint.interval.ms=60000 \
--override log.flush.scheduler.interval.ms=9223372036854775807 \
--override log.retention.bytes=-1 \
--override log.retention.hours=168 \
--override log.roll.hours=168 \
--override log.roll.jitter.hours=0 \
--override log.segment.bytes=1073741824 \
--override log.segment.delete.delay.ms=60000 \
--override message.max.bytes=1000012 \
--override min.insync.replicas=1 \
--override num.io.threads=8 \
--override num.network.threads=3 \
--override num.recovery.threads.per.data.dir=1 \
--override num.replica.fetchers=1 \
--override offset.metadata.max.bytes=4096 \
--override offsets.commit.required.acks=-1 \
--override offsets.commit.timeout.ms=5000 \
--override offsets.load.buffer.size=5242880 \
--override offsets.retention.check.interval.ms=600000 \
--override offsets.retention.minutes=1440 \
--override offsets.topic.compression.codec=0 \
--override offsets.topic.num.partitions=50 \
--override offsets.topic.replication.factor=3 \
--override offsets.topic.segment.bytes=104857600 \
--override queued.max.requests=500 \
--override quota.consumer.default=9223372036854775807 \
--override quota.producer.default=9223372036854775807 \
--override replica.fetch.min.bytes=1 \
--override replica.fetch.wait.max.ms=500 \
--override replica.high.watermark.checkpoint.interval.ms=5000 \
--override replica.lag.time.max.ms=10000 \
--override replica.socket.receive.buffer.bytes=65536 \
--override replica.socket.timeout.ms=30000 \
--override request.timeout.ms=30000 \
--override socket.receive.buffer.bytes=102400 \
--override socket.request.max.bytes=104857600 \
--override socket.send.buffer.bytes=102400 \
--override unclean.leader.election.enable=true \
--override zookeeper.session.timeout.ms=6000 \
--override zookeeper.set.acl=false \
--override broker.id.generation.enable=true \
--override connections.max.idle.ms=600000 \
--override controlled.shutdown.enable=true \
--override controlled.shutdown.max.retries=3 \
--override controlled.shutdown.retry.backoff.ms=5000 \
--override controller.socket.timeout.ms=30000 \
--override default.replication.factor=1 \
--override fetch.purgatory.purge.interval.requests=1000 \
--override group.max.session.timeout.ms=300000 \
--override group.min.session.timeout.ms=6000 \
--override inter.broker.protocol.version=0.10.2-IV0 \
--override log.cleaner.backoff.ms=15000 \
--override log.cleaner.dedupe.buffer.size=134217728 \
--override log.cleaner.delete.retention.ms=86400000 \
--override log.cleaner.enable=true \
--override log.cleaner.io.buffer.load.factor=0.9 \
--override log.cleaner.io.buffer.size=524288 \
--override log.cleaner.io.max.bytes.per.second=1.7976931348623157E308 \
--override log.cleaner.min.cleanable.ratio=0.5 \
--override log.cleaner.min.compaction.lag.ms=0 \
--override log.cleaner.threads=1 \
--override log.cleanup.policy=delete \
--override log.index.interval.bytes=4096 \
--override log.index.size.max.bytes=10485760 \
--override log.message.timestamp.difference.max.ms=9223372036854775807 \
--override log.message.timestamp.type=CreateTime \
--override log.preallocate=false \
--override log.retention.check.interval.ms=300000 \
--override max.connections.per.ip=2147483647 \
--override num.partitions=1 \
--override producer.purgatory.purge.interval.requests=1000 \
--override replica.fetch.backoff.ms=1000 \
--override replica.fetch.max.bytes=1048576 \
--override replica.fetch.response.max.bytes=10485760 \
--override reserved.broker.max.id=1000 "
env:
- name: KAFKA_HEAP_OPTS
value : "-Xmx512M -Xms512M"
- name: KAFKA_OPTS
value: "-Dlogging.level=INFO"
volumeMounts:
- name: kafkadatadir
mountPath: /var/lib/kafka
readinessProbe:
exec:
command:
- sh
- -c
- "/opt/kafka/bin/kafka-broker-api-versions.sh --bootstrap-server=localhost:9092"
volumeClaimTemplates:
- metadata:
name: kafkadatadir
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 10Gi
EOF
kubectl apply -f kafka_pv.yaml
kubectl apply -f kafka.yaml
---------------------------------------------------------------------------------------------------
ZK & Kafka Deployment
cat <<EOF>> zookeeper.namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: zk-kafka
labels:
name: zk-kafka
EOF
kubectl apply -f zookeeper.namespace.yaml
---------------------------------------------------------------------------------------------------
cat <<EOF>> zookeeper-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: zoo1
namespace: zk-kafka
labels:
app: zookeeper-1
spec:
ports:
- name: client
port: 2181
protocol: TCP
- name: follower
port: 2888
protocol: TCP
- name: leader
port: 3888
protocol: TCP
selector:
app: zookeeper-1
---
apiVersion: v1
kind: Service
metadata:
name: zoo2
namespace: zk-kafka
labels:
app: zookeeper-2
spec:
ports:
- name: client
port: 2181
protocol: TCP
- name: follower
port: 2888
protocol: TCP
- name: leader
port: 3888
protocol: TCP
selector:
app: zookeeper-2
---
apiVersion: v1
kind: Service
metadata:
name: zoo3
namespace: zk-kafka
labels:
app: zookeeper-3
spec:
ports:
- name: client
port: 2181
protocol: TCP
- name: follower
port: 2888
protocol: TCP
- name: leader
port: 3888
protocol: TCP
selector:
app: zookeeper-3
EOF
kubectl apply -f zookeeper-svc.yaml
---------------------------------------------------------------------------------------------------
cat > zookeeper-sts.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: zookeeper-deployment-1
namespace: zk-kafka
spec:
replicas: 1
selector:
matchLabels:
app: zookeeper-1
name: zookeeper-1
template:
metadata:
labels:
app: zookeeper-1
name: zookeeper-1
spec:
containers:
- name: zoo1
image: zookeeper:3.7.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 2181
env:
- name: ZOO_MY_ID
value: "1"
- name: ZOO_SERVERS
value: "server.1=0.0.0.0:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: zookeeper-deployment-2
namespace: zk-kafka
spec:
replicas: 1
selector:
matchLabels:
app: zookeeper-2
name: zookeeper-2
template:
metadata:
labels:
app: zookeeper-2
name: zookeeper-2
spec:
containers:
- name: zoo2
image: zookeeper:3.7.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 2181
env:
- name: ZOO_MY_ID
value: "2"
- name: ZOO_SERVERS
value: "server.1=zoo1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zoo3:2888:3888;2181"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: zookeeper-deployment-3
namespace: zk-kafka
spec:
replicas: 1
selector:
matchLabels:
app: zookeeper-3
name: zookeeper-3
template:
metadata:
labels:
app: zookeeper-3
name: zookeeper-3
spec:
containers:
- name: zoo3
image: zookeeper:3.7.0
imagePullPolicy: IfNotPresent
ports:
- containerPort: 2181
env:
- name: ZOO_MY_ID
value: "3"
- name: ZOO_SERVERS
value: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181"
EOF
kubectl apply -f zookeeper-sts.yaml
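To check that the three ZooKeeper Deployments formed a quorum (the official zookeeper image has zkServer.sh on its PATH; one pod should report leader, the other two follower):
kubectl get pods -n zk-kafka
for d in zookeeper-deployment-1 zookeeper-deployment-2 zookeeper-deployment-3; do
  kubectl exec -n zk-kafka deploy/$d -- zkServer.sh status
done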
---------------------------------------------------------------------------------------------------
cat <<EOF>> kafka-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: kafka-service-1
namespace: zk-kafka
labels:
app: kafka-service-1
spec:
type: NodePort
ports:
- port: 9092
name: kafka-service-1
targetPort: 9092
nodePort: 30901
protocol: TCP
selector:
app: kafka-service-1
---
apiVersion: v1
kind: Service
metadata:
name: kafka-service-2
namespace: zk-kafka
labels:
app: kafka-service-2
spec:
type: NodePort
ports:
- port: 9092
name: kafka-service-2
targetPort: 9092
nodePort: 30902
protocol: TCP
selector:
app: kafka-service-2
---
apiVersion: v1
kind: Service
metadata:
name: kafka-service-3
namespace: zk-kafka
labels:
app: kafka-service-3
spec:
type: NodePort
ports:
- port: 9092
name: kafka-service-3
targetPort: 9092
nodePort: 30903
protocol: TCP
selector:
app: kafka-service-3
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> kafka-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kafka-deployment-1
namespace: zk-kafka
spec:
replicas: 1
selector:
matchLabels:
name: kafka-service-1
template:
metadata:
labels:
name: kafka-service-1
app: kafka-service-1
spec:
containers:
- name: kafka-1
image: wurstmeister/kafka:2.12-2.4.1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9092
env:
- name: KAFKA_ADVERTISED_PORT
value: "9092"
- name: KAFKA_ADVERTISED_HOST_NAME
value: <kafka-svc1-CLUSTER-IP>
- name: KAFKA_ZOOKEEPER_CONNECT
value: zoo1:2181,zoo2:2181,zoo3:2181
- name: KAFKA_BROKER_ID
value: "1"
- name: KAFKA_CREATE_TOPICS
value: mytopic:2:1
- name: KAFKA_ADVERTISED_LISTENERS
value: PLAINTEXT://10.0.22.1:30901
- name: KAFKA_LISTENERS
value: PLAINTEXT://0.0.0.0:9092
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kafka-deployment-2
namespace: zk-kafka
spec:
replicas: 1
selector:
matchLabels:
name: kafka-service-2
template:
metadata:
labels:
name: kafka-service-2
app: kafka-service-2
spec:
containers:
- name: kafka-2
image: wurstmeister/kafka:2.12-2.4.1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9092
env:
- name: KAFKA_ADVERTISED_PORT
value: "9092"
- name: KAFKA_ADVERTISED_HOST_NAME
value: <kafka-svc2-CLUSTER-IP>
- name: KAFKA_ZOOKEEPER_CONNECT
value: zoo1:2181,zoo2:2181,zoo3:2181
- name: KAFKA_BROKER_ID
value: "2"
- name: KAFKA_ADVERTISED_LISTENERS
value: PLAINTEXT://10.0.20.2:30902
- name: KAFKA_LISTENERS
value: PLAINTEXT://0.0.0.0:9092
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kafka-deployment-3
namespace: zk-kafka
spec:
replicas: 1
selector:
matchLabels:
name: kafka-service-3
template:
metadata:
labels:
name: kafka-service-3
app: kafka-service-3
spec:
containers:
- name: kafka-3
image: wurstmeister/kafka:2.12-2.4.1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9092
env:
- name: KAFKA_ADVERTISED_PORT
value: "9092"
- name: KAFKA_ADVERTISED_HOST_NAME
value: <kafka-svc3-CLUSTER-IP>
- name: KAFKA_ZOOKEEPER_CONNECT
value: zoo1:2181,zoo2:2181,zoo3:2181
- name: KAFKA_BROKER_ID
value: "3"
- name: KAFKA_ADVERTISED_LISTENERS
value: PLAINTEXT://10.0.20.3:30903
- name: KAFKA_LISTENERS
value: PLAINTEXT://0.0.0.0:9092
EOF
kubectl apply -f kafka-svc.yaml
kubectl apply -f kafka-deployment.yaml
---------------------------------------------------------------------------------------------------
kubectl api-versions
kubectl get pods -n zk-kafka
kubectl get service -n zk-kafka
kubectl get pv
kubectl get pvc -n zk-kafka
kubectl describe pvc datadir-zk-0 -n zk-kafka
kubectl patch pv zk-data3 -p '{"metadata":{"finalizers":null}}'
kubectl exec -it kafka-deployment-1-xxxxxxxxxxx -n zk-kafka -- /bin/bash
cd /opt/kafka
bin/kafka-topics.sh --list --zookeeper <any-zookeeper-svc-clusterIP>:2181
bin/kafka-topics.sh --create --zookeeper <zookeeper-svc1-clusterIP>:2181,<zookeeper-svc2-clusterIP>:2181,<zookeeper-svc3-clusterIP>:2181 --topic test --partitions 3 --replication-factor 1
bin/kafka-console-producer.sh --broker-list <kafka-svc1-clusterIP>:9092,<kafka-svc2-clusterIP>:9092,<kafka-svc3-clusterIP>:9092 --topic test
bin/kafka-console-consumer.sh --bootstrap-server <any-kafka-svc-clusterIP>:9092 --topic test --from-beginning
ZK & Kafka NFS Deployment
cat <<EOF>> zookeeper.namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: zk-kafka
labels:
name: zk-kafka
EOF
kubectl apply -f zookeeper.namespace.yaml
---------------------------------------------------------------------------------------------------
cat <<EOF>> zookeeper-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: k8s-pv-zk01
namespace: zk-kafka
labels:
app: zk
annotations:
volume.beta.kubernetes.io/storage-class: "anything"
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
nfs:
server: 10.0.21.1
path: "/data/k8s/zk/data1"
persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: k8s-pv-zk02
namespace: zk-kafka
labels:
app: zk
annotations:
volume.beta.kubernetes.io/storage-class: "anything"
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
nfs:
server: 10.0.21.1
path: "/data/k8s/zk/data2"
persistentVolumeReclaimPolicy: Recycle
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: k8s-pv-zk03
namespace: zk-kafka
labels:
app: zk
annotations:
volume.beta.kubernetes.io/storage-class: "anything"
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
nfs:
server: 10.0.21.1
path: "/data/k8s/zk/data3"
persistentVolumeReclaimPolicy: Recycle
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> zookeeper.yaml
apiVersion: v1
kind: Service
metadata:
name: zk-hs
namespace: zk-kafka
labels:
app: zk
spec:
selector:
app: zk
clusterIP: None
ports:
- name: server
port: 2888
- name: leader-election
port: 3888
---
apiVersion: v1
kind: Service
metadata:
name: zk-cs
namespace: zk-kafka
labels:
app: zk
spec:
selector:
app: zk
type: NodePort
ports:
- name: client
port: 2181
nodePort: 31811
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: zk
namespace: zk-kafka
spec:
serviceName: "zk-hs"
replicas: 3
selector:
matchLabels:
app: zk
updateStrategy:
type: RollingUpdate
podManagementPolicy: Parallel
template:
metadata:
labels:
app: zk
spec:
containers:
- name: zk
imagePullPolicy: Always
image: guglecontainers/kubernetes-zookeeper:1.0-3.4.10
ports:
- containerPort: 2181
name: client
- containerPort: 2888
name: server
- containerPort: 3888
name: leader-election
command:
- sh
- -c
- "start-zookeeper \
--servers=3 \
--data_dir=/var/lib/zookeeper/data \
--data_log_dir=/var/lib/zookeeper/data/log \
--conf_dir=/opt/zookeeper/conf \
--client_port=2181 \
--election_port=3888 \
--server_port=2888 \
--tick_time=2000 \
--init_limit=10 \
--sync_limit=5 \
--heap=4G \
--max_client_cnxns=60 \
--snap_retain_count=3 \
--purge_interval=12 \
--max_session_timeout=40000 \
--min_session_timeout=4000 \
--log_level=INFO"
readinessProbe:
exec:
command:
- sh
- -c
- "zookeeper-ready 2181"
initialDelaySeconds: 10
timeoutSeconds: 5
livenessProbe:
exec:
command:
- sh
- -c
- "zookeeper-ready 2181"
initialDelaySeconds: 10
timeoutSeconds: 5
volumeMounts:
- name: datadir
mountPath: /var/lib/zookeeper
volumeClaimTemplates:
- metadata:
name: datadir
annotations:
volume.beta.kubernetes.io/storage-class: "anything"
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi
EOF
---------------------------------------------------------------------------------------------------
kubectl apply -f zookeeper-pv.yaml
kubectl apply -f zookeeper.yaml
kubectl get pods -n zk-kafka
kubectl get service -n zk-kafka
---------------------------------------------------------------------------------------------------
cat <<EOF>> kafka.yaml
apiVersion: v1
kind: Service
metadata:
name: kafka-service-1
namespace: zk-kafka
labels:
app: kafka-service-1
spec:
type: NodePort
ports:
- port: 9092
name: kafka-service-1
targetPort: 9092
nodePort: 30901
protocol: TCP
selector:
app: kafka-1
---
apiVersion: v1
kind: Service
metadata:
name: kafka-service-2
namespace: zk-kafka
labels:
app: kafka-service-2
spec:
type: NodePort
ports:
- port: 9092
name: kafka-service-2
targetPort: 9092
nodePort: 30902
protocol: TCP
selector:
app: kafka-2
---
apiVersion: v1
kind: Service
metadata:
name: kafka-service-3
namespace: zk-kafka
labels:
app: kafka-service-3
spec:
type: NodePort
ports:
- port: 9092
name: kafka-service-3
targetPort: 9092
nodePort: 30903
protocol: TCP
selector:
app: kafka-3
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kafka-deployment-1
namespace: zk-kafka
spec:
replicas: 1
selector:
matchLabels:
app: kafka-1
template:
metadata:
labels:
app: kafka-1
spec:
containers:
- name: kafka-1
image: wurstmeister/kafka:2.12-2.4.1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9092
env:
- name: KAFKA_ZOOKEEPER_CONNECT
value: zk-0.zk-hs.zk-kafka.svc.cluster.local:2181,zk-1.zk-hs.zk-kafka.svc.cluster.local:2181,zk-2.zk-hs.zk-kafka.svc.cluster.local:2181
- name: KAFKA_BROKER_ID
value: "1"
- name: KAFKA_CREATE_TOPICS
value: mytopic:2:1
- name: KAFKA_LISTENERS
value: PLAINTEXT://0.0.0.0:9092
- name: KAFKA_ADVERTISED_PORT
value: "30901"
- name: KAFKA_ADVERTISED_HOST_NAME
valueFrom:
fieldRef:
fieldPath: status.hostIP
volumeMounts:
- name: datadir
mountPath: /var/lib/kafka
volumes:
- name: datadir
nfs:
server: 10.0.21.1
path: "/data/k8s/kafka/pv1"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kafka-deployment-2
namespace: zk-kafka
spec:
replicas: 1
selector:
matchLabels:
app: kafka-2
template:
metadata:
labels:
app: kafka-2
spec:
containers:
- name: kafka-2
image: wurstmeister/kafka:2.12-2.4.1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9092
env:
- name: KAFKA_ZOOKEEPER_CONNECT
value: zk-0.zk-hs.zk-kafka.svc.cluster.local:2181,zk-1.zk-hs.zk-kafka.svc.cluster.local:2181,zk-2.zk-hs.zk-kafka.svc.cluster.local:2181
- name: KAFKA_BROKER_ID
value: "2"
- name: KAFKA_LISTENERS
value: PLAINTEXT://0.0.0.0:9092
- name: KAFKA_ADVERTISED_PORT
value: "30902"
- name: KAFKA_ADVERTISED_HOST_NAME
valueFrom:
fieldRef:
fieldPath: status.hostIP
volumeMounts:
- name: datadir
mountPath: /var/lib/kafka
volumes:
- name: datadir
nfs:
server: 10.0.21.1
path: "/data/k8s/kafka/pv2"
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kafka-deployment-3
namespace: zk-kafka
spec:
replicas: 1
selector:
matchLabels:
app: kafka-3
template:
metadata:
labels:
app: kafka-3
spec:
containers:
- name: kafka-3
image: wurstmeister/kafka:2.12-2.4.1
imagePullPolicy: IfNotPresent
ports:
- containerPort: 9092
env:
- name: KAFKA_ZOOKEEPER_CONNECT
value: zk-0.zk-hs.zk-kafka.svc.cluster.local:2181,zk-1.zk-hs.zk-kafka.svc.cluster.local:2181,zk-2.zk-hs.zk-kafka.svc.cluster.local:2181
- name: KAFKA_BROKER_ID
value: "3"
- name: KAFKA_LISTENERS
value: PLAINTEXT://0.0.0.0:9092
- name: KAFKA_ADVERTISED_PORT
value: "30903"
- name: KAFKA_ADVERTISED_HOST_NAME
valueFrom:
fieldRef:
fieldPath: status.hostIP
volumeMounts:
- name: datadir
mountPath: /var/lib/kafka
volumes:
- name: datadir
nfs:
server: 10.0.21.1
path: "/data/k8s/kafka/pv3"
EOF
---------------------------------------------------------------------------------------------------
mkdir /data/k8s/kafka/{pv1,pv2,pv3} -p
kubectl apply -f kafka.yaml
kubectl get pods -n zk-kafka
kubectl get service -n zk-kafka
---------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------
MySQL Deployment
cat <<EOF>> mysql.namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
name: mysql
labels:
name: mysql
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: model-db-pv
namespace: mysql
spec:
storageClassName: ml-pv1
accessModes:
- ReadWriteOnce
capacity:
storage: 5Gi
persistentVolumeReclaimPolicy: Retain
nfs:
path: /data/k8s/mysql
server: 10.0.21.1
volumeMode: Filesystem
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: model-db-pv-claim
namespace: mysql
spec:
storageClassName: ml-pv1
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 5Gi
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-configMap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: model-db-config
namespace: mysql
labels:
app: model-db
data:
my.cnf: |-
[client]
default-character-set=utf8mb4
[mysql]
default-character-set=utf8mb4
[mysqld]
max_connections = 2000
secure_file_priv=/var/lib/mysql
sql_mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: model-db
namespace: mysql
spec:
replicas: 1
selector:
matchLabels:
app: model-mysql
template:
metadata:
labels:
app: model-mysql
namespace: mysql
spec:
containers:
- args:
- --datadir
- /var/lib/mysql/datadir
env:
- name: MYSQL_ROOT_PASSWORD
value: root
- name: MYSQL_USER
value: user
- name: MYSQL_PASSWORD
value: user
image: mysql:8.0.27
name: model-db-container
ports:
- containerPort: 3306
name: dbapi
volumeMounts:
- mountPath: /var/lib/mysql
name: model-db-storage
- name: config
mountPath: /etc/mysql/conf.d/my.cnf
subPath: my.cnf
volumes:
- name: model-db-storage
persistentVolumeClaim:
claimName: model-db-pv-claim
- name: config
configMap:
name: model-db-config
- name: localtime
hostPath:
type: File
path: /etc/localtime
EOF
---------------------------------------------------------------------------------------------------
cat <<EOF>> mysql-svc.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app: model-mysql
name: model-db-svc
namespace: mysql
spec:
type: NodePort
ports:
- name: http
port: 3306
nodePort: 30336
protocol: TCP
targetPort: 3306
selector:
app: model-mysql
EOF
---------------------------------------------------------------------------------------------------
kubectl apply -f mysql.namespace.yaml
kubectl apply -f mysql-pv.yaml -n mysql
kubectl get pv -n mysql
kubectl apply -f mysql-pvc.yaml -n mysql
kubectl get pvc -n mysql
kubectl apply -f mysql-configMap.yaml -n mysql
kubectl apply -f mysql-deployment.yaml -n mysql
kubectl apply -f mysql-svc.yaml -n mysql
kubectl describe pvc model-db-pv-claim -n mysql
---------------------------------------------------------------------------------------------------
kubectl get pods -n mysql
kubectl exec -it model-db-569b698fb8-qc62f -n mysql -- bash
mysql -uroot -proot
USE mysql;
ALTER USER 'root'@'%' IDENTIFIED WITH mysql_native_password BY 'root';
FLUSH PRIVILEGES;
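With the NodePort service in place, the database can also be reached from outside the cluster (sketch; <node-ip> is a placeholder, credentials from mysql-deployment.yaml):
mysql -h <node-ip> -P 30336 -uroot -proot -e "SELECT VERSION();"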
FROM kamisamak.com