Setting up Kubernetes with kubeadm

I. Requirements and planning

  • kube-proxy in IPVS mode
  • kubectl + kubelet + kubeadm version 1.19.3
  • flannel as the CNI
  • NFS shared storage
  • deployed with kubeadm
  • CentOS 7
Hostname     IP               Resources   Description
k8s-master   192.168.188.220  8C/16G      cluster node
k8s-node01   192.168.188.221  8C/16G      cluster node
k8s-node02   192.168.188.222  8C/16G      cluster node
NFS          192.168.188.225  8C/8G       outside the cluster

II. Deployment

1. hosts

  • All cluster nodes
cat >>/etc/hosts<<EOF
192.168.188.220 k8s-master
192.168.188.221 k8s-node01
192.168.188.222 k8s-node02
EOF

2. yum repos

  • All cluster nodes
# base repo
curl -o /etc/yum.repos.d/Centos-7.repo http://mirrors.aliyun.com/repo/Centos-7.repo

# docker repo
curl -o /etc/yum.repos.d/docker-ce.repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# kubernetes repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
       http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# rebuild the yum cache
yum clean all && yum makecache

3. Time sync and common tools

  • All nodes
yum -y install tree vim wget bash-completion bash-completion-extras lrzsz net-tools sysstat iotop iftop htop unzip nc nmap telnet bc  psmisc httpd-tools ntpdate

# Fix the timezone: if /etc/localtime is already a symlink that is not Shanghai, delete it and re-create the link
ln -s /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
ntpdate ntp2.aliyun.com			# sync time from the Aliyun NTP server
/sbin/hwclock --systohc			# write the system time to the hardware clock (BIOS)

4. Disable swap, firewall, and SELinux

  • All cluster nodes
# allow forwarding
iptables -P FORWARD ACCEPT

# turn swap off temporarily
swapoff -a

# permanently prevent swap from being mounted at boot
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab

# stop and disable the firewall
systemctl disable firewalld && systemctl stop firewalld

# disable SELinux without a reboot
setenforce 0
sed -ri 's#(SELINUX=).*#\1disabled#' /etc/selinux/config

5. Kernel parameters

  • All cluster nodes
# kernel parameters file
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward=1
vm.max_map_count=262144
EOF

# apply and verify the kernel settings
sysctl -p /etc/sysctl.d/k8s.conf

# load the IPVS modules
modprobe br_netfilter
modprobe -- ip_vs
modprobe -- ip_vs_sh
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- nf_conntrack_ipv4

# verify the ip_vs modules
lsmod |grep ip_vs
ip_vs_wrr              12697  0 
ip_vs_rr               12600  0 
ip_vs_sh               12688  0 
ip_vs                 145458  6 ip_vs_rr,ip_vs_sh,ip_vs_wrr
nf_conntrack          139264  2 ip_vs,nf_conntrack_ipv4
libcrc32c              12644  3 xfs,ip_vs,nf_conntrack

6. Install Docker

  • All cluster nodes
# list the available versions
yum list docker-ce --showduplicates | sort -r

# install
yum install docker-ce -y

# configure the registry mirror; change insecure-registries to your own registry address
mkdir -p /etc/docker
vim /etc/docker/daemon.json
{
  "insecure-registries": [    
    "192.168.188.225:80" 
  ],                          
  "registry-mirrors" : [
    "https://8xpk5wnt.mirror.aliyuncs.com"
  ]
}

# enable
systemctl enable docker;systemctl start docker

7. kubeadm, kubelet, and kubectl

  • All cluster nodes
yum install -y kubelet-1.19.3 kubeadm-1.19.3 kubectl-1.19.3

# check the kubeadm version
kubeadm  version

# enable kubelet at boot; it is fine if kubelet is not running yet
systemctl start kubelet.service;systemctl enable kubelet

8. Initialization

  • master node only
# generate the default init config; an error/warning here is fine
kubeadm config print init-defaults > kubeadm.yaml

# edit the init config
apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.188.220 	# apiserver address; single-master setup, so this is the master node IP
  bindPort: 6443
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: k8s-master
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers   # switched to the Aliyun registry
kind: ClusterConfiguration
kubernetesVersion: v1.19.0		# version for apiserver/controller-manager/scheduler/kube-proxy; change if you want another version (left as-is here)
networking:
  dnsDomain: cluster.local
  podSubnet: 10.244.0.0/16		# add this line: the pod network CIDR
  serviceSubnet: 10.96.0.0/12
scheduler: {}
# list the images that will be pulled; the warning can be ignored
kubeadm config images list --config kubeadm.yaml
W0701 07:29:54.188861   14093 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
registry.aliyuncs.com/google_containers/kube-apiserver:v1.19.0
registry.aliyuncs.com/google_containers/kube-controller-manager:v1.19.0
registry.aliyuncs.com/google_containers/kube-scheduler:v1.19.0
registry.aliyuncs.com/google_containers/kube-proxy:v1.19.0
registry.aliyuncs.com/google_containers/pause:3.2
registry.aliyuncs.com/google_containers/etcd:3.4.13-0
registry.aliyuncs.com/google_containers/coredns:1.7.0

# pull the images before kubeadm init so the init itself is faster
kubeadm config images pull --config kubeadm.yaml
W0701 07:31:13.779122   14263 configset.go:348] WARNING: kubeadm cannot validate component configs for API groups [kubelet.config.k8s.io kubeproxy.config.k8s.io]
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-apiserver:v1.19.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-controller-manager:v1.19.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-scheduler:v1.19.0
[config/images] Pulled registry.aliyuncs.com/google_containers/kube-proxy:v1.19.0
[config/images] Pulled registry.aliyuncs.com/google_containers/pause:3.2
[config/images] Pulled registry.aliyuncs.com/google_containers/etcd:3.4.13-0
[config/images] Pulled registry.aliyuncs.com/google_containers/coredns:1.7.0
# initialize
kubeadm init --config kubeadm.yaml

# lots of output follows; the key lines are below, and 'successfully' means it worked
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join 192.168.188.220:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:5d388fa6411653afb9018a799fb1798e4d5fbfa4cee0255f4a4ddffdfb9411a7
# run the commands from the init output; as root you can drop sudo to avoid errors where sudo policies are in place
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

# check that kubelet is up
systemctl status kubelet.service

9. Join the worker nodes

  • All worker nodes
  • Paste the join command printed by kubeadm init on the master
kubeadm join 192.168.188.220:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:5d388fa6411653afb9018a799fb1798e4d5fbfa4cee0255f4a4ddffdfb9411a7
    
# Verify the cluster. All nodes are NotReady because the network plugin is not installed yet.
kubectl get nodes
NAME          STATUS     ROLES    AGE   VERSION
k8s-master    NotReady   master   19m   v1.19.3
k8s-node-01   NotReady   <none>   23s   v1.19.3
k8s-node02    NotReady   <none>   8s    v1.19.3

# To add nodes later, generate a fresh join command and paste it on the new node
kubeadm token create --print-join-command

# The token is valid for 1 day; a non-expiring token can be created with
kubeadm token create --ttl 0
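# Optional check: list the existing bootstrap tokens and their expiry to confirm what was created
kubeadm token list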

10. CNI

  • Run this on the master only: every node needs flannel, but the downloaded manifest creates a DaemonSet and the cluster is already joined, so applying it once from the master is enough.
  • flannel is used here, but calico is generally recommended; the calico install may be added later.
# download the flannel manifest
wget https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml
# edit the manifest
      containers:
      - name: kube-flannel
        image: quay.io/coreos/flannel:v0.11.0-amd64		# change to a local registry address if you have the image locally
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=ens192			# change to the NIC name your servers actually use
# pull the image first
docker pull quay.io/coreos/flannel:v0.11.0-amd64

# create the flannel resources
kubectl create -f kube-flannel.yml

1. coredns

  • Kubernetes uses CoreDNS for in-cluster DNS resolution
  • When a pod starts, the cluster IP of the kube-dns service is injected into the pod's resolv.conf, along with search domains for the pod's namespace. To reach a service in another namespace by name, append the namespace:
    service_name.namespace
# first create a busybox pod
apiVersion: v1
kind: Pod
metadata:
  name: busybox1
  labels:
    name: busybox
spec:
  containers:
  - image: busybox:1.28
    command:
      - sleep
      - "3600"
    name: busybox


# exec into any container that has nslookup (the busybox image does) and resolve the service
[root@k8s-master ~/minio]# kubectl get svc
NAME            TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)                         AGE
kubernetes      ClusterIP   10.96.0.1      <none>        443/TCP                         27h
minio           ClusterIP   None           <none>        9000/TCP,5000/TCP               18h
minio-service   NodePort    10.109.91.83   <none>        9000:32707/TCP,5000:30670/TCP   18h

[root@k8s-master ~/minio]# kubectl exec  busybox1 -- nslookup minio
Server:    10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name:      minio
Address 1: 10.244.1.21 minio-0.minio.default.svc.cluster.local
Address 2: 10.244.0.17 minio-2.minio.default.svc.cluster.local
Address 3: 10.244.3.14 minio-3.minio.default.svc.cluster.local
Address 4: 10.244.2.23 10-244-2-23.minio-service.default.svc.cluster.local

11. Verify the cluster

[root@k8s-master ~]# kubectl get no
NAME          STATUS   ROLES    AGE   VERSION
k8s-master    Ready    master   65m   v1.19.3
k8s-node-01   Ready    <none>   46m   v1.19.3
k8s-node02    Ready    <none>   46m   v1.19.3
# the ROLES column is empty for the workers; label them
kubectl label nodes k8s-node-01  node-role.kubernetes.io/node01=
kubectl label nodes k8s-node02  node-role.kubernetes.io/node02=

# check again
kubectl get nodes
NAME          STATUS   ROLES    AGE   VERSION
k8s-master    Ready    master   77m   v1.19.3
k8s-node-01   Ready    node01   57m   v1.19.3
k8s-node02    Ready    node02   57m   v1.19.3
# check the master's taints
kubectl describe nodes k8s-master  | grep -i  taint

# by default the master does not schedule workload pods; decide whether it should. The command below removes the taint so the master can schedule workload pods.
kubectl taint node k8s-master node-role.kubernetes.io/master:NoSchedule-

12. Cluster health

  • If the kube-system pods do not come up it is usually network-related; check the images
# the system pods are all running, yet the components report unhealthy
kubectl get pod -n kube-system
NAME                                 READY   STATUS    RESTARTS   AGE
coredns-6d56c8448f-pvbgd             1/1     Running   0          99m
coredns-6d56c8448f-x7zm5             1/1     Running   0          99m
etcd-k8s-master                      1/1     Running   0          99m
kube-apiserver-k8s-master            1/1     Running   0          99m
kube-controller-manager-k8s-master   1/1     Running   0          99m
kube-flannel-ds-amd64-75js8          1/1     Running   0          35m
kube-flannel-ds-amd64-9whvn          1/1     Running   0          35m
kube-flannel-ds-amd64-j7l7j          1/1     Running   0          35m
kube-proxy-l4hh2                     1/1     Running   0          80m
kube-proxy-m7rfh                     1/1     Running   0          80m
kube-proxy-q849r                     1/1     Running   0          99m
kube-scheduler-k8s-master            1/1     Running   0          99m
[root@k8s-master ~]# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS      MESSAGE                                                                                       ERROR
scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   
controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused   
etcd-0               Healthy     {"health":"true"}                                    
# fix: comment out the --port=0 line in these two manifests, then restart kubelet
vim /etc/kubernetes/manifests/kube-controller-manager.yaml
vim /etc/kubernetes/manifests/kube-scheduler.yaml

systemctl restart kubelet.service

# check again; now healthy
kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE             ERROR
scheduler            Healthy   ok                  
controller-manager   Healthy   ok                  
etcd-0               Healthy   {"health":"true"}   

13. Command completion

  • master node
yum -y install bash-completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)

# persist it in bashrc
echo "source <(kubectl completion bash)" >> ~/.bashrc

14. NFS integration

  • NFS is hosted outside the cluster
# NFS server side
yum install -y nfs-utils rpcbind
mkdir -p /data/{preview,testing,develop}

vim /etc/exports
/data/develop	192.168.188.0/24(rw,sync,no_root_squash)
/data/testing	192.168.188.0/24(rw,sync,no_root_squash)
/data/preview	192.168.188.0/24(rw,sync,no_root_squash)

# start and enable at boot
systemctl start rpcbind;systemctl enable rpcbind
systemctl start nfs;systemctl enable nfs 

# test
showmount -e localhost

1. Static provisioning

  • Whenever a developer creates a PVC, ops has to hand-create a matching PV, which clearly does not scale
# master and worker nodes
yum install -y nfs-utils

# create a directory for the PV and PVC manifests
mkdir yaml-pv -p && cd yaml-pv

# write pv.yaml: 4 PVs in total
vim pv.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv-develop
  labels:
    pv: nfs-develop
spec:
  capacity: 
    storage: 100Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain 
  storageClassName: nfs-develop
  nfs:
    path: /data/develop
    server: 192.168.188.225
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv-testing
  labels:
    pv: nfs-testing
spec:
  capacity: 
    storage: 100Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain 
  storageClassName: nfs-testing
  nfs:
    path: /data/testing
    server: 192.168.188.225
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv-preview
  labels:
    pv: nfs-preview
spec:
  capacity:
    storage: 100Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain 
  storageClassName: nfs-preview
  nfs:
    path: /data/preview
    server: 192.168.188.225
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: nfs-pv-jenkins
  labels:
    pv: nfs-jenkins
spec:
  capacity:
    storage: 100Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: nfs-jenkins
  nfs:
    path: /data/jenkins
    server: 192.168.188.225
# For a PVC to bind to a PV, the access modes and the storageClassName fields must match, and the PVC's requested storage must not exceed the PV's capacity.

# The PersistentVolume controller keeps looping over every PVC to check whether it is already Bound. If not, it walks all available PVs and tries to bind one to the unbound PVC, so as soon as a suitable PV exists the PVC is bound quickly. "Binding" a PV to a PVC simply means writing the PV's name into the PVC's spec.volumeName field.
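# To see the binding described above, you can inspect spec.volumeName on a bound claim
# (pvc-nfs-testing is one of the PVCs created below):
kubectl get pvc pvc-nfs-testing -o jsonpath='{.spec.volumeName}'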
# write pvc.yaml; adjust to your needs (namespaces differ). This PVC is only for testing the setup.
vim pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-nfs-testing
  namespace: default
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: nfs-testing
  resources:
    requests:
      storage: 10Gi
  selector:
    matchLabels:
      pv: nfs-testing
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-nfs-develop
  namespace: default
spec:
  accessModes:     
  - ReadWriteOnce
  storageClassName: nfs-develop
  resources:
    requests:
      storage: 10Gi
  selector:
    matchLabels:
      pv: nfs-develop
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-nfs-preview
  namespace: default
spec:
  accessModes:     
  - ReadWriteOnce
  storageClassName: nfs-preview
  resources:
    requests:
      storage: 10Gi
  selector:
    matchLabels:
      pv: nfs-preview
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: pvc-nfs-jenkins
  namespace: default
spec:
  accessModes:
  - ReadWriteOnce
  storageClassName: nfs-jenkins
  resources:
    requests:
      storage: 10Gi
  selector:
    matchLabels:
      pv: nfs-jenkins
# create the PV and PVC resources
kubectl create -f pvc.yaml
kubectl create -f pv.yaml

# check; note that PVs are cluster-scoped, so later only the PVC side needs changing
[root@k8s-master yaml-pv]# kubectl get pvc
NAME              STATUS   VOLUME           CAPACITY   ACCESS MODES   STORAGECLASS   AGE
pvc-nfs-develop   Bound    nfs-pv-testing   100Gi      RWO            nfs            3s
pvc-nfs-preview   Bound    nfs-pv-preview   100Gi      RWO            nfs            3s
pvc-nfs-testing   Bound    nfs-pv-develop   100Gi      RWO            nfs            3s
[root@k8s-master yaml-pv]# kubectl get pv
NAME             CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                     STORAGECLASS   REASON   AGE
nfs-pv-develop   100Gi      RWO            Recycle          Bound    default/pvc-nfs-testing   nfs                     14m
nfs-pv-preview   100Gi      RWO            Recycle          Bound    default/pvc-nfs-preview   nfs                     14m
nfs-pv-testing   100Gi      RWO            Recycle          Bound    default/pvc-nfs-develop   nfs                     14m
# create a test workload; note the claimName
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
          name: web
        volumeMounts: 
        - name: www
          mountPath: /usr/local/
      volumes:
      - name: www
        persistentVolumeClaim:
          claimName: pvc-nfs-testing
# verify the NFS mount: seeing the expected files means it works
/usr/local # ls
testing-appstore  testing-bidding   testing-crm       testing-gateway   testing-order     testing-pms       testing-search    testing-user
testing-auth      testing-cms       testing-csc       testing-imc       testing-parking   testing-psi       testing-synchros  testing-workflow
testing-base      testing-coupon    testing-finance   testing-mcs       testing-payment   testing-rms       testing-system

2. Dynamic provisioning

  • Dynamic provisioning via StorageClass.
  • PVC and PV are one-to-one: PVs are created by ops while developers only handle PVCs. A large cluster can have many PVs, and creating them all by hand is tedious, hence dynamic provisioning.
  • Kubernetes' built-in dynamic provisioners do not cover NFS; since the backend here is NFS, we need the nfs-client provisioner.
  • It can be set up per namespace: e.g. dev, ops and testing each get their own StorageClass, so creating a PVC automatically creates the PV.
  • Two StorageClass setups follow
    • First StorageClass
# provisioner
vim provisioner.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-client-provisioner		# change the name (and everywhere below)
  labels:
    app: nfs-client-provisioner
  namespace: nfs-provisioner		# change the namespace
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-client-provisioner
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-client-provisioner
  template:
    metadata:
      labels:
        app: nfs-client-provisioner
    spec:
      serviceAccountName: nfs-client-provisioner
      containers:
        - name: nfs-client-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: hsj/nfs			# must match the StorageClass provisioner field
            - name: NFS_SERVER
              value: 192.168.188.225	# NFS server address
            - name: NFS_PATH
              value: /data/testing		# exported path
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.188.225		# change to match the values above
            path: /data/testing
# RBAC: the ClusterRole and Role the provisioner needs; basically only name and namespace change, the rules themselves stay as-is.
vim rbac.yaml

kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-client-provisioner
  namespace: nfs-provisioner
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-client-provisioner-runner
  namespace: nfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-client-provisioner
  namespace: nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: nfs-provisioner
roleRef:
  kind: ClusterRole
  name: nfs-client-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: nfs-provisioner
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-client-provisioner
  namespace: nfs-provisioner
subjects:
  - kind: ServiceAccount
    name: nfs-client-provisioner
    namespace: nfs-provisioner
roleRef:
  kind: Role
  name: leader-locking-nfs-client-provisioner
  apiGroup: rbac.authorization.k8s.io
# StorageClass: set name and provisioner; provisioner must match PROVISIONER_NAME in provisioner.yaml or they will not link up
vim storageclass.yaml

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs
provisioner: hsj/nfs

# test PVC
vim pvc.yaml

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
  namespace: nfs-provisioner
  annotations:
      #volume.beta.kubernetes.io/storage-class: "nfs"
spec:
  storageClassName: nfs			# must name the StorageClass to bind against
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
      
# That was the first StorageClass set; after creating the PVC, check that it is Bound. A PV is created automatically.
  • Second StorageClass
# provisioner
vim provisioner.yaml

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-testing-provisioner
  labels:
    app: nfs-testing-provisioner
  namespace: testing
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-testing-provisioner
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-testing-provisioner
  template:
    metadata:
      labels:
        app: nfs-testing-provisioner
    spec:
      serviceAccountName: nfs-testing-provisioner
      containers:
        - name: nfs-testing-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: nfs-testing-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: testing/nfs
            - name: NFS_SERVER
              value: 192.168.188.225
            - name: NFS_PATH
              value: /data/testing
      volumes:
        - name: nfs-testing-root
          nfs:
            server: 192.168.188.225
            path: /data/testing
# rbac
vim rbac.yaml

kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-testing-provisioner
  namespace: testing
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-testing-provisioner-runner
  namespace: testing
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-testing-provisioner
  namespace: testing
subjects:
  - kind: ServiceAccount
    name: nfs-testing-provisioner
    namespace: testing
roleRef:
  kind: ClusterRole
  name: nfs-testing-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-testing-provisioner
  namespace: testing
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-testing-provisioner
  namespace: testing
subjects:
  - kind: ServiceAccount
    name: nfs-testing-provisioner
    namespace: testing
roleRef:
  kind: Role
  name: leader-locking-nfs-testing-provisioner
  apiGroup: rbac.authorization.k8s.io
# storageclass
vim storageclass

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: testing-storageclass
provisioner: testing/nfs


# test PVC
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-pvc
  namespace: testing
  #annotations:
      #volume.beta.kubernetes.io/storage-class: "nfs"
spec:
  storageClassName: testing-storageclass
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Mi
# results for the two PVCs, each in its own namespace
kubectl -n testing get pvc
NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS           AGE
test-pvc   Bound    pvc-44ca9e5b-8253-48c1-a5c5-0542dd6a8c6d   1Mi        RWX            testing-storageclass   36m

kubectl -n nfs-provisioner get pvc
NAME       STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
test-pvc   Bound    pvc-a1fce90e-a5fa-4e9b-8e46-6e7c7cab5e40   1Mi        RWX            nfs            35m

16. IPVS

  • master
# edit the kube-proxy ConfigMap and set mode to ipvs
kubectl edit configmap kube-proxy -n kube-system 
    kind: KubeProxyConfiguration
    metricsBindAddress: ""
    mode: "ipvs"			# change this
    nodePortAddresses: null

# delete all kube-proxy pods so they restart with the new config
kubectl delete pod ${pod_name} -n kube-system

# check that the kube-proxy pods restarted and are Running
kubectl get pod -nkube-system |grep kube-proxy
kube-proxy-8cfct                     1/1     Running   0          41s
kube-proxy-t7khm                     1/1     Running   0          72s
kube-proxy-xrhvf                     1/1     Running   0          21s

# check the kube-proxy logs; the line 'Using ipvs Proxier' confirms IPVS is in use
kubectl logs ${pod_name} -n kube-system
I0702 10:05:47.071961       1 server_others.go:259] Using ipvs Proxier.
E0702 10:05:47.072302       1 proxier.go:381] can't set sysctl net/ipv4/vs/conn_reuse_mode, kernel version must be at least 4.1
W0702 10:05:47.072584       1 proxier.go:434] IPVS scheduler not specified, use rr by default
I0702 10:05:47.073021       1 server.go:650] Version: v1.19.0

# kube-proxy complains that the kernel should be at least 4.1; this can be ignored
can't set sysctl net/ipv4/vs/conn_reuse_mode, kernel version must be at least 4.1

# install ipvsadm for easier troubleshooting later; if resources allow, install it on every node
yum -y install ipvsadm
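# Quick check (assuming ipvsadm is installed): list the IPVS virtual servers kube-proxy
# has programmed; service cluster IPs should show up with rr scheduling
ipvsadm -Ln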

17. nginx-ingress

  • The commonly used nginx ingress controller
$ wget https://raw.githubusercontent.com/kubernetes/ingress-nginx/nginx-0.30.0/deploy/static/mandatory.yaml

## or use myblog/deployment/ingress/mandatory.yaml
## change where the controller is deployed
190 apiVersion: apps/v1
191 kind: DaemonSet				# change Deployment to DaemonSet
192 metadata:
193   name: nginx-ingress-controller
194   namespace: ingress-nginx
195   labels:
196     app.kubernetes.io/name: ingress-nginx
197     app.kubernetes.io/part-of: ingress-nginx
198 spec:
199  # replicas: 1				# comment out
200   selector:
201     matchLabels:
202       app.kubernetes.io/name: ingress-nginx
203       app.kubernetes.io/part-of: ingress-nginx
204   template:
205     metadata:
206       labels:
207         app.kubernetes.io/name: ingress-nginx
208         app.kubernetes.io/part-of: ingress-nginx
209       annotations:
210         prometheus.io/port: "10254"
211         prometheus.io/scrape: "true"
212     spec:
213       hostNetwork: true					# add this line
214       # wait up to five minutes for the drain of connections
215       terminationGracePeriodSeconds: 300
216       serviceAccountName: nginx-ingress-serviceaccount
217       nodeSelector:
218         ingress: "true"					# add this line
219         #kubernetes.io/os: linux		# comment out


# label the nodes that will run the ingress controller (all three here)
kubectl label node k8s-master ingress=true
kubectl label node k8s-node01 ingress=true
kubectl label node k8s-node02 ingress=true

# create
kubectl create -f mandatory.yaml

# check
kubectl -n ingress-nginx  get pod
  • Fixing the ingress controller error: creating the Service below is enough.

[image: images/image-20210727103918461.png]

apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
spec:
  type: ClusterIP
  ports:
  - name: http
    port: 80
    targetPort: 80
    protocol: TCP
  - name: https
    port: 443
    targetPort: 443
    protocol: TCP
  selector:
    app.kubernetes.io/name: ingress-nginx
# after creating it, check whether the error services "ingress-nginx" not found is gone
kubectl -n ingress-nginx logs --tail=10 ${POD_NAME}

18. HPA

  • HPA provides automatic scaling; it needs either heapster or Metrics Server, and Metrics Server is used here
  • HPA pulls its data from Metrics Server, so it must be installed
  • By default scaling is based on CPU and memory only; custom metrics are also possible
wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.3.6/components.yaml

# edit the yaml
 84       containers:
 85       - name: metrics-server
 86         image: registry.aliyuncs.com/google_containers/metrics-server-amd64:v0.3.6		# change the image to this one
 87         imagePullPolicy: IfNotPresent
 88         args:
 89           - --cert-dir=/tmp
 90           - --secure-port=4443
 91           - --kubelet-insecure-tls   						# add these two lines
 92           - --kubelet-preferred-address-types=InternalIP
# check that the pod named metrics-server-xxxx is up
kubectl -n kube-system get pod

# test: node resource usage is now visible; refresh the dashboard and it will also show CPU and memory usage for pods and other resources
kubectl top nodes
NAME          CPU(cores)   CPU%   MEMORY(bytes)   MEMORY%   
k8s-master    307m         3%     8682Mi          55%       
k8s-node-01   117m         1%     3794Mi          24%       
k8s-node02    127m         1%     4662Mi          29%    
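# With metrics available, a minimal HPA can be tried. This is only a sketch: web-test is
# the test deployment from the NFS section above and the thresholds are arbitrary.
# Note that CPU-percent scaling requires resources.requests.cpu to be set on the target pods.
kubectl autoscale deployment web-test --cpu-percent=80 --min=1 --max=5
kubectl get hpa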

19. Internal yaml server

  • On the NFS host
  • Needs a hosts entry for k8s-yaml.com
server {
    listen       2780;
    server_name  k8s-yaml.com;

    location / {
	autoindex on;
	default_type text/plain;
	root /sj_yaml/list;
    }
}

# create the directory, reload nginx, then access it
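# A sketch of those steps; the root path and port come from the server block above,
# and the hosts entry assumes the NFS host IP:
mkdir -p /sj_yaml/list
nginx -t && nginx -s reload
echo "192.168.188.225 k8s-yaml.com" >> /etc/hosts
curl http://k8s-yaml.com:2780/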

20. Mapping external services

  • Map a service outside the cluster into an in-cluster Service so pods can reach it by name; two resources are needed
    • In k8s, every Service normally has a matching Endpoints resource
    • So the Service and the Endpoints must share the same name
# create the Endpoints resource; the cluster must have network connectivity to the external service
vim mysql_endpoint.yaml
kind: Endpoints
apiVersion: v1
metadata:
  name: mysql-production
  namespace: public-toilet		#
subsets:
  - addresses:
      - ip: 192.168.188.12		# address of the external MySQL
    ports:
      - port: 3306
---
# expose the external service as the in-cluster mysql-production Service for easy access
apiVersion: v1
kind: Service
metadata:
  name: mysql-production
  namespace: public-toilet
spec:
  ports:
    - port: 3306
# exec into a container and check that the name resolves to that IP
nslookup mysql-production.public-toilet.svc.cluster.local

21. Cross-namespace access

  • Shared components usually live in one common namespace, so workloads in other namespaces need to reach them across namespaces.
  • CoreDNS handles in-cluster name resolution; note that Service IPs cannot be pinged
    • You can check /etc/resolv.conf inside the pod
    • Use nslookup ${SVC_NAME} to confirm the name resolves to the Service IP
  • Once the ExternalName Service is created, applications can connect using the Service name directly; a verification sketch follows the manifest below
# e.g. the shared components live in the pub namespace and the consumer is in app1: create a Service in app1 with no selector, type ExternalName, and externalName set to ${SVC_NAME}.${NAMESPACE}.svc.cluster.local
vim link-rabbitmq-svc.yaml
apiVersion: v1
kind: Service
metadata:
 name: rabbitmq
 namespace: app1
spec:
 ports:
 - port: 5672
   name: amqp
 sessionAffinity: None
 type: ExternalName
 externalName: rabbitmq.pub.svc.cluster.local
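# To verify (a sketch; substitute any pod running in the app1 namespace), the name should
# resolve as a CNAME pointing at rabbitmq.pub.svc.cluster.local:
kubectl -n app1 exec -it <pod-name> -- nslookup rabbitmq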

22. RBAC

  • role
    • scoped to a single namespace (a namespaced example follows the ClusterRole below)
  • clusterrole
    • cluster-scoped
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: secret-reader
# no namespace here: ClusterRoles are cluster-scoped
rules:
- apiGroups: [""]			# "" means the core API group
  resources: ["secrets"]
  verbs: ["get", "watch", "list"]
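  • For comparison, a minimal sketch of a namespaced Role plus a RoleBinding granting it to a ServiceAccount (the namespace app1 and the account name are only illustrative):
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: pod-reader
  namespace: app1
rules:
- apiGroups: [""]          # core API group
  resources: ["pods"]
  verbs: ["get", "watch", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: read-pods
  namespace: app1
subjects:
- kind: ServiceAccount
  name: app1-sa            # illustrative ServiceAccount
  namespace: app1
roleRef:
  kind: Role
  name: pod-reader
  apiGroup: rbac.authorization.k8s.io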

III. dashboard

  • Web UI for cluster management

1. Configuration

# download the yaml
wget https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-rc5/aio/deploy/recommended.yaml

# edit it; the Service is around line 45
vi recommended.yaml

kind: Service
apiVersion: v1
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  ports:
    - port: 443
      targetPort: 8443
      nodePort: 31755				# pin the NodePort
  selector:
    k8s-app: kubernetes-dashboard
  type: NodePort  					# add type: NodePort so the dashboard is reachable from a browser
 
# create a login user
vim admin.yaml

kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: admin
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
roleRef:
  kind: ClusterRole
  name: cluster-admin
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: admin
  namespace: kubernetes-dashboard

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin
  namespace: kubernetes-dashboard
  
# then apply the manifests; you can log in from a browser
kubectl -n kubernetes-dashboard get svc
NAME                        TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)         AGE
dashboard-metrics-scraper   ClusterIP   10.106.34.228   <none>        8000/TCP        15m
kubernetes-dashboard        NodePort    10.110.236.83   <none>        443:31755/TCP   15m

2. token

  • Not recommended
# log in with a token
https://192.168.188.220:31755

# get the token and paste it into the dashboard login
kubectl -n kubernetes-dashboard describe secrets admin-token-ddz4h

3. kubeconfig

  • Token login is cumbersome; a kubeconfig can be used instead, set up as follows
# create a directory for the kubeconfig files
mkdir -p /data/apps/dashboard/certs && cd /data/apps/dashboard/

# generate a passphrase-protected RSA key
openssl genrsa -des3 -passout pass:x -out certs/dashboard.pass.key 2048

# strip the passphrase from the key
openssl rsa -passin pass:x -in certs/dashboard.pass.key -out certs/dashboard.key

# create a certificate signing request
openssl req -new -key certs/dashboard.key -out certs/dashboard.csr -subj '/CN=kube-dashboard'

# self-sign the certificate (valid for 10 years)
openssl x509 -req -sha256 -days 3650 -in certs/dashboard.csr -signkey certs/dashboard.key -out certs/dashboard.crt

# remove the passphrase-protected key
rm certs/dashboard.pass.key

# create the dashboard certs secret from the certs directory
kubectl create secret generic kubernetes-dashboard-certs --from-file=certs -n kube-system

# choose which user to build the kubeconfig for (admin here); echo the variable to inspect it
DASH_TOCKEN=$(kubectl -n kubernetes-dashboard get secret admin-token-ddz4h  -o jsonpath={.data.token}|base64 -d)

# set the cluster entry in the kubeconfig
kubectl config set-cluster kubernetes --server=192.168.188.220:8443 --kubeconfig=/data/apps/dashboard/certs/dashbord-admin.conf

# set the user credentials (the token)
kubectl config set-credentials dashboard-admin --token=$DASH_TOCKEN --kubeconfig=/data/apps/dashboard/certs/dashbord-admin.conf

# set the context
kubectl config set-context dashboard-admin@kubernetes --cluster=kubernetes --user=dashboard-admin --kubeconfig=/data/apps/dashboard/certs/dashbord-admin.conf

# dashbord-admin.conf is the admin user's login kubeconfig; copy it to your workstation and select it at login
kubectl config use-context dashboard-admin@kubernetes --kubeconfig=/data/apps/dashboard/certs/dashbord-admin.conf

4. ingress

  • Configure an Ingress for the dashboard
vim dash-ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  labels:
    k8s-app: kubernetes-dashboard
  annotations:
    kubernetes.io/ingress.class: "nginx"
    nginx.ingress.kubernetes.io/ssl-passthrough: "true"
    nginx.ingress.kubernetes.io/backend-protocol: HTTPS
  name: kubernetes-dashboard
  namespace: kubernetes-dashboard
spec:
  rules:
  - host: hsj.dashboard.com				# change to the URL you want to use
    http:
      paths:
      - path: /
        backend:
          servicePort: 443
          serviceName: kubernetes-dashboard
  tls:
  - hosts:
    - hsj.dashboard.com					# change to the URL you want to use
    secretName: kubernetes-dashboard-certs

5. Other users

  • You can create additional ClusterRoles with limited permissions so other people can also log in to the dashboard with restricted access; see the sketch below.
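# A minimal sketch of a read-only dashboard user (the names are illustrative): bind the
# built-in "view" ClusterRole to a new ServiceAccount and log in with that account's token.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dashboard-viewer
  namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: dashboard-viewer
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: view               # built-in read-only ClusterRole
subjects:
- kind: ServiceAccount
  name: dashboard-viewer
  namespace: kubernetes-dashboard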

IV. harbor

  • Container image registry
  • Runs on the NFS host, outside the cluster

1. docker compose + harbor

  • Harbor needs docker-compose.
# install docker-compose first; strictly speaking only its binary is needed
curl -o /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum install -y docker-compose

# harbor
# download the offline harbor 1.8.3 installer
wget https://storage.googleapis.com/harbor-releases/release-1.8.0/harbor-offline-installer-v1.8.3.tgz

tar xf harbor-offline-installer-v1.8.3.tgz -C /opt/

# symlink to control the version and make upgrades easier
mv /opt/harbor /opt/harbor-v1.83
ln -s /opt/harbor-v1.83/ /opt/harbor

2. harbor.yml

  • This is Harbor's configuration file; edit it
# my configuration
hostname: 192.168.188.225
http:
  port: 180
harbor_admin_password: ${LOGIN_PASSWORD}
database:
  password: root123
data_volume: /harbor/images
clair: 
  updaters_interval: 12
  http_proxy:
  https_proxy:
  no_proxy: 127.0.0.1,localhost,core,registry
jobservice:
  max_job_workers: 10
chart:
  absolute_url: disabled
log:
  level: info
  rotate_count: 50
  rotate_size: 200M
  location: /var/log/harbor
_version: 1.8.0

3. nginx

1. daemon.json

# add one line to daemon.json
vim /etc/docker/daemon.json
"insecure-registries": ["http://192.168.188.225:180"],

# restart docker
systemctl restart docker

2. nginx

# nginx config
server {
    listen       80;
    server_name  harbor.echronos.com;

    client_max_body_size 1000m;

    location / {
        proxy_pass http://127.0.0.1:180;
    }
}

# restart
systemctl enable nginx;systemctl restart nginx

# Open it in a browser and create a project; note the project must be public.

# docker login to confirm access, then push an image
docker login 192.168.188.225:180
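# A sketch of tagging and pushing an image; the project name "test" is an assumption,
# use the project you created in the UI:
docker tag nginx:alpine 192.168.188.225:180/test/nginx:alpine
docker push 192.168.188.225:180/test/nginx:alpine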

V. jenkins + gitlab

(1). Configuration

1. jenkins

vim jenkins-all.yaml
apiVersion: v1
kind: Namespace
metadata:
  name: jenkins
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jenkins
  namespace: jenkins
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: jenkins-crb
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: jenkins
  namespace: jenkins
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: jenkins-master
  namespace: jenkins
spec:
  replicas: 1
  selector:
    matchLabels:
      devops: jenkins-master
  template:
    metadata:
      labels:
        devops: jenkins-master
    spec:
      nodeSelector:
        jenkins: "true"
      serviceAccount: jenkins # the ServiceAccount the pod uses
      initContainers:
      - name: fix-permissions
        image: busybox
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c", "chown -R 1000:1000 /var/jenkins_home"]
        securityContext:
          privileged: true
        volumeMounts:
        - name: jenkinshome
          mountPath: /var/jenkins_home
      containers:
      - name: jenkins
        image: jenkinsci/blueocean:1.23.2
        imagePullPolicy: IfNotPresent
        ports:
        - name: http # Jenkins master web UI port
          containerPort: 8080
        - name: slavelistener # port for future Jenkins slaves to connect to the master
          containerPort: 50000
        volumeMounts:
        - name: jenkinshome
          mountPath: /var/jenkins_home
        - name: date-config
          mountPath: /etc/localtime
        env:
        - name: JAVA_OPTS
          value: "-Xms4096m -Xmx5120m -Duser.timezone=Asia/Shanghai -Dhudson.model.DirectoryBrowserSupport.CSP="
      volumes:
      - name: jenkinshome
        hostPath:
          path: /var/jenkins_home/
      - name: date-config
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
---
apiVersion: v1
kind: Service
metadata:
  name: jenkins
  namespace: jenkins
spec:
  ports:
  - name: http
    port: 8080
    targetPort: 8080
  - name: slavelistener
    port: 50000
    targetPort: 50000
  type: ClusterIP
  selector:
    devops: jenkins-master
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: jenkins-web
  namespace: jenkins
spec:
  rules:
  - host: jenkins.test.com		# change the ingress host
    http:
      paths:
      - backend:
          serviceName: jenkins
          servicePort: 8080
        path: /
# Label the node that will run jenkins-master (k8s-node02 here)
kubectl label node k8s-node02 jenkins=true

# Deploy
kubectl create -f jenkins-all.yaml

# Check
kubectl -n jenkins get po
NAME                              READY   STATUS    RESTARTS   AGE
jenkins-master-767df9b574-lgdr5   1/1     Running   0          20s

2. secret

# secret
vim gitlab-secret.txt
postgres.user.root=root
postgres.pwd.root=1qaz2wsx

# create the secret; a type has to be chosen (generic here), plus the name gitlab-secret
$ kubectl -n jenkins create secret generic gitlab-secret --from-env-file=gitlab-secret.txt

3. postgres

  • GitLab depends on postgres
# change the image; nothing else needs modification
vim postgres.yaml
apiVersion: v1
kind: Service
metadata:
  name: postgres
  labels:
    app: postgres
  namespace: jenkins
spec:
  ports:
  - name: server
    port: 5432
    targetPort: 5432
    protocol: TCP
  selector:
    app: postgres
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: jenkins
  name: postgres
  labels:
    app: postgres
spec:
  replicas: 1
  selector:
    matchLabels:
      app: postgres
  template:
    metadata:
      labels:
        app: postgres
    spec:
      nodeSelector:
        postgres: "true"
      tolerations:
      - operator: "Exists"
      containers:
      - name: postgres
        image:  192.168.136.10:5000/postgres:11.4 	# if you do not run this local registry, use postgres:11.4 instead
        imagePullPolicy: "IfNotPresent"
        ports:
        - containerPort: 5432
        env:
        - name: POSTGRES_USER           # PostgreSQL username
          valueFrom:
            secretKeyRef:
              name: gitlab-secret
              key: postgres.user.root
        - name: POSTGRES_PASSWORD       # PostgreSQL password
          valueFrom:
            secretKeyRef:
              name: gitlab-secret
              key: postgres.pwd.root
        resources:
          limits:
            cpu: 1000m
            memory: 2048Mi
          requests:
            cpu: 50m
            memory: 100Mi
        volumeMounts:
        - mountPath: /var/lib/postgresql/data
          name: postgredb
      volumes:
      - name: postgredb
        hostPath:
          path: /var/lib/postgres/
# run the gitlab stack on k8s-node02; label the node
kubectl label node k8s-node02 postgres=true

# create postgres
$ kubectl create -f postgres.yaml

# exec into the container and create the gitlab database for the gitlab component deployed later
$ kubectl -n jenkins exec -ti postgres-7ff9b49f4c-nt8zh bash
root@postgres-7ff9b49f4c-nt8zh:/# psql
root=# create database gitlab;

4. redis

# nothing needs to be changed here
apiVersion: v1
kind: Service
metadata:
  name: redis
  labels:
    app: redis
  namespace: jenkins
spec:
  ports:
  - name: server
    port: 6379
    targetPort: 6379
    protocol: TCP
  selector:
    app: redis
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: jenkins
  name: redis
  labels:
    app: redis
spec:
  replicas: 1
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
    spec:
      tolerations:
      - operator: "Exists"
      containers:
      - name: redis
        image:  sameersbn/redis:4.0.9-2
        imagePullPolicy: "IfNotPresent"
        ports:
        - containerPort: 6379
        resources:
          limits:
            cpu: 1000m
            memory: 2048Mi
          requests:
            cpu: 50m
            memory: 100Mi

5. gitlab

# change the ingress host and label the node
vim gitlab.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: gitlab
  namespace: jenkins
  annotations:
    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
spec:
  rules:
  - host: gitlab.com
    http:
      paths:
      - backend:
          serviceName: gitlab
          servicePort: 80
        path: /
---
apiVersion: v1
kind: Service
metadata:
  name: gitlab
  labels:
    app: gitlab
  namespace: jenkins
spec:
  ports:
  - name: server
    port: 80
    targetPort: 80
    protocol: TCP
  selector:
    app: gitlab
---
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: jenkins
  name: gitlab
  labels:
    app: gitlab
spec:
  replicas: 1
  selector:
    matchLabels:
      app: gitlab
  template:
    metadata:
      labels:
        app: gitlab
    spec:
      nodeSelector:
        gitlab: "true"
      tolerations:
      - operator: "Exists"
      containers:
      - name: gitlab
        image:  sameersbn/gitlab:13.2.2
        imagePullPolicy: "IfNotPresent"
        env:
        - name: GITLAB_HOST
          value: "hsj.gitlab.com"		# best kept identical to the GitLab URL; it is the remote address used for git push
        - name: GITLAB_PORT
          value: "80"
        - name: GITLAB_SECRETS_DB_KEY_BASE
          value: "long-and-random-alpha-numeric-string"
        - name: GITLAB_SECRETS_DB_KEY_BASE
          value: "long-and-random-alpha-numeric-string"
        - name: GITLAB_SECRETS_SECRET_KEY_BASE
          value: "long-and-random-alpha-numeric-string"
        - name: GITLAB_SECRETS_OTP_KEY_BASE
          value: "long-and-random-alpha-numeric-string"
        - name: DB_HOST
          value: "postgres"
        - name: DB_NAME
          value: "gitlab"
        - name: DB_USER
          valueFrom:
            secretKeyRef:
              name: gitlab-secret
              key: postgres.user.root
        - name: DB_PASS
          valueFrom:
            secretKeyRef:
              name: gitlab-secret
              key: postgres.pwd.root
        - name: REDIS_HOST
          value: "redis"
        - name: REDIS_PORT
          value: "6379"
        ports:
        - containerPort: 80
        resources:
          limits:
            cpu: 2000m
            memory: 5048Mi
          requests:
            cpu: 100m
            memory: 500Mi
        volumeMounts:
        - mountPath: /home/git/data
          name: data
      volumes:
      - name: data
        hostPath:
          path: /var/lib/gitlab/
# Deploy to k8s-node02
$ kubectl label node k8s-node02 gitlab=true

# Create
$ kubectl create -f gitlab.yaml
# In the end all 4 pods, plus the ingress, secret, services and so on, must be running
kubectl -n  jenkins get po
NAME                              READY   STATUS    RESTARTS   AGE
gitlab-9db4f85df-kkmbx            1/1     Running   0          157m
jenkins-master-79f766cc69-k8vww   1/1     Running   0          66m
postgres-64746c8589-ccdgg         1/1     Running   0          3h41m
redis-548dc5569f-2rds8            1/1     Running   0          3h39m

# add this hosts entry on both the servers and your workstation
192.168.188.220 hsj.jenkins.com hsj.gitlab.com

(2). Tuning

1. Jenkins update site

# jenkins is pinned to k8s-node02, and its data dir is mounted at /var/jenkins_home (see the yaml above)
cd /var/jenkins_home/update

# switch to the Tsinghua mirror
sed -i 's/https:\/\/updates.jenkins.io\/download/https:\/\/mirrors.tuna.tsinghua.edu.cn\/jenkins/g' default.json

# replace the google connectivity check with baidu
sed -i 's/http:\/\/www.google.com/https:\/\/www.baidu.com/g' default.json

2. Jenkins plugins

  • Chinese localization plugin
  • GitLab plugin
# open hsj.jenkins.com; it asks for the initial admin password, obtained like this
kubectl -n jenkins exec jenkins-master-79f766cc69-k8vww cat /var/jenkins_home/secrets/initialAdminPassword

# Go to Jenkins -> Manage Jenkins -> Plugin Manager -> Available, search for "chinese" and "gitlab" and install both.

[image: images\jenkins-install-plugins.jpg]

3. Connecting to GitLab

1. Configuration

  • Manage Jenkins ==> Configure System ==> Gitlab
    • Enter the GitLab name and URL, then add the credentials
  • When configuring GitLab you must fill in the repository URL, but Jenkins cannot resolve that hostname by itself. There are two ways to make it resolvable:
    • add a hosts entry directly inside the Jenkins container
    • modify CoreDNS

[image: images/image-20210706133905699.png]

2. coredns

# check the coredns pods
kubectl -n kube-system get pod
NAME                                 READY   STATUS    RESTARTS   AGE
coredns-6d56c8448f-pvbgd             1/1     Running   2          4d17h
coredns-6d56c8448f-x7zm5             1/1     Running   3          4d17h

# check the configmaps
kubectl -n kube-system get cm
NAME                                 DATA   AGE
coredns                              1      4d18h
extension-apiserver-authentication   6      4d18h
kube-flannel-cfg                     2      4d16h
kube-proxy                           2      4d18h
kubeadm-config                       2      4d18h
kubelet-config-1.19                  1      4d18h

# edit the coredns configmap; just save after editing, no coredns restart is needed
kubectl -n kube-system edit cm coredns
apiVersion: v1
data:
  Corefile: |
    .:53 {
        errors
        health {
           lameduck 5s
        }
        # add this hosts block and its entries; adjust to your environment
        hosts {
            192.168.188.220 hsj.jenkins.com hsj.gitlab.com
            fallthrough
        }
        # the addition is the block above
        ready
        kubernetes cluster.local in-addr.arpa ip6.arpa {
           pods insecure
           fallthrough in-addr.arpa ip6.arpa
           ttl 30
        }
        prometheus :9153
        forward . /etc/resolv.conf {
           max_concurrent 1000
        }
        cache 30
        loop
        reload
        loadbalance
    }
kind: ConfigMap
metadata:
  creationTimestamp: "2021-07-01T11:36:06Z"
  name: coredns
  namespace: kube-system
  resourceVersion: "184"
  selfLink: /api/v1/namespaces/kube-system/configmaps/coredns
  uid: b0bae1fd-a6e6-40e5-8451-a516bafdd2e3
  
# test: exec into the jenkins container and curl the name; clearly it resolves now
kubectl -n jenkins exec -it jenkins-master-79f766cc69-k8vww sh
/ $ curl hsj.gitlab.com
You are being redirected./ $ 

4. GitLab global credentials

1. Add the credentials

  • Manage Jenkins ==> Configure System ==> Gitlab ==> Add credentials ==> jenkins

[image: images/image-20210706140229059.png]

2. Where to create the token

[image: images/image-20210706135905224.png]

3. Create the token

  • I granted the maximum scope here, i.e. everything selected

[image: images/image-20210706140640281.png]

4. Get the token

  • The token is shown only once, so handle it carefully.

[image: images/image-20210706141053898.png]

[image: images/image-20210706141316524.png]

5. Test

  • As long as nothing is highlighted in red it is fine. Run the test; Success means it works. When saving, make sure to click Apply; clicking Save directly just exits the page.

[image: images/image-20210706141716321.png]

(3). Test project

  • The test here is not meant to be rigorous

1. Freestyle project

[image: images/image-20210706142548745.png]

  • This is only a test, so a plain username/password is fine.

[image: images/image-20210706142933681.png]

  • Then link the freestyle project; no red highlight means it is fine.

[image: images/image-20210706143027999.png]

2. Trigger

  • There is a GitLab webhook URL here; the webhook has to be configured on the GitLab side

[image: images/image-20210706143558739.png]

  • Click it to generate a Jenkins token. The GitLab token created above lets Jenkins talk to GitLab; this one lets GitLab call Jenkins.

[image: images/image-20210706144306629.png]

3. webhook

  • When developers push code, the build is triggered automatically.

[image: images/image-20210706144607748.png]

4. Troubleshooting

  • Click the wrench icon ==> Settings ==> Network ==> Outbound requests ==> tick the first checkbox

[image: images/image-20210706144933909.png]

  • Test the webhook: click test and check that the Push events status is 200

[image: images/image-20210707203530608.png]

5. Test

  • Push code, build automatically, and have a DingTalk bot send the result
  • After pushing code the build runs; the DingTalk part was not tested yet, just add the webhook in DingTalk.

[image: images/image-20210707204812571.png]

(4). master-slave

  • By default every job runs on the Jenkins master; many concurrent jobs there hurt its performance, so jobs can be dispatched to different nodes in a multi-slave setup

1. Add a node

  • Manage Jenkins ==> Manage Nodes ==> New Node ==> choose Permanent Agent

[image: images/image-20210708103618395.png]

  • Open it and configure the node so it can take jobs.

[image: images/image-20210708104142689.png]

2. Agent script

  • agent.jar; the launch script can go into /etc/rc.d/rc.local so it runs automatically at boot.

[image: images/image-20210708104620626.png]

  • Run the script with nohup in the background; it needs a Java runtime, so Java must be installed.

[image: images/image-20210708105021619.png]

  • Running the script errors with: provided port:50000 is not reachable
  • Add the Jenkins Service address and port 50000 as the Tunnel

[image: images/image-20210708105632886.png]

[image: images/image-20210708105726092.png]

  • Run the script again; once it prints Connected, press Enter (nohup needs it). The node is added, and after a refresh the web panel shows it as schedulable.

[image: images/image-20210708105822910.png]

3. DingTalk integration

  • Jenkins builds can send DingTalk messages
  • Create the bot first, then test from the k8s master with the command below
    • The bot setup itself is easy to look up.
    • The first test fails because the bot's IP whitelist is empty; add the k8s cluster network plus the network shown in the error message.
# change this to your own DingTalk bot webhook URL
curl 'https://oapi.dingtalk.com/robot/send?access_token=2d0c149c741b80d8c245b8c9750dfa27e53a42b7c58b87cd976b36ca40416788' \
   -H 'Content-Type: application/json' \
   -d '{"msgtype": "text", 
        "text": {
             "content": "监狱生活让我明白了1个道理,如果算上怎么把肥皂拿稳,嗯....那就是2个."
        }
      }'

[image: images/image-20210708111155733.png]

[image: images/image-20210708111614075.png]

4. Run jobs on labeled nodes

  • With the node added above, jobs can be pinned to specific nodes via labels.
  • Open the test job and choose that label for execution; the test itself just makes the DingTalk bot send a message.

[image: images/image-20210708112105067.png]

  • Push code, the slave node runs the job, and DingTalk receives the message... the whole flow works.

[image: images/image-20210708113057071.png]

  • It was not executed on the master node

[image: images/image-20210708113413743.png]

[image: images/image-20210708113234452.png]

(5). Dynamic master-slave

  • Instead of dedicating servers as agents, let Kubernetes start a temporary agent pod for each Jenkins job; the pod destroys itself once the job finishes.
  • Note: you can run a throwaway Jenkinsfile first to see how the following are set
    • the image used by the temporary job
    • the startup command
    • the container name
    • the label

1. Plugin and configuration

  • Install the kubernetes plugin

  • [Manage Jenkins] ==> [Plugin Manager] ==> search for kubernetes ==> install

    • If installation fails, update the bouncycastle API Plugin first and restart Jenkins
    • Restart Jenkins after installing and confirm the plugin is present
  • Be sure to allocate a pseudo-terminal

  • [Manage Jenkins] ==> [Configure System] ==> [Add a new cloud]

[image: images/image-20210727114741855.png]

  • Kubernetes cluster connection settings; the server certificate can be left empty

[image: images/image-20210727114838829.png]

  • Set the timeouts to suit your environment

[image: images/image-20210727115029684.png]

[image: images/image-20210727185847731.png]

  • The container must be allocated a pseudo-terminal

[image: images/image-20210727185916045.png]

[image: images/image-20210727185937917.png]

  • Be sure to mount docker.sock into the pod; I used a hostPath, adjust to your setup

[image: images/image-20210727115404711.png]

# Notes for the temporary agent pod
Name: jnlp-slave
Namespace: jenkins
Labels: jnlp-slave, used as the agent label selector
Jenkins connection timeout (seconds): 300 (the default is 1000s)
Node selector: kubernetes.io/os: linux
Workspace volume: hostPath, set to /opt/jenkins_jobs/
On the labeled nodes run chown -R 1000:1000 /opt/jenkins_jobs/, otherwise the Pod has no permission to run jobs

2. Build the image

  • Build a tools image that runs our jobs; do this on the master node
  • The image needs the kubectl command, and /var/run/docker.sock must be mounted in so kubectl apply works
FROM alpine
USER root

RUN sed -i 's/dl-cdn.alpinelinux.org/mirrors.tuna.tsinghua.edu.cn/g' /etc/apk/repositories && \
    apk update && \
    apk add  --no-cache openrc docker git curl tar gcc g++ make \
    bash shadow openjdk8 python2 python2-dev py-pip openssl-dev libffi-dev \
    libstdc++ harfbuzz nss freetype ttf-freefont && \
    mkdir -p /root/.kube && \
    usermod -a -G docker root && \
    apk update && apk add git && apk add nodejs && apk add npm && \
    npm  config set  registry https://registry.npm.taobao.org && \
    npm install -g cnpm && cnpm install -g yarn

COPY config /root/.kube/

RUN rm -rf /var/cache/apk/* 
#----------------- install kubectl --------------------#
COPY kubectl /usr/local/bin/
RUN chmod +x /usr/local/bin/kubectl
# ------------------------------------------------#
docker build . -t 192.168.188.255:180/echronos/tools:v1
docker push 192.168.188.255:180/echronos/tools:v1

# after building, you can start a throwaway container to test it
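# A sketch of such a test (image tag taken from the build command above); it should list
# the cluster nodes if the baked-in kubeconfig is valid:
docker run --rm 192.168.188.255:180/echronos/tools:v1 kubectl get nodes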

3.

VI. EFK

Cloud native

  • Following the cloud-native approach, ES + Fluentd + Kibana is used for log handling
# label a node so ES can be pinned to it (optional)
kubectl label node k8s-node-01 es=true

# namespace
kubectl create ns logging

(1). ES cluster

1. storageclass

1. provisioner

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nfs-es-provisioner          # change the name (and everywhere below)
  labels:
    app: nfs-es-provisioner
  namespace: logging            # change the namespace
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nfs-es-provisioner
  strategy:
    type: Recreate
  selector:
    matchLabels:
      app: nfs-es-provisioner
  template:
    metadata:
      labels:
        app: nfs-es-provisioner
    spec:
      serviceAccountName: nfs-es-provisioner
      containers:
        - name: nfs-es-provisioner
          image: quay.io/external_storage/nfs-client-provisioner:latest
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - name: nfs-client-root
              mountPath: /persistentvolumes
          env:
            - name: PROVISIONER_NAME
              value: nfs-es                  # must match the StorageClass provisioner field
            - name: NFS_SERVER
              value: 192.168.188.225    # NFS server address
            - name: NFS_PATH
              value: /data/es             # exported path
      volumes:
        - name: nfs-client-root
          nfs:
            server: 192.168.188.225             # change to match the values above
            path: /data/es

2. rbac

kind: ServiceAccount
apiVersion: v1
metadata:
  name: nfs-es-provisioner
  namespace: logging
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nfs-es-provisioner-runner
  namespace: logging
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: run-nfs-es-provisioner
  namespace: logging
subjects:
  - kind: ServiceAccount
    name: nfs-es-provisioner
    namespace: logging
roleRef:
  kind: ClusterRole
  name: nfs-es-provisioner-runner
  apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-es-provisioner
  namespace: logging
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: leader-locking-nfs-es-provisioner
  namespace: logging
subjects:
  - kind: ServiceAccount
    name: nfs-es-provisioner
    namespace: logging
roleRef:
  kind: Role
  name: leader-locking-nfs-es-provisioner
  apiGroup: rbac.authorization.k8s.io

3. storageclass

apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: nfs-es
provisioner: nfs-es

4. PVC

kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: nfs-es
  namespace: logging
spec:
  storageClassName: nfs-es
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 50Gi
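With the four manifests above in place, they can be applied and checked in one go (the file names here are assumptions; use whatever you saved them as):

kubectl apply -f nfs-es-rbac.yaml -f nfs-es-provisioner.yaml -f nfs-es-sc.yaml -f nfs-es-pvc.yaml

# the PVC should go from Pending to Bound once the provisioner has created the PV
kubectl -n logging get pods,pvc
kubectl get sc nfs-es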

2. cm

  • Files mounted into a Pod via subPath do not automatically pick up later changes to the ConfigMap.
# ConfigMap for ES
vim es-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: es-config
  namespace: logging
data:
  elasticsearch.yml: |
    cluster.name: "hsj-elasticsearch"
    node.name: "${POD_NAME}"
    network.host: 0.0.0.0
    discovery.seed_hosts: "es-svc-headless"
    cluster.initial_master_nodes: "elasticsearch-0,elasticsearch-1,elasticsearch-2"

3. svc

  • Headless service
# Headless service used for communication between the cluster members
vim es-headless.yaml
apiVersion: v1
kind: Service
metadata:
  name: es-svc-headless
  namespace: logging
  labels:
    k8s-app: elasticsearch
spec:
  selector:
    k8s-app: elasticsearch
  clusterIP: None
  ports:
  - name: in
    port: 9300
    protocol: TCP
  • ClusterIP service
# Service exposing the ES HTTP port
apiVersion: v1
kind: Service
metadata:
  name: es-svc
  namespace: logging
  labels:
    k8s-app: elasticsearch
spec:
  selector:
    k8s-app: elasticsearch
  ports:
  - name: out
    port: 9200
    protocol: TCP

4. sts

  • The ES cluster itself
vim es-sts.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: elasticsearch
  namespace: logging
  labels:
    k8s-app: elasticsearch
spec:
  replicas: 3
  serviceName: es-svc-headless
  selector:
    matchLabels:
      k8s-app: elasticsearch
  template:
    metadata:
      labels:
        k8s-app: elasticsearch
    spec:
      initContainers:
      - command:
        - /sbin/sysctl
        - -w
        - vm.max_map_count=262144
        image: alpine:3.6
        imagePullPolicy: IfNotPresent
        name: elasticsearch-logging-init
        resources: {}
        securityContext:
          privileged: true
      - name: fix-permissions
        image: alpine:3.6
        imagePullPolicy: IfNotPresent
        command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
        securityContext:
          privileged: true
        volumeMounts:
        - name: es-data-volume
          mountPath: /usr/share/elasticsearch/data
      containers:
      - name: elasticsearch
        image: docker.elastic.co/elasticsearch/elasticsearch:7.4.2 #docker.elastic.co/elasticsearch/elasticsearch:6.2.2
        imagePullPolicy: IfNotPresent
        env:
          - name: POD_NAME
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
        resources:
          limits:
            cpu: '2'
            memory: 4Gi
          requests:
            cpu: '2'
            memory: 2Gi
        ports:
        - containerPort: 9200
          name: db
          protocol: TCP
        - containerPort: 9300
          name: transport
          protocol: TCP
        volumeMounts:
          - name: es-data-volume
            mountPath: /usr/share/elasticsearch/data
          - name: es-config-volume
            mountPath: /usr/share/elasticsearch/config/elasticsearch.yml
            subPath: elasticsearch.yml
          - name: date-config
            mountPath: /etc/localtime
      volumes:
        - name: es-config-volume
          configMap:
            name: es-config
            items:
            - key: elasticsearch.yml
              path: elasticsearch.yml
        - name: date-config
          hostPath:
            path: /usr/share/zoneinfo/Asia/Shanghai
  volumeClaimTemplates:					# one PVC per replica (50Gi each), provisioned from the matching storage class nfs-es
  - metadata:
      name: es-data-volume
    spec:
      accessModes: ["ReadWriteOnce"]
      storageClassName: nfs-es
      resources:
        requests:
          storage: 50Gi 

5. Verification

# Check the ES services
kubectl -n logging get svc
NAME              TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)    AGE
es-svc            ClusterIP   10.110.146.42   <none>        9200/TCP   23m
es-svc-headless   ClusterIP   None            <none>        9300/TCP   33m

# curl the service a few times: the "name" field changes between answers, which shows the cluster is healthy
curl 10.110.146.42:9200
{
  "name" : "elasticsearch-2",
  "cluster_name" : "hsj-elasticsearch",
  "cluster_uuid" : "tgJyt9jBSkOGoFORNnenKw",
  "version" : {
    "number" : "7.4.2",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "2f90bbf7b93631e52bafb59b3b049cb44ec25e96",
    "build_date" : "2019-10-28T20:40:44.881551Z",
    "build_snapshot" : false,
    "lucene_version" : "8.2.0",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}

[root@k8s-master /data/apps/storageclass/es]# curl 10.110.146.42:9200
{
  "name" : "elasticsearch-0",
  "cluster_name" : "hsj-elasticsearch",
  "cluster_uuid" : "tgJyt9jBSkOGoFORNnenKw",
  "version" : {
    "number" : "7.4.2",
    "build_flavor" : "default",
    "build_type" : "docker",
    "build_hash" : "2f90bbf7b93631e52bafb59b3b049cb44ec25e96",
    "build_date" : "2019-10-28T20:40:44.881551Z",
    "build_snapshot" : false,
    "lucene_version" : "8.2.0",
    "minimum_wire_compatibility_version" : "6.8.0",
    "minimum_index_compatibility_version" : "6.0.0-beta1"
  },
  "tagline" : "You Know, for Search"
}

# Check cluster health with _cat/health (expect green); also note the time shown inside the pod.
curl 10.110.146.42:9200/_cat/health?v

# The node marked with * is the elected master
curl -XGET '10.110.146.42:9200/_cat/nodes?v'
ip          heap.percent ram.percent cpu load_1m load_5m load_15m node.role master name
10.244.2.69           17          37   1    0.21    0.27     0.27 dilm      -      elasticsearch-0
10.244.1.88           11          41   1    0.05    0.08     0.12 dilm      *      elasticsearch-1
10.244.0.25           16          53   3    0.40    0.58     0.58 dilm      -      elasticsearch-2

(二). kibana

  • Kibana is stateless, so a plain Deployment plus its Service and Ingress is enough.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  namespace: logging
  labels:
    app: kibana
spec:
  selector:
    matchLabels:
      app: "kibana"
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
      - name: kibana
        image: docker.elastic.co/kibana/kibana:7.4.2
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        env:
          - name: ELASTICSEARCH_HOSTS
            value: http://es-svc:9200
          - name: SERVER_NAME
            value: kibana-logging
          - name: SERVER_REWRITEBASEPATH
            value: "false"
        ports:
        - containerPort: 5601
---
apiVersion: v1
kind: Service
metadata:
  name: kibana
  namespace: logging
  labels:
    app: kibana
spec:
  ports:
  - port: 5601
    protocol: TCP
    targetPort: 5601
  type: ClusterIP
  selector:
    app: kibana
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: kibana
  namespace: logging
spec:
  rules:
  - host: hsj.kibana.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service: 
            name: kibana
            port:
              number: 5601
  • Once created, add the host entry and the UI is reachable from a browser, for example:
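A quick check from a client machine (a sketch; 192.168.188.220 stands in for whichever node your ingress controller is exposed on):

echo "192.168.188.220 hsj.kibana.com" >> /etc/hosts
curl -I http://hsj.kibana.com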

(三). Fluentd

  • Fluentd is the log collector. Every business node of the kubernetes cluster produces logs, so fluentd is deployed as a DaemonSet.
  • To keep resource usage under control, the DaemonSet uses a node selector, fluentd=true; only nodes carrying this label run fluentd.
  • The collector has to know which directories to scrape and where to ship the logs (the ES cluster), so the configuration is fairly large; it is mounted as a whole from a ConfigMap.

1. label

kubectl label node k8s-master fluentd=true
kubectl label node k8s-node01 fluentd=true
kubectl label node k8s-node02 fluentd=true

2. cm

  • The plugins used below are all off-the-shelf; see the fluentd documentation for details.
  • Additional configuration can later be dropped under /etc/fluent/config.d/.
# main config
apiVersion: v1
data:
  fluent.conf: |-
    # This is the root config file, which only includes components of the actual configuration
    #
    #  Do not collect fluentd's own logs to avoid infinite loops.
    <match fluent.**>
      @type null
    </match>

    @include /fluentd/etc/config.d/*.conf
kind: ConfigMap
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
  name: fluentd-es-config-main
  namespace: logging
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: fluentd-config
  namespace: logging
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
data:
  containers.input.conf: |-
    <source>
      @id fluentd-containers.log
      @type tail
      # where to scan for container logs
      path /var/log/containers/*.log
      pos_file /var/log/es-containers.log.pos
      time_format %Y-%m-%dT%H:%M:%S.%NZ
      localtime
      tag raw.kubernetes.*
      format json
      read_from_head false
    </source>
    # Detect exceptions in the log output and forward them as one log entry.
    # https://github.com/GoogleCloudPlatform/fluent-plugin-detect-exceptions
    <match raw.kubernetes.**>
      @id raw.kubernetes
      @type detect_exceptions
      remove_tag_prefix raw
      message log
      stream stream
      multiline_flush_interval 5
      max_bytes 500000
      max_lines 1000
    </match>
    # Concatenate multi-line logs
    <filter **>
      @id filter_concat
      @type concat
      key message
      multiline_end_regexp /\n$/
      separator ""
    </filter>
  output.conf: |-
    # Enriches records with Kubernetes metadata
    <filter kubernetes.**>
      @type kubernetes_metadata        # requires the fluentd kubernetes metadata (ruby) plugin in the image
    </filter>
    <match **>
      @id elasticsearch
      @type elasticsearch
      @log_level info
      include_tag_key true
      # hosts of the ES cluster
      hosts elasticsearch-0.es-svc-headless:9200,elasticsearch-1.es-svc-headless:9200,elasticsearch-2.es-svc-headless:9200
      #port 9200
      # indices use the default logstash- prefix; a custom index_name is possible, but I could not get it to work, so the default is kept
      logstash_format true
      #index_name kubernetes-%Y.%m.%d
      request_timeout    30s
      <buffer>
        @type file
        path /var/log/fluentd-buffers/kubernetes.system.buffer
        flush_mode interval
        retry_type exponential_backoff
        # number of flush threads
        flush_thread_count 8
        # flush at least every 5s, so fluentd adds no more than ~5s of delay
        flush_interval 5s
        retry_forever
        retry_max_interval 30
        # default chunk size is 8MB for memory buffers and 256MB for file buffers (this one is a file buffer);
        # ES only accepts requests up to about 100MB, so set this explicitly to stay well below that limit.
        chunk_limit_size 10M
        queue_limit_length 8
        overflow_action block
      </buffer>
    </match>

1. Notes on the buffer section

# Individual events are small, so for throughput and stability fluentd does not write each event to the output as soon as it is processed. Instead it buffers them (the buffer block above). Two concepts matter:

- buffer_chunk				# a block of processed events waiting to be sent to the destination; the chunk size is configurable
- buffer_queue				# the queue of chunks; its length is configurable

# The main tunables are:

- buffer_type				# buffer backend, file or memory
- buffer_chunk_limit		# size of each chunk, default 8MB
- buffer_queue_limit		# maximum queue length, default 256
- flush_interval			# how often a chunk is flushed
- retry_limit				# how many times a failed chunk is retried, default 17, after which the chunk is dropped
- retry_wait				# wait before retrying a chunk, default 1s, doubling on every failure (2s, then 4s, and so on)

3. fluentd-all

  • Includes the DaemonSet and its RBAC objects
apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd-es
  namespace: logging
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-es
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - ""
  resources:
  - "namespaces"
  - "pods"
  verbs:
  - "get"
  - "watch"
  - "list"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: fluentd-es
  labels:
    k8s-app: fluentd-es
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
  name: fluentd-es
  namespace: logging
  apiGroup: ""
roleRef:
  kind: ClusterRole
  name: fluentd-es
  apiGroup: ""
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: fluentd-es
  name: fluentd-es
  namespace: logging
spec:
  selector:
    matchLabels:
      k8s-app: fluentd-es
  template:
    metadata:
      labels:
        k8s-app: fluentd-es
    spec:
      containers:
      - image: quay.io/fluentd_elasticsearch/fluentd:v3.1.0
        imagePullPolicy: IfNotPresent
        name: fluentd-es
        resources:
          limits:
            memory: 1Gi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - mountPath: /var/log
          name: varlog
        - mountPath: /var/lib/docker/containers
          name: varlibdockercontainers
          readOnly: true
        - mountPath: /etc/fluent/config.d
          name: config-volume
      nodeSelector:
        fluentd: "true"
      securityContext: {}
      serviceAccount: fluentd-es
      serviceAccountName: fluentd-es
      volumes:
      - hostPath:
          path: /var/log
        name: varlog
      - hostPath:
          path: /var/lib/docker/containers
        name: varlibdockercontainers
      - configMap:
          defaultMode: 420
          name: fluentd-config
        name: config-volume

七. redis

  • Redis is deployed in cluster mode; a 3-master / 3-replica layout works the same way.
  • Reference

1. storageclass

  • Reuse the storage class pattern from above; only the names need to change.

2. cm

  • The redis configuration file is injected into the containers via a ConfigMap.
vim redis-cm.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: redis-conf
  namespace: redis
data:
  redis.conf: |
    appendonly yes
    cluster-enabled yes
    cluster-config-file /var/lib/redis/nodes.conf
    cluster-node-timeout 5000
    dir /var/lib/redis
    port 6379

3. headless

  • The redis cluster is created from a StatefulSet, which needs a headless service.
vim redis-headless.yaml
apiVersion: v1
kind: Service
metadata:
  name: redis-service
  namespace: redis
  labels:
    app: redis
spec:
  ports:
  - name: redis-port
    port: 6379
  clusterIP: None
  selector:
    app: redis
    appCluster: redis-cluster

4. sts

vim redis-sts.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: redis-app
  namespace: redis
  labels:
    k8s-app: redis
spec:
  serviceName: redis-service
  replicas: 3
  selector:
    matchLabels:
      app: redis
  template:
    metadata:
      labels:
        app: redis
        appCluster: redis-cluster
    spec:
      terminationGracePeriodSeconds: 20
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - redis
              topologyKey: kubernetes.io/hostname
      containers:
      - name: redis
        image: redis
        imagePullPolicy: IfNotPresent
        command:
          - "redis-server"
        args:
          - "/etc/redis/redis.conf"
          - "--protected-mode"
          - "no"
        resources:
          requests:
            cpu: "100m"
            memory: "100Mi"
        ports:
            - name: redis
              containerPort: 6379
              protocol: "TCP"
            - name: cluster
              containerPort: 16379
              protocol: "TCP"
        volumeMounts:
          - name: "redis-conf"
            mountPath: "/etc/redis"
          - name: "redis-data"
            mountPath: "/var/lib/redis"
      volumes:
      - name: "redis-conf"
        configMap:
          name: "redis-conf"
          items:
            - key: "redis.conf"
              path: "redis.conf"
  volumeClaimTemplates:
  - metadata:
      name: redis-data
    spec:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: nfs-redis
      resources:
        requests:
          storage: 300M

5. redis init

# Enter any redis pod; before initialisation the cluster reports fail
/usr/local/bin/redis-cli -c
root@redis-app-0:/data# /usr/local/bin/redis-cli -c
127.0.0.1:6379> CLUSTER info
cluster_state:fail
cluster_slots_assigned:0
cluster_slots_ok:0
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:1
cluster_size:0
cluster_current_epoch:0
cluster_my_epoch:0
cluster_stats_messages_sent:0
cluster_stats_messages_received:
  • Start a helper container and use it to initialise the redis cluster
# Run an interactive CentOS 7 pod for the redis-trib tooling
kubectl run -i --tty centos-7 --image=centos:7 --restart=Never /bin/bash

# epel
touch /etc/yum.repos.d/epel.repo

vi /etc/yum.repos.d/epel.repo
[epel]
name=Extra Packages for Enterprise Linux 7 - $basearch
baseurl=https://mirrors.tuna.tsinghua.edu.cn/epel/7/$basearch
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch
failovermethod=priority
enabled=1
gpgcheck=0
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7

# Install the tooling
yum -y install redis-trib.noarch bind-utils

# Build the cluster. Pod DNS follows $(podname).$(service name).$(namespace).svc.cluster.local; replace `testing` below with the namespace redis actually runs in (here: redis). Since this is not a 3-master/3-replica layout, --replicas is 0; confirm with yes when prompted.
redis-trib create --replicas 0 `dig +short redis-app-0.redis-service.testing.svc.cluster.local`:6379 `dig +short redis-app-1.redis-service.testing.svc.cluster.local`:6379 `dig +short redis-app-2.redis-service.testing.svc.cluster.local`:6379

# Key lines of the output, showing the cluster came up correctly
[OK] All nodes agree about slots configuration.
>>> Check for open slots...
>>> Check slots coverage...
[OK] All 16384 slots covered.

# Enter any redis pod and check again; cluster_state:ok means initialisation succeeded.
root@redis-app-0:/data# /usr/local/bin/redis-cli -c
127.0.0.1:6379> cluster info
cluster_state:ok
cluster_slots_assigned:16384
cluster_slots_ok:16384
cluster_slots_pfail:0
cluster_slots_fail:0
cluster_known_nodes:3
cluster_size:3
cluster_current_epoch:3
cluster_my_epoch:1
cluster_stats_messages_ping_sent:519
cluster_stats_messages_pong_sent:540
cluster_stats_messages_sent:1059
cluster_stats_messages_ping_received:538
cluster_stats_messages_pong_received:519
cluster_stats_messages_meet_received:2
cluster_stats_messages_received:1059
127.0.0.1:6379> 

6. svc

  • Service for access from inside the cluster
apiVersion: v1
kind: Service
metadata:
  name: redis-access-service
  namespace: redis
  labels:
    app: redis
spec:
  ports:
  - name: redis-port
    protocol: "TCP"
    port: 6379
    targetPort: 6379
  selector:
    app: redis
    appCluster: redis-cluster
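A quick way to exercise the service from inside the cluster (a sketch; the throwaway pod name is arbitrary, and -n should point at the namespace redis actually runs in):

kubectl -n redis run redis-client --rm -it --image=redis -- \
  redis-cli -c -h redis-access-service -p 6379
# then, inside the prompt:  set hello world  /  get hello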

八. mysql

Reference

  • The storage class was created earlier and is reused directly.
# Throwaway container, removed on exit
docker run -it --rm gcr.io/google-samples/xtrabackup:1.0 bash

1. svc

# Headless service for stable DNS entries of StatefulSet members.
apiVersion: v1
kind: Service
metadata:
  name: mysql
  namespace: testing
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
# Client service for connecting to any MySQL instance for reads.
# For writes, you must instead connect to the primary: mysql-0.mysql.
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  namespace: testing
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql

2. cm

apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  namespace: testing
  labels:
    app: mysql
data:
  primary.cnf: |
    # Apply this config only on the primary.
    [mysqld]
    log-bin    
  replica.cnf: |
    # Apply this config only on replicas.
    [mysqld]
    super-read-only 

3. sts

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
  namespace: testing
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: mysql:5.7
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/primary.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/replica.cnf /mnt/conf.d/
          fi          
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: gcr.io/google-samples/xtrabackup:1.0
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on primary (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql          
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: mysql:5.7
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 500m
            memory: 1Gi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1
      - name: xtrabackup
        image: gcr.io/google-samples/xtrabackup:1.0
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql

          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing replica. (Need to remove the trailing semicolon!)
            cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_slave_info xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from primary. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm -f xtrabackup_binlog_info xtrabackup_slave_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi

          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done

            echo "Initializing replication from clone position"
            mysql -h 127.0.0.1 \
                  -e "$(, \
                          MASTER_HOST='mysql-0.mysql', \
                          MASTER_USER='root', \
                          MASTER_PASSWORD='', \
                          MASTER_CONNECT_RETRY=10; \
                        START SLAVE;" || exit 1
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
          fi

          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"          
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 80Gi

4. Cluster test

# Start a throwaway client pod and write some test data (the SQL below is the standard example from the upstream MySQL StatefulSet tutorial)
kubectl --namespace=testing run mysql-client --image=mysql:5.7 -i --rm --restart=Never -- \
  mysql -h mysql-0.mysql <<EOF
CREATE DATABASE test;
CREATE TABLE test.messages (message VARCHAR(250));
INSERT INTO test.messages VALUES ('hello');
EOF
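To confirm that the data is visible through the read service, query mysql-read from another throwaway pod (this mirrors the upstream StatefulSet tutorial; adjust the namespace if yours differs):

kubectl --namespace=testing run mysql-client-read --image=mysql:5.7 -i -t --rm --restart=Never -- \
  mysql -h mysql-read -e "SELECT * FROM test.messages"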

九. rabbitmq

Reference

1. secret

  • A random Erlang cookie can be stored as a secret.
  • I hard-coded the cookie in the StatefulSet instead, so this step is optional; either approach works.
echo $(openssl rand -base64 32) > erlang.cookie
kubectl create secret generic erlang.cookie --from-file=erlang.cookie -n testing

2. cm

apiVersion: v1
kind: ConfigMap
metadata:
  name: rabbitmq-config
  namespace: testing
data:
  enabled_plugins: |
      [rabbitmq_management,rabbitmq_peer_discovery_k8s].
  rabbitmq.conf: |
      ## Cluster formation. See https://www.rabbitmq.com/cluster-formation.html to learn more.
      cluster_formation.peer_discovery_backend  = rabbit_peer_discovery_k8s
      cluster_formation.k8s.host = kubernetes.default.svc.cluster.local
      ## Should RabbitMQ node name be computed from the pod's hostname or IP address?
      ## IP addresses are not stable, so using [stable] hostnames is recommended when possible.
      ## Set to "hostname" to use pod hostnames.
      ## When this value is changed, so should the variable used to set the RABBITMQ_NODENAME
      ## environment variable.
      cluster_formation.k8s.address_type = hostname
      ## How often should node cleanup checks run?
      cluster_formation.node_cleanup.interval = 30
      ## Set to false if automatic removal of unknown/absent nodes
      ## is desired. This can be dangerous, see
      ##  * https://www.rabbitmq.com/cluster-formation.html#node-health-checks-and-cleanup
      ##  * https://groups.google.com/forum/#!msg/rabbitmq-users/wuOfzEywHXo/k8z_HWIkBgAJ
      cluster_formation.node_cleanup.only_log_warning = true
      cluster_partition_handling = autoheal
      ## See https://www.rabbitmq.com/ha.html#master-migration-data-locality
      queue_master_locator=min-masters
      ## This is just an example.
      ## This enables remote access for the default user with well known credentials.
      ## Consider deleting the default user and creating a separate user with a set of generated
      ## credentials instead.
      ## Learn more at https://www.rabbitmq.com/access-control.html#loopback-users
      loopback_users.guest = false

3. rbac

apiVersion: v1
kind: ServiceAccount
metadata:
  name: rabbitmq
  namespace: testing
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rabbitmq-peer-discovery-rbac
  namespace: testing
rules:
- apiGroups: [""]
  resources: ["endpoints"]
  verbs: ["get"]
# - apiGroups: [""]
#   resources: ["events"]
#   verbs: ["create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: rabbitmq-peer-discovery-rbac
  namespace: testing
subjects:
- kind: ServiceAccount
  name: rabbitmq
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: rabbitmq-peer-discovery-rbac

4. sts

apiVersion: apps/v1
# See the Prerequisites section of https://www.rabbitmq.com/cluster-formation.html#peer-discovery-k8s.
kind: StatefulSet
metadata:
  name: rabbitmq
  namespace: testing
spec:
  serviceName: rabbitmq
  # Three nodes is the recommended minimum. Some features may require a majority of nodes
  # to be available.
  replicas: 3
  selector:
    matchLabels:
      app: rabbitmq
  template:
    metadata:
      labels:
        app: rabbitmq
    spec:
      serviceAccountName: rabbitmq
      terminationGracePeriodSeconds: 10
      nodeSelector:
        # Use Linux nodes in a mixed OS kubernetes cluster.
        # Learn more at https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#kubernetes-io-os
        kubernetes.io/os: linux
      containers:
      - name: rabbitmq-k8s
        image: rabbitmq:3.8.19
        volumeMounts:
          - name: config-volume
            mountPath: /etc/rabbitmq
          - name: rabbitmq-data
            mountPath: /var/lib/rabbitmq
        # Learn more about what ports various protocols use
        # at https://www.rabbitmq.com/networking.html#ports
        ports:
          - name: http
            protocol: TCP
            containerPort: 15672
          - name: amqp
            protocol: TCP
            containerPort: 5672
        livenessProbe:
          exec:
            # This is just an example. There is no "one true health check" but rather
            # several rabbitmq-diagnostics commands that can be combined to form increasingly comprehensive
            # and intrusive health checks.
            # Learn more at https://www.rabbitmq.com/monitoring.html#health-checks.
            #
            # Stage 2 check:
            command: ["rabbitmq-diagnostics", "status"]
          initialDelaySeconds: 60
          # See https://www.rabbitmq.com/monitoring.html for monitoring frequency recommendations.
          periodSeconds: 60
          timeoutSeconds: 15
        readinessProbe:
          exec:
            # This is just an example. There is no "one true health check" but rather
            # several rabbitmq-diagnostics commands that can be combined to form increasingly comprehensive
            # and intrusive health checks.
            # Learn more at https://www.rabbitmq.com/monitoring.html#health-checks.
            #
            # Stage 2 check:
            command: ["rabbitmq-diagnostics", "status"]
            # To use a stage 4 check:
            # command: ["rabbitmq-diagnostics", "check_port_connectivity"]
          initialDelaySeconds: 20
          periodSeconds: 60
          timeoutSeconds: 10
        imagePullPolicy: IfNotPresent
        env:
          - name: MY_POD_NAME
            valueFrom:
              fieldRef:
                apiVersion: v1
                fieldPath: metadata.name
          - name: MY_POD_NAMESPACE
            valueFrom:
              fieldRef:
                fieldPath: metadata.namespace
          - name: RABBITMQ_USE_LONGNAME
            value: "true"
          # See a note on cluster_formation.k8s.address_type in the config file section
          - name: K8S_SERVICE_NAME
            value: rabbitmq
          - name: RABBITMQ_NODENAME
            value: rabbit@$(MY_POD_NAME).$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local
          - name: K8S_HOSTNAME_SUFFIX
            value: .$(K8S_SERVICE_NAME).$(MY_POD_NAMESPACE).svc.cluster.local
          - name: RABBITMQ_ERLANG_COOKIE
            value: "mycookie"
      volumes:
        - name: config-volume
          configMap:
            name: rabbitmq-config
            items:
            - key: rabbitmq.conf
              path: rabbitmq.conf
            - key: enabled_plugins
              path: enabled_plugins
  volumeClaimTemplates:
  - metadata:
      name: rabbitmq-data
    spec:
      accessModes: ["ReadWriteOnce"]
      storageClassName: testing-storageclass
      resources:
        requests:
          storage: 10Gi

5. svc

kind: Service
apiVersion: v1
metadata:
  namespace: testing
  name: rabbitmq
  labels:
    app: rabbitmq
    type: LoadBalancer
spec:
  type: NodePort
  ports:
   - name: http
     protocol: TCP
     port: 15672
     targetPort: 15672
     nodePort: 
   - name: amqp
     protocol: TCP
     port: 5672
     targetPort: 5672
     nodePort: 
  selector:
    app: rabbitmq

6. Forming the cluster

  • The nodes are joined manually here (yes, by hand).

  • Replace mycookie in --erlang-cookie mycookie with your own cookie.

# join rabbitmq-1 into the cluster
kubectl -n testing exec -ti rabbitmq-1 -- rabbitmqctl --erlang-cookie mycookie stop_app
kubectl -n testing exec -ti rabbitmq-1 -- rabbitmqctl --erlang-cookie mycookie join_cluster rabbit@rabbitmq-0
kubectl -n testing exec -ti rabbitmq-1 -- rabbitmqctl --erlang-cookie mycookie start_app

# join rabbitmq-2 into the cluster
kubectl -n testing exec -ti rabbitmq-2 -- rabbitmqctl --erlang-cookie mycookie stop_app
kubectl -n testing exec -ti rabbitmq-2 -- rabbitmqctl --erlang-cookie mycookie join_cluster rabbit@rabbitmq-0
kubectl -n testing exec -ti rabbitmq-2 -- rabbitmqctl --erlang-cookie mycookie start_app

# check the cluster status
kubectl -n testing exec -ti rabbitmq-0 -- rabbitmqctl --erlang-cookie mycookie cluster_status

7. ingress

  • Add the host entry and the management UI is reachable from a browser.
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: rabbitmq
  namespace: testing
spec:
  rules:
    - host: hsj.rabbitmq.com
      http:
        paths:
          - path: /
            backend:
              serviceName: rabbitmq
              servicePort: 15672

十. kafka

Reference 1

Reference 2

1. zk-all-in-one

apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  namespace: testing
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  namespace: testing
  labels:
    app: zk
spec:
  ports:
  - port: 2181
    name: client
  selector:
    app: zk
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
  namespace: testing
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
  namespace: testing
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: zk
    spec:
     # nodeSelector:
     #   travis.io/schedule-only: "kafka"
      tolerations:
     # - key: "travis.io/schedule-only"
     #   operator: "Equal"
     #   value: "kafka"
     #   effect: "NoSchedule"
     # - key: "travis.io/schedule-only"
     #   operator: "Equal"
     #   value: "kafka"
     #   effect: "NoExecute"
     #   tolerationSeconds: 3600
     # - key: "travis.io/schedule-only"
     #   operator: "Equal"
     #   value: "kafka"
     #   effect: "PreferNoSchedule"
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                    - zk
              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: IfNotPresent
        image: k8s.gcr.io/kubernetes-zookeeper:1.0-3.4.10
        resources:
          requests:
            memory: "1G"
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
      #This annotation is still working, however it will become fully deprecated in a future Kubernetes release.just use storageClassName.
      # annotations:
      #   volume.beta.kubernetes.io/storage-class: nfs-storage
      #   volume.beta.kubernetes.io/storage-provisioner: fuseim.pri/ifs
    spec:
      accessModes: [ "ReadWriteMany" ]
      storageClassName: testing-storageclass
      resources:
        requests:
          storage: 10G

2. Cluster check

# Check each member in turn
kubectl -n testing exec -ti zk-2 -- zkServer.sh status

# Generated configuration
kubectl -n testing exec zk-0 -- cat /opt/zookeeper/conf/zoo.cfg

# Create a key on one member
kubectl -n testing exec zk-0 -- zkCli.sh create /hello world

# Read it back from another member
kubectl -n testing exec zk-1 -- zkCli.sh get /hello

2. kafka

1. kafka-all-in-one

---
apiVersion: v1
kind: Service
metadata:
  name: kafka-svc
  namespace: testing
  labels:
    app: kafka
spec:
  ports:
    - port: 9092
      name: server
  clusterIP: None
  selector:
    app: kafka
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
  name: kafka-pdb
  namespace: testing
spec:
  selector:
    matchLabels:
      app: kafka
  minAvailable: 2
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: kafka
  namespace: testing
spec:
  selector:
    matchLabels:
      app: kafka
  serviceName: kafka-svc
  replicas: 3
  template:
    metadata:
      labels:
        app: kafka
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - kafka
                  - key: "kubernetes.io/hostname"
                    operator: NotIn
                    values:
                      - master
              topologyKey: "kubernetes.io/hostname"
      terminationGracePeriodSeconds: 300
      containers:
        - name: k8s-kafka
          imagePullPolicy: IfNotPresent
          image: fastop/kafka:2.2.0
          resources:
            requests:
              memory: "600Mi"
              cpu: 500m
          ports:
            - containerPort: 9092
              name: server
          command:
            - sh
            - -c
            # NOTE: in the command below, change zookeeper.connect to your own zk StatefulSet name, headless service and namespace,
            # e.g. zk-0.zk-hs.<namespace>.svc.cluster.local:2181
            - "exec kafka-server-start.sh /opt/kafka/config/server.properties --override broker.id=${HOSTNAME##*-} \
          --override listeners=PLAINTEXT://:9092 \
          --override zookeeper.connect=zk-0.zk-hs.testing.svc.cluster.local:2181,zk-1.zk-hs.testing.svc.cluster.local:2181,zk-2.zk-hs.testing.svc.cluster.local:2181 \
          --override log.dir=/var/lib/kafka \
          --override auto.create.topics.enable=true \
          --override auto.leader.rebalance.enable=true \
          --override background.threads=10 \
          --override compression.type=producer \
          --override delete.topic.enable=false \
          --override leader.imbalance.check.interval.seconds=300 \
          --override leader.imbalance.per.broker.percentage=10 \
          --override log.flush.interval.messages=9223372036854775807 \
          --override log.flush.offset.checkpoint.interval.ms=60000 \
          --override log.flush.scheduler.interval.ms=9223372036854775807 \
          --override log.retention.bytes=-1 \
          --override log.retention.hours=168 \
          --override log.roll.hours=168 \
          --override log.roll.jitter.hours=0 \
          --override log.segment.bytes=1073741824 \
          --override log.segment.delete.delay.ms=60000 \
          --override message.max.bytes=1000012 \
          --override min.insync.replicas=1 \
          --override num.io.threads=8 \
          --override num.network.threads=3 \
          --override num.recovery.threads.per.data.dir=1 \
          --override num.replica.fetchers=1 \
          --override offset.metadata.max.bytes=4096 \
          --override offsets.commit.required.acks=-1 \
          --override offsets.commit.timeout.ms=5000 \
          --override offsets.load.buffer.size=5242880 \
          --override offsets.retention.check.interval.ms=600000 \
          --override offsets.retention.minutes=1440 \
          --override offsets.topic.compression.codec=0 \
          --override offsets.topic.num.partitions=50 \
          --override offsets.topic.replication.factor=3 \
          --override offsets.topic.segment.bytes=104857600 \
          --override queued.max.requests=500 \
          --override quota.consumer.default=9223372036854775807 \
          --override quota.producer.default=9223372036854775807 \
          --override replica.fetch.min.bytes=1 \
          --override replica.fetch.wait.max.ms=500 \
          --override replica.high.watermark.checkpoint.interval.ms=5000 \
          --override replica.lag.time.max.ms=10000 \
          --override replica.socket.receive.buffer.bytes=65536 \
          --override replica.socket.timeout.ms=30000 \
          --override request.timeout.ms=30000 \
          --override socket.receive.buffer.bytes=102400 \
          --override socket.request.max.bytes=104857600 \
          --override socket.send.buffer.bytes=102400 \
          --override unclean.leader.election.enable=true \
          --override zookeeper.session.timeout.ms=6000 \
          --override zookeeper.set.acl=false \
          --override broker.id.generation.enable=true \
          --override connections.max.idle.ms=600000 \
          --override controlled.shutdown.enable=true \
          --override controlled.shutdown.max.retries=3 \
          --override controlled.shutdown.retry.backoff.ms=5000 \
          --override controller.socket.timeout.ms=30000 \
          --override default.replication.factor=1 \
          --override fetch.purgatory.purge.interval.requests=1000 \
          --override group.max.session.timeout.ms=300000 \
          --override group.min.session.timeout.ms=6000 \
          --override inter.broker.protocol.version=2.2.0 \
          --override log.cleaner.backoff.ms=15000 \
          --override log.cleaner.dedupe.buffer.size=134217728 \
          --override log.cleaner.delete.retention.ms=86400000 \
          --override log.cleaner.enable=true \
          --override log.cleaner.io.buffer.load.factor=0.9 \
          --override log.cleaner.io.buffer.size=524288 \
          --override log.cleaner.io.max.bytes.per.second=1.7976931348623157E308 \
          --override log.cleaner.min.cleanable.ratio=0.5 \
          --override log.cleaner.min.compaction.lag.ms=0 \
          --override log.cleaner.threads=1 \
          --override log.cleanup.policy=delete \
          --override log.index.interval.bytes=4096 \
          --override log.index.size.max.bytes=10485760 \
          --override log.message.timestamp.difference.max.ms=9223372036854775807 \
          --override log.message.timestamp.type=CreateTime \
          --override log.preallocate=false \
          --override log.retention.check.interval.ms=300000 \
          --override max.connections.per.ip=2147483647 \
          --override num.partitions=4 \
          --override producer.purgatory.purge.interval.requests=1000 \
          --override replica.fetch.backoff.ms=1000 \
          --override replica.fetch.max.bytes=1048576 \
          --override replica.fetch.response.max.bytes=10485760 \
          --override reserved.broker.max.id=1000 "
          env:
            - name: KAFKA_HEAP_OPTS
              value : "-Xmx512M -Xms512M"
            - name: KAFKA_OPTS
              value: "-Dlogging.level=INFO"
          volumeMounts:
            - name: kafka
              mountPath: /var/lib/kafka
          readinessProbe:
            tcpSocket:
              port: 9092
            timeoutSeconds: 1
            initialDelaySeconds: 5
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
    - metadata:
        name: kafka
      spec:
        accessModes: [ "ReadWriteMany" ]
        storageClassName: testing-storageclass
        resources:
          requests:
            storage:  500Mi

2. Cluster check

# Enter any kafka pod
kubectl -n testing exec -it kafka-0 -- bash

# Create a topic named test (adjust the namespace in the zookeeper addresses to your own)
kafka-topics.sh --create --topic test --zookeeper zk-0.zk-hs.testing.svc.cluster.local:2181,zk-1.zk-hs.testing.svc.cluster.local:2181,zk-2.zk-hs.testing.svc.cluster.local:2181 --partitions 3 --replication-factor 2

# List topics (same namespace note applies)
kafka-topics.sh --list --zookeeper zk-0.zk-hs.testing.svc.cluster.local:2181,zk-1.zk-hs.testing.svc.cluster.local:2181,zk-2.zk-hs.testing.svc.cluster.local:2181

# Start a console producer
kafka-console-producer.sh --topic test --broker-list localhost:9092

# In a second terminal, enter another kafka pod
kubectl -n testing exec -it kafka-1 -- bash

# Start a console consumer and check that it receives what the producer sends
kafka-console-consumer.sh --topic test --bootstrap-server localhost:9092

十一. MongoDB cluster

1. Build the sidecar image

FROM node:lts-slim
LABEL maintainer Markus Wiegand 

ENV NODE_ENV=production

WORKDIR /opt/k8s-mongo-sidecar

COPY package.json package-lock.json /opt/k8s-mongo-sidecar/

RUN npm install

COPY ./src /opt/k8s-mongo-sidecar/src

CMD ["npm", "start"]
# Files required by the Dockerfile (Baidu netdisk link below)
链接:https://pan.baidu.com/s/1dCGImDFjqMMHBXm472q3tA 
提取码:g72j

# Build the sidecar image
docker build . -t morphy/k8s-mongo-sidecar

2. mongo-all-in-one

  • The members are reachable through the headless service.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: mongo
  namespace: public-toilet
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: read-pod-service-endpoint
  namespace: public-toilet
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - services
      - endpoints
    verbs:
      - get
      - list
      - watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: system:serviceaccount:default:mongo
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: read-pod-service-endpoint
subjects:
  - kind: ServiceAccount
    name: mongo
    namespace: public-toilet
---
apiVersion: v1
kind: Service
metadata:
  name: mongo
  namespace: public-toilet
  labels:
    name: mongo
spec:
  ports:
  - port: 27017
    targetPort: 27017
  clusterIP: None
  selector:
    role: mongo
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mongo
  namespace: public-toilet
spec:
  serviceName: "mongo"
  replicas: 3
  selector:
    matchLabels:
      role: mongo
  template:
    metadata:
      labels:
        role: mongo
        environment: test
    spec:
      serviceAccountName: mongo
      automountServiceAccountToken: true
      terminationGracePeriodSeconds: 30
      containers:
        - name: mongo
          image: mongo:4
          command:
            - mongod
          args:
            - "--replSet=rs0"
            - "--bind_ip=0.0.0.0"
          ports:
            - containerPort: 27017
          volumeMounts:
            - name: mongo-persistent-storage
              mountPath: /data/db
        - name: mongo-sidecar
          image: morphy/k8s-mongo-sidecar
          env:
            - name: KUBERNETES_POD_LABELS
              value: "role=mongo,environment=test"
            - name: KUBERNETES_SERVICE_NAME
              value: "mongo"
  volumeClaimTemplates:
  - metadata:
      name: mongo-persistent-storage
    spec:
      storageClassName: testing-storageclass
      accessModes: [ "ReadWriteOnce" ]
      resources:
        requests:
          storage: 10Gi

4. Verification

kubectl -n public-toilet exec -ti mongo-0 -- mongo

# Inside the mongo shell, check the replica set status
rs0:PRIMARY> db
test
rs0:PRIMARY> use test
switched to db test
rs0:PRIMARY> rs.status()
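A simple write/read round trip on the primary (a sketch; the collection name is arbitrary):

rs0:PRIMARY> db.messages.insert({msg: "hello from k8s"})
rs0:PRIMARY> db.messages.find()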

十二. Migrating a project to k8s

1. Adding hosts entries to containers

    spec:
      hostAliases:
      - ip: "192.168.188.230"
        hostnames:
        - "dev.nacos.com"

3. Maven environment

# 链接: https://pan.baidu.com/s/1z9dRGv_4bS1uxBtk5jsZ2Q 提取码: 3gva


yum install -y java-devel

# Adjust the Maven settings.xml (local repository and Aliyun mirrors)
settings.xml

<?xml version="1.0" encoding="UTF-8"?>
<settings xmlns="http://maven.apache.org/SETTINGS/1.0.0"
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/SETTINGS/1.0.0 http://maven.apache.org/xsd/settings-1.0.0.xsd">

  <localRepository>D:\opt\maven-repo</localRepository>

  <mirrors>
    <mirror>
      <id>alimaven</id>
      <mirrorOf>central</mirrorOf>
      <name>aliyun maven</name>
      <url>http://maven.aliyun.com/nexus/content/repositories/central/</url>
    </mirror>
    <mirror>
      <id>nexus-aliyun</id>
      <mirrorOf>*</mirrorOf>
      <name>Nexus aliyun</name>
      <url>http://maven.aliyun.com/nexus/content/groups/public</url>
    </mirror>
  </mirrors>
</settings>
(二). groovy

  • Parse the information in the YAML files and flesh out the Jenkinsfile accordingly.

1. groovy项目

  • Create the shared-library repository
git init
git config --global user.name "Administrator"
git config --global user.email "[email protected]"
git remote add origin http://hsj.gitlab.com/root/shared-library.git
git add .
git commit -m "Initial commit"
git push -u origin master

2. jenkins配置

  • [Manage Jenkins] -> [Configure System] -> [Global Pipeline Libraries]
# Add the library (and the git credential it needs)
Library Name: test-groovy
Default Version: master
Source Code Management: Git

(screenshot: images/image-20210728232043178.png)

3. Groovy code

1. vars

2.

4. jenkinsfile

@Library('test-devops') _

pipeline {
    agent { label 'jnlp-slave'}

    stages {
        stage('hello-devops') {
            steps {
                script {
                    // The devops methods live under vars/ in the shared library; vars mainly holds the interface methods called from pipelines.
                    devops.hello("树哥").sayHi().answer().sayBye()
                }
            }
        } 
    }
    post {
        success { 
            echo 'Congratulations!'
        }
        failure {
            echo 'Oh no!'
        }
        always { 
            echo 'I will always say Hello again!'
        }
    }
}
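For reference, a minimal sketch of what vars/devops.groovy could look like to support the chained call above; the method names come from the Jenkinsfile, while the bodies are placeholders rather than the library's real implementation:

// vars/devops.groovy  (hypothetical sketch)
def hello(String name) {
    echo "Hello, ${name}"
    return this            // returning this is what makes the calls chainable
}

def sayHi()  { echo 'Hi!';     return this }
def answer() { echo 'Answer.'; return this }
def sayBye() { echo 'Bye!';    return this }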
  • Methods are defined in groovy and called from the pipeline.
# Create the harbor credential first
harbor-auth

十三. RBAC

  • Role management is split into Role and ClusterRole. A newly created role has no permissions until rules are bound to it: a Role is bound with a RoleBinding, a ClusterRole with a ClusterRoleBinding; only after binding can the role be used.

  • This is how cluster permissions are separated between teams: in production only the operations team may change resource configuration, while testers and developers only get read access.

1. 添加规则

# --verb values (the usual CRUD verbs; use --verb=* to grant every verb on a resource):
create, get, delete, list, update, edit, watch, exec

# --resource values (which resources the role may touch; use --resource=* to cover every resource):
pods, configmaps, deployments, nodes, secrets, namespaces, daemonsets
# Inspect the permissions of the built-in cluster-admin role
kubectl get clusterrole cluster-admin -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  annotations:
    rbac.authorization.kubernetes.io/autoupdate: "true"
  creationTimestamp: "2020-11-12T20:29:51Z"
  labels:
    kubernetes.io/bootstrapping: rbac-defaults
  name: cluster-admin
  resourceVersion: "40"
  selfLink: /apis/rbac.authorization.k8s.io/v1/clusterroles/cluster-admin
  uid: a890e0da-500f-4c5e-aaf8-a37a3d4e4592
rules:
- apiGroups:
  - '*'
  resources:
  - '*'
  verbs:
  - '*'
- nonResourceURLs:
  - '*'
  verbs:
  - '*'

2. Creating a role

# Generate the role YAML; multiple verbs or resources are comma-separated
kubectl create role ${ROLE_NAME} --verb=list,get,watch --resource=pods,deployments --dry-run=client -o yaml > role.yaml

# Create the role
kubectl apply -f role.yaml

# Bind the role to a user with a RoleBinding (a RoleBinding is itself a resource object)
kubectl create rolebinding ${ROLEBINDING_NAME} --user=${USER_NAME} --role=${ROLE_NAME} --dry-run=client -o yaml
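Whether the binding works can be checked without switching kubeconfigs by impersonating the user (variable names match the commands above):

kubectl auth can-i list pods --as=${USER_NAME}        # expect: yes
kubectl auth can-i delete pods --as=${USER_NAME}      # expect: no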

3. Roles restricted to a specific namespace


十四. etcd backup and restore

1. Backup strategy
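A minimal backup sketch, assuming etcdctl v3 is available on the master and the default kubeadm certificate paths described in the next section:

ETCDCTL_API=3 etcdctl snapshot save /data/backup/etcd-$(date +%F).db \
  --endpoints=https://127.0.0.1:2379 \
  --cacert=/etc/kubernetes/pki/etcd/ca.crt \
  --cert=/etc/kubernetes/pki/etcd/healthcheck-client.crt \
  --key=/etc/kubernetes/pki/etcd/healthcheck-client.key

# verify the snapshot
ETCDCTL_API=3 etcdctl snapshot status /data/backup/etcd-$(date +%F).db --write-out=table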

十五. Managing k8s certificates

  • Most certificates in a kubeadm cluster are only valid for 365 days, so they have to be managed deliberately.
  • kubeadm puts all certificates under /etc/kubernetes/pki/, with the etcd certificates under /etc/kubernetes/pki/etcd/.
  • A default kubeadm install ships 22 certificate files. Because kube-proxy runs as a pod, it authenticates to kube-apiserver through its service account and needs no certificate pair of its own; only if kube-proxy ran as a daemon directly on the host would one have to be signed from the cluster root CA.

Reference

(一). Certificate overview

  • The cluster root CA is the anchor: the apiserver and kubelet certificates are issued from it and managed by kubernetes itself, so the certificates we mainly need to issue ourselves are the root ones.
  • Communication between kubernetes components is bidirectional: the kubelet calls kube-apiserver and kube-apiserver also calls the kubelet, so both sides need a root CA plus server and client certificates issued from it. kube-apiserver explicitly configures its HTTPS server certificate and a client certificate whose CN carries the user name. The kubelet is usually started with only the CA certificate and no explicit server certificate: a server certificate must name an address or hostname, and while the kube-apiserver address is known up front and rarely changes, nodes join and leave the cluster, so their IPs cannot all be known in advance. The kubelet therefore generates its own server certificate from local host information and stores it in the configured cert-dir.

1. Cluster root certificates

  • 6 certificate files.

  • There are three root CA pairs, issued by kubeadm with a 10-year lifetime:

    • ca
    • etcd-ca
    • front-proxy-ca
  • Almost every other certificate is issued from one of these three.

# 1. Cluster root CA, valid for 10 years
ll /etc/kubernetes/pki/ca*
ca.crt 		# root certificate
ca.key		# private key

# Issued from the root CA:
# kube-apiserver server certificate
ll /etc/kubernetes/pki/apiserver.*
apiserver.crt 
apiserver.key

# kubelet client certificate, used by kube-apiserver when it initiates requests to the kubelet
ll /etc/kubernetes/pki/apiserver-kubelet-client.*
apiserver-kubelet-client.crt
apiserver-kubelet-client.key

2. Aggregation layer certificates

  • Also called the aggregator (front-proxy) certificates.

  • 4 certificate files; 2 of them are client certificates issued from the kube-apiserver proxy root CA.

  • Used for SSL access when kube-apiserver is reached through the aggregation/proxy layer:

    • the proxy receives the request over HTTP and forwards it to kube-apiserver.
# 1. kube-apiserver proxy root CA, valid for 10 years
/etc/kubernetes/pki/front-proxy-ca.crt
/etc/kubernetes/pki/front-proxy-ca.key

# 2. Client certificate issued from the proxy root CA, used to authenticate proxied users to kube-apiserver
/etc/kubernetes/pki/front-proxy-client.crt
/etc/kubernetes/pki/front-proxy-client.key

3. etcd certificates

  • 10 certificate files; 8 of them are issued from the etcd root CA.
# 1. etcd cluster root CA, valid for 10 years; signs every certificate etcd uses
/etc/kubernetes/pki/etcd/ca.crt
/etc/kubernetes/pki/etcd/ca.key

# 2. Server certificate held by the etcd server, issued from the etcd root CA
/etc/kubernetes/pki/etcd/server.crt
/etc/kubernetes/pki/etcd/server.key

# 3. Peer certificate for communication between etcd members, issued from the etcd root CA
# ("peer" is how one etcd member refers to another member of the same cluster)
/etc/kubernetes/pki/etcd/peer.crt
/etc/kubernetes/pki/etcd/peer.key

# 4. Client certificate used by the pod liveness probe, issued from the etcd root CA
/etc/kubernetes/pki/etcd/healthcheck-client.crt
/etc/kubernetes/pki/etcd/healthcheck-client.key

# 5. Client certificate used by kube-apiserver for mutual TLS with the etcd server, issued from the etcd root CA
/etc/kubernetes/pki/apiserver-etcd-client.crt
/etc/kubernetes/pki/apiserver-etcd-client.key

5. Service Account key pair

  • 2 files.

  • These are not certificates but a signing key pair, used only by kube-controller-manager: service account tokens are signed with sa.key, and the master verifies the signatures with the public key sa.pub.

/etc/kubernetes/pki/sa.key
/etc/kubernetes/pki/sa.pub

(二). Inspecting certificates

  • The kubelet certificates live under /var/lib/kubelet/pki/. Since Kubernetes 1.8 the kubelet can rotate its own certificates shortly before they expire, so they do not need to be managed here; for binary installs see the kubelet certificate rotation docs.
# The CA certificates are valid for 10 years, all other certificates for 1 year. Note that kubelet.conf is not listed.
kubeadm alpha certs check-expiration

[check-expiration] Reading configuration from the cluster...
[check-expiration] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'

CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED
admin.conf                 Aug 02, 2022 10:08 UTC   361d                                    no      
apiserver                  Aug 02, 2022 10:08 UTC   361d            ca                      no      
apiserver-etcd-client      Aug 02, 2022 10:08 UTC   361d            etcd-ca                 no      
apiserver-kubelet-client   Aug 02, 2022 10:08 UTC   361d            ca                      no      
controller-manager.conf    Aug 02, 2022 10:08 UTC   361d                                    no      
etcd-healthcheck-client    Aug 02, 2022 10:08 UTC   361d            etcd-ca                 no      
etcd-peer                  Aug 02, 2022 10:08 UTC   361d            etcd-ca                 no      
etcd-server                Aug 02, 2022 10:08 UTC   361d            etcd-ca                 no      
front-proxy-client         Aug 02, 2022 10:08 UTC   361d            front-proxy-ca          no      
scheduler.conf             Aug 02, 2022 10:08 UTC   361d                                    no      

CERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
ca                      Jul 31, 2031 10:08 UTC   9y              no      
etcd-ca                 Jul 31, 2031 10:08 UTC   9y              no      
front-proxy-ca          Jul 31, 2031 10:08 UTC   9y              no
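Individual certificates can also be inspected directly with openssl, for example the apiserver certificate:

openssl x509 -in /etc/kubernetes/pki/apiserver.crt -noout -dates -subject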

(三). Renewing certificates

  • When certificates are close to expiry they have to be re-issued and rolled out.

1. kubeadm configuration

# While the certificates are still valid, dump the kubeadm config into a YAML file and keep it; once the certificates expire, kubectl can no longer talk to the cluster.
kubectl get cm -o yaml -n kube-system kubeadm-config

2. Re-issuing one-year certificates

  • First copy the files under /etc/kubernetes/pki to another directory as a temporary backup. Copy them, do not move or delete.
  • After re-issuing, restart kubelet and the kube-apiserver, etcd, controller-manager and scheduler pods; simply deleting those static pods is enough, they are recreated automatically.
  • With multiple masters, copy the newly issued certificates to the other masters as well.
kubeadm alpha certs renew all
cp /etc/kubernetes/admin.conf /root/.kube/config

3. 修改源码

  • 重签100年的证书
# 根据自己的版本修改后下载
wget https://github.com/kubernetes/kubernetes/archive/v1.19.3.tar.gz
tar xf v1.19.3.tar.gz

# 修改,后面加一个* 100
vim kubernetes-1.19.3/staging/src/k8s.io/client-go/util/cert/cert.go
const duration365d = time.Hour * 24 * 365 * 100
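
  • After the change, kubeadm has to be rebuilt and the certificates renewed with the new binary. Note that the validity of the leaf certificates is governed separately by CertificateValidity in cmd/kubeadm/app/constants/constants.go, so that constant usually needs the same treatment. A build sketch, assuming a Go toolchain matching Kubernetes 1.19 is installed (the output path may differ by platform):
cd kubernetes-1.19.3

# build only the kubeadm binary
make all WHAT=cmd/kubeadm GOFLAGS=-v

# back up the packaged kubeadm, replace it with the freshly built one, then renew
cp /usr/bin/kubeadm /usr/bin/kubeadm.bak
cp _output/local/bin/linux/amd64/kubeadm /usr/bin/kubeadm
kubeadm alpha certs renew all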

十六. nacos

参考

1. 库,表,初始化

# MySQL and the storageclass were already deployed earlier, so they do not need to be created again
# 登录数据库执行
create database nacos_devtest character set utf8 collate utf8_bin;
grant all privileges on *.* to 'nacos'@'%' identified by 'nacos_!6514';

# 拷贝初始化sql到数据库中,至于怎么拿到初始init.sql,使用官方的数据库创建出来,备份整个库,然后导入到自己的数据库.
kubectl -n testing cp /data/apps/nacos/init.sql mysql-0:/
use nacos_devtest;
source /init.sql
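
  • A quick way to verify the import from outside the pod (pod and namespace names as used above; the official nacos schema includes tables such as config_info, users, roles):
# list the tables created by init.sql; -h127.0.0.1 forces a TCP connection matched by the '%' grant
kubectl -n testing exec -it mysql-0 -- \
  mysql -h127.0.0.1 -unacos -p'nacos_!6514' nacos_devtest -e 'show tables;'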

2. nacos-all-in-one.yaml

  • Update the database settings in the ConfigMap to match your own database; the connection must point to mysql-0.${MYSQL_SVC_NAME} (the writable primary), because nacos needs write access
---
apiVersion: v1
kind: Service
metadata:
  name: nacos-headless
  namespace: testing
  labels:
    app: nacos
  annotations:
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  ports:
    - port: 8848
      name: server
      targetPort: 8848
    - port: 9848
      name: client-rpc
      targetPort: 9848
    - port: 9849
      name: raft-rpc
      targetPort: 9849
    ## 兼容1.4.x版本的选举端口
    - port: 7848
      name: old-raft-rpc
      targetPort: 7848
  clusterIP: None
  selector:
    app: nacos
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: nacos-cm
  namespace: testing
data:
  mysql.db.name: "nacos_devtest"
  mysql.port: "3306"
  mysql.user: "nacos"
  mysql.password: "nacos_!6514"
  # If the database sits outside the cluster, put its address here without appending the port (the port is configured separately in mysql.port)
  mysql_service_host: "mysql-0.mysql"
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nacos
  namespace: testing
spec:
  serviceName: nacos-headless
  replicas: 3
  template:
    metadata:
      labels:
        app: nacos
      annotations:
        pod.alpha.kubernetes.io/initialized: "true"
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                      - nacos
              topologyKey: "kubernetes.io/hostname"
      #serviceAccountName: nfs-client-provisioner
      initContainers:
        - name: peer-finder-plugin-install
          image: nacos/nacos-peer-finder-plugin:1.1
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - mountPath: /home/nacos/plugins/peer-finder
              name: data
              subPath: peer-finder
      containers:
        - name: nacos
          imagePullPolicy: IfNotPresent
          image: nacos/nacos-server:latest
          resources:
            requests:
              memory: "2Gi"
              cpu: "500m"
          ports:
            - containerPort: 8848
              name: client-port
            - containerPort: 9848
              name: client-rpc
            - containerPort: 9849
              name: raft-rpc
            - containerPort: 7848
              name: old-raft-rpc
          env:
            - name: NACOS_REPLICAS
              value: "3"
            - name: SERVICE_NAME
              value: "nacos-headless"
            - name: DOMAIN_NAME
              value: "cluster.local"
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: metadata.namespace
            - name: MYSQL_SERVICE_DB_NAME
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.db.name
            - name: MYSQL_SERVICE_HOST
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql_service_host
            - name: MYSQL_SERVICE_PORT
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.port
            - name: MYSQL_SERVICE_USER
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.user
            - name: MYSQL_SERVICE_PASSWORD
              valueFrom:
                configMapKeyRef:
                  name: nacos-cm
                  key: mysql.password
            - name: NACOS_SERVER_PORT
              value: "8848"
            - name: NACOS_APPLICATION_PORT
              value: "8848"
            - name: PREFER_HOST_MODE
              value: "hostname"
          volumeMounts:
            - name: data
              mountPath: /home/nacos/plugins/peer-finder
              subPath: peer-finder
            - name: data
              mountPath: /home/nacos/data
              subPath: data
            - name: data
              mountPath: /home/nacos/logs
              subPath: logs
  volumeClaimTemplates:
    - metadata:
        name: data
        annotations:
      spec:
        accessModes: [ "ReadWriteMany" ]
        storageClassName: testing-storageclass
        resources:
          requests:
            storage: 10Gi
  selector:
    matchLabels:
      app: nacos
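
  • Applying the manifest and checking that the cluster forms (namespace and labels as defined above; the readiness path below is the one commonly used for nacos probes and is an assumption here):
kubectl apply -f nacos-all-in-one.yaml

# wait until all three replicas are Running/Ready
kubectl -n testing get pod -l app=nacos -w

# basic in-cluster reachability check against the headless service
kubectl -n testing run nacos-check --rm -it --image=busybox --restart=Never -- \
  wget -qO- http://nacos-0.nacos-headless:8848/nacos/v1/console/health/readiness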

3. ing

  • 解析,访问hsj.nacos.com/nacos/index.html
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: nacos-web
  namespace: testing
spec:
  rules:
  - host: hsj.nacos.com
    http:
      paths:
      - backend:
          serviceName: nacos-headless
          servicePort: 8848
        path: /nacos
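
  • For the resolution step, the hostname only needs to point at a node where the ingress controller listens; a throwaway hosts entry is enough for testing (<ingress-node-ip> is a placeholder for that node's IP):
# on the client machine, point the ingress host at an ingress-controller node
echo "<ingress-node-ip> hsj.nacos.com" >> /etc/hosts

# verify the ingress answers
curl -I http://hsj.nacos.com/nacos/index.html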

十七. minio

1. sts

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: minio
spec:
  serviceName: minio
  replicas: 4
  selector:
    matchLabels:
      app: minio
  template:
    metadata:
      labels:
        app: minio
    spec:
      containers:
      - name: minio
        env:
        - name: MINIO_ROOT_USER
          value: "admin"
        - name: MINIO_ROOT_PASSWORD
          value: "root123456"
        image: minio/minio
        imagePullPolicy: IfNotPresent
        command:
          - /bin/sh
          - -c
          - minio server --console-address ":5000" http://minio-{0...3}.minio.default.svc.cluster.local/data
        ports:
        - name: data
          containerPort: 9000
          protocol: "TCP"
        - name: console
          containerPort: 5000
          protocol: "TCP"
        volumeMounts:
        - name: data
          mountPath: /data
        - name: date-config
          mountPath: /etc/localtime
      volumes:
        - name: date-config
          hostPath:
            path: /usr/share/zoneinfo/Asia/Shanghai
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes:
        - ReadWriteMany
      resources:
        requests:
          storage: 10Gi
      storageClassName: little

2. svc

apiVersion: v1
kind: Service
metadata:
  name: minio
  labels:
    app: minio
spec:
  clusterIP: None
  ports:
    - port: 9000
      name: data
    - port: 5000
      name: console
  selector:
    app: minio
---
apiVersion: v1
kind: Service
metadata:
  name: minio-service
spec:
  type: NodePort
  ports:
   - name: data
     port: 9000
     targetPort: 9000
     protocol: TCP
     nodePort:
   - name: console
     port: 5000
     targetPort: 5000
     protocol: TCP
     nodePort:
  selector:
    app: minio

3. ing

apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: minio
  namespace: default
spec:
  rules:
    - host: yg.minio.com
      http:
        paths:
          - path: /
            backend:
              serviceName: minio-service
              servicePort: 5000
  • 解析,浏览器访问
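
  • Besides the browser console, the data port can be exercised with the MinIO client mc, using the credentials from the StatefulSet above; the alias name and the <node-ip>/<node-port> placeholders are examples only (older mc releases use `mc config host add` instead of `mc alias set`):
# find the auto-assigned NodePort of the data port (9000)
kubectl get svc minio-service -o jsonpath='{.spec.ports[?(@.name=="data")].nodePort}'

# point mc at any cluster node on that port, then create and list a bucket
mc alias set myminio http://<node-ip>:<node-port> admin root123456
mc mb myminio/test-bucket
mc ls myminio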

十八. prometheus

(一). thanos

  • thanos实现prometheus分布式集群

十九. helm

1. 安装

  • 在master节点上安装
wget https://get.helm.sh/helm-v3.2.4-linux-amd64.tar.gz

tar -zxf helm-v3.2.4-linux-amd64.tar.gz
cp linux-amd64/helm /usr/local/bin/

# 查看版本
helm version
version.BuildInfo{Version:"v3.2.4", GitCommit:"0ad800ef43d3b826f31a5ad8dfbb4fe05d143688", GitTreeState:"clean", GoVersion:"go1.13.12"}

# 添加helm仓库
helm repo add stable http://mirror.azure.cn/kubernetes/charts/

# 查看
helm repo ls
NAME  	URL                                      
stable	http://mirror.azure.cn/kubernetes/charts/

# 同步更新
helm repo update
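
  • With the repo added, charts can be searched and installed; the chart, namespace and release names below are examples only:
# search the stable repo
helm search repo stable/prometheus

# install a chart into its own namespace (release name: prom)
kubectl create namespace monitoring
helm install prom stable/prometheus -n monitoring

# list and remove releases
helm -n monitoring ls
helm -n monitoring uninstall prom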

2.

二十. rancher

二十一. istio

1. 安装

wget https://github.com/istio/istio/releases/download/1.7.3/istio-1.7.3-linux-amd64.tar.gz

# 解压
tar xf istio-1.7.3-linux-amd64.tar.gz

# 软链
ln -s istio-1.7.3 istio

# install the istioctl binary and bash completion (run from inside the extracted istio directory)
cd istio
cp bin/istioctl /bin/
cp tools/istioctl.bash ~
source ~/istioctl.bash

# 安装istio组件
istioctl install --set profile=demo

# 查看
kubectl -n istio-system get po

# 查看提供的profile类型
istioctl profile list
Istio configuration profiles:
    remote
    default
    demo
    empty
    minimal
    preview
    
# 获取yaml
istioctl manifest generate --set profile=demo > istio-kubernetes-manifest.yaml

# 如果要卸载
istioctl manifest generate --set profile=demo | kubectl delete -f -
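
  • After installation, sidecar injection is enabled per namespace; a minimal check that injection works (namespace and image are examples):
# enable automatic sidecar injection for the default namespace
kubectl label namespace default istio-injection=enabled --overwrite

# any new pod in that namespace should come up with 2/2 containers (app + istio-proxy)
kubectl create deployment nginx-istio-test --image=nginx
kubectl get pod -l app=nginx-istio-test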
