Building a MySQL cluster on k8s with a StatefulSet

Environment:

docker version: 17.06.2-ce
kubeadm: v1.11.0


The MySQL cluster deployment consists of a ConfigMap, two Services, three PersistentVolumes, and a StatefulSet.

The cluster combines MySQL with XtraBackup to form a master + slave topology. The architecture is shown below:

[Figure 1: architecture of the MySQL master/slave cluster on k8s]

The YAML manifests are as follows:

First, the ConfigMap:

apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  labels:
    app: mysql
data:
  master.cnf: |
    # Apply this config only on the master.
    [mysqld]
    log-bin
  slave.cnf: |
    # Apply this config only on slaves.
    [mysqld]
    super-read-only
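
To load and check the ConfigMap, something like the following should work (mysql-configmap.yaml is just an assumed local file name for the manifest above):

kubectl apply -f mysql-configmap.yaml
kubectl describe configmap mysql   # both master.cnf and slave.cnf keys should be listed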

#  services 

# Headless service for stable DNS entries of StatefulSet members.
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
# Client service for connecting to any MySQL instance for reads.
# For writes, you must instead connect to the master: mysql-0.mysql.
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql
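
Through the headless service, each Pod later gets a stable DNS entry of the form <pod-name>.mysql. A quick sketch for checking this once the Pods exist (the busybox image and the pod name dns-test are arbitrary choices):

# Resolve the per-Pod DNS name provided by the headless service.
kubectl run dns-test --image=busybox:1.28 -i -t --rm --restart=Never -- \
  nslookup mysql-0.mysql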


#  persistentvolumes 

# Create the backing directories on the node first:
mkdir -p /opt/mysql0
mkdir -p /opt/mysql1
mkdir -p /opt/mysql2

Alternatively, set type: DirectoryOrCreate in spec.hostPath (as the manifests below already do) and Kubernetes will create any missing directories automatically.
---
apiVersion: v1
kind: PersistentVolume
metadata:
  labels:
    app: mysql
  name: data-mysql-0 
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 10Gi
  hostPath:
    path: /opt/mysql0
    type: DirectoryOrCreate
  persistentVolumeReclaimPolicy: Retain

---

apiVersion: v1
kind: PersistentVolume
metadata:
  labels:
    app: mysql
  name: data-mysql-1
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 10Gi
  hostPath:
    path: /opt/mysql1
    type: DirectoryOrCreate
  persistentVolumeReclaimPolicy: Retain

---

apiVersion: v1
kind: PersistentVolume
metadata:
  labels:
    app: mysql
  name: data-mysql-2
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 10Gi
  hostPath:
    path: /opt/mysql2
    type: DirectoryOrCreate
  persistentVolumeReclaimPolicy: Retain
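
Apply the three PersistentVolume manifests and confirm they report an Available status before creating the StatefulSet (mysql-pv.yaml is an assumed file name):

kubectl apply -f mysql-pv.yaml
kubectl get pv -l app=mysql   # STATUS should be Available for all three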


#  statefulset  

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: mysql:5.7 
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: gcr.io/google-samples/xtrabackup:1.0 
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on master (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: mysql:5.7 
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 500m
            memory: 1Gi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1
      - name: xtrabackup
        image: gcr.io/google-samples/xtrabackup:1.0
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql

          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing slave.
            mv xtrabackup_slave_info change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from master. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm xtrabackup_binlog_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi

          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done

            echo "Initializing replication from clone position"
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
            mysql -h 127.0.0.1 <<EOF
          $(<change_master_to.sql.orig),
            MASTER_HOST='mysql-0.mysql',
            MASTER_USER='root',
            MASTER_PASSWORD='',
            MASTER_CONNECT_RETRY=10;
          START SLAVE;
          EOF
          fi

          # When peers request a clone, use xtrabackup to stream it.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      resources:
        requests:
          storage: 10Gi
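
To deploy the StatefulSet and watch the ordered startup (mysql-statefulset.yaml is an assumed file name):

kubectl apply -f mysql-statefulset.yaml
# Pods are created one at a time: mysql-0, then mysql-1, then mysql-2.
kubectl get pods -l app=mysql --watch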

Understanding how it works
#  Understanding stateful Pod initialization 
The StatefulSet controller starts Pods one at a time, in order by their ordinal index. It waits until each Pod reports being Ready before starting the next one.
In addition, the controller assigns each Pod a unique, stable name of the form <statefulset-name>-<ordinal-index>. In this case, that results in Pods named mysql-0, mysql-1, and mysql-2.
The Pod template in the above StatefulSet manifest takes advantage of these properties to perform orderly startup of MySQL replication.

#Generating configuration
Before starting any of the containers in the Pod spec, the Pod first runs any Init Containers in the order defined.
The first Init Container, named init-mysql, generates special MySQL config files based on the ordinal index.
The script determines its own ordinal index by extracting it from the end of the Pod name, which is returned by the hostname command. Then it saves the ordinal (with a numeric offset to avoid reserved values) into a file called server-id.cnf in the MySQL conf.d directory. This translates the unique, stable identity provided by the StatefulSet controller into the domain of MySQL server IDs, which require the same properties.
The script in the init-mysql container also applies either master.cnf or slave.cnf from the ConfigMap by copying the contents into conf.d. Because the example topology consists of a single MySQL master and any number of slaves, the script simply assigns ordinal 0 to be the master, and everyone else to be slaves. Combined with the StatefulSet controller’s deployment order guarantee, this ensures the MySQL master is Ready before creating slaves, so they can begin replicating.
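
One way to confirm what init-mysql generated is to read the files it copied into the shared conf volume, for example:

# server-id is 100 + the ordinal, so mysql-1 should show server-id=101,
# and every non-zero ordinal should have slave.cnf.
kubectl exec mysql-1 -c mysql -- cat /etc/mysql/conf.d/server-id.cnf
kubectl exec mysql-1 -c mysql -- ls /etc/mysql/conf.d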


#Cloning existing data
In general, when a new Pod joins the set as a slave, it must assume the MySQL master might already have data on it. It also must assume that the replication logs might not go all the way back to the beginning of time. These conservative assumptions are the key to allow a running StatefulSet to scale up and down over time, rather than being fixed at its initial size.
The second Init Container, named clone-mysql, performs a clone operation on a slave Pod the first time it starts up on an empty PersistentVolume. That means it copies all existing data from another running Pod, so its local state is consistent enough to begin replicating from the master.
MySQL itself does not provide a mechanism to do this, so the example uses a popular open-source tool called Percona XtraBackup. During the clone, the source MySQL server might suffer reduced performance. To minimize impact on the MySQL master, the script instructs each Pod to clone from the Pod whose ordinal index is one lower. This works because the StatefulSet controller always ensures Pod N is Ready before starting Pod N+1.
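
Because the clone runs in an init container, its output can be inspected with kubectl logs if a slave seems stuck during startup:

# Show the xbstream transfer and xtrabackup --prepare output on mysql-2.
kubectl logs mysql-2 -c clone-mysql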


#Starting replication
After the Init Containers complete successfully, the regular containers run. The MySQL Pods consist of a mysql container that runs the actual mysqld server, and an xtrabackup container that acts as a sidecar.
The xtrabackup sidecar looks at the cloned data files and determines if it’s necessary to initialize MySQL replication on the slave. If so, it waits for mysqld to be ready and then executes the CHANGE MASTER TO and START SLAVE commands with replication parameters extracted from the XtraBackup clone files.
Once a slave begins replication, it remembers its MySQL master and reconnects automatically if the server restarts or the connection dies. Also, because slaves look for the master at its stable DNS name (mysql-0.mysql), they automatically find the master even if it gets a new Pod IP due to being rescheduled.
Lastly, after starting replication, the xtrabackup container listens for connections from other Pods requesting a data clone. This server remains up indefinitely in case the StatefulSet scales up, or in case the next Pod loses its PersistentVolumeClaim and needs to redo the clone.
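
Replication health on a slave can be checked directly through the mysql container, for example:

# Slave_IO_Running and Slave_SQL_Running should both report Yes.
kubectl exec mysql-1 -c mysql -- mysql -h 127.0.0.1 -e "SHOW SLAVE STATUS\G"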



#  Operations

Only mysql-0 accepts writes; the other members are read-only. For example, open a MySQL session on the master:

[root@master1 mysql_cluster]# kubectl exec mysql-0 -i -t -- mysql -u root 
Defaulting container name to mysql.
Use 'kubectl describe pod/mysql-0 -n default' to see all of the containers in this pod.
Welcome to the MySQL monitor.  Commands end with ; or \g.
Your MySQL connection id is 1245
Server version: 5.7.23-log MySQL Community Server (GPL)

Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.

Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.

Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.

mysql> 
mysql> 
mysql> show databases;
+------------------------+
| Database               |
+------------------------+
| information_schema     |
| mysql                  |
| performance_schema     |
| sys                    |
| test                   |
| xtrabackup_backupfiles |
| zhuchenjie             |
+------------------------+
7 rows in set (0.00 sec)

mysql>
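
To exercise the write/read split, send writes to the master through its stable DNS name and reads through the mysql-read service (this follows the upstream tutorial; the test database and table are throwaway names):

kubectl run mysql-client --image=mysql:5.7 -i --rm --restart=Never -- \
  mysql -h mysql-0.mysql -e "CREATE DATABASE IF NOT EXISTS test; \
    CREATE TABLE IF NOT EXISTS test.messages (message VARCHAR(250)); \
    INSERT INTO test.messages VALUES ('hello');"

kubectl run mysql-client --image=mysql:5.7 -i -t --rm --restart=Never -- \
  mysql -h mysql-read -e "SELECT * FROM test.messages"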


#  Elastic scaling

# To scale the MySQL cluster up, create the additional PersistentVolumes first.

kubectl scale statefulset mysql  --replicas=5

kubectl scale statefulset mysql  --replicas=1
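
To watch reads being spread across the replicas while scaling, run a query loop against mysql-read; this is also the SELECT @@server_id loop referred to in the Clean up steps below:

kubectl run mysql-client-loop --image=mysql:5.7 -i -t --rm --restart=Never -- \
  bash -ic "while sleep 1; do mysql -h mysql-read -e 'SELECT @@server_id,NOW()'; done"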

#  Clean up

1.Cancel the SELECT @@server_id loop by pressing Ctrl+C in its terminal, or running the following from another terminal:

kubectl delete pod mysql-client-loop --now


2.Delete the StatefulSet. This also begins terminating the Pods.

kubectl delete statefulset mysql


3.Verify that the Pods disappear. They might take some time to finish terminating.

kubectl get pods -l app=mysql


    You’ll know the Pods have terminated when the above returns:

 No resources found.


4.Delete the ConfigMap, Services, and PersistentVolumeClaims.

kubectl delete configmap,service,pvc -l app=mysql


If you manually provisioned PersistentVolumes, you also need to manually delete them, as well as release the underlying resources. If you used a dynamic provisioner, it automatically deletes the PersistentVolumes when it sees that you deleted the PersistentVolumeClaims. Some dynamic provisioners (such as those for EBS and PD) also release the underlying resources upon deleting the PersistentVolumes.
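
Since the PersistentVolumes in this walkthrough were provisioned manually and labeled app=mysql, a final cleanup sketch might be:

kubectl delete pv -l app=mysql
# Then remove the hostPath directories on the node:
rm -rf /opt/mysql0 /opt/mysql1 /opt/mysql2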

