Contents
I. Environment
II. Installing Docker
1. Download the Docker package
2. Set Docker to start on boot
III. Building the MySQL cluster
1. Export the image
2. Load the image
3. Configure the firewall
4. Confirm the cluster servers
5. Create the containers
IV. Configuring HAProxy
1. Create the haproxy user
2. Build the haproxy container
3. The configuration file
A previous article covered installing and deploying a MySQL (PXC) cluster on Docker with internet access; this one walks through installing Docker and deploying a MySQL (PXC) cluster in an offline environment.
Virtual machines: 3, all running CentOS 7+.
The IPs and roles are as follows (haproxy shares a host with node1, as explained later):
192.*.*.41 haproxy
192.*.*.41 node1
192.*.*.43 node2
192.*.*.44 node3
I downloaded docker-18.03.1-ce.tgz from Docker's official static-binary repository: https://download.docker.com/linux/static/stable/x86_64/.
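On a machine with internet access, the package can be fetched with curl (adjust the version as needed):
curl -O https://download.docker.com/linux/static/stable/x86_64/docker-18.03.1-ce.tgz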
Upload the file to the server and extract it:
[root@localhost local]# tar xzvf docker-18.03.1-ce.tgz
docker/
docker/dockerd
docker/docker-proxy
docker/docker-containerd
docker/docker-runc
docker/docker-init
docker/docker-containerd-shim
docker/docker
docker/docker-containerd-ctr
Copy the extracted files to /usr/bin/:
[root@localhost local]# sudo cp /usr/local/docker/* /usr/bin/
Start the Docker daemon:
[root@localhost local]# sudo dockerd &
[1] 23794
[root@localhost local]# WARN[2020-05-20T18:39:35.460270855-07:00] could not change group /var/run/docker.sock to docker: group docker not found
INFO[2020-05-20T18:39:35.463656583-07:00] libcontainerd: started new docker-containerd process pid=23803
INFO[0000] starting containerd module=containerd revision=773c489c9c1b21a6d78b5c538cd395416ec50f88 version=v1.0.3
INFO[0000] loading plugin "io.containerd.content.v1.content"... module=containerd type=io.containerd.content.v1
INFO[0000] loading plugin "io.containerd.snapshotter.v1.btrfs"... module=containerd type=io.containerd.snapshotter.v1
WARN[0000] failed to load plugin io.containerd.snapshotter.v1.btrfs error="path /var/lib/docker/containerd/daemon/io.containerd.snapshotter.v1.btrfs must be a btrfs filesystem to be used with the btrfs snapshotter" module=containerd
INFO[0000] loading plugin "io.containerd.snapshotter.v1.overlayfs"... module=containerd type=io.containerd.snapshotter.v1
INFO[0000] loading plugin "io.containerd.metadata.v1.bolt"... module=containerd type=io.containerd.metadata.v1
WARN[0000] could not use snapshotter btrfs in metadata plugin error="path /var/lib/docker/containerd/daemon/io.containerd.snapshotter.v1.btrfs must be a btrfs filesystem to be used with the btrfs snapshotter" module="containerd/io.containerd.metadata.v1.bolt"
INFO[0000] loading plugin "io.containerd.differ.v1.walking"... module=containerd type=io.containerd.differ.v1
INFO[0000] loading plugin "io.containerd.gc.v1.scheduler"... module=containerd type=io.containerd.gc.v1
INFO[0000] loading plugin "io.containerd.grpc.v1.containers"... module=containerd type=io.containerd.grpc.v1
INFO[0000] loading plugin "io.containerd.grpc.v1.content"... module=containerd type=io.containerd.grpc.v1
INFO[0000] loading plugin "io.containerd.grpc.v1.diff"... module=containerd type=io.containerd.grpc.v1
INFO[0000] loading plugin "io.containerd.grpc.v1.events"... module=containerd type=io.containerd.grpc.v1
INFO[0000] loading plugin "io.containerd.grpc.v1.healthcheck"... module=containerd type=io.containerd.grpc.v1
INFO[0000] loading plugin "io.containerd.grpc.v1.images"... module=containerd type=io.containerd.grpc.v1
INFO[0000] loading plugin "io.containerd.grpc.v1.leases"... module=containerd type=io.containerd.grpc.v1
INFO[0000] loading plugin "io.containerd.grpc.v1.namespaces"... module=containerd type=io.containerd.grpc.v1
INFO[0000] loading plugin "io.containerd.grpc.v1.snapshots"... module=containerd type=io.containerd.grpc.v1
INFO[0000] loading plugin "io.containerd.monitor.v1.cgroups"... module=containerd type=io.containerd.monitor.v1
INFO[0000] loading plugin "io.containerd.runtime.v1.linux"... module=containerd type=io.containerd.runtime.v1
INFO[0000] loading plugin "io.containerd.grpc.v1.tasks"... module=containerd type=io.containerd.grpc.v1
INFO[0000] loading plugin "io.containerd.grpc.v1.version"... module=containerd type=io.containerd.grpc.v1
INFO[0000] loading plugin "io.containerd.grpc.v1.introspection"... module=containerd type=io.containerd.grpc.v1
INFO[0000] serving... address="/var/run/docker/containerd/docker-containerd-debug.sock" module="containerd/debug"
INFO[0000] serving... address="/var/run/docker/containerd/docker-containerd.sock" module="containerd/grpc"
INFO[0000] containerd successfully booted in 0.016229s module=containerd
INFO[2020-05-20T18:39:35.758516754-07:00] Graph migration to content-addressability took 0.00 seconds
INFO[2020-05-20T18:39:35.761637085-07:00] Loading containers: start.
INFO[2020-05-20T18:39:36.595242713-07:00] Default bridge (docker0) is assigned with an IP address 172.17.0.0/16. Daemon option --bip can be used to set a preferred IP address
INFO[2020-05-20T18:39:37.047313138-07:00] Loading containers: done.
INFO[2020-05-20T18:39:37.096896111-07:00] Docker daemon commit=9ee9f40 graphdriver(s)=overlay2 version=18.03.1-ce
INFO[2020-05-20T18:39:37.097383813-07:00] Daemon has completed initialization
INFO[2020-05-20T18:39:37.111921647-07:00] API listen on /var/run/docker.sock
Check the Docker version to confirm it works:
[root@localhost local]# docker -v
Docker version 18.03.1-ce, build 9ee9f40
First, register Docker as a systemd service:
vim /etc/systemd/system/docker.service
Add the following to the unit file:
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service
Wants=network-online.target
[Service]
Type=notify
# the default is not to use systemd for cgroups because the delegate issues still
# exists and systemd currently does not support the cgroup feature set required
# for containers run by docker
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Uncomment TasksMax if your systemd version supports it.
# Only systemd 226 and above support this version.
#TasksMax=infinity
TimeoutStartSec=0
# set delegate yes so that systemd does not reset the cgroups of docker containers
Delegate=yes
# kill only the docker process, not all processes in the cgroup
KillMode=process
# restart the docker process if it exits prematurely
Restart=on-failure
StartLimitBurst=3
StartLimitInterval=60s
[Install]
WantedBy=multi-user.target
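Before enabling the service, stop the dockerd instance started manually above and let systemd take over. A minimal sketch (23794 is the PID printed in the earlier example):
# stop the manually started daemon
sudo kill 23794
# reload systemd so it picks up the new unit, then start Docker
systemctl daemon-reload
systemctl start docker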
Finally, enable Docker to start on boot:
[root@localhost local]# systemctl enable docker
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /etc/systemd/system/docker.service.
On a server where the pxc image was installed online, export it; the exported image can be used directly for deployment. (See the earlier article for the online installation steps.)
[root@localhost local]# docker save -o /usr/local/pxc.tar pxc
Upload the exported image to the offline servers and load it. (Run the load step on all three servers.)
[root@localhost local]# docker load < /usr/local/pxc.tar
ae7e6d8732b2: Loading layer [==================================================>] 135MB/135MB
b9e1e3d86594: Loading layer [==================================================>] 7.168kB/7.168kB
26171002b1d9: Loading layer [==================================================>] 343kB/343kB
cd370409612b: Loading layer [==================================================>] 41.78MB/41.78MB
d7321b4a02d1: Loading layer [==================================================>] 295.4kB/295.4kB
408ef1d52d06: Loading layer [==================================================>] 185MB/185MB
5edd0932e9ca: Loading layer [==================================================>] 6.656kB/6.656kB
6afddf327a30: Loading layer [==================================================>] 6.144kB/6.144kB
6363eb3f7e86: Loading layer [==================================================>] 3.584kB/3.584kB
eaa29c80c6b4: Loading layer [==================================================>] 7.168kB/7.168kB
1d8c20076a53: Loading layer [==================================================>] 3.031MB/3.031MB
d81905367618: Loading layer [==================================================>] 6.144kB/6.144kB
8b5bbb2d2e05: Loading layer [==================================================>] 3.031MB/3.031MB
Loaded image: pxc:latest
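To confirm the image is available locally, list it:
[root@localhost local]# docker images pxc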
Before installing the MySQL cluster, check the firewall status and either stop the firewall or open the required ports; otherwise the cluster nodes may be unable to reach each other and the service will be affected. (The specific PXC ports to open are shown in the sketch after the command reference below.)
# check firewall status
firewall-cmd --state
# start the firewall
service firewalld start
# restart the firewall
service firewalld restart
# stop the firewall
service firewalld stop
# list firewall rules
firewall-cmd --list-all
# check whether a port is open
firewall-cmd --query-port=8080/tcp
# open port 80
firewall-cmd --permanent --add-port=80/tcp
# remove a port
firewall-cmd --permanent --remove-port=8080/tcp
# reload the firewall (required after changing permanent rules)
firewall-cmd --reload
# Parameter notes:
1. firewall-cmd: the Linux command-line tool for managing firewalld;
2. --permanent: makes the rule persistent;
3. --add-port: specifies the port to add.
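If you keep firewalld running, PXC needs more than just 3306: port 4444 is used for SST (full state snapshot transfer), 4567 for Galera group communication, and 4568 for IST (incremental state transfer). A minimal sketch, to be run on every node:
firewall-cmd --permanent --add-port=3306/tcp
firewall-cmd --permanent --add-port=4444/tcp
firewall-cmd --permanent --add-port=4567/tcp
firewall-cmd --permanent --add-port=4567/udp
firewall-cmd --permanent --add-port=4568/tcp
firewall-cmd --reload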
Create the directories the cluster will use.
As noted above, the cluster uses three virtual machines, with IPs and roles as follows:
192.*.*.41 haproxy
192.*.*.41 node1
192.*.*.43 node2
192.*.*.44 node3
Create the local directories:
mkdir -p /docker/pxc/mysql /docker/pxc/data
Change the directory permissions:
cd /docker/pxc
chmod 777 mysql
chmod 777 data
Create the first MySQL node (node1: 192.*.*.41). The password here is just 123456 for testing; use a stronger password in a real environment.
docker run -d -p 3306:3306 -e MYSQL_ROOT_PASSWORD=123456 -e CLUSTER_NAME=PXC -e XTRABACKUP_PASSWORD=123456 -v /docker/pxc/mysql:/var/lib/mysql -v /docker/pxc/data:/data --privileged --name=db1 --net=host pxc
Create the second MySQL node (node2: 192.*.*.43).
Note: 192.*.*.41 in CLUSTER_JOIN is the IP of the first node.
docker run -d -p 3306:3306 -e MYSQL_ROOT_PASSWORD=123456 -e CLUSTER_NAME=PXC -e XTRABACKUP_PASSWORD=123456 -e CLUSTER_JOIN=192.*.*.41 -v /docker/pxc/mysql:/var/lib/mysql -v /docker/pxc/data:/data --privileged --name=db2 --net=host pxc
Create the third MySQL node (node3: 192.*.*.44):
docker run -d -p 3306:3306 -e MYSQL_ROOT_PASSWORD=123456 -e CLUSTER_NAME=PXC -e XTRABACKUP_PASSWORD=123456 -e CLUSTER_JOIN=192.*.*.41 -v /docker/pxc/mysql:/var/lib/mysql -v /docker/pxc/data:/data --privileged --name=db3 --net=host pxc
At this point the multi-host Docker-based PXC setup is essentially complete; tested and working!
You can check the logs on each server:
docker logs db1
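Beyond the logs, you can verify that all three nodes have joined by checking Galera's status from any node. A quick sketch, assuming the root password set above:
docker exec -it db1 mysql -uroot -p123456 -e "show status like 'wsrep_cluster_size';"
The Value column should read 3 once every node is up.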
I use the db1 node (node1) as the HAProxy server (if you have spare server resources, a dedicated machine is better). On it, enter the container with:
docker exec -it db1 bash
Inside the container, run:
mysql -uroot -p
Enter the password set above (123456), then run:
create user 'haproxy'@'%' identified by '';
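Optionally, confirm the heartbeat account exists before leaving the client:
select user, host from mysql.user where user = 'haproxy';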
Once it runs successfully, exit the MySQL client and then the container:
exit
exit
As before, export the haproxy image from a server that installed it online:
[root@localhost local]# docker save -o /usr/local/haproxy.tar haproxy
Upload it to the offline server and load the image:
[root@localhost local]# docker load < /usr/local/haproxy.tar
ffc9b21953f4: Loading layer [==================================================>] 72.49MB/72.49MB
71979a4098b3: Loading layer [==================================================>] 23.25MB/23.25MB
2e4722b8b2ad: Loading layer [==================================================>] 2.048kB/2.048kB
Loaded image: haproxy:latest
Create the directory:
[root@localhost local]# mkdir -p /docker/pxc/haproxy
Create the configuration file:
[root@localhost local]# touch /docker/pxc/haproxy/haproxy.cfg
Edit the configuration file:
[root@localhost local]# vi /docker/pxc/haproxy/haproxy.cfg
Insert the following configuration:
# haproxy.cfg
global
    # working directory
    chroot /usr/local/etc/haproxy
    # log to the rsyslog local5 facility (/var/log/local5) at level info
    log 127.0.0.1 local5 info
    # run as a daemon
    daemon

defaults
    log global
    mode http
    # log format
    option httplog
    # do not log the load balancer's heartbeat checks
    option dontlognull
    # connect timeout (ms)
    timeout connect 5000
    # client timeout (ms)
    timeout client 50000
    # server timeout (ms)
    timeout server 50000

# monitoring UI
listen admin_stats
    # IP and port the monitoring UI listens on
    bind 192.*.*.41:8888
    # protocol
    mode http
    # relative URI of the stats page
    stats uri /dbs
    # stats page realm
    stats realm Global\ statistics
    # login credentials
    stats auth admin:123456

# database load balancing
listen proxy-mysql
    # listen IP and port; this must not be 3306, which the MySQL
    # nodes already occupy on the host network
    bind 0.0.0.0:3307
    # protocol
    mode tcp
    # load-balancing algorithm (round robin)
    #   round robin:       roundrobin
    #   weighted:          static-rr
    #   least connections: leastconn
    #   source IP hash:    source
    balance roundrobin
    # log format
    option tcplog
    # heartbeat checks against MySQL use the unprivileged, empty-password
    # haproxy user created earlier
    option mysql-check user haproxy
    server MySQL_1 192.*.*.41:3306 check weight 1 maxconn 2000
    server MySQL_2 192.*.*.43:3306 check weight 1 maxconn 2000
    server MySQL_3 192.*.*.44:3306 check weight 1 maxconn 2000
    # use TCP keepalive to detect dead connections
    option tcpka
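Before starting the real container, you can have HAProxy validate the file syntax in a throwaway container (a minimal sketch; -c only checks the configuration and exits):
docker run --rm -v /docker/pxc/haproxy:/usr/local/etc/haproxy --entrypoint haproxy haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg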
Now create the container:
docker run -it -d -p 4001:8888 -p 4002:3306 -v /docker/pxc/haproxy:/usr/local/etc/haproxy --name haproxy --privileged --net=host haproxy
Once the container is up, visit http://192.*.*.41:8888/dbs to see the status of each node. (Because the container runs with --net=host, the -p mappings are effectively ignored and HAProxy listens directly on host ports 8888 and 3307.) The login credentials are the ones set in the config file above; mine are admin/123456.
Connection parameters:
Host: 192.*.*.41  Port: 3307  User: root  Password: 123456
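A quick connectivity test through the load balancer, assuming a MySQL client is available wherever you run it:
mysql -h 192.*.*.41 -P 3307 -uroot -p123456 -e "select @@hostname;"
Repeated runs should return different hostnames as roundrobin rotates the backend connections.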
And that's it; the cluster is up and running.