集群节点规划:
集群角色 | 宿主机ip地址 | 宿主机系统 | docker版本 |
---|---|---|---|
manager | 192.168.2.10 | CentOS7.9 | 23.0.3 |
Node1 | 192.168.2.20 | CentOS7.9 | 23.0.3 |
Node2 | 192.168.2.30 | CentOS7.9 | 23.0.3 |
//添加hosts解析
[root@localhost ~]# vim /etc/hosts
添加如下:
192.168.2.10 manager
192.168.2.20 node1
192.168.2.30 node2
//关闭selinux和防火墙(sed修改配置文件为永久关闭,setenforce 0为临时生效)
sed -i '/SELINUX/s/enforcing/disabled/g' /etc/sysconfig/selinux
setenforce 0
systemctl stop firewalld.service
systemctl disable firewalld.service
//同步节点时间
yum install ntpdate -y
ntpdate pool.ntp.org
//修改对应节点主机名
hostname `cat /etc/hosts|grep $(ifconfig|grep broadcast|tail -1|awk '{print $2}')|awk '{print $2}'`;su
//关闭swap分区
[root@manager ~]# swapoff -a
sed -ri 's/.*swap.*/#&/g' /etc/fstab  # 永久关闭swap
//安装依赖包
yum -y install yum-utils device-mapper-persistent-data lvm2
//配置docker镜像源
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
//安装docker
yum -y install docker-ce
//修改docker配置文件
[root@manager ~]# vim /etc/docker/daemon.json
添加内容如下:
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
],
"registry-mirrors": ["https://uyah70su.mirror.aliyuncs.com"]
}
//注意,由于国内拉取镜像较慢,配置文件最后增加了registry-mirrors
mkdir -p /etc/systemd/system/docker.service.d
sed -i '/^ExecStart/s/dockerd/dockerd -H tcp:\/\/0.0.0.0:2375/g' /usr/lib/systemd/system/docker.service
//注意:2375端口是未加密、无认证的Docker远程API,任何能访问该端口的人都可控制Docker;生产环境切勿对公网开放,应配置TLS认证或仅绑定内网地址
//重启docker
[root@manager ~]# systemctl daemon-reload
[root@manager ~]# systemctl enable docker
[root@manager ~]# systemctl restart docker
ps -ef|grep -aiE docker
[root@manager ~]# docker swarm init --advertise-addr 192.168.2.10
Swarm initialized: current node (44bghdfflwgqhzywunoqfqgqf) is now a manager.
To add a worker to this swarm, run the following command:
docker swarm join --token SWMTKN-1-1ll2gomc2khm5b5yjah2mmr8llpqn7jlb68rb7kudfbtpj0coo-3ws55l597bu7144wg6xddweyj 192.168.2.10:2377
To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
//此操作在node1上操作:
docker swarm join --token SWMTKN-1-1ll2gomc2khm5b5yjah2mmr8llpqn7jlb68rb7kudfbtpj0coo-3ws55l597bu7144wg6xddweyj 192.168.2.10:2377
//此操作在node2上操作:
[root@node2 ~]# docker swarm join --token SWMTKN-1-1ll2gomc2khm5b5yjah2mmr8llpqn7jlb68rb7kudfbtpj0coo-3ws55l597bu7144wg6xddweyj 192.168.2.10:2377
This node joined a swarm as a worker.
[root@manager ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
44bghdfflwgqhzywunoqfqgqf * manager Ready Active Leader 23.0.3
heavfkcv59ed5bkikbxot7mus node1 Ready Active 23.0.3
k9a71k4kzhl9nw53ijo6lx4y5 node2 Ready Active 23.0.3
[root@manager ~]# docker service create --replicas 1 --name nginx-web nginx:latest
[root@manager ~]# docker service ps nginx-web
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
ixh4h670h4ws nginx-web.1 nginx:latest node1 Running Running 2 minutes ago
[root@manager ~]# docker service inspect --pretty nginx-web
ID: st3o1iyv7whmn8eds87m35ozx
Name: nginx-web
Service Mode: Replicated
Replicas: 1
Placement:
UpdateConfig:
Parallelism: 1
On failure: pause
Monitoring Period: 5s
Max failure ratio: 0
Update order: stop-first
RollbackConfig:
Parallelism: 1
On failure: pause
Monitoring Period: 5s
Max failure ratio: 0
Rollback order: stop-first
ContainerSpec:
Image: nginx:latest@sha256:0d17b565c37bcbd895e9d92315a05c1c3c9a29f762b011a10c54a66cd53c9b31
Init: false
Resources:
Endpoint Mode: vip
[root@manager ~]# docker service inspect nginx-web
[
{
"ID": "st3o1iyv7whmn8eds87m35ozx",
"Version": {
"Index": 21
},
"CreatedAt": "2023-04-14T08:49:13.137753329Z",
"UpdatedAt": "2023-04-14T08:49:13.137753329Z",
"Spec": {
"Name": "nginx-web",
"Labels": {},
"TaskTemplate": {
"ContainerSpec": {
"Image": "nginx:latest@sha256:0d17b565c37bcbd895e9d92315a05c1c3c9a29f762b011a10c54a66cd53c9b31",
"Init": false,
"StopGracePeriod": 10000000000,
"DNSConfig": {},
"Isolation": "default"
},
"Resources": {
"Limits": {},
"Reservations": {}
},
"RestartPolicy": {
"Condition": "any",
"Delay": 5000000000,
"MaxAttempts": 0
},
"Placement": {
"Platforms": [
{
"Architecture": "amd64",
"OS": "linux"
},
{
"OS": "linux"
},
{
"OS": "linux"
},
{
"Architecture": "arm64",
"OS": "linux"
},
{
"Architecture": "386",
"OS": "linux"
},
{
"Architecture": "mips64le",
"OS": "linux"
},
{
"Architecture": "ppc64le",
"OS": "linux"
},
{
"Architecture": "s390x",
"OS": "linux"
}
]
},
"ForceUpdate": 0,
"Runtime": "container"
},
"Mode": {
"Replicated": {
"Replicas": 1
}
},
"UpdateConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"RollbackConfig": {
"Parallelism": 1,
"FailureAction": "pause",
"Monitor": 5000000000,
"MaxFailureRatio": 0,
"Order": "stop-first"
},
"EndpointSpec": {
"Mode": "vip"
}
},
"Endpoint": {
"Spec": {}
}
}
]
[root@manager ~]# docker service scale nginx-web=3
nginx-web scaled to 3
overall progress: 3 out of 3 tasks
1/3: running [==================================================>]
2/3: running [==================================================>]
3/3: running [==================================================>]
verify: Service converged
[root@manager ~]# docker service ls
ID NAME MODE REPLICAS IMAGE PORTS
st3o1iyv7whm nginx-web replicated 3/3 nginx:latest
[root@manager ~]# docker service ps nginx-web
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
ixh4h670h4ws nginx-web.1 nginx:latest node1 Running Running 38 minutes ago
l0dobbkogr1l nginx-web.2 nginx:latest manager Running Running 57 seconds ago
oyh7urhu955q nginx-web.3 nginx:latest node2 Running Running 57 seconds ago
[root@manager ~]# docker service update --image tomcat nginx-web
[root@manager ~]# docker service ps -f 'DESIRED-STATE=running' nginx-web
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
o9f4petj2bag nginx-web.1 tomcat:latest node1 Running Running 9 minutes ago
xyotaj1ifj5b nginx-web.2 tomcat:latest manager Running Running 6 minutes ago
i92vie0nkps8 nginx-web.3 tomcat:latest node2 Running Running 4 minutes ago
[root@manager ~]# docker service update --rollback nginx-web
[root@manager ~]# docker service ps -f 'DESIRED-STATE=running' nginx-web
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
worhbabalj5c nginx-web.1 nginx:latest node1 Running Running 2 minutes ago
ldgv6sagqa8u nginx-web.2 nginx:latest manager Running Running 2 minutes ago
qq7j0f2rks3i nginx-web.3 nginx:latest node2 Running Running 2 minutes ago
[root@manager ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
44bghdfflwgqhzywunoqfqgqf * manager Ready Active Leader 23.0.3
heavfkcv59ed5bkikbxot7mus node1 Ready Active 23.0.3
k9a71k4kzhl9nw53ijo6lx4y5 node2 Ready Active 23.0.3
//将node1升级为Manager
[root@manager ~]# docker node promote node1
Node node1 promoted to a manager in the swarm.
//将manager的docker服务停掉(模拟manager节点故障,验证Leader选举)
[root@manager ~]# systemctl stop docker.socket
//注意:仅停止docker.socket可能不会停止已在运行的docker.service,如需彻底停止可执行 systemctl stop docker.socket docker.service
//在node1查看节点状态信息
[root@node1 ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
44bghdfflwgqhzywunoqfqgqf manager Ready Active Unreachable 23.0.3
heavfkcv59ed5bkikbxot7mus * node1 Ready Active Leader 23.0.3
k9a71k4kzhl9nw53ijo6lx4y5 node2 Ready Active 23.0.3
//创建Nginx服务,Volume映射
[root@node1 ~]# docker service create --replicas 1 --mount type=volume,src=nginx_data,dst=/usr/share/nginx/html --name nginx-www nginx:latest
//查看nginx-www在哪台服务器创建
[root@node1 ~]# docker service inspect nginx-www
//查看数据卷;
[root@manager ~]# ls /var/lib/docker/volumes/
backingFsBlockDev metadata.db nginx_data
[root@manager ~]# ls -l /var/lib/docker/volumes/nginx_data/_data/
总用量 8
-rw-r--r-- 1 root root 497 12月 28 2021 50x.html
-rw-r--r-- 1 root root 615 12月 28 2021 index.html
-可以看到容器里面的nginx数据目录已经挂载到宿主机的nginx_data了
mkdir -p /data/webapps/www/
[root@node1 ~]# docker service create --replicas 1 --mount type=bind,src=/data/webapps/www,dst=/usr/share/nginx/html --name nginx-v1 nginx:latest
docker service ps nginx-v1
docker service inspect nginx-v1
[root@node1 ~]# ls /data/webapps/www/
[root@node1 ~]# echo www.sxy.com Test Pages >>/data/webapps/www/index.html
[root@node1 ~]# docker exec -it nginx-v1.1.eq7qn5670ui64wab6u87mx5x1 /bin/bash
root@6df12eda8415:/# cat /usr/share/nginx/html/index.html
www.sxy.com Test Pages
- 可以看到我们在宿主机上创建的index.html已经挂载到容器中
[root@manager ~]# yum -y install nfs-utils.x86_64 rpcbind
[root@manager ~]# vim /etc/exports
/data/ 192.168.2.0/24(rw,sync,insecure,anonuid=1000,anongid=1000,no_root_squash)
[root@manager ~]# systemctl enable rpcbind
[root@manager ~]# systemctl start rpcbind
[root@manager ~]# systemctl enable nfs-server
[root@manager ~]# systemctl start nfs-server
[root@manager ~]# exportfs -rv
//在node1、node2上安装nfs客户端,以便挂载NFS共享
yum -y install nfs-utils.x86_64
[root@node1 ~]# docker volume create --driver local --opt type=nfs --opt o=addr=192.168.2.10,rw --opt device=/data/ volume_test
volume_test
[root@node1 ~]# docker service create --mount type=volume,source=volume_test,destination=/usr/share/nginx/html/ --replicas 3 --name nginx-test --publish 88:80 nginx:latest
zklzy25i5acr3srdcegi0b61e
overall progress: 3 out of 3 tasks
1/3: running [==================================================>]
2/3: running [==================================================>]
3/3: running [==================================================>]
verify: Service converged
//查看nginx-test都在哪台服务器创建容器
[root@node1 ~]# docker service ps nginx-test
ID NAME IMAGE NODE DESIRED STATE CURRENT STATE ERROR PORTS
m8mspt13wmfi nginx-test.1 nginx:latest node1 Running Running 8 minutes ago
kfgvwn8j0vtk nginx-test.2 nginx:latest manager Running Running 8 minutes ago
kgpy17zjz865 nginx-test.3 nginx:latest node2 Running Running 8 minutes ago
[root@manager ~]# docker exec -it nginx-test.1.qv3g2fm8f73byv7mtcmn1zq55 /bin/bash
root@a38779c38ccc:/# cd /usr/share/nginx/html/
root@a38779c38ccc:/usr/share/nginx/html# ls
50x.html index.html
root@a38779c38ccc:/usr/share/nginx/html# echo "wo shi ys" >> index.htm
root@a38779c38ccc:/usr/share/nginx/html# ls
50x.html index.html index.htm
[root@manager ~]# cd /data/
[root@manager data]# ls
50x.html index.htm index.html
[root@manager data]# cat index.htm
wo shi ys
//Swarm集群扩容:新增node3节点,先在所有节点添加hosts解析
vim /etc/hosts
192.168.2.10 manager
192.168.2.20 node1
192.168.2.30 node2
192.168.2.40 node3
//注意:node3需要添加上面所有内容,其他主机只需添加node3的内容就好。
//关闭selinux和防火墙
sed -i '/SELINUX/s/enforcing/disabled/g' /etc/sysconfig/selinux
setenforce 0
systemctl stop firewalld.service
systemctl disable firewalld.service
//同步节点时间;
yum install ntpdate -y
ntpdate pool.ntp.org
//修改对应节点主机名
hostname node3;su
//关闭swap分区
swapoff -a
sed -ri 's/.*swap.*/#&/g' /etc/fstab  # 永久关闭swap
//安装依赖包
yum -y install yum-utils device-mapper-persistent-data lvm2
//配置docker镜像源
wget -O /etc/yum.repos.d/docker-ce.repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
//安装docker
yum -y install docker-ce
//修改docker配置文件
[root@manager ~]# vim /etc/docker/daemon.json
添加内容如下:
{
"exec-opts": ["native.cgroupdriver=systemd"],
"log-driver": "json-file",
"log-opts": {
"max-size": "100m"
},
"storage-driver": "overlay2",
"storage-opts": [
"overlay2.override_kernel_check=true"
],
"registry-mirrors": ["https://uyah70su.mirror.aliyuncs.com"]
}
//注意,由于国内拉取镜像较慢,配置文件最后增加了registry-mirrors
mkdir -p /etc/systemd/system/docker.service.d
sed -i '/^ExecStart/s/dockerd/dockerd -H tcp:\/\/0.0.0.0:2375/g' /usr/lib/systemd/system/docker.service
//注意:2375端口是未加密、无认证的Docker远程API,存在严重安全风险,生产环境切勿对公网开放
//重启docker
[root@manager ~]# systemctl daemon-reload
[root@manager ~]# systemctl enable docker
[root@manager ~]# systemctl restart docker
ps -ef|grep -aiE docker
[root@node1 ~]# docker swarm join-token manager
To add a manager to this swarm, run the following command:
docker swarm join --token SWMTKN-1-1ll2gomc2khm5b5yjah2mmr8llpqn7jlb68rb7kudfbtpj0coo-anqgw5qli4j1n6o3c9rmgylgp 192.168.2.20:2377
[root@node3 ~]# docker swarm join --token SWMTKN-1-1ll2gomc2khm5b5yjah2mmr8llpqn7jlb68rb7kudfbtpj0coo-anqgw5qli4j1n6o3c9rmgylgp 192.168.2.20:2377
[root@node1 ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
44bghdfflwgqhzywunoqfqgqf manager Ready Active Reachable 23.0.3
heavfkcv59ed5bkikbxot7mus * node1 Ready Active Leader 23.0.3
k9a71k4kzhl9nw53ijo6lx4y5 node2 Ready Active 23.0.3
s2tjb3vmazfk2cj5oy8fj90ca node3 Ready Active Reachable 23.0.3
[root@node1 ~]# docker node update --availability drain node3
[root@node3 ~]# systemctl stop docker.socket
[root@node1 ~]# docker node demote node3
Manager node3 demoted in the swarm.
[root@node1 ~]# docker node rm node3
node3
[root@node1 ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
44bghdfflwgqhzywunoqfqgqf manager Ready Active Reachable 23.0.3
heavfkcv59ed5bkikbxot7mus * node1 Ready Active Leader 23.0.3
k9a71k4kzhl9nw53ijo6lx4y5 node2 Ready Active 23.0.3