For Docker installation, see this blog post: https://blog.csdn.net/Dream_ya/article/details/81122200
Baidu Netdisk link: https://pan.baidu.com/s/1mam5_h2HGpepcWnMZkkcMQ  password: jx7d
You can also pull the Docker images yourself from Alibaba Cloud's registry instead.
Environment:
server1 10.10.10.1 (rhel7.3)  Docker installed
server2 10.10.10.2 (rhel6.5)
server3 10.10.10.3 (rhel7.3)  Docker installed
[root@server1 ~]# docker load -i rhel7.tar
[root@server1 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
rhel7 latest 0a3eb3fde7fd 4 years ago 140MB
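The transcript jumps into a container shell next; presumably an interactive container named vm1 (the name committed below) was started from the base image first, something like:
[root@server1 ~]# docker run -it --name vm1 rhel7 bash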
bash-4.2# cat /etc/redhat-release
Red Hat Enterprise Linux Server release 7.0 (Maipo)
bash-4.2# vi /etc/yum.repos.d/yum.repo ###add a local yum repository
[rhel7.0]
name=rhel7.0
baseurl=http://10.10.10.250/rhel7.0
gpgcheck=0
bash-4.2# yum repolist
bash-4.2# rpmdb --rebuilddb ###rebuild the rpm database
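The transcript omits it, but any packages the customized image needs would be installed at this point, and the container shell exited before the commit below:
bash-4.2# exit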
[root@server1 ~]# docker commit vm1 rhel7:v1.0
[root@server1 ~]# docker images
[root@server1 ~]# docker history rhel7:v1.0
IMAGE CREATED CREATED BY SIZE COMMENT
f60e73898051 3 minutes ago bash 45.8MB
0a3eb3fde7fd 4 years ago 140MB Imported from -
[root@server1 ~]# mkdir dream
[root@server1 ~]# cd dream/
[root@server1 dream]# vim Dockerfile
FROM rhel7:v1.0
MAINTAINER Dream
ENV HOSTNAME server2
EXPOSE 80
RUN yum install -y httpd && echo "dream" >>/var/www/html/index.html
CMD ["/usr/sbin/httpd","-D","FOREGROUND"]
[root@server1 dream]# docker build -t rhel7:v2.0 .
[root@server1 dream]# docker images|grep v2.0
rhel7 v2.0 e690bec5401c 41 seconds ago 195MB
[root@server1 dream]# docker run -d --name vm2 rhel7:v2.0 ###run in the background
[root@server1 ~]# docker inspect vm2
[root@server1 ~]# curl 172.17.0.2
dream
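If only the container's IP address is needed rather than the full inspect output, docker inspect also accepts a Go-template filter, for example:
[root@server1 ~]# docker inspect -f '{{ .NetworkSettings.IPAddress }}' vm2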
[root@server1 ~]# cd dream/
[root@server1 dream]# vim Dockerfile ###the root password is set to 1 here
FROM rhel7:v1.0
MAINTAINER Dream
ENV HOSTNAME server2
EXPOSE 22
RUN rpmdb --rebuilddb && yum install -y openssh-server openssh-clients && ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -q -N "" && ssh-keygen -t ecdsa -f /etc/ssh/ssh_host_ecdsa_key -q -N "" && ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -q -N "" && echo root:1 |chpasswd
CMD ["/usr/sbin/sshd","-D"]
[root@server1 dream]# docker run -d --name vm3 rhel7:v3.0
[root@server1 dream]# ssh [email protected] ###if you can log in over ssh, it works
An image registry can also be managed with Harbor.
Official docs: https://docs.docker.com/registry/
[root@server1 ~]# docker load -i registry-2.3.1.tar
[root@server1 ~]# docker run -d --name registry -v /opt/registry:/var/lib/registry -p 5000:5000 registry:2.3.1
[root@server1 ~]# docker tag rhel7:latest localhost:5000/rhel7
[root@server1 ~]# docker push localhost:5000/rhel7 ###push the image
[root@server1 ~]# ls /opt/registry/
docker
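To confirm the push actually reached the registry, the v2 catalog API can also be queried (it should list rhel7):
[root@server1 ~]# curl http://localhost:5000/v2/_catalog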
Delete the previous images:
[root@server1 ~]# docker rmi localhost:5000/rhel7:latest
[root@server1 ~]# docker rmi rhel7:latest
[root@server1 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
registry 2.3.1 83139345d017 2 years ago 166MB
Pull the image:
[root@server1 ~]# docker pull localhost:5000/rhel7
[root@server1 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
registry 2.3.1 83139345d017 2 years ago 166MB
localhost:5000/rhel7 latest 0a3eb3fde7fd 4 years ago 140MB
[root@server1 ~]# docker tag localhost:5000/rhel7 rhel7
[root@server1 ~]# docker rmi localhost:5000/rhel7
[root@server1 ~]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
registry 2.3.1 83139345d017 2 years ago 166MB
rhel7 latest 0a3eb3fde7fd 4 years ago 140MB
So far the image can only be pulled locally, which is not very useful; below we set up remote pulls.
Remote pull:
###configure daemon.json on server1
[root@server1 ~]# vim /etc/docker/daemon.json
{"insecure-registries":["10.10.10.1:5000"]}
###restart docker, start the registry again, and load a test image
[root@server1 ~]# systemctl restart docker
[root@server1 ~]# docker start registry
[root@server1 ~]# docker load -i ubuntu.tar
[root@server1 ~]# docker images ubuntu
REPOSITORY TAG IMAGE ID CREATED SIZE
ubuntu latest 07c86167cdc4 2 years ago 188MB
###tag and push to the private registry
[root@server1 ~]# docker tag ubuntu 10.10.10.1:5000/ubuntu
[root@server1 ~]# docker push 10.10.10.1:5000/ubuntu
###test from server3
[root@server3 ~]# docker pull 10.10.10.1:5000/ubuntu ###this fails
Using default tag: latest
Error response from daemon: Get https://10.10.10.1:5000/v2/: http: server gave HTTP response to HTTPS client
[root@server3 ~]# vim /etc/docker/daemon.json
{"insecure-registries":["10.10.10.1:5000"]}
[root@server3 ~]# systemctl restart docker
[root@server3 ~]# docker pull 10.10.10.1:5000/ubuntu ###now the pull succeeds
Official docs: https://docs.docker.com/registry/insecure/#troubleshoot-insecure-registry
[root@server1 ~]# vim /etc/hosts ###add a hosts entry
10.10.10.1 server1 dream.org
[root@server1 ~]# cd /etc/docker/
[root@server1 docker]# mkdir certs
[root@server1 docker]# openssl req -newkey rsa:4096 -nodes -sha256 -keyout certs/dream.org.key -x509 -days 365 -out certs/dream.org.crt
...
Country Name (2 letter code) [XX]:CN
State or Province Name (full name) []:zhejiang
Locality Name (eg, city) [Default City]:hangzhou
Organization Name (eg, company) [Default Company Ltd]:dream
Organizational Unit Name (eg, section) []:linux
Common Name (eg, your name or your server's hostname) []:dream.org
Email Address []:
[root@server1 docker]# ls certs/
dream.org.crt dream.org.key
[root@server1 ~]# cd /opt/registry/
[root@server1 registry]# ls
docker
[root@server1 registry]# rm -rf *
[root@server1 ~]# cd /etc/docker
[root@server1 docker]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
14815beff02a registry:2.3.1 "/bin/registry /etc/…" 39 minutes ago Up 39 minutes 0.0.0.0:5000->5000/tcp registry
[root@server1 docker]# docker stop registry
[root@server1 docker]# docker rm registry
[root@server1 docker]# docker run -d --restart=always --name registry -v /etc/docker/certs:/certs -e REGISTRY_HTTP_ADDR=0.0.0.0:443 -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/dream.org.crt -e REGISTRY_HTTP_TLS_KEY=/certs/dream.org.key -p 443:443 registry:2.3.1
[root@server1 docker]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
d650394d05e3 registry:2.3.1 "/bin/registry /etc/…" 3 seconds ago Up 1 second 0.0.0.0:443->443/tcp, 5000/tcp registry
###edit daemon.json
[root@server1 docker]# vim /etc/docker/daemon.json
{"insecure-registries":["10.10.10.1:443"]}
[root@server1 docker]# systemctl restart docker
[root@server1 docker]# mkdir -p certs.d/dream.org
[root@server1 docker]# cp certs/dream.org.crt certs.d/dream.org/ca.crt
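Note that when a client pulls from 10.10.10.1:443, Docker looks for a CA certificate under /etc/docker/certs.d/<registry-address>/ca.crt, so a copy named after the IP:port may also be needed (the insecure-registries entry below covers it either way):
[root@server1 docker]# mkdir -p certs.d/10.10.10.1:443
[root@server1 docker]# cp certs/dream.org.crt certs.d/10.10.10.1:443/ca.crt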
[root@server1 docker]# docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
registry 2.3.1 83139345d017 2 years ago 166MB
rhel7 latest 0a3eb3fde7fd 4 years ago 140MB
[root@server1 docker]# docker tag rhel7 10.10.10.1:443/rhel7
[root@server1 docker]# docker push 10.10.10.1:443/rhel7
[root@server3 ~]# mkdir -p /etc/docker/certs.d/dream.org
[root@server1 docker]# scp certs.d/dream.org/ca.crt [email protected]:/etc/docker/certs.d/dream.org/
[root@server3 ~]# vim /etc/docker/daemon.json
{"insecure-registries":["10.10.10.1:443"]}
[root@server3 ~]# systemctl restart docker
[root@server3 ~]# docker pull 10.10.10.1:443/rhel7 ###the image can now be pulled
[root@server3 ~]# docker images 10.10.10.1:443/rhel7
REPOSITORY TAG IMAGE ID CREATED SIZE
10.10.10.1:443/rhel7 latest 0a3eb3fde7fd 4 years ago 140MB
Official docs: https://docs.docker.com/registry/deploying/#native-basic-auth
[root@server1 ~]# docker stop registry
[root@server1 ~]# docker rm registry
[root@server1 ~]# cd /etc/docker/
[root@server1 docker]# mkdir auth
###username: dream, password: 1
[root@server1 docker]# docker run --entrypoint htpasswd registry:2.3.1 -Bbn dream 1 > auth/htpasswd
[root@server1 docker]# docker run -d --restart=always --name registry -v /etc/docker/certs:/certs -v /etc/docker/auth:/auth -e REGISTRY_HTTP_ADDR=0.0.0.0:443 -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/dream.org.crt -e REGISTRY_HTTP_TLS_KEY=/certs/dream.org.key -e "REGISTRY_AUTH_HTPASSWD_REALM=Registry Realm" -e REGISTRY_AUTH_HTPASSWD_PATH=/auth/htpasswd -p 443:443 registry:2.3.1
[root@server1 docker]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
53e33aaea36a registry:2.3.1 "/bin/registry /etc/…" 3 seconds ago Up 2 seconds 0.0.0.0:443->443/tcp, 5000/tcp registry
[root@server1 docker]# docker login 10.10.10.1:443
Username: dream
Password:
Login Succeeded
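The credentials from a successful login are stored in the client configuration, which can be checked with:
[root@server1 docker]# cat ~/.docker/config.json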
[root@server1 ~]# docker load -i redis.tar
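The transcript does not show the tagging step; unless the tarball was saved under that name already, the loaded redis image would be tagged for the registry first, e.g.:
[root@server1 ~]# docker tag redis:latest 10.10.10.1:443/redis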
[root@server1 ~]# docker images 10.10.10.1:443/redis
REPOSITORY TAG IMAGE ID CREATED SIZE
10.10.10.1:443/redis latest 4f5f397d4b7c 2 years ago 178MB
[root@server1 ~]# docker push 10.10.10.1:443/redis
[root@server3 ~]# docker login 10.10.10.1:443
[root@server3 ~]# docker pull 10.10.10.1:443/redis
[root@server3 ~]# docker images 10.10.10.1:443/redis
[root@server2 ~]# yum install -y libcgroup
[root@server2 ~]# /etc/init.d/cgconfig start
[root@server2 ~]# cd /cgroup/memory/
[root@server2 memory]# ls
cgroup.event_control memory.move_charge_at_immigrate
cgroup.procs memory.oom_control
memory.failcnt memory.soft_limit_in_bytes
memory.force_empty memory.stat
memory.limit_in_bytes memory.swappiness
memory.max_usage_in_bytes memory.usage_in_bytes
memory.memsw.failcnt memory.use_hierarchy
memory.memsw.limit_in_bytes notify_on_release
memory.memsw.max_usage_in_bytes release_agent
memory.memsw.usage_in_bytes tasks
[root@server2 memory]# cat memory.limit_in_bytes
9223372036854775807
[root@server2 memory]# cat memory.memsw.limit_in_bytes
9223372036854775807
[root@server2 memory]# vim /etc/cgconfig.conf ###append at the end of the file
group x1 {
memory {
memory.limit_in_bytes=104857600;
memory.memsw.limit_in_bytes=104857600;
}
}
[root@server2 memory]# useradd dream
[root@server2 memory]# echo "1"|passwd --stdin dream
[root@server2 memory]# vim /etc/cgrules.conf ###append at the end of the file
dream memory x1/
[root@server2 ~]# /etc/init.d/cgconfig restart
[root@server2 ~]# /etc/init.d/cgred restart
[root@server2 ~]# cd /cgroup/memory/x1/
[root@server2 x1]# cat memory.memsw.limit_in_bytes
104857600
[root@server2 x1]# cat memory.limit_in_bytes
104857600
[root@server2 ~]# free -m
total used free shared buffers cached
Mem: 996 224 772 0 13 146
-/+ buffers/cache: 64 932
Swap: 499 0 499
[root@server2 ~]# cd /dev/shm/
[root@server2 shm]# cgexec -g memory:x1 dd if=/dev/zero of=testfile bs=1M count=300
Killed
[root@server2 shm]# free -m
total used free shared buffers cached
Mem: 996 323 672 0 13 245
-/+ buffers/cache: 64 931
Swap: 499 0 499
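To verify the cgrules binding for the dream user (rather than using cgexec), the same write can be attempted as that user; cgred should place the process in the x1 group and the kernel should kill it at the 100MB limit. A sketch, assuming the same test file location:
[root@server2 shm]# su - dream -c 'dd if=/dev/zero of=/dev/shm/testfile bs=1M count=300'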
For this test only one CPU should be online (otherwise the two dd processes will not compete for the same core); extra CPUs can be taken offline by running echo 0 > online in the corresponding cpuN directory under /sys/devices/system/cpu/.
[root@server2 ~]# lscpu
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
CPU(s): 1
On-line CPU(s) list: 0
Thread(s) per core: 1
Core(s) per socket: 1
Socket(s): 1
NUMA node(s): 1
Vendor ID: GenuineIntel
CPU family: 6
Model: 44
Stepping: 1
CPU MHz: 2494.224
BogoMIPS: 4988.44
Hypervisor vendor: KVM
Virtualization type: full
L1d cache: 32K
L1i cache: 32K
L2 cache: 4096K
NUMA node0 CPU(s): 0
[root@server2 ~]# vim /etc/cgconfig.conf
group x2 {
cpu {
cpu.shares = 100;
}
}
[root@server2 ~]# /etc/init.d/cgconfig restart
[root@server2 ~]# dd if=/dev/zero of=/dev/null &
[1] 1002
[root@server2 ~]# cgexec -g cpu:x2 dd if=/dev/zero of=/dev/null &
[2] 1003
[root@server2 ~]# top ###check the CPU usage of the two dd processes
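Since the first dd runs with the default cpu.shares of 1024 and the one in group x2 with 100, top should show roughly a 10:1 split of CPU time between them. The weight can also be changed on the fly through the cgroup filesystem, for example:
[root@server2 ~]# echo 512 > /cgroup/cpu/x2/cpu.shares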
[root@server2 ~]# killall -9 dd
[root@server2 ~]# yum install -y iotop
[root@server2 ~]# cd /cgroup/blkio/
[root@server2 blkio]# cat blkio.throttle.read_bps_device
[root@server2 blkio]# ls
blkio.io_merged blkio.throttle.io_service_bytes blkio.weight_device
blkio.io_queued blkio.throttle.io_serviced cgroup.event_control
blkio.io_service_bytes blkio.throttle.read_bps_device cgroup.procs
blkio.io_serviced blkio.throttle.read_iops_device notify_on_release
blkio.io_service_time blkio.throttle.write_bps_device release_agent
blkio.io_wait_time blkio.throttle.write_iops_device tasks
blkio.reset_stats blkio.time
blkio.sectors blkio.weight
[root@server2 ~]# ll /dev/sda ###major/minor numbers are 8, 0
brw-rw---- 1 root disk 8, 0 Oct 29 22:28 /dev/sda
[root@server2 ~]# vim /etc/cgconfig.conf
group x3 {
blkio {
blkio.throttle.read_bps_device = "8:0 1000000";
}
}
[root@server2 ~]# /etc/init.d/cgconfig restart
[root@server2 ~]# cgexec -g blkio:x3 dd if=/dev/sda of=/dev/null & ###read from sda so the read throttle actually applies
[1] 973
[root@server2 ~]# iotop -p 973 ###iotop shows the read rate capped at roughly 1MB/s
[root@server2 ~]# vim /etc/cgconfig.conf
group x4 {
freezer {}
}
[root@server2 ~]# /etc/init.d/cgconfig restart
[root@server2 ~]# dd if=/dev/zero of=/dev/null &
[1] 1138
[root@server2 ~]# ps aux|grep dd
[root@server2 ~]# cd /cgroup/freezer/x4/
[root@server2 x4]# echo '1138' >tasks
[root@server2 x4]# echo 'FROZEN' >freezer.state
[root@server2 x4]# ps aux|grep dd
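To resume the frozen dd process, write THAWED back to freezer.state and check again:
[root@server2 x4]# echo 'THAWED' >freezer.state
[root@server2 x4]# ps aux|grep dd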
Docker Compose organizes what it manages into three levels: project, service, and container. All the files in the directory where Docker Compose runs (docker-compose.yml, extends files, environment-variable files, and so on) make up one project; unless specified otherwise, the project name is the name of the current directory. A project can contain multiple services, and each service defines the image, parameters, and dependencies its containers run with. A service can consist of multiple container instances; Docker Compose does not itself solve load balancing, so other tools are needed for service discovery and load balancing.
Docker Compose is a tool for defining and running multi-container applications. You first write a Compose file describing the container services and the relationships between them, then start all the containers with a single command based on that configuration.
A Dockerfile defines a single container, while a Compose template file (in YAML format) can define an application made up of multiple related containers.
[root@server1 ~]# wget https://github.com/docker/compose/releases/download/1.23.0-rc3/docker-compose-Linux-x86_64
[root@server1 ~]# mv docker-compose-Linux-x86_64 /usr/local/bin/docker-compose
[root@server1 ~]# chmod +x /usr/local/bin/docker-compose
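A quick sanity check that the binary works:
[root@server1 ~]# docker-compose --version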
[root@server1 ~]# docker images ###the nginx and haproxy images have been loaded
REPOSITORY TAG IMAGE ID CREATED SIZE
haproxy latest fbd1f55f79b3 2 years ago 139MB
nginx latest af4b3d7d5401 2 years ago 191MB
registry 2.3.1 83139345d017 2 years ago 166MB
rhel7 latest 0a3eb3fde7fd 4 years ago 140MB
[root@server1 dream]# pwd
/root/dream
[root@server1 dream]# vim yum.repo
[rhel7.0]
name=rhel7.0
baseurl=http://10.10.10.250/rhel7.0
gpgcheck=0
[root@server1 dream]# vim Dockerfile
FROM rhel7
MAINTAINER Dream
ENV HOSTNAME server2
COPY yum.repo /etc/yum.repos.d/yum.repo
EXPOSE 80
RUN rpmdb --rebuilddb && yum install -y httpd
CMD ["/usr/sbin/httpd","-D","FOREGROUND"]
[root@server1 dream]# docker build -t rhel7:v1.0 .
<1> Create the directories
[root@server1 dream]# pwd
/root/dream
[root@server1 dream]# mkdir -p compose/{web1,web2,haproxy}
[root@server1 dream]# cd compose/
###add default index pages
[root@server1 compose]# echo "web1-apache" >web1/index.html
[root@server1 compose]# echo "web2-nginx" >web2/index.html
<2> Configure the yml
Parameter notes:
--link creates a secure connection between two containers without publishing ports; it can link one or more existing containers to the container being created.
image: the image name or image ID; if the image does not exist locally, Compose will try to pull it.
build: the path to the folder containing the Dockerfile; Compose builds the image from it automatically and then uses that image.
command: overrides the default command the container runs on start.
external_links: links to containers outside of docker-compose.yml, even containers not managed by Compose.
ports: the ports to publish.
expose: ports to expose without publishing them to the host; only linked services can reach them, and only the internal port can be specified.
volumes: volume mount paths; a host path and an access mode can be set.
volumes_from: mounts all the volumes of another service or container.
environment: sets environment variables.
env_file: reads environment variables from a file; can be a single file path or a list.
extends: extends an existing service.
net: sets the network mode.
pid: shares the process namespace with the host.
dns: configures DNS servers.
cap_add / cap_drop: adds or drops Linux capabilities for the container.
dns_search: configures DNS search domains; can be a single value or a list.
[root@server1 compose]# pwd
/root/dream/compose
[root@server1 compose]# vim docker-compose.yml
apache:
image: rhel7:v1.0
volumes:
- ./web1:/var/www/html
expose:
- 80
nginx:
image: nginx
volumes:
- ./web2:/usr/share/nginx/html
expose:
- 80
haproxy:
image: haproxy
volumes:
- ./haproxy/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
links:
- apache
- nginx
ports:
- "80:80"
expose:
- 80
<3> Configure haproxy.cfg
[root@server1 compose]# vim haproxy/haproxy.cfg
global
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
defaults
log global
mode http
option httplog
option dontlognull
timeout connect 5000ms
timeout client 50000ms
timeout server 50000ms
stats uri /status
frontend balancer
bind 0.0.0.0:80
default_backend web_backend
backend web_backend
balance roundrobin
###apache and nginx here are the service names defined in the yml above
server web1 apache:80 check
server web2 nginx:80 check
<4> Start and check the result
###if startup fails, check whether each image can start on its own, or use docker-compose -f docker-compose.yml up -d
[root@server1 compose]# docker-compose up -d
[root@server1 compose]# docker ps -a
[root@server1 compose]# netstat -lntup|grep 80
[root@server1 compose]# curl 127.0.0.1
[root@server1 compose]# docker-compose down ###shut down (or: docker-compose -f docker-compose.yml down)
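For troubleshooting, docker-compose can also show per-service status and logs, for example:
[root@server1 compose]# docker-compose ps
[root@server1 compose]# docker-compose logs haproxy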
Swarm is a fairly simple toolset released by Docker, Inc. in early December 2014 for managing Docker clusters; it turns a group of Docker hosts into a single virtual host. Swarm uses the standard Docker API as its front-end entry point, so any Docker client (docker client in go, docker_py, docker, and so on) can talk to Swarm directly. Swarm is written almost entirely in Go.
The Swarm daemon is only a scheduler plus a router; Swarm does not run containers itself. It simply accepts requests from Docker clients and schedules suitable nodes to run the containers. This means that even if Swarm goes down for some reason, the nodes in the cluster keep running as usual, and when Swarm comes back up it rebuilds the cluster state.
The top-level external interfaces include:
swarm create
swarm manage
swarm join
swarm list
[root@server1 ~]# docker ps -a ###the earlier containers have been removed; confirm nothing is running
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
[root@server1 ~]# docker swarm init
Swarm initialized: current node (kxa6fkpswxjfgdhuc2shui185) is now a manager.
To add a worker to this swarm, run the following command:
docker swarm join --token SWMTKN-1-1xs4l5x8v5r63qer69okajyqttq46xfsmar94618n2g0j8qm4e-3qbhxpyhif34jx4u7o4fpcct1 10.10.10.1:2377
To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.
[root@server3 ~]# docker swarm join --token SWMTKN-1-1xs4l5x8v5r63qer69okajyqttq46xfsmar94618n2g0j8qm4e-3qbhxpyhif34jx4u7o4fpcct1 10.10.10.1:2377
This node joined a swarm as a worker.
[root@server1 ~]# docker node ls
ID HOSTNAME STATUS AVAILABILITY MANAGER STATUS ENGINE VERSION
kxa6fkpswxjfgdhuc2shui185 * server1 Ready Active Leader 18.03.1-ce
msmrnltfpxk5x2rxwkpvtsc3n server3 Ready Active 18.03.1-ce
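If the worker join token is needed again later (for example to add more nodes), it can be reprinted on the manager:
[root@server1 ~]# docker swarm join-token worker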
<1> Make sure both nodes have the image
[root@server1 ~]# docker images nginx
REPOSITORY TAG IMAGE ID CREATED SIZE
nginx latest af4b3d7d5401 2 years ago 191MB
[root@server3 ~]# docker images nginx
REPOSITORY TAG IMAGE ID CREATED SIZE
nginx latest af4b3d7d5401 2 years ago 191MB
<2> Create and inspect the service
[root@server1 ~]# docker service create --publish 80:80 --replicas 2 --name web1 nginx
[root@server1 ~]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
c6b20a1fc8e0 nginx:latest "nginx -g 'daemon of…" 13 seconds ago Up 12 seconds 80/tcp, 443/tcp web1.1.1m1bx9fhf72t8lpjkrqy9zuur
<3> Scale up
[root@server1 ~]# docker service scale web1=4
[root@server1 ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
dd26f4dc4466 nginx:latest "nginx -g 'daemon of…" 20 seconds ago Up 19 seconds 80/tcp, 443/tcp web1.3.m7hkdn1je7b3v0bpzizbgyeks
c6b20a1fc8e0 nginx:latest "nginx -g 'daemon of…" 2 minutes ago Up 2 minutes 80/tcp, 443/tcp web1.1.1m1bx9fhf72t8lpjkrqy9zuur
[root@server3 ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
59d92d4e185e nginx:latest "nginx -g 'daemon of…" 14 seconds ago Up 13 seconds 80/tcp, 443/tcp web1.4.zifse45xs5pbd9j8ovfs1dwlt
13c220e3ec9e nginx:latest "nginx -g 'daemon of…" About a minute ago Up About a minute 80/tcp, 443/tcp web1.2.j77aenm3p1c5cctoqk81udvgc
<4> Scale down
[root@server1 ~]# docker service scale web1=2
[root@server1 ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
c6b20a1fc8e0 nginx:latest "nginx -g 'daemon of…" 4 minutes ago Up 4 minutes 80/tcp, 443/tcp web1.1.1m1bx9fhf72t8lpjkrqy9zuur
[root@server3 ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
13c220e3ec9e nginx:latest "nginx -g 'daemon of…" 5 minutes ago Up 4 minutes 80/tcp, 443/tcp web1.2.j77aenm3p1c5cctoqk81udvgc
Reference: https://github.com/dockersamples/docker-swarm-visualizer
[root@server1 ~]# docker load -i visualizer.tar
[root@server1 ~]# docker images dockersamples/visualizer
REPOSITORY TAG IMAGE ID CREATED SIZE
dockersamples/visualizer latest 17e55a9b2354 13 months ago 148MB
[root@server1 ~]# docker service create --name=viz --publish=8080:8080/tcp --constraint=node.role==manager --mount=type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock dockersamples/visualizer
http://10.10.10.1:8080
[root@server1 ~]# docker service scale web1=4
[root@server3 ~]# systemctl stop docker
You can see that all replicas are now running on the server1 node.
[root@server3 ~]# systemctl restart docker
[root@server1 ~]# docker service scale web1=6
[root@server1 ~]# docker load -i httpd.tar
[root@server1 ~]# docker images httpd
REPOSITORY TAG IMAGE ID CREATED SIZE
httpd latest eae27e453ac8 17 months ago 314MB
[root@server1 ~]# docker service update --update-parallelism 2 --update-delay 2s --update-failure-action rollback --image httpd web1
You can see that all web1 replicas have been updated to the httpd image.
http://10.10.10.1/
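Besides the automatic --update-failure-action rollback configured above, the service can also be rolled back to the previous image manually:
[root@server1 ~]# docker service update --rollback web1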
[root@server1 ~]# docker load -i flask.tar
[root@server1 ~]# docker images python
REPOSITORY TAG IMAGE ID CREATED SIZE
python flask e1417d594323 17 months ago 684MB
[root@server1 dream]# pwd
/root/dream
[root@server1 dream]# cat test.py
from flask import Flask
import os
app = Flask(__name__)
@app.route("/")
def env():
return os.environ["HOSTNAME"]
app.run(host="0.0.0.0")
[root@server1 dream]# vim Dockerfile
FROM python:flask
MAINTAINER Dream
COPY test.py /test.py
[root@server1 dream]# docker build -t python:flask1.0 .
[root@server1 dream]# docker images python:flask1.0
REPOSITORY TAG IMAGE ID CREATED SIZE
python flask1.0 d0e701d313c7 45 seconds ago 684MB
[root@server1 dream]# docker save python:flask1.0 >python1.0.tar
[root@server1 dream]# scp python1.0.tar [email protected]:
[root@server3 ~]# docker load -i python1.0.tar
[root@server3 ~]# docker images python:flask1.0
REPOSITORY TAG IMAGE ID CREATED SIZE
python flask1.0 d0e701d313c7 4 minutes ago 684MB
[root@server1 ~]# docker service rm web1
[root@server1 ~]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
f9c8a5ea3266 dockersamples/visualizer:latest "npm start" 3 seconds ago Up 2 seconds (health: starting) 8080/tcp viz.1.tyfk83gk761yvvawd46ratpk1
[root@server1 ~]# docker service rm viz
[root@server1 ~]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
[root@server1 ~]# docker service create --replicas 4 --publish 80:5000 python:flask1.0 python /test.py
[root@server2 ~]# for i in {1..10};do curl -w "\n" 10.10.10.1;done
dbb650a6bd8d
f135558ac78d
342dg1de45d1
9dsf32b34f241
dbb650a6bd8d
f135558ac78d
342dg1de45d1
9dsf32b34f241
dbb650a6bd8d
f135558ac78d
[root@server1 ~]# mkdir web
[root@server1 ~]# echo "server1" >web/index.html
[root@server3 ~]# mkdir web
[root@server3 ~]# echo "server3" >web/index.html
[root@server1 ~]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
dbb650a6bd8d python:flask1.0 "python /test.py" 5 minutes ago Up 5 minutes condescending_ptolemy.3.rh38tfxt37gzn2lo2bcn1jdrw
f135558ac78d python:flask1.0 "python /test.py" 5 minutes ago Up 5 minutes condescending_ptolemy.1.hwu992c9698e31wbleaxixgu4
[root@server1 ~]# docker service rm condescending_ptolemy
[root@server1 ~]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
[root@server1 ~]# docker service create --name web --replicas 4 -p 80:80 --mount type=bind,source=/root/web/,target=/usr/local/apache2/htdocs httpd
[root@server2 ~]# for i in {1..10};do curl -w "\n" 10.10.10.1;done
server1
server1
server3
server3
server1
server1
server3
server3
server1
server1
[root@server1 ~]# mkdir web1
[root@server1 ~]# echo "server1-web1" >web1/index.html
[root@server3 ~]# mkdir web1
[root@server3 ~]# echo "server3-web1" >web1/index.html
[root@server1 ~]# docker service update --mount-add type=bind,source=/root/web1,target=/usr/local/apache2/htdocs web
[root@server2 ~]# curl 10.10.10.1
server1-web1
[root@server2 ~]# curl 10.10.10.1
server3-web1
[root@server2 ~]# curl 10.10.10.1
server1-web1