After the Docker service is installed, a NIC named docker0 is created on every host by default, with the IP address 172.17.0.1/16, and three different types of networks are generated.
[root@gbase8c_private ~]# ifconfig docker0
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.17.0.1 netmask 255.255.0.0 broadcast 172.17.255.255
ether 02:42:94:9c:bc:27 txqueuelen 0 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
[root@gbase8c_private ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
3eba936a2271 bridge bridge local
19023a4d913b host host local
1a79176b52c5 none null local
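The subnet and gateway of the default bridge can be confirmed with a Go-template query; a minimal check, relying on the standard IPAM fields in the `docker network inspect` JSON:
# Print the IPAM subnet and gateway of the default bridge network
docker network inspect -f '{{range .IPAM.Config}}{{.Subnet}} {{.Gateway}}{{end}}' bridge
# A default install typically prints: 172.17.0.0/16 172.17.0.1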
The following HA example publishes containers behind a keepalived VIP and HAProxy. The VIP 192.168.56.100 can first be tested by binding it manually as an alias interface (note the broadcast address must match the 192.168.56.0/24 subnet):
ifconfig enp0s3:1 192.168.56.100 broadcast 192.168.56.255 netmask 255.255.255.0 up
Server1 (MASTER, 192.168.56.199):
yum install keepalived -y
cat /etc/keepalived/keepalived.conf
vrrp_instance MAKE_VIP_INT {
    state MASTER              # mark this node as the MASTER
    interface enp0s3          # NIC to bind, as reported by ifconfig
    virtual_router_id 1       # VRRP router id for this instance, similar to a cluster id; must be identical on both nodes
    priority 100              # priority; the MASTER must be higher than the BACKUP
    advert_int 1              # advertisement interval, 1s
    unicast_src_ip 192.168.56.199
    unicast_peer {
        192.168.56.200
    }
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {       # the LVS VIP
        192.168.56.100/24 dev enp0s3 label enp0s3:1
    }
}
[root@gbase8c_private keepalived]# systemctl restart keepalived && systemctl enable keepalived
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
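As a quick sanity check on the MASTER, the VIP should now be bound to enp0s3 (under the enp0s3:1 label configured above):
# On Server1 the VIP should be visible once keepalived is running
ip addr show enp0s3 | grep 192.168.56.100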
Server2 (BACKUP, 192.168.56.200):
yum install keepalived -y
cat /etc/keepalived/keepalived.conf
vrrp_instance MAKE_VIP_INT {
    state BACKUP              # mark this node as the BACKUP
    interface enp0s3          # NIC to bind, as reported by ifconfig
    virtual_router_id 1       # must match the MASTER's virtual_router_id
    priority 50               # priority; lower than the MASTER
    advert_int 1              # advertisement interval, 1s
    unicast_src_ip 192.168.56.200
    unicast_peer {
        192.168.56.199
    }
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {       # the LVS VIP
        192.168.56.100/24 dev enp0s3 label enp0s3:1
    }
}
[root@gbase8c_1 keepalived]# systemctl restart keepalived && systemctl enable keepalived
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
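With both nodes up, failover can be exercised; a minimal sketch, assuming the unicast peers can reach each other over VRRP:
# On Server1 (MASTER): simulate a failure
systemctl stop keepalived
# On Server2 (BACKUP): the VIP should appear within a few advert intervals
ip addr show enp0s3 | grep 192.168.56.100
# Restart the MASTER; with priority 100 > 50 it takes the VIP back
systemctl start keepalived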
HAProxy on the BACKUP node must be able to bind the VIP even while the address is held by the MASTER, so enable non-local binds on both servers:
sysctl -w net.ipv4.ip_nonlocal_bind=1
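This sysctl does not survive a reboot; one common way to persist it:
# Persist the setting so HAProxy can always bind the VIP address
echo 'net.ipv4.ip_nonlocal_bind = 1' >> /etc/sysctl.conf
sysctl -p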
Server1:
yum install haproxy -y
cat /etc/haproxy/haproxy.cfg
global
maxconn 100000
uid 99
gid 99
daemon
nbproc 1
log 127.0.0.1 local0 info
defaults
option http-keep-alive
#option forwardfor
maxconn 100000
mode tcp
timeout connect 500000ms
timeout client 500000ms
timeout server 500000ms
listen stats
mode http
bind 0.0.0.0:9999
stats enable
log global
stats uri /haproxy-stats
stats auth haadmin:q1w2e3r4ys
#===================================
frontend docker_nginx_web
bind 192.168.56.100:80
mode http
default_backend docker_nginx_hosts
backend docker_nginx_hosts
mode http
#balance source
balance roundrobin
server 192.168.56.199 192.168.56.199:81 check inter 2000 fall 3 rise 5
server 192.168.56.200 192.168.56.200:81 check inter 2000 fall 3 rise 5
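Before restarting the service, the file can be syntax-checked with HAProxy's check mode:
# Validate the configuration without starting the proxy
haproxy -f /etc/haproxy/haproxy.cfg -c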
Server2:
yum install haproxy -y
cat /etc/haproxy/haproxy.cfg
global
maxconn 100000
uid 99
gid 99
daemon
nbproc 1
log 127.0.0.1 local0 info
defaults
option http-keep-alive
#option forwardfor
maxconn 100000
mode tcp
timeout connect 500000ms
timeout client 500000ms
timeout server 500000ms
listen stats
mode http
bind 0.0.0.0:9999
stats enable
log global
stats uri /haproxy-stats
stats auth haadmin:q1w2e3r4ys
#===================================
frontend docker_nginx_web
bind 192.168.56.100:80
mode http
default_backend docker_nginx_hosts
backend docker_nginx_hosts
mode http
#balance source
balance roundrobin
server 192.168.56.199 192.168.56.199:81 check inter 2000 fall 3 rise 5
server 192.168.56.200 192.168.56.200:81 check inter 2000 fall 3 rise 5
systemctl enable haproxy
systemctl restart haproxy
[root@gbase8c_1 haproxy]# systemctl enable haproxy
Created symlink from /etc/systemd/system/multi-user.target.wants/haproxy.service to /usr/lib/systemd/system/haproxy.service.
[root@gbase8c_1 haproxy]# systemctl restart haproxy
[root@gbase8c_1 haproxy]# systemctl status haproxy.service
● haproxy.service - HAProxy Load Balancer
Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; vendor preset: disabled)
Active: active (running) since Thu 2023-12-14 22:04:42 CST; 7s ago
Main PID: 3873 (haproxy-systemd)
Tasks: 3
Memory: 1.5M
CGroup: /system.slice/haproxy.service
├─3873 /usr/sbin/haproxy-systemd-wrapper -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid
├─3876 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
└─3877 /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
Dec 14 22:04:42 gbase8c_1 systemd[1]: Started HAProxy Load Balancer.
Dec 14 22:04:42 gbase8c_1 haproxy-systemd-wrapper[3873]: haproxy-systemd-wrapper: executing /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /run/haproxy.pid -Ds
Server1:
docker rm -f `docker ps -a -q`
docker run --name nginx-web1 -d -p81:80 jack/nginx-1.22.1:v1 nginx
ss -tnl
Server2:
docker rm -f `docker ps -a -q`
docker run --name nginx-web1 -d -p81:80 jack/nginx-1.22.1:v1 nginx
ss -tnl
[root@gbase8c_1 ~]# ss -tnl
State Recv-Q Send-Q Local Address:Port Peer Address:Port
LISTEN 0 128 *:9999 *:*
LISTEN 0 128 192.168.56.100:80 *:*
LISTEN 0 128 *:81 *:*
[root@gbase8c_1 ~]# ip addr
2: enp0s3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
link/ether 08:00:27:3d:53:56 brd ff:ff:ff:ff:ff:ff
inet 192.168.56.200/24 brd 192.168.56.255 scope global noprefixroute enp0s3
valid_lft forever preferred_lft forever
inet 192.168.56.100/24 scope global secondary enp0s3:1
valid_lft forever preferred_lft forever
inet6 fe80::858f:968b:2bfe:f3c0/64 scope link noprefixroute
valid_lft forever preferred_lft forever
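With the VIP active and both backends listening on port 81, traffic through the load balancer can be spot-checked; a small sketch, assuming the nginx default page is being served:
# Requests through the VIP are distributed round-robin
# across 192.168.56.199:81 and 192.168.56.200:81
for i in 1 2 3 4; do curl -s -o /dev/null -w "%{http_code}\n" http://192.168.56.100/; done
# Per-server hit counters are on the stats page configured above
# (http://<host>:9999/haproxy-stats, auth haadmin:q1w2e3r4ys)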
Containers on the same host can reach one another by custom container name. For example, an application might serve static pages from nginx and dynamic pages from tomcat; because a container's internal IP is assigned dynamically at startup while its custom name stays relatively fixed, name-based access between containers is well suited to this scenario.
docker run --name nginx-1 -d -p 8801:80 jack/nginx-1.22.1:v1 nginx
[root@gbase8c_private ~]# docker run --name nginx-1 -d -p 8801:80 jack/nginx-1.22.1:v1 nginx
a405a33a8b859396fd03f5be52014a32f6b13a3e63552ed3a9e3c87b002772a4
[root@gbase8c_private ~]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
a405a33a8b85 jack/nginx-1.22.1:v1 "nginx" 7 seconds ago Up 6 seconds 443/tcp, 0.0.0.0:8801->80/tcp nginx-1
[root@gbase8c_private ~]# docker exec -it a405a33a8b85 bash
[root@a405a33a8b85 /]# cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.3 a405a33a8b85
docker run -d --name nginx-2 --link nginx-1 -p 8802:80 jack/nginx-1.22.1:v1 nginx
[root@gbase8c_private ~]# docker run -d --name nginx-2 --link nginx-1 -p 8802:80 jack/nginx-1.22.1:v1 nginx
f08679e144deb92bc138bc8688e0eeafda4b501468a82aa51291cf187ddc3634
[root@gbase8c_private ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
f08679e144de jack/nginx-1.22.1:v1 "nginx" 58 seconds ago Up 58 seconds 443/tcp, 0.0.0.0:8802->80/tcp nginx-2
a405a33a8b85 jack/nginx-1.22.1:v1 "nginx" 3 minutes ago Up 3 minutes 443/tcp, 0.0.0.0:8801->80/tcp nginx-1
[root@gbase8c_private ~]# docker exec -it f08679e144de bash
[root@f08679e144de /]# cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.3 nginx-1 a405a33a8b85 #name and ID of the linked container; the entry is added only locally, not on the peer
172.17.0.4 f08679e144de
[root@f08679e144de /]# ping nginx-1
PING nginx-1 (172.17.0.3) 56(84) bytes of data.
64 bytes from nginx-1 (172.17.0.3): icmp_seq=1 ttl=64 time=0.075 ms
64 bytes from nginx-1 (172.17.0.3): icmp_seq=2 ttl=64 time=0.112 ms
The custom container name from the previous step may change later, and if programs call services by container name, every rename breaks the callers. Rather than updating every consumer each time, a custom alias can be used: the container name can then change freely as long as the alias stays the same.
Command format:
docker run -d --name <new-container-name> --link <target-container-name>:<alias> -p <host-port>:<container-port> <image> <command>
docker run -d --name nginx-3 --link nginx-1:custom_vm_name -p 8803:80 jack/nginx-1.22.1:v1 nginx
[root@gbase8c_private ~]# docker run -d --name nginx-3 --link nginx-1:custom_vm_name -p 8803:80 jack/nginx-1.22.1:v1 nginx
d514b2b69550017b7ff4450c226103eab6c4c57a84a6ed370c831006fec1cddb
[root@gbase8c_private ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
d514b2b69550 jack/nginx-1.22.1:v1 "nginx" 3 seconds ago Up 3 seconds 443/tcp, 0.0.0.0:8803->80/tcp nginx-3
f08679e144de jack/nginx-1.22.1:v1 "nginx" 9 minutes ago Up 9 minutes 443/tcp, 0.0.0.0:8802->80/tcp nginx-2
a405a33a8b85 jack/nginx-1.22.1:v1 "nginx" 11 minutes ago Up 11 minutes 443/tcp, 0.0.0.0:8801->80/tcp nginx-1
[root@gbase8c_private ~]# docker exec -it d514b2b69550 bash
[root@d514b2b69550 /]# cat /etc/hosts
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
172.17.0.3 custom_vm_name a405a33a8b85 nginx-1
172.17.0.2 d514b2b69550
[root@d514b2b69550 /]# ping custom_vm_name
PING custom_vm_name (172.17.0.3) 56(84) bytes of data.
64 bytes from custom_vm_name (172.17.0.3): icmp_seq=1 ttl=64 time=0.128 ms
64 bytes from custom_vm_name (172.17.0.3): icmp_seq=2 ttl=64 time=0.123 ms
64 bytes from custom_vm_name (172.17.0.3): icmp_seq=3 ttl=64 time=0.111 ms
^C
--- custom_vm_name ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2003ms
rtt min/avg/max/mdev = 0.111/0.120/0.128/0.014 ms
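Note that --link is a legacy Docker feature. On a user-defined bridge network, Docker's embedded DNS resolves container names (and --network-alias aliases) directly, without writing /etc/hosts entries on either side; a sketch using the same image (the network name app-net is arbitrary):
# Create a user-defined bridge network and attach two containers
docker network create app-net
docker run -d --name web1 --network app-net jack/nginx-1.22.1:v1 nginx
docker run -it --name web2 --network app-net jack/nginx-1.22.1:v1 bash
# Inside web2, the name web1 now resolves via the embedded DNS:
#   ping web1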
Containers on the same host can communicate with each other directly, but how can a container on another host be reached?
[root@gbase8c_private ~]# docker network list
NETWORK ID NAME DRIVER SCOPE
#bridge: bridged; containers get their own IP on the bridge subnet
e211f71bba4d bridge bridge local
#host: no separate IP; the container shares the host's IP and listens on the host's ports
19023a4d913b host host local
#none: no networking
1a79176b52c5 none null local
#First confirm that port 80 is free on the host, then start a new container in host network mode
#docker run -d --name net_host --net=host jack/nginx-1.22.1:v1 nginx
[root@gbase8c_private ~]# docker run -d --name net_host --net=host jack/nginx-1.22.1:v1 nginx
ba8e9ab9e2f604c92518733673fcda90f8084e35f41944b8ef1167167c792b8c
[root@gbase8c_private ~]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
ba8e9ab9e2f6 jack/nginx-1.22.1:v1 "nginx" 3 seconds ago Up 2 seconds net_host
[root@gbase8c_private ~]# docker exec -it ba8e9ab9e2f6 bash
[root@gbase8c_private /]# hostname
gbase8c_private
#Verify the network information
[root@gbase8c_private /]# ifconfig
br-69052870abe7: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.18.0.1 netmask 255.255.0.0 broadcast 172.18.255.255
ether 02:42:68:19:1c:77 txqueuelen 0 (Ethernet)
RX packets 3225 bytes 579415 (565.8 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 312 bytes 27961 (27.3 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 172.17.0.1 netmask 255.255.0.0 broadcast 172.17.255.255
inet6 fe80::42:99ff:fe40:a53e prefixlen 64 scopeid 0x20<link>
ether 02:42:99:40:a5:3e txqueuelen 0 (Ethernet)
RX packets 1669 bytes 104806 (102.3 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 3268 bytes 237614 (232.0 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
enp0s3: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.56.199 netmask 255.255.255.0 broadcast 192.168.56.255
inet6 fe80::9b58:c5b7:cb7d:fea8 prefixlen 64 scopeid 0x20<link>
ether 08:00:27:05:6c:a7 txqueuelen 1000 (Ethernet)
RX packets 22842 bytes 1813943 (1.7 MiB)
#Access verification
#The none mode gives a container no network interfaces other than loopback:
[root@gbase8c_private ~]# docker run -d --name net_none --net=none jack/nginx-1.22.1:v1 nginx
ddb39e82ca080aa1cf5d07e13671698b38e1b6a41d38e3556ab8a9d68483a102
[root@gbase8c_private ~]# docker ps -a
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
ddb39e82ca08 jack/nginx-1.22.1:v1 "nginx" 3 seconds ago Up 2 seconds net_none
[root@gbase8c_private ~]# docker exec -it ddb39e82ca08 bash
[root@ddb39e82ca08 /]# ifconfig
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
#Container mode: the new container shares the network namespace of an existing container
#docker run -d --name nginx-web1 jack/nginx-1.22.1:v1 nginx
#docker run -it --name net_container --net=container:nginx-web1 jack/nginx-1.22.1:v1 bash
[root@gbase8c_private ~]# docker run -d --name nginx-web1 jack/nginx-1.22.1:v1 nginx
e858dc3d3ca7e940d9b4b40343b1bb8238487eeefd9cf2b39436e265533ad19d
[root@gbase8c_private ~]# docker run -it --name net_container --net=container:nginx-web1 jack/nginx-1.22.1:v1 bash
[root@e858dc3d3ca7 /]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.17.0.2 netmask 255.255.0.0 broadcast 172.17.255.255
ether 02:42:ac:11:00:02 txqueuelen 0 (Ethernet)
RX packets 8 bytes 656 (656.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
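Because net_container shares nginx-web1's network namespace, nginx's listener is reachable over loopback from inside net_container; a quick check, assuming curl is present in the image and nginx listens on port 80:
# Run inside net_container: port 80 belongs to the shared namespace
curl -s -o /dev/null -w "%{http_code}\n" http://127.0.0.1:80/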
#Bridge mode is the default when no --net option is specified
docker network inspect bridge
docker run -d --name net_bridge jack/nginx-1.22.1:v1 nginx
[root@gbase8c_private ~]# docker run -d --name net_bridge jack/nginx-1.22.1:v1 nginx
98d6e3abcde5e3017ae38ddf0524ccbb4fcd5638371d0154549889ce38d8a646
[root@gbase8c_private ~]# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
98d6e3abcde5 jack/nginx-1.22.1:v1 "nginx" 2 seconds ago Up 1 second 80/tcp, 443/tcp net_bridge
[root@gbase8c_private ~]# docker exec -it 98d6e3abcde5 bash
[root@98d6e3abcde5 /]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 172.17.0.3 netmask 255.255.0.0 broadcast 172.17.255.255
ether 02:42:ac:11:00:03 txqueuelen 0 (Ethernet)
RX packets 8 bytes 656 (656.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING> mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
loop txqueuelen 1000 (Local Loopback)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Server1: change the docker0 subnet by passing --bip to dockerd
vim /usr/lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd --bip=10.10.0.1/24
systemctl daemon-reload
systemctl restart docker
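Editing the unit file is one option; the same setting is also supported through the bip key in /etc/docker/daemon.json, which avoids touching the systemd unit (use one mechanism or the other; dockerd refuses to start if the same option is set in both places). A sketch of the equivalent file, followed by the same daemon restart:
cat /etc/docker/daemon.json
{
  "bip": "10.10.0.1/24"
}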
Log:
[root@gbase8c_private ~]# ifconfig
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 10.10.0.1 netmask 255.255.255.0 broadcast 10.10.0.255
inet6 fe80::42:99ff:fe40:a53e prefixlen 64 scopeid 0x20<link>
ether 02:42:99:40:a5:3e txqueuelen 0 (Ethernet)
RX packets 1669 bytes 104806 (102.3 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 3268 bytes 237614 (232.0 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Server2:
vim /usr/lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd --bip=10.10.1.1/24
systemctl daemon-reload
systemctl restart docker
Log:
docker0: flags=4099<UP,BROADCAST,MULTICAST> mtu 1500
inet 10.10.1.1 netmask 255.255.255.0 broadcast 10.10.1.255
ether 02:42:c8:c5:b6:c1 txqueuelen 0 (Ethernet)
RX packets 0 bytes 0 (0.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 0 bytes 0 (0.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
Server1:
docker run -d --name test-net-vm jack/nginx-1.22.1:v1 nginx
Server2:
docker run -d --name test-net-vm jack/nginx-1.22.1:v1 nginx
Log: verify the container IPs
Server1:
[root@4a92f5fffe6f /]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 10.10.0.2 netmask 255.255.255.0 broadcast 10.10.0.255
Server2:
[root@4c872e5e9ddf /]# ifconfig
eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 10.10.1.2 netmask 255.255.255.0 broadcast 10.10.1.255
Server1:
iptables -A FORWARD -s 192.168.56.0/24 -j ACCEPT
route add -net 10.10.1.0/24 gw 192.168.56.200
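For the two hosts to relay packets between enp0s3 and their docker bridges, kernel IP forwarding must be enabled on both (Docker normally enables it, but it is worth verifying); note also that routes added with route add do not persist across reboots:
# Verify, and enable if needed, packet forwarding on both hosts
sysctl net.ipv4.ip_forward
sysctl -w net.ipv4.ip_forward=1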
Log:
[root@gbase8c_private ~]# iptables -A FORWARD -s 192.168.56.0/24 -j ACCEPT
[root@gbase8c_private ~]# route add -net 10.10.1.0/24 gw 192.168.56.200
[root@gbase8c_private ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
10.10.1.0 192.168.56.200 255.255.255.0 UG 0 0 0 enp0s3
[root@gbase8c_private ~]# ping 10.10.1.2
PING 10.10.1.2 (10.10.1.2) 56(84) bytes of data.
64 bytes from 10.10.1.2: icmp_seq=1 ttl=63 time=0.378 ms
64 bytes from 10.10.1.2: icmp_seq=2 ttl=63 time=1.21 ms
64 bytes from 10.10.1.2: icmp_seq=3 ttl=63 time=0.886 ms
^C
--- 10.10.1.2 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2002ms
rtt min/avg/max/mdev = 0.378/0.827/1.218/0.346 ms
Server2:
iptables -A FORWARD -s 192.168.56.0/24 -j ACCEPT
route add -net 10.10.0.0/24 gw 192.168.56.199
Log:
[root@gbase8c_1 ~]# iptables -A FORWARD -s 192.168.56.0/24 -j ACCEPT
[root@gbase8c_1 ~]# route add -net 10.10.0.0/24 gw 192.168.56.199
[root@gbase8c_1 ~]# route -n
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
10.10.0.0 192.168.56.199 255.255.255.0 UG 0 0 0 enp0s3
[root@gbase8c_1 ~]# ping 10.10.0.2
PING 10.10.0.2 (10.10.0.2) 56(84) bytes of data.
64 bytes from 10.10.0.2: icmp_seq=1 ttl=63 time=0.553 ms
64 bytes from 10.10.0.2: icmp_seq=2 ttl=63 time=1.09 ms
^C
--- 10.10.0.2 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.553/0.823/1.094/0.272 ms
The forwarded ICMP traffic can be captured for verification (inside the containers the NIC is eth0):
tcpdump -i eth0 -vnn icmp
Server1 container:
[root@gbase8c_private ~]# docker exec -it 4a92f5fffe6f bash
[root@4a92f5fffe6f /]# ping 10.10.1.2
PING 10.10.1.2 (10.10.1.2) 56(84) bytes of data.
64 bytes from 10.10.1.2: icmp_seq=1 ttl=62 time=0.267 ms
64 bytes from 10.10.1.2: icmp_seq=2 ttl=62 time=0.351 ms
^C
--- 10.10.1.2 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.267/0.309/0.351/0.042 ms
Server2 container:
[root@gbase8c_1 ~]# docker exec -it 4c872e5e9ddf bash
[root@4c872e5e9ddf /]# ping 10.10.0.2
PING 10.10.0.2 (10.10.0.2) 56(84) bytes of data.
64 bytes from 10.10.0.2: icmp_seq=1 ttl=62 time=0.502 ms
64 bytes from 10.10.0.2: icmp_seq=2 ttl=62 time=0.330 ms
^C
--- 10.10.0.2 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1000ms
rtt min/avg/max/mdev = 0.330/0.416/0.502/0.086 ms
Custom networks can be created with the docker command; a custom network can define its own IP address range, gateway, and other settings.
docker network create --help
docker network create -d bridge --subnet 172.27.0.0/21 --gateway 172.27.0.1 mydocker-net
Log:
[root@gbase8c_private ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
6da39a5a1ab9 bridge bridge local
69052870abe7 harbor_harbor bridge local
19023a4d913b host host local
1a79176b52c5 none null local
[root@gbase8c_private ~]# docker network create --help
Usage: docker network create [OPTIONS] NETWORK
Create a network
Options:
--attachable Enable manual container attachment
--aux-address map Auxiliary IPv4 or IPv6 addresses used by Network driver (default map[])
--config-from string The network from which copying the configuration
--config-only Create a configuration only network
-d, --driver string Driver to manage the Network (default "bridge")
--gateway strings IPv4 or IPv6 Gateway for the master subnet
--ingress Create swarm routing-mesh network
--internal Restrict external access to the network
--ip-range strings Allocate container ip from a sub-range
--ipam-driver string IP Address Management Driver (default "default")
--ipam-opt map Set IPAM driver specific options (default map[])
--ipv6 Enable IPv6 networking
--label list Set metadata on a network
-o, --opt map Set driver specific options (default map[])
--scope string Control the network's scope
--subnet strings Subnet in CIDR format that represents a network segment
[root@gbase8c_private ~]# docker network create -d bridge --subnet 172.27.0.0/21 --gateway 172.27.0.1 mydocker-net
c8f96b62282f63b08833bc82763f1c2354e78a37e8e8b4c0fe4d564354c8fcb3
[root@gbase8c_private ~]# docker network ls
NETWORK ID NAME DRIVER SCOPE
6da39a5a1ab9 bridge bridge local
69052870abe7 harbor_harbor bridge local
19023a4d913b host host local
c8f96b62282f mydocker-net bridge local
1a79176b52c5 none null local
docker run -it --name c1 --network mydocker-net centos
Log:
[root@gbase8c_private ~]# docker run -it --name c1 --network mydocker-net centos:latest
[root@ccc8e1215ac6 /]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
11: eth0@if12: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:1b:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.27.0.2/21 brd 172.27.7.255 scope global eth0
valid_lft forever preferred_lft forever
Log:
[root@gbase8c_private ~]# iptables -vnl
iptables v1.4.21: unknown option "-vnl"
Try `iptables -h' or 'iptables --help' for more information.
[root@gbase8c_private ~]# iptables -vnL
......
Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
7 812 ACCEPT all -- * br-c8f96b62282f 0.0.0.0/0 0.0.0.0/0 ctstate RELATED,ESTABLISHED
0 0 DOCKER all -- * br-c8f96b62282f 0.0.0.0/0 0.0.0.0/0
9 722 ACCEPT all -- br-c8f96b62282f !br-c8f96b62282f 0.0.0.0/0 0.0.0.0/0
0 0 ACCEPT all -- br-c8f96b62282f br-c8f96b62282f 0.0.0.0/0 0.0.0.0/0
Chain DOCKER-ISOLATION-STAGE-1 (1 references)
pkts bytes target prot opt in out source destination
9 722 DOCKER-ISOLATION-STAGE-2 all -- br-c8f96b62282f !br-c8f96b62282f 0.0.0.0/0 0.0.0.0/0
Chain DOCKER-ISOLATION-STAGE-2 (3 references)
pkts bytes target prot opt in out source destination
0 0 DROP all -- * br-c8f96b62282f 0.0.0.0/0 0.0.0.0/0
......
[root@gbase8c_private ~]# iptables -t nat -vnL
......
pkts bytes target prot opt in out source destination
3 194 MASQUERADE all -- * !br-c8f96b62282f 172.27.0.0/21 0.0.0.0/0
pkts bytes target prot opt in out source destination
0 0 RETURN all -- br-c8f96b62282f * 0.0.0.0/0 0.0.0.0/0
......
There is now a docker0 (10.10.0.0/24) network and a custom mydocker-net (172.27.0.0/21) network, each running some containers. How can containers on these two different networks communicate with each other?
iptables-save > iptables-rule.txt
Comment out the DROP rules in the saved file.
Log:
......
#-A DOCKER-ISOLATION-STAGE-2 -o docker0 -j DROP
#-A DOCKER-ISOLATION-STAGE-2 -o br-69052870abe7 -j DROP
......
Re-import the iptables rules:
iptables-restore < iptables-rule.txt
Log (the ping fails before the edited rules are re-imported and succeeds afterwards):
[root@ccc8e1215ac6 /]# ip addr
13: eth0@if14: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UP group default
link/ether 02:42:ac:1b:00:02 brd ff:ff:ff:ff:ff:ff link-netnsid 0
inet 172.27.0.2/21 brd 172.27.7.255 scope global eth0
valid_lft forever preferred_lft forever
[root@ccc8e1215ac6 /]# ping 10.10.0.2
PING 10.10.0.2 (10.10.0.2) 56(84) bytes of data.
^C
--- 10.10.0.2 ping statistics ---
8 packets transmitted, 0 received, 100% packet loss, time 7001ms
[root@ccc8e1215ac6 /]# ping 10.10.0.2
PING 10.10.0.2 (10.10.0.2) 56(84) bytes of data.
64 bytes from 10.10.0.2: icmp_seq=1 ttl=63 time=0.057 ms
64 bytes from 10.10.0.2: icmp_seq=2 ttl=63 time=0.059 ms
64 bytes from 10.10.0.2: icmp_seq=3 ttl=63 time=0.062 ms
^C
--- 10.10.0.2 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2041ms
rtt min/avg/max/mdev = 0.057/0.059/0.062/0.006 ms
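Editing the DOCKER-ISOLATION rules works, but Docker rebuilds its iptables rules (for example when the service restarts), which reverts the change. For containers on the same host, the supported alternative is to attach a container to both networks:
# Give c1 a second interface on the default bridge so it can reach
# docker0-attached containers without touching iptables
docker network connect bridge c1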