CentOS version:
[root@host ~]# cat /etc/redhat-release
CentOS Linux release 7.3.1611 (Core)
Install Open vSwitch with yum:
[root@host ~]# yum install openvswitch
Check the OVS version:
[root@host ~]# ovs-appctl --version
ovs-appctl (Open vSwitch) 2.0.0
Compiled Apr 19 2018 17:57:34
Check the OpenFlow protocol versions supported by OVS:
[root@host ~]# ovs-ofctl --version
ovs-ofctl (Open vSwitch) 2.0.0
Compiled Apr 19 2018 17:57:34
OpenFlow versions 0x1:0x4
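The range 0x1:0x4 means this build can negotiate OpenFlow 1.0 through 1.3. As an optional aside (not part of the original transcript), once the bridge br0 is created further below, the versions a bridge accepts can be restricted through its protocols column, and ovs-ofctl can be told which version to speak with -O, for example:
[root@host ~]# ovs-vsctl set bridge br0 protocols=OpenFlow10,OpenFlow13
[root@host ~]# ovs-ofctl -O OpenFlow13 dump-flows br0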
Start Open vSwitch:
[root@host ~]# systemctl start openvswitch
[root@host ~]# systemctl status openvswitch
● openvswitch.service - Open vSwitch
Loaded: loaded (/usr/lib/systemd/system/openvswitch.service; disabled; vendor preset: disabled)
Active: active (exited) since Wed 2018-07-25 12:37:44 UTC; 2s ago
Process: 25972 ExecStart=/bin/true (code=exited, status=0/SUCCESS)
Main PID: 25972 (code=exited, status=0/SUCCESS)
Jul 25 12:37:44 host systemd[1]: Starting Open vSwitch...
Jul 25 12:37:44 host systemd[1]: Started Open vSwitch.
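Note that ExecStart here is just /bin/true; the ovsdb-server and ovs-vswitchd daemons are actually started by the companion units this wrapper service depends on. If OVS should come back automatically after a reboot, enabling the unit is a reasonable extra step (not shown in the original walkthrough):
[root@host ~]# systemctl enable openvswitch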
Create two network namespaces:
[root@host ~]# ip netns add ns0
[root@host ~]# ip netns add ns1
[root@host ~]# ip netns list
ns1
ns0
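A freshly created namespace contains only an isolated loopback interface, and even that starts out DOWN, so nothing is reachable inside it yet. A quick optional sanity check, assuming nothing else has been configured in ns0:
[root@host ~]# ip netns exec ns0 ping -c 1 127.0.0.1
The ping is expected to fail until lo is brought up with ip netns exec ns0 ip link set lo up.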
Create a veth pair; veth0 and veth1 are directly connected to each other, then move them into ns0 and ns1 respectively:
[root@host ~]# ip link add type veth
[root@host ~]# ip address
...
6: veth0@veth1: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN qlen 1000
link/ether 0a:93:11:86:52:b3 brd ff:ff:ff:ff:ff:ff
7: veth1@veth0: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN qlen 1000
link/ether da:25:1f:20:19:12 brd ff:ff:ff:ff:ff:ff
[root@host ~]# ip link set veth0 netns ns0
[root@host ~]# ip link set veth1 netns ns1
[root@host ~]# ip netns exec ns0 ip address
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
6: veth0@if7: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN qlen 1000
link/ether 0a:93:11:86:52:b3 brd ff:ff:ff:ff:ff:ff link-netnsid 1
[root@host ~]# ip netns exec ns1 ip address
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
7: veth1@if6: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN qlen 1000
link/ether da:25:1f:20:19:12 brd ff:ff:ff:ff:ff:ff link-netnsid 0
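The @if7 / @if6 suffixes and the link-netnsid fields show that the two ends still point at each other even though they now live in different namespaces. If in doubt, the peer's ifindex can also be read from the interface statistics (a quick optional check, output omitted; requires ethtool to be installed):
[root@host ~]# ip netns exec ns0 ethtool -S veth0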
Bring up veth0 and veth1, assign IP addresses, and test connectivity:
[root@host ~]# ip netns exec ns0 ip link set veth0 up
[root@host ~]# ip netns exec ns0 ip address add 1.1.1.1/16 dev veth0
[root@host ~]# ip netns exec ns1 ip link set veth1 up
[root@host ~]# ip netns exec ns1 ip address add 1.1.1.2/16 dev veth1
[root@host ~]# ip netns exec ns0 ping -c 4 1.1.1.2
PING 1.1.1.2 (1.1.1.2) 56(84) bytes of data.
64 bytes from 1.1.1.2: icmp_seq=1 ttl=64 time=0.103 ms
64 bytes from 1.1.1.2: icmp_seq=2 ttl=64 time=0.062 ms
64 bytes from 1.1.1.2: icmp_seq=3 ttl=64 time=0.067 ms
64 bytes from 1.1.1.2: icmp_seq=4 ttl=64 time=0.063 ms
--- 1.1.1.2 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 2999ms
rtt min/avg/max/mdev = 0.062/0.073/0.103/0.019 ms
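The ping works because 1.1.1.1/16 and 1.1.1.2/16 sit in the same 1.1.0.0/16 subnet, so each namespace gets a connected route and traffic flows directly across the veth "wire". This can be confirmed from the routing table inside either namespace (a quick check, output omitted):
[root@host ~]# ip netns exec ns0 ip route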
Delete veth0 from ns0 (deleting one end of a veth pair also removes its peer, so veth1 in ns1 disappears as well):
[root@host ~]# ip netns exec ns0 ip link del veth0
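Since the peer vanishes together with veth0, veth1 should no longer exist in ns1; checking is optional, and the command below is expected to report that the device does not exist:
[root@host ~]# ip netns exec ns1 ip link show veth1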
Re-create two veth pairs:
[root@host ~]# ip link add veth0 type veth peer name veth1
[root@host ~]# ip link add veth2 type veth peer name veth3
Add an OVS bridge and attach the interfaces to it:
[root@host ~]# ovs-vsctl add-br br0
[root@host ~]# ovs-vsctl add-port br0 veth1
[root@host ~]# ovs-vsctl add-port br0 veth3
[root@host ~]# ovs-vsctl show
8dd80385-40cb-4259-809c-b23047723312
Bridge "br0"
Port "br0"
Interface "br0"
type: internal
Port "veth1"
Interface "veth1"
Port "veth3"
Interface "veth3"
ovs_version: "2.0.0"
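At this point veth1 and veth3 are ports on br0, while their peers veth0 and veth2 will act as the NICs inside the namespaces. Two optional commands that come in handy later when writing flows: list-ports shows the attached ports, and ovs-ofctl show reveals the OpenFlow port numbers assigned to them (outputs omitted here):
[root@host ~]# ovs-vsctl list-ports br0
[root@host ~]# ovs-ofctl show br0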
Move veth0 and veth2 into ns0 and ns1 respectively:
[root@host ~]# ip link set veth0 netns ns0
[root@host ~]# ip link set veth2 netns ns1
Bring up the interfaces, assign IP addresses, and test connectivity:
[root@host ~]# ip link set veth1 up
[root@host ~]# ip link set veth3 up
[root@host ~]# ip netns exec ns0 ip link set veth0 up
[root@host ~]# ip netns exec ns1 ip link set veth2 up
[root@host ~]# ip netns exec ns0 ip address add 1.1.1.1/16 dev veth0
[root@host ~]# ip netns exec ns1 ip address add 1.1.1.2/16 dev veth2
[root@host ~]# ip netns exec ns0 ping -c 4 1.1.1.2
PING 1.1.1.2 (1.1.1.2) 56(84) bytes of data.
64 bytes from 1.1.1.2: icmp_seq=1 ttl=64 time=0.561 ms
64 bytes from 1.1.1.2: icmp_seq=2 ttl=64 time=0.072 ms
64 bytes from 1.1.1.2: icmp_seq=3 ttl=64 time=0.070 ms
64 bytes from 1.1.1.2: icmp_seq=4 ttl=64 time=0.088 ms
--- 1.1.1.2 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3000ms
rtt min/avg/max/mdev = 0.070/0.197/0.561/0.210 ms
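This time the ICMP packets travel veth0 → veth1 → br0 → veth3 → veth2 instead of over a direct veth link. To watch the traffic actually crossing the bridge, a capture on one of the bridge-side ports works (run it in a second terminal while the ping is going; tcpdump must be installed):
[root@host ~]# tcpdump -ni veth1 icmp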
Inspect the flow table on br0:
[root@host ~]# ovs-ofctl dump-flows br0
NXST_FLOW reply (xid=0x4):
cookie=0x0, duration=3696.103s, table=0, n_packets=77, n_bytes=6426, idle_age=111, priority=0 actions=NORMAL
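The single priority=0 flow with actions=NORMAL makes br0 behave like an ordinary learning L2 switch, which is why the ping worked without adding any flows. As an optional experiment (not in the original walkthrough), a higher-priority drop rule breaks the ping, and deleting it restores connectivity:
[root@host ~]# ovs-ofctl add-flow br0 "priority=10,icmp,actions=drop"
[root@host ~]# ovs-ofctl del-flows br0 icmp
When finished, the topology can be torn down with ovs-vsctl del-br br0 and ip netns del ns0 / ip netns del ns1.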
Reference: IBM, "基于 Open vSwitch 的 OpenFlow 实践" (OpenFlow practice based on Open vSwitch).