[yuwh@node0 ~]$ uname -a
Linux node0 3.10.0-123.9.3.el7.x86_64 #1 SMP Thu Nov 6 15:06:03 UTC 2014 x86_64 x86_64 x86_64 GNU/Linux
[yuwh@node0 ~]$ cat /etc/redhat-release
CentOS Linux release 7.0.1406 (Core)
[root@node0 ~]# yum -y install wget openssl-devel kernel-devel
[root@node0 ~]# yum groupinstall "Development Tools"
[root@node0 ~]# yum -y install wget openssl-devel kernel-devel
[root@node0 ~]# yum groupinstall "Development Tools"
[root@node0 ~]# adduser ovswitch
[root@node0 ~]# su - ovswitch
[ovswitch@node0 ~]$ wget http://openvswitch.org/releases/openvswitch-2.3.0.tar.gz
[ovswitch@node0 ~]$ tar xfz openvswitch-2.3.0.tar.gz
[ovswitch@node0 ~]$ mkdir -p ~/rpmbuild/SOURCES
[ovswitch@node0 ~]$ cp openvswitch-2.3.0.tar.gz ~/rpmbuild/SOURCES
去除 Nicira 提供的 openvswitch-kmod 依赖包,创建新的 spec 文件:
[ovswitch@node0 ~]$ sed 's/openvswitch-kmod, //g' openvswitch-2.3.0/rhel/openvswitch.spec > openvswitch-2.3.0/rhel/openvswitch_no_kmod.spec
[ovswitch@node0 ~]$ rpmbuild -bb --without check ~/openvswitch-2.3.0/rhel/openvswitch_no_kmod.spec
[ovswitch@node0 ~]$ exit
[root@node0 ~]# yum localinstall /home/ovswitch/rpmbuild/RPMS/x86_64/openvswitch-2.3.0-1.x86_64.rpm
安装完成,验证一下:
[root@node0 ~]# rpm -qf `which ovs-vsctl`
openvswitch-2.3.0-1.x86_64
SELinux会影响Open vSwitch的运行,比如报错:
error: /etc/openvswitch/conf.db: failed to lock lockfile (No such file or directory)原因是没有权限修改/etc/openvswitch的owner
如果环境允许可用关掉SELinux;想保持enabled需要做如下修改:
[root@node0 ~]# mkdir /etc/openvswitch
[root@node0 ~]# semanage fcontext -a -t openvswitch_rw_t "/etc/openvswitch(/.*)?"
[root@node0 ~]# restorecon -Rv /etc/openvswitch
启动服务:
[root@node0 ~]# systemctl start openvswitch.service
查看结果:
[root@node0 ~]# systemctl -l status openvswitch.service
ovs-vsctl add-br ovsbr0
去掉NetworkManager
systemctl stop NetworkManager.service
systemctl disable NetworkManager.service
改用network.services,修改/etc/sysconfig/network-scripts/下的配置文件
/etc/sysconfig/network-scripts/ifcfg-mgmt0
DEVICE=mgmt0
ONBOOT=yes
DEVICETYPE=ovs
TYPE=OVSIntPort
OVS_BRIDGE=ovsbr0
USERCTL=no
BOOTPROTO=none
HOTPLUG=no
IPADDR0=10.0.0.2
PREFIX0=23
/etc/sysconfig/network-scripts/ifcfg-ovsbr0
DEVICE=ovsbr0
ONBOOT=yes
DEVICETYPE=ovs
TYPE=OVSBridge
HOTPLUG=no
USERCTL=no
/etc/sysconfig/network-scripts/ifcfg-enp2s0f0
TYPE=Ethernet
BOOTPROTO=none
DEFROUTE=yes
IPV4_FAILURE_FATAL=no
IPV6INIT=yes
IPV6_AUTOCONF=yes
IPV6_DEFROUTE=yes
IPV6_FAILURE_FATAL=no
NAME=enp2s0f0
UUID=d81f76d3-7163-42d3-bc07-d936a8536d17
ONBOOT=yes
IPADDR=192.168.3.4
PREFIX=23
GATEWAY=192.168.3.1
DNS1=8.8.8.8
DNS2=4.4.4.4
HWADDR=10:51:72:37:76:04
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
libvirt 默认会启用virbr0来作为虚拟机的网桥并启动DHCPD;删除该网桥,使用ovs bridge来替代:
virsh net-destroy default
vi /etc/libvirt/qemu/CentOS7.xml
<address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
或者通过virt-manager来配置
在ovsbr0中添加接口vxlan0
node0上的配置:注意remote_ip node1的ip:192.168.3.5。
ovs-vsctl add-port ovsbr0 vxlan0 -- set interface vxlan0 type=vxlan options:remote_ip=192.168.3.5
启动两个虚拟机后的配置:
[root@node0 samba]# ovs-vsctl show
b15949b6-9d9f-4b14-9fd9-277d2b203376
    Bridge "ovsbr0"
        Port "mgmt0"
            Interface "mgmt0"
                type: internal
        Port "vnet0"
            Interface "vnet0"
        Port "vxlan0"
            Interface "vxlan0"
                type: vxlan
                options: {remote_ip="192.168.3.5"}
        Port "ovsbr0"
            Interface "ovsbr0"
                type: internal
        Port "vnet1"
            Interface "vnet1"
    ovs_version: "2.3.0"
[root@node0 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: enp2s0f0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
    link/ether 10:51:72:37:76:04 brd ff:ff:ff:ff:ff:ff
    inet 192.168.3.4/23 brd 192.168.3.255 scope global enp2s0f0
       valid_lft forever preferred_lft forever
    inet6 fe80::1251:72ff:fe37:7604/64 scope link
       valid_lft forever preferred_lft forever
8: ovs-system: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN
    link/ether 02:32:3a:73:15:08 brd ff:ff:ff:ff:ff:ff
//删掉了不相干部分
48: ovsbr0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN
    link/ether fe:45:84:ec:7c:43 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::fc45:84ff:feec:7c43/64 scope link
       valid_lft forever preferred_lft forever
49: mgmt0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc noqueue state UNKNOWN
    link/ether d6:c5:ed:c4:aa:45 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.2/23 brd 10.0.1.255 scope global mgmt0
       valid_lft forever preferred_lft forever
    inet6 fe80::d4c5:edff:fec4:aa45/64 scope link
       valid_lft forever preferred_lft forever
51: vnet1: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master ovs-system state UNKNOWN qlen 500
    link/ether fe:54:00:13:04:d8 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::fc54:ff:fe13:4d8/64 scope link
       valid_lft forever preferred_lft forever
52: vnet0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast master ovs-system state UNKNOWN qlen 500
    link/ether fe:54:00:18:16:99 brd ff:ff:ff:ff:ff:ff
    inet6 fe80::fc54:ff:fe18:1699/64 scope link
       valid_lft forever preferred_lft forever
以上配置,只列出了node0的操作过程;node1作同样配置,ip不同而已。
vm1 ping vm2:
[root@node0_0 ~]# ip addr
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 52:54:00:18:16:99 brd ff:ff:ff:ff:ff:ff
    inet 10.0.0.3/24 brd 10.0.0.255 scope global eth0
       valid_lft forever preferred_lft forever
    inet6 fe80::5054:ff:fe18:1699/64 scope link
       valid_lft forever preferred_lft forever
[root@node0_0 ~]# ping 10.0.0.4
PING 10.0.0.4 (10.0.0.4) 56(84) bytes of data.
64 bytes from 10.0.0.4: icmp_seq=1 ttl=64 time=0.545 ms
64 bytes from 10.0.0.4: icmp_seq=2 ttl=64 time=0.235 ms
64 bytes from 10.0.0.4: icmp_seq=3 ttl=64 time=0.223 ms
^C
--- 10.0.0.4 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 1999ms
rtt min/avg/max/mdev = 0.223/0.334/0.545/0.149 ms
wireshark抓包,物理网卡上没有对应的流量,vnet0上的包是普通的ICMP包
vm1 ping vm3:
[root@node0_0 ~]# ping 10.0.0.34
PING 10.0.0.34 (10.0.0.34) 56(84) bytes of data.
64 bytes from 10.0.0.34: icmp_seq=1 ttl=64 time=1.62 ms
64 bytes from 10.0.0.34: icmp_seq=2 ttl=64 time=0.383 ms
^C
--- 10.0.0.34 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 1001ms
rtt min/avg/max/mdev = 0.383/1.003/1.623/0.620 ms
vnet0上抓到的报文:普通ICMP包
物理网卡上抓到的报文:vxlan封装的ICMP包 frame 18
vxlan格式
vxlan报文解码
用新版本wireshark(1.12.2)查看 frame 18
[root@node0_0 ~]# ping 10.0.0.32
PING 10.0.0.32 (10.0.0.32) 56(84) bytes of data.
64 bytes from 10.0.0.32: icmp_seq=1 ttl=64 time=1.68 ms
64 bytes from 10.0.0.32: icmp_seq=2 ttl=64 time=0.422 ms
64 bytes from 10.0.0.32: icmp_seq=3 ttl=64 time=0.288 ms
^C
--- 10.0.0.32 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2001ms
rtt min/avg/max/mdev = 0.288/0.796/1.680/0.627 ms
参考:
https://n40lab.wordpress.com/2014/09/04/openvswitch-2-3-0-lts-and-centos-7/
http://networkstatic.net/configuring-vxlan-and-gre-tunnels-on-openvswitch/
http://www.astroarch.com/2014/06/rhev-upgrade-saga-installing-open-vswitch-on-rhel-7/