Open vSwitch Practice

1 Purpose of the exercise
Verify whether eth0 inside instance1 and the tap100 interface on the physical host can reach each other.
[Figure 1]
2 Install the required packages and start libvirtd
[root@controller0 ~]# yum install libvirt openvswitch python-virtinst xauth tigervnc -y
[root@controller0 ~]# ip a
1: lo:  mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0:  mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
    link/ether 08:00:27:37:68:f5 brd ff:ff:ff:ff:ff:ff
    inet 10.20.0.10/24 brd 10.20.0.255 scope global eth0
    inet6 fe80::a00:27ff:fe37:68f5/64 scope link
       valid_lft forever preferred_lft forever
3: eth1:  mtu 1500 qdisc noop state DOWN qlen 1000
    link/ether 08:00:27:65:b0:f2 brd ff:ff:ff:ff:ff:ff
4: eth2:  mtu 1500 qdisc noop state DOWN qlen 1000
    link/ether 08:00:27:86:76:3b brd ff:ff:ff:ff:ff:ff
5: eth3:  mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 08:00:27:8c:e0:4b brd ff:ff:ff:ff:ff:ff
    inet 10.0.5.15/24 brd 10.0.5.255 scope global eth3
    inet6 fe80::a00:27ff:fe8c:e04b/64 scope link
       valid_lft forever preferred_lft forever
[root@controller0 ~]# service libvirtd start
[root@controller0 ~]# chkconfig libvirtd on
[root@controller0 ~]# ip a
1: lo:  mtu 65536 qdisc noqueue state UNKNOWN
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
    inet6 ::1/128 scope host
       valid_lft forever preferred_lft forever
2: eth0:  mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000
    link/ether 08:00:27:37:68:f5 brd ff:ff:ff:ff:ff:ff
    inet 10.20.0.10/24 brd 10.20.0.255 scope global eth0
    inet6 fe80::a00:27ff:fe37:68f5/64 scope link
       valid_lft forever preferred_lft forever
3: eth1:  mtu 1500 qdisc noop state DOWN qlen 1000
    link/ether 08:00:27:65:b0:f2 brd ff:ff:ff:ff:ff:ff
4: eth2:  mtu 1500 qdisc noop state DOWN qlen 1000
    link/ether 08:00:27:86:76:3b brd ff:ff:ff:ff:ff:ff
5: eth3:  mtu 1500 qdisc pfifo_fast state UP qlen 1000
    link/ether 08:00:27:8c:e0:4b brd ff:ff:ff:ff:ff:ff
    inet 10.0.5.15/24 brd 10.0.5.255 scope global eth3
    inet6 fe80::a00:27ff:fe8c:e04b/64 scope link
       valid_lft forever preferred_lft forever
6: virbr0:  mtu 1500 qdisc noqueue state UNKNOWN
    link/ether 52:54:00:2f:81:0b brd ff:ff:ff:ff:ff:ff
    inet 192.168.122.1/24 brd 192.168.122.255 scope global virbr0
7: virbr0-nic:  mtu 1500 qdisc noop state DOWN qlen 500
    link/ether 52:54:00:2f:81:0b brd ff:ff:ff:ff:ff:ff
Two new network devices have appeared: 6 (virbr0) and 7 (virbr0-nic).
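These two devices belong to libvirt's default NAT network. A quick, optional way to confirm that (a sketch; brctl comes from the bridge-utils package, which may need to be installed) is:
[root@controller0 ~]# virsh net-list --all
[root@controller0 ~]# brctl show virbr0    # requires bridge-utils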
Prepare the required files:
[root@controller0 tmp]# pwd
/var/tmp
[root@controller0 tmp]# ll
total 9644
-rw-r--r--  1 root root 9761280 Mar  2 20:21 cirros-0.3.0-x86_64-disk.img
-rw-r--r--  1 root root   51409 Mar  2 20:17 instance1.xml
-rw-r--r--  1 root root   51407 Mar  2 20:19 instance2.xml
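For reference, the CirrOS disk image used here is publicly downloadable; a sketch, assuming the usual CirrOS download site and that qemu-img is available (it arrives with the qemu packages installed later):
[root@controller0 tmp]# wget http://download.cirros-cloud.net/0.3.0/cirros-0.3.0-x86_64-disk.img    # public CirrOS mirror, adjust if needed
[root@controller0 tmp]# qemu-img info cirros-0.3.0-x86_64-disk.img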

3 Remove the default libvirt network so the network analysis stays uncluttered
[root@controller0 ~]# virsh net-destroy default
Network default destroyed
[root@controller0 ~]# virsh net-list --all
Name                 State      Autostart     Persistent
--------------------------------------------------
default              inactive   yes           yes
[root@controller0 ~]# virsh net-autostart --disable default
Network default unmarked as autostarted
[root@controller0 ~]# virsh net-list --all
Name                 State      Autostart     Persistent
--------------------------------------------------
default              inactive   no            yes
[root@controller0 ~]# virsh net-undefine default
Network default has been undefined
[root@controller0 ~]# virsh net-list --all
Name                 State      Autostart     Persistent
--------------------------------------------------
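Should the default network be needed again later, it can be re-created from the stock definition that the libvirt package ships (path as on RHEL/CentOS 6; treat it as an assumption and check that the file exists):
[root@controller0 ~]# virsh net-define /usr/share/libvirt/networks/default.xml    # stock definition, path may vary
[root@controller0 ~]# virsh net-autostart default
[root@controller0 ~]# virsh net-start default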

4 Start Open vSwitch
[root@controller0 ~]# service openvswitch start
ovsdb-server is already running.
ovs-vswitchd is already running.
Enabling remote OVSDB managers                             [  OK  ]
[root@controller0 ~]# chkconfig openvswitch on
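A couple of optional sanity checks, for example confirming that the kernel module is loaded and that the daemons answer:
[root@controller0 ~]# lsmod | grep openvswitch
[root@controller0 ~]# ovs-vsctl --version
[root@controller0 ~]# ovs-vsctl show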

5 Create an Open vSwitch bridge named br-int
[root@controller0 ~]# ovs-vsctl add-br br-int
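The new bridge can be verified both from the OVS side and from the kernel side, for example:
[root@controller0 ~]# ovs-vsctl list-br
[root@controller0 ~]# ip link show br-int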

6 Define a libvirt network on top of the Open vSwitch bridge br-int
[root@controller0 tmp]# vi libvirt-vlans.xml
[root@controller0 tmp]# cat libvirt-vlans.xml
<network>
  <name>ovs-network</name>
  <forward mode='bridge'/>
  <bridge name='br-int'/>
  <virtualport type='openvswitch'/>
  <portgroup name='no-vlan' default='yes'>
  </portgroup>
  <portgroup name='vlan-100'>
    <vlan>
      <tag id='100'/>
    </vlan>
  </portgroup>
  <portgroup name='vlan-200'>
    <vlan>
      <tag id='200'/>
    </vlan>
  </portgroup>
</network>
The <forward mode='bridge'/> and <virtualport type='openvswitch'/> elements tell libvirt to plug guest interfaces straight into the OVS bridge br-int, and each portgroup lets an interface pick up an OVS VLAN tag (here 100 or 200) simply by referencing the portgroup name.
7 Start the libvirt network
[root@controller0 tmp]# virsh net-define libvirt-vlans.xml
Network ovs-network defined from libvirt-vlans.xml
[root@controller0 tmp]# virsh net-list --all
Name                 State      Autostart     Persistent
--------------------------------------------------
ovs-network          inactive   no            yes
[root@controller0 tmp]# virsh net-autostart ovs-network
Network ovs-network marked as autostarted
[root@controller0 tmp]# virsh net-list --all
Name                 State      Autostart     Persistent
--------------------------------------------------
ovs-network          inactive   yes           yes
[root@controller0 tmp]# virsh net-start ovs-network
Network ovs-network started
[root@controller0 tmp]# virsh net-list --all
Name                 State      Autostart     Persistent
--------------------------------------------------
ovs-network          active     yes           yes
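Optionally, the definition that libvirt actually stored can be inspected, for example:
[root@controller0 tmp]# virsh net-info ovs-network
[root@controller0 tmp]# virsh net-dumpxml ovs-network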

8 Create an instance attached to ovs-network; its configuration is as follows
[root@controller0 tmp]# vi instance1.xml
[root@controller0 tmp]# cat instance1.xml
<domain type='qemu'>
  <uuid>23469de0-a3a0-4214-a60e-a45322bcc370</uuid>
  <name>instance1</name>
  <memory>524288</memory>
  <vcpu>1</vcpu>
  <sysinfo type='smbios'>
    <system>
      <entry name='manufacturer'>Red Hat Inc.</entry>
      <entry name='product'>OpenStack Nova</entry>
      <entry name='version'>2014.1.1-3.el6</entry>
      <entry name='serial'>b8d4ec5f-acd6-7111-c69b-600912a079bb</entry>
      <entry name='uuid'>23469de0-a3a0-4214-a60e-a45322bcc370</entry>
    </system>
  </sysinfo>
  <os>
    <type>hvm</type>
    <boot dev='hd'/>
    <smbios mode='sysinfo'/>
  </os>
  <features>
    <acpi/>
    <apic/>
  </features>
  <clock offset='utc'/>
  <devices>
    <disk type='file' device='disk'>
      <driver name='qemu' type='qcow2'/>
      <source file='/var/tmp/instance1.img'/>
      <target dev='vda' bus='virtio'/>
    </disk>
    <interface type='network'>
      <source network='ovs-network' portgroup='vlan-100'/>
      <model type='virtio'/>
    </interface>
    <serial type='pty'>
      <target port='0'/>
    </serial>
    <console type='pty'>
      <target type='serial' port='0'/>
    </console>
    <graphics type='vnc' autoport='yes' listen='0.0.0.0'/>
  </devices>
</domain>
[root@controller0 tmp]# ls
cirros-0.3.0-x86_64-disk.img  instance1.xml  instance2.xml  libvirt-vlans.xml  yum-root-VL09xg
[root@controller0 tmp]# mv cirros-0.3.0-x86_64-disk.img instance1.img
[root@controller0 tmp]# virsh define instance1.xml
error: Failed to define domain from instance1.xml
error: unknown OS type hvm
This error appeared; the fix is as follows:
[root@controller0 networks]# ln -s /usr/bin
[root@controller0 networks]# yum install qemu-kvm.x86_64
[root@controller0 networks]# service libvirtd restart
Stopping libvirtd daemon:                                  [  OK  ]
Starting libvirtd daemon:                                  [  OK  ]
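Before retrying, it can help to confirm that libvirt now advertises an hvm-capable emulator, for example:
[root@controller0 tmp]# virsh capabilities | grep -i hvm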
[root@controller0 tmp]# virsh define instance1.xml
Domain instance1 defined from instance1.xml
[root@controller0 tmp]# virsh start instance1
Domain instance1 started
[root@controller0 tmp]# virsh list --all
Id    Name                           State
----------------------------------------------------
1     instance1                      running
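At this point the interface wiring can already be checked: virsh lists the domain's NICs, and ovs-vsctl should show the corresponding vnet port attached to br-int, for example:
[root@controller0 tmp]# virsh domiflist instance1
[root@controller0 tmp]# ovs-vsctl show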
The following error appeared; the fix is shown in the screenshots:
[Figure 2]
[Figure 3]
Then the following error appeared:
[Figure 4]
The fix is as follows:
1. Download Xmanager from the address below and install it
http://www.xshellcn.com/
[root@controller0 ~]# vncviewer :0
The virtual machine is now up.
[Figure 5]
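If :0 is not the right display (for instance when several guests are running), virsh can report the VNC display to pass to vncviewer; a small sketch:
[root@controller0 ~]# virsh vncdisplay instance1
[root@controller0 ~]# vncviewer $(virsh vncdisplay instance1)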
9 Assign an IP address to the VM (run inside the guest, via the VNC console)
ip addr add 192.168.1.20/24 dev eth0
[Figure 6]
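Inside the CirrOS console the result can be double-checked with the same iproute2 tools; a sketch, assuming the CirrOS 0.3.0 default login (cirros / cubswin:)) and its passwordless sudo:
$ sudo ip link set eth0 up     # make sure the link is up
$ ip addr show eth0            # 192.168.1.20/24 should be listed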

10 Add an Open vSwitch port on the host
[root@controller0 ~]# ovs-vsctl show
acabf3bb-544c-4423-8fc6-98b8ea344f9d
    Bridge br-int
        Port br-int
            Interface br-int
                type: internal
        Port "vnet0"
            tag: 100
            Interface "vnet0"
    ovs_version: "2.1.3"
[root@controller0 ~]# ip link add br-int-tap100 type veth peer name tap100
[root@controller0 ~]# ip a
Two new devices, br-int-tap100 and tap100, now appear.
[root@controller0 ~]# ovs-vsctl add-port br-int br-int-tap100
[root@controller0 ~]# ovs-vsctl set port br-int-tap100 tag=100
[root@controller0 ~]# ip addr add 192.168.1.21/24 dev tap100
[root@controller0 ~]# ip link set tap100 up
[root@controller0 ~]# ip link set br-int-tap100 up
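br-int should now carry two ports tagged with VLAN 100, vnet0 (the guest) and br-int-tap100, with 192.168.1.21 configured on tap100; this can be confirmed with, for example:
[root@controller0 ~]# ovs-vsctl show
[root@controller0 ~]# ip addr show tap100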

11 Verification
Ping the VM from the physical host:
[root@controller0 ~]# ping 192.168.1.20
PING 192.168.1.20 (192.168.1.20) 56(84) bytes of data.
64 bytes from 192.168.1.20: icmp_seq=1 ttl=64 time=7.72 ms
64 bytes from 192.168.1.20: icmp_seq=2 ttl=64 time=0.629 ms
64 bytes from 192.168.1.20: icmp_seq=3 ttl=64 time=0.255 ms
64 bytes from 192.168.1.20: icmp_seq=4 ttl=64 time=0.227 ms

--- 192.168.1.20 ping statistics ---
4 packets transmitted, 4 received, 0% packet loss, time 3260ms
rtt min/avg/max/mdev = 0.227/2.208/7.724/3.188 ms
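To see that the traffic really traverses br-int on VLAN 100, the OVS MAC-learning table can be inspected while the ping is running, for example:
[root@controller0 ~]# ovs-appctl fdb/show br-int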
Ping the physical host from the VM:
[Figure 7]

