Edit /etc/network/interfaces:

auto lo
iface lo inet loopback

auto eth0
iface eth0 inet static
    address 192.16.0.254
    netmask 255.255.0.0

# If your network environment assigns IP addresses automatically, configure eth0 like this instead:
#auto eth0
#iface eth0 inet dhcp

auto eth1
iface eth1 inet manual

Then restart the network:
# /etc/init.d/networking restart
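Optionally confirm that eth0 came up with the static address (a quick check using the stock net-tools ifconfig):

# ifconfig eth0 | grep 'inet addr'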
Add the Ubuntu Cloud Archive for Grizzly and upgrade the system:

# cat > /etc/apt/sources.list.d/grizzly.list << _GEEK_
deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-updates/grizzly main
deb http://ubuntu-cloud.archive.canonical.com/ubuntu precise-proposed/grizzly main
_GEEK_
# apt-get install ubuntu-cloud-keyring python-software-properties software-properties-common python-keyring
# apt-get update
# apt-get upgrade
# apt-get dist-upgrade
Install MySQL:

# apt-get install python-mysqldb mysql-server

Use sed to change the bind address in /etc/mysql/my.cnf from localhost (127.0.0.1) to 0.0.0.0:
# sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf
# sed -i '44 i skip-name-resolve' /etc/mysql/my.cnf
# /etc/init.d/mysql restart
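To confirm MySQL now listens on all interfaces rather than only localhost, a quick check:

# netstat -ntlp | grep 3306

The local address column should show 0.0.0.0:3306.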
Install RabbitMQ:

# apt-get install rabbitmq-server

Install and configure Keystone
# apt-get install keystone

Remove Keystone's default SQLite database file:
# rm -f /var/lib/keystone/keystone.db

Create the keystone database:
# mysql -uroot -pmysql
mysql> create database keystone;
mysql> grant all on keystone.* to 'keystone'@'%' identified by 'keystone';
mysql> flush privileges;
mysql> quit;

Edit /etc/keystone/keystone.conf:
# vi /etc/keystone/keystone.conf
admin_token = ADMIN
debug = True
verbose = True
[sql]
connection = mysql://keystone:[email protected]/keystone   # this line must appear under [sql]
[signing]
token_format = UUID

Restart the keystone service:
# /etc/init.d/keystone restart

Sync the Keystone tables into the database:
# keystone-manage db_sync

Import the initial data with a script:
# wget http://download.longgeek.com/openstack/grizzly/keystone.sh

Edit the script:
ADMIN_PASSWORD=${ADMIN_PASSWORD:-password}          # password for the admin tenant
SERVICE_PASSWORD=${SERVICE_PASSWORD:-password}      # password for nova, glance, cinder, quantum, swift
export SERVICE_TOKEN="ADMIN"                        # token
export SERVICE_ENDPOINT="http://192.16.0.254:35357/v2.0"
SERVICE_TENANT_NAME=${SERVICE_TENANT_NAME:-service} # the service tenant, containing the nova, glance, cinder, quantum, swift service users
KEYSTONE_REGION=RegionOne
KEYSTONE_IP="192.16.0.254"
#KEYSTONE_WLAN_IP="192.16.0.254"
SWIFT_IP="192.16.0.254"
#SWIFT_WLAN_IP="192.16.0.254"
COMPUTE_IP=$KEYSTONE_IP
EC2_IP=$KEYSTONE_IP
GLANCE_IP=$KEYSTONE_IP
VOLUME_IP=$KEYSTONE_IP
QUANTUM_IP=$KEYSTONE_IP

Run the script:
# sh keystone.sh

Set the environment variables:
# cat > /root/export.sh << _GEEK_
export OS_TENANT_NAME=admin   # if this is set to service, other services will fail to authenticate
export OS_USERNAME=admin
export OS_PASSWORD=password
export OS_AUTH_URL=http://192.16.0.254:5000/v2.0/
export OS_REGION_NAME=RegionOne
export SERVICE_TOKEN=ADMIN
export SERVICE_ENDPOINT=http://192.16.0.254:35357/v2.0/
_GEEK_
# echo 'source /root/export.sh' >> /root/.bashrc
# source /root/export.sh

Verify Keystone:
# keystone user-list
# keystone role-list
# keystone tenant-list
# keystone endpoint-list
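For a further end-to-end check you can request a token with the credentials from export.sh (keystone token-get is part of the standard Grizzly-era client):

# keystone token-get

If a token table comes back, authentication is working.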
Troubleshooting Keystone: if the import script fails, drop and recreate the database, then repeat the sync and the import:

# mysql -uroot -pmysql
mysql> drop database keystone;
mysql> create database keystone;
mysql> quit;
# keystone-manage db_sync
# sh keystone.sh

If errors occur while verifying Keystone, check the log first, then check that the environment variables are set correctly.
Install Glance:

# apt-get install glance

Remove the Glance SQLite file:
# rm -f /var/lib/glance/glance.sqlite

Create the glance database:
# mysql -uroot -pmysql
mysql> create database glance;
mysql> grant all on glance.* to 'glance'@'%' identified by 'glance';
mysql> flush privileges;
mysql> quit;

Edit the Glance configuration files:
# vi /etc/glance/glance-api.conf

Change the following options and leave the rest at their defaults:
verbose = True
debug = True
sql_connection = mysql://glance:[email protected]/glance
workers = 4
registry_host = 192.16.0.254
notifier_strategy = rabbit
rabbit_host = 192.16.0.254
rabbit_userid = guest
rabbit_password = guest
[keystone_authtoken]
auth_host = 192.16.0.254
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = password
[paste_deploy]
config_file = /etc/glance/glance-api-paste.ini
flavor = keystone
# vi /etc/glance/glance-registry.conf

Change the following options and leave the rest at their defaults:
verbose = True
debug = True
sql_connection = mysql://glance:[email protected]/glance
[keystone_authtoken]
auth_host = 192.16.0.254
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = glance
admin_password = password
[paste_deploy]
config_file = /etc/glance/glance-registry-paste.ini
flavor = keystone
# /etc/init.d/glance-api restart
# /etc/init.d/glance-registry restart

Sync to the database:
# glance-manage version_control 0
# glance-manage db_sync

Check Glance; the result should be empty (nothing is listed yet):
# glance image-list
Upload a test image:

# wget https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
# glance image-create --name='cirros' --public --container-format=ovf --disk-format=qcow2 < ./cirros-0.3.0-x86_64-disk.img
Added new image with ID: xxxxxxxxxxx

The CirrOS image can be logged into with a username and password as well as with an SSH key. user: cirros, password: cubswin:)
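Optionally, you can confirm the downloaded file really is qcow2, matching the --disk-format argument above (qemu-img comes from the qemu-utils package, which is pulled in later alongside KVM):

# qemu-img info cirros-0.3.0-x86_64-disk.img

The "file format" line should read qcow2.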
Install Open vSwitch:

# apt-get install openvswitch-datapath-source
# module-assistant auto-install openvswitch-datapath
# apt-get install openvswitch-switch openvswitch-brcompat

Enable ovs-brcompatd at startup:
# sed -i 's/# BRCOMPAT=no/BRCOMPAT=yes/g' /etc/default/openvswitch-switch

Start openvswitch-switch:
# /etc/init.d/openvswitch-switch restart
 * ovs-brcompatd is not running   # brcompatd did not start
 * ovs-vswitchd is not running
 * ovsdb-server is not running
 * Inserting openvswitch module
 * /etc/openvswitch/conf.db does not exist
 * Creating empty database /etc/openvswitch/conf.db
 * Starting ovsdb-server
 * Configuring Open vSwitch system IDs
 * Starting ovs-vswitchd
 * Enabling gre with iptables

Restart again until ovs-brcompatd, ovs-vswitchd, and ovsdb-server are all running:
# /etc/init.d/openvswitch-switch restart
# lsmod | grep brcompat
brcompat               13512  0
openvswitch            84038  7 brcompat

If it still will not start, use the following command:
# /etc/init.d/openvswitch-switch force-reload-kmod

Add the external bridge:
# ovs-vsctl add-br br-ex
# ovs-vsctl add-port br-ex eth1
Move eth1's network configuration onto br-ex:

# ifconfig eth1 0
# ifconfig br-ex 192.168.137.154/24
# route add default gw 192.168.137.2 dev br-ex
# echo 'nameserver 8.8.8.8' > /etc/resolv.conf

Then write this into the interfaces file:
# vi /etc/network/interfaces
auto lo
iface lo inet loopback

auto eth0
iface eth0 inet static
    address 192.16.0.254
    netmask 255.255.0.0

auto eth1
iface eth1 inet manual
    up ifconfig $IFACE 0.0.0.0 up
    down ifconfig $IFACE down

auto br-ex
iface br-ex inet static
    address 192.168.137.154
    netmask 255.255.255.0
    gateway 192.168.137.2
    dns-nameservers 8.8.8.8
# /etc/init.d/networking restart

Restarting the network may print warnings here.
Create the integration bridge:

# ovs-vsctl add-br br-int

Check the network:
# ovs-vsctl list-br
br-ex
br-int
# ovs-vsctl show
1a8d2081-4ba4-4cad-8020-ccac5772836a
    Bridge br-int
        Port br-int
            Interface br-int
                type: internal
    Bridge br-ex
        Port br-ex
            Interface br-ex
                type: internal
        Port "eth1"
            Interface "eth1"
    ovs_version: "1.4.0+build0"
Install Quantum:

# apt-get install quantum-server python-cliff python-pyparsing python-quantumclient

Install the openvswitch plugin to support OVS:
# apt-get install quantum-plugin-openvswitch
Create the quantum database:

# mysql -uroot -pmysql
mysql> create database quantum;
mysql> grant all on quantum.* to 'quantum'@'%' identified by 'quantum';
mysql> flush privileges;
mysql> quit;

Configure /etc/quantum/quantum.conf:
# vi /etc/quantum/quantum.conf

Change the options below, keep everything else, and add any option that is not already present in the file:
[DEFAULT]
debug = True
verbose = True
state_path = /var/lib/quantum
lock_path = $state_path/lock
bind_host = 0.0.0.0
bind_port = 9696
core_plugin = quantum.plugins.openvswitch.ovs_quantum_plugin.OVSQuantumPluginV2
api_paste_config = /etc/quantum/api-paste.ini
control_exchange = quantum
rabbit_host = 192.16.0.254
rabbit_password = guest
rabbit_port = 5672
rabbit_userid = guest
notification_driver = quantum.openstack.common.notifier.rpc_notifier
default_notification_level = INFO
notification_topics = notifications
[QUOTAS]
[DEFAULT_SERVICETYPE]
[SECURITYGROUP]
[AGENT]
root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf
[keystone_authtoken]
auth_host = 192.16.0.254
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = quantum
admin_password = password
signing_dir = /var/lib/quantum/keystone-signing

Configure the Open vSwitch plugin:
# vi /etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini
[DATABASE]
sql_connection = mysql://quantum:[email protected]/quantum
reconnect_interval = 2
[OVS]
enable_tunneling = True
tenant_network_type = gre
tunnel_id_ranges = 1:1000
local_ip = 10.0.0.1
integration_bridge = br-int
tunnel_bridge = br-tun
[AGENT]
polling_interval = 2
[SECURITYGROUP]

Start the Quantum service:
# /etc/init.d/quantum-server restart

Install the OVS agent:
# apt-get install quantum-plugin-openvswitch-agent

Before starting the ovs-agent, make sure local_ip is set in ovs_quantum_plugin.ini and that the br-int bridge has been created.
# /etc/init.d/quantum-plugin-openvswitch-agent restart

After starting, the ovs-agent automatically creates a br-tun bridge according to the configuration:
# ovs-vsctl list-br
br-ex
br-int
br-tun
# ovs-vsctl show
1a8d2081-4ba4-4cad-8020-ccac5772836a
    Bridge br-int
        Port br-int
            Interface br-int
                type: internal
        Port patch-tun
            Interface patch-tun
                type: patch
                options: {peer=patch-int}
    Bridge br-ex
        Port br-ex
            Interface br-ex
                type: internal
        Port "eth1"
            Interface "eth1"
    Bridge br-tun
        Port br-tun
            Interface br-tun
                type: internal
        Port patch-int
            Interface patch-int
                type: patch
                options: {peer=patch-tun}
    ovs_version: "1.4.0+build0"
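You can also confirm the agent registered with the Quantum server. Grizzly added agent management to the API, so (assuming the admin credentials from export.sh are loaded) the following should list the Open vSwitch agent as alive:

# quantum agent-list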
Install the DHCP agent:

# apt-get install quantum-dhcp-agent

Configure quantum-dhcp-agent:
# vi /etc/quantum/dhcp_agent.ini

Change the options below, keep everything else, and add any option that is not already present in the file:
[DEFAULT]
debug = True
verbose = True
use_namespaces = True
signing_dir = /var/cache/quantum
admin_tenant_name = service
admin_user = quantum
admin_password = password
auth_url = http://192.16.0.254:35357/v2.0
dhcp_agent_manager = quantum.agent.dhcp_agent.DhcpAgentWithStateReport
root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf
state_path = /var/lib/quantum
interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver
dhcp_driver = quantum.agent.linux.dhcp.Dnsmasq

Start the service:
# /etc/init.d/quantum-dhcp-agent restart

Install the L3 agent:
# apt-get install quantum-l3-agent

Configure the L3 agent:
# vi /etc/quantum/l3_agent.ini

Change the options below, keep everything else, and add any option that is not already present in the file:
[DEFAULT]
debug = True
verbose = True
use_namespaces = True
external_network_bridge = br-ex
signing_dir = /var/cache/quantum
admin_tenant_name = service
admin_user = quantum
admin_password = password
auth_url = http://192.16.0.254:35357/v2.0
l3_agent_manager = quantum.agent.l3_agent.L3NATAgentWithStateReport
root_helper = sudo quantum-rootwrap /etc/quantum/rootwrap.conf
interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver

Start the L3 agent:
# /etc/init.d/quantum-l3-agent restart

Configure the metadata agent:
# vi /etc/quantum/metadata_agent.ini

Change the options below, keep everything else, and add any option that is not already present in the file:
[DEFAULT]
debug = True
auth_url = http://192.16.0.254:35357/v2.0
auth_region = RegionOne
admin_tenant_name = service
admin_user = quantum
admin_password = password
state_path = /var/lib/quantum
nova_metadata_ip = 192.16.0.254
nova_metadata_port = 8775

Start the metadata agent:
# /etc/init.d/quantum-metadata-agent restart

Troubleshooting Quantum
Install Cinder:

# apt-get install cinder-api cinder-common cinder-scheduler cinder-volume python-cinderclient

Create the database:
# mysql -uroot -pmysql
mysql> create database cinder;
mysql> grant all on cinder.* to 'cinder'@'%' identified by 'cinder';
mysql> flush privileges;
mysql> quit;

Create a logical volume group named cinder-volumes. There are two ways to do this: partition a physical disk, or simulate a disk with a file (shown after the iSCSI setup below). Choose one of the two.

Method 1: create a primary partition on a physical disk:
# fdisk /dev/sdb
n
p
1
Enter
Enter
t
8e
w
# partx -a /dev/sdb
# pvcreate /dev/sdb1
# vgcreate cinder-volumes /dev/sdb1
# vgs
  VG             #PV #LV #SN Attr   VSize   VFree
  cinder-volumes   1   0   0 wz--n- 150.00g 150.00g
  localhost        1   2   0 wz--n- 279.12g  12.00m
Install the iSCSI services:

# apt-get install iscsitarget open-iscsi iscsitarget-dkms

Configure the iSCSI service:
# sed -i 's/false/true/g' /etc/default/iscsitarget

Restart the services:
# service iscsitarget start
# service open-iscsi start
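Optionally confirm the iSCSI Enterprise Target daemon (ietd, which the iscsitarget package provides) is running:

# ps aux | grep [i]etd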
Method 2: use a file to back the volume group:

# dd if=/dev/zero of=cinder-volumes bs=1 count=0 seek=2G
# losetup /dev/loop2 cinder-volumes
# fdisk /dev/loop2
# Type in the following:
n
p
1
ENTER
ENTER
t
8e
w

Create the physical volume and volume group:
# pvcreate /dev/loop2
# vgcreate cinder-volumes /dev/loop2
# vgs
  VG             #PV #LV #SN Attr   VSize VFree
  cinder-volumes   1   0   0 wz--n- 2.00g 2.00g

Edit the configuration files. First cinder.conf:
# vi /etc/cinder/cinder.conf修改如下选项,其他保留,若原配置中不存在以下选项则添加:
[DEFAULT]
# LOG/STATE
verbose = True
debug = True
iscsi_helper = tgtadm
auth_strategy = keystone
volume_group = cinder-volumes
volume_name_template = volume-%s
state_path = /var/lib/cinder
volumes_dir = /var/lib/cinder/volumes
rootwrap_config = /etc/cinder/rootwrap.conf
api_paste_config = /etc/cinder/api-paste.ini
# RPC
rabbit_host = 192.16.0.254
rabbit_password = guest
rpc_backend = cinder.openstack.common.rpc.impl_kombu
# DATABASE
sql_connection = mysql://cinder:[email protected]/cinder
# API
osapi_volume_extension = cinder.api.contrib.standard_extensions

Edit api-paste.ini:
# vi /etc/cinder/api-paste.ini

Edit the [filter:authtoken] section at the end of the file:
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
service_protocol = http
service_host = 192.16.0.254
service_port = 5000
auth_host = 192.16.0.254
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = cinder
admin_password = password
signing_dir = /var/lib/cinder

Sync the database and start the services:
# cinder-manage db sync
2013-03-11 13:41:57.885 30326 DEBUG cinder.utils [-] backend <module 'cinder.db.sqlalchemy.migration' from '/usr/lib/python2.7/dist-packages/cinder/db/sqlalchemy/migration.pyc'> __get_backend /usr/lib/python2.7/dist-packages/cinder/utils.py:561

Start the services:
# for serv in api scheduler volume
do
    /etc/init.d/cinder-$serv restart
done
# /etc/init.d/tgt restart
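To verify the volume service end to end, you can create a 1 GB test volume with the Grizzly-era cinderclient (test01 is just an example name; this uses the credentials from export.sh):

# cinder create --display-name test01 1
# cinder list

The volume should reach the "available" status, after which it can be removed with cinder delete.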
Check whether the CPU supports KVM:

# apt-get install cpu-checker
# kvm-ok

If KVM acceleration is not available, verify that your CPU supports virtualization.
Install KVM and libvirt:

# apt-get install -y kvm libvirt-bin pm-utils

Edit /etc/libvirt/qemu.conf:
# vi /etc/libvirt/qemu.conf

Change the following:
cgroup_device_acl = [
    "/dev/null", "/dev/full", "/dev/zero",
    "/dev/random", "/dev/urandom",
    "/dev/ptmx", "/dev/kvm", "/dev/kqemu",
    "/dev/rtc", "/dev/hpet", "/dev/net/tun"
]
# vi /etc/libvirt/libvirtd.conf

Change the following:
listen_tls = 0
listen_tcp = 1
auth_tcp = "none"
# vi /etc/init/libvirt-bin.conf

Change the following:
env libvirtd_opts="-d -l"

Edit /etc/default/libvirt-bin:
# vi /etc/default/libvirt-bin

Change the following:
libvirtd_opts="-d -l"

Restart the libvirt service:
# service libvirt-bin restart
or
# /etc/init.d/libvirt-bin restart
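Since listen_tcp was enabled above, libvirtd should now also be listening on its TCP port (16509 by default); a quick, optional check:

# netstat -ntlp | grep libvirtd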
Install Nova:

# apt-get install nova-api nova-novncproxy novnc nova-ajax-console-proxy nova-cert nova-consoleauth nova-doc nova-scheduler
# apt-get install nova-compute nova-conductor nova-compute-kvm

Create the database:
# mysql -uroot -pmysql
mysql> create database nova;
mysql> grant all on nova.* to 'nova'@'%' identified by 'nova';
mysql> flush privileges;
mysql> quit;

Configure Nova:
# vi /etc/nova/nova.conf

Change the options below, keep everything else, and add any option that is not already present in the file:
[DEFAULT]
# LOGS/STATE
debug = True
verbose = True
logdir = /var/log/nova
state_path = /var/lib/nova
lock_path = /var/lock/nova
rootwrap_config = /etc/nova/rootwrap.conf
dhcpbridge = /usr/bin/nova-dhcpbridge
# SCHEDULER
compute_scheduler_driver = nova.scheduler.filter_scheduler.FilterScheduler
# VOLUMES
volume_api_class = nova.volume.cinder.API
# DATABASE
sql_connection = mysql://nova:[email protected]/nova
# COMPUTE
libvirt_type = kvm
compute_driver = libvirt.LibvirtDriver
instance_name_template = instance-%08x
api_paste_config = /etc/nova/api-paste.ini
# COMPUTE/APIS: if you have separate configs for separate services
# this flag is required for both nova-api and nova-compute
allow_resize_to_same_host = True
# APIS
osapi_compute_extension = nova.api.openstack.compute.contrib.standard_extensions
ec2_dmz_host = 192.16.0.254
s3_host = 192.16.0.254
# RABBITMQ
rabbit_host = 192.16.0.254
rabbit_password = guest
# GLANCE
image_service = nova.image.glance.GlanceImageService
glance_api_servers = 192.16.0.254:9292
# NETWORK
network_api_class = nova.network.quantumv2.api.API
quantum_url = http://192.16.0.254:9696
quantum_auth_strategy = keystone
quantum_admin_tenant_name = service
quantum_admin_username = quantum
quantum_admin_password = password
quantum_admin_auth_url = http://192.16.0.254:35357/v2.0
libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver = nova.virt.libvirt.firewall.IptablesFirewallDriver
# NOVNC CONSOLE
novncproxy_base_url = http://192.168.137.154:6080/vnc_auto.html
# Change vncserver_proxyclient_address and vncserver_listen to match each compute host
vncserver_proxyclient_address = 192.16.0.254
vncserver_listen = 0.0.0.0
# AUTHENTICATION
auth_strategy = keystone
[keystone_authtoken]
auth_host = 192.16.0.254
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = password
signing_dir = /tmp/keystone-signing-nova

Configure nova-compute to support OVS:
# vi /etc/nova/nova-compute.conf

Add:
libvirt_ovs_bridge=br-int
libvirt_vif_type=ethernet
libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
libvirt_use_virtio_for_bridges=True

Configure api-paste.ini:
# vi /etc/nova/api-paste.ini
[filter:authtoken]
paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory
auth_host = 192.16.0.254
auth_port = 35357
auth_protocol = http
admin_tenant_name = service
admin_user = nova
admin_password = password
signing_dir = /tmp/keystone-signing-nova
Restart the Nova services:

# for serv in api cert scheduler consoleauth novncproxy conductor compute; do
    /etc/init.d/nova-$serv restart
done
# nova-manage db sync
# !for        # re-runs the restart loop above

Check the services:
# nova-manage service list 2> /dev/null
Binary           Host       Zone      Status   State Updated_At
nova-cert        localhost  internal  enabled  :)    2013-03-11 02:56:21
nova-scheduler   localhost  internal  enabled  :)    2013-03-11 02:56:22
nova-consoleauth localhost  internal  enabled  :)    2013-03-11 02:56:22
nova-conductor   localhost  internal  enabled  :)    2013-03-11 02:56:22
nova-compute     localhost  nova      enabled  :)    2013-03-11 02:56:23

Security group rules:
# nova secgroup-add-rule default tcp 22 22 0.0.0.0/0
# nova secgroup-add-rule default icmp -1 -1 0.0.0.0/0

Troubleshooting Nova
Install the Dashboard:

# apt-get install -y memcached libapache2-mod-wsgi openstack-dashboard

Configure the Dashboard and change the Memcached listen address:
# mv /etc/openstack-dashboard/ubuntu_theme.py /etc/openstack-dashboard/ubuntu_theme.py.bak

The changes below adjust the listen address and port and are optional:
# vim /etc/openstack-dashboard/local_settings.py
DEBUG = True
CACHE_BACKEND = 'memcached://192.16.0.254:11211/'
OPENSTACK_HOST = "192.16.0.254"

# sed -i 's/127.0.0.1/192.16.0.254/g' /etc/memcached.conf

Start Memcached and Apache:
# /etc/init.d/memcached restart
# /etc/init.d/apache2 restart

Open the Dashboard in a browser; with the Ubuntu package it is served at http://192.16.0.254/horizon by default. Log in as admin with the password set earlier.
Create an external network:

# EXTERNAL_NET_ID=$(quantum net-create external_net1 --router:external=True | awk '/ id / {print $4}')

Create a subnet:
# SUBNET_ID=$(quantum subnet-create external_net1 192.168.137.0/24 --name=external_subnet1 --gateway_ip 192.168.137.2 --enable_dhcp=False | awk '/ id / {print $4}')

Create an internal network. First look up the demo tenant's ID:
# DEMO_ID=$(keystone tenant-list | awk '/ demo / {print $2}')

Here we provision a complete network for the demo tenant:
# INTERNAL_NET_ID=$(quantum net-create demo_net1 --tenant_id $DEMO_ID | awk '/ id / {print $4}')

Create a subnet for the demo tenant:
# DEMO_SUBNET_ID=$(quantum subnet-create demo_net1 10.1.1.0/24 --name=demo_subnet1 --gateway_ip 10.1.1.1 --tenant_id $DEMO_ID | awk '/ id / {print $4}')

Create a router for the demo tenant:
# DEMO_ROUTER_ID=$(quantum router-create --tenant_id $DEMO_ID demo_router1 | awk '/ id / {print $4}')

Attach the router to the subnet:
# quantum router-interface-add $DEMO_ROUTER_ID $DEMO_SUBNET_ID

Set the router's external gateway:
# quantum router-gateway-set $DEMO_ROUTER_ID $EXTERNAL_NET_ID
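To confirm the interface and gateway were attached, you can inspect the router (a standard *-show call in the quantum client):

# quantum router-show $DEMO_ROUTER_ID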
Boot a VM for the demo tenant. List the networks:

# quantum net-list
+--------------------------------------+---------------+-------------------------------------------------------+
| id                                   | name          | subnets                                               |
+--------------------------------------+---------------+-------------------------------------------------------+
| a6a8482f-d189-4ced-8b27-cf59331f6ce7 | external_net1 | 5c6a675e-98e5-435d-8bbc-d262accd2286 192.168.137.0/24 |
| afced220-0e54-46f5-925b-095bc90e6010 | demo_net1     | 7a748eba-f553-4323-ae92-3e453292c38d 10.1.1.0/24      |
+--------------------------------------+---------------+-------------------------------------------------------+

Create a port with a fixed IP for the instance:

# DEMO_PORT_ID=$(quantum port-create --tenant-id=$DEMO_ID --fixed-ip subnet_id=$DEMO_SUBNET_ID,ip_address=10.1.1.11 demo_net1 | awk '/ id / {print $4}')
# glance image-list
+--------------------------------------+--------+-------------+------------------+---------+--------+
| ID                                   | Name   | Disk Format | Container Format | Size    | Status |
+--------------------------------------+--------+-------------+------------------+---------+--------+
| 11f91e16-cbed-4f60-bfb4-b9ae96651547 | cirros | qcow2       | ovf              | 9761280 | active |
+--------------------------------------+--------+-------------+------------------+---------+--------+
# nova --os-tenant-name demo boot --image cirros --flavor 2 --nic port-id=$DEMO_PORT_ID instance01
# nova --os_tenant_name=demo list
+--------------------------------------+------------+--------+---------------------+
| ID                                   | Name       | Status | Networks            |
+--------------------------------------+------------+--------+---------------------+
| ea3c298c-9c30-4bf9-b6ca-37a719e01ff6 | instance01 | ACTIVE | demo_net1=10.1.1.11 |
+--------------------------------------+------------+--------+---------------------+
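If instance01 does not reach ACTIVE, the console log usually shows why (nova console-log is part of the standard client):

# nova --os-tenant-name demo console-log instance01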
If the instance fails to start, edit /etc/libvirt/qemu.conf:

# vi /etc/libvirt/qemu.conf

Change the following:
user = "root" group = "root" dynamic_ownership = 1然后重启libvirt-bin服务和nova服务
# /etc/init.d/libvirt-bin restart
# for serv in api cert scheduler consoleauth novncproxy conductor compute; do
    /etc/init.d/nova-$serv restart
done

Then boot the instance again.

Look up the instance's port:
# quantum port-list -- --device_id ea3c298c-9c30-4bf9-b6ca-37a719e01ff6
+--------------------------------------+------+-------------------+----------------------------------------------------------------------------------+
| id                                   | name | mac_address       | fixed_ips                                                                        |
+--------------------------------------+------+-------------------+----------------------------------------------------------------------------------+
| 701aafff-4e9e-48fe-891b-e576da51a2a0 |      | fa:16:3e:16:2e:13 | {"subnet_id": "7a748eba-f553-4323-ae92-3e453292c38d", "ip_address": "10.1.1.11"} |
+--------------------------------------+------+-------------------+----------------------------------------------------------------------------------+

Create a floating IP:
# quantum --os_tenant_name=demo floatingip-create external_net1
+---------------------+--------------------------------------+
| Field               | Value                                |
+---------------------+--------------------------------------+
| fixed_ip_address    |                                      |
| floating_ip_address | 192.168.137.4                        |
| floating_network_id | a6a8482f-d189-4ced-8b27-cf59331f6ce7 |
| id                  | 3e728bc3-88b9-40b2-a743-e83e84d9aa69 |
| port_id             |                                      |
| router_id           |                                      |
| tenant_id           | 13b1f513484d40afaec3fd0b382cac09     |
+---------------------+--------------------------------------+

Associate the floating IP with the instance's port:
# quantum --os_tenant_name=demo floatingip-associate 3e728bc3-88b9-40b2-a743-e83e84d9aa69 701aafff-4e9e-48fe-891b-e576da51a2a0
Associated floatingip 3e728bc3-88b9-40b2-a743-e83e84d9aa69

Check the floating IP we just associated:
# quantum floatingip-show 3e728bc3-88b9-40b2-a743-e83e84d9aa69
+---------------------+--------------------------------------+
| Field               | Value                                |
+---------------------+--------------------------------------+
| fixed_ip_address    | 10.1.1.11                            |
| floating_ip_address | 192.168.137.4                        |
| floating_network_id | a6a8482f-d189-4ced-8b27-cf59331f6ce7 |
| id                  | 3e728bc3-88b9-40b2-a743-e83e84d9aa69 |
| port_id             | 701aafff-4e9e-48fe-891b-e576da51a2a0 |
| router_id           | b9f116f5-8363-4fdf-b0d4-0ee239275395 |
| tenant_id           | 13b1f513484d40afaec3fd0b382cac09     |
+---------------------+--------------------------------------+

Ping the floating IP:
# ping 192.168.137.4
PING 192.168.137.4 (192.168.137.4) 56(84) bytes of data.
64 bytes from 192.168.137.4: icmp_req=1 ttl=63 time=25.0 ms
64 bytes from 192.168.137.4: icmp_req=2 ttl=63 time=0.963 ms
64 bytes from 192.168.137.4: icmp_req=3 ttl=63 time=0.749 ms
64 bytes from 192.168.137.4: icmp_req=4 ttl=63 time=0.628 ms
64 bytes from 192.168.137.4: icmp_req=5 ttl=63 time=0.596 ms
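Because the default security group was opened for TCP 22 and ICMP earlier, you should also be able to log in over SSH with the CirrOS credentials noted above (user cirros, password cubswin:)):

# ssh [email protected]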
References
http://longgeek.com/2013/03/11/openstack-grizzly-g3-for-ubuntu-12-04-all-in-one-installation/
https://github.com/mseknibilel/OpenStack-Grizzly-Install-Guide/blob/master/OpenStack_Grizzly_Install_Guide.rst