The operating system is Ubuntu 14.04 Server.

Before installing, configure the OpenStack (Juno) package repository on all servers:

apt-get install python-software-properties

apt-get install software-properties-common

add-apt-repository cloud-archive:juno

apt-get update && apt-get dist-upgrade
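Optionally, verify that the Juno cloud archive repository is now present (a quick sanity check; the exact file name under sources.list.d may differ):

grep -ri juno /etc/apt/sources.list.d/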


Install the NTP time synchronization service

apt-get install -y ntp

vim /etc/ntp.conf

Comment out the existing server entries and add:

server 10.0.0.11


Restart the NTP service

service ntp restart
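To confirm that time synchronization is working, the NTP peer list can be inspected (an optional check):

ntpq -p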


IP address plan

controller

192.168.2.11

10.0.0.11


network

192.168.2.22

10.0.0.22

10.0.1.22


compute

192.168.2.33 (the external network can be disconnected once installation and configuration are finished)

10.0.0.33

10.0.1.33


Detailed installation and configuration steps


Network configuration

controller server

vim /etc/hostname and write:

controller  

vim /etc/hosts and write:

10.0.0.11 controller

10.0.0.22 network

10.0.0.33 compute


vim /etc/network/interfaces

auto lo

iface lo inet loopback


# The primary network interface

auto eth0

iface eth0 inet static

        address 192.168.2.11

        netmask 255.255.0.0

        network 192.168.0.0

        broadcast 192.168.255.255

        gateway 192.168.1.1

        # dns-* options are implemented by the resolvconf package, if installed

        dns-nameservers 192.168.1.1

auto eth1

iface eth1 inet static

        address 10.0.0.11

        netmask 255.255.255.0

        gateway 10.0.0.1
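After rewriting the interfaces file, bring the interfaces up (or simply reboot) and check addressing and connectivity; for example, assuming the network node at 10.0.0.22 is already reachable:

ip addr show eth0

ip addr show eth1

ping -c 2 10.0.0.22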


network server

vim /etc/hostname and write:

network 

vim /etc/hosts and write:

10.0.0.11 controller

10.0.0.22 network

10.0.0.33 compute


root@network:~# vim /etc/network/interfaces

# This file describes the network interfaces available on your system

# and how to activate them. For more information, see interfaces(5).


# The loopback network interface

auto lo

iface lo inet loopback


# The primary network interface

auto eth0

iface eth0 inet static

        address 192.168.2.22

        netmask 255.255.0.0

        network 192.168.0.0

        broadcast 192.168.255.255

        gateway 192.168.1.1

        # dns-* options are implemented by the resolvconf package, if installed

        dns-nameservers 192.168.1.1

auto eth1

iface eth1 inet static

        address 10.0.0.22

        netmask 255.255.255.0

        gateway 10.0.0.1

auto eth2

iface eth2 inet static

        address 10.0.1.22

        netmask 255.255.255.0


compute server

vim /etc/hostname and write:

compute

vim /etc/hosts and write:

10.0.0.11 controller

10.0.0.22 network

10.0.0.33 compute


root@compute:~# vim /etc/network/interfaces

auto lo

iface lo inet loopback

auto eth0

iface eth0 inet static

        address 192.168.2.33

        netmask 255.255.0.0

        network 192.168.0.0

        broadcast 192.168.255.255

        gateway 192.168.1.1

        # dns-* options are implemented by the resolvconf package, if installed

        dns-nameservers 192.168.1.1

auto eth1

iface eth1 inet static

        address 10.0.0.33

        netmask 255.255.255.0

        gateway 10.0.0.1

auto eth2

iface eth2 inet static

        address 10.0.1.33

        netmask 255.255.255.0


Note: the DNS server for all of the above hosts is 192.168.1.1.


Installing the OpenStack components on the controller server


Install the MySQL database

apt-get install -y mysql-server-5.6 python-mysqldb


Edit the MySQL configuration file

vi /etc/mysql/my.cnf   

[mysqld]  

default-storage-engine = innodb

innodb_file_per_table

collation-server = utf8_general_ci

init-connect = 'SET NAMES utf8'

character-set-server = utf8

#bind-address = 127.0.0.1

bind-address = 0.0.0.0

Restart the database

service mysql restart

  

Remove the anonymous database users

Run the following in a terminal:

mysql_install_db

mysql_secure_installation
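You can then confirm that the InnoDB and UTF-8 settings from my.cnf took effect (an optional check):

mysql -u root -p -e "SHOW VARIABLES LIKE 'default_storage_engine'; SHOW VARIABLES LIKE 'character_set_server';"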

  

Install the RabbitMQ (message queue) service:

apt-get install -y rabbitmq-server 
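The broker's health can be checked after installation (optional; this guide keeps RabbitMQ's default guest account):

rabbitmqctl status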


Install Keystone

apt-get install -y keystone

Create the Keystone database (as with the other databases below, enter MySQL with mysql -u root -p):

CREATE DATABASE keystone;

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS';

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';

exit;

Delete the default SQLite database

rm /var/lib/keystone/keystone.db

Configure Keystone

Edit /etc/keystone/keystone.conf

[DEFAULT]

admin_token=ADMIN

log_dir=/var/log/keystone

[database]

#connection=sqlite:////var/lib/keystone/keystone.db

connection = mysql://keystone:[email protected]/keystone

Restart Keystone

service keystone restart

Sync the Keystone database

keystone-manage db_sync
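To confirm the sync succeeded, list the tables it created, using the keystone account granted above (optional):

mysql -u keystone -pKEYSTONE_DBPASS -e "USE keystone; SHOW TABLES;"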

Set the service-token environment variables

export OS_SERVICE_TOKEN=ADMIN

export OS_SERVICE_ENDPOINT=http://10.0.0.11:35357/v2.0

Create a user with administrator rights

root@controller:~# keystone user-create --name=admin --pass=admin_pass [email protected]

+----------+----------------------------------+

| Property |              Value               |

+----------+----------------------------------+

|  email   |         [email protected]         |

| enabled  |               True               |

|    id    | 61991b4c9abe46968b08c6d3268e8b25 |

|   name   |              admin               |

| username |              admin               |

+----------+----------------------------------+

root@controller:~# keystone role-create --name=admin

+----------+----------------------------------+

| Property |              Value               |

+----------+----------------------------------+

|    id    | 14d9aa53cfd7404ea5ecdc8c6ff96bb3 |

|   name   |              admin               |

+----------+----------------------------------+

root@controller:~# keystone role-create --name=_member_

+----------+----------------------------------+

| Property |              Value               |

+----------+----------------------------------+

|    id    | 69d86b6c21d54fc3848b30d8a7afa6d6 |

|   name   |             _member_             |

+----------+----------------------------------+

root@controller:~# keystone tenant-create --name=admin --description="Admin Tenant"

+-------------+----------------------------------+

|   Property  |              Value               |

+-------------+----------------------------------+

| description |           Admin Tenant           |

|   enabled   |               True               |

|      id     | 9474847b08264433b623233c85b7b6de |

|     name    |              admin               |

+-------------+----------------------------------+

root@controller:~# keystone user-role-add --user=admin --tenant=admin --role=admin

root@controller:~# keystone user-role-add --user=admin --role=_member_ --tenant=admin

Create a regular (demo) user

root@controller:~# keystone user-create --name=demo --pass=demo_pass [email protected]

+----------+----------------------------------+

| Property |              Value               |

+----------+----------------------------------+

|  email   |         [email protected]          |

| enabled  |               True               |

|    id    | f40209d709564e5fbe04dc4659f4ee72 |

|   name   |               demo               |

| username |               demo               |

+----------+----------------------------------+

root@controller:~# keystone tenant-create --name=demo --description="Demo Tenant"

+-------------+----------------------------------+

|   Property  |              Value               |

+-------------+----------------------------------+

| description |           Demo Tenant            |

|   enabled   |               True               |

|      id     | 5e3aa75b5bce4723a755e356ef22ad26 |

|     name    |               demo               |

+-------------+----------------------------------+

root@controller:~# keystone user-role-add --user=demo --role=_member_ --tenant=demo

Create the service tenant

root@controller:~# keystone tenant-create --name=service --description="Service Tenant"

+-------------+----------------------------------+

|   Property  |              Value               |

+-------------+----------------------------------+

| description |          Service Tenant          |

|   enabled   |               True               |

|      id     | 4fd53777c8f84c72b09ef025ab45977d |

|     name    |             service              |

+-------------+----------------------------------+

Define the Identity service and its API endpoint

root@controller:~# keystone service-create --name=keystone --type=identity --description="OpenStack Identity"

+-------------+----------------------------------+

|   Property  |              Value               |

+-------------+----------------------------------+

| description |        OpenStack Identity        |

|   enabled   |               True               |

|      id     | 6b6023376cc040e8be26a57815f17b87 |

|     name    |             keystone             |

|     type    |             identity             |

+-------------+----------------------------------+

Create the endpoint

root@controller:~# keystone endpoint-create \

> --service-id=$(keystone service-list | awk '/ identity / {print $2}') \

> --publicurl=http://192.168.2.11:5000/v2.0 \

> --internalurl=http://10.0.0.11:5000/v2.0 \

> --adminurl=http://10.0.0.11:35357/v2.0

+-------------+----------------------------------+

|   Property  |              Value               |

+-------------+----------------------------------+

|   adminurl  |   http://10.0.0.11:35357/v2.0    |

|      id     | 0dcae7b8deb9437996c7c7e0ed0b4086 |

| internalurl |    http://10.0.0.11:5000/v2.0    |

|  publicurl  |  http://192.168.2.11:5000/v2.0   |

|    region   |            regionOne             |

|  service_id | 6b6023376cc040e8be26a57815f17b87 |

+-------------+----------------------------------+

Verify Keystone

Check that Keystone was initialized correctly with the commands below.

Set the environment variables by creating two files, creds and admin_creds:

cat <<EOF >/root/creds

export OS_TENANT_NAME=admin

export OS_USERNAME=admin

export OS_PASSWORD=admin_pass

export OS_AUTH_URL="http://192.168.2.11:5000/v2.0/"

EOF

cat <<EOF >/root/admin_creds

export OS_USERNAME=admin

export OS_PASSWORD=admin_pass

export OS_TENANT_NAME=admin

export OS_AUTH_URL=http://10.0.0.11:35357/v2.0

EOF


The environment variables must be loaded before the operations below will work.


Unset the OS_SERVICE_TOKEN and OS_SERVICE_ENDPOINT variables; if they are left set, keystone will print warnings.

unset OS_SERVICE_TOKEN OS_SERVICE_ENDPOINT

Load the environment variables

source creds

Now the keystone client commands work:

root@controller:~# keystone user-list

+----------------------------------+-------+---------+------------------+

|                id                |  name | enabled |      email       |

+----------------------------------+-------+---------+------------------+

| 61991b4c9abe46968b08c6d3268e8b25 | admin |   True  | [email protected] |

| f40209d709564e5fbe04dc4659f4ee72 |  demo |   True  | [email protected]  |

+----------------------------------+-------+---------+------------------+

root@controller:~# keystone role-list

+----------------------------------+----------+

|                id                |   name   |

+----------------------------------+----------+

| 69d86b6c21d54fc3848b30d8a7afa6d6 | _member_ |

| 14d9aa53cfd7404ea5ecdc8c6ff96bb3 |  admin   |

+----------------------------------+----------+

root@controller:~# keystone tenant-list

+----------------------------------+---------+---------+

|                id                |   name  | enabled |

+----------------------------------+---------+---------+

| 9474847b08264433b623233c85b7b6de |  admin  |   True  |

| 5e3aa75b5bce4723a755e356ef22ad26 |   demo  |   True  |

| 4fd53777c8f84c72b09ef025ab45977d | service |   True  |

+----------------------------------+---------+---------+
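As a final check, you can request a token with the loaded admin credentials (optional):

keystone token-get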


Glance installation and configuration

apt-get install -y glance python-glanceclient

Create the database (mysql -u root -p):

CREATE DATABASE glance;

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';

exit;

Create the glance user in Keystone and assign its role

root@controller:~# keystone user-create --name=glance --pass=service_pass [email protected]

+----------+----------------------------------+

| Property |              Value               |

+----------+----------------------------------+

|  email   |        [email protected]         |

| enabled  |               True               |

|    id    | 9fa6993da7944a59b342a73a6f18728a |

|   name   |              glance              |

| username |              glance              |

+----------+----------------------------------+

root@controller:~# keystone user-role-add --user=glance --tenant=service --role=admin


Register the service and set up the endpoint

root@controller:~# keystone service-create --name=glance --type=image --description="OpenStack Image Service"

+-------------+----------------------------------+

|   Property  |              Value               |

+-------------+----------------------------------+

| description |     OpenStack Image Service      |

|   enabled   |               True               |

|      id     | d3d6fb3384db4ce9ad3423817b52bac9 |

|     name    |              glance              |

|     type    |              image               |

+-------------+----------------------------------+

root@controller:~# keystone endpoint-create \

> --service-id=$(keystone service-list | awk '/ image / {print $2}') \

> --publicurl=http://192.168.2.11:9292 \

> --internalurl=http://10.0.0.11:9292 \

> --adminurl=http://10.0.0.11:9292

+-------------+----------------------------------+

|   Property  |              Value               |

+-------------+----------------------------------+

|   adminurl  |      http://10.0.0.11:9292       |

|      id     | 0859727be85d473391c935c3f52ddddf |

| internalurl |      http://10.0.0.11:9292       |

|  publicurl  |     http://192.168.2.11:9292     |

|    region   |            regionOne             |

|  service_id | d3d6fb3384db4ce9ad3423817b52bac9 |

+-------------+----------------------------------+

Edit the Glance configuration files

vim /etc/glance/glance-api.conf

[database]

connection = mysql://glance:[email protected]/glance


[DEFAULT]

rpc_backend = rabbit

rabbit_host = 10.0.0.11


[keystone_authtoken]

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = glance

admin_password = service_pass


[paste_deploy]

flavor = keystone



vim /etc/glance/glance-registry.conf

[database]

# The file name to use with SQLite (string value)

#sqlite_db = /var/lib/glance/glance.sqlite

connection = mysql://glance:[email protected]/glance



[keystone_authtoken]

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = glance

admin_password = service_pass


[paste_deploy]

flavor = keystone


Restart the services

service glance-api restart; service glance-registry restart

Initialize the Glance database

glance-manage db_sync

Load the environment variables

source creds

Upload a test image

root@controller:~# glance image-create --name "cirros-0.3.2-x86_64" --is-public true \

> --container-format bare --disk-format qcow2 \

> --location http://cdn.download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img

+------------------+--------------------------------------+

| Property         | Value                                |

+------------------+--------------------------------------+

| checksum         | None                                 |

| container_format | bare                                 |

| created_at       | 2015-03-20T08:02:56                  |

| deleted          | False                                |

| deleted_at       | None                                 |

| disk_format      | qcow2                                |

| id               | 5dbfecab-9828-4492-88bb-c0dd6aa6d75c |

| is_public        | True                                 |

| min_disk         | 0                                    |

| min_ram          | 0                                    |

| name             | cirros-0.3.2-x86_64                  |

| owner            | 9474847b08264433b623233c85b7b6de     |

| protected        | False                                |

| size             | 13200896                             |

| status           | active                               |

| updated_at       | 2015-03-20T08:02:57                  |

| virtual_size     | None                                 |

+------------------+--------------------------------------+

List the images

root@controller:~# glance image-list

+--------------------------------------+---------------------+-------------+------------------+----------+--------+

| ID                                   | Name                | Disk Format | Container Format | Size     | Status |

+--------------------------------------+---------------------+-------------+------------------+----------+--------+

| 5dbfecab-9828-4492-88bb-c0dd6aa6d75c | cirros-0.3.2-x86_64 | qcow2       | bare             | 13200896 | active |

+--------------------------------------+---------------------+-------------+------------------+----------+--------+
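Details of the uploaded image can also be displayed by name (optional):

glance image-show cirros-0.3.2-x86_64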


Nova component installation and configuration

apt-get install -y nova-api nova-cert nova-conductor nova-consoleauth \

nova-novncproxy nova-scheduler python-novaclient

Create the nova database

mysql -u root -p

CREATE DATABASE nova;

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';

exit;

Create the nova user in Keystone and assign its role

root@controller:~# keystone user-create --name=nova --pass=service_pass [email protected]

+----------+----------------------------------+

| Property |              Value               |

+----------+----------------------------------+

|  email   |         [email protected]          |

| enabled  |               True               |

|    id    | cc25a28979b0467cac7a33426b8180f7 |

|   name   |               nova               |

| username |               nova               |

+----------+----------------------------------+

root@controller:~# keystone user-role-add --user=nova --tenant=service --role=admin


Register the service and set up the endpoint

root@controller:~# keystone service-create --name=nova --type=compute --description="OpenStack Compute"

+-------------+----------------------------------+

|   Property  |              Value               |

+-------------+----------------------------------+

| description |        OpenStack Compute         |

|   enabled   |               True               |

|      id     | 7bb1f0e64e3b4ef8b0408902261b2b37 |

|     name    |               nova               |

|     type    |             compute              |

+-------------+----------------------------------+

root@controller:~# keystone endpoint-create \

> --service-id=$(keystone service-list | awk '/ compute / {print $2}') \

> --publicurl=http://192.168.2.11:8774/v2/%\(tenant_id\)s \

> --internalurl=http://10.0.0.11:8774/v2/%\(tenant_id\)s \

> --adminurl=http://10.0.0.11:8774/v2/%\(tenant_id\)s

+-------------+-------------------------------------------+

|   Property  |                   Value                   |

+-------------+-------------------------------------------+

|   adminurl  |   http://10.0.0.11:8774/v2/%(tenant_id)s  |

|      id     |      24fc3bf020084040ba6a58d60c0b1719     |

| internalurl |   http://10.0.0.11:8774/v2/%(tenant_id)s  |

|  publicurl  | http://192.168.2.11:8774/v2/%(tenant_id)s |

|    region   |                 regionOne                 |

|  service_id |      7bb1f0e64e3b4ef8b0408902261b2b37     |

+-------------+-------------------------------------------+

Edit the Nova configuration file

vim /etc/nova/nova.conf

Below is my complete nova configuration file:

[DEFAULT]

dhcpbridge_flagfile=/etc/nova/nova.conf

dhcpbridge=/usr/bin/nova-dhcpbridge

logdir=/var/log/nova

state_path=/var/lib/nova

lock_path=/var/lock/nova

force_dhcp_release=True

iscsi_helper=tgtadm

libvirt_use_virtio_for_bridges=True

connection_type=libvirt

root_helper=nova-rootwrap /etc/nova/rootwrap.conf

verbose=True

ec2_private_dns_show_ip=True

api_paste_config=/etc/nova/api-paste.ini

volumes_path=/var/lib/nova/volumes

enabled_apis=ec2,osapi_compute,metadata


rpc_backend = rabbit

rabbit_host = 10.0.0.11

my_ip = 10.0.0.11

vncserver_listen = 10.0.0.11

vncserver_proxyclient_address = 10.0.0.11

auth_strategy = keystone


[keystone_authtoken]

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = nova

admin_password = service_pass


[database]

connection = mysql://nova:[email protected]/nova

Delete the SQLite database

rm /var/lib/nova/nova.sqlite

Initialize the nova database

nova-manage db sync

Restart the Nova services

service nova-api restart

service nova-cert restart

service nova-conductor restart

service nova-consoleauth restart

service nova-novncproxy restart

service nova-scheduler restart

Check the status of the Nova services

root@controller:~# nova-manage service list

Binary           Host                                 Zone             Status     State Updated_At

nova-cert        controller                           internal         enabled    :-)   2015-03-20 08:24:17

nova-consoleauth controller                           internal         enabled    :-)   2015-03-20 08:24:17

nova-conductor   controller                           internal         enabled    :-)   2015-03-20 08:24:17

nova-scheduler   controller                           internal         enabled    :-)   2015-03-20 08:24:17


The smiley faces indicate that all of the services are up.


Neutron component installation and configuration

apt-get install -y neutron-server neutron-plugin-ml2

Create the Neutron database

mysql -u root -p

CREATE DATABASE neutron;

GRANT ALL PRIVILEGES ON neutron.* TO neutron@'localhost' IDENTIFIED BY 'NEUTRON_DBPASS';

GRANT ALL PRIVILEGES ON neutron.* TO neutron@'%' IDENTIFIED BY 'NEUTRON_DBPASS';

exit;

Create the neutron user in Keystone and assign its role

root@controller:~# keystone user-create --name=neutron --pass=service_pass [email protected]

+----------+----------------------------------+

| Property |              Value               |

+----------+----------------------------------+

|  email   |        [email protected]        |

| enabled  |               True               |

|    id    | 322f0a1d2c7e416abf0e118e50625443 |

|   name   |             neutron              |

| username |             neutron              |

+----------+----------------------------------+

root@controller:~# keystone user-role-add --user=neutron --tenant=service --role=admin

Register the service and endpoint

root@controller:~# keystone service-create --name=neutron --type=network --description="OpenStack Networking"

+-------------+----------------------------------+

|   Property  |              Value               |

+-------------+----------------------------------+

| description |       OpenStack Networking       |

|   enabled   |               True               |

|      id     | e3d179a7b9be42ba982c79cd652a7be8 |

|     name    |             neutron              |

|     type    |             network              |

+-------------+----------------------------------+

root@controller:~# keystone endpoint-create \

> --service-id=$(keystone service-list | awk '/ network / {print $2}') \

> --publicurl=http://192.168.2.11:9696 \

> --internalurl=http://10.0.0.11:9696 \

> --adminurl=http://10.0.0.11:9696

+-------------+----------------------------------+

|   Property  |              Value               |

+-------------+----------------------------------+

|   adminurl  |      http://10.0.0.11:9696       |

|      id     | 8b968c25d8324bb28125604a21c64f54 |

| internalurl |      http://10.0.0.11:9696       |

|  publicurl  |     http://192.168.2.11:9696     |

|    region   |            regionOne             |

|  service_id | e3d179a7b9be42ba982c79cd652a7be8 |

+-------------+----------------------------------+


Get the nova_admin_tenant_id (the ID of the service tenant)

root@controller:~# keystone tenant-list | awk '/ service / { print $2 }'

4fd53777c8f84c72b09ef025ab45977d
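If you would rather not copy the ID by hand, it can be captured in a shell variable and echoed in the form needed below (SERVICE_TENANT_ID is just a name chosen here):

SERVICE_TENANT_ID=$(keystone tenant-list | awk '/ service / { print $2 }')

echo "nova_admin_tenant_id = $SERVICE_TENANT_ID"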


Edit the Neutron configuration file

vim /etc/neutron/neutron.conf


[DEFAULT]

# Example: service_plugins = router,firewall,lbaas,vpnaas,metering

service_plugins = router,lbaas


# auth_strategy = keystone

auth_strategy = keystone


# allow_overlapping_ips = False

allow_overlapping_ips = True


rpc_backend = neutron.openstack.common.rpc.impl_kombu


rabbit_host = 10.0.0.11


notification_driver = neutron.openstack.common.notifier.rpc_notifier


# ======== neutron nova interactions ==========

# Send notification to nova when port status is active.

notify_nova_on_port_status_changes = True


# Send notifications to nova when port data (fixed_ips/floatingips) change

# so nova can update it's cache.

notify_nova_on_port_data_changes = True


# URL for connection to nova (Only supports one nova region currently).

nova_url = http://10.0.0.11:8774/v2


# Name of nova region to use. Useful if keystone manages more than one region

# nova_region_name =


# Username for connection to nova in admin context

nova_admin_username = nova


# The uuid of the admin nova tenant

nova_admin_tenant_id = 4fd53777c8f84c72b09ef025ab45977d


# Password for connection to nova in admin context.

nova_admin_password = service_pass


# Authorization URL for connection to nova in admin context.

nova_admin_auth_url = http://10.0.0.11:35357/v2.0


[keystone_authtoken]

#auth_host = 127.0.0.1

#auth_port = 35357

#auth_protocol = http

#admin_tenant_name = %SERVICE_TENANT_NAME%

#admin_user = %SERVICE_USER%

#admin_password = %SERVICE_PASSWORD%

#signing_dir = $state_path/keystone-signing

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = neutron

admin_password = service_pass


[database]

#connection = sqlite:////var/lib/neutron/neutron.sqlite

connection = mysql://neutron:[email protected]/neutron



Configure the ML2 (Layer 2) plug-in

vim /etc/neutron/plugins/ml2/ml2_conf.ini

[ml2]

type_drivers = gre

tenant_network_types = gre

mechanism_drivers = openvswitch


[ml2_type_gre]

tunnel_id_ranges = 1:1000


[securitygroup]

firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

enable_security_group = True


Configure Nova to use Neutron

vim /etc/nova/nova.conf

Add the following under [DEFAULT]:

network_api_class=nova.network.neutronv2.api.API

neutron_url=http://10.0.0.11:9696

neutron_auth_strategy=keystone

neutron_admin_tenant_name=service

neutron_admin_username=neutron

neutron_admin_password=service_pass

neutron_admin_auth_url=http://10.0.0.11:35357/v2.0

libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver

linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver

firewall_driver=nova.virt.firewall.NoopFirewallDriver

security_group_api=neutron

Restart the Nova services

service nova-api restart

service nova-scheduler restart

service nova-conductor restart

Restart the Neutron service

service neutron-server restart

Install the OpenStack dashboard (Horizon)

apt-get install -y apache2 memcached libapache2-mod-wsgi openstack-dashboard

Edit /etc/openstack-dashboard/local_settings.py


#ALLOWED_HOSTS = ['horizon.example.com', ]

ALLOWED_HOSTS = ['localhost','192.168.2.11']


#OPENSTACK_HOST = "127.0.0.1"

OPENSTACK_HOST = "10.0.0.11"



vi /etc/apache2/apache2.conf 

Add the following line at the end of the file: ServerName localhost



Restart the Apache and memcached services


service apache2 restart; service memcached restart
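You can check that the dashboard answers over HTTP (optional; adjust the address if yours differs):

curl -sI http://192.168.2.11/horizon/ | head -n 1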


The controller node is now fully installed.


Network node (network)

Install the base packages

apt-get install -y vlan bridge-utils

Edit the kernel network parameters

vim /etc/sysctl.conf

Append the following at the end of the file:

net.ipv4.ip_forward=1

net.ipv4.conf.all.rp_filter=0

net.ipv4.conf.default.rp_filter=0

Apply the changes

sysctl -p
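Confirm that the new values are active (optional):

sysctl net.ipv4.ip_forward net.ipv4.conf.all.rp_filter net.ipv4.conf.default.rp_filter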

Install the Neutron components

apt-get install -y neutron-plugin-ml2 neutron-plugin-openvswitch-agent haproxy neutron-lbaas-agent \

dnsmasq neutron-l3-agent neutron-dhcp-agent

Edit the Neutron configuration file

vim /etc/neutron/neutron.conf

# Example: service_plugins = router,firewall,lbaas,vpnaas,metering

service_plugins = router,lbaas


# The strategy to be used for auth.

# Supported values are 'keystone'(default), 'noauth'.

auth_strategy = keystone


allow_overlapping_ips = True


rpc_backend = neutron.openstack.common.rpc.impl_kombu


rabbit_host = 10.0.0.11


[keystone_authtoken]

#auth_host = 127.0.0.1

#auth_port = 35357

#auth_protocol = http

#admin_tenant_name = %SERVICE_TENANT_NAME%

#admin_user = %SERVICE_USER%

#admin_password = %SERVICE_PASSWORD%

#signing_dir = $state_path/keystone-signing

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = neutron

admin_password = service_pass


Edit /etc/neutron/l3_agent.ini

interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver

use_namespaces = True


Edit /etc/neutron/dhcp_agent.ini

interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver

dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq

use_namespaces = True


Edit /etc/neutron/metadata_agent.ini

auth_url = http://10.0.0.11:5000/v2.0

auth_region = regionOne


admin_tenant_name = service

admin_user = neutron

admin_password = service_pass

nova_metadata_ip = 10.0.0.11

metadata_proxy_shared_secret = helloOpenStack


Log in to the controller node and add the following to the [DEFAULT] section of /etc/nova/nova.conf:

service_neutron_metadata_proxy = true

metadata_proxy_shared_secret = helloOpenStack


Restart the nova-api service (on the controller)

service nova-api restart


Edit /etc/neutron/plugins/ml2/ml2_conf.ini

[ml2]

type_drivers = gre

tenant_network_types = gre

mechanism_drivers = openvswitch


[ml2_type_gre]

tunnel_id_ranges = 1:1000


[ovs]

local_ip = 10.0.1.22

tunnel_type = gre

enable_tunneling = True


[securitygroup]

firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

enable_security_group = True


Edit /etc/neutron/lbaas_agent.ini

[DEFAULT]

device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver

interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver

[haproxy]

user_group = nogroup


Restart Open vSwitch

service openvswitch-switch restart


Create the br-ex external bridge

ovs-vsctl add-br br-ex

ovs-vsctl add-port br-ex eth0


Edit /etc/network/interfaces

# This file describes the network interfaces available on your system

# and how to activate them. For more information, see interfaces(5).


# The loopback network interface

auto lo

iface lo inet loopback


# The primary network interface

#auto eth0

#iface eth0 inet static

#       address 192.168.2.22

#       netmask 255.255.0.0

#       network 192.168.0.0

#       broadcast 192.168.255.255

#       gateway 192.168.1.1

        # dns-* options are implemented by the resolvconf package, if installed

#       dns-nameservers 192.168.1.1

auto eth0

iface eth0 inet manual

        up ifconfig $IFACE 0.0.0.0 up

        up ip link set $IFACE promisc on

        down ip link set $IFACE promisc off

        down ifconfig $IFACE down

auto br-ex

iface br-ex inet static

        address 192.168.2.22

        netmask 255.255.0.0

        gateway 192.168.1.1

        dns-nameservers 192.168.1.1

auto eth1

iface eth1 inet static

        address 10.0.0.22

        netmask 255.255.255.0

        gateway 10.0.0.1

auto eth2

iface eth2 inet static

        address 10.0.1.22

        netmask 255.255.255.0
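After the new interfaces file is applied (via ifdown/ifup or a reboot), the external bridge and its address can be verified (optional):

ovs-vsctl show

ip addr show br-ex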


Set the environment variables


cat <<EOF >/root/creds

export OS_TENANT_NAME=admin

export OS_USERNAME=admin

export OS_PASSWORD=admin_pass

export OS_AUTH_URL="http://192.168.2.11:5000/v2.0/"

EOF


source creds


root@controller:~# neutron agent-list

Unable to establish connection to http://192.168.2.11:9696/v2.0/agents.json

Cause: the Neutron database had not been synced; running the sync (on the controller) resolves it:

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade juno" neutron


root@controller:~# neutron agent-list

+--------------------------------------+--------------------+---------+-------+----------------+---------------------------+

| id                                   | agent_type         | host    | alive | admin_state_up | binary                    |

+--------------------------------------+--------------------+---------+-------+----------------+---------------------------+

| 08bedacf-5eb4-445e-ba91-ea0d481a5772 | DHCP agent         | network | :-)   | True           | neutron-dhcp-agent        |

| 263fa30f-0af9-4534-9153-ea01ffa71874 | Loadbalancer agent | network | :-)   | True           | neutron-lbaas-agent       |

| 32a17ac6-50c6-4cfa-8032-8c6f67984251 | L3 agent           | network | :-)   | True           | neutron-l3-agent          |

| 3e0d5e0c-41c1-4fe0-9642-05862c0d65ed | Open vSwitch agent | network | :-)   | True           | neutron-openvswitch-agent |

| c02625d3-d3df-4bd8-bdfa-a75fff5f2f66 | Metadata agent     | network | :-)   | True           | neutron-metadata-agent    |

+--------------------------------------+--------------------+---------+-------+----------------+---------------------------+

The network server configuration is complete.




Compute node

Install the KVM packages

apt-get install -y kvm libvirt-bin pm-utils



Install the compute node components

apt-get install -y nova-compute-kvm python-guestfs


Make the current kernel image world-readable (required by python-guestfs)

dpkg-statoverride  --update --add root root 0644 /boot/vmlinuz-$(uname -r)


Create the script /etc/kernel/postinst.d/statoverride so that future kernels get the same treatment

#!/bin/sh

version="$1"

# passing the kernel version is required

[ -z "${version}" ] && exit 0

dpkg-statoverride --update --add root root 0644 /boot/vmlinuz-${version}

Make it executable

chmod +x /etc/kernel/postinst.d/statoverride
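The override can be confirmed afterwards (optional):

dpkg-statoverride --list | grep vmlinuz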


Edit /etc/nova/nova.conf and add the following:


[DEFAULT]

dhcpbridge_flagfile=/etc/nova/nova.conf

dhcpbridge=/usr/bin/nova-dhcpbridge

logdir=/var/log/nova

state_path=/var/lib/nova

lock_path=/var/lock/nova

force_dhcp_release=True

iscsi_helper=tgtadm

libvirt_use_virtio_for_bridges=True

connection_type=libvirt

root_helper=nova-rootwrap /etc/nova/rootwrap.conf

verbose=True

ec2_private_dns_show_ip=True

api_paste_config=/etc/nova/api-paste.ini

volumes_path=/var/lib/nova/volumes

enabled_apis=ec2,osapi_compute,metadata


auth_strategy = keystone

rpc_backend = rabbit

rabbit_host = 10.0.0.11

my_ip = 10.0.0.33

vnc_enabled = True

vncserver_listen = 0.0.0.0

vncserver_proxyclient_address = 10.0.0.33

novncproxy_base_url = http://192.168.2.11:6080/vnc_auto.html

glance_host = 10.0.0.11

vif_plugging_is_fatal=false

vif_plugging_timeout=0



[database]

connection = mysql://nova:[email protected]/nova


[keystone_authtoken]

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = nova

admin_password = service_pass


Delete the SQLite database

rm /var/lib/nova/nova.sqlite


Restart the nova-compute service

service nova-compute restart


Edit /etc/sysctl.conf


net.ipv4.ip_forward=1

net.ipv4.conf.all.rp_filter=0

net.ipv4.conf.default.rp_filter=0


Apply the changes immediately

sysctl -p


Install the networking components


apt-get install -y neutron-common neutron-plugin-ml2 neutron-plugin-openvswitch-agent

Edit /etc/neutron/neutron.conf


#core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin

core_plugin = ml2


# service_plugins =

# Example: service_plugins = router,firewall,lbaas,vpnaas,metering

service_plugins = router


auth_strategy = keystone


allow_overlapping_ips = True


rpc_backend = neutron.openstack.common.rpc.impl_kombu


rabbit_host = 10.0.0.11


[keystone_authtoken]

auth_uri = http://10.0.0.11:5000

auth_host = 10.0.0.11

auth_port = 35357

auth_protocol = http

admin_tenant_name = service

admin_user = neutron

admin_password = service_pass


Edit /etc/neutron/plugins/ml2/ml2_conf.ini


[ml2]

type_drivers = gre

tenant_network_types = gre

mechanism_drivers = openvswitch


[ml2_type_gre]

tunnel_id_ranges = 1:1000


[ovs]

local_ip = 10.0.1.33

tunnel_type = gre

enable_tunneling = True


[securitygroup]

firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

enable_security_group = True


Restart Open vSwitch


service openvswitch-switch restart

Edit /etc/nova/nova.conf again and add the following under [DEFAULT]:

network_api_class = nova.network.neutronv2.api.API

neutron_url = http://10.0.0.11:9696

neutron_auth_strategy = keystone

neutron_admin_tenant_name = service

neutron_admin_username = neutron

neutron_admin_password = service_pass

neutron_admin_auth_url = http://10.0.0.11:35357/v2.0

linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver

firewall_driver = nova.virt.firewall.NoopFirewallDriver

security_group_api = neutron


Edit /etc/nova/nova-compute.conf and switch the hypervisor to QEMU:

[DEFAULT]

compute_driver=libvirt.LibvirtDriver

[libvirt]

virt_type=qemu
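QEMU is chosen here because the compute host may not expose hardware virtualization; as a quick check, a non-zero count from the command below means VT-x/AMD-V is available and virt_type=kvm could be used instead:

egrep -c '(vmx|svm)' /proc/cpuinfo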


Restart the related services

service nova-compute restart

service neutron-plugin-openvswitch-agent restart


Verify from the controller

root@controller:~# nova-manage service list

Binary           Host                                 Zone             Status     State Updated_At

nova-cert        controller                           internal         enabled    :-)   2015-03-20 10:29:32

nova-consoleauth controller                           internal         enabled    :-)   2015-03-20 10:29:31

nova-conductor   controller                           internal         enabled    :-)   2015-03-20 10:29:36

nova-scheduler   controller                           internal         enabled    :-)   2015-03-20 10:29:35

nova-compute     compute                              nova             enabled    :-)   2015-03-20 10:29:31
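The compute node should also appear as a hypervisor; this can be confirmed from the controller with the admin credentials loaded (optional):

nova hypervisor-list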



At this point all three OpenStack nodes are installed.

You can now log in to the dashboard at:


http://192.168.2.11/horizon/

Username: admin

Password: admin_pass