openstack kilo安装记录

注:安装前尽快确定好每个节点的主机名和IP地址,若安装过程中修改了主机名可能会带来不必要的麻烦,比如需要修改数据库等

 

**********基础配置**********

CONTROLLER 节点

yum install ntp

vim /etc/ntp.conf

server NTP_SERVER iburst    (NTP_SERVER替换为实际的上游NTP服务器地址)

restrict -4 default kod notrap nomodify

restrict -6 default kod notrap nomodify

systemctl enable ntpd.service

systemctl start ntpd.service

 

安装Linux 附加软件包源:(注:官网给的epel源地址已经失效了,重新找的url链接安装)

wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm

rpm -ivh epel-release-latest-7.noarch.rpm

yum repolist

yum install http://rdo.fedorapeople.org/openstack-kilo/rdo-release-kilo.rpm

yum upgrade

yum install openstack-selinux

 

安装sql database

yum install mariadb mariadb-server MySQL-python

创建/etc/my.cnf.d/mariadb_openstack.cnf

[mysqld]

bind-address = 10.0.0.11

default-storage-engine = innodb

innodb_file_per_table

collation-server = utf8_general_ci

init-connect = 'SET NAMES utf8'

character-set-server = utf8

退出

 

systemctl enable mariadb.service

systemctl start mariadb.service

执行mysql_secure_installation脚本

输入sql的密码:Cloudiv@1234

数据库安装结束

 

 

安装消息队列:

yum install rabbitmq-server

systemctl enable rabbitmq-server.service

systemctl start rabbitmq-server.service

 

rabbitmqctl add_user openstack cloudiv

rabbitmqctl set_permissions openstack ".*" ".*" ".*"

消息队列安装结束

 

NETWORK节点:

yum install ntp

vim /etc/ntp.conf

添加 server controller iburst

systemctl enable ntpd.service

systemctl start ntpd.service

 

安装Linux 附加软件包源:

wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm

rpm -ivh epel-release-latest-7.noarch.rpm

yum repolist

yum install http://rdo.fedorapeople.org/openstack-kilo/rdo-release-kilo.rpm

yum upgrade

yum install openstack-selinux

 

 

COMPUTE节点:

yum install ntp

vim /etc/ntp.conf

添加 server controller iburst

systemctl enable ntpd.service

systemctl start ntpd.service

 

安装Linux 附加软件包源:

wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm

rpm -ivh epel-release-latest-7.noarch.rpm

yum repolist

yum install http://rdo.fedorapeople.org/openstack-kilo/rdo-release-kilo.rpm

yum upgrade

yum install openstack-selinux

 

 

 

 

 

**********安装KEYSTONE服务**********

Controller节点

mysql -u root -p   输入密码Cloudiv@1234

CREATE DATABASE keystone;

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'cloudiv';

GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'cloudiv';

[root@controller ~]# openssl rand -hex 10

832947c475500a6abe14

 

[root@controller ~]# yum install openstack-keystone httpd mod_wsgi python-openstackclient memcached python-memcached

[root@controller ~]# systemctl enable memcached.service

[root@controller ~]# systemctl start memcached.service

 

[root@controller ~]#vim /etc/keystone/keystone.conf  修改文件

[DEFAULT]

admin_token = 832947c475500a6abe14

 

[database]

connection =mysql://keystone:cloudiv@controller/keystone

 

[memcache]

 

servers = localhost:11211

 

[token]

provider =keystone.token.providers.uuid.Provider

driver = keystone.token.persistence.backends.memcache.Token

 

[revoke]

driver =keystone.contrib.revoke.backends.sql.Revoke

 

su -s /bin/sh -c "keystone-manage db_sync" keystone

配置HTTP server

[root@controller ~]#vim/etc/httpd/conf/httpd.conf

ServerName controller

创建/etc/httpd/conf.d/wsgi-keystone.conf文件

Listen 5000

Listen 35357

 

   WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}

   WSGIProcessGroup keystone-public

   WSGIScriptAlias / /var/www/cgi-bin/keystone/main

   WSGIApplicationGroup %{GLOBAL}

   WSGIPassAuthorization On

   LogLevel info

   ErrorLogFormat "%{cu}t %M"

   ErrorLog /var/log/httpd/keystone-error.log

   CustomLog /var/log/httpd/keystone-access.log combined

 

   WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}

   WSGIProcessGroup keystone-admin

   WSGIScriptAlias / /var/www/cgi-bin/keystone/admin

   WSGIApplicationGroup %{GLOBAL}

   WSGIPassAuthorization On

   LogLevel info

   ErrorLogFormat "%{cu}t %M"

   ErrorLog /var/log/httpd/keystone-error.log

   CustomLog /var/log/httpd/keystone-access.log combined

 

[root@controller ~]#mkdir -p /var/www/cgi-bin/keystone

[root@controller ~]#

curl http://git.openstack.org/cgit/openstack/keystone/plain/httpd/keystone.py?h=stable/kilo | tee /var/www/cgi-bin/keystone/main /var/www/cgi-bin/keystone/admin

注:现在官网下载的文件可能有问题,应该是Python格式的文件,可参考如下文件

 

 

 

 

[root@controller ~]# chown -R keystone:keystone /var/www/cgi-bin/keystone

[root@controller ~]# chmod 755 /var/www/cgi-bin/keystone/*

 

[root@controller ~]# systemctl enable httpd.service

[root@controller ~]#systemctl start httpd.service

 

创建服务实体和接口API

[root@controller ~]# export OS_TOKEN=832947c475500a6abe14

[root@controller ~]# export OS_URL=http://controller:35357/v2.0

[root@controller ~]# openstack service create --name keystone --description "OpenStack Identity" identity

+-------------+----------------------------------+

| Field       | Value                            |

+-------------+----------------------------------+

| description | OpenStack Identity               |

| enabled     | True                             |

| id          | 06597dc99b3e4addb624c3c010bffdf3 |

| name        | keystone                         |

| type        | identity                         |

+-------------+----------------------------------+

 

[root@controller ~]# openstack endpoint create --publicurl http://controller:5000/v2.0 --internalurl http://controller:5000/v2.0 --adminurl http://controller:35357/v2.0 --region RegionOne identity

+--------------+----------------------------------+

| Field        | Value                            |

+--------------+----------------------------------+

| adminurl     | http://controller:35357/v2.0     |

| id           | d5de3a0ce24345c2891440f4190535ab |

| internalurl  | http://controller:5000/v2.0      |

| publicurl    | http://controller:5000/v2.0      |

| region       | RegionOne                        |

| service_id   | 06597dc99b3e4addb624c3c010bffdf3 |

| service_name | keystone                         |

| service_type | identity                         |

+--------------+----------------------------------+

 

[root@controller ~]# openstack project create --description "Admin Project" admin

+-------------+----------------------------------+

| Field       | Value                            |

+-------------+----------------------------------+

| description | Admin Project                    |

| enabled     | True                             |

| id          | 9d3943df61ba4c41829db8da9484a747 |

| name        | admin                            |

+-------------+----------------------------------+

[root@controller ~]# openstack user create --password-prompt admin

User Password:cloudiv

Repeat User Password:cloudiv

+----------+----------------------------------+

| Field   | Value                           |

+----------+----------------------------------+

| email   | None                            |

| enabled | True                            |

| id      | 0909afb36f5f46b88f056409283ecb18 |

| name    | admin                           |

| username | admin                            |

+----------+----------------------------------+

 

[root@controller ~]# openstack role create admin

+-------+----------------------------------+

| Field | Value                            |

+-------+----------------------------------+

| id   | 78872f1ed6234d09a2d39246be5f8a78 |

| name | admin                           |

+-------+----------------------------------+

 

[root@controller ~]# openstack role add --project admin --user admin admin

+-------+----------------------------------+

| Field | Value                            |

+-------+----------------------------------+

| id   | 78872f1ed6234d09a2d39246be5f8a78 |

| name | admin                           |

+-------+----------------------------------+

 

[root@controller ~]# openstack project create --description "Service Project" service

+-------------+----------------------------------+

| Field       | Value                            |

+-------------+----------------------------------+

| description | Service Project                  |

| enabled     | True                             |

| id          | 5ee719f48b024f8cafb6a7c15f472ed2 |

| name        | service                          |

+-------------+----------------------------------+

 

[root@controller ~]# openstack project create --description "Demo Project" demo

+-------------+----------------------------------+

| Field       | Value                            |

+-------------+----------------------------------+

| description | Demo Project                     |

| enabled     | True                             |

| id          | abbb54ee34c14a858f58d37860d42cde |

| name        | demo                             |

+-------------+----------------------------------+

 

[root@controller ~]# openstack user create --password-prompt demo

User Password:cloudiv

Repeat User Password:cloudiv

+----------+----------------------------------+

| Field   | Value                           |

+----------+----------------------------------+

| email   | None                            |

| enabled | True                            |

| id      | 531ac99476f04594867e3be3e0c5d18f |

| name    | demo                            |

| username | demo                             |

+----------+----------------------------------+

 

[root@controller ~]# openstack role create user

+-------+----------------------------------+

| Field | Value                            |

+-------+----------------------------------+

| id   | d4dca2847c334e0ebd1596ea1feac068 |

| name | user                            |

+-------+----------------------------------+

 

[root@controller ~]# openstack role add --project demo --user demo user

+-------+----------------------------------+

| Field | Value                            |

+-------+----------------------------------+

| id   | d4dca2847c334e0ebd1596ea1feac068 |

| name | user                             |

+-------+----------------------------------+

 

[root@controller ~]# vim /usr/share/keystone/keystone-dist-paste.ini

删除 admin_token_auth from the [pipeline:public_api], [pipeline:admin_api], and [pipeline:api_v3] sections

[root@controller ~]# unset OS_TOKEN OS_URL  做一些权限验证

 

创建openstack环境脚本:

[root@controller ~]#vim admin-openrc.sh添加

export OS_PROJECT_DOMAIN_ID=default

export OS_USER_DOMAIN_ID=default

export OS_PROJECT_NAME=admin

export OS_TENANT_NAME=admin

export OS_USERNAME=admin

export OS_PASSWORD=cloudiv

export OS_AUTH_URL=http://controller:35357/v3

 

[root@controller ~]#vim demo-openrc.sh添加

export OS_PROJECT_DOMAIN_ID=default

export OS_USER_DOMAIN_ID=default

export OS_PROJECT_NAME=demo

export OS_TENANT_NAME=demo

export OS_USERNAME=demo

export OS_PASSWORD=cloudiv

export OS_AUTH_URL=http://controller:5000/v3

 

激活脚本

[root@controller ~]#source admin-openrc.sh

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

**********安装glance服务**********

Controller节点:

创建数据库:

[root@controller ~]# mysql -u root -p  输入密码Cloudiv@1234

 

MariaDB [(none)]> CREATE DATABASE glance;

Query OK, 1 row affected (0.00 sec)

 

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'cloudiv';

Query OK, 0 rows affected (0.00 sec)

 

MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'cloudiv';

Query OK, 0 rows affected (0.00 sec)

 

MariaDB [(none)]> exit

 

[root@controller ~]# source admin-openrc.sh

[root@controller ~]# openstack user create --password-prompt glance

User Password:

Repeat User Password:

+----------+----------------------------------+

| Field   | Value                           |

+----------+----------------------------------+

| email   | None                            |

| enabled | True                            |

| id      | 674ece04ec5c44c099fa2ae26dbbcc07 |

| name    | glance                          |

| username | glance                           |

+----------+----------------------------------+

[root@controller ~]# openstack role add --project service --user glance admin

+-------+----------------------------------+

| Field | Value                            |

+-------+----------------------------------+

| id   | 78872f1ed6234d09a2d39246be5f8a78 |

| name | admin                           |

+-------+----------------------------------+

[root@controller ~]# openstack service create --name glance --description "OpenStack Image service" image

+-------------+----------------------------------+

| Field       | Value                            |

+-------------+----------------------------------+

| description | OpenStack Imageservice          |

| enabled     | True                             |

| id          | a233b07b39d64ae4bdfc405cc8692d98 |

| name        | glance                           |

| type        | image                            |

+-------------+----------------------------------+

[root@controller ~]# openstack endpoint create --publicurl http://controller:9292 --internalurl http://controller:9292 --adminurl http://controller:9292 --region RegionOne image

+--------------+----------------------------------+

| Field        | Value                            |

+--------------+----------------------------------+

| adminurl     | http://controller:9292           |

| id           | a3fee92809e14e559f456929136960e6 |

| internalurl  | http://controller:9292           |

| publicurl    | http://controller:9292           |

| region       | RegionOne                        |

| service_id   | a233b07b39d64ae4bdfc405cc8692d98 |

| service_name | glance                           |

| service_type | image                            |

+--------------+----------------------------------+

 

[root@controller ~]# yum install openstack-glance python-glance python-glanceclient

[root@controller ~]# vim /etc/glance/glance-api.conf

[database]

connection =mysql://glance:cloudiv@controller/glance

[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = glance

password = cloudiv    (注:原文残留官方文档占位符GLANCE_PASS,应与前面创建glance用户时的密码一致)

[paste_deploy]

flavor = keystone

[glance_store]

default_store = file

filesystem_store_datadir =/var/lib/glance/images/

[DEFAULT]

notification_driver = noop

 

[root@controller ~]vim /etc/glance/glance-registry.conf

[database]

connection =mysql://glance:cloudiv@controller/glance

[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = glance

password = cloudiv

[paste_deploy]

flavor = keystone

 

[DEFAULT]

notification_driver = noop

同步数据库

su -s /bin/sh -c "glance-manage db_sync" glance

systemctl enable openstack-glance-api.service openstack-glance-registry.service

systemctl start openstack-glance-api.service openstack-glance-registry.service

 

验证

[root@controller ~]# echo "export OS_IMAGE_API_VERSION=2" | tee -a admin-openrc.sh demo-openrc.sh

[root@controller ~]# source admin-openrc.sh

[root@controller ~]# mkdir /tmp/images

[root@controller ~]# wget -P /tmp/images http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img

[root@controller ~]# glance image-create --name "cirros-0.3.4-x86_64" --file /tmp/images/cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare --visibility public --progress

[root@controller ~]# glance image-list

+--------------------------------------+---------------------+

| ID                                   | Name                |

+--------------------------------------+---------------------+

| 7caf0c1f-4bf2-4501-85cf-be905afd7dae |cirros-0.3.4-x86_64 |

+--------------------------------------+---------------------+

[root@controller ~]# rm -r /tmp/images

 

 

 

 

 

 

 

**********安装NOVA服务**********

Controller节点配置:

[root@controller ~]#mysql -u root -p

CREATE DATABASE nova;

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'cloudiv';

GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'cloudiv';

 

[root@controller ~]#source admin-openrc.sh

[root@controller ~]# openstack user create --password-prompt nova

User Password:

Repeat User Password:

+----------+----------------------------------+

| Field   | Value                           |

+----------+----------------------------------+

| email   | None                            |

| enabled | True                            |

| id      | f3f8320a840d4795b5e110711021793f |

| name    | nova                            |

| username | nova                             |

+----------+----------------------------------+

[root@controller ~]# openstack role add --project service --user nova admin

+-------+----------------------------------+

| Field | Value                            |

+-------+----------------------------------+

| id   | 78872f1ed6234d09a2d39246be5f8a78 |

| name | admin                           |

+-------+----------------------------------+

[root@controller ~]# openstack service create --name nova --description "OpenStack Compute" compute

+-------------+----------------------------------+

| Field       | Value                            |

+-------------+----------------------------------+

| description | OpenStack Compute                |

| enabled     | True                             |

| id          | 1966678a85c04766bea925b361012f7d |

| name        | nova                             |

| type        | compute                          |

+-------------+----------------------------------+

[root@controller ~]# openstack endpoint create --publicurl http://controller:8774/v2/%\(tenant_id\)s --internalurl http://controller:8774/v2/%\(tenant_id\)s --adminurl http://controller:8774/v2/%\(tenant_id\)s --region RegionOne compute

+--------------+-----------------------------------------+

| Field       | Value                                   |

+--------------+-----------------------------------------+

| adminurl     | http://controller:8774/v2/%(tenant_id)s|

| id           |d3bb3a45351c4044b8fd69590b9e5b0e        |

| internalurl  | http://controller:8774/v2/%(tenant_id)s |

| publicurl    | http://controller:8774/v2/%(tenant_id)s |

| region       | RegionOne                               |

| service_id   | 1966678a85c04766bea925b361012f7d        |

| service_name | nova                                    |

| service_type | compute                                 |

+--------------+-----------------------------------------+

 

[root@controller ~]# yum install openstack-nova-api openstack-nova-cert openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler python-novaclient

[root@controller ~]# vim /etc/nova/nova.conf

[database]

connection =mysql://nova:cloudiv@controller/nova

 

[DEFAULT]

rpc_backend = rabbit

auth_strategy = keystone

my_ip = 10.0.0.11

vncserver_listen = 10.0.0.11

vncserver_proxyclient_address = 10.0.0.11

 

[oslo_messaging_rabbit]

rabbit_host = controller

rabbit_userid = openstack

rabbit_password = cloudiv

 

[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = nova

password = cloudiv

 

[glance]

host = controller

 

[oslo_concurrency]

lock_path = /var/lib/nova/tmp

 

[root@controller ~]# su -s /bin/sh -c "nova-manage db sync" nova

[root@controller ~]# systemctl enable openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

[root@controller ~]# systemctl start openstack-nova-api.service openstack-nova-cert.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service

 

COMPUTE节点配置:

[root@compute1 ~]# yum install openstack-nova-compute sysfsutils

[root@compute1 ~]# vim /etc/nova/nova.conf

[DEFAULT]

rpc_backend = rabbit

auth_strategy = keystone

my_ip = 10.139.8.190

 

vnc_enabled = True

vncserver_listen = 0.0.0.0

vncserver_proxyclient_address = 10.139.8.190

novncproxy_base_url = http://controller:6080/vnc_auto.html

 

[oslo_messaging_rabbit]

rabbit_host = controller

rabbit_userid = openstack

rabbit_password = cloudiv

 

[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = nova

password = cloudiv

 

[glance]

host = controller

 

[oslo_concurrency]

lock_path = /var/lib/nova/tmp

 

[libvirt]

virt_type = qemu

 

注:如果是在虚拟机上安装的compute节点,可能需要安装或者升级qemu的版本,否则可能会导致虚拟机无法正常启动

[root@compute1 ~]# systemctl enable libvirtd.service openstack-nova-compute.service

[root@compute1 ~]# systemctl start libvirtd.service openstack-nova-compute.service

 

在controller上做一些验证

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

 

**********安装Neutron服务**********

CONTROLLER节点:

[root@controller ~]# mysql -u root -p  输入密码:Cloudiv@1234

CREATE DATABASE neutron;

GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'cloudiv';

GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'cloudiv';

 

[root@controller ~]#source admin-openrc.sh

[root@controller ~]# openstack user create --password-prompt neutron

User Password:

[root@controller ~]# openstack role add --project service --user neutron admin

[root@controller ~]# openstack service create --name neutron --description "OpenStack Networking" network

[root@controller ~]# openstack endpoint create --publicurl http://controller:9696 --adminurl http://controller:9696 --internalurl http://controller:9696 --region RegionOne network

[root@controller ~]# yum install openstack-neutron openstack-neutron-ml2 python-neutronclient which

 

[root@controller ~]# vim /etc/neutron/neutron.conf

[database]

connection = mysql://neutron:cloudiv@controller/neutron    (注:原文残留官方文档占位符NEUTRON_DBPASS,应与前面建库授权的密码一致)

 

[DEFAULT]

rpc_backend = rabbit

auth_strategy = keystone

core_plugin = ml2

service_plugins = router

allow_overlapping_ips = True

 

notify_nova_on_port_status_changes = True

notify_nova_on_port_data_changes = True

nova_url = http://controller:8774/v2

 

[oslo_messaging_rabbit]

rabbit_host = controller

rabbit_userid = openstack

rabbit_password = cloudiv

 

[keystone_authtoken]

...

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = neutron

password = cloudiv

 

[nova]

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

region_name = RegionOne

project_name = service

username = nova

password = cloudiv

 

[root@controller ~]# vim/etc/neutron/plugins/ml2/ml2_conf.ini

[ml2]

type_drivers = flat,vlan,gre,vxlan

tenant_network_types = gre

mechanism_drivers = openvswitch

 

[ml2_type_gre]

tunnel_id_ranges = 1:1000

 

[securitygroup]

enable_security_group = True

enable_ipset = True

firewall_driver =neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

 

[root@controller ~]#vim /etc/nova/nova.conf

[DEFAULT]

network_api_class =nova.network.neutronv2.api.API

security_group_api = neutron

linuxnet_interface_driver = nova.network.linux_net.LinuxOVSInterfaceDriver

firewall_driver = nova.virt.firewall.NoopFirewallDriver

 

[neutron]

url = http://controller:9696

auth_strategy = keystone

admin_auth_url =http://controller:35357/v2.0

admin_tenant_name = service

admin_username = neutron

admin_password = cloudiv    (注:原文残留官方文档占位符NEUTRON_PASS,应为neutron用户的实际密码)

 

ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron

systemctl restart openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service

 

systemctl enable neutron-server.service

systemctl start neutron-server.service

 

验证

source admin-openrc.sh

neutron ext-list

 

 

 

NETWORK节点:

[root@cl-03 ~]# vim /etc/sysctl.conf 添加

net.ipv4.ip_forward=1

net.ipv4.conf.all.rp_filter=0

net.ipv4.conf.default.rp_filter=0

 

[root@cl-03 ~]# sysctl -p

[root@cl-03 ~]# yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch

 

[root@cl-03 ~]# vim /etc/neutron/neutron.conf

[DEFAULT]

rpc_backend = rabbit

auth_strategy = keystone

core_plugin = ml2

service_plugins = router

allow_overlapping_ips = True

 

[oslo_messaging_rabbit]

rabbit_host = controller

rabbit_userid = openstack

rabbit_password = cloudiv

 

[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = neutron

password = cloudiv

 

[root@cl-03 ~]# vim/etc/neutron/plugins/ml2/ml2_conf.ini

[ml2]

type_drivers = flat,vlan,gre,vxlan

tenant_network_types = vxlan

mechanism_drivers = openvswitch

 

[ml2_type_flat]

flat_networks = external

 

[ml2_type_vxlan]

vni_ranges = 1:10000

 

[securitygroup]

enable_security_group = True

enable_ipset = True

firewall_driver =neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

 

[ovs]

local_ip = 192.168.6.124

bridge_mappings = external:br-ex

 

[agent]

tunnel_types = vxlan

 

[root@cl-03 ~]# vim/etc/neutron/l3_agent.ini

[DEFAULT]

interface_driver =neutron.agent.linux.interface.OVSInterfaceDriver

external_network_bridge =

router_delete_namespaces = True

 

[root@cl-03 ~]# vim/etc/neutron/dhcp_agent.ini

[DEFAULT]

interface_driver =neutron.agent.linux.interface.OVSInterfaceDriver

dhcp_driver =neutron.agent.linux.dhcp.Dnsmasq

dhcp_delete_namespaces = True

[root@cl-03 ~]# vim/etc/neutron/dhcp_agent.ini

[DEFAULT]

dnsmasq_config_file =/etc/neutron/dnsmasq-neutron.conf

 

[root@cl-03 ~]#vim/etc/neutron/dnsmasq-neutron.conf

dhcp-option-force=26,1454

 

[root@cl-03 ~]#pkill dnsmasq

 

[root@cl-03 ~]#vim/etc/neutron/metadata_agent.ini

[DEFAULT]

...

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_region = RegionOne

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = neutron

password = cloudiv

 

nova_metadata_ip = controller

 

metadata_proxy_shared_secret = cloudiv

 

在controller节点上增加:

[root@cl-03 ~]#vim /etc/nova/nova.conf

[neutron]

service_metadata_proxy = True

metadata_proxy_shared_secret = cloudiv

[root@controller ~]# systemctl restart openstack-nova-api.service

 

 

返回network节点

[root@cl-03 ~]# systemctl enable openvswitch.service

[root@cl-03 ~]# systemctl start openvswitch.service

 

[root@cl-03 ~]#ovs-vsctl add-br br-ex

[root@cl-03 ~]# ovs-vsctl add-port br-ex eth2(eth2用来模拟访问外网的网卡)

 

 

[root@cl-03 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

[root@cl-03 ~]# cp /usr/lib/systemd/system/neutron-openvswitch-agent.service /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig

[root@cl-03 ~]# sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service

[root@cl-03 ~]# systemctl enable neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-ovs-cleanup.service

[root@cl-03 ~]# systemctl start neutron-openvswitch-agent.service neutron-l3-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service neutron-ovs-cleanup.service

在controller上做一些验证

 

 

COMPUTE节点:

[root@compute1 ~]# vim /etc/sysctl.conf

net.ipv4.conf.all.rp_filter=0

net.ipv4.conf.default.rp_filter=0

net.bridge.bridge-nf-call-iptables=1

net.bridge.bridge-nf-call-ip6tables=1

 

[root@compute1 ~]#sysctl -p

[root@compute1 ~]# yum install openstack-neutron openstack-neutron-ml2 openstack-neutron-openvswitch

[root@compute1 ~]# vim /etc/neutron/neutron.conf

注释掉[database]选项的所有connection

[DEFAULT]

rpc_backend = rabbit

auth_strategy = keystone

core_plugin = ml2

service_plugins = router

allow_overlapping_ips = True

verbose = True

 

[oslo_messaging_rabbit]

rabbit_host = controller

rabbit_userid = openstack

rabbit_password = cloudiv

 

[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

auth_plugin = password

project_domain_id = default

user_domain_id = default

project_name = service

username = neutron

password = cloudiv

 

 

[root@compute1 ~]#vim /etc/neutron/plugins/ml2/ml2_conf.ini

[ml2]

type_drivers = flat,vlan,gre,vxlan

tenant_network_types = vxlan

mechanism_drivers = openvswitch

 

[ml2_type_vxlan]

vni_ranges = 1:10000

 

[securitygroup]

enable_security_group = True

enable_ipset = True

firewall_driver =neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver

 

[ovs]

local_ip = 192.168.6.118(eth1 tunnel ipaddress)

 

[agent]

tunnel_types = vxlan

 

 

[root@compute1 ~]# systemctl enable openvswitch.service

[root@compute1 ~]# systemctl start openvswitch.service

 

[root@compute1 ~]#vim /etc/nova/nova.conf

[DEFAULT]

network_api_class =nova.network.neutronv2.api.API

security_group_api = neutron

linuxnet_interface_driver =nova.network.linux_net.LinuxOVSInterfaceDriver

firewall_driver = nova.virt.firewall.NoopFirewallDriver

 

[neutron]

url = http://controller:9696

auth_strategy = keystone

admin_auth_url =http://controller:35357/v2.0

admin_tenant_name = service

admin_username = neutron

admin_password = cloudiv

 

 

[root@compute1 ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

[root@compute1 ~]# cp /usr/lib/systemd/system/neutron-openvswitch-agent.service /usr/lib/systemd/system/neutron-openvswitch-agent.service.orig

[root@compute1 ~]# sed -i 's,plugins/openvswitch/ovs_neutron_plugin.ini,plugin.ini,g' /usr/lib/systemd/system/neutron-openvswitch-agent.service

[root@compute1 ~]# systemctl restart openstack-nova-compute.service

[root@compute1 ~]# systemctl enable neutron-openvswitch-agent.service

[root@compute1 ~]# systemctl start neutron-openvswitch-agent.service

controller执行验证:

[root@controller ~]#source admin-openrc.sh

[root@controller ~]#neutron agent-list

+--------------------------------------+--------------------+----------+-------+----------------+---------------------------+

| id                                  |agent_type         | host     | alive | admin_state_up | binary                    |

+--------------------------------------+--------------------+----------+-------+----------------+---------------------------+

| 30275801-e17a-41e4-8f53-9db63544f689 |Metadata agent     | network  | :-)  | True           | neutron-metadata-agent    |

| 4bd8c50e-7bad-4f3b-955d-67658a491a15 |Open vSwitch agent | network  | :-)   | True           | neutron-openvswitch-agent |

| 756e5bba-b70f-4715-b80e-e37f59803d20 | L3agent           | network  | :-)  | True           | neutron-l3-agent          |

| 9c45473c-6d6d-4f94-8df1-ebd0b6838d5f |DHCP agent         | network  | :-)  | True           | neutron-dhcp-agent        |

| a5a49051-05eb-4b4f-bfc7-d36235fe9131 |Open vSwitch agent | compute1 | :-)   |True           |neutron-openvswitch-agent |

+--------------------------------------+--------------------+----------+-------+----------------+---------------------------+

 

 

 

创建openstack初始网络

(1)network节点创建外部网络,controller节点执行如下命令

[root@controller ~]#source admin-openrc.sh

[root@controller ~]# neutron net-create ext-net --router:external --provider:physical_network external --provider:network_type flat

[root@controller ~]# neutron subnet-create ext-net 192.168.119.0/24 --name ext-subnet --allocation-pool start=192.168.119.101,end=192.168.119.200

               --disable-dhcp --gateway 192.168.119.5(模拟外网网关)

(2)创建租户网络,在controller节点执行:

[root@controller ~]#source demo-openrc.sh

[root@controller ~]# neutron net-create demo-net

[root@controller ~]# neutron subnet-create demo-net 1.1.1.0/24 --name demo-subnet --dns-nameserver 8.8.4.4 --gateway 1.1.1.1

 

(3)network节点创建vRouter,在controller节点执行:

[root@controller ~]# neutron router-create demo-router

[root@controller ~]# neutron router-interface-add demo-router demo-subnet

[root@controller ~]# neutron router-gateway-set demo-router ext-net

 

(4)验证

controller节点重新绑定一个192.168.119.0/24网段的网卡,可以ping通bx-ex上分配的subnet网段的ip

[root@controller ~]# ping 192.168.119.101

PING 192.168.119.101 (192.168.119.101)56(84) bytes of data.

64 bytes from 192.168.119.101: icmp_seq=1ttl=64 time=1.40 ms

64 bytes from 192.168.119.101: icmp_seq=2ttl=64 time=0.468 ms

64 bytes from 192.168.119.101: icmp_seq=3ttl=64 time=0.493 ms

64 bytes from 192.168.119.101: icmp_seq=4ttl=64 time=0.498 ms

64 bytes from 192.168.119.101: icmp_seq=5ttl=64 time=0.623 ms

 

 

**********DASHBOARD**********

目前安装dashboard的时候可能出现包兼容的问题,鉴于可用CLI进行操作验证,暂且PASS

 

 

 

 

 

 

**********LAUNCH INSTANCE**********

注:centos7如果采用qemu作为libvirt的启动的方式,需要关注qemu的版本,默认qemu-1.5.3可能导致instance无法启动的

         解决方案:升级qemu到2.5以上:yum install -y centos-release-qemu-ev    yum -y install xxxx

 

[root@controller ~]#source demo-openrc.sh

[root@controller ~]# nova keypair-add demo-key

[root@controller ~]#nova keypair-list

[root@controller ~]#nova flavor-list

[root@controller ~]# nova image-list

[root@controller ~]# neutron net-list

[root@controller ~]#nova secgroup-list

 

[root@controller ~]# nova boot --flavor m1.tiny --image cirros-0.3.4-x86_64 --nic net-id=0f5f9a16-4133-4876-a5e6-251e30b257d7 --security-group default --key-name demo-key demo-instance1

 

[root@controller ~]#nova list

[root@controller ~]# nova get-vnc-console demo-instance1 novnc

 

验证:

Ping网关

 

Ping floating IP

 

Ping vDHCP Server IP

 

 

注:这边遇到个问题是running的镜像无法获取vDHCP分配过来的IP地址;经过排查,是VM发给network节点的dhcp消息被network节点的iptables规则给拒绝掉了,iptables规则不明白是怎么出现的,后面的同学可以借鉴

你可能感兴趣的:(openstack kilo安装记录)