这次试验部署在了 VMware 虚拟机上,用的是 CentOS7.6 系统;openstack-M 版安装流程,同学们还可以参考 openstack 官方文档。
节点(主机名) | cpu虚拟化 | 主机配置 | IP |
---|---|---|---|
controller | 开启cpu虚拟化 | 4C4G | 192.168.117.130 |
compute | 开启cpu虚拟化 | 4C4G | 192.168.117.131 |
[root@localhost ~]# hostnamectl set-hostname controller
[root@localhost ~]# hostnamectl set-hostname compute
[root@controller ~]# vim /etc/hosts
##########
192.168.117.130 controller
192.168.117.131 compute
##########
[root@compute ~]# vim /etc/hosts
##########
192.168.117.130 controller
192.168.117.131 compute
##########
[root@controller ~]# ping compute
[root@compute ~]# ping controller
在生产环境中可以做一个yum发布服务器。
[root@controller ~]# vim /etc/yum.repos.d/openstack-mitaka.repo
##########
[openstack]
name=openstack
baseurl=http://vault.centos.org/7.2.1511/cloud/x86_64/openstack-mitaka
enabled=1
gpgcheck=0
##########
[root@controller ~]# yum clean all
[root@controller ~]# yum makecache
[root@compute ~]# vim /etc/yum.repos.d/openstack-mitaka.repo
##########
[openstack]
name=openstack
baseurl=http://vault.centos.org/7.2.1511/cloud/x86_64/openstack-mitaka
enabled=1
gpgcheck=0
##########
[root@compute ~]# yum clean all
[root@compute ~]# yum makecache
NTP 时钟同步是十分重要的,生产环境中最好有单独的时钟源发布服务器。
[root@controller ~]# yum install -y chrony
[root@controller ~]# vim /etc/chrony.conf
##########
server ntp6.aliyun.com iburst
allow 192.168.0.0/16
##########
[root@controller ~]# systemctl enable chronyd.service
[root@controller ~]# systemctl start chronyd.service
[root@compute ~]# yum install -y chrony
[root@compute ~]# vim /etc/chrony.conf
##########
server controller iburst
##########
[root@compute ~]# systemctl enable chronyd.service
[root@compute ~]# systemctl start chronyd.service
[root@compute ~]# chronyc sources
[root@controller ~]# yum install -y python-openstackclient
[root@controller ~]# yum install -y openstack-selinux
[root@compute ~]# yum install -y python-openstackclient
[root@compute ~]# yum install -y openstack-selinux
[root@controller ~]# yum install -y mariadb mariadb-server python2-PyMySQL
bind-address
值为控制节点的管理网络IP地址以使得其它节点可以通过管理网络访问数据库,设置如下来启用一些有用的选项和 UTF-8 字符集:[root@controller ~]# vim /etc/my.cnf.d/openstack.cnf
##########
[mysqld]
bind-address = 192.168.117.130
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
##########
[root@controller ~]# systemctl start mariadb.service
[root@controller ~]# systemctl enable mariadb.service
[root@controller ~]# mysql_secure_installation
##########
Enter current password for root (enter for none):回车
Set root password? [Y/n] n
Remove anonymous users? [Y/n] y
Disallow root login remotely? [Y/n] y
Remove test database and access to it? [Y/n] y
Reload privilege tables now? [Y/n] y
##########
[root@controller ~]# yum install -y rabbitmq-server
[root@controller ~]# systemctl enable rabbitmq-server.service
[root@controller ~]# systemctl start rabbitmq-server.service
openstack
用户[root@controller ~]# rabbitmqctl add_user openstack RABBIT_PASS
#添加环境变量并重启服务即可
[root@controller ~]# export HOSTNAME=controller
[root@controller ~]# rabbitmq-server -detached
openstack
用户配置写和读权限[root@controller ~]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
[root@controller ~]# rabbitmq-plugins enable rabbitmq_management
[root@controller ~]# yum install -y memcached python-memcached
注:在 openstack-mitaka 官方文档中没有此步骤,这是个坑
[root@controller ~]# vim /etc/sysconfig/memcached
##########
OPTIONS="-l 127.0.0.1,::1,controller"
##########
[root@controller ~]# systemctl enable memcached.service
[root@controller ~]# systemctl start memcached.service
keystone 功能:
MariaDB [(none)]> create database keystone;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'KEYSTONE_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'KEYSTONE_DBPASS';
[root@controller ~]# yum install -y openstack-keystone httpd mod_wsgi
/etc/keystone/keystone.conf
配置文件[root@controller ~]# vim /etc/keystone/keystone.conf
##########
[DEFAULT]
……略……
admin_token = ADMIN_TOKEN
[database]
……略……
connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone
[token]
……略……
provider = fernet
##########
[root@controller ~]# su -s /bin/sh -c "keystone-manage db_sync" keystone
/etc/keystone
下生成了 fernet-keys
目录:[root@controller ~]# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
/etc/httpd/conf/httpd.conf
文件,配置ServerName
选项为控制节点:这步是一个简单的优化,可以使 http 启动更快
[root@controller ~]# vim /etc/httpd/conf/httpd.conf
##########
ServerName controller
##########
/etc/httpd/conf.d/wsgi-keystone.conf
[root@controller ~]# vim /etc/httpd/conf.d/wsgi-keystone.conf
##########
Listen 5000
Listen 35357
<VirtualHost *:5000>
WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-public
WSGIScriptAlias / /usr/bin/keystone-wsgi-public
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined
<Directory /usr/bin>
Require all granted
</Directory>
</VirtualHost>
<VirtualHost *:35357>
WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
WSGIProcessGroup keystone-admin
WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
ErrorLogFormat "%{cu}t %M"
ErrorLog /var/log/httpd/keystone-error.log
CustomLog /var/log/httpd/keystone-access.log combined
<Directory /usr/bin>
Require all granted
</Directory>
</VirtualHost>
##########
[root@controller ~]# systemctl enable httpd.service
[root@controller ~]# systemctl start httpd.service
/etc/keystone/keystone.conf
文件下的 TOKEN
一致)[root@controller ~]# export OS_TOKEN=ADMIN_TOKEN
[root@controller ~]# export OS_URL=http://controller:35357/v3
[root@controller ~]# export OS_IDENTITY_API_VERSION=3
[root@controller ~]# openstack service create --name keystone --description "OpenStack Identity" identity
[root@controller ~]# openstack endpoint create --region RegionOne identity public http://controller:5000/v3
[root@controller ~]# openstack endpoint create --region RegionOne identity internal http://controller:5000/v3
[root@controller ~]# openstack endpoint create --region RegionOne identity admin http://controller:35357/v3
default
[root@controller ~]# openstack domain create --description "Default Domain" default
admin
项目[root@controller ~]# openstack project create --domain default --description "Admin Project" admin
admin
用户,密码为 ADMIN_PASS
[root@controller ~]# openstack user create --domain default --password ADMIN_PASS admin
[root@controller ~]# openstack role create admin
admin
角色到 admin
项目和用户上[root@controller ~]# openstack role add --project admin --user admin admin
[root@controller ~]# openstack project create --domain default --description "Service Project" service
OS_TOKEN
和OS_URL
环境变量[root@controller ~]# unset OS_TOKEN OS_URL
[root@controller ~]# openstack --os-auth-url http://controller:35357/v3 \
> --os-project-domain-name default --os-user-domain-name default \
> --os-project-name admin --os-username admin token issue
##########
Password: ADMIN_PASS
##########
[root@controller ~]# vim admin-openrc
##########
export OS_PROJECT_DOMAIN_NAME=default
export OS_USER_DOMAIN_NAME=default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=ADMIN_PASS
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
##########
admin-openrc
文件来获取环境变量[root@controller ~]# source admin-openrc
注:将这条命令写入 .bashrc 文件中,系统重启时就会自动加载环境变量,或者将环境变量配置文件写入 /etc/profile 文件中
[root@controller ~]# openstack token issue
openstack 镜像服务包括两个组件:
glance
库MariaDB [(none)]> create database glance;
glance
数据库授予恰当的权限MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'GLANCE_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'GLANCE_DBPASS';
glance
用户,密码为 GLANCE_PASS
,(注这步跟官方文档不一样
)[root@controller ~]# openstack user create --domain default --password GLANCE_PASS glance
[root@controller ~]# openstack role add --project service --user glance admin
报错:
原因:keystone 第五步 service 项目没有创建
glance
服务实体[root@controller ~]# openstack service create --name glance --description "OpenStack Image" image
[root@controller ~]# openstack endpoint create --region RegionOne image public http://controller:9292
[root@controller ~]# openstack endpoint create --region RegionOne image internal http://controller:9292
[root@controller ~]# openstack endpoint create --region RegionOne image admin http://controller:9292
[root@controller ~]# yum install -y openstack-glance
/etc/glance/glance-api.conf
并完成如下动作[root@controller ~]# vim /etc/glance/glance-api.conf
##########
[database]
……略……
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[keystone_authtoken]
……略……
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = GLANCE_PASS
[paste_deploy]
……略……
flavor = keystone
[glance_store]
……略……
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
##########
/etc/glance/glance-registry.conf
并完成如下动作[root@controller ~]# vim /etc/glance/glance-registry.conf
##########
[database]
……略……
connection = mysql+pymysql://glance:GLANCE_DBPASS@controller/glance
[keystone_authtoken]
……略……
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = GLANCE_PASS
[paste_deploy]
……略……
flavor = keystone
##########
[root@controller ~]# su -s /bin/sh -c "glance-manage db_sync" glance
[root@controller ~]# systemctl enable openstack-glance-api.service openstack-glance-registry.service
[root@controller ~]# systemctl start openstack-glance-api.service openstack-glance-registry.service
[root@controller ~]# wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
[root@controller ~]# openstack image create "cirros" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare --public
[root@controller ~]# openstack image list
nova 服务由下列组件构成:
MariaDB [(none)]> create database nova_api;
MariaDB [(none)]> create database nova;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'NOVA_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'NOVA_DBPASS';
nova
用户[root@controller ~]# openstack user create --domain default --password NOVA_PASS nova
[root@controller ~]# openstack role add --project service --user nova admin
nova
服务实体[root@controller ~]# openstack service create --name nova --description "OpenStack Compute" compute
compute
服务 API 端点[root@controller ~]# openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1/%\(tenant_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1/%\(tenant_id\)s
[root@controller ~]# openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1/%\(tenant_id\)s
[root@controller ~]# yum install -y openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler
/etc/nova/nova.conf
文件并完成下面的操作[root@controller ~]# vim /etc/nova/nova.conf
##########
[DEFAULT]
……略……
enabled_apis = osapi_compute,metadata
rpc_backend = rabbit
auth_strategy = keystone
my_ip = 192.168.117.130
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
……略……
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova_api
[database]
……略……
connection = mysql+pymysql://nova:NOVA_DBPASS@controller/nova
[oslo_messaging_rabbit]
……略……
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS
[keystone_authtoken]
……略……
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = NOVA_PASS
[vnc]
……略……
vncserver_listen = $my_ip
vncserver_proxyclient_address = $my_ip
[glance]
……略……
api_servers = http://controller:9292
[oslo_concurrency]
……略……
lock_path = /var/lib/nova/tmp
##########
[root@controller ~]# su -s /bin/sh -c "nova-manage api_db sync" nova
[root@controller ~]# su -s /bin/sh -c "nova-manage db sync" nova
[root@controller ~]# systemctl enable openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
[root@controller ~]# systemctl start openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
[root@controller ~]# nova service-list
[root@compute ~]# yum install -y openstack-nova-compute
/etc/nova/nova.conf
文件并完成下面的操作[root@compute ~]# vim /etc/nova/nova.conf
##########
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
use_neutron = True
firewall_driver = nova.virt.firewall.NoopFirewallDriver
my_ip = 192.168.117.131
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = NOVA_PASS
[vnc]
enabled = True
vncserver_listen = 0.0.0.0
vncserver_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
##########
[root@compute ~]# egrep -c '(vmx|svm)' /proc/cpuinfo
#如果返回值是 1 或更大,就不需要额外的配置
#如果返回值是 0 则:
在 /etc/nova/nova.conf 文件的做如下编辑
[libvirt]
……略……
virt_type = qemu
[root@compute ~]# systemctl enable libvirtd.service openstack-nova-compute.service
[root@compute ~]# systemctl start libvirtd.service openstack-nova-compute.service
[root@controller ~]# nova service-list
MariaDB [(none)]> create database neutron;
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'NEUTRON_DBPASS';
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'NEUTRON_DBPASS';
[root@controller ~]# openstack user create --domain default --password NEUTRON_PASS neutron
[root@controller ~]# openstack role add --project service --user neutron admin
[root@controller ~]# openstack service create --name neutron --description "OpenStack Networking" network
[root@controller ~]# openstack endpoint create --region RegionOne network public http://controller:9696
[root@controller ~]# openstack endpoint create --region RegionOne network internal http://controller:9696
[root@controller ~]# openstack endpoint create --region RegionOne network admin http://controller:9696
[root@controller ~]# yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
/etc/neutron/neutron.conf
文件并完成如下操作[root@controller ~]# vim /etc/neutron/neutron.conf
##########
[DEFAULT]
core_plugin = ml2
service_plugins =
rpc_backend = rabbit
auth_strategy = keystone
notify_nova_on_port_status_changes = True
notify_nova_on_port_data_changes = True
[database]
connection = mysql+pymysql://neutron:NEUTRON_DBPASS@controller/neutron
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = NEUTRON_PASS
[nova]
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = NOVA_PASS
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
##########
/etc/neutron/plugins/ml2/ml2_conf.ini
文件并完成以下操作[root@controller ~]# vim /etc/neutron/plugins/ml2/ml2_conf.ini
##########
[ml2]
type_drivers = flat,vlan
tenant_network_types =
mechanism_drivers = linuxbridge
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[securitygroup]
enable_ipset = True
##########
/etc/neutron/plugins/ml2/linuxbridge_agent.ini
文件并且完成以下操作[root@controller ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
##########
[linux_bridge]
physical_interface_mappings = provider:ens33
[vxlan]
enable_vxlan = False
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
##########
/etc/neutron/dhcp_agent.ini
文件并完成下面的操作[root@controller ~]# vim /etc/neutron/dhcp_agent.ini
##########
[DEFAULT]
interface_driver = neutron.agent.linux.interface.BridgeInterfaceDriver
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = True
##########
/etc/neutron/metadata_agent.ini
文件并完成以下操作[root@controller ~]# vim /etc/neutron/metadata_agent.ini
##########
[DEFAULT]
nova_metadata_ip = controller
metadata_proxy_shared_secret = METADATA_SECRET
##########
/etc/nova/nova.conf
文件并完成以下操作[root@controller ~]# vim /etc/nova/nova.conf
##########
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = NEUTRON_PASS
service_metadata_proxy = True
metadata_proxy_shared_secret = METADATA_SECRET
##########
[root@controller ~]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
[root@controller ~]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
--config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
[root@controller ~]# systemctl restart openstack-nova-api.service
[root@controller ~]# systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
[root@controller ~]# systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
[root@compute ~]# yum install -y openstack-neutron-linuxbridge ebtables ipset
/etc/neutron/neutron.conf
文件并完成如下操作[root@compute ~]# vim /etc/neutron/neutron.conf
##########
[DEFAULT]
rpc_backend = rabbit
auth_strategy = keystone
[oslo_messaging_rabbit]
rabbit_host = controller
rabbit_userid = openstack
rabbit_password = RABBIT_PASS
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = NEUTRON_PASS
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
##########
/etc/neutron/plugins/ml2/linuxbridge_agent.ini
文件并且完成以下操作[root@compute ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
##########
[linux_bridge]
physical_interface_mappings = provider:ens33
[vxlan]
enable_vxlan = False
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
##########
/etc/nova/nova.conf
文件并完成下面的操作[root@compute ~]# vim /etc/nova/nova.conf
###########
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = NEUTRON_PASS
##########
[root@compute ~]# systemctl restart openstack-nova-compute.service
[root@compute ~]# systemctl enable neutron-linuxbridge-agent.service
[root@compute ~]# systemctl start neutron-linuxbridge-agent.service
[root@controller ~]# neutron agent-list
[root@controller ~]# yum install -y openstack-dashboard
/etc/openstack-dashboard/local_settings
并完成如下操作[root@controller ~]# vim /etc/openstack-dashboard/local_settings
##########
OPENSTACK_HOST = "controller"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
ALLOWED_HOSTS = ['*',]
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'controller:11211',
}
}
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
OPENSTACK_API_VERSIONS = {
"identity": 3,
"volume": 2,
"compute": 2,
}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = 'default'
OPENSTACK_NEUTRON_NETWORK = {
'enable_router': False,
'enable_quotas': False,
'enable_ipv6': False,
'enable_distributed_router': False,
'enable_ha_router': False,
'enable_lb': False,
'enable_firewall': False,
'enable_vpn': False,
'enable_fip_topology_check': False,
}
TIME_ZONE = "Asia/Shanghai"
##########
[root@controller ~]# systemctl restart httpd.service memcached.service
[root@controller ~]# vim /etc/httpd/conf.d/openstack-dashboard.conf
##########
WSGIApplicationGroup %{GLOBAL}
##########