软件 | 版本 |
---|---|
centos | 7.4-1708 |
manager(controller) | 10.XX.XX.50 |
master(compute) | 10.XX.XX.51 |
worker(compute) | 10.XX.XX.52 |
以下操作除了特别说明之外,所有节点都要执行
编辑/etc/hosts
文件
# Append static name-resolution entries for the three cluster nodes
# so they can reach each other by the *.node hostnames used below.
cat >> /etc/hosts <<EOF
10.XX.XX.50 manager.node
10.XX.XX.51 master.node
10.XX.XX.52 worker.node
EOF
# Add public DNS resolvers so package mirrors resolve.
# NOTE(review): NetworkManager may rewrite /etc/resolv.conf on
# restart — confirm persistence on this distro.
cat >> /etc/resolv.conf <<EOF
nameserver 8.8.8.8
nameserver 8.8.4.4
EOF
关闭防火墙
# Stop firewalld now and keep it off across reboots so the OpenStack
# service ports are reachable between nodes.
systemctl stop firewalld
systemctl disable firewalld.service
# Switch SELinux to permissive for the running system (takes effect
# immediately, lost on reboot)...
setenforce 0
# ...and disable it persistently. The pattern is anchored and matches
# any current value: the original 's/SELINUX=enforcing/.../' silently
# did nothing when the file said SELINUX=permissive. The anchor also
# keeps SELINUXTYPE= untouched.
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
使用sestatus
查看selinux状态
为了使永久生效,我把所有节点都进行了重启
[root@manager ~]# sestatus
SELinux status: disabled
只需manager节点执行
# Generate an RSA key pair for root (interactive; accept the default
# path /root/.ssh/id_rsa).
ssh-keygen -t rsa
# Push the public key to every node — including this one — to enable
# password-less SSH from the manager.
for node in 10.XX.XX.50 10.XX.XX.51 10.XX.XX.52; do
  ssh-copy-id -i /root/.ssh/id_rsa.pub "$node"
done
# Write the OpenStack Rocky yum repository definitions (Aliyun mirror).
# Fixes vs. the original:
#  - section id "[qume-kvm]" was a typo, renamed to "[qemu-kvm]" to
#    match its name= field;
#  - removed the stray space after "baseurl=" in the kvm-common repo,
#    which yum would otherwise treat as an empty/invalid URL.
# The heredoc delimiter is quoted so the content is written literally.
cat <<'EOF' > /etc/yum.repos.d/openstack.repo
[openstack-rocky]
name=openstack-rocky
baseurl=https://mirrors.aliyun.com/centos/7/cloud/x86_64/openstack-rocky/
enabled=1
gpgcheck=0

[qemu-kvm]
name=qemu-kvm
baseurl=https://mirrors.aliyun.com/centos/7/virt/x86_64/kvm-common/
enabled=1
gpgcheck=0
EOF
可在manager节点创建后将该文件分发到所有其它节点
[root@manager nova]# scp /etc/yum.repos.d/openstack.repo master.node:/etc/yum.repos.d/
openstack.repo 100% 249 314.6KB/s 00:00
[root@manager nova]# scp /etc/yum.repos.d/openstack.repo worker.node:/etc/yum.repos.d/
openstack.repo
这个安装需要centos
系统中自带的CentOS-Base.repo
仓库
[root@manager yum.repos.d]# yum install centos-release-openstack-rocky
然后执行
[root@manager yum.repos.d]# yum upgrade
*
所有整个安装过程一共需要3个repo文件
# centos7.repo指向的是本地的ISO镜像通过mount挂载后的目录
-rw-r--r--. 1 root root 75 Feb 19 12:04 centos7.repo
-rw-r--r-- 1 root root 1664 Sep 5 21:05 CentOS-Base.repo
-rw-r--r-- 1 root root 249 Feb 19 16:10 openstack.repo
yum install -y python-openstackclient
yum install -y openstack-selinux
在安装openstack-selinux过程中如果出现
......
--> Processing Dependency: container-selinux for package: openstack-selinux-0.8.14-1.el7.noarch
---> Package python-IPy.noarch 0:0.75-6.el7 will be installed
---> Package setools-libs.x86_64 0:3.3.8-1.1.el7 will be installed
--> Finished Dependency Resolution
Error: Package: openstack-selinux-0.8.14-1.el7.noarch (openstack-rocky)
Requires: container-selinux
You could try using --skip-broken to work around the problem
You could try running: rpm -Va --nofiles --nodigest
解决方法:
yum install http://vault.centos.org/centos/7.3.1611/extras/x86_64/Packages/container-selinux-2.9-4.el7.noarch.rpm
只需要在manager节点操作
# MariaDB server plus the Python MySQL driver used by the OpenStack
# services to reach the database.
yum install -y mariadb mariadb-server python2-PyMySQL
# Drop-in config: bind to the management IP (manager node), InnoDB as
# the default engine, and UTF-8 character set as required by the
# OpenStack installation guide.
cat <<EOF > /etc/my.cnf.d/openstack.cnf
[mysqld]
bind-address = 10.XX.XX.50
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
EOF
[root@manager my.cnf.d]# systemctl start mariadb
[root@manager my.cnf.d]# systemctl status mariadb
● mariadb.service - MariaDB 10.1 database server
Loaded: loaded (/usr/lib/systemd/system/mariadb.service; disabled; vendor preset: disabled)
Active: active (running) since Wed 2020-02-19 16:38:40 CST; 8s ago
Process: 15185 ExecStartPost=/usr/libexec/mysql-check-upgrade (code=exited, status=0/SUCCESS)
Process: 14999 ExecStartPre=/usr/libexec/mysql-prepare-db-dir %n (code=exited, status=0/SUCCESS)
Process: 14974 ExecStartPre=/usr/libexec/mysql-check-socket (code=exited, status=0/SUCCESS)
Main PID: 15157 (mysqld)
Status: "Taking your SQL requests now..."
CGroup: /system.slice/mariadb.service
└─15157 /usr/libexec/mysqld --basedir=/usr
Feb 19 16:38:40 manager.node mysql-prepare-db-dir[14999]: See the MariaDB Knowledgebase at http://mariadb.com/kb or the
Feb 19 16:38:40 manager.node mysql-prepare-db-dir[14999]: MySQL manual for more instructions.
Feb 19 16:38:40 manager.node mysql-prepare-db-dir[14999]: Please report any problems at http://mariadb.org/jira
Feb 19 16:38:40 manager.node mysql-prepare-db-dir[14999]: The latest information about MariaDB is available at http://mariadb.org/.
Feb 19 16:38:40 manager.node mysql-prepare-db-dir[14999]: You can find additional information about the MySQL part at:
Feb 19 16:38:40 manager.node mysql-prepare-db-dir[14999]: http://dev.mysql.com
Feb 19 16:38:40 manager.node mysql-prepare-db-dir[14999]: Consider joining MariaDB's strong and vibrant community:
Feb 19 16:38:40 manager.node mysql-prepare-db-dir[14999]: https://mariadb.org/get-involved/
Feb 19 16:38:40 manager.node mysqld[15157]: 2020-02-19 16:38:40 140376690026688 [Note] /usr/libexec/mysqld (mysqld 10.1.20-MariaDB) starting as process 15157 ...
Feb 19 16:38:40 manager.node systemd[1]: Started MariaDB 10.1 database server.
[root@manager my.cnf.d]# systemctl enable mariadb
Created symlink from /etc/systemd/system/multi-user.target.wants/mariadb.service to /usr/lib/systemd/system/mariadb.service.
运行mysql_secure_installation
指令
[root@manager my.cnf.d]# mysql_secure_installation
NOTE: RUNNING ALL PARTS OF THIS SCRIPT IS RECOMMENDED FOR ALL MariaDB
SERVERS IN PRODUCTION USE! PLEASE READ EACH STEP CAREFULLY!
......
Enter current password for root (enter for none): #直接回车了
OK, successfully used password, moving on...
......
Set root password? [Y/n] y
New password: #输入新密码 123456
Re-enter new password: #再次输入
Password updated successfully!
Reloading privilege tables..
... Success!
......
Remove anonymous users? [Y/n] y #以下几项交互根据实际情况设置
......
Disallow root login remotely? [Y/n] n
......
Remove test database and access to it? [Y/n] y
......
Reload privilege tables now? [Y/n] y
......
All done! If you've completed all of the above steps, your MariaDB
installation should now be secure.
Thanks for using MariaDB!
[root@manager my.cnf.d]# mysql -u root -p
Enter password:
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 10
Server version: 10.1.20-MariaDB MariaDB Server
Copyright (c) 2000, 2016, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> show databases;
+--------------------+
| Database |
+--------------------+
| information_schema |
| mysql |
| performance_schema |
+--------------------+
3 rows in set (0.00 sec)
在manager节点执行
[root@manager my.cnf.d]# yum -y install rabbitmq-server
添加开机启动
[root@manager my.cnf.d]# systemctl start rabbitmq-server
[root@manager my.cnf.d]# systemctl status rabbitmq-server
● rabbitmq-server.service - RabbitMQ broker
Loaded: loaded (/usr/lib/systemd/system/rabbitmq-server.service; disabled; vendor preset: disabled)
Active: active (running) since Wed 2020-02-19 17:02:31 CST; 12s ago
Main PID: 15359 (beam.smp)
Status: "Initialized"
CGroup: /system.slice/rabbitmq-server.service
├─15359 /usr/lib64/erlang/erts-8.3.5.3/bin/beam.smp -W w -A 64 -P 1048576 -t 5000000 -stbt db -zdbbl 128000 -K true -- -root /usr/lib64/erlang -progname erl -- -home /var/lib/ra...
├─15552 erl_child_setup 1024
├─15564 inet_gethost 4
└─15565 inet_gethost 4
Feb 19 17:02:28 manager.node systemd[1]: Starting RabbitMQ broker...
Feb 19 17:02:29 manager.node rabbitmq-server[15359]: RabbitMQ 3.6.16. Copyright (C) 2007-2018 Pivotal Software, Inc.
Feb 19 17:02:29 manager.node rabbitmq-server[15359]: ## ## Licensed under the MPL. See http://www.rabbitmq.com/
Feb 19 17:02:29 manager.node rabbitmq-server[15359]: ## ##
Feb 19 17:02:29 manager.node rabbitmq-server[15359]: ########## Logs: /var/log/rabbitmq/[email protected]
Feb 19 17:02:29 manager.node rabbitmq-server[15359]: ###### ## /var/log/rabbitmq/[email protected]
Feb 19 17:02:29 manager.node rabbitmq-server[15359]: ##########
Feb 19 17:02:29 manager.node rabbitmq-server[15359]: Starting broker...
Feb 19 17:02:31 manager.node systemd[1]: Started RabbitMQ broker.
Feb 19 17:02:31 manager.node rabbitmq-server[15359]: completed with 0 plugins.
[root@manager my.cnf.d]# systemctl enable rabbitmq-server
Created symlink from /etc/systemd/system/multi-user.target.wants/rabbitmq-server.service to /usr/lib/systemd/system/rabbitmq-server.service.
增加用户和访问权限
[root@manager my.cnf.d]# rabbitmqctl add_user openstack 123456
Creating user "openstack"
[root@manager my.cnf.d]# rabbitmqctl set_permissions openstack ".*" ".*" ".*"
Setting permissions for user "openstack" in vhost "/"
# [root@manager my.cnf.d]# rabbitmqctl set_permissions -p "/" openstack ".*" ".*" ".*"
# Setting permissions for user "openstack" in vhost "/"
启动web管理
[root@manager my.cnf.d]# rabbitmq-plugins list
Configured: E = explicitly enabled; e = implicitly enabled
| Status: * = running on rabbit@manager
|/
[ ] amqp_client 3.6.16
[ ] cowboy 1.0.4
[ ] cowlib 1.0.2
[ ] rabbitmq_amqp1_0 3.6.16
[ ] rabbitmq_auth_backend_ldap 3.6.16
[ ] rabbitmq_auth_mechanism_ssl 3.6.16
[ ] rabbitmq_consistent_hash_exchange 3.6.16
[ ] rabbitmq_event_exchange 3.6.16
[ ] rabbitmq_federation 3.6.16
[ ] rabbitmq_federation_management 3.6.16
[ ] rabbitmq_jms_topic_exchange 3.6.16
[ ] rabbitmq_management 3.6.16
[ ] rabbitmq_management_agent 3.6.16
[ ] rabbitmq_management_visualiser 3.6.16
[ ] rabbitmq_mqtt 3.6.16
[ ] rabbitmq_random_exchange 3.6.16
[ ] rabbitmq_recent_history_exchange 3.6.16
[ ] rabbitmq_sharding 3.6.16
[ ] rabbitmq_shovel 3.6.16
[ ] rabbitmq_shovel_management 3.6.16
[ ] rabbitmq_stomp 3.6.16
[ ] rabbitmq_top 3.6.16
[ ] rabbitmq_tracing 3.6.16
[ ] rabbitmq_trust_store 3.6.16
[ ] rabbitmq_web_dispatch 3.6.16
[ ] rabbitmq_web_mqtt 3.6.16
[ ] rabbitmq_web_mqtt_examples 3.6.16
[ ] rabbitmq_web_stomp 3.6.16
[ ] rabbitmq_web_stomp_examples 3.6.16
[ ] sockjs 0.3.4
[root@manager my.cnf.d]# rabbitmq-plugins enable rabbitmq_management
The following plugins have been enabled:
amqp_client
cowlib
cowboy
rabbitmq_web_dispatch
rabbitmq_management_agent
rabbitmq_management
Applying plugin configuration to rabbit@manager... started 6 plugins.
[root@manager my.cnf.d]# systemctl restart rabbitmq-server.service
[root@manager ~]# rabbitmq-plugins list
Configured: E = explicitly enabled; e = implicitly enabled
| Status: * = running on rabbit@manager
|/
[e*] amqp_client 3.6.16
[e*] cowboy 1.0.4
[e*] cowlib 1.0.2
[ ] rabbitmq_amqp1_0 3.6.16
[ ] rabbitmq_auth_backend_ldap 3.6.16
[ ] rabbitmq_auth_mechanism_ssl 3.6.16
[ ] rabbitmq_consistent_hash_exchange 3.6.16
[ ] rabbitmq_event_exchange 3.6.16
[ ] rabbitmq_federation 3.6.16
[ ] rabbitmq_federation_management 3.6.16
[ ] rabbitmq_jms_topic_exchange 3.6.16
[E*] rabbitmq_management 3.6.16
[e*] rabbitmq_management_agent 3.6.16
[ ] rabbitmq_management_visualiser 3.6.16
[ ] rabbitmq_mqtt 3.6.16
[ ] rabbitmq_random_exchange 3.6.16
[ ] rabbitmq_recent_history_exchange 3.6.16
[ ] rabbitmq_sharding 3.6.16
[ ] rabbitmq_shovel 3.6.16
[ ] rabbitmq_shovel_management 3.6.16
[ ] rabbitmq_stomp 3.6.16
[ ] rabbitmq_top 3.6.16
[ ] rabbitmq_tracing 3.6.16
[ ] rabbitmq_trust_store 3.6.16
[e*] rabbitmq_web_dispatch 3.6.16
[ ] rabbitmq_web_mqtt 3.6.16
[ ] rabbitmq_web_mqtt_examples 3.6.16
[ ] rabbitmq_web_stomp 3.6.16
[ ] rabbitmq_web_stomp_examples 3.6.16
[ ] sockjs 0.3.4
浏览器地址访问:http://10.XX.XX.50:15672/
使用guest/guest
可以正常登陆
manager节点执行
[root@manager rabbitmq]# yum -y install memcached
[root@manager rabbitmq]# yum -y install python-memcached #未验证是否必需
修改配置文件,在最后一行加上管理节点主机名
[root@manager sysconfig]# vim /etc/sysconfig/memcached
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l 127.0.0.1,::1,manager.node"
设置开机启动
[root@manager sysconfig]# systemctl start memcached
[root@manager sysconfig]# systemctl status memcached
● memcached.service - memcached daemon
Loaded: loaded (/usr/lib/systemd/system/memcached.service; disabled; vendor preset: disabled)
Active: active (running) since Wed 2020-02-19 18:25:52 CST; 8s ago
Main PID: 24325 (memcached)
CGroup: /system.slice/memcached.service
└─24325 /usr/bin/memcached -p 11211 -u memcached -m 64 -c 1024 -l 127.0.0.1,::1,manager.node
Feb 19 18:25:52 manager.node systemd[1]: Started memcached daemon.
Feb 19 18:25:52 manager.node systemd[1]: Starting memcached daemon...
[root@manager sysconfig]# systemctl enable memcached
Created symlink from /etc/systemd/system/multi-user.target.wants/memcached.service to /usr/lib/systemd/system/memcached.service.
manager节点执行
[root@manager yum.repos.d]# yum install etcd
修改配置文件
[root@manager etcd]# vim /etc/etcd/etcd.conf
#[Member]
#ETCD_CORS=""
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_WAL_DIR=""
ETCD_LISTEN_PEER_URLS="http://10.XX.XX.50:2380" #放开
ETCD_LISTEN_CLIENT_URLS="http://10.XX.XX.50:2379"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
ETCD_NAME="manager.node"
#ETCD_SNAPSHOT_COUNT="100000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_QUOTA_BACKEND_BYTES="0"
#ETCD_MAX_REQUEST_BYTES="1572864"
#ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
#ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
#ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
#
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://10.XX.XX.50:2380" #放开
ETCD_ADVERTISE_CLIENT_URLS="http://10.XX.XX.50:2379"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#ETCD_DISCOVERY_SRV=""
ETCD_INITIAL_CLUSTER="manager.node=http://10.XX.XX.50:2380" #注意=号前
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_STRICT_RECONFIG_CHECK="true"
#ETCD_ENABLE_V2="true"
#
设置开机启动
[root@manager etcd]# systemctl start etcd
[root@manager etcd]# systemctl status etcd
● etcd.service - Etcd Server
Loaded: loaded (/usr/lib/systemd/system/etcd.service; disabled; vendor preset: disabled)
Active: active (running) since Wed 2020-02-19 19:35:07 CST; 6s ago
Main PID: 32213 (etcd)
CGroup: /system.slice/etcd.service
└─32213 /usr/bin/etcd --name=manager.node --data-dir=/var/lib/etcd/default.etcd --listen-client-urls=http://10.180.249.50:2379
Feb 19 19:35:07 manager.node etcd[32213]: a213be42bb9de967 received MsgVoteResp from a213be42bb9de967 at term 2
Feb 19 19:35:07 manager.node etcd[32213]: a213be42bb9de967 became leader at term 2
Feb 19 19:35:07 manager.node etcd[32213]: raft.node: a213be42bb9de967 elected leader a213be42bb9de967 at term 2
Feb 19 19:35:07 manager.node etcd[32213]: setting up the initial cluster version to 3.3
Feb 19 19:35:07 manager.node etcd[32213]: published {Name:manager.node ClientURLs:[http://10.180.249.50:2379]} to cluster 9371c9ccc9a2acb
Feb 19 19:35:07 manager.node etcd[32213]: ready to serve client requests
Feb 19 19:35:07 manager.node etcd[32213]: set the initial cluster version to 3.3
Feb 19 19:35:07 manager.node systemd[1]: Started Etcd Server.
Feb 19 19:35:07 manager.node etcd[32213]: serving insecure client requests on 10.180.249.50:2379, this is strongly discouraged!
Feb 19 19:35:07 manager.node etcd[32213]: enabled capabilities for version 3.3
[root@manager etcd]# systemctl enable etcd
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.
以下都只在manager节点的操作
[root@manager etcd]# mysql -u root -p
Enter password:
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 11
Server version: 10.1.20-MariaDB MariaDB Server
Copyright (c) 2000, 2016, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> CREATE DATABASE keystone;
Query OK, 1 row affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> flush privileges;
yum install openstack-keystone httpd mod_wsgi
yum install openstack-utils #快速修改配置文件工具
进入/etc/keystone
目录,编辑keystone.conf
文件
修改两处
[database]
#
# From oslo.db
#
# If True, SQLite uses synchronous mode. (boolean value)
#sqlite_synchronous = true
# The back end to use for the database. (string value)
# Deprecated group/name - [DEFAULT]/db_backend
#backend = sqlalchemy
# The SQLAlchemy connection string to use to connect to the database. (string
# value)
# Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection
connection = mysql+pymysql://keystone:[email protected]/keystone
[token]
#
# From keystone
#
# The amount of time that a token should remain valid (in seconds). Drastically
# reducing this value may break "long-running" operations that involve multiple
# services to coordinate together, and will force users to authenticate with
# keystone more frequently. Drastically increasing this value will increase the
# number of tokens that will be simultaneously valid. Keystone tokens are also
# bearer tokens, so a shorter duration will also reduce the potential security
# impact of a compromised token. (integer value)
# Minimum value: 0
# Maximum value: 9223372036854775807
#expiration = 3600
# Entry point for the token provider in the `keystone.token.provider`
# namespace. The token provider controls the token construction, validation,
# and revocation operations. Keystone includes `fernet` token provider.
# `fernet` tokens do not need to be persisted at all, but require that you run
# `keystone-manage fernet_setup` (also see the `keystone-manage fernet_rotate`
# command). (string value)
provider = fernet
[root@manager yum.repos.d]# openstack-config --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:[email protected]/keystone
[root@manager yum.repos.d]# openstack-config --set /etc/keystone/keystone.conf token provider fernet
# 查看修改内容
[root@manager yum.repos.d]# egrep -v "^#|^$" /etc/keystone/keystone.conf
[DEFAULT]
[application_credential]
[assignment]
[auth]
[cache]
[catalog]
[cors]
[credential]
[database]
connection = mysql+pymysql://keystone:@[email protected]/keystone
[domain_config]
[endpoint_filter]
[endpoint_policy]
[eventlet_server]
[federation]
[fernet_tokens]
[healthcheck]
[identity]
[identity_mapping]
[ldap]
[matchmaker_redis]
[memcache]
[oauth1]
[oslo_messaging_amqp]
[oslo_messaging_kafka]
[oslo_messaging_notifications]
[oslo_messaging_rabbit]
[oslo_messaging_zmq]
[oslo_middleware]
[oslo_policy]
[policy]
[profiler]
[resource]
[revoke]
[role]
[saml]
[security_compliance]
[shadow_users]
[signing]
[token]
provider = fernet
[tokenless_auth]
[trust]
[unified_limit]
[wsgi]
[root@manager yum.repos.d]# grep '^[a-z]' /etc/keystone/keystone.conf
connection = mysql+pymysql://keystone:@[email protected]/keystone
provider = fernet
[root@manager keystone]# su -s /bin/sh -c "keystone-manage db_sync" keystone
进入数据库查看同步结果,共44张表
MariaDB [(none)]> use keystone
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
MariaDB [keystone]> show tables;
+-----------------------------+
| Tables_in_keystone |
+-----------------------------+
| access_token |
| application_credential |
| application_credential_role |
...
| token |
| trust |
| trust_role |
| user |
| user_group_membership |
| user_option |
| whitelisted_config |
+-----------------------------+
44 rows in set (0.01 sec)
[root@manager keystone]# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
[root@manager keystone]# keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
# --bootstrap-password ks123456 可以自己设置
# --bootstrap-admin-url http://manager.node:5000/v3/ 替换成主机域名
# --bootstrap-internal-url http://manager.node:5000/v3/
# --bootstrap-public-url http://manager.node:5000/v3/
# --bootstrap-region-id RegionOne
[root@manager keystone]# keystone-manage bootstrap --bootstrap-password ks123456 --bootstrap-admin-url http://manager.node:5000/v3/ --bootstrap-internal-url http://manager.node:5000/v3/ --bootstrap-public-url http://manager.node:5000/v3/ --bootstrap-region-id RegionOne
进入/etc/httpd/conf
目录,编辑httpd.conf
文件
## vim /etc/httpd/conf/httpd.conf +95 编辑第95行
#
# ServerName gives the name and port that the server uses to identify itself.
# This can often be determined automatically, but we recommend you specify
# it explicitly to prevent problems during startup.
#
# If your host doesn't have a registered DNS name, enter its IP address here.
#
ServerName manager.node
## 检查
[root@manager yum.repos.d]# cat /etc/httpd/conf/httpd.conf |grep ServerName
# ServerName gives the name and port that the server uses to identify itself.
ServerName manager.node
保存退出,建立文件链接
[root@manager conf.d]# ll /usr/share/keystone/
total 700
-rw-r--r-- 1 root keystone 263 Apr 5 2019 keystone-dist.conf
-rw-r--r-- 1 root keystone 321122 Apr 5 2019 keystone-schema.json
-rw-r--r-- 1 root keystone 353301 Apr 5 2019 keystone-schema.yaml
-rw-r--r-- 1 root keystone 16313 Apr 4 2019 policy.v3cloudsample.json
-rwxr-xr-x 1 root root 9271 Apr 4 2019 sample_data.sh
-rw-r--r-- 1 root keystone 969 Apr 5 2019 wsgi-keystone.conf
[root@manager conf.d]# ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
[root@manager conf.d]# ll /etc/httpd/conf.d/
total 16
-rw-r--r-- 1 root root 2926 Aug 8 2019 autoindex.conf
-rw-r--r-- 1 root root 366 Aug 8 2019 README
-rw-r--r-- 1 root root 1252 Aug 6 2019 userdir.conf
-rw-r--r-- 1 root root 824 Aug 6 2019 welcome.conf
lrwxrwxrwx 1 root root 38 Feb 20 11:59 wsgi-keystone.conf -> /usr/share/keystone/wsgi-keystone.conf
使服务开机启动
[root@manager conf.d]# systemctl start httpd
[root@manager conf.d]# systemctl enable httpd
Created symlink from /etc/systemd/system/multi-user.target.wants/httpd.service to /usr/lib/systemd/system/httpd.service.
创建管理员用户admin的系统环境变量
[root@manager conf.d]# export OS_USERNAME=admin
# keystone-manage bootstrap指令中的密码
[root@manager conf.d]# export OS_PASSWORD=ks123456
[root@manager conf.d]# export OS_PROJECT_NAME=admin
[root@manager conf.d]# export OS_USER_DOMAIN_NAME=Default
[root@manager conf.d]# export OS_PROJECT_DOMAIN_NAME=Default
[root@manager conf.d]# export OS_AUTH_URL=http://manager.node:5000/v3
[root@manager conf.d]# export OS_IDENTITY_API_VERSION=3
先查看一下project表中的内容
MariaDB [(none)]> use keystone;
MariaDB [keystone]> select * from project ;
+----------------------------------+--------------------------+-------+-----------------------------------------------+---------+--------------------------+-----------+-----------+
| id | name | extra | description | enabled | domain_id | parent_id | is_domain |
+----------------------------------+--------------------------+-------+-----------------------------------------------+---------+--------------------------+-----------+-----------+
| 7e976c7b95aa445abeb3a2e18d8aea76 | admin | {} | Bootstrap project for initializing the cloud. | 1 | default | default | 0 |
| <<keystone.domain.root>> | <<keystone.domain.root>> | {} | | 0 | <<keystone.domain.root>> | NULL | 1 |
| default | Default | {} | The default domain | 1 | <<keystone.domain.root>> | NULL | 1 |
+----------------------------------+--------------------------+-------+-----------------------------------------------+---------+--------------------------+-----------+-----------+
3 rows in set (0.00 sec)
创建个实例(example项目)
[root@manager conf.d]# openstack domain create --description "An Example Domain" example
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | An Example Domain |
| enabled | True |
| id | 18923cd48db5468e8c7742551a722936 |
| name | example |
| tags | [] |
+-------------+----------------------------------+
再次查询
MariaDB [keystone]> select * from project;
+----------------------------------+--------------------------+-------+-----------------------------------------------+---------+--------------------------+-----------+-----------+
| id | name | extra | description | enabled | domain_id | parent_id | is_domain |
+----------------------------------+--------------------------+-------+-----------------------------------------------+---------+--------------------------+-----------+-----------+
| 18923cd48db5468e8c7742551a722936 | example | {} | An Example Domain | 1 | <<keystone.domain.root>> | NULL | 1 |
| 7e976c7b95aa445abeb3a2e18d8aea76 | admin | {} | Bootstrap project for initializing the cloud. | 1 | default | default | 0 |
| <<keystone.domain.root>> | <<keystone.domain.root>> | {} | | 0 | <<keystone.domain.root>> | NULL | 1 |
| default | Default | {} | The default domain | 1 | <<keystone.domain.root>> | NULL | 1 |
+----------------------------------+--------------------------+-------+-----------------------------------------------+---------+--------------------------+-----------+-----------+
在project表中创建名为service的项目
[root@manager conf.d]# openstack project create --domain default --description "Service Project" service
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Service Project |
| domain_id | default |
| enabled | True |
| id | de052169763f41379628e59d7451ec1a |
| is_domain | False |
| name | service |
| parent_id | default |
| tags | [] |
+-------------+----------------------------------+
在project表中创建名为myproject项目
[root@manager conf.d]# openstack project create --domain default --description "Demo Project" myproject
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Demo Project |
| domain_id | default |
| enabled | True |
| id | 838f4cb8a295415697867ec033743b63 |
| is_domain | False |
| name | myproject |
| parent_id | default |
| tags | [] |
+-------------+----------------------------------+
查询project表
MariaDB [keystone]> select * from project;
+----------------------------------+--------------------------+-------+-----------------------------------------------+---------+--------------------------+-----------+-----------+
| id | name | extra | description | enabled | domain_id | parent_id | is_domain |
+----------------------------------+--------------------------+-------+-----------------------------------------------+---------+--------------------------+-----------+-----------+
| 18923cd48db5468e8c7742551a722936 | example | {} | An Example Domain | 1 | <<keystone.domain.root>> | NULL | 1 |
| 7e976c7b95aa445abeb3a2e18d8aea76 | admin | {} | Bootstrap project for initializing the cloud. | 1 | default | default | 0 |
| 838f4cb8a295415697867ec033743b63 | myproject | {} | Demo Project | 1 | default | default | 0 |
| <<keystone.domain.root>> | <<keystone.domain.root>> | {} | | 0 | <<keystone.domain.root>> | NULL | 1 |
| de052169763f41379628e59d7451ec1a | service | {} | Service Project | 1 | default | default | 0 |
| default | Default | {} | The default domain | 1 | <<keystone.domain.root>> | NULL | 1 |
+----------------------------------+--------------------------+-------+-----------------------------------------------+---------+--------------------------+-----------+-----------+
创建myuser用户(local_user表增加myuser用户)
# 以下为明文指定密码(密码123456,用户名myuser)
# openstack user create --domain default --password=123456 myuser
# 以下为交互式输入密码
[root@manager conf.d]# openstack user create --domain default --password-prompt myuser
User Password:
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | 58701e138476443c8571b38e80540451 |
| name | myuser |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
创建myrole角色(在role表创建myrole角色)
[root@manager conf.d]# openstack role create myrole
+-----------+----------------------------------+
| Field | Value |
+-----------+----------------------------------+
| domain_id | None |
| id | 4d4b5a24ed924c589f5fec16920ceaba |
| name | myrole |
+-----------+----------------------------------+
查询用户表和角色表
MariaDB [keystone]> select * from local_user;
+----+----------------------------------+-----------+--------+-------------------+----------------+
| id | user_id | domain_id | name | failed_auth_count | failed_auth_at |
+----+----------------------------------+-----------+--------+-------------------+----------------+
| 1 | 81ae7b2a10444ac49df41a6e74d5fbf0 | default | admin | 0 | NULL |
| 2 | 58701e138476443c8571b38e80540451 | default | myuser | 0 | NULL |
+----+----------------------------------+-----------+--------+-------------------+----------------+
MariaDB [keystone]> select * from role;
+----------------------------------+--------+-------+-----------+
| id | name | extra | domain_id |
+----------------------------------+--------+-------+-----------+
| 1651aa3620e248478494667314cd80e9 | admin | {} | <<null>> |
| 4d4b5a24ed924c589f5fec16920ceaba | myrole | {} | <<null>> |
| 80f24d6628434a2cbf2a798916d30375 | reader | {} | <<null>> |
| ea483251dfcc481196132e1e5f583355 | member | {} | <<null>> |
+----------------------------------+--------+-------+-----------+
将myrole角色添加到myproject项目中和myuser用户组中
[root@manager conf.d]# openstack role add --project myproject --user myuser myrole
先去除环境变量
[root@manager conf.d]# unset OS_AUTH_URL OS_PASSWORD
管理员用户获取认证token
[root@manager conf.d]# openstack --os-auth-url http://manager.node:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue
Password: #管理员密码 keystone-manage bootstrap指令中的密码
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires | 2020-02-20T07:22:39+0000 |
| id | gAAAAABeTiWvqlnFdNMYffCT83fdhJAnshZzLTz_VNiaSPtBH5WNqKXLSIG4ZOthOXAYTJ7_0ww__w7w_sc6K2Wy0_LYExvhOMLCxny1FPXlXOLBOPpZAGAowxPujQmI6GKvTXDgYEXYJSgM6LbKFY4pTJH-Tdt8lG1KHV1HuB7TxcxWH7tVruk |
| project_id | 7e976c7b95aa445abeb3a2e18d8aea76 |
| user_id | 81ae7b2a10444ac49df41a6e74d5fbf0 |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
普通用户myuser获取认证token
[root@manager conf.d]# openstack --os-auth-url http://manager.node:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name myproject --os-username myuser token issue
Password: # 用户密码,创建的时候指定的
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires | 2020-02-20T07:25:34+0000 |
| id | gAAAAABeTiZep3JSlucLnAo5dZCseYzkv5ZkvLRF2amgJwY1HiF5q1k76k2oNpPqfD3RJZMtFqQBB9CZJcp0quqmieZp1-nWwvviEwa58nGs7WxEu2xi6h4MimpjPjr5dp-yc2GVLnn1Z2-7MjKZblOg1JYxZgOiinB3hLEkZgyuS_cks3Ng46Q |
| project_id | 838f4cb8a295415697867ec033743b63 |
| user_id | 58701e138476443c8571b38e80540451 |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
创建使用OpenStack客户端环境脚本(简化操作)
创建管理员用户的openrc
文件
[root@manager conf.d]# touch admin-openrc.sh
[root@manager conf.d]# vim admin-openrc.sh
# admin-openrc.sh — OpenStack client credentials for the admin user.
# Source this file before running "openstack" CLI commands as admin.
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
# Password chosen via "keystone-manage bootstrap --bootstrap-password".
export OS_PASSWORD=ks123456
export OS_AUTH_URL=http://manager.node:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
[root@manager conf.d]# source admin-openrc.sh
[root@manager conf.d]# openstack token issue
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires | 2020-02-20T07:38:57+0000 |
| id | gAAAAABeTimBIMReZU5h_I_CRtYmw8mXejsMW6ozB3zwpXErVO65HwNU-SNnnam16Wfiw6b32_E-fbdZZtszPQcpRnIlQIzSqunT9rHiG_VO5wHWvNsBmJwuGuoELHgYtpwY4KGY7FW18t6NHluidEWh_pVTp61FuAl9C2f8QuuNaHrD46rgq00 |
| project_id | 7e976c7b95aa445abeb3a2e18d8aea76 |
| user_id | 81ae7b2a10444ac49df41a6e74d5fbf0 |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
普通用户myuser的openrc
文件
[root@manager conf.d]# touch /opt/myuser-openrc
[root@manager conf.d]# vim /opt/myuser-openrc
[root@manager conf.d]# source /opt/myuser-openrc
[root@manager conf.d]# openstack token issue
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires | 2020-02-20T07:41:03+0000 |
| id | gAAAAABeTin_FjUCRqepL_1_Oqu0YpM9t1i7sJPhiHXysC8-GTZCmsA2hIK1pns0R-h5de9f4LGjgFCq6W3K1amcW31hxA6Gas3Qy5X5Bx6W79fPUnkLU2Ej7BzDdXi0udx9JYmm_fMwYIrRtg6u9BVAKHVbdJFjLnYldkBf9du6Vg0cF1b8Kvg |
| project_id | 838f4cb8a295415697867ec033743b63 |
| user_id | 58701e138476443c8571b38e80540451 |
+------------+-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
说明脚本名称和位置没有要求。
[root@manager conf.d]# source admin-openrc.sh
[root@manager conf.d]# openstack user list
+----------------------------------+--------+
| ID | Name |
+----------------------------------+--------+
| 58701e138476443c8571b38e80540451 | myuser |
| 81ae7b2a10444ac49df41a6e74d5fbf0 | admin |
+----------------------------------+--------+
keystone服务的安装和验证完成。
[root@manager my.cnf.d]# mysql -uroot -p
Enter password:
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 25
Server version: 10.1.20-MariaDB MariaDB Server
Copyright (c) 2000, 2016, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> CREATE DATABASE glance;
Query OK, 1 row affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.01 sec)
MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.00 sec)
创建glance用户
# 切换为管理员的系统环境
[root@manager conf.d]# source /opt/admin-openrc.sh
[root@manager conf.d]# openstack user create --domain default --password-prompt glance
User Password: #自己设定 gl123456
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | b3b0c86c16b94b7b8e2c38723755111b |
| name | glance |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
在keystone上将glance用户添加为service项目的admin角色(权限)
[root@manager conf.d]# openstack role add --project service --user glance admin
创建glance镜像服务的实体
[root@manager conf.d]# openstack service create --name glance --description "OpenStack Image" image
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Image |
| enabled | True |
| id | 3a3e875054c54fc3933a205f0e2d0db6 |
| name | glance |
| type | image |
+-------------+----------------------------------+
[root@manager conf.d]# openstack service list
+----------------------------------+----------+----------+
| ID | Name | Type |
+----------------------------------+----------+----------+
| 3a3e875054c54fc3933a205f0e2d0db6 | glance | image |
| e328ab7d24bb4f08ac8ef100972ff57b | keystone | identity |
+----------------------------------+----------+----------+
创建镜像服务的 API 端点(endpoint)
[root@manager conf.d]# openstack endpoint create --region RegionOne image public http://manager.node:9292
[root@manager conf.d]# openstack endpoint create --region RegionOne image internal http://manager.node:9292
[root@manager conf.d]# openstack endpoint create --region RegionOne image admin http://manager.node:9292
[root@manager conf.d]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
| ID | Region | Service Name | Service Type | Enabled | Interface | URL |
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
| 02c21f1f93354043b44c176e9865c17e | RegionOne | keystone | identity | True | internal | http://manager.node:5000/v3/ |
| 07309e14f0d143f4a3ae80244f40e0af | RegionOne | keystone | identity | True | admin | http://manager.node:5000/v3/ |
| 7bebccdace6148d680d121669319dd7f | RegionOne | keystone | identity | True | public | http://manager.node:5000/v3/ |
| 82eaa8dc56554019a40d650b08ada79a | RegionOne | glance | image | True | admin | http://manager.node:9292 |
| d21aeef9404f4ca08c2f62dff6cbbc1a | RegionOne | glance | image | True | internal | http://manager.node:9292 |
| e2b6fd164e1d4f4993f641db9029fc81 | RegionOne | glance | image | True | public | http://manager.node:9292 |
+----------------------------------+-----------+--------------+--------------+---------+-----------+------------------------------+
在keystone上注册完成
yum install openstack-glance
进入/etc/glance
目录
修改glance-api.conf
文件
这个文件一共改4个地方
[database]
......
# The SQLAlchemy connection string to use to connect to the database. (string
# value)
# Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection
connection = mysql+pymysql://glance:123456@manager.node/glance
[keystone_authtoken]
......
# Complete "public" Identity API endpoint. This endpoint should not be an
# "admin" endpoint, as it should be accessible by all end users. Unauthenticated
# clients are redirected to this endpoint to authenticate. Although this
# endpoint should ideally be unversioned, client support in the wild varies. If
# you're using a versioned v2 endpoint here, then this should *not* be the same
# endpoint the service user utilizes for validating tokens, because normal end
# users may not be able to reach that endpoint. (string value)
# Deprecated group/name - [keystone_authtoken]/auth_uri
#www_authenticate_uri =
www_authenticate_uri = http://manager.node:5000
auth_url = http://manager.node:5000
memcached_servers = manager.node:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = gl123456
[paste_deploy]
......
# For example, if your paste section name in the paste configuration
# file is [pipeline:glance-api-keystone], set ``flavor`` to
# ``keystone``.
#
# Possible values:
# * String value representing a partial pipeline name.
#
# Related Options:
# * config_file
#
# (string value)
flavor = keystone
[glance_store]
#
# From glance.store
#
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
# Quick edit (快速修改): applies the same glance-api.conf settings as the
# manual edits above via openstack-config (crudini wrapper from openstack-utils).
# DB user/password must match the GRANT issued earlier (glance / 123456).
openstack-config --set /etc/glance/glance-api.conf database connection mysql+pymysql://glance:123456@manager.node/glance
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken www_authenticate_uri http://manager.node:5000
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_url http://manager.node:5000
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken memcached_servers manager.node:11211
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken auth_type password
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_domain_name Default
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken user_domain_name Default
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken project_name service
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken username glance
# Keystone password of the glance service user created above.
openstack-config --set /etc/glance/glance-api.conf keystone_authtoken password gl123456
openstack-config --set /etc/glance/glance-api.conf paste_deploy flavor keystone
openstack-config --set /etc/glance/glance-api.conf glance_store stores file,http
openstack-config --set /etc/glance/glance-api.conf glance_store default_store file
openstack-config --set /etc/glance/glance-api.conf glance_store filesystem_store_datadir /var/lib/glance/images/
[root@manager opt]# egrep -v "^#|^$" /etc/glance/glance-api.conf
[root@manager opt]# grep '^[a-z]' /etc/glance/glance-api.conf
修改glance-registry.conf
文件
这个文件一共改3个地方
[database]
......
# The SQLAlchemy connection string to use to connect to the database. (string
# value)
# Deprecated group/name - [DEFAULT]/sql_connection
# Deprecated group/name - [DATABASE]/sql_connection
# Deprecated group/name - [sql]/connection
connection = mysql+pymysql://glance:123456@manager.node/glance
[keystone_authtoken]
#
# From keystonemiddleware.auth_token
#
www_authenticate_uri = http://manager.node:5000
auth_url = http://manager.node:5000
memcached_servers = manager.node:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = gl123456
[paste_deploy]
......
# For example, if your paste section name in the paste configuration
# file is [pipeline:glance-api-keystone], set ``flavor`` to
# ``keystone``.
#
# Possible values:
# * String value representing a partial pipeline name.
#
# Related Options:
# * config_file
#
# (string value)
flavor = keystone
# Quick edit (快速修改): applies the same glance-registry.conf settings as the
# manual edits above via openstack-config (crudini wrapper from openstack-utils).
# DB user/password must match the GRANT issued earlier (glance / 123456).
openstack-config --set /etc/glance/glance-registry.conf database connection mysql+pymysql://glance:123456@manager.node/glance
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken www_authenticate_uri http://manager.node:5000
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_url http://manager.node:5000
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken memcached_servers manager.node:11211
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken auth_type password
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_domain_name Default
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken user_domain_name Default
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken project_name service
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken username glance
# Keystone password of the glance service user created above.
openstack-config --set /etc/glance/glance-registry.conf keystone_authtoken password gl123456
openstack-config --set /etc/glance/glance-registry.conf paste_deploy flavor keystone
[root@manager opt]# egrep -v "^#|^$" /etc/glance/glance-registry.conf
[root@manager opt]# grep '^[a-z]' /etc/glance/glance-registry.conf
[root@manager glance]# su -s /bin/sh -c "glance-manage db_sync" glance
/usr/lib/python2.7/site-packages/oslo_db/sqlalchemy/enginefacade.py:1352: OsloDBDeprecationWarning: EngineFacade is deprecated; please use oslo_db.sqlalchemy.enginefacade
expire_on_commit=expire_on_commit, _conf=conf)
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
INFO [alembic.runtime.migration] Running upgrade -> liberty, liberty initial
INFO [alembic.runtime.migration] Running upgrade liberty -> mitaka01, add index on created_at and updated_at columns of 'images' table
INFO [alembic.runtime.migration] Running upgrade mitaka01 -> mitaka02, update metadef os_nova_server
INFO [alembic.runtime.migration] Running upgrade mitaka02 -> ocata_expand01, add visibility to images
INFO [alembic.runtime.migration] Running upgrade ocata_expand01 -> pike_expand01, empty expand for symmetry with pike_contract01
INFO [alembic.runtime.migration] Running upgrade pike_expand01 -> queens_expand01
INFO [alembic.runtime.migration] Running upgrade queens_expand01 -> rocky_expand01, add os_hidden column to images table
INFO [alembic.runtime.migration] Running upgrade rocky_expand01 -> rocky_expand02, add os_hash_algo and os_hash_value columns to images table
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
Upgraded database to: rocky_expand02, current revision(s): rocky_expand02
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
Database migration is up to date. No migration needed.
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
INFO [alembic.runtime.migration] Running upgrade mitaka02 -> ocata_contract01, remove is_public from images
INFO [alembic.runtime.migration] Running upgrade ocata_contract01 -> pike_contract01, drop glare artifacts tables
INFO [alembic.runtime.migration] Running upgrade pike_contract01 -> queens_contract01
INFO [alembic.runtime.migration] Running upgrade queens_contract01 -> rocky_contract01
INFO [alembic.runtime.migration] Running upgrade rocky_contract01 -> rocky_contract02
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
Upgraded database to: rocky_contract02, current revision(s): rocky_contract02
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
Database is synced successfully.
查看数据库,一共增加15张表
MariaDB [(none)]> use glance;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
MariaDB [glance]> show tables;
+----------------------------------+
| Tables_in_glance |
+----------------------------------+
| alembic_version |
| image_locations |
| image_members |
| image_properties |
| image_tags |
| images |
| metadef_namespace_resource_types |
| metadef_namespaces |
| metadef_objects |
| metadef_properties |
| metadef_resource_types |
| metadef_tags |
| migrate_version |
| task_info |
| tasks |
+----------------------------------+
15 rows in set (0.00 sec)
[root@manager glance]# systemctl start openstack-glance-api
[root@manager glance]# systemctl enable openstack-glance-api
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-glance-api.service to /usr/lib/systemd/system/openstack-glance-api.service.
[root@manager glance]# systemctl start openstack-glance-registry
[root@manager glance]# systemctl enable openstack-glance-registry
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-glance-registry.service to /usr/lib/systemd/system/openstack-glance-registry.service.
先查看镜像,一开始没有为空
[root@manager glance]# openstack image list
从网上下载一个镜像cirros-0.3.4-x86_64-disk.img
可以参考这个博客中的地址进行下载,openstack cirros镜像
下载完成后上传到manager节点/opt
目录下
镜像启动后,系统登陆的用户名和密码
user:cirros
password:cubswin:)
[root@manager opt]# ll
total 5320
-rw-r--r-- 1 root root 265 Feb 20 14:57 admin-openrc.sh
-rw-r--r-- 1 root root 13287936 Feb 20 16:27 cirros-0.3.4-x86_64-disk.img
-rw-r--r-- 1 root root 268 Feb 20 14:40 myuser-openrc
# 根据需要看是否要执行 source admin-openrc.sh
[root@manager opt]# openstack image create "cirros-abc" --file cirros-0.3.4-x86_64-disk.img --disk-format qcow2 --container-format bare --public
+------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| checksum | 111a24703de72f56ecfb765be8914a33 |
| container_format | bare |
| created_at | 2020-02-20T08:32:29Z |
| disk_format | qcow2 |
| file | /v2/images/fe079fac-e0c8-4cca-b6da-ac8e7031ca9c/file |
| id | fe079fac-e0c8-4cca-b6da-ac8e7031ca9c |
| min_disk | 0 |
| min_ram | 0 |
| name | cirros-abc |
| owner | 7e976c7b95aa445abeb3a2e18d8aea76 |
| properties | os_hash_algo='sha512', os_hash_value='73deb9cc8aefecd76f04d60c467900b217d2bd226c6062baf86f94c645975fe61f87a2b69fda868b70395dd98715a55fe8cf8d2f7993da7776a370ee7030f046', os_hidden='False' |
| protected | False |
| schema | /v2/schemas/image |
| size | 5439032 |
| status | active |
| tags | |
| updated_at | 2020-02-20T08:32:29Z |
| virtual_size | None |
| visibility | public |
+------------------+--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
再次查看
[root@manager opt]# openstack image list
+--------------------------------------+------------+--------+
| ID | Name | Status |
+--------------------------------------+------------+--------+
| fe079fac-e0c8-4cca-b6da-ac8e7031ca9c | cirros-abc | active |
+--------------------------------------+------------+--------+
glance服务安装测试完成。
以下在manager节点(controller)
需要创建4个数据库
[root@manager my.cnf.d]# mysql -uroot -p
Enter password:
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 37
Server version: 10.1.20-MariaDB MariaDB Server
Copyright (c) 2000, 2016, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> CREATE DATABASE nova_api;
Query OK, 1 row affected (0.00 sec)
MariaDB [(none)]> CREATE DATABASE nova;
Query OK, 1 row affected (0.00 sec)
MariaDB [(none)]> CREATE DATABASE nova_cell0;
Query OK, 1 row affected (0.00 sec)
MariaDB [(none)]> CREATE DATABASE placement;
Query OK, 1 row affected (0.00 sec)
赋权操作
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.01 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)
# 用户是placement
MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.00 sec)
[root@manager opt]# source /opt/admin-openrc.sh
[root@manager opt]# openstack user create --domain default --password-prompt nova
User Password: #自己设定 nv123456
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | 340634cdb5fb4661901bd138b33a90bd |
| name | nova |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
将nova用户添加进service项目并配置为admin角色
[root@manager opt]# openstack role add --project service --user nova admin
创建nova计算服务的实体
[root@manager opt]# openstack service create --name nova --description "OpenStack Compute" compute
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Compute |
| enabled | True |
| id | f1f89a6703184381a0b354b7a7a632e7 |
| name | nova |
| type | compute |
+-------------+----------------------------------+
创建计算服务的API端点(endpoint)
[root@manager opt]# openstack endpoint create --region RegionOne compute public http://manager.node:8774/v2.1
......
[root@manager opt]# openstack endpoint create --region RegionOne compute internal http://manager.node:8774/v2.1
......
[root@manager opt]# openstack endpoint create --region RegionOne compute admin http://manager.node:8774/v2.1
......
[root@manager opt]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+-------------------------------+
| ID | Region | Service Name | Service Type | Enabled | Interface | URL |
+----------------------------------+-----------+--------------+--------------+---------+-----------+-------------------------------+
| 02c21f1f93354043b44c176e9865c17e | RegionOne | keystone | identity | True | internal | http://manager.node:5000/v3/ |
| 07309e14f0d143f4a3ae80244f40e0af | RegionOne | keystone | identity | True | admin | http://manager.node:5000/v3/ |
| 12c824b3722d4ca39df4d3548a07243e | RegionOne | nova | compute | True | admin | http://manager.node:8774/v2.1 |
| 23bbc2e29ff64cccb26d84454d2e61a0 | RegionOne | nova | compute | True | internal | http://manager.node:8774/v2.1 |
| 7bebccdace6148d680d121669319dd7f | RegionOne | keystone | identity | True | public | http://manager.node:5000/v3/ |
| 82eaa8dc56554019a40d650b08ada79a | RegionOne | glance | image | True | admin | http://manager.node:9292 |
| abee5035bb9040269a05f001b4a228c2 | RegionOne | nova | compute | True | public | http://manager.node:8774/v2.1 |
| d21aeef9404f4ca08c2f62dff6cbbc1a | RegionOne | glance | image | True | internal | http://manager.node:9292 |
| e2b6fd164e1d4f4993f641db9029fc81 | RegionOne | glance | image | True | public | http://manager.node:9292 |
+----------------------------------+-----------+--------------+--------------+---------+-----------+-------------------------------+
[root@manager opt]# openstack user create --domain default --password-prompt placement
User Password: #自己定义 pm123456
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | 9313e2b3b10d44ea8b2ca35e8629f666 |
| name | placement |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
将placement用户添加进service项目并配置为admin角色
[root@manager opt]# openstack role add --project service --user placement admin
创建Placement API 服务实体
[root@manager opt]# openstack service create --name placement --description "Placement API" placement
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | Placement API |
| enabled | True |
| id | a742a30a462b429fb9458a7aee28805a |
| name | placement |
| type | placement |
+-------------+----------------------------------+
创建placement服务的API端点(endpoint)
[root@manager opt]# openstack endpoint create --region RegionOne placement public http://manager.node:8778
......
[root@manager opt]# openstack endpoint create --region RegionOne placement internal http://manager.node:8778
......
[root@manager opt]# openstack endpoint create --region RegionOne placement admin http://manager.node:8778
......
[root@manager opt]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+-------------------------------+
| ID | Region | Service Name | Service Type | Enabled | Interface | URL |
+----------------------------------+-----------+--------------+--------------+---------+-----------+-------------------------------+
| 02c21f1f93354043b44c176e9865c17e | RegionOne | keystone | identity | True | internal | http://manager.node:5000/v3/ |
| 04188cf4df194a3bb5299aeb9dd6acd5 | RegionOne | placement | placement | True | admin | http://manager.node:8778 |
| 07309e14f0d143f4a3ae80244f40e0af | RegionOne | keystone | identity | True | admin | http://manager.node:5000/v3/ |
| 12c824b3722d4ca39df4d3548a07243e | RegionOne | nova | compute | True | admin | http://manager.node:8774/v2.1 |
| 1ccc6bb8251d4af3b2e02b536232f722 | RegionOne | placement | placement | True | public | http://manager.node:8778 |
| 23bbc2e29ff64cccb26d84454d2e61a0 | RegionOne | nova | compute | True | internal | http://manager.node:8774/v2.1 |
| 622f3a76da474394929b029a781ecd94 | RegionOne | placement | placement | True | internal | http://manager.node:8778 |
| 7bebccdace6148d680d121669319dd7f | RegionOne | keystone | identity | True | public | http://manager.node:5000/v3/ |
| 82eaa8dc56554019a40d650b08ada79a | RegionOne | glance | image | True | admin | http://manager.node:9292 |
| abee5035bb9040269a05f001b4a228c2 | RegionOne | nova | compute | True | public | http://manager.node:8774/v2.1 |
| d21aeef9404f4ca08c2f62dff6cbbc1a | RegionOne | glance | image | True | internal | http://manager.node:9292 |
| e2b6fd164e1d4f4993f641db9029fc81 | RegionOne | glance | image | True | public | http://manager.node:9292 |
+----------------------------------+-----------+--------------+--------------+---------+-----------+-------------------------------+
yum install openstack-nova-api openstack-nova-conductor \
openstack-nova-console openstack-nova-novncproxy \
openstack-nova-scheduler openstack-nova-placement-api
进入/etc/nova/
目录,修改nova.conf
文件
[DEFAULT]
# ...
enabled_apis = osapi_compute,metadata
[api_database]
# ...
connection = mysql+pymysql://nova:123456@manager.node/nova_api
[database]
# ...
connection = mysql+pymysql://nova:123456@manager.node/nova
[placement_database]
# ...
connection = mysql+pymysql://placement:123456@manager.node/placement
[DEFAULT]
# ...
# RABBIT_PASS 是 rabbitmq 中 openstack 账号的密码,替换为实际值
transport_url = rabbit://openstack:RABBIT_PASS@manager.node
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
auth_url = http://manager.node:5000/v3
memcached_servers = manager.node:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nv123456
[DEFAULT]
# ...
my_ip = 10.XX.XX.50
[DEFAULT]
# ...
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[neutron]
等装网络服务时再配
[vnc]
enabled = true
# ...
server_listen = $my_ip
server_proxyclient_address = $my_ip
[glance]
# ...
api_servers = http://manager.node:9292
[oslo_concurrency]
# ...
lock_path = /var/lib/nova/tmp
[placement]
# ...
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://manager.node:5000/v3
username = placement
password = pm123456
# Quick edit (快速修改): applies the same nova.conf settings as the manual
# edits above via openstack-config (crudini wrapper from openstack-utils).
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
# my_ip is the controller's management IP (masked as in the rest of this doc).
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 10.XX.XX.50
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron true
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
# RABBIT_PASS is the password of the "openstack" RabbitMQ account created earlier — replace it.
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@manager.node
# DB users/passwords must match the GRANTs issued earlier (nova / placement, password 123456).
openstack-config --set /etc/nova/nova.conf api_database connection mysql+pymysql://nova:123456@manager.node/nova_api
openstack-config --set /etc/nova/nova.conf database connection mysql+pymysql://nova:123456@manager.node/nova
openstack-config --set /etc/nova/nova.conf placement_database connection mysql+pymysql://placement:123456@manager.node/placement
openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://manager.node:5000/v3
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers manager.node:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name Default
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name Default
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
# Keystone password of the nova service user created above.
openstack-config --set /etc/nova/nova.conf keystone_authtoken password nv123456
openstack-config --set /etc/nova/nova.conf vnc enabled true
# Single-quote $my_ip so nova (not the shell) expands the substitution.
openstack-config --set /etc/nova/nova.conf vnc server_listen '$my_ip'
openstack-config --set /etc/nova/nova.conf vnc server_proxyclient_address '$my_ip'
openstack-config --set /etc/nova/nova.conf glance api_servers http://manager.node:9292
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
openstack-config --set /etc/nova/nova.conf placement region_name RegionOne
openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
openstack-config --set /etc/nova/nova.conf placement project_name service
openstack-config --set /etc/nova/nova.conf placement auth_type password
openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
openstack-config --set /etc/nova/nova.conf placement auth_url http://manager.node:5000/v3
openstack-config --set /etc/nova/nova.conf placement username placement
# Keystone password of the placement service user created above.
openstack-config --set /etc/nova/nova.conf placement password pm123456
# Let nova-scheduler auto-discover new compute hosts every 300 s.
openstack-config --set /etc/nova/nova.conf scheduler discover_hosts_in_cells_interval 300
[root@manager opt]# egrep -v "^#|^$" /etc/nova/nova.conf
[root@manager opt]# grep '^[a-z]' /etc/nova/nova.conf
nova.conf
文件配置完成(除了网络)。
进入/etc/httpd/conf.d/
目录,修改00-nova-placement-api.conf
文件
(官网说这里有一个包BUG)
# vim /etc/httpd/conf.d/00-nova-placement-api.conf
Listen 8778
<VirtualHost *:8778>
WSGIProcessGroup nova-placement-api
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
WSGIDaemonProcess nova-placement-api processes=3 threads=1 user=nova group=nova
WSGIScriptAlias / /usr/bin/nova-placement-api
<IfVersion >= 2.4>
ErrorLogFormat "%M"
</IfVersion>
ErrorLog /var/log/nova/nova-placement-api.log
#SSLEngine On
#SSLCertificateFile ...
#SSLCertificateKeyFile ...
</VirtualHost>
Alias /nova-placement-api /usr/bin/nova-placement-api
<Location /nova-placement-api>
SetHandler wsgi-script
Options +ExecCGI
WSGIProcessGroup nova-placement-api
WSGIApplicationGroup %{GLOBAL}
WSGIPassAuthorization On
</Location>
# 增加下面的内容
<Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
<IfVersion < 2.4>
Order allow,deny
Allow from all
</IfVersion>
</Directory>
重启httpd服务
[root@manager conf.d]# systemctl restart httpd
初始化nova-api和placement数据库
[root@manager nova]# su -s /bin/sh -c "nova-manage api_db sync" nova
查看数据库
MariaDB [(none)]> show databases;
+--------------------+
| Database |
+--------------------+
| glance |
| information_schema |
| keystone |
| mysql |
| nova |
| nova_api |
| nova_cell0 |
| performance_schema |
| placement |
+--------------------+
nova_api库和placement库均有32张表
MariaDB [(none)]> use nova_api
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
MariaDB [nova_api]> show tables;
+------------------------------+
| Tables_in_nova_api |
+------------------------------+
| aggregate_hosts |
| aggregate_metadata |
......
| resource_providers |
| traits |
| users |
+------------------------------+
32 rows in set (0.00 sec)
MariaDB [nova_api]> use placement;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
MariaDB [placement]> show tables;
+------------------------------+
| Tables_in_placement |
+------------------------------+
| aggregate_hosts |
| aggregate_metadata |
......
| resource_provider_traits |
| resource_providers |
| traits |
| users |
+------------------------------+
32 rows in set (0.00 sec)
注册cell0数据库
[root@manager nova]# su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
创建cell1单元
[root@manager nova]# su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
cd7510d5-abf3-4865-b825-0b1e0550109a
初始化nova数据库
[root@manager nova]# su -s /bin/sh -c "nova-manage db sync" nova
/usr/lib/python2.7/site-packages/pymysql/cursors.py:170: Warning: (1831, u'Duplicate index `block_device_mapping_instance_uuid_virtual_name_device_name_idx`. This is deprecated and will be disallowed in a future release.')
result = self._query(query)
/usr/lib/python2.7/site-packages/pymysql/cursors.py:170: Warning: (1831, u'Duplicate index `uniq_instances0uuid`. This is deprecated and will be disallowed in a future release.')
result = self._query(query)
查看数据库,nova_cell0库和nova库均有110张表
MariaDB [nova_cell0]> use nova_cell0
Database changed
MariaDB [nova_cell0]> show tables;
+--------------------------------------------+
| Tables_in_nova_cell0 |
+--------------------------------------------+
| agent_builds |
| aggregate_hosts |
| aggregate_metadata |
......
| snapshots |
| tags |
| task_log |
| virtual_interfaces |
| volume_id_mappings |
| volume_usage_cache |
+--------------------------------------------+
110 rows in set (0.00 sec)
MariaDB [nova_cell0]> use nova
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
MariaDB [nova]> show tables;
+--------------------------------------------+
| Tables_in_nova |
+--------------------------------------------+
| agent_builds |
| aggregate_hosts |
| aggregate_metadata |
| aggregates |
| allocations |
......
| snapshots |
| tags |
| task_log |
| virtual_interfaces |
| volume_id_mappings |
| volume_usage_cache |
+--------------------------------------------+
110 rows in set (0.00 sec)
查验nova cell0 和 cell1 是否注册成功
[root@manager nova]# su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova
+-------+--------------------------------------+--------------------------------------+---------------------------------------------------+----------+
| Name | UUID | Transport URL | Database Connection | Disabled |
+-------+--------------------------------------+--------------------------------------+---------------------------------------------------+----------+
| cell0 | 00000000-0000-0000-0000-000000000000 | none:/ | mysql+pymysql://nova:****@manager.node/nova_cell0 | False |
| cell1 | cd7510d5-abf3-4865-b825-0b1e0550109a | rabbit://openstack:****@manager.node | mysql+pymysql://nova:****@manager.node/nova | False |
+-------+--------------------------------------+--------------------------------------+---------------------------------------------------+----------+
启动服务并设置为开机启动(5个服务)
[root@manager nova]# systemctl start openstack-nova-api
[root@manager nova]# systemctl enable openstack-nova-api
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-api.service to /usr/lib/systemd/system/openstack-nova-api.service.
[root@manager nova]# systemctl start openstack-nova-consoleauth
[root@manager nova]# systemctl enable openstack-nova-consoleauth
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-consoleauth.service to /usr/lib/systemd/system/openstack-nova-consoleauth.service.
[root@manager nova]# systemctl start openstack-nova-scheduler
[root@manager nova]# systemctl enable openstack-nova-scheduler
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-scheduler.service to /usr/lib/systemd/system/openstack-nova-scheduler.service.
[root@manager nova]# systemctl start openstack-nova-conductor
[root@manager nova]# systemctl enable openstack-nova-conductor
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-conductor.service to /usr/lib/systemd/system/openstack-nova-conductor.service.
[root@manager nova]# systemctl start openstack-nova-novncproxy
[root@manager nova]# systemctl enable openstack-nova-novncproxy
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-novncproxy.service to /usr/lib/systemd/system/openstack-nova-novncproxy.service.
以下在计算节点(compute)执行
这里以master.node为例
yum install openstack-nova-compute
进入/etc/nova/
目录,修改nova.conf
文件
[DEFAULT]
# ...
enabled_apis = osapi_compute,metadata
[DEFAULT]
# ...
# 密码是rabbitmq中openstack账号的密码
transport_url = rabbit://openstack:RABBIT_PASS@manager.node
[api]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
auth_url = http://manager.node:5000/v3
memcached_servers = manager.node:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nv123456
[DEFAULT]
# ...
my_ip = 10.XX.XX.51
[DEFAULT]
# ...
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[neutron]
等装网络服务时再配
[vnc]
# ...
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://manager.node:6080/vnc_auto.html
[glance]
# ...
api_servers = http://manager.node:9292
[oslo_concurrency]
# ...
lock_path = /var/lib/nova/tmp
[placement]
# ...
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://manager.node:5000/v3
username = placement
password = pm123456
# 快速修改
openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 10.XX.XX.51
openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron true
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf DEFAULT enabled_apis osapi_compute,metadata
openstack-config --set /etc/nova/nova.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@manager.node
openstack-config --set /etc/nova/nova.conf api auth_strategy keystone
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_url http://manager.node:5000/v3
openstack-config --set /etc/nova/nova.conf keystone_authtoken memcached_servers manager.node:11211
openstack-config --set /etc/nova/nova.conf keystone_authtoken auth_type password
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/nova/nova.conf keystone_authtoken project_name service
openstack-config --set /etc/nova/nova.conf keystone_authtoken username nova
openstack-config --set /etc/nova/nova.conf keystone_authtoken password nv123456
openstack-config --set /etc/nova/nova.conf vnc enabled true
openstack-config --set /etc/nova/nova.conf vnc server_listen 0.0.0.0
openstack-config --set /etc/nova/nova.conf vnc server_proxyclient_address '$my_ip'
openstack-config --set /etc/nova/nova.conf vnc novncproxy_base_url http://manager.node:6080/vnc_auto.html
openstack-config --set /etc/nova/nova.conf glance api_servers http://manager.node:9292
openstack-config --set /etc/nova/nova.conf oslo_concurrency lock_path /var/lib/nova/tmp
openstack-config --set /etc/nova/nova.conf placement region_name RegionOne
openstack-config --set /etc/nova/nova.conf placement project_domain_name Default
openstack-config --set /etc/nova/nova.conf placement project_name service
openstack-config --set /etc/nova/nova.conf placement auth_type password
openstack-config --set /etc/nova/nova.conf placement user_domain_name Default
openstack-config --set /etc/nova/nova.conf placement auth_url http://manager.node:5000/v3
openstack-config --set /etc/nova/nova.conf placement username placement
openstack-config --set /etc/nova/nova.conf placement password pm123456
[root@master ~]# egrep -v "^#|^$" /etc/nova/nova.conf
[root@master ~]# grep '^[a-z]' /etc/nova/nova.conf
[root@master nova]# egrep -c '(vmx|svm)' /proc/cpuinfo
0
0说明不支持硬件加速,需要额外的配置
1或更大则无需操作
此处为0,仍然打开nova.conf
文件进行编辑
[libvirt]
# ...
virt_type = qemu
openstack-config --set /etc/nova/nova.conf libvirt virt_type qemu
如果是通过Exsi创建的虚拟机,那么需要把虚拟机电源关闭,编辑设置中,把cpu选项的虚拟化功能开启。
之后再启动虚拟机,查看
[root@worker ~]# egrep -c '(vmx|svm)' /proc/cpuinfo
4
启动nova服务和依赖服务,并设置开机启动
[root@master nova]# systemctl start libvirtd
[root@master nova]# systemctl enable libvirtd
[root@master nova]# systemctl status libvirtd
● libvirtd.service - Virtualization daemon
Loaded: loaded (/usr/lib/systemd/system/libvirtd.service; enabled; vendor preset: enabled)
Active: active (running) since Fri 2020-02-21 11:53:32 CST; 16s ago
Docs: man:libvirtd(8)
https://libvirt.org
Main PID: 21658 (libvirtd)
CGroup: /system.slice/libvirtd.service
└─21658 /usr/sbin/libvirtd
Feb 21 11:53:32 master.node systemd[1]: Starting Virtualization daemon...
Feb 21 11:53:32 master.node systemd[1]: Started Virtualization daemon.
Feb 21 11:53:32 master.node libvirtd[21658]: 2020-02-21 03:53:32.361+0000: 21674: info : libvirt version: 4.5.0, package: 23.el7_7.5 (CentOS BuildSystem <http://bugs.centos.org....centos.org)
Feb 21 11:53:32 master.node libvirtd[21658]: 2020-02-21 03:53:32.361+0000: 21674: info : hostname: master.node
Feb 21 11:53:32 master.node libvirtd[21658]: 2020-02-21 03:53:32.361+0000: 21674: error : virHostCPUGetTscInfo:1389 : Unable to open /dev/kvm: No such file or directory
Feb 21 11:53:32 master.node libvirtd[21658]: 2020-02-21 03:53:32.526+0000: 21674: error : virHostCPUGetTscInfo:1389 : Unable to open /dev/kvm: No such file or directory
Hint: Some lines were ellipsized, use -l to show in full.
[root@master nova]# systemctl start openstack-nova-compute
[root@master nova]# systemctl enable openstack-nova-compute
Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-compute.service to /usr/lib/systemd/system/openstack-nova-compute.service.
以下操作在manager节点执行
查看数据库中是否有新的计算节点信息
[root@manager nova]# source /opt/admin-openrc.sh
[root@manager nova]# openstack compute service list --service nova-compute
+----+--------------+-------------+------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+----+--------------+-------------+------+---------+-------+----------------------------+
| 8 | nova-compute | master.node | nova | enabled | up | 2020-02-21T06:57:23.000000 |
+----+--------------+-------------+------+---------+-------+----------------------------+
自带有一个指令可以发现计算节点
[root@manager nova]# su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
Found 2 cell mappings.
Skipping cell0 since it does not contain hosts.
Getting computes from cell 'cell1': cd7510d5-abf3-4865-b825-0b1e0550109a
Checking host mapping for compute host 'master.node': c2cb8df6-501b-4c06-95d1-b3f33566772a
Creating host mapping for compute host 'master.node': c2cb8df6-501b-4c06-95d1-b3f33566772a
Found 1 unmapped computes in cell: cd7510d5-abf3-4865-b825-0b1e0550109a
还可以配置多久执行自动发现服务
进入/etc/nova/
目录,修改nova.conf
文件
[scheduler]
discover_hosts_in_cells_interval = 300
同样的方法安装第2个计算节点
做简要的验证
[root@manager nova]# source /opt/admin-openrc.sh
[root@manager nova]# openstack compute service list --service nova-compute
+----+--------------+-------------+------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+----+--------------+-------------+------+---------+-------+----------------------------+
| 8 | nova-compute | master.node | nova | enabled | up | 2020-02-21T07:54:34.000000 |
| 9 | nova-compute | worker.node | nova | enabled | up | 2020-02-21T07:54:34.000000 |
+----+--------------+-------------+------+---------+-------+----------------------------+
[root@manager nova]# openstack catalog list
+-----------+-----------+-------------------------------------------+
| Name | Type | Endpoints |
+-----------+-----------+-------------------------------------------+
| glance | image | RegionOne |
| | | admin: http://manager.node:9292 |
| | | RegionOne |
| | | internal: http://manager.node:9292 |
| | | RegionOne |
| | | public: http://manager.node:9292 |
| | | |
| placement | placement | RegionOne |
| | | admin: http://manager.node:8778 |
| | | RegionOne |
| | | public: http://manager.node:8778 |
| | | RegionOne |
| | | internal: http://manager.node:8778 |
| | | |
| keystone | identity | RegionOne |
| | | internal: http://manager.node:5000/v3/ |
| | | RegionOne |
| | | admin: http://manager.node:5000/v3/ |
| | | RegionOne |
| | | public: http://manager.node:5000/v3/ |
| | | |
| nova | compute | RegionOne |
| | | admin: http://manager.node:8774/v2.1 |
| | | RegionOne |
| | | internal: http://manager.node:8774/v2.1 |
| | | RegionOne |
| | | public: http://manager.node:8774/v2.1 |
| | | |
+-----------+-----------+-------------------------------------------+
[root@manager nova]# openstack image list
+--------------------------------------+------------+--------+
| ID | Name | Status |
+--------------------------------------+------------+--------+
| fe079fac-e0c8-4cca-b6da-ac8e7031ca9c | cirros-abc | active |
+--------------------------------------+------------+--------+
[root@manager nova]# nova-status upgrade check
+--------------------------------+
| Upgrade Check Results |
+--------------------------------+
| Check: Cells v2 |
| Result: Success |
| Details: None |
+--------------------------------+
| Check: Placement API |
| Result: Success |
| Details: None |
+--------------------------------+
| Check: Resource Providers |
| Result: Success |
| Details: None |
+--------------------------------+
| Check: Ironic Flavor Migration |
| Result: Success |
| Details: None |
+--------------------------------+
| Check: API Service Version |
| Result: Success |
| Details: None |
+--------------------------------+
| Check: Request Spec Migration |
| Result: Success |
| Details: None |
+--------------------------------+
| Check: Console Auths |
| Result: Success |
| Details: None |
+--------------------------------+
计算服务安装完成。
以下在manager节点(controller)操作
以下在manager节点(controller)操作
以下在manager节点(controller)操作
创建neutron数据库,并赋权
[root@manager ~]# mysql -u root -p
Enter password:
Welcome to the MariaDB monitor. Commands end with ; or \g.
Your MariaDB connection id is 343
Server version: 10.1.20-MariaDB MariaDB Server
Copyright (c) 2000, 2016, Oracle, MariaDB Corporation Ab and others.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
MariaDB [(none)]> CREATE DATABASE neutron;
Query OK, 1 row affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY '123456';
Query OK, 0 rows affected (0.00 sec)
MariaDB [(none)]> flush privileges;
Query OK, 0 rows affected (0.00 sec)
在keystone上创建neutron用户
[root@manager nova]# source /opt/admin-openrc.sh
[root@manager nova]# openstack user create --domain default --password-prompt neutron
User Password: #自己定义 nt123456
Repeat User Password:
+---------------------+----------------------------------+
| Field | Value |
+---------------------+----------------------------------+
| domain_id | default |
| enabled | True |
| id | 0f3fe722d8104787a18112c74c4b5e8f |
| name | neutron |
| options | {} |
| password_expires_at | None |
+---------------------+----------------------------------+
将neutron添加到service项目并授予admin角色
[root@manager nova]# openstack role add --project service --user neutron admin
创建neutron服务实体
[root@manager nova]# openstack service create --name neutron --description "OpenStack Networking" network
+-------------+----------------------------------+
| Field | Value |
+-------------+----------------------------------+
| description | OpenStack Networking |
| enabled | True |
| id | 469c6fc585a9426b8ce0c7ef4ed305ee |
| name | neutron |
| type | network |
+-------------+----------------------------------+
创建neutron网络服务的API端点(endpoint)
[root@manager nova]# openstack endpoint create --region RegionOne network public http://manager.node:9696
......
[root@manager nova]# openstack endpoint create --region RegionOne network internal http://manager.node:9696
......
[root@manager nova]# openstack endpoint create --region RegionOne network admin http://manager.node:9696
......
[root@manager nova]# openstack endpoint list
+----------------------------------+-----------+--------------+--------------+---------+-----------+-------------------------------+
| ID | Region | Service Name | Service Type | Enabled | Interface | URL |
+----------------------------------+-----------+--------------+--------------+---------+-----------+-------------------------------+
| 02c21f1f93354043b44c176e9865c17e | RegionOne | keystone | identity | True | internal | http://manager.node:5000/v3/ |
| 04188cf4df194a3bb5299aeb9dd6acd5 | RegionOne | placement | placement | True | admin | http://manager.node:8778 |
| 07309e14f0d143f4a3ae80244f40e0af | RegionOne | keystone | identity | True | admin | http://manager.node:5000/v3/ |
| 12c824b3722d4ca39df4d3548a07243e | RegionOne | nova | compute | True | admin | http://manager.node:8774/v2.1 |
| 1ccc6bb8251d4af3b2e02b536232f722 | RegionOne | placement | placement | True | public | http://manager.node:8778 |
| 23bbc2e29ff64cccb26d84454d2e61a0 | RegionOne | nova | compute | True | internal | http://manager.node:8774/v2.1 |
| 622f3a76da474394929b029a781ecd94 | RegionOne | placement | placement | True | internal | http://manager.node:8778 |
| 7bebccdace6148d680d121669319dd7f | RegionOne | keystone | identity | True | public | http://manager.node:5000/v3/ |
| 82eaa8dc56554019a40d650b08ada79a | RegionOne | glance | image | True | admin | http://manager.node:9292 |
| 92b73c3fb79e4640877786cd8448e60d | RegionOne | neutron | network | True | admin | http://manager.node:9696 |
| 9e7d5983ab844a619be09987352a5526 | RegionOne | neutron | network | True | public | http://manager.node:9696 |
| abee5035bb9040269a05f001b4a228c2 | RegionOne | nova | compute | True | public | http://manager.node:8774/v2.1 |
| cd7f6e7964754296a29f3f1f57cab9be | RegionOne | neutron | network | True | internal | http://manager.node:9696 |
| d21aeef9404f4ca08c2f62dff6cbbc1a | RegionOne | glance | image | True | internal | http://manager.node:9292 |
| e2b6fd164e1d4f4993f641db9029fc81 | RegionOne | glance | image | True | public | http://manager.node:9292 |
+----------------------------------+-----------+--------------+--------------+---------+-----------+-------------------------------+
官网说了两种方式,一种Provider networks
,一种Self-service networks
Provider networks
安装包及依赖
yum install openstack-neutron openstack-neutron-ml2 \
openstack-neutron-linuxbridge ebtables
修改配置文件
主要是和网络有关的文件
/etc/neutron/neutron.conf
/etc/neutron/plugins/ml2/ml2_conf.ini
/etc/neutron/plugins/ml2/linuxbridge_agent.ini
/etc/neutron/dhcp_agent.ini
进入/etc/neutron/
目录,修改neutron.conf
文件
[database]
# ...
connection = mysql+pymysql://neutron:123456@manager.node/neutron
[DEFAULT]
# ...Modular Layer 2
core_plugin = ml2
# 下面就是空的
service_plugins =
[DEFAULT]
# ...
transport_url = rabbit://openstack:RABBIT_PASS@manager.node
[DEFAULT]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
www_authenticate_uri = http://manager.node:5000
auth_url = http://manager.node:5000
memcached_servers = manager.node:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = nt123456
[DEFAULT]
# ...
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[nova]
# ...
auth_url = http://manager.node:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nv123456
[oslo_concurrency]
# ...
lock_path =/var/lib/neutron/tmp
# 快速修改/etc/neutron/neutron.conf
openstack-config --set /etc/neutron/neutron.conf database connection mysql+pymysql://neutron:123456@manager.node/neutron
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins
openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@manager.node
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken www_authenticate_uri http://manager.node:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://manager.node:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers manager.node:11211
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password nt123456
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes true
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes true
openstack-config --set /etc/neutron/neutron.conf nova auth_url http://manager.node:5000
openstack-config --set /etc/neutron/neutron.conf nova auth_type password
openstack-config --set /etc/neutron/neutron.conf nova project_domain_name default
openstack-config --set /etc/neutron/neutron.conf nova user_domain_name default
openstack-config --set /etc/neutron/neutron.conf nova region_name RegionOne
openstack-config --set /etc/neutron/neutron.conf nova project_name service
openstack-config --set /etc/neutron/neutron.conf nova username nova
openstack-config --set /etc/neutron/neutron.conf nova password nv123456
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
[root@manager opt]# egrep -v "^#|^$" /etc/neutron/neutron.conf
[root@manager opt]# grep '^[a-z]' /etc/neutron/neutron.conf
进入 /etc/neutron/plugins/ml2/
目录,修改ml2_conf.ini
文件
[ml2]
# ...
type_drivers = flat,vlan
# ...
tenant_network_types =
# ...
mechanism_drivers = linuxbridge
# ...
extension_drivers = port_security
[ml2_type_flat]
# ...
flat_networks = provider
[securitygroup]
# ...
enable_ipset = true
# 快速修改/etc/neutron/plugins/ml2/ml2_conf.ini
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers linuxbridge
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks provider
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_ipset true
[root@manager opt]# grep '^[a-z]' /etc/neutron/plugins/ml2/ml2_conf.ini
进入 /etc/neutron/plugins/ml2/
目录,修改linuxbridge_agent.ini
文件
[linux_bridge]
physical_interface_mappings = provider:ens192
[vxlan]
enable_vxlan = false
[securitygroup]
# ...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:ens192
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan false
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[root@manager opt]# grep '^[a-z]' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
确保操作系统以下两个参数设置为1
net.bridge.bridge-nf-call-iptables
net.bridge.bridge-nf-call-ip6tables
修改
/etc/sysctl.conf
文件,添加以下两行
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
再执行下面两行
[root@manager ml2]# modprobe br_netfilter
[root@manager ml2]# sysctl -p
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
重启后模块会失效,先放一放,继续安装
进入/etc/neutron/
目录,修改dhcp_agent.ini
文件
[DEFAULT]
# ...
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
# 快速修改/etc/neutron/dhcp_agent.ini
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver linuxbridge
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata true
[root@manager opt]# grep '^[a-z]' /etc/neutron/dhcp_agent.ini
Self-service networks
安装包及依赖
yum install openstack-neutron openstack-neutron-ml2 \
openstack-neutron-linuxbridge ebtables
修改配置文件
主要是和网络有关的文件
/etc/neutron/neutron.conf
/etc/neutron/plugins/ml2/ml2_conf.ini
/etc/neutron/plugins/ml2/linuxbridge_agent.ini
/etc/neutron/l3_agent.ini
/etc/neutron/dhcp_agent.ini
进入/etc/neutron/
目录,修改neutron.conf
文件
[database]
# ...
connection = mysql+pymysql://neutron:123456@manager.node/neutron
[DEFAULT]
# ...Modular Layer 2
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
[DEFAULT]
# ...
transport_url = rabbit://openstack:RABBIT_PASS@manager.node
[DEFAULT]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
www_authenticate_uri = http://manager.node:5000
auth_url = http://manager.node:5000
memcached_servers = manager.node:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = nt123456
[DEFAULT]
# ...
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[nova]
# ...
auth_url = http://manager.node:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nv123456
[oslo_concurrency]
# ...
lock_path =/var/lib/neutron/tmp
# 快速修改
openstack-config --set /etc/neutron/neutron.conf database connection mysql+pymysql://neutron:123456@manager.node/neutron
openstack-config --set /etc/neutron/neutron.conf DEFAULT core_plugin ml2
openstack-config --set /etc/neutron/neutron.conf DEFAULT service_plugins router
openstack-config --set /etc/neutron/neutron.conf DEFAULT allow_overlapping_ips true
openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@manager.node
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken www_authenticate_uri http://manager.node:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://manager.node:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers manager.node:11211
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password nt123456
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_status_changes true
openstack-config --set /etc/neutron/neutron.conf DEFAULT notify_nova_on_port_data_changes true
openstack-config --set /etc/neutron/neutron.conf nova auth_url http://manager.node:5000
openstack-config --set /etc/neutron/neutron.conf nova auth_type password
openstack-config --set /etc/neutron/neutron.conf nova project_domain_name default
openstack-config --set /etc/neutron/neutron.conf nova user_domain_name default
openstack-config --set /etc/neutron/neutron.conf nova region_name RegionOne
openstack-config --set /etc/neutron/neutron.conf nova project_name service
openstack-config --set /etc/neutron/neutron.conf nova username nova
openstack-config --set /etc/neutron/neutron.conf nova password nv123456
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
[root@manager opt]# grep '^[a-z]' /etc/neutron/neutron.conf
进入 /etc/neutron/plugins/ml2/
目录,修改ml2_conf.ini
文件
[ml2]
# ...
type_drivers = flat,vlan,vxlan
# ...
tenant_network_types = vxlan
# ...
mechanism_drivers = linuxbridge,l2population
# ...
extension_drivers = port_security
[ml2_type_flat]
# ...
flat_networks = provider
[ml2_type_vxlan]
# ...
vni_ranges = 1:1000
[securitygroup]
# ...
enable_ipset = true
# 快速修改
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 type_drivers flat,vlan,vxlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 tenant_network_types vxlan
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 mechanism_drivers linuxbridge,l2population
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2 extension_drivers port_security
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_flat flat_networks provider
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini ml2_type_vxlan vni_ranges 1:1000
openstack-config --set /etc/neutron/plugins/ml2/ml2_conf.ini securitygroup enable_ipset true
[root@manager opt]# grep '^[a-z]' /etc/neutron/plugins/ml2/ml2_conf.ini
进入 /etc/neutron/plugins/ml2/
目录,修改linuxbridge_agent.ini
文件
[linux_bridge]
physical_interface_mappings = provider:ens192
[vxlan]
enable_vxlan = true
local_ip = 10.XX.XX.50
l2_population = true
[securitygroup]
# ...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
# 快速修改
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:ens192
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan true
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip 10.XX.XX.50
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population true
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[root@manager opt]# grep '^[a-z]' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
确保操作系统以下两个参数设置为1
net.bridge.bridge-nf-call-iptables
net.bridge.bridge-nf-call-ip6tables
方法同上
进入/etc/neutron/
目录,修改l3_agent.ini
文件
[DEFAULT]
# ...
interface_driver = linuxbridge
# 快速修改
openstack-config --set /etc/neutron/l3_agent.ini DEFAULT interface_driver linuxbridge
[root@manager opt]# grep '^[a-z]' /etc/neutron/l3_agent.ini
进入/etc/neutron/
目录,修改dhcp_agent.ini
文件
[DEFAULT]
# ...
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
# 快速修改/etc/neutron/dhcp_agent.ini
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT interface_driver linuxbridge
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT dhcp_driver neutron.agent.linux.dhcp.Dnsmasq
openstack-config --set /etc/neutron/dhcp_agent.ini DEFAULT enable_isolated_metadata true
[root@manager opt]# grep '^[a-z]' /etc/neutron/dhcp_agent.ini
主要是和服务有关的文件,两种模式选哪个都要修改
进入/etc/neutron/
目录,修改metadata_agent.ini
文件
[DEFAULT]
# ...
nova_metadata_host = manager.node
# 密码可以自己设置
metadata_proxy_shared_secret = 123456
# 快速配置/etc/neutron/metadata_agent.ini
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT nova_metadata_host manager.node
openstack-config --set /etc/neutron/metadata_agent.ini DEFAULT metadata_proxy_shared_secret 123456
[root@manager opt]# grep '^[a-z]' /etc/neutron/metadata_agent.ini
进入 /etc/nova/
目录,修改nova.conf
文件
这是在安装nova服务时遗留的一个配置
[neutron]
# ...
url = http://manager.node:9696
auth_url = http://manager.node:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = nt123456
service_metadata_proxy = true
metadata_proxy_shared_secret = 123456
# 快速补充配置/etc/nova/nova.conf
openstack-config --set /etc/nova/nova.conf neutron url http://manager.node:9696
openstack-config --set /etc/nova/nova.conf neutron auth_url http://manager.node:5000
openstack-config --set /etc/nova/nova.conf neutron auth_type password
openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
openstack-config --set /etc/nova/nova.conf neutron project_name service
openstack-config --set /etc/nova/nova.conf neutron username neutron
openstack-config --set /etc/nova/nova.conf neutron password nt123456
openstack-config --set /etc/nova/nova.conf neutron service_metadata_proxy true
openstack-config --set /etc/nova/nova.conf neutron metadata_proxy_shared_secret 123456
[root@manager opt]# egrep -v "^#|^$" /etc/nova/nova.conf
[root@manager neutron]# ll
total 132
drwxr-xr-x 11 root root 260 Feb 21 17:26 conf.d
-rw-r----- 1 root neutron 10758 Feb 21 18:26 dhcp_agent.ini
-rw-r----- 1 root neutron 14434 Dec 20 13:56 l3_agent.ini
-rw-r----- 1 root neutron 11322 Feb 21 18:43 metadata_agent.ini
-rw-r----- 1 root neutron 71811 Feb 21 17:40 neutron.conf
drwxr-xr-x 3 root root 17 Feb 21 17:25 plugins
-rw-r----- 1 root neutron 12517 Dec 19 22:43 policy.json
-rw-r--r-- 1 root root 1195 Dec 19 22:43 rootwrap.conf
[root@manager neutron]# ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
[root@manager neutron]# ll /etc/neutron/
total 132
drwxr-xr-x 11 root root 260 Feb 21 17:26 conf.d
-rw-r----- 1 root neutron 10758 Feb 21 18:26 dhcp_agent.ini
-rw-r----- 1 root neutron 14434 Dec 20 13:56 l3_agent.ini
-rw-r----- 1 root neutron 11322 Feb 21 18:43 metadata_agent.ini
-rw-r----- 1 root neutron 71811 Feb 21 17:40 neutron.conf
lrwxrwxrwx 1 root root 37 Feb 21 18:56 plugin.ini -> /etc/neutron/plugins/ml2/ml2_conf.ini
drwxr-xr-x 3 root root 17 Feb 21 17:25 plugins
-rw-r----- 1 root neutron 12517 Dec 19 22:43 policy.json
-rw-r--r-- 1 root root 1195 Dec 19 22:43 rootwrap.conf
[root@manager neutron]# su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
Running upgrade for neutron ...
INFO [alembic.runtime.migration] Context impl MySQLImpl.
INFO [alembic.runtime.migration] Will assume non-transactional DDL.
INFO [alembic.runtime.migration] Running upgrade -> kilo
INFO [alembic.runtime.migration] Running upgrade kilo -> 354db87e3225
INFO [alembic.runtime.migration] Running upgrade 354db87e3225 -> 599c6a226151
INFO [alembic.runtime.migration] Running upgrade 599c6a226151 -> 52c5312f6baf
......
INFO [alembic.runtime.migration] Running upgrade 97c25b0d2353 -> 2e0d7a8a1586
INFO [alembic.runtime.migration] Running upgrade 2e0d7a8a1586 -> 5c85685d616d
OK
先重启nova_api服务
[root@manager neutron]# systemctl restart openstack-nova-api
再启动和设置开机启动网络相关服务
[root@manager neutron]# systemctl start neutron-server
[root@manager neutron]# systemctl enable neutron-server
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-server.service to /usr/lib/systemd/system/neutron-server.service.
[root@manager neutron]# systemctl start neutron-linuxbridge-agent
[root@manager neutron]# systemctl enable neutron-linuxbridge-agent
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-linuxbridge-agent.service to /usr/lib/systemd/system/neutron-linuxbridge-agent.service.
[root@manager neutron]# systemctl start neutron-dhcp-agent
[root@manager neutron]# systemctl enable neutron-dhcp-agent
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-dhcp-agent.service to /usr/lib/systemd/system/neutron-dhcp-agent.service.
[root@manager neutron]# systemctl start neutron-metadata-agent
[root@manager neutron]# systemctl enable neutron-metadata-agent
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-metadata-agent.service to /usr/lib/systemd/system/neutron-metadata-agent.service.
如果选择Self-service networks
,还需要启动如下服务
[root@manager ml2]# systemctl start neutron-l3-agent
[root@manager ml2]# systemctl enable neutron-l3-agent
控制端的neutron网络服务安装完成。
以下在计算节点(compute)操作
以master.node节点为例
yum install openstack-neutron-linuxbridge ebtables ipset
这里修改和服务有关的文件
进入/etc/neutron/
目录,修改neutron.conf
文件
[DEFAULT]
# ...
transport_url = rabbit://openstack:RABBIT_PASS@manager.node
# 将 RABBIT_PASS 替换为安装 RabbitMQ 时为 openstack 用户设置的密码
[DEFAULT]
# ...
auth_strategy = keystone
[keystone_authtoken]
# ...
www_authenticate_uri = http://manager.node:5000
auth_url = http://manager.node:5000
memcached_servers = manager.node:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = nt123456
[oslo_concurrency]
# ...
lock_path = /var/lib/neutron/tmp
# 快速配置/etc/neutron/neutron.conf
openstack-config --set /etc/neutron/neutron.conf DEFAULT transport_url rabbit://openstack:RABBIT_PASS@manager.node
openstack-config --set /etc/neutron/neutron.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken www_authenticate_uri http://manager.node:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_url http://manager.node:5000
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken memcached_servers manager.node:11211
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken auth_type password
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken user_domain_name default
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken project_name service
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken username neutron
openstack-config --set /etc/neutron/neutron.conf keystone_authtoken password nt123456
openstack-config --set /etc/neutron/neutron.conf oslo_concurrency lock_path /var/lib/neutron/tmp
[root@master ~]# egrep -v "^#|^$" /etc/neutron/neutron.conf
[root@master ~]# grep '^[a-z]' /etc/neutron/neutron.conf
进入/etc/nova/
目录,修改nova.conf
文件
修改安装nova时遗留的问题
[neutron]
# ...
url = http://manager.node:9696
auth_url = http://manager.node:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = nt123456
# 快速配置/etc/nova/nova.conf
openstack-config --set /etc/nova/nova.conf neutron url http://manager.node:9696
openstack-config --set /etc/nova/nova.conf neutron auth_url http://manager.node:5000
openstack-config --set /etc/nova/nova.conf neutron auth_type password
openstack-config --set /etc/nova/nova.conf neutron project_domain_name default
openstack-config --set /etc/nova/nova.conf neutron user_domain_name default
openstack-config --set /etc/nova/nova.conf neutron region_name RegionOne
openstack-config --set /etc/nova/nova.conf neutron project_name service
openstack-config --set /etc/nova/nova.conf neutron username neutron
openstack-config --set /etc/nova/nova.conf neutron password nt123456
[root@master ~]# egrep -v "^#|^$" /etc/nova/nova.conf
这里修改和网络有关的文件
进入/etc/neutron/plugins/ml2/
目录,修改linuxbridge_agent.ini
文件
[linux_bridge]
physical_interface_mappings = provider:ens192
如果是Provider networks
[vxlan]
enable_vxlan = false
如果是Self-service networks
[vxlan]
enable_vxlan = true
local_ip = 10.XX.XX.51
l2_population = true
[securitygroup]
# ...
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
# 快速修改/etc/neutron/plugins/ml2/linuxbridge_agent.ini
#(provider)
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:ens192
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan false
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
#(Self-service networks)
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini linux_bridge physical_interface_mappings provider:ens192
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan enable_vxlan true
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan local_ip 10.XX.XX.51
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini vxlan l2_population true
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup enable_security_group true
openstack-config --set /etc/neutron/plugins/ml2/linuxbridge_agent.ini securitygroup firewall_driver neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
[root@master ~]# grep '^[a-z]' /etc/neutron/plugins/ml2/linuxbridge_agent.ini
以下两个参数的设置参考管理节点
net.bridge.bridge-nf-call-iptables
net.bridge.bridge-nf-call-ip6tables
重启计算服务
[root@master ml2]# systemctl restart openstack-nova-compute
启动网络服务与设置自启动
[root@master ml2]# systemctl start neutron-linuxbridge-agent
[root@master ml2]# systemctl enable neutron-linuxbridge-agent
Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-linuxbridge-agent.service to /usr/lib/systemd/system/neutron-linuxbridge-agent.service.
计算节点的网络配置完成,其它参考不再说明。
以下测试在管理节点(controller)操作
[root@manager neutron]# source /opt/admin-openrc.sh
# openstack extension list --network
[root@manager neutron]# neutron ext-list
neutron CLI is deprecated and will be removed in the future. Use openstack CLI instead.
+--------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+
| alias | name |
+--------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+
| default-subnetpools | Default Subnetpools |
| network-ip-availability | Network IP Availability |
| network_availability_zone | Network Availability Zone |
| net-mtu-writable | Network MTU (writable) |
| binding | Port Binding |
| agent | agent |
| subnet_allocation | Subnet Allocation |
| dhcp_agent_scheduler | DHCP Agent Scheduler |
| external-net | Neutron external network |
| flavors | Neutron Service Flavors |
| net-mtu | Network MTU |
| availability_zone | Availability Zone |
| quotas | Quota management support |
| standard-attr-tag | Tag support for resources with standard attribute: subnet, trunk, router, network, policy, subnetpool, port, security_group, floatingip |
| availability_zone_filter | Availability Zone Filter Extension |
| revision-if-match | If-Match constraints based on revision_number |
| filter-validation | Filter parameters validation |
| multi-provider | Multi Provider Network |
| quota_details | Quota details management support |
| address-scope | Address scope |
| empty-string-filtering | Empty String Filtering Extension |
| subnet-service-types | Subnet service types |
| port-mac-address-regenerate | Neutron Port MAC address regenerate |
| standard-attr-timestamp | Resource timestamps |
| provider | Provider Network |
| service-type | Neutron Service Type Management |
| extra_dhcp_opt | Neutron Extra DHCP options |
| port-security-groups-filtering | Port filtering on security groups |
| standard-attr-revisions | Resource revision numbers |
| pagination | Pagination support |
| sorting | Sorting support |
| security-group | security-group |
| rbac-policies | RBAC Policies |
| standard-attr-description | standard-attr-description |
| ip-substring-filtering | IP address substring filtering |
| port-security | Port Security |
| allowed-address-pairs | Allowed Address Pairs |
| project-id | project_id field enabled |
| binding-extended | Port Bindings Extended |
+--------------------------------+-----------------------------------------------------------------------------------------------------------------------------------------+
[root@manager neutron]# openstack network agent list
+--------------------------------------+--------------------+--------------+-------------------+-------+-------+---------------------------+
| ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
+--------------------------------------+--------------------+--------------+-------------------+-------+-------+---------------------------+
| 07cb5b99-2037-460e-be7d-376f96510cc2 | Linux bridge agent | master.node | None | :-) | UP | neutron-linuxbridge-agent |
| b05967a9-9f91-4746-ad16-c26462e2972a | Linux bridge agent | manager.node | None | :-) | UP | neutron-linuxbridge-agent |
| dbe25838-4412-4c1d-88a0-9a5967916dbc | Linux bridge agent | worker.node | None | :-) | UP | neutron-linuxbridge-agent |
| f1221e01-f702-433a-bdff-488dca48b82b | Metadata agent | manager.node | None | :-) | UP | neutron-metadata-agent |
| f2bf7956-0df1-4699-817b-d0cc5340beb7 | DHCP agent | manager.node | nova | :-) | UP | neutron-dhcp-agent |
+--------------------------------------+--------------------+--------------+-------------------+-------+-------+---------------------------+
正常情况下,控制节点有3个代理(Linux bridge、DHCP、Metadata;如果选择了Self-service networks并启动了neutron-l3-agent,还会多出一个L3 agent,共4个),每个计算节点有1个Linux bridge agent。
在manager节点(controller)操作。
yum install openstack-dashboard
进入 /etc/openstack-dashboard/
目录,修改local_settings
文件
# vim /etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "manager.node"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
#通过仪表盘创建的用户默认角色配置为 user
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
ALLOWED_HOSTS = ['*', 'localhost']
SESSION_ENGINE = 'django.contrib.sessions.backends.cache' #需要添加
#配置memcached存储服务
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': 'manager.node:11211',
},
}
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True
#配置API版本
OPENSTACK_API_VERSIONS = {
"identity": 3,
"image": 2,
"volume": 2,
}
#通过仪表盘创建用户时的默认域配置为 default
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
#如果选择网络参数1,禁用支持3层网络服务:
OPENSTACK_NEUTRON_NETWORK = {
...
'enable_router': False,
'enable_quotas': False,
'enable_distributed_router': False,
'enable_ha_router': False,
'enable_lb': False,
'enable_firewall': False,
'enable_vpn': False,
'enable_fip_topology_check': False,
}
#可以选择性地配置时区,不能用CST否则无法启动httpd服务
TIME_ZONE = "Asia/Shanghai"
进入/etc/httpd/conf.d/
目录,修改openstack-dashboard.conf
文件
如果没有下面一行代码,则加入
WSGIApplicationGroup %{GLOBAL}
WSGIDaemonProcess dashboard
WSGIProcessGroup dashboard
WSGISocketPrefix run/wsgi
WSGIApplicationGroup %{GLOBAL} #增加
WSGIScriptAlias /dashboard /usr/share/openstack-dashboard/openstack_dashboard/wsgi/django.wsgi
Alias /dashboard/static /usr/share/openstack-dashboard/static
<Directory /usr/share/openstack-dashboard/openstack_dashboard/wsgi>
Options All
AllowOverride All
Require all granted
</Directory>
<Directory /usr/share/openstack-dashboard/static>
Options All
AllowOverride All
Require all granted
</Directory>
重启web服务和会话存储服务
[root@manager conf.d]# systemctl restart httpd
[root@manager conf.d]# systemctl restart memcached
浏览器上输入http://managerIP:80/dashboard
出现登陆界面。
域:default
用户名:admin
密码:ks123456
密码是keystone-manage bootstrap指令里指定的,openrc文件里也有
登陆成功。
至此,openstack的安装完成,块存储cinder后续更新。
https://docs.openstack.org/install-guide/openstack-services.html
https://blog.csdn.net/qq_38773184/article/details/82391073
https://blog.csdn.net/xinfeiyang060502118/article/details/102514114