controller: 8 GB RAM, 8 CPUs, dual NICs, 100 GB disk
compute: 4-8 GB RAM, 4-8 CPUs, dual NICs, 40 GB disk
curl -o /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
# Set the system time zone to Shanghai
timedatectl set-timezone Asia/Shanghai
# Write the system clock to the hardware clock (BIOS); run this after making the change
clock -w
# Set this host's name
hostnamectl set-hostname <hostname>
vim /etc/hosts
192.168.13.80 controller
192.168.13.81 compute
# Test name resolution
ping <hostname>
# Enable the OpenStack repository
yum install centos-release-openstack-train.noarch -y
# Download and install the RDO repository RPM to enable the OpenStack repository
yum install https://rdoproject.org/repos/rdo-release.rpm -y
# Upgrade the system packages
yum update -y
# Install the OpenStack security policies
yum install openstack-selinux -y
yum install openstack-nova-compute -y
#epel-release
# After installing epel-release you may hit package dependency errors; if so, simply move the repo files aside: mv /etc/yum.repos.d/epel* ~/
# Install the OpenStack client
yum install -y python-openstackclient
systemctl stop NetworkManager
systemctl disable NetworkManager
systemctl enable network
systemctl start network
systemctl stop firewalld
systemctl disable firewalld
sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
Set the controller's hostname and record it in the /etc/hosts file:
hostnamectl set-hostname <controller-name>
echo "<controller-ip> <controller-name>" | tee -a /etc/hosts
Here, <controller-ip> is the controller node's IP address and <controller-name> is the controller node's name.
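For example, with the addresses used earlier in this guide:
```bash
hostnamectl set-hostname controller
echo "192.168.13.80 controller" | tee -a /etc/hosts
echo "192.168.13.81 compute" | tee -a /etc/hosts
```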
yum install -y chrony
systemctl enable chronyd.service
systemctl start chronyd.service
# Configure on the controller node
sed -i 's|^#allow.*|allow 192.168.13.0/24|g' /etc/chrony.conf
systemctl restart chronyd.service
systemctl enable chronyd.service
On the other nodes, edit the ``/etc/chrony.conf`` file and comment out all but one ``server`` key; change it to reference the controller node:
server controller iburst
# Recommended way to synchronize time
# Set the system time zone to Shanghai
timedatectl set-timezone Asia/Shanghai
# Write the system clock to the hardware clock (BIOS); run this after making the change
clock -w
Here, 192.168.13.0/24 is the subnet that your controller node resides on.
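To confirm that time synchronization is working, chrony's client can list its sources (a standard check, not part of the original notes):
```bash
chronyc sources
# On the compute node, the controller should appear as a source (marked ^* once selected)
```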
RHEL and CentOS enable [SELinux] by default. Install the openstack-selinux package so that security policies for OpenStack services are managed automatically:
yum install openstack-selinux
yum install mariadb mariadb-server python2-PyMySQL
Create and edit /etc/my.cnf.d/mariadb-server.cnf, then complete the following actions:
Set the bind-address
value to the controller node's management-network IP address so that other nodes can access the database over the management network, and enable some useful options plus the UTF-8 character set:
[mysqld]
bind-address = 10.0.0.11
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
# Recommended configuration
# In the config file, bind-address = 10.0.0.11 can be changed to bind-address = 0.0.0.0 (listen on all addresses)
systemctl enable mariadb.service
systemctl start mariadb.service
To secure the database service, run the mysql_secure_installation
script. In particular, set an appropriate password for the database root user.
mysql_secure_installation
Enter the password and answer the prompts as they appear.
mysql -u root -p
Enter the password to get a MariaDB console.
CREATE DATABASE keystone;
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'root';
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'root';
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'root';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'root';
CREATE DATABASE placement;
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'localhost' IDENTIFIED BY 'root';
GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' IDENTIFIED BY 'root';
CREATE DATABASE nova_api;
CREATE DATABASE nova;
CREATE DATABASE nova_cell0;
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'root';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'root';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'root';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'root';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'root';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'root';
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'root';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'root';
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'root';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'root';
FLUSH PRIVILEGES;
EXIT;
Here, root is the password you set for each service account.
After running these statements, verify that every account can actually log in, for example:
mysql -u keystone -p'root' -h ${IP}
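A minimal sketch for checking all of the accounts created above in one pass, assuming every account uses the password 'root' as in the GRANT statements and that the controller's address from this guide is reachable:
```bash
IP=192.168.13.80   # controller address used in this guide; adjust to your environment
for u in keystone glance placement nova neutron cinder; do
  # run a trivial query as each service user and report the result
  mysql -u "$u" -p'root' -h "$IP" -e 'SELECT 1;' >/dev/null 2>&1 \
    && echo "$u: OK" || echo "$u: FAILED"
done
```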
The Telemetry service uses a NoSQL database to store information; typically this database runs on the controller node. This guide uses MongoDB.
You only need to install the NoSQL database service if you are installing the Telemetry service.
yum install mongodb-server mongodb
Edit /etc/mongod.conf and configure bind_ip to use the IP address of the controller node's management interface.
bind_ip = 10.0.0.11
By default, MongoDB creates several 1 GB journal files under the /var/lib/mongodb/journal
directory. If you want to reduce each journal file to 128 MB and limit the total journal space to 512 MB, set the smallfiles option:
smallfiles = true
systemctl enable mongod.service
systemctl start mongod.service
yum install -y rabbitmq-server
systemctl enable rabbitmq-server.service
systemctl start rabbitmq-server.service
rabbitmq-plugins list
rabbitmq-plugins enable rabbitmq_management
Enabling the management plugin opens listening port 15672; you can log in through the web UI (the default account and password are both guest).
# Check that the port is listening
netstat -an | grep 15672
# The web UI is then reachable in a browser at:
http://controller:15672
curl http://IP:15672
rabbitmqctl add_user openstack root
rabbitmqctl set_permissions openstack ". *" ". *" ". *"
Here, root is the password you set for the openstack user.
4. After installation, you can also restart the RabbitMQ service:
systemctl restart rabbitmq-server.service
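To double-check the account and its permissions (standard rabbitmqctl subcommands):
```bash
rabbitmqctl list_users          # the 'openstack' user should be listed
rabbitmqctl list_permissions    # it should have ".*" configure/write/read permissions
```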
yum install -y memcached python-memcached
systemctl enable memcached.service
systemctl start memcached.service
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l 192. 168. 13. 29,127. 0. 0. 1,::1"
systemctl restart memcached. service
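A quick way to verify that memcached is listening on the configured addresses (assumes the net-tools and nmap-ncat packages are installed):
```bash
netstat -tlnp | grep 11211
echo stats | timeout 2 nc 127.0.0.1 11211 | head   # should print memcached statistics
```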
yum install -y openstack-keystone httpd mod_wsgi
openssl rand -hex 10
d086569ffc9bd3c58ad6
3. Edit the file vim /etc/keystone/keystone.conf and complete the following actions:
In the [DEFAULT]
section, define the value of the initial administration token:
[DEFAULT]
...
admin_token = ADMIN_TOKEN
Replace ``ADMIN_TOKEN`` with the random value generated in the previous step.
In the [token]
section, configure the Fernet token provider:
[token]
provider = fernet
In the [database]
section, configure the connection to the MariaDB database:
[database]
connection = mysql+pymysql://keystone:root@<controller-ip>/keystone
# It is best to type this line in by hand to avoid format-related errors from copy/paste
Here, root is the password you set for Keystone and <controller-ip>
is the controller node's IP address.
4. Populate the Identity service database:
su -s /bin/sh -c "keystone-manage db_sync" keystone
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
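The two setup commands should have created key repositories; a quick check against the standard Fernet and credential key paths:
```bash
ls /etc/keystone/fernet-keys /etc/keystone/credential-keys
# each directory should contain key files named 0, 1, ...
```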
Edit the vim /etc/httpd/conf/httpd.conf
file and set the ServerName
option to the controller node: ServerName controller
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
Listen 5000
Listen 35357

<VirtualHost *:5000>
    WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-public
    WSGIScriptAlias / /usr/bin/keystone-wsgi-public
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    ErrorLogFormat "%{cu}t %M"
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined
    <Directory /usr/bin>
        Require all granted
    </Directory>
</VirtualHost>

<VirtualHost *:35357>
    WSGIDaemonProcess keystone-admin processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP}
    WSGIProcessGroup keystone-admin
    WSGIScriptAlias / /usr/bin/keystone-wsgi-admin
    WSGIApplicationGroup %{GLOBAL}
    WSGIPassAuthorization On
    ErrorLogFormat "%{cu}t %M"
    ErrorLog /var/log/httpd/keystone-error.log
    CustomLog /var/log/httpd/keystone-access.log combined
    <Directory /usr/bin>
        Require all granted
    </Directory>
</VirtualHost>
systemctl enable httpd.service
systemctl start httpd.service
rm -f /var/lib/keystone/keystone.db
export OS_USERNAME=admin
export OS_PASSWORD=root
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://<controller-ip>:5000/v3
export OS_IDENTITY_API_VERSION=3
# For convenience, put these in a script:
cat << EOF > ~/admin-setos.sh
export OS_USERNAME=admin
export OS_PASSWORD=root
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://192.168.13.80:5000/v3
export OS_IDENTITY_API_VERSION=3
EOF
# Or, using the hostname and a separate admin password convention:
cat << EOF > ~/admin-setos.sh
export OS_IDENTITY_API_VERSION=3
export OS_AUTH_URL=http://controller:5000/v3
export OS_USERNAME=admin
export OS_PASSWORD=admin_passwd
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
EOF
Create the service entity and API endpoints
keystone-manage bootstrap --bootstrap-password root --bootstrap-admin-url http://controller:5000/v3/ --bootstrap-internal-url http://controller:5000/v3/ --bootstrap-public-url http://controller:5000/v3/ --bootstrap-region-id RegionOne
This command creates the initial Identity service (Keystone) administrator in OpenStack and initializes the Keystone database. Specifically:
- "keystone-manage" is Keystone's management tool.
- The "bootstrap" subcommand initializes the Keystone system and adds the initial data.
- The "--bootstrap-password" option specifies the admin user's password, here set to "root".
- The "--bootstrap-admin-url", "--bootstrap-internal-url", and "--bootstrap-public-url" options specify the admin, internal, and public URLs of the Keystone API. In this example all three are set to "http://controller:5000/v3/", meaning the Keystone API runs on the controller node on port 5000 with API version v3.
- The "--bootstrap-region-id" option specifies the region ID the Keystone service belongs to, here "RegionOne".
When you run this command, the system creates an administrator named "admin" with the given password, initializes the Keystone database, and creates the necessary base data such as the service entity and its endpoints. This lets the other OpenStack services connect to Keystone and use the Identity service.
Create domains, projects, users, and roles
openstack project create --domain default --description "Service Project" service
openstack project create --domain default --description "Demo Project" demo
# Create users and their passwords
The first way is to specify the password directly on the command line:
openstack user create --domain default --password root admin
openstack user create --domain default --password-prompt demo
# This command creates a user named demo in OpenStack, assigns it to the default domain, and prompts you for the user's password. Specifically:
- The "--domain default" option specifies the user's domain, here "default".
- The "--password-prompt" option prompts you for the password instead of passing it in clear text on the command line.
When you run this command, the system prompts for the demo user's password and creates the user under the default domain.
# Create roles
openstack role create admin
openstack role create user
# Assign users to roles
openstack role add --project service --user admin admin
openstack role add --project demo --user demo user
# Substitute your own values for the parameters in the command below
openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name skywide --os-username skywide token issue
The commands below also create a domain, project, user, and role (recommended for practice):
openstack domain create --description "An Example Domain" domain_example
openstack project create --domain default --description "Service Project" service
openstack domain list
openstack project list
openstack project create --domain default --description "Demo Project" shanke_project
openstack user create --domain default --password shanke_passwd shanke
openstack role create shanke_role
openstack role add --project shanke_project --user shanke shanke_role
openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue
openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name shanke_project --os-username shanke token issue
Here, root is the password you set for the admin user and <controller-ip>
is the controller node's IP address.
cat << EOF > ~/admin-openrc.sh
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=root
export OS_AUTH_URL=http://<controller-ip>:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
EOF
Replace root
and <controller-ip>
with your admin user's password and the controller node's IP address.
Run the script with the source
command, otherwise you will get the error: Missing value auth-url required for auth plugin password
```bash
source ~/admin-openrc.sh
openstack token issue
```
If the output resembles the following, Keystone was installed successfully:
+------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| Field | Value |
+------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+
| expires | 2023-05-30T04:24:44+0000 |
| id | gAAAAABc8iRdH6D4mXvp45jYfWiaLacQsJpOge-yce8t1o7JtwCQ2KSPvS3n5kpG4AHasZa3PLy9v-pN8P-aUkAH7BapUbmldV6hMhqJzNmJv
openstack user list
# First obtain admin credentials
. ~/admin-setos.sh
openstack user create --domain default --password placement_passwd placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
yum install openstack-placement-api -y
vim /etc/placement/placement.conf
[placement_database]
connection = mysql+pymysql://placement:placement_passwd@controller/placement
[api]
auth_strategy = keystone
[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = placement_passwd
su -s /bin/sh -c "placement-manage db sync" placement
# Verify that the tables were created
mysql
use placement;
show tables;
systemctl restart httpd
placement-status upgrade check
vim /etc/httpd/conf.d/00-placement-api.conf
# Add the following block inside the <VirtualHost *:8778> section:
<Directory /usr/bin>
  <IfVersion >= 2.4>
    Require all granted
  </IfVersion>
  <IfVersion < 2.4>
    Order allow,deny
    Allow from all
  </IfVersion>
</Directory>
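After saving the file, restart httpd and probe the Placement endpoint; the API root returns a small JSON version document when the service is reachable (a sanity check, not part of the original notes):
```bash
systemctl restart httpd
curl http://controller:8778   # expect a JSON version document, not a 403 error
```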
Obtain admin credentials to gain access to admin-only CLI commands.
Create the service credentials:
# Create the glance user
openstack user create --domain default --password glance_passwd glance
# Add the admin role to the glance user and service project
openstack role add --project service --user glance admin
# During bootstrap, the admin, internal, and public URLs were generated automatically:
openstack endpoint list
Create the service entity:
openstack service create --name glance --description "OpenStack Image" image
Create the Image service API endpoints:
openstack endpoint create --region RegionOne \
image public http://controller:9292
openstack endpoint create --region RegionOne \
image internal http://controller:9292
openstack endpoint create --region RegionOne \
image admin http://controller:9292
yum install -y openstack-glance
In the vim /etc/glance/glance-api.conf
file, edit the [database]
section to connect to the MariaDB database:[database]
connection = mysql+pymysql://glance:root@<controller-ip>/glance
Here, root is the password you set for Glance and <controller-ip>
is the controller node's IP address.
In the [keystone_authtoken]
section, configure Identity service access:
[keystone_authtoken]
auth_uri = http://<controller-ip>:5000/v3
auth_url = http://<controller-ip>:35357/v3
memcached_servers = <controller-ip>:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = root
Here, root is the password you set for Glance and <controller-ip>
is the controller node's IP address.
In the [paste_deploy]
section, configure use of the Keystone auth flavor:
[paste_deploy]
flavor = keystone
In the [glance_store]
section, configure the local file system store and the image file location:
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
In the vim /etc/glance/glance-registry.conf
file, edit the [database]
section to connect to the MariaDB database (steps 5 and 6 are generally unnecessary and can be skipped):[database]
connection = mysql+pymysql://glance:root@<controller-ip>/glance
Here, root is the password you set for Glance and <controller-ip>
is the controller node's IP address.
In the vim /etc/glance/glance-registry.conf
file, edit the [keystone_authtoken]
section to configure Identity service access:[keystone_authtoken]
auth_uri = http://<controller-ip>:5000/v3
auth_url = http://<controller-ip>:35357/v3
memcached_servers = <controller-ip>:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = root
Here, root is the password you set for Glance and <controller-ip>
is the controller node's IP address.
su -s /bin/sh -c "glance-manage db_sync" glance
systemctl enable --now openstack-glance-api.service
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl start openstack-glance-api.service openstack-glance-registry.service
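The next command expects a cirros.img file in the current directory. One way to obtain it, assuming internet access (the exact CirrOS version here is only an example):
```bash
curl -L -o cirros.img http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img
```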
openstack image create "cirros" --file cirros. img --disk-format qcow2 --container-format bare --public #使用这个命令的时候把镜像文件放在当前目录下
openstack image list
If the output resembles the following, Glance was installed successfully:
+----+--------+
| ID | Name |
+----+--------+
| | cirros |
+----+--------+
. ~/admin-setos.sh
openstack user create --domain default --password root nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
openstack endpoint list
yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y
Edit /etc/nova/nova.conf and complete the following:
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack_rabbit_pass@controller:5672/
# Note: the password in transport_url must match the one given to rabbitmqctl add_user
# (earlier in this guide that password was 'root')
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
connection = mysql+pymysql://nova:nova_passwd@controller/nova_api
[database]
...
connection = mysql+pymysql://nova:nova_passwd@controller/nova
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova_passwd
[vnc]
enabled = true
server_listen = 192.168.122.10
server_proxyclient_address = 192.168.122.10
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement_passwd
Sync the API database
su -s /bin/sh -c "nova-manage api_db sync" nova
Register the cell0
database
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
Create the cell1 cell:
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
# Output ID: bf324d6f-7df0-4fff-92fb-50d55c720c59
Sync the cell0 database
su -s /bin/sh -c "nova-manage db sync" nova
# Sample output:
/usr/lib/python2.7/site-packages/pymysql/cursors.py:170: Warning: (1831, u'Duplicate index `block_device_mapping_instance_uuid_virtual_name_device_name_idx`. This is deprecated and will be disallowed in a future release') result = self._query(query) /usr/lib/python2.7/site-packages/pymysql/cursors.py:170: Warning: (1831, u'Duplicate index `uniq_instances0uuid`. This is deprecated and will be disallowed in a future release') result = self._query(query)
# As long as there is no error, this is fine
Verify that nova cell0 and cell1 are registered correctly:
nova-manage cell_v2 list_cells
Start the services
systemctl enable openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
Check the service status
systemctl status openstack-nova-api.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
Check whether the OpenStack-related services are running
systemctl -a | grep openstack
openstack compute service list
In the /etc/nova/nova.conf
file, edit the [database]
section to connect to the MariaDB database:[database]
connection = mysql+pymysql://nova:root@<controller-ip>/nova
Here, root is the password you set for Nova and <controller-ip>
is the controller node's IP address.
In the /etc/nova/nova.conf
file, edit the [api_database]
section to connect to the MariaDB database:[api_database]
connection = mysql+pymysql://nova:root@<controller-ip>/nova_api
Here, root is the password you set for Nova and <controller-ip>
is the controller node's IP address.
In the /etc/nova/nova.conf
file, edit the [DEFAULT]
section to enable the compute and metadata APIs:[DEFAULT]
enabled_apis = osapi_compute,metadata
In the /etc/nova/nova.conf
file, edit the [keystone_authtoken]
section to configure Identity service access:[keystone_authtoken]
auth_uri = http://<controller-ip>:5000/v3
auth_url = http://<controller-ip>:35357/v3
memcached_servers = <controller-ip>:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = root
Here, root is the password you set for Nova and <controller-ip>
is the controller node's IP address.
In the /etc/nova/nova.conf
file, edit the [glance]
section to locate the Image service:[glance]
api_servers = http://<controller-ip>:9292
Here, <controller-ip>
is the controller node's IP address.
In the /etc/nova/nova.conf
file, edit the [oslo_concurrency]
section to use the lock directory /var/lib/nova/tmp
:[oslo_concurrency]
lock_path = /var/lib/nova/tmp
In the /etc/nova/nova.conf
file, edit the [vnc]
section to enable remote console access:[vnc]
enabled = True
server_listen = $my_ip
server_proxyclient_address = $my_ip
novncproxy_base_url = http://<controller-ip>:6080/vnc_auto.html
Here, <controller-ip>
is the controller node's IP address.
In the /etc/nova/nova.conf
file, edit the [glance]
and [cinder]
sections to disable SSL verification:[glance]
api_version = 2
insecure = True
[cinder]
os_region_name = RegionOne
insecure = True
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage db sync" nova
systemctl enable openstack-nova-api.service \
openstack-nova-consoleauth.service openstack-nova-scheduler.service \
openstack-nova-conductor.service openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service \
openstack-nova-consoleauth.service openstack-nova-scheduler.service \
openstack-nova-conductor.service openstack-nova-novncproxy.service
openstack compute service list
If the output resembles the following, Nova was installed successfully:
+----+------------------+------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | State | Status |
+----+------------------+------+---------+-------+----------------------------+
| 1 | nova-consoleauth | None | internal | down | :-) |
| 2 | nova-scheduler | None | internal | down | :-) |
| 3 | nova-conductor | None | internal | down | :-) |
| 4 | nova-compute | None | nova | down | :-) |
+----+------------------+------+---------+-------+----------------------------+
[root@controller network-scripts]# nova-status upgrade check
+------------------------------------------------------------------+
| Upgrade Check Results |
+------------------------------------------------------------------+
| Check: Cells v2 |
| Result: Failure |
| Details: No host mappings found but there are compute nodes. Run |
| command 'nova-manage cell_v2 simple_cell_setup' and then |
| retry. |
+------------------------------------------------------------------+
| Check: Placement API |
| Result: Success |
| Details: None |
+------------------------------------------------------------------+
| Check: Ironic Flavor Migration |
| Result: Success |
| Details: None |
+------------------------------------------------------------------+
| Check: Cinder API |
| Result: Success |
| Details: None |
+------------------------------------------------------------------+
If you see Result: Failure
above, run:
[root@controller shell]# nova-manage cell_v2 simple_cell_setup
Cell0 is already setup
All hosts are already mapped to cell(s).
Run systemctl start libvirtd.service openstack-nova-compute.service
If the unit stays in 'activating', check the nova log.
If the nova log shows:
`2023-06-06 11:23:29.059 1176 ERROR oslo.messaging._drivers.impl_rabbit [req-9766a5c7-33a7-4fac-91b6-2487f0cfa96d - - - - -] Connection failed: [Errno 113] EHOSTUNREACH (retrying in 32.0 seconds): error: [Errno 113] EHOSTUNREACH
This error indicates that the connection between OpenStack Compute and the RabbitMQ message queue failed, most likely due to a network problem. Check the following:
systemctl status rabbitmq-server
Check that the message queue is configured correctly. Make sure nova.conf has the right RabbitMQ address, username, and password.
Check that your network connectivity is working. You can use ping to test whether the RabbitMQ server is reachable.
Check that the firewall allows access from the compute node to RabbitMQ's TCP port. If necessary, open the firewall to allow inbound traffic on that port.
systemctl stop firewalld
setenforce 0
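A quick reachability test from the compute node, using bash's built-in /dev/tcp pseudo-device (not part of the original notes):
```bash
ping -c 3 controller
# success here means the RabbitMQ port (5672) accepts TCP connections
timeout 3 bash -c 'cat < /dev/null > /dev/tcp/controller/5672' && echo "5672 reachable"
```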
[root@controller shell]# openstack compute service list
+----+----------------+------------+----------+---------+-------+----------------------------+
| ID | Binary | Host | Zone | Status | State | Updated At |
+----+----------------+------------+----------+---------+-------+----------------------------+
| 3 | nova-conductor | controller | internal | enabled | down | 2023-06-07T10:06:49.000000 |
| 5 | nova-scheduler | controller | internal | enabled | down | 2023-06-07T10:06:51.000000 |
| 8 | nova-compute | compute | nova | enabled | down | 2023-06-07T07:24:20.000000 |
+----+----------------+------------+----------+---------+-------+----------------------------+
To start the services that are currently down, use the following commands:
systemctl start openstack-nova-conductor
systemctl start openstack-nova-scheduler
systemctl start openstack-nova-compute
After starting the services, check their status again with openstack compute service list.
192.168.122.161 controller
192.168.122.171 compute
yum install centos-release-openstack-train. noarch -y
yum install https://rdoproject. org/repos/rdo-release. rpm -y
yum install python-openstackclient -y
yum install openstack-selinux -y
yum install openstack-nova-compute -y
Edit /etc/nova/nova.conf on the compute node:
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack_rabbit_pass@controller:5672/
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova_passwd
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = compute
novncproxy_base_url = http://controller:6080/vnc_auto.html
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement_passwd
# Test for CPU virtualization support
egrep -c '(vmx|svm)' /proc/cpuinfo
A non-zero count means hardware virtualization is supported.
If the count is 0, configure [libvirt] in /etc/nova/nova.conf to use QEMU instead of KVM:
[libvirt]
...
virt_type = qemu
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl start libvirtd.service openstack-nova-compute.service
# Verify the nova compute node
# Run the following on the controller node
openstack compute service list --service nova-compute
openstack hypervisor list
# Register new compute hosts manually
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
# Or enable automatic discovery
# When you add new compute nodes, you must run nova-manage cell_v2 discover_hosts on the controller node to register them. Alternatively, set an appropriate interval in /etc/nova/nova.conf:
[scheduler]
discover_hosts_in_cells_interval = 300
Check that the cells and the Placement API are running successfully and that the other necessary prerequisites are in place:
nova-status upgrade check
openstack user create --domain default --password neutron_passwd neutron
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
openstack endpoint list
yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
Edit the /etc/neutron/neutron.conf
file. In the [DEFAULT] section:
core_plugin = ml2
service_plugins =
transport_url = rabbit://openstack:openstack_rabbit_pass@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
# In the [database] section, connect to the MariaDB database:
[database]
connection = mysql+pymysql://neutron:root@<controller-ip>/neutron
# root is the password you set for the Neutron database; <controller-ip> is the controller node's IP address.
# In the [keystone_authtoken] section, configure Identity service authentication:
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron_passwd
# This must match the password set when creating the neutron user (neutron_passwd above).
[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova_passwd
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
In the vim /etc/neutron/plugins/ml2/ml2_conf.ini
file, edit the [ml2]
section to configure the type and mechanism drivers:
[ml2]
type_drivers = flat,vlan,vxlan
#In the ``[ml2]`` section, configure VXLAN self-service (tenant) networks:
tenant_network_types = vxlan
#In the ``[ml2]`` section, enable the Linuxbridge mechanism:
mechanism_drivers = linuxbridge
#In the ``[ml2]`` section, enable the port security extension driver:
extension_drivers = port_security
#In the [ml2_type_flat] section, configure the provider virtual network as a flat network:
[ml2_type_flat]
flat_networks = provider
#In the [securitygroup] section, enable ipset to improve the efficiency of security group rules:
[securitygroup]
# ...
enable_ipset = true
In the /etc/neutron/plugins/ml2/linuxbridge_agent.ini
file: #In the [linux_bridge] section, map the provider virtual network to the physical network interface:
[linux_bridge]
physical_interface_mappings = provider:<interface-name>
# <interface-name> is the name of the physical network interface to use.
#In the [vxlan] section, configure VXLAN:
[vxlan]
enable_vxlan = false
# Recommended: keep VXLAN disabled as above. If you set enable_vxlan = True instead,
# you must also configure the following two options:
# local_ip = <controller-ip>
# l2_population = True
# <controller-ip> is the controller node's IP address.
#In the [securitygroup] section, enable security group support:
[securitygroup]
enable_security_group = True
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
In the /etc/nova/nova.conf
file, edit the [neutron]
section to integrate Nova with Neutron:[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron_passwd
service_metadata_proxy = true
metadata_proxy_shared_secret = METADATA_SECRET
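METADATA_SECRET is a placeholder: pick a secret and use the same value here and in metadata_agent.ini below. One way to generate it, as was done for the admin token earlier:
```bash
openssl rand -hex 10
```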
Edit vim /etc/sysctl.conf
and add the following lines:
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
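For these settings to exist and take effect, the bridge netfilter module must be loaded (a standard step on CentOS 7 kernels):
```bash
modprobe br_netfilter   # provides the net.bridge.* sysctls
sysctl -p               # apply the values from /etc/sysctl.conf
```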
(As above, root is the password you set for Neutron and <controller-ip>
is the controller node's IP address.)
11. Configure the DHCP agent. Edit vim /etc/neutron/dhcp_agent.ini:
[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
Edit the /etc/neutron/metadata_agent.ini
file and complete the following. #In the [DEFAULT] section, configure the metadata host and the shared secret:
[DEFAULT]
nova_metadata_ip = controller
metadata_proxy_shared_secret = METADATA_SECRET
The [neutron] section of /etc/nova/nova.conf was already configured above with the metadata proxy enabled (service_metadata_proxy = true); just make sure metadata_proxy_shared_secret there matches the value used in metadata_agent.ini.
The Networking service initialization scripts expect the symbolic link /etc/neutron/plugin.ini to point to the ML2 plug-in configuration file /etc/neutron/plugins/ml2/ml2_conf.ini.
If the symlink does not exist, create it with the following command: ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron. conf \
--config-file /etc/neutron/plugins/ml2/ml2_conf. ini upgrade head" neutron
systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
systemctl start neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service
tail -f /var/log/neutron/*.log   # make sure there are no errors
11. Restart the Compute API service:
rpm -qa | grep openstack-nova
yum install openstack-nova-api
Note that you may need to install other packages depending on your environment and needs, such as openstack-nova-conductor
and openstack-nova-scheduler.
systemctl restart openstack-nova-api.service
openstack network agent list
If the output resembles the following, Neutron was installed successfully:
+--------------------------------------+--------------------+---------+-------+----------------+---------------------------+
| ID | Agent Type | Host | Alive | State | Binary |
+--------------------------------------+--------------------+---------+-------+----------------+---------------------------+
| 3a3991dd-5d36-47ea-aabf-a43e45a8d3d5 | DHCP agent | compute | :-) | :-) | neutron-dhcp-agent |
| 7b57f6c0-2c42-4b09-b8b9-9083e053f0fe | Metadata agent | compute | :-) | :-) | neutron-metadata-agent |
| c542daa1-9ce7-44eb-9db6-b83779110cad | Linux bridge agent | compute | :-) | :-) | neutron-linuxbridge-agent |
| f25a3eb8-e34d-46b5-84fa-1fc2520f2257 | L3 agent | compute | :-) | :-) | neutron-l3-agent |
| ff2e073a-076f-4a61-8274-dcf9faab98d9 | Open vSwitch agent | compute | :-) | :-) | neutron-openvswitch-agent |
+--------------------------------------+--------------------+---------+-------+----------------+---------------------------+
yum install openstack-neutron-linuxbridge ebtables ipset
/etc/neutron/neutron.conf
[DEFAULT]
transport_url = rabbit://openstack:openstack_rabbit_pass@controller
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_uri = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron_passwd
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
/etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:eth0   # change eth0 to this host's NIC name
[vxlan]
enable_vxlan = false
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
4. Edit the configuration file vim /etc/sysctl.conf and add:
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
As on the controller, run modprobe br_netfilter and sysctl -p to apply these settings.
Edit the /etc/nova/nova.conf
file and complete the following. In the [neutron]
section, configure access parameters:[neutron]
url = http://controller:9696
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron_passwd
systemctl restart openstack-nova-compute.service
systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service
openstack network agent list
yum install -y openstack-dashboard
In the /etc/openstack-dashboard/local_settings
file, set ALLOWED_HOSTS
to include the controller node's IP address: ALLOWED_HOSTS = ['<controller-ip>', 'localhost', '127.0.0.1']
Here, <controller-ip>
is the controller node's IP address.
In the /etc/openstack-dashboard/local_settings
file, set OPENSTACK_HOST
and OPENSTACK_KEYSTONE_URL
to the controller node's IP address: OPENSTACK_HOST = "<controller-ip>"
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST
Here, <controller-ip>
is the controller node's IP address.
In the /etc/openstack-dashboard/local_settings
file, set OPENSTACK_KEYSTONE_DEFAULT_ROLE
to user
: OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"
systemctl enable httpd. service
systemctl restart httpd. service
Open http://<controller-ip>/dashboard
in a browser and log in to the OpenStack dashboard. If the page loads, Horizon was installed successfully. Error: RuntimeError: Unable to create a new session key. It is likely that the cache is unavailable.
Fix:
[root@all ~]# vim /etc/openstack-dashboard/local_settings
# Change SESSION_ENGINE = 'django.contrib.sessions.backends.cache' to
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
# Restart the services
[root@all ~]# systemctl restart httpd memcached
openstack network create --external --provider-physical-network provider \
--provider-network-type flat public
openstack subnet create --network public --subnet-range <CIDR> --gateway <gateway-ip> \
--allocation-pool start=<start-ip>,end=<end-ip> --no-dhcp --dns-nameserver <dns-server> public_subnet
Here, <CIDR> is the external subnet's CIDR, <gateway-ip> is the external subnet's gateway IP address, <start-ip> and <end-ip> are the first and last allocatable IP addresses, and <dns-server> is the DNS server's IP address.
openstack network create private
openstack subnet create --network private --subnet-range <CIDR> --dns-nameserver <dns-server> \
private_subnet
Here, <CIDR> is the internal subnet's CIDR and <dns-server> is the DNS server's IP address.
openstack router create router
openstack router set router --external-gateway public
openstack router add subnet router private_subnet
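Optional sanity checks after creating the networks and the router, using standard client commands:
```bash
openstack network list
openstack subnet list
openstack router show router   # the external gateway should reference the public network
```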
At this point, you have completed all the steps to install OpenStack Train on CentOS 7.
The smallest default flavor requires 512 MB of RAM. For environments whose compute nodes have less than 4 GB of RAM, we recommend creating an m1.nano
flavor that requires only 64 MB. For testing purposes only, use the m1.nano
flavor to boot the CirrOS image.
[root@controller home]# openstack flavor create --id 1 --vcpus 4 --ram 2048 --disk 20 m2.nano
+----------------------------+---------+
| Field | Value |
+----------------------------+---------+
| OS-FLV-DISABLED:disabled | False |
| OS-FLV-EXT-DATA:ephemeral | 0 |
| disk | 20 |
| id | 1 |
| name | m2.nano |
| os-flavor-access:is_public | True |
| properties | |
| ram | 2048 |
| rxtx_factor | 1.0 |
| swap | |
| vcpus | 4 |
+----------------------------+---------+
This is an OpenStack command for creating a flavor named m1.nano. Explanation:
- --id 0: the flavor's unique identifier, here 0.
- --vcpus 1: the number of virtual CPU cores, here 1.
- --ram 64: the VM memory size in MB, here 64 MB.
- --disk 1: the VM root disk size in GB, here 1 GB.
- m1.nano: the flavor's name.
The command therefore creates a flavor named m1.nano with 1 vCPU, 64 MB of RAM, and a 1 GB root disk. Note that this flavor is quite small, suited to low-load applications or testing.
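For reference, the exact command the explanation above describes (the example at the top of this section created the larger m2.nano instead):
```bash
openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano
```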
Most cloud images support public key authentication
rather than conventional password login. Before launching an instance, you must add a public key to the Compute service.
Import the demo
project credentials:
$ . demo-openrc
Generate a key pair and add a public key:
$ ssh-keygen -q -N ""
$ openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey
+-------------+-------------------------------------------------+
| Field | Value |
+-------------+-------------------------------------------------+
| fingerprint | ee:3d:2e:97:d4:e2:6a:54:6d:0d:ce:43:39:2c:ba:4d |
| name | mykey |
| user_id | 58126687cbcc4888bfa9ab73a2256f27 |
+-------------+-------------------------------------------------+
Note
Alternatively, you can skip the ssh-keygen
command and use an existing public key.
Verify that the key pair was added:
$ openstack keypair list
+-------+-------------------------------------------------+
| Name | Fingerprint |
+-------+-------------------------------------------------+
| mykey | ee:3d:2e:97:d4:e2:6a:54:6d:0d:ce:43:39:2c:ba:4d |
+-------+-------------------------------------------------+
By default, security groups apply to all instances and include firewall rules that deny remote access to instances. For Linux images such as CirrOS, we recommend allowing at least ICMP (ping) and secure shell (SSH).
Add rules to the default
security group:
$ openstack security group rule create --proto icmp default
+-------------------+--------------------------------------+
| Field | Value |
+-------------------+--------------------------------------+
| created_at | 2016-10-05T09:52:31Z |
| description | |
| direction | ingress |
| ethertype | IPv4 |
| headers | |
| id | 6ee8d630-9803-4d3d-9aea-8c795abbedc2 |
| port_range_max | None |
| port_range_min | None |
| project_id | 77ae8d7104024123af342ffb0a6f1d88 |
| project_id | 77ae8d7104024123af342ffb0a6f1d88 |
| protocol | icmp |
| remote_group_id | None |
| remote_ip_prefix | 0.0.0.0/0 |
| revision_number | 1 |
| security_group_id | 4ceee3d4-d2fe-46c1-895c-382033e87b0d |
| updated_at | 2016-10-05T09:52:31Z |
+-------------------+--------------------------------------+
$ openstack security group rule create --proto tcp --dst-port 22 default
+-------------------+--------------------------------------+
| Field | Value |
+-------------------+--------------------------------------+
| created_at | 2016-10-05T09:54:50Z |
| description | |
| direction | ingress |
| ethertype | IPv4 |
| headers | |
| id | 3cd0a406-43df-4741-ab29-b5e7dcb7469d |
| port_range_max | 22 |
| port_range_min | 22 |
| project_id | 77ae8d7104024123af342ffb0a6f1d88 |
| project_id | 77ae8d7104024123af342ffb0a6f1d88 |
| protocol | tcp |
| remote_group_id | None |
| remote_ip_prefix | 0.0.0.0/0 |
| revision_number | 1 |
| security_group_id | 4ceee3d4-d2fe-46c1-895c-382033e87b0d |
| updated_at | 2016-10-05T09:54:50Z |
+-------------------+--------------------------------------+