Virtual machine setup reference: https://blog.csdn.net/Happy_Sunshine_Boy/article/details/89039806
Hostname | IP | OS | CPU | Virtualization | Memory | Disk | Role |
---|---|---|---|---|---|---|---|
controller | 192.168.120.111 | CentOS-7.4-x86_64 | 2 | AMD-V or Intel VT enabled | 4G | 50G | control node |
compute | 192.168.120.112 | CentOS-7.4-x86_64 | 2 | AMD-V or Intel VT enabled | 2G | 50G | compute node |
cinder | 192.168.120.113 | CentOS-7.4-x86_64 | 2 | AMD-V or Intel VT enabled | 2G | 50G | block storage node |
Unless otherwise noted, the following steps are performed identically on all three hosts.
Disable the firewall
systemctl stop firewalld
systemctl disable firewalld
Disable SELinux
sestatus # check the current status
vim /etc/sysconfig/selinux
SELINUX=disabled
# change enforcing to disabled to turn SELinux off permanently (takes effect after a reboot)
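Editing /etc/sysconfig/selinux only takes effect after a reboot; optionally, enforcement can also be turned off for the current boot:
setenforce 0 # switch to permissive mode immediately
sestatus # should now report permissive (or disabled after a reboot)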
Configure hosts
vim /etc/hosts
192.168.120.111 controller
192.168.120.112 compute
192.168.120.113 cinder
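Optionally confirm that name resolution works from each host using the entries above:
ping -c 2 controller
ping -c 2 compute
ping -c 2 cinder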
Configure the Aliyun yum repository
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
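After swapping the repo file, rebuild the yum metadata cache so the new mirror is actually used:
yum clean all
yum makecache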
Install and configure chrony on the controller node
yum install chrony -y
vim /etc/chrony.conf
server controller iburst
# all nodes synchronize time from the controller node
allow 192.168.120.0/24
# allow hosts on this subnet to synchronize from this server
systemctl enable chronyd
systemctl restart chronyd
Install and configure chrony on the compute node
yum install chrony -y
vim /etc/chrony.conf
server controller iburst
systemctl enable chronyd
systemctl restart chronyd
Install and configure chrony on the cinder node
yum install chrony -y
vim /etc/chrony.conf
server controller iburst
systemctl enable chronyd
systemctl restart chronyd
Verify the clock synchronization service
chronyc sources
yum install centos-release-openstack-queens -y
yum upgrade -y # upgrade the packages on the hosts
yum install python-openstackclient -y # install the OpenStack client
yum install openstack-selinux -y # install openstack-selinux so OpenStack security policies are managed automatically
yum install mariadb mariadb-server python2-PyMySQL -y
vim /etc/my.cnf.d/mariadb-server.cnf
[mysqld]
bind-address = 192.168.120.111 # the controller node IP, so other nodes can reach the database over the management network
default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
systemctl enable mariadb
systemctl start mariadb
mysql_secure_installation
Set the database root password to bigdata and answer y to all prompts.
yum install rabbitmq-server -y
systemctl enable rabbitmq-server
systemctl start rabbitmq-server
systemctl status rabbitmq-server
netstat -nltp | grep 5672
rabbitmqctl add_user openstack bigdata # create the openstack user with password bigdata
rabbitmqctl set_permissions openstack ".*" ".*" ".*" # grant the new user configure, write, and read permissions
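To confirm the account and its permissions took effect, standard rabbitmqctl queries can be used:
rabbitmqctl list_users # the openstack user should be listed
rabbitmqctl list_permissions # openstack should show ".*" ".*" ".*"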
yum install memcached python-memcached -y
vim /etc/sysconfig/memcached
PORT="11211"
USER="memcached"
MAXCONN="1024"
CACHESIZE="64"
OPTIONS="-l 127.0.0.1,::1,controller"
systemctl enable memcached
systemctl start memcached
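As with RabbitMQ above, a quick netstat check confirms memcached is listening on port 11211:
netstat -nltp | grep 11211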
yum install etcd -y
vim /etc/etcd/etcd.conf
#[Member]
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="http://192.168.120.111:2380"
ETCD_LISTEN_CLIENT_URLS="http://192.168.120.111:2379"
ETCD_NAME="controller"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.120.111:2380"
ETCD_ADVERTISE_CLIENT_URLS="http://192.168.120.111:2379"
ETCD_INITIAL_CLUSTER="controller=http://192.168.120.111:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster-01"
ETCD_INITIAL_CLUSTER_STATE="new"
systemctl enable etcd
systemctl start etcd
systemctl status etcd
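To verify the single-member cluster is reachable, etcdctl (shipped with the etcd package) can query the client endpoint; exact flags may differ slightly between etcd versions:
etcdctl --endpoints=http://192.168.120.111:2379 member list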
Log in to the database
mysql -uroot -p
Create the keystone database
CREATE DATABASE keystone;
Grant access from localhost
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' IDENTIFIED BY 'bigdata';
Grant access from any host
GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' IDENTIFIED BY 'bigdata';
FLUSH PRIVILEGES;
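Before moving on, a quick login with the new credentials confirms the grants work (password bigdata as set above):
mysql -ukeystone -pbigdata -h controller -e "SHOW DATABASES;" # the keystone database should appear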
yum install openstack-keystone httpd mod_wsgi -y
cp /etc/keystone/keystone.conf{,.bak}
grep -Ev '^$|#' /etc/keystone/keystone.conf.bak > /etc/keystone/keystone.conf
yum install openstack-utils -y
openstack-config --set /etc/keystone/keystone.conf DEFAULT admin_token ADMIN_TOKEN
openstack-config --set /etc/keystone/keystone.conf database connection mysql+pymysql://keystone:bigdata@controller/keystone
openstack-config --set /etc/keystone/keystone.conf token provider fernet
The resulting /etc/keystone/keystone.conf should contain:
[DEFAULT]
admin_token = ADMIN_TOKEN
[database]
connection = mysql+pymysql://keystone:bigdata@controller/keystone
[token]
provider = fernet
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
keystone-manage bootstrap --bootstrap-password bigdata --bootstrap-admin-url http://controller:35357/v3/ --bootstrap-internal-url http://controller:5000/v3/ --bootstrap-public-url http://controller:5000/v3/ --bootstrap-region-id RegionOne
vim /etc/httpd/conf/httpd.conf
ServerName controller
ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/
systemctl enable httpd
systemctl start httpd
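A simple curl against both endpoints confirms Keystone is being served by httpd; each should return a JSON version document:
curl http://controller:5000/v3
curl http://controller:35357/v3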
export OS_USERNAME=admin
export OS_PASSWORD=bigdata
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:35357/v3
export OS_IDENTITY_API_VERSION=3
openstack project create --domain default --description "Service Project" service
openstack project create --domain default --description "Demo Project" demo
openstack user create --domain default --password bigdata demo
openstack role add --project demo --user demo user
This grants the user role to the demo user on the demo project.
openstack --os-auth-url http://controller:35357/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name admin --os-username admin token issue
openstack --os-auth-url http://controller:5000/v3 --os-project-domain-name Default --os-user-domain-name Default --os-project-name demo --os-username demo token issue
vim admin-openrc
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=bigdata
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
vim demo-openrc
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=demo
export OS_USERNAME=demo
export OS_PASSWORD=bigdata
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
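Usage: source one of the files and request a token; a token being issued confirms both the environment file and Keystone are working:
source ~/admin-openrc
openstack token issue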
mysql -u root -p
CREATE DATABASE glance;
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'localhost' IDENTIFIED BY 'bigdata';
GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'bigdata';
FLUSH PRIVILEGES;
openstack user create --domain default --password bigdata glance
openstack role add --project service --user glance admin
openstack service create --name glance --description "OpenStack Image" image
openstack endpoint create --region RegionOne image public http://controller:9292
openstack endpoint create --region RegionOne image internal http://controller:9292
openstack endpoint create --region RegionOne image admin http://controller:9292
yum install openstack-glance -y
mkdir /var/lib/glance/images
cd /var/lib
chown -hR glance:glance glance
vim /etc/glance/glance-api.conf
[database]
connection = mysql+pymysql://glance:bigdata@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = bigdata
[paste_deploy]
flavor = keystone
[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images
vim /etc/glance/glance-registry.conf
[database]
connection = mysql+pymysql://glance:bigdata@controller/glance
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = glance
password = bigdata
[paste_deploy]
flavor = keystone
systemctl enable openstack-glance-api
systemctl start openstack-glance-api
systemctl status openstack-glance-api
systemctl enable openstack-glance-registry
systemctl start openstack-glance-registry
systemctl status openstack-glance-registry
source ~/admin-openrc
wget http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-disk.img
# download a small Linux image for testing (a download manager such as Xunlei may be faster)
openstack image create "cirros" --file cirros-0.3.5-x86_64-disk.img --disk-format qcow2 --container-format bare --public
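Verify the upload; the cirros image should be listed with an active status:
openstack image list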
mysql -uroot -p
CREATE DATABASE nova;
CREATE DATABASE nova_api;
CREATE DATABASE nova_cell0;
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'localhost' IDENTIFIED BY 'bigdata';
GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' IDENTIFIED BY 'bigdata';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'localhost' IDENTIFIED BY 'bigdata';
GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' IDENTIFIED BY 'bigdata';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'localhost' IDENTIFIED BY 'bigdata';
GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' IDENTIFIED BY 'bigdata';
source ~/admin-openrc # load the admin environment variables
openstack user create --domain default --password bigdata nova
openstack role add --project service --user nova admin
openstack service create --name nova --description "OpenStack Compute" compute
openstack endpoint create --region RegionOne compute public http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute internal http://controller:8774/v2.1
openstack endpoint create --region RegionOne compute admin http://controller:8774/v2.1
openstack user create --domain default --password bigdata placement
openstack role add --project service --user placement admin
openstack service create --name placement --description "Placement API" placement
openstack endpoint create --region RegionOne placement public http://controller:8778
openstack endpoint create --region RegionOne placement internal http://controller:8778
openstack endpoint create --region RegionOne placement admin http://controller:8778
yum install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api -y
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis=osapi_compute,metadata
transport_url=rabbit://openstack:bigdata@controller
my_ip=192.168.120.111
use_neutron=true
firewall_driver=nova.virt.firewall.NoopFirewallDriver
[api_database]
connection=mysql+pymysql://nova:bigdata@controller/nova_api
[database]
connection=mysql+pymysql://nova:bigdata@controller/nova
[api]
auth_strategy=keystone
[keystone_authtoken]
auth_uri=http://controller:5000
auth_url=http://controller:35357
memcached_servers=controller:11211
auth_type=password
project_domain_name = default
user_domain_name = default
project_name = service
username = nova
password = bigdata
[vnc]
enabled=true
server_listen=$my_ip
server_proxyclient_address=$my_ip
[glance]
api_servers=http://controller:9292
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[placement]
os_region_name=RegionOne
auth_type=password
auth_url=http://controller:35357/v3
project_name=service
project_domain_name=Default
username=placement
user_domain_name=Default
password=bigdata
Due to a packaging bug, access to the Placement API must be enabled explicitly; append the following to the end of the configuration file.
vim /etc/httpd/conf.d/00-nova-placement-api.conf
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>
systemctl restart httpd
su -s /bin/sh -c "nova-manage api_db sync" nova
su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova
su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova
su -s /bin/sh -c "nova-manage db sync" nova
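As in the official guide, verify that cell0 and cell1 are registered before starting the services:
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova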
systemctl enable openstack-nova-api.service
systemctl enable openstack-nova-consoleauth.service
systemctl enable openstack-nova-scheduler.service
systemctl enable openstack-nova-conductor.service
systemctl enable openstack-nova-novncproxy.service
systemctl start openstack-nova-api.service
systemctl status openstack-nova-api.service
systemctl start openstack-nova-consoleauth.service
systemctl status openstack-nova-consoleauth.service
systemctl start openstack-nova-scheduler.service
systemctl status openstack-nova-scheduler.service
systemctl start openstack-nova-conductor.service
systemctl status openstack-nova-conductor.service
systemctl start openstack-nova-novncproxy.service
systemctl status openstack-nova-novncproxy.service
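With the controller services running, the consoleauth, scheduler, and conductor services should report state up:
source ~/admin-openrc
openstack compute service list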
Install and configure nova-compute on the compute node
yum install openstack-nova-compute -y
vim /etc/nova/nova.conf
[DEFAULT]
my_ip = 192.168.120.112 # the compute node's IP
use_neutron=true
firewall_driver=nova.virt.firewall.NoopFirewallDriver
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:bigdata@controller
[api]
auth_strategy=keystone
[keystone_authtoken]
auth_uri = http://192.168.120.111:5000 # the controller node's IP
auth_url = http://controller:35357
memcached_servers=controller:11211
auth_type=password
project_domain_name=default
user_domain_name=default
project_name=service
username=nova
password=bigdata
[vnc]
enabled=true
server_listen=0.0.0.0
server_proxyclient_address=$my_ip
novncproxy_base_url=http://controller:6080/vnc_auto.html
[glance]
api_servers=http://controller:9292
[oslo_concurrency]
lock_path=/var/lib/nova/tmp
[placement]
os_region_name=RegionOne
auth_type = password
auth_url=http://controller:35357/v3
project_name = service
project_domain_name = Default
user_domain_name = Default
username = placement
password = bigdata
[libvirt]
virt_type = qemu
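virt_type = qemu is the safe choice inside a virtual machine; as the official guide suggests, the host can first be checked for hardware acceleration, and if the count below is one or more, virt_type = kvm may be used instead:
egrep -c '(vmx|svm)' /proc/cpuinfo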
systemctl enable libvirtd.service
systemctl restart libvirtd.service
systemctl enable openstack-nova-compute.service
systemctl start openstack-nova-compute.service
source ~/admin-openrc # reload the environment variables (needed again after the VM reboots)
openstack compute service list --service nova-compute
su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova
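Rather than running discover_hosts by hand whenever a compute node is added, the controller's nova.conf can do it on a schedule; an optional setting from the official guide (interval in seconds):
[scheduler]
discover_hosts_in_cells_interval = 300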
mysql -uroot -p
CREATE DATABASE neutron;
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'localhost' IDENTIFIED BY 'bigdata';
GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' IDENTIFIED BY 'bigdata';
source ~/admin-openrc
openstack user create --domain default --password bigdata neutron
openstack role add --project service --user neutron admin
openstack service create --name neutron --description "OpenStack Networking" network
openstack endpoint create --region RegionOne network public http://controller:9696
openstack endpoint create --region RegionOne network internal http://controller:9696
openstack endpoint create --region RegionOne network admin http://controller:9696
yum install -y openstack-neutron openstack-neutron-ml2 openstack-neutron-linuxbridge ebtables
vim /etc/neutron/neutron.conf
[database]
connection = mysql+pymysql://neutron:bigdata@controller/neutron
[DEFAULT]
auth_strategy = keystone
core_plugin = ml2
# leaving service_plugins empty disables additional plugins
service_plugins =
transport_url = rabbit://openstack:bigdata@controller
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = bigdata
[nova]
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = bigdata
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
vim /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers = flat,vlan
# leaving tenant_network_types empty disables self-service (tenant) networks
tenant_network_types =
mechanism_drivers = linuxbridge
extension_drivers = port_security
[ml2_type_flat]
flat_networks = provider
[securitygroup]
enable_ipset = true
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens33
[vxlan]
enable_vxlan = false
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
vim /etc/neutron/dhcp_agent.ini
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true
vim /etc/neutron/metadata_agent.ini
nova_metadata_host = controller
metadata_proxy_shared_secret = bigdata
vim /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = bigdata
service_metadata_proxy = true
metadata_proxy_shared_secret = bigdata
ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini
su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron
systemctl restart openstack-nova-api.service
systemctl enable neutron-server.service
systemctl enable neutron-linuxbridge-agent.service
systemctl enable neutron-dhcp-agent.service
systemctl enable neutron-metadata-agent.service
systemctl start neutron-server.service
systemctl status neutron-server.service
systemctl start neutron-linuxbridge-agent.service
systemctl status neutron-linuxbridge-agent.service
systemctl start neutron-dhcp-agent.service
systemctl status neutron-dhcp-agent.service
systemctl start neutron-metadata-agent.service
systemctl status neutron-metadata-agent.service
Install and configure the Neutron agents on the compute node
yum install openstack-neutron-linuxbridge ebtables ipset -y
vim /etc/neutron/neutron.conf
[DEFAULT]
auth_strategy = keystone
transport_url = rabbit://openstack:bigdata@controller
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = bigdata
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = provider:ens33
[vxlan]
enable_vxlan = false
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver
vim /etc/nova/nova.conf
[neutron]
url = http://controller:9696
auth_url = http://controller:35357
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = bigdata
systemctl restart openstack-nova-compute.service
systemctl enable neutron-linuxbridge-agent.service
systemctl start neutron-linuxbridge-agent.service
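Back on the controller, verify that all four agents registered: a Linux bridge agent per node plus the DHCP and metadata agents, all with state up:
source ~/admin-openrc
openstack network agent list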
Install and configure the dashboard (Horizon) on the controller node
yum install openstack-dashboard -y
vim /etc/openstack-dashboard/local_settings
OPENSTACK_HOST = "controller"
OPENSTACK_KEYSTONE_DEFAULT_ROLE = "admin"
ALLOWED_HOSTS = ['*']
# Configure memcached session storage
# (comment out the file's default CACHES block, around lines 166-170, and uncomment the memcached one, around lines 159-164)
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
CACHES = {
    'default': {
        'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
        'LOCATION': 'controller:11211',
    },
}
OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST  # enable Identity API version 3
OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True  # enable multi-domain support
# configure the API versions
OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 2,
}
OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"
OPENSTACK_NEUTRON_NETWORK = {
    'enable_router': False,
    'enable_quotas': False,
    'enable_distributed_router': False,
    'enable_ha_router': False,
    'enable_lb': False,
    'enable_firewall': False,
    'enable_vpn': False,  # assumed: the key name was masked in the source; 'enable_vpn' matches the Queens install guide
    'enable_fip_topology_check': False,
}
vim /etc/httpd/conf.d/openstack-dashboard.conf
WSGISocketPrefix run/wsgi
WSGIApplicationGroup %{GLOBAL}
systemctl restart httpd.service
systemctl restart memcached.service
http://192.168.120.111/dashboard
Domain: default
Username: admin
Password: bigdata
mysql -uroot -p
CREATE DATABASE cinder;
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'localhost' IDENTIFIED BY 'bigdata';
GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' IDENTIFIED BY 'bigdata';
openstack user create --domain default --password bigdata cinder
openstack role add --project service --user cinder admin
openstack service create --name cinderv2 --description "OpenStack Block Storage" volumev2
openstack service create --name cinderv3 --description "OpenStack Block Storage" volumev3
# the parentheses are escaped so the shell does not interpret them
openstack endpoint create --region RegionOne volumev2 public http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 internal http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev2 admin http://controller:8776/v2/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 public http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 internal http://controller:8776/v3/%\(project_id\)s
openstack endpoint create --region RegionOne volumev3 admin http://controller:8776/v3/%\(project_id\)s
yum install openstack-cinder -y
vim /etc/cinder/cinder.conf
[database]
connection = mysql+pymysql://cinder:bigdata@controller/cinder
[DEFAULT]
my_ip = 192.168.120.111
auth_strategy = keystone
transport_url = rabbit://openstack:bigdata@controller
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = bigdata
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
vim /etc/nova/nova.conf
[cinder]
os_region_name = RegionOne
systemctl restart openstack-nova-api.service
systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
systemctl status openstack-cinder-api.service openstack-cinder-scheduler.service
Install and configure LVM on the cinder node
yum install lvm2 -y
systemctl enable lvm2-lvmetad.service
systemctl start lvm2-lvmetad.service
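The [lvm] backend configured below expects a volume group named cinder-volumes; assuming the cinder node has a spare second disk at /dev/sdb (matching the filter set next), create it first:
pvcreate /dev/sdb
vgcreate cinder-volumes /dev/sdb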
vim /etc/lvm/lvm.conf
filter = [ "a/sdb/", "r/.*/"]
yum install openstack-cinder targetcli python-keystone -y
vim /etc/cinder/cinder.conf
[database]
connection = mysql+pymysql://cinder:bigdata@controller/cinder
[DEFAULT]
enabled_backends = lvm
my_ip = 192.168.120.113 # this node's (cinder) management IP
auth_strategy = keystone
transport_url = rabbit://openstack:bigdata@controller
glance_api_servers = http://controller:9292
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = bigdata
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
iscsi_protocol = iscsi
iscsi_helper = lioadm
[oslo_concurrency]
lock_path = /var/lib/cinder/tmp
systemctl enable openstack-cinder-volume.service target.service
systemctl start openstack-cinder-volume.service target.service
systemctl status openstack-cinder-volume.service target.service
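Finally, on the controller, both the scheduler and the lvm volume service should report state up:
source ~/admin-openrc
openstack volume service list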
References:
https://docs.openstack.org/ocata/zh_CN/install-guide-rdo
https://blog.51cto.com/13643643/2171262