OpenStack Train Deployment (Complete Guide)

To build the OpenStack platform, prepare two virtual machines in VMware, one as the controller node and one as the compute node. The basic VM configuration in VMware is described below.

The VMware NAT-mode virtual network uses the 192.168.0.0/24 subnet; these addresses are also used for web access to the environment:

    The controller node uses IP 192.168.0.60
    The compute node uses IP 192.168.0.61

Configure the network interface

1. Use the vi text editor to configure the VM's network interface

#controller

[root@controller ~]# vi /etc/sysconfig/network-scripts/ifcfg-ens33                # ifcfg-<name of the NIC to configure>

BOOTPROTO=dhcp            # change to static (fixed IP)

ONBOOT=no                 # change to yes so the interface starts at boot

# append the following at the end of the file

IPADDR=192.168.0.60            # IP address, chosen from the subnet set in the Virtual Network Editor

NETMASK=255.255.255.0            # subnet mask; PREFIX=24 also works

GATEWAY=192.168.0.2            # gateway

DNS1=114.114.114.114             # DNS resolver

DNS2=8.8.8.8                     # backup DNS resolver

# :wq (save and quit)

Change the hostnames

# run on controller

[root@controller ~]# hostnamectl set-hostname controller

# run on compute

[root@compute ~]# hostnamectl set-hostname compute

## reconnect the terminal after the change

## in MobaXterm: press Ctrl+D to exit, then R to log back in

## in SecureCRT: press Ctrl+D to exit, then Enter to log back in

## running bash directly also works, but scripts run later may complain

## su also makes the new hostname take effect
[root@controller ~]# systemctl restart network              # first way to restart the network
[root@controller ~]# service network restart                # second way to restart the network

Configure name resolution

[root@controller ~]# vim /etc/hosts

# append these two lines (IP address + hostname)

192.168.0.60    controller

192.168.0.61    compute



[root@controller ~]# scp /etc/hosts 192.168.0.61:/etc/hosts
# Copy the hosts file from the controller node to the compute node so you don't have to retype it on both sides; this convenience becomes even more obvious later on.

# From this point on, the hostnames (controller, compute) can be used in place of the IP addresses.

Disable the firewall and SELinux

 sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config;setenforce 0;systemctl stop firewalld.service ;systemctl disable firewalld.service 



  systemctl status firewalld
  getenforce

Configure the network time service (chrony)

 yum install -y  chrony
 vim /etc/chrony.conf
 systemctl enable  chronyd.service;systemctl start chronyd.service
 systemctl status chronyd
 chronyc sources -v
 date
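The contents of /etc/chrony.conf are not shown above. A minimal sketch, assuming the controller serves time to the 192.168.0.0/24 management network and the compute node syncs from it:

# controller: /etc/chrony.conf - keep the default server/pool lines and add
allow 192.168.0.0/24

# compute: /etc/chrony.conf - comment out the default server/pool lines and add
server controller iburst

After editing, restart chronyd on both nodes; chronyc sources -v on the compute node should then list controller as a time source.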

Install the OpenStack packages

yum list | grep openstack*

# on all nodes
yum install -y centos-release-openstack-train.noarch 
# this repository package must be installed first, before the packages below

yum install python-openstackclient openstack-selinux -y

Install the SQL database (MariaDB)

# controller node
yum install mariadb mariadb-server python2-PyMySQL -y

vim /etc/my.cnf.d/openstack.cnf

[mysqld]
bind-address = 192.168.0.60

default-storage-engine = innodb
innodb_file_per_table = on
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8



systemctl enable mariadb.service;systemctl start mariadb.service

mysql_secure_installation
press Enter (the current root password is empty)
Set root password? [Y/n] y            # set the new root password to 123

Remove anonymous users? [Y/n] y

Disallow root login remotely? [Y/n] n

Remove test database and access to it? [Y/n] y

Reload privilege tables now? [Y/n] y
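A quick sanity check that the database accepts the root password chosen above (123 in this guide):

mysql -u root -p123 -e "SELECT VERSION();"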

Message queue (RabbitMQ)

yum install -y rabbitmq-server 

systemctl enable rabbitmq-server.service

systemctl restart rabbitmq-server.service

rabbitmqctl add_user openstack openstack123

rabbitmqctl set_permissions openstack ".*" ".*" ".*"

# check that the user was created
 rabbitmqctl list_users

# list the available plugins
 rabbitmq-plugins list
 
# enable the web management UI
rabbitmq-plugins enable rabbitmq_management rabbitmq_management_agent   
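With the management plugin enabled, the web UI listens on port 15672. A quick reachability check, plus an optional tag so the openstack user can log in to the UI (the tag is an extra convenience, not required by OpenStack itself):

ss -tnlp | grep 15672                                # the management UI should be listening
rabbitmqctl set_user_tags openstack administrator    # optional: allow the openstack user to log in to the UI
# then browse to http://192.168.0.60:15672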

Install the memcached service (in-memory caching)

yum install -y memcached python-memcached
vim /etc/sysconfig/memcached
# Configure the service to use the management IP address of the controller node, so that other nodes can reach it over the management network:

OPTIONS="-l 127.0.0.1,::1,controller"

 systemctl enable memcached.service;systemctl start memcached.service
 
 # check the running state
 systemctl status  memcached.service
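To confirm memcached is actually listening on the management address (port 11211 is the default):

ss -tnlp | grep 11211
memcached-tool controller:11211 stats                # memcached-tool ships with the memcached package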

Install the Keystone identity service

mysql -p123          # the database root password set during the MariaDB installation above
mysql> CREATE DATABASE keystone; 
mysql> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%'  IDENTIFIED BY  'keystone123'; 
mysql> exit
yum install openstack-keystone httpd mod_wsgi -y

cp /etc/keystone/keystone.conf{,.bak}
grep -Ev "^$|#" /etc/keystone/keystone.conf.bak > /etc/keystone/keystone.conf
vim /etc/keystone/keystone.conf

[database]
connection = mysql+pymysql://keystone:keystone123@controller/keystone
[token]
provider = fernet

 su -s /bin/sh -c "keystone-manage db_sync" keystone
 
keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
  
keystone-manage credential_setup --keystone-user keystone --keystone-group keystone
keystone-manage bootstrap --bootstrap-password admin \
  --bootstrap-admin-url http://controller:5000/v3/ \
  --bootstrap-internal-url http://controller:5000/v3/ \
  --bootstrap-public-url http://controller:5000/v3/ \
  --bootstrap-region-id RegionOne
  
  
# Configure the Apache HTTP server
  
vim /etc/httpd/conf/httpd.conf
ServerName controller:80

ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/

 systemctl enable httpd.service;systemctl start httpd.service


vim admin.sh
#!/bin/bash  
export OS_USERNAME=admin
export OS_PASSWORD=admin
export OS_PROJECT_NAME=admin
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3

source  admin.sh
openstack endpoint list
# check token issuance
openstack token issue

openstack domain create --description "An Example Domain" example

 openstack project create --domain default \
  --description "Service Project" service


openstack project create --domain default \
  --description "Demo Project" myproject

openstack user create --domain default \
  --password-prompt myuser              # password: myuser

openstack role create myrole

openstack role add --project myproject --user myuser myrole


# Verification
unset OS_AUTH_URL OS_PASSWORD

openstack --os-auth-url http://controller:5000/v3 \
  --os-project-domain-name Default --os-user-domain-name Default \
  --os-project-name admin --os-username admin token issue
   # password: admin
openstack --os-auth-url http://controller:5000/v3 \
  --os-project-domain-name Default --os-user-domain-name Default \
  --os-project-name myproject --os-username myuser token issue
 # password: myuser
 
 
vim admin.sh
# replace the contents of admin.sh with the following
#!/bin/bash 
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=admin
export OS_USERNAME=admin
export OS_PASSWORD=admin          # the bootstrap admin password set earlier
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2

vim myuser.sh
#!/bin/bash 
export OS_PROJECT_DOMAIN_NAME=Default
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_NAME=myproject
export OS_USERNAME=myuser
export OS_PASSWORD=myuser
export OS_AUTH_URL=http://controller:5000/v3
export OS_IDENTITY_API_VERSION=3
export OS_IMAGE_API_VERSION=2
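Usage is the same as with admin.sh; for example, to work as the demo user created above:

source myuser.sh
openstack token issue          # should return a token scoped to myproject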

Glance

mysql  -p123

CREATE DATABASE glance;

GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' \
  IDENTIFIED BY 'glance123';

openstack user create --domain default --password-prompt glance     # password: glance

openstack role add --project service --user glance admin


openstack service create --name glance \
  --description "OpenStack Image" image
  
  
openstack endpoint create --region RegionOne \
  image public http://controller:9292 
  
openstack endpoint create --region RegionOne \
  image internal http://controller:9292
 
openstack endpoint create --region RegionOne \
  image admin http://controller:9292 
  
  # list the created endpoints
openstack endpoint list
  
yum install openstack-glance -y

cp /etc/glance/glance-api.conf{,.bak}

grep -Ev "^$|#" /etc/glance/glance-api.conf.bak > /etc/glance/glance-api.conf

vim /etc/glance/glance-api.conf
  
[database]
connection = mysql+pymysql://glance:glance123@controller/glance
[keystone_authtoken]
www_authenticate_uri  = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = glance
password = glance

[paste_deploy]
flavor = keystone  

[glance_store]
stores = file,http
default_store = file
filesystem_store_datadir = /var/lib/glance/images/  

su -s /bin/sh -c "glance-manage db_sync" glance
 
 systemctl enable openstack-glance-api.service;systemctl start openstack-glance-api.service
 
 # Verification
Upload the cirros-0.4.0-x86_64-disk.img image
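If the image file is not yet present on the controller, it can be downloaded first (the URL below is the upstream CirrOS release site, not part of the original steps):

wget http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img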

glance image-create --name "cirros4" \
  --file cirros-0.4.0-x86_64-disk.img \
  --disk-format qcow2 --container-format bare \
  --visibility public
  
 openstack image list

Placement

mysql  -p123

CREATE DATABASE placement;

GRANT ALL PRIVILEGES ON placement.* TO 'placement'@'%' \
  IDENTIFIED BY 'placement123';


openstack user create --domain default --password-prompt placement     # password: placement

openstack role add --project service --user placement admin

openstack service create --name placement \
  --description "Placement API" placement


 openstack endpoint create --region RegionOne \
  placement public http://controller:8778


openstack endpoint create --region RegionOne \
  placement internal http://controller:8778

openstack endpoint create --region RegionOne \
  placement admin http://controller:8778

yum install openstack-placement-api -y

# back up the configuration file with a .bak suffix
cp /etc/placement/placement.conf{,.bak}

# grep -Ev strips blank lines and comment lines, writing the result to /etc/placement/placement.conf
 grep -Ev "^$|#" /etc/placement/placement.conf.bak > /etc/placement/placement.conf

vim /etc/placement/placement.conf
[placement_database]
connection = mysql+pymysql://placement:placement123@controller/placement
[api]
auth_strategy = keystone

[keystone_authtoken]
auth_url = http://controller:5000/v3
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = placement
password = placement

su -s /bin/sh -c "placement-manage db sync" placement
# Note:
# this command may print a warning like the following

[root@con ~]# su -s /bin/sh -c "placement-manage db sync" placement
/usr/lib/python2.7/site-packages/pymysql/cursors.py:170: Warning: (1280, u"Name 'alembic_version_pkc' ignored for PRIMARY key.")
  result = self._query(query)

The warning is harmless; it is not an error and can be ignored.

Official documentation: Ignore any deprecation messages in this output.

 httpd -v
# Official documentation: due to a packaging bug, you must enable access to the Placement API by adding the following configuration to /etc/httpd/conf.d/00-placement-api.conf:

vim /etc/httpd/conf.d/00-placement-api.conf 
<Directory /usr/bin>
   <IfVersion >= 2.4>
      Require all granted
   </IfVersion>
   <IfVersion < 2.4>
      Order allow,deny
      Allow from all
   </IfVersion>
</Directory>



systemctl restart httpd

# Verification
placement-status upgrade check
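Another quick check is to query the API root directly; assuming httpd was restarted, it should return a small JSON document listing the supported versions:

curl http://controller:8778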


nova

Controller node

mysql  -p123

MariaDB [(none)]> CREATE DATABASE nova_api;
MariaDB [(none)]> CREATE DATABASE nova;
MariaDB [(none)]> CREATE DATABASE nova_cell0;

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_api.* TO 'nova'@'%' \
  IDENTIFIED BY 'nova123';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova.* TO 'nova'@'%' \
  IDENTIFIED BY 'nova123';

MariaDB [(none)]> GRANT ALL PRIVILEGES ON nova_cell0.* TO 'nova'@'%' \
  IDENTIFIED BY 'nova123';


openstack user create --domain default --password-prompt nova     # password: nova

openstack role add --project service --user nova admin

openstack service create --name nova \
  --description "OpenStack Compute" compute

openstack endpoint create --region RegionOne \
  compute public http://controller:8774/v2.1

openstack endpoint create --region RegionOne \
  compute internal http://controller:8774/v2.1

openstack endpoint create --region RegionOne \
  compute admin http://controller:8774/v2.1


yum install openstack-nova-api openstack-nova-conductor openstack-nova-novncproxy openstack-nova-scheduler -y

# back up the configuration file with a .bak suffix
cp /etc/nova/nova.conf{,.bak}

# grep -Ev strips blank lines and comment lines, writing the result to /etc/nova/nova.conf
 grep -Ev "^$|#" /etc/nova/nova.conf.bak > /etc/nova/nova.conf
vim /etc/nova/nova.conf

[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack123@controller:5672/
my_ip = 192.168.0.60
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api_database]
connection = mysql+pymysql://nova:nova123@controller/nova_api
[database]
connection = mysql+pymysql://nova:nova123@controller/nova
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[vnc]
enabled = true
server_listen = $my_ip
server_proxyclient_address = $my_ip
[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement



su -s /bin/sh -c "nova-manage api_db sync" nova           # ignore any deprecation messages in this output

su -s /bin/sh -c "nova-manage cell_v2 map_cell0" nova

su -s /bin/sh -c "nova-manage cell_v2 create_cell --name=cell1 --verbose" nova

su -s /bin/sh -c "nova-manage db sync" nova

# Verify
su -s /bin/sh -c "nova-manage cell_v2 list_cells" nova

systemctl enable \
    openstack-nova-api.service \
    openstack-nova-scheduler.service \
    openstack-nova-conductor.service \
    openstack-nova-novncproxy.service
    
systemctl start \
    openstack-nova-api.service \
    openstack-nova-scheduler.service \
    openstack-nova-conductor.service \
    openstack-nova-novncproxy.service
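At this point only the controller-side services exist. With the admin credentials sourced, a quick check should list nova-conductor and nova-scheduler in the up state:

openstack compute service list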

Compute node

 yum install openstack-nova-compute -y
 
# back up the configuration file with a .bak suffix
cp /etc/nova/nova.conf{,.bak}

# grep -Ev strips blank lines and comment lines, writing the result to /etc/nova/nova.conf
 grep -Ev "^$|#" /etc/nova/nova.conf.bak > /etc/nova/nova.conf
 
vim /etc/nova/nova.conf
[DEFAULT]
enabled_apis = osapi_compute,metadata
transport_url = rabbit://openstack:openstack123@controller
my_ip = 192.168.0.61
use_neutron = true
firewall_driver = nova.virt.firewall.NoopFirewallDriver
[api]
auth_strategy = keystone
[keystone_authtoken]
www_authenticate_uri = http://controller:5000/
auth_url = http://controller:5000/
memcached_servers = controller:11211
auth_type = password
project_domain_name = Default
user_domain_name = Default
project_name = service
username = nova
password = nova
[vnc]
enabled = true
server_listen = 0.0.0.0
server_proxyclient_address = $my_ip
novncproxy_base_url = http://controller:6080/vnc_auto.html
# If the client machine cannot resolve the hostname, replace "controller" in novncproxy_base_url with the controller's IP address (192.168.0.60);
# otherwise the noVNC console URL will not resolve.

# If you keep the hostname, add an entry for it on the physical machine:
# on Windows, add the mapping to the hosts file under C:\Windows\System32\drivers\etc


[glance]
api_servers = http://controller:9292
[oslo_concurrency]
lock_path = /var/lib/nova/tmp
[placement]
region_name = RegionOne
project_domain_name = Default
project_name = service
auth_type = password
user_domain_name = Default
auth_url = http://controller:5000/v3
username = placement
password = placement



# check whether hardware acceleration is supported
egrep -c '(vmx|svm)' /proc/cpuinfo

systemctl enable libvirtd.service openstack-nova-compute.service;systemctl start libvirtd.service openstack-nova-compute.service


# Verification
openstack compute service list --service nova-compute

su -s /bin/sh -c "nova-manage cell_v2 discover_hosts --verbose" nova


# Note
When adding new compute nodes, you must run nova-manage cell_v2 discover_hosts on the controller node to register them. Alternatively, set an appropriate discovery interval in /etc/nova/nova.conf:

[scheduler]
discover_hosts_in_cells_interval = 300

vim restart-nova.sh
#!/bin/bash 
systemctl restart openstack-nova*

bash restart-nova.sh

Neutron

Controller node

mysql -u root -p123

MariaDB [(none)]> CREATE DATABASE neutron;

MariaDB [(none)]> GRANT ALL PRIVILEGES ON neutron.* TO 'neutron'@'%' \
  IDENTIFIED BY 'neutron123';

openstack user create --domain default --password-prompt neutron  # password: neutron

openstack role add --project service --user neutron admin

openstack service create --name neutron \
  --description "OpenStack Networking" network


openstack endpoint create --region RegionOne \
  network public http://controller:9696

openstack endpoint create --region RegionOne \
  network internal http://controller:9696

openstack endpoint create --region RegionOne \
  network admin http://controller:9696

Layer-3 networking (self-service networks)

yum install openstack-neutron openstack-neutron-ml2 \
  openstack-neutron-linuxbridge ebtables
  
 cp /etc/neutron/neutron.conf{,.bak}
# grep -Ev strips blank lines and comment lines, writing the result to /etc/neutron/neutron.conf
 grep -Ev "^$|#" /etc/neutron/neutron.conf.bak > /etc/neutron/neutron.conf
 
vim /etc/neutron/neutron.conf  
[database]
connection = mysql+pymysql://neutron:neutron123@controller/neutron
[DEFAULT]
core_plugin = ml2
service_plugins = router
allow_overlapping_ips = true
transport_url = rabbit://openstack:openstack123@controller
auth_strategy = keystone
notify_nova_on_port_status_changes = true
notify_nova_on_port_data_changes = true

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[nova]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = nova
password = nova


[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

Configure the Modular Layer 2 (ML2) plug-in

cp /etc/neutron/plugins/ml2/ml2_conf.ini{,.bak}

grep -Ev "^$|#" /etc/neutron/plugins/ml2/ml2_conf.ini.bak > /etc/neutron/plugins/ml2/ml2_conf.ini

vim /etc/neutron/plugins/ml2/ml2_conf.ini

[ml2]
type_drivers = flat,vlan,vxlan
tenant_network_types = vxlan
mechanism_drivers = linuxbridge,l2population
extension_drivers = port_security
[ml2_type_flat]
flat_networks = extnet
[ml2_type_vxlan]
vni_ranges = 1:1000
[securitygroup]
enable_ipset = true

Configure the Linux bridge agent

cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}

# grep -Ev strips blank lines and comment lines, writing the result to the config file
  grep -Ev "^$|#" /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini
# check the NIC name
ip addr

vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini
[linux_bridge]
physical_interface_mappings = extnet:ens33
[vxlan]
enable_vxlan = true
local_ip = 192.168.0.60
l2_population = true
[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver


vim /etc/sysctl.conf

net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1

# load the br_netfilter kernel module
modprobe br_netfilter
# apply and verify the kernel parameters
sysctl -p
# bridge filtering support generally requires the br_netfilter kernel module to be loaded
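The modprobe above does not survive a reboot. One way to make the module load persistent on CentOS 7 (the file name below is my own choice) is:

echo br_netfilter > /etc/modules-load.d/br_netfilter.conf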

Configure the layer-3 agent

 cp /etc/neutron/l3_agent.ini{,.bak}

 grep -Ev "^$|#" /etc/neutron/l3_agent.ini.bak  > /etc/neutron/l3_agent.ini

vim /etc/neutron/l3_agent.ini

[DEFAULT]
interface_driver = linuxbridge


# configure the Linux bridge interface driver

Configure the DHCP agent

 cp /etc/neutron/dhcp_agent.ini{,.bak}
 grep -Ev "^$|#" /etc/neutron/dhcp_agent.ini.bak > /etc/neutron/dhcp_agent.ini


vim /etc/neutron/dhcp_agent.ini

[DEFAULT]
interface_driver = linuxbridge
dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
enable_isolated_metadata = true

# configure the Linux bridge interface driver and the Dnsmasq DHCP driver, and enable isolated metadata so that instances on provider networks can reach the metadata service over the network

Configure the metadata agent

vim /etc/neutron/metadata_agent.ini

[DEFAULT]
nova_metadata_host = controller
metadata_proxy_shared_secret = xier123          # METADATA_SECRET; must match the secret configured in nova.conf below



Configure the Compute service to use the Networking service

vim /etc/nova/nova.conf

[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron
service_metadata_proxy = true
metadata_proxy_shared_secret = xier123

Finalize the installation

ln -s /etc/neutron/plugins/ml2/ml2_conf.ini /etc/neutron/plugin.ini

su -s /bin/sh -c "neutron-db-manage --config-file /etc/neutron/neutron.conf \
  --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head" neutron



vim restart-neutron.sh
#!/bin/bash
systemctl restart neutron-server.service  neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service


systemctl restart openstack-nova-api.service


systemctl enable neutron-server.service neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service;systemctl start neutron-server.service  neutron-linuxbridge-agent.service neutron-dhcp-agent.service neutron-metadata-agent.service

systemctl enable neutron-l3-agent.service;systemctl start neutron-l3-agent.service


Compute node

yum install openstack-neutron-linuxbridge ebtables ipset -y

vim /etc/neutron/neutron.conf 
[DEFAULT]
transport_url = rabbit://openstack:openstack123@controller
auth_strategy = keystone

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = neutron
password = neutron

[oslo_concurrency]
lock_path = /var/lib/neutron/tmp

Layer-3 networking (self-service networks)

cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak}

grep -Ev "^$|#" /etc/neutron/plugins/ml2/linuxbridge_agent.ini.bak > /etc/neutron/plugins/ml2/linuxbridge_agent.ini

vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini 
[linux_bridge]
physical_interface_mappings = extnet:ens33

[vxlan]
enable_vxlan = true
local_ip = 192.168.0.61
l2_population = true

[securitygroup]
enable_security_group = true
firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver



vim /etc/sysctl.conf
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1


# load the br_netfilter kernel module
modprobe br_netfilter
# apply and verify the kernel parameters
sysctl -p

Configure the Compute service to use the Networking service

vim /etc/nova/nova.conf

[neutron]
auth_url = http://controller:5000
auth_type = password
project_domain_name = default
user_domain_name = default
region_name = RegionOne
project_name = service
username = neutron
password = neutron

systemctl restart openstack-nova-compute.service

systemctl enable neutron-linuxbridge-agent.service;systemctl start neutron-linuxbridge-agent.service

Verification (controller node)


openstack network agent list

+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| ID                                   | Agent Type         | Host       | Availability Zone | Alive | State | Binary                    |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+
| f49a4b81-afd6-4b3d-b923-66c8f0517099 | Metadata agent     | controller | None              | True  | UP    | neutron-metadata-agent    |
| 27eee952-a748-467b-bf71-941e89846a92 | Linux bridge agent | controller | None              | True  | UP    | neutron-linuxbridge-agent |
| 08905043-5010-4b87-bba5-aedb1956e27a | Linux bridge agent | compute1   | None              | True  | UP    | neutron-linuxbridge-agent |
| 830344ff-dc36-4956-84f4-067af667a0dc | L3 agent           | controller | nova              | True  | UP    | neutron-l3-agent          |
| dd3644c9-1a3a-435a-9282-eb306b4b0391 | DHCP agent         | controller | nova              | True  | UP    | neutron-dhcp-agent        |
+--------------------------------------+--------------------+------------+-------------------+-------+-------+---------------------------+


# Launch an instance
# Create the m1.nano flavor
# The smallest default flavor consumes 512 MB of memory per instance. For environments whose compute nodes have less than 4 GB of memory, we recommend creating the m1.nano flavor, which requires only 64 MB per instance. Use this flavor only with the CirrOS image, for testing purposes.
openstack flavor create --id 0 --vcpus 1 --ram 64 --disk 1 m1.nano

# generate a key pair
ssh-keygen -q -N ""          # just press Enter at the prompt
openstack keypair create --public-key ~/.ssh/id_rsa.pub mykey

openstack keypair list

openstack security group rule create --proto icmp default

openstack security group rule create --proto tcp --dst-port 22 default

openstack network create  --share --external \
  --provider-physical-network extnet \
  --provider-network-type flat flat-extnet
  
openstack subnet create --network flat-extnet \
  --allocation-pool start=192.168.0.100,end=192.168.0.200 \
  --dns-nameserver 114.114.114.114 --gateway 192.168.0.2 \
  --subnet-range 192.168.0.0/24 flat-subnet
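The net-id used in the server create command below is specific to each environment. One way to look it up instead of pasting the UUID by hand (a sketch using the flat-extnet network created above):

openstack network list
NET_ID=$(openstack network show flat-extnet -f value -c id)
echo $NET_ID          # use this value for --nic net-id= in the command below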

openstack server create --flavor m1.nano --image  cirros4 \
  --nic net-id=e41e72a0-e11f-449c-a13b-18cae8347ba4 --security-group default \
  --key-name mykey vm1

# if the instance fails to build, run the following restart scripts and try again
[root@con ~]# sh restart-nova.sh 
[root@con ~]# sh restart-neutron.sh 

openstack server list

openstack console url show vm1


# access from the physical host
 http://192.168.0.60:6080/vnc_auto.html?path=%3Ftoken%3D2d424de0-0a9b-4961-b7ab-4aa458222a03
 
 
 
 # Fix a bug (on the compute node)
 virsh capabilities

vim /etc/nova/nova.conf
[libvirt]
hw_machine_type = x86_64=pc-i440fx-rhel7.2.0  # change the machine type used for guests
cpu_mode = host-passthrough      # pass the host CPU model straight through to guests

# restart
systemctl restart openstack-nova-*


# controller node
openstack server create --flavor m1.nano --image  cirros4   --nic net-id=e41e72a0-e11f-449c-a13b-18cae8347ba4 --security-group default   --key-name mykey vm2

openstack console url show vm2


# access from the physical host
http://192.168.0.60:6080/vnc_auto.html?path=%3Ftoken%3Dc91ba078-f7fc-4b39-8c6e-a140afdfd966

Dashboard

 yum install openstack-dashboard -y
 
vim /etc/openstack-dashboard/local_settings 

OPENSTACK_HOST = "controller"

ALLOWED_HOSTS = ['*']

SESSION_ENGINE = 'django.contrib.sessions.backends.cache'

CACHES = {
    'default': {
         'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
         'LOCATION': 'controller:11211',
    }
}

OPENSTACK_KEYSTONE_URL = "http://%s:5000/v3" % OPENSTACK_HOST

OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT = True

OPENSTACK_API_VERSIONS = {
    "identity": 3,
    "image": 2,
    "volume": 3,
}

OPENSTACK_KEYSTONE_DEFAULT_DOMAIN = "Default"

OPENSTACK_KEYSTONE_DEFAULT_ROLE = "user"

OPENSTACK_NEUTRON_NETWORK = {
    ...
    'enable_router': False,
    'enable_quotas': False,
    'enable_distributed_router': False,
    'enable_ha_router': False,
    'enable_lb': False,
    'enable_firewall': False,
    'enable_': False,
    'enable_fip_topology_check': False,
}

TIME_ZONE = "Asia/Shanghai"

# If /etc/httpd/conf.d/openstack-dashboard.conf does not already contain the following line, add it.
vim /etc/httpd/conf.d/openstack-dashboard.conf
WSGIApplicationGroup %{GLOBAL}



systemctl restart httpd.service memcached.service

If a service is not currently running, the systemctl restart command will also start it.

# Verification
# Open the dashboard in a web browser at http://controller/dashboard.

# Authenticate with the admin or myuser account and the default domain credentials.

# If the page shows:
Not Found
The requested URL /auth/login/ was not found on this server.

# known packaging bug: add the following line
vim /etc/openstack-dashboard/local_settings 

WEBROOT="/dashboard"


systemctl restart httpd.service

Cinder

Controller node


mysql -u root -p

 CREATE DATABASE cinder;

GRANT ALL PRIVILEGES ON cinder.* TO 'cinder'@'%' \
  IDENTIFIED BY 'cinder123';

openstack user create --domain default --password cinder cinder

openstack role add --project service --user cinder admin

openstack service create --name cinderv2 \
  --description "OpenStack Block Storage" volumev2

openstack service create --name cinderv3 \
  --description "OpenStack Block Storage" volumev3


openstack endpoint create --region RegionOne \
  volumev2 public http://controller:8776/v2/%\(project_id\)s


openstack endpoint create --region RegionOne \
  volumev2 internal http://controller:8776/v2/%\(project_id\)s


openstack endpoint create --region RegionOne \
  volumev2 admin http://controller:8776/v2/%\(project_id\)s





openstack endpoint create --region RegionOne \
  volumev3 public http://controller:8776/v3/%\(project_id\)s

openstack endpoint create --region RegionOne \
  volumev3 internal http://controller:8776/v3/%\(project_id\)s

 openstack endpoint create --region RegionOne \
  volumev3 admin http://controller:8776/v3/%\(project_id\)s

# install the packages
 yum install openstack-cinder -y

# edit the /etc/cinder/cinder.conf file
cp /etc/cinder/cinder.conf{,.bak}

grep -Ev "^$|#" /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf

vim /etc/cinder/cinder.conf
[database]
connection = mysql+pymysql://cinder:cinder123@controller/cinder

[DEFAULT]
transport_url = rabbit://openstack:openstack123@controller

auth_strategy = keystone

my_ip = 192.168.0.60

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder


[oslo_concurrency]
lock_path = /var/lib/cinder/tmp

su -s /bin/sh -c "cinder-manage db sync" cinder
# Official documentation: ignore any deprecation messages in this output.

vim /etc/nova/nova.conf

[cinder]
os_region_name = RegionOne

systemctl restart openstack-nova-api.service

systemctl enable openstack-cinder-api.service openstack-cinder-scheduler.service;systemctl start openstack-cinder-api.service openstack-cinder-scheduler.service
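With the API and scheduler running, a quick check on the controller (the cinder-volume service only appears after the storage-node steps below):

openstack volume service list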


Compute node (used as the storage node)

yum install lvm2 device-mapper-persistent-data -y 

systemctl enable lvm2-lvmetad.service;systemctl start lvm2-lvmetad.service

pvcreate /dev/sdb

vgcreate cinder-volumes /dev/sdb

cp /etc/lvm/lvm.conf{,.bak}

grep -Ev "^$|#" /etc/lvm/lvm.conf.bak > /etc/lvm/lvm.conf


vim /etc/lvm/lvm.conf
devices {
        filter = [ "a/sdb/", "r/.*/"]
}



yum install openstack-cinder targetcli python-keystone -y

cp /etc/cinder/cinder.conf{,.bak}

grep -Ev "^$|#" /etc/cinder/cinder.conf.bak > /etc/cinder/cinder.conf

vim /etc/cinder/cinder.conf
[database]
connection = mysql+pymysql://cinder:cinder123@controller/cinder

[DEFAULT]
transport_url = rabbit://openstack:openstack123@controller
auth_strategy = keystone
my_ip = 192.168.0.61
enabled_backends = lvm
glance_api_servers = http://controller:9292

[keystone_authtoken]
www_authenticate_uri = http://controller:5000
auth_url = http://controller:5000
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = cinder
password = cinder



# if the [lvm] section does not exist, create it:
[lvm]
volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver
volume_group = cinder-volumes
target_protocol = iscsi
target_helper = lioadm

[oslo_concurrency]
lock_path = /var/lib/cinder/tmp


systemctl enable openstack-cinder-volume.service target.service;systemctl start openstack-cinder-volume.service target.service
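Back on the controller, the lvm backend should now be reported as up; a simple end-to-end test (the volume name testvol is my own choice):

openstack volume service list          # cinder-volume on the compute node should show state up
openstack volume create --size 1 testvol
openstack volume list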


Swift

Controller node

# create the swift user
openstack user create --domain default --password swift swift  # password: swift

# add the admin role to the swift user
openstack role add --project service --user swift admin

# create the swift service entity
openstack service create --name swift \
  --description "OpenStack Object Storage" object-store


# create the Object Storage service API endpoints
openstack endpoint create --region RegionOne \
  object-store public http://controller:8080/v1/AUTH_%\(project_id\)s

openstack endpoint create --region RegionOne \
  object-store internal http://controller:8080/v1/AUTH_%\(project_id\)s

openstack endpoint create --region RegionOne \
  object-store admin http://controller:8080/v1


# install the packages
yum -y install openstack-swift-proxy python-swiftclient python-keystoneclient python-keystonemiddleware memcached 

# fetch the proxy service configuration file from the Object Storage source repository
curl -o /etc/swift/proxy-server.conf https://opendev.org/openstack/swift/raw/branch/master/etc/proxy-server.conf-sample


# edit /etc/swift/proxy-server.conf, delete the original contents and add the following
vi /etc/swift/proxy-server.conf      # in vi, dG deletes the whole file

[DEFAULT]
bind_port = 8080
swift_dir = /etc/swift
user = swift
[pipeline:main]
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server
[app:proxy-server]
use = egg:swift#proxy
account_autocreate = True
[filter:tempauth]
use = egg:swift#tempauth
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3
user_test5_tester5 = testing5 service
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
auth_uri = http://controller:5000
auth_url = http://controller:5000          # keystone in this guide listens on 5000 only; the legacy 35357 port is not configured
memcached_servers = controller:11211
auth_type = password
project_domain_name = default
user_domain_name = default
project_name = service
username = swift
password = swift
delay_auth_decision = True
[filter:keystoneauth]
use = egg:swift#keystoneauth
operator_roles = admin,user
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:cache]
memcache_servers = controller:11211
use = egg:swift#memcache
[filter:ratelimit]
use = egg:swift#ratelimit
[filter:domain_remap]
use = egg:swift#domain_remap
[filter:catch_errors]
use = egg:swift#catch_errors
[filter:cname_lookup]
use = egg:swift#cname_lookup
[filter:staticweb]
use = egg:swift#staticweb
[filter:tempurl]
use = egg:swift#tempurl
[filter:formpost]
use = egg:swift#formpost
[filter:name_check]
use = egg:swift#name_check
[filter:list-endpoints]
use = egg:swift#list_endpoints
[filter:proxy-logging]
use = egg:swift#proxy_logging
[filter:bulk]
use = egg:swift#bulk
[filter:slo]
use = egg:swift#slo
[filter:dlo]
use = egg:swift#dlo
[filter:container-quotas]
use = egg:swift#container_quotas
[filter:account-quotas]
use = egg:swift#account_quotas
[filter:gatekeeper]
use = egg:swift#gatekeeper
[filter:container_sync]
use = egg:swift#container_sync
[filter:xprofile]
use = egg:swift#xprofile
[filter:versioned_writes]
use = egg:swift#versioned_writes
# edit the /etc/swift/swift.conf file, delete the original contents and add the following
vi /etc/swift/swift.conf

[swift-hash]
swift_hash_path_suffix = changeme
swift_hash_path_prefix = changeme
[storage-policy:0]
name = Policy-0
default = yes
aliases = yellow, orange
[swift-constraints]


# set ownership

chown -R root:swift /etc/swift

Compute node (storage node)

yum -y install xfsprogs rsync openstack-swift-account openstack-swift-container openstack-swift-object

# format the /dev/sdc device with XFS
lsblk


# create a partition, then format it with XFS
fdisk /dev/sdc
mkfs.xfs  /dev/sdc1

# create the mount point directory structure
mkdir -p /swift/node

# edit the /etc/fstab file and add the following line
vim /etc/fstab
/dev/sdc1 /swift/node xfs noatime,nodiratime,nobarrier,logbufs=8 0 0



# mount the device
mount /dev/sdc1 /swift/node/
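A quick check that the device is mounted where the Swift services expect it:

df -h /swift/node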

# edit the /etc/rsyncd.conf file and add the following
vim /etc/rsyncd.conf
pid file = /var/run/rsyncd.pid
log file = /var/log/rsyncd.log
uid = swift
gid = swift
address = 127.0.0.1
[account]
path            = /swift/node
read only       = false
write only      = no
list            = yes
incoming chmod  = 0644
outgoing chmod  = 0644
max connections = 25
lock file =     /var/lock/account.lock
[container]
path            = /swift/node
read only       = false
write only      = no
list            = yes
incoming chmod  = 0644
outgoing chmod  = 0644
max connections = 25
lock file =     /var/lock/container.lock
[object]
path            = /swift/node
read only       = false
write only      = no
list            = yes
incoming chmod  = 0644
outgoing chmod  = 0644
max connections = 25
lock file =     /var/lock/object.lock
[swift_server]
path            = /etc/swift
read only       = true
write only      = no
list            = yes
incoming chmod  = 0644
outgoing chmod  = 0644
max connections = 5
lock file =     /var/lock/swift_server.lock


# start the rsyncd service and configure it to start at boot
systemctl start rsyncd.service;systemctl enable rsyncd.service


# Install and configure the components on the Compute node
# Edit the /etc/swift/account-server.conf file, delete the original contents and add the following
vi /etc/swift/account-server.conf
[DEFAULT]
bind_port = 6002
user = swift
swift_dir = /etc/swift
devices = /swift/node
mount_check = false
[pipeline:main]
pipeline = healthcheck recon account-server
[app:account-server]
use = egg:swift#account
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift
[account-replicator]
[account-auditor]
[account-reaper]
[filter:xprofile]
use = egg:swift#xprofile



# Edit the /etc/swift/container-server.conf file, delete the original contents and add the following:
vi /etc/swift/container-server.conf

[DEFAULT]
bind_port = 6001
user = swift
swift_dir = /etc/swift
devices = /swift/node
mount_check = false
[pipeline:main]
pipeline = healthcheck recon container-server
[app:container-server]
use = egg:swift#container
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift
[container-replicator]
[container-updater]
[container-auditor]
[container-sync]
[filter:xprofile]
use = egg:swift#xprofile


# Edit the /etc/swift/object-server.conf file, delete the original contents and add the following
vi /etc/swift/object-server.conf

[DEFAULT]
bind_port = 6000
user = swift
swift_dir = /etc/swift
devices = /swift/node
mount_check = false
[pipeline:main]
pipeline = healthcheck recon object-server
[app:object-server]
use = egg:swift#object
[filter:healthcheck]
use = egg:swift#healthcheck
[filter:recon]
use = egg:swift#recon
recon_cache_path = /var/cache/swift
recon_lock_path = /var/lock
[object-replicator]
[object-reconstructor]
[object-updater]
[object-auditor]
[filter:xprofile]
use = egg:swift#xprofile


# Edit the /etc/swift/swift.conf file, delete the original contents and add the following
vi /etc/swift/swift.conf

[swift-hash]
swift_hash_path_suffix = changeme
swift_hash_path_prefix = changeme
[storage-policy:0]
name = Policy-0
default = yes
aliases = yellow, orange
[swift-constraints]

# confirm the mount point directory structure has the proper ownership:
chown -R swift:swift /swift/node


# create the recon directory and make sure it has the proper ownership
mkdir -p /var/cache/swift
chown -R root:swift /var/cache/swift
chmod -R 775 /var/cache/swift
chown -R root:swift /etc/swift


Create, distribute, and initialize the rings on the Controller node

# Create the account ring
# 1. Change to the /etc/swift directory and create the base account.builder file:

 cd /etc/swift
 swift-ring-builder account.builder create 18 1 1
# 2. Add each storage node to the ring:
 swift-ring-builder account.builder add --region 1 --zone 1 --ip 192.168.0.61 --port 6002 --device sdc1 --weight 100
# 3. Verify the ring contents:
 swift-ring-builder account.builder
# 4. Rebalance the ring:
 swift-ring-builder account.builder rebalance



# Create the container ring
# 1. Change to the /etc/swift directory and create the base container.builder file:
 cd /etc/swift
 swift-ring-builder container.builder create 10 1 1
# 2. Add each storage node to the ring:
 swift-ring-builder container.builder add --region 1 --zone 1 --ip 192.168.0.61 --port 6001 --device sdc1 --weight 100
# 3. Verify the ring contents:
 swift-ring-builder container.builder
# 4. Rebalance the ring:
 swift-ring-builder container.builder rebalance


# Create the object ring
# 1. Change to the /etc/swift directory and create the base object.builder file:
 swift-ring-builder object.builder create 10 1 1
# 2. Add each storage node to the ring:
 swift-ring-builder object.builder add --region 1 --zone 1 --ip 192.168.0.61 --port 6000 --device sdc1 --weight 100
# 3. Verify the ring contents:
 swift-ring-builder object.builder
# 4. Rebalance the ring:
 swift-ring-builder object.builder rebalance


# Copy the ring files from the Controller to the Compute node
Copy the account.ring.gz, container.ring.gz, and object.ring.gz files to the /etc/swift directory on every storage node and on any additional node running the proxy service.
[root@compute ~]# scp controller:/etc/swift/*.ring.gz /etc/swift/



# Start the services
# On the controller node, start the Object Storage proxy service and its dependencies, and configure them to start at boot:
 systemctl start openstack-swift-proxy.service memcached.service
 systemctl enable openstack-swift-proxy.service memcached.service








# On the storage node (Compute), start the Object Storage services and configure them to start at boot
 systemctl start openstack-swift-account.service openstack-swift-account-auditor.service openstack-swift-account-reaper.service openstack-swift-account-replicator.service
 systemctl enable openstack-swift-account.service openstack-swift-account-auditor.service openstack-swift-account-reaper.service openstack-swift-account-replicator.service

 systemctl start openstack-swift-container.service openstack-swift-container-auditor.service openstack-swift-container-replicator.service openstack-swift-container-updater.service
 systemctl enable openstack-swift-container.service openstack-swift-container-auditor.service openstack-swift-container-replicator.service openstack-swift-container-updater.service

 systemctl start openstack-swift-object.service openstack-swift-object-auditor.service openstack-swift-object-replicator.service openstack-swift-object-updater.service
 systemctl enable openstack-swift-object.service openstack-swift-object-auditor.service openstack-swift-object-replicator.service openstack-swift-object-updater.service


Run the verification steps on the Controller

# source the demo user's credentials (the myuser.sh file created earlier)
. /root/myuser.sh



# show the service status:
swift stat 


# create a container named container1
openstack container create container1


# upload a test file to the container1 container
openstack object create container1 /root/myuser.sh

# list all the files in the container1 container
openstack object list container1

# download the test file from the container1 container
openstack object save container1 /root/myuser.sh







