On a compute node, what mainly needs to be configured are the nova and neutron components; the controller node can only schedule and provision resources with the cooperation of the compute nodes, so the configuration on each compute node is relatively small. In a real production environment, however, the number of compute nodes can be very large, and that is where automation tools such as Ansible or Puppet come in. Without further ado, let's get straight to the configuration.
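As a rough illustration of that automation point, the ad-hoc commands below would push the same package installation and service state to a whole group of compute nodes at once; the inventory file hosts and the group name compute are assumptions for this sketch, not something defined in this deployment.

ansible compute -i hosts -m yum -a "name=openstack-nova-compute state=present"

ansible compute -i hosts -m systemd -a "name=openstack-nova-compute state=started enabled=yes"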


Compute node base configuration

[root@compute1 ~]# lscpu

Architecture:          x86_64

CPU op-mode(s):        32-bit, 64-bit

Byte Order:            Little Endian

CPU(s):                8

On-line CPU(s) list:   0-7

Thread(s) per core:    1

Core(s) per socket:    1

Socket(s):             8

NUMA node(s):          1

Vendor ID:             GenuineIntel

CPU family:            6

Model:                 44

Model name:            Westmere E56xx/L56xx/X56xx (Nehalem-C)

Stepping:              1

CPU MHz:               2400.084

BogoMIPS:              4800.16

Virtualization:        VT-x

Hypervisor vendor:     KVM

Virtualization type:   full

L1d cache:             32K

L1i cache:             32K

L2 cache:              4096K

NUMA node0 CPU(s):     0-7


[root@compute1 ~]# free -h

              total        used        free      shared  buff/cache   available

Mem:            15G        142M         15G        8.3M        172M         15G

Swap:            0B          0B          0B

[root@compute1 ~]# lsblk

NAME            MAJ:MIN RM   SIZE RO TYPE MOUNTPOINT

sr0              11:0    1  1024M  0 rom  

vda             252:0    0   400G  0 disk 

├─vda1          252:1    0   500M  0 part /boot

└─vda2          252:2    0 399.5G  0 part 

  ├─centos-root 253:0    0    50G  0 lvm  /

  ├─centos-swap 253:1    0   3.9G  0 lvm  

  └─centos-data 253:2    0 345.6G  0 lvm  /data


[root@compute1 ~]# ifconfig

eth0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 192.168.10.31  netmask 255.255.255.0  broadcast 192.168.10.255

        inet6 fe80::5054:ff:fe18:bb1b  prefixlen 64  scopeid 0x20<link>

        ether 52:54:00:18:bb:1b  txqueuelen 1000  (Ethernet)

        RX packets 16842  bytes 1460696 (1.3 MiB)

        RX errors 0  dropped 1416  overruns 0  frame 0

        TX packets 747  bytes 199340 (194.6 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0


eth1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 10.0.0.31  netmask 255.255.0.0  broadcast 10.0.255.255

        inet6 fe80::5054:ff:fe28:e0a7  prefixlen 64  scopeid 0x20<link>

        ether 52:54:00:28:e0:a7  txqueuelen 1000  (Ethernet)

        RX packets 16213  bytes 1360633 (1.2 MiB)

        RX errors 0  dropped 1402  overruns 0  frame 0

        TX packets 23  bytes 1562 (1.5 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0


eth2: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500

        inet 111.40.215.9  netmask 255.255.255.240  broadcast 111.40.215.15

        inet6 fe80::5054:ff:fe28:e07a  prefixlen 64  scopeid 0x20<link>

        ether 52:54:00:28:e0:7a  txqueuelen 1000  (Ethernet)

        RX packets 40  bytes 2895 (2.8 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 24  bytes 1900 (1.8 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0


lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536

        inet 127.0.0.1  netmask 255.0.0.0

        inet6 ::1  prefixlen 128  scopeid 0x10<host>

        loop  txqueuelen 0  (Local Loopback)

        RX packets 841  bytes 44167 (43.1 KiB)

        RX errors 0  dropped 0  overruns 0  frame 0

        TX packets 841  bytes 44167 (43.1 KiB)

        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0


[root@compute1 ~]# getenforce

Disabled
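getenforce returning Disabled is the expected starting point here; if a node still reports Enforcing, the usual fix is to set SELINUX=disabled in /etc/selinux/config and reboot, or at least run setenforce 0 for the current session, along these lines:

sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

setenforce 0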

[root@compute1 ~]# iptables -vnL

Chain INPUT (policy ACCEPT 0 packets, 0 bytes)

 pkts bytes target     prot opt in     out     source               destination         


Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)

 pkts bytes target     prot opt in     out     source               destination         


Chain OUTPUT (policy ACCEPT 0 packets, 0 bytes)

 pkts bytes target     prot opt in     out     source               destination         
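An empty ruleset like this usually means firewalld was removed or disabled during environment preparation; if the service is still running on a node, it can be stopped and disabled first:

systemctl stop firewalld

systemctl disable firewalld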

[root@compute1 ~]# cat /etc/hosts

127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4

::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.10.10 controller

192.168.10.20 block

192.168.10.31 compute1

192.168.10.32 compute2

[root@compute1 ~]# 
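Before moving on it is worth confirming that these names actually resolve and that the controller is reachable over the management network, for example:

ping -c 3 controller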


Configure the time synchronization service

[root@compute1 ~]# yum install -y chrony

[root@compute1 ~]# vim /etc/chrony.conf 

[root@compute1 ~]# grep -v ^# /etc/chrony.conf | tr -s [[:space:]]

server controller iburst

stratumweight 0

driftfile /var/lib/chrony/drift

rtcsync

makestep 10 3

bindcmdaddress 127.0.0.1

bindcmdaddress ::1

keyfile /etc/chrony.keys

commandkey 1

generatecommandkey

noclientlog

logchange 0.5

logdir /var/log/chrony

[root@compute1 ~]# systemctl enable chronyd.service 

[root@compute1 ~]# systemctl start chronyd.service 

[root@compute1 ~]# chronyc sources

210 Number of sources = 1

MS Name/IP address         Stratum Poll Reach LastRx Last sample

===============================================================================

^* controller                    3   6    17    52    -15us[ -126us] +/-  138ms

[root@compute1 ~]# 
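The ^* in front of controller already indicates a selected, synchronized source; for the current offset and stratum, chronyc also provides:

chronyc tracking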


Install the OpenStack client

[root@compute1 ~]# yum install -y python-openstackclient


Install and configure the Nova compute service

[root@compute1 ~]# yum install -y openstack-nova-compute

[root@compute1 ~]# cp /etc/nova/nova.conf{,.bak}

[root@compute1 ~]# vim /etc/nova/nova.conf

[root@compute1 ~]# grep -v ^# /etc/nova/nova.conf | tr -s [[:space:]]

[DEFAULT]

rpc_backend = rabbit

auth_strategy = keystone

my_ip = 192.168.10.31

use_neutron = True

firewall_driver = nova.virt.firewall.NoopFirewallDriver

[api_database]

[barbican]

[cache]

[cells]

[cinder]

[conductor]

[cors]

[cors.subdomain]

[database]

[ephemeral_storage_encryption]

[glance]

api_servers = http://controller:9292

[guestfs]

[hyperv]

[image_file_url]

[ironic]

[keymgr]

[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

memcached_servers = controller:11211

auth_type = password

project_domain_name = default

user_domain_name = default

project_name = service

username = nova

password = NOVA_PASS

[libvirt]

[matchmaker_redis]

[metrics]

[neutron]

[osapi_v21]

[oslo_concurrency]

lock_path = /var/lib/nova/tmp

[oslo_messaging_amqp]

[oslo_messaging_notifications]

[oslo_messaging_rabbit]

rabbit_host = controller

rabbit_userid = openstack

rabbit_password = RABBIT_PASS

[oslo_middleware]

[oslo_policy]

[rdp]

[serial_console]

[spice]

[ssl]

[trusted_computing]

[upgrade_levels]

[vmware]

[vnc]

enabled = True

vncserver_listen = 0.0.0.0

vncserver_proxyclient_address = $my_ip

novncproxy_base_url = http://controller:6080/vnc_auto.html

[workarounds]

[xenserver]
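Editing nova.conf by hand is fine for one or two machines, but for the large fleets mentioned at the start the same options are easier to push from a script. A minimal sketch using openstack-config from the openstack-utils package (assuming that package is installed) for a few of the keys above:

openstack-config --set /etc/nova/nova.conf DEFAULT my_ip 192.168.10.31

openstack-config --set /etc/nova/nova.conf DEFAULT use_neutron True

openstack-config --set /etc/nova/nova.conf keystone_authtoken password NOVA_PASS

openstack-config --set /etc/nova/nova.conf vnc vncserver_proxyclient_address '$my_ip'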

[root@compute1 ~]# egrep -c '(vmx|svm)' /proc/cpuinfo  // check whether the CPU supports hardware acceleration for virtual machines

8

[root@compute1 ~]#

If this check returns 0, refer to the section on enabling nested virtualization for KVM guests in the OpenStack environment preparation article.
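If nested virtualization cannot be enabled at all, the fallback documented in the installation guide is to run instances under plain QEMU emulation instead of KVM, by setting the following in the [libvirt] section of /etc/nova/nova.conf:

[libvirt]

virt_type = qemu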


[root@compute1 ~]# systemctl enable libvirtd.service openstack-nova-compute.service

Created symlink from /etc/systemd/system/multi-user.target.wants/openstack-nova-compute.service to /usr/lib/systemd/system/openstack-nova-compute.service.

[root@compute1 ~]# systemctl start libvirtd.service openstack-nova-compute.service  // nova-compute does not open a listening port on the compute node, so check it through the service status

[root@compute1 ~]# systemctl status libvirtd.service openstack-nova-compute.service

● libvirtd.service - Virtualization daemon

   Loaded: loaded (/usr/lib/systemd/system/libvirtd.service; enabled; vendor preset: enabled)

   Active: active (running) since Sun 2017-07-16 19:10:26 CST; 12min ago

     Docs: man:libvirtd(8)

           http://libvirt.org

 Main PID: 1002 (libvirtd)

   CGroup: /system.slice/libvirtd.service

           └─1002 /usr/sbin/libvirtd


Jul 16 19:10:26 compute1 systemd[1]: Starting Virtualization daemon...

Jul 16 19:10:26 compute1 systemd[1]: Started Virtualization daemon.

Jul 16 19:21:06 compute1 systemd[1]: Started Virtualization daemon.


● openstack-nova-compute.service - OpenStack Nova Compute Server

   Loaded: loaded (/usr/lib/systemd/system/openstack-nova-compute.service; enabled; vendor preset: disabled)

   Active: active (running) since Sun 2017-07-16 19:21:11 CST; 1min 21s ago

 Main PID: 1269 (nova-compute)

   CGroup: /system.slice/openstack-nova-compute.service

           └─1269 /usr/bin/python2 /usr/bin/nova-compute


Jul 16 19:21:06 compute1 systemd[1]: Starting OpenStack Nova Compute Server...

Jul 16 19:21:11 compute1 nova-compute[1269]: /usr/lib/python2.7/site-packages/pkg_resources/__init__.py:187: RuntimeWarning: You have...

Jul 16 19:21:11 compute1 nova-compute[1269]: stacklevel=1,

Jul 16 19:21:11 compute1 systemd[1]: Started OpenStack Nova Compute Server.

Hint: Some lines were ellipsized, use -l to show in full.

[root@compute1 ~]#


Go to the controller node to verify the compute service configuration
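On the controller that verification amounts to listing the compute services and checking that compute1 appears with state up; assuming the admin credential file from the controller setup is named admin-openrc:

# run on the controller node

source admin-openrc

openstack compute service list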


Install and configure the Neutron Linux bridge agent

Continue with the following steps once the network configuration on the controller node is complete.

[root@compute1 ~]# yum install -y openstack-neutron-linuxbridge ebtables ipset

[root@compute1 ~]# cp /etc/neutron/neutron.conf{,.bak}

[root@compute1 ~]# vim /etc/neutron/neutron.conf

[root@compute1 ~]# grep -v ^# /etc/neutron/neutron.conf | tr -s [[:space:]]

[DEFAULT]

rpc_backend = rabbit

auth_strategy = keystone

[agent]

[cors]

[cors.subdomain]

[database]

[keystone_authtoken]

auth_uri = http://controller:5000

auth_url = http://controller:35357

memcached_servers = controller:11211

auth_type = password

project_domain_name = default

user_domain_name = default

project_name = service

username = neutron

password = NEUTRON_PASS

[matchmaker_redis]

[nova]

[oslo_concurrency]

lock_path = /var/lib/neutron/tmp

[oslo_messaging_amqp]

[oslo_messaging_notifications]

[oslo_messaging_rabbit]

rabbit_host = controller

rabbit_userid = openstack

rabbit_password = RABBIT_PASS

[oslo_policy]

[qos]

[quotas]

[ssl]

[root@compute1 ~]# 


Linux bridge agent configuration

[root@compute1 ~]# cp /etc/neutron/plugins/ml2/linuxbridge_agent.ini{,.bak} 

[root@compute1 ~]# vim /etc/neutron/plugins/ml2/linuxbridge_agent.ini

[root@compute1 ~]# grep -v ^# /etc/neutron/plugins/ml2/linuxbridge_agent.ini | tr -s [[:space:]]

[DEFAULT]

[agent]

[linux_bridge]

physical_interface_mappings = provider:eth1

[securitygroup]

enable_security_group = True

firewall_driver = neutron.agent.linux.iptables_firewall.IptablesFirewallDriver

[vxlan]

enable_vxlan = True

local_ip = 192.168.10.31

l2_population = True

[root@compute1 ~]# 
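The two values most often mistyped here are the provider interface name and local_ip, so it is worth confirming that eth1 really is the provider interface and that the VXLAN local_ip is assigned on this host before restarting anything:

ip addr show eth1

ip addr show | grep 192.168.10.31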


Edit the nova configuration file again and append the Neutron connection settings under the [neutron] section

[root@compute1 ~]# vim /etc/nova/nova.conf

[neutron]

url = http://controller:9696

auth_url = http://controller:35357

auth_type = password

project_domain_name = default

user_domain_name = default

region_name = RegionOne

project_name = service

username = neutron

password = NEUTRON_PASS


Restart the compute service, then enable and start the Linux bridge agent service

[root@compute1 ~]# systemctl restart openstack-nova-compute.service

[root@compute1 ~]# systemctl enable neutron-linuxbridge-agent.service

Created symlink from /etc/systemd/system/multi-user.target.wants/neutron-linuxbridge-agent.service to /usr/lib/systemd/system/neutron-linuxbridge-agent.service.

[root@compute1 ~]# systemctl start neutron-linuxbridge-agent.service


Go to the controller node to verify the network service configuration
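On the controller the check mirrors the compute one: list the network agents and confirm that a Linux bridge agent for compute1 shows up alive (again assuming the admin-openrc credentials):

# run on the controller node

source admin-openrc

neutron agent-list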