IP:192.168.99.115
# NFS server (192.168.99.115): export the glance image store to the 99.0/24 net.
yum -y install nfs-utils
# Account that anonymous NFS clients are squashed to (all_squash below).
useradd openstack
# Create the export directory before referencing it in /etc/exports.
mkdir -p /var/lib/glance/images
# $(...) instead of backticks; squash every client to the openstack uid/gid.
echo "/var/lib/glance/images 192.168.99.0/24(rw,all_squash,anonuid=$(id -u openstack),anongid=$(id -g openstack))" > /etc/exports
systemctl restart nfs-server
systemctl enable nfs-server
# Re-export after changing /etc/exports.
exportfs -r
# Verify the export list locally.
showmount -e
# ':' separator — 'user.group' is a deprecated GNU extension.
chown -R openstack:openstack /var/lib/glance/images/
在控制端也安装nfs-utils(yum -y install nfs-utils),然后验证并挂载:
# On the controller: confirm the export is visible before mounting.
showmount -e 192.168.99.115
# Ensure the mountpoint exists (glance normally creates it; -p is a no-op then).
mkdir -p /var/lib/glance/images
# _netdev defers mounting until the network is up so boot does not hang on NFS.
echo "192.168.99.115:/var/lib/glance/images /var/lib/glance/images nfs defaults,_netdev 0 0" >> /etc/fstab
mount -a
需要的包haproxy + keepalived
在前面已经做了一台haproxy+keepalived,所以我们需要再加一台物理机,做backup。
IP: 192.168.99.114
开始配置
yum -y install keepalived haproxy
/etc/keepalived/keepalived.conf
# VRRP instance protecting the HAProxy VIP; this node is the standby peer.
vrrp_instance VI_1 {
# Start as BACKUP; the MASTER (192.168.99.112) holds the VIP while alive.
state BACKUP
# NIC carrying VRRP advertisements. NOTE(review): confirm eth0 matches this host.
interface eth0
# Must match virtual_router_id on the MASTER.
virtual_router_id 51
# Lower than the MASTER's priority, so this node only wins on failover.
priority 90
# Advertisement interval in seconds.
advert_int 1
# Unicast VRRP between the two balancers instead of multicast.
unicast_src_ip 192.168.99.114
unicast_peer {
192.168.99.112
}
# Simple password auth; must match the peer's configuration.
authentication {
auth_type PASS
auth_pass 1111
}
# Floating VIP added as alias eth0:1 when this node becomes MASTER.
virtual_ipaddress {
192.168.99.211 dev eth0 label eth0:1
}
}
# Start keepalived now and enable it at boot.
systemctl start keepalived
systemctl enable keepalived
在配置之前要看下需要做反向代理的端口
PORT | 服务 |
---|---|
5000 | keystone |
9292 | glance |
8778 | placement |
8774 | nova |
9696 | neutron |
6080 | VNC |
3306 | MySQL |
5672 | rabbitMQ |
15672 | rabbitMQ_WEB |
11211 | memcached |
/etc/haproxy/haproxy.cfg
注意:这个配置在ha_1上也要加上。
# Stats page: http://<lb>:9999/haproxy-status (admin/123).
listen stats
 mode http
 bind :9999
 stats enable
 log global
 stats uri /haproxy-status
 stats auth admin:123
# NOTE: 'inter/fall/rise' only take effect together with 'check', and server
# names must be unique within a proxy — both fixed on every backend below.
listen dashboard
 bind :80
 mode http
 balance source
 server dashboard1 192.168.99.111:80 check inter 2000 fall 3 rise 5
 server dashboard2 192.168.99.113:80 check inter 2000 fall 3 rise 5
listen mysql
 bind :3306
 mode tcp
 balance source
 server mysql1 192.168.99.116:3306 check inter 2000 fall 3 rise 5
listen memcached
 bind :11211
 mode tcp
 balance source
 server memcached1 192.168.99.116:11211 check inter 2000 fall 3 rise 5
listen rabbit
 bind :5672
 mode tcp
 balance source
 server rabbit1 192.168.99.116:5672 check inter 2000 fall 3 rise 5
listen rabbit_web
 bind :15672
 mode http
 server rabbit_web1 192.168.99.116:15672 check inter 2000 fall 3 rise 5
listen keystone
 bind :5000
 mode tcp
 server keystone1 192.168.99.111:5000 check inter 2000 fall 3 rise 5
 server keystone2 192.168.99.113:5000 check inter 2000 fall 3 rise 5
listen glance
 bind :9292
 mode tcp
 server glance1 192.168.99.111:9292 check inter 2000 fall 3 rise 5
 server glance2 192.168.99.113:9292 check inter 2000 fall 3 rise 5
listen placement
 bind :8778
 mode tcp
 server placement1 192.168.99.111:8778 check inter 2000 fall 3 rise 5
 server placement2 192.168.99.113:8778 check inter 2000 fall 3 rise 5
listen neutron
 bind :9696
 mode tcp
 server neutron1 192.168.99.111:9696 check inter 2000 fall 3 rise 5
 server neutron2 192.168.99.113:9696 check inter 2000 fall 3 rise 5
listen nova
 bind :8774
 mode tcp
 server nova1 192.168.99.111:8774 check inter 2000 fall 3 rise 5
 server nova2 192.168.99.113:8774 check inter 2000 fall 3 rise 5
listen VNC
 bind :6080
 mode tcp
 server vnc1 192.168.99.111:6080 check inter 2000 fall 3 rise 5
 server vnc2 192.168.99.113:6080 check inter 2000 fall 3 rise 5
要实现高可用,要再准备一台物理机,设置主机名为controller2,
IP:192.168.99.113
从controller1准备这些文件
$ ls
admin.keystone* glance.tar keystone.tar placement.tar
dashboard.tar http_conf_d.tar neutron.tar yum/
demo.keystone* install_controller_openstack.sh* nova.tar
最终如图,yum源是centos安装时自带,如果你删除了也要从其它主机拷贝过来
准备的过程
# Package controller1's service configs into tarballs under /root.
# '&&' guards each tar so it never archives the wrong directory if cd fails.
# httpd vhost configs
cd /etc/httpd/conf.d && tar cf /root/http_conf_d.tar *
# keystone
cd /etc/keystone && tar cf /root/keystone.tar *
# glance
cd /etc/glance && tar cf /root/glance.tar *
# placement
cd /etc/placement && tar cf /root/placement.tar *
# nova
cd /etc/nova && tar cf /root/nova.tar *
# neutron
cd /etc/neutron && tar cf /root/neutron.tar *
# dashboard
cd /etc/openstack-dashboard && tar cf /root/dashboard.tar *
脚本内容。运行前要先设置好主机名,主机名不能包含下划线(_)。
#!/bin/bash
# Deploy a second OpenStack (Stein) controller from the config archives
# prepared on controller1 (yum/ repo dir plus *.tar files next to this script).
# NOTE(review): assumes MariaDB/RabbitMQ/memcached run on 192.168.99.116 and
# the VIP 192.168.99.211 fronts both controllers — confirm before running.

# Absolute dir containing this script and the archives.
# (Do not clobber the shell's builtin PWD variable for this.)
script_dir=$(cd "$(dirname "$0")" && pwd)

# --- yum repositories ---
mkdir -p /etc/yum.repos.d/bak
# Move existing repos aside, skipping the bak dir itself (plain 'mv *' tries
# to move bak into itself and errors).
for f in /etc/yum.repos.d/*; do
  [ "$f" = "/etc/yum.repos.d/bak" ] || mv "$f" /etc/yum.repos.d/bak/
done
mv "$script_dir"/yum/* /etc/yum.repos.d/

yum -y install centos-release-openstack-stein
# OpenStack client and SELinux policy glue.
yum -y install python-openstackclient openstack-selinux
yum -y install python2-PyMySQL mariadb
yum -y install openstack-keystone httpd mod_wsgi python-memcached
# Restore Apache vhosts (original line was missing the script-dir prefix,
# which broke the script when launched from another directory).
tar xf "$script_dir/http_conf_d.tar" -C /etc/httpd/conf.d
echo "192.168.99.211 openvip.com" >> /etc/hosts
echo "192.168.99.211 controller" >> /etc/hosts

# --- keystone ---
tar xf "$script_dir/keystone.tar" -C /etc/keystone
systemctl enable httpd.service
systemctl start httpd.service

# --- glance ---
yum -y install openstack-glance
tar xf "$script_dir/glance.tar" -C /etc/glance
systemctl enable openstack-glance-api.service openstack-glance-registry.service
systemctl start openstack-glance-api.service openstack-glance-registry.service

# --- placement ---
yum -y install openstack-placement-api
tar xf "$script_dir/placement.tar" -C /etc/placement

# --- nova ---
yum -y install openstack-nova-api openstack-nova-conductor openstack-nova-console openstack-nova-novncproxy openstack-nova-scheduler openstack-nova-placement-api
tar xf "$script_dir/nova.tar" -C /etc/nova
# placement runs under httpd, so reload it after unpacking nova/placement conf.
systemctl restart httpd
systemctl enable openstack-nova-api.service \
openstack-nova-consoleauth.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service
systemctl restart openstack-nova-api.service \
openstack-nova-consoleauth.service \
openstack-nova-scheduler.service \
openstack-nova-conductor.service \
openstack-nova-novncproxy.service
# Convenience helper to bounce all nova services at once.
cat > /root/nova-restart.sh <<EOF
#!/bin/bash
systemctl restart openstack-nova-api.service openstack-nova-consoleauth.service openstack-nova-scheduler.service openstack-nova-conductor.service openstack-nova-novncproxy.service
EOF
chmod a+x /root/nova-restart.sh

# --- neutron ---
yum -y install openstack-neutron openstack-neutron-ml2 \
openstack-neutron-linuxbridge ebtables
tar xf "$script_dir/neutron.tar" -C /etc/neutron
# Bridged traffic must traverse iptables for linuxbridge security groups.
echo "net.bridge.bridge-nf-call-iptables = 1" >> /etc/sysctl.conf
echo "net.bridge.bridge-nf-call-ip6tables = 1" >> /etc/sysctl.conf
sysctl -p
systemctl restart openstack-nova-api.service
systemctl enable neutron-server.service \
neutron-linuxbridge-agent.service \
neutron-dhcp-agent.service \
neutron-metadata-agent.service
systemctl restart neutron-server.service \
neutron-linuxbridge-agent.service \
neutron-dhcp-agent.service \
neutron-metadata-agent.service

# --- dashboard ---
yum -y install openstack-dashboard
tar xf "$script_dir/dashboard.tar" -C /etc/openstack-dashboard
systemctl restart httpd.service
把之前所有的/etc/hosts改成
192.168.99.211 openvip.com
192.168.99.211 controller
新的物理机,安装好centos7.2,配置好IP地址与主机名。
准备这些包
准备
# On an existing compute node, package the neutron/nova configs for the new
# node. '&&' ensures tar never archives the wrong directory if cd fails.
# neutron (run on your existing node)
cd /etc/neutron && tar cf /root/neutron-compute.tar *
# nova (run on your existing node)
cd /etc/nova && tar cf /root/nova-compute.tar *
文件limits.conf
# /etc/security/limits.conf
#
#This file sets the resource limits for the users logged in via PAM.
#It does not affect resource limits of the system services.
#
#Also note that configuration files in /etc/security/limits.d directory,
#which are read in alphabetical order, override the settings in this
#file in case the domain is the same or more specific.
#That means for example that setting a limit for wildcard domain here
#can be overriden with a wildcard setting in a config file in the
#subdirectory, but a user specific setting here can be overriden only
#with a user specific setting in the subdirectory.
#
#Each line describes a limit for a user in the form:
#
# -
#
#Where:
# can be:
# - a user name
# - a group name, with @group syntax
# - the wildcard *, for default entry
# - the wildcard %, can be also used with %group syntax,
# for maxlogin limit
#
# can have the two values:
# - "soft" for enforcing the soft limits
# - "hard" for enforcing hard limits
#
#- can be one of the following:
# - core - limits the core file size (KB)
# - data - max data size (KB)
# - fsize - maximum filesize (KB)
# - memlock - max locked-in-memory address space (KB)
# - nofile - max number of open file descriptors
# - rss - max resident set size (KB)
# - stack - max stack size (KB)
# - cpu - max CPU time (MIN)
# - nproc - max number of processes
# - as - address space limit (KB)
# - maxlogins - max number of logins for this user
# - maxsyslogins - max number of logins on the system
# - priority - the priority to run user process with
# - locks - max number of file locks the user can hold
# - sigpending - max number of pending signals
# - msgqueue - max memory used by POSIX message queues (bytes)
# - nice - max nice priority allowed to raise to values: [-20, 19]
# - rtprio - max realtime priority
#
# -
#
#* soft core 0
#* hard rss 10000
#@student hard nproc 20
#@faculty soft nproc 20
#@faculty hard nproc 50
#ftp hard nproc 0
#@student - maxlogins 4
# End of file
# Allow unlimited core dumps for debugging service crashes.
* soft core unlimited
* hard core unlimited
# Raise process and open-file ceilings for nova/libvirt workloads.
* soft nproc 1000000
* hard nproc 1000000
* soft nofile 1000000
* hard nofile 1000000
# Max locked-in-memory address space (KB).
* soft memlock 32000
* hard memlock 32000
# Max memory for POSIX message queues (bytes).
* soft msgqueue 8192000
* hard msgqueue 8192000
文件profile
# /etc/profile
# System wide environment and startup programs, for login setup
# Functions and aliases go in /etc/bashrc
# It's NOT a good idea to change this file unless you know what you
# are doing. It's much better to create a custom.sh shell script in
# /etc/profile.d/ to make custom changes to your environment, as this
# will prevent the need for merging in future updates.
# Prepend $1 to PATH, or append it when $2 is "after"; do nothing when the
# directory is already present anywhere in PATH.
pathmunge () {
    # Guard: bail out early if $1 is already on PATH.
    case ":${PATH}:" in
        *:"$1":*) return ;;
    esac
    if [ "$2" = "after" ] ; then
        PATH=$PATH:$1
    else
        PATH=$1:$PATH
    fi
}
# Derive USER/LOGNAME/MAIL from id(1); the EUID/UID block is a ksh workaround.
if [ -x /usr/bin/id ]; then
if [ -z "$EUID" ]; then
# ksh workaround
EUID=`id -u`
UID=`id -ru`
fi
USER="`id -un`"
LOGNAME=$USER
MAIL="/var/spool/mail/$USER"
fi
# Path manipulation
# root gets the sbin dirs first; regular users get them appended.
if [ "$EUID" = "0" ]; then
pathmunge /usr/sbin
pathmunge /usr/local/sbin
else
pathmunge /usr/local/sbin after
pathmunge /usr/sbin after
fi
HOSTNAME=`/usr/bin/hostname 2>/dev/null`
HISTSIZE=1000
# Preserve an existing 'ignorespace' preference while always deduplicating.
if [ "$HISTCONTROL" = "ignorespace" ] ; then
export HISTCONTROL=ignoreboth
else
export HISTCONTROL=ignoredups
fi
export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL
# By default, we want umask to get set. This sets it for login shell
# Current threshold for system reserved uid/gids is 200
# You could check uidgid reservation validity in
# /usr/share/doc/setup-*/uidgid file
if [ $UID -gt 199 ] && [ "`id -gn`" = "`id -un`" ]; then
umask 002
else
umask 022
fi
# Source every readable drop-in; "${-#*i}" != "$-" tests whether the shell is
# interactive (flag 'i' present in $-), silencing output otherwise.
for i in /etc/profile.d/*.sh ; do
if [ -r "$i" ]; then
if [ "${-#*i}" != "$-" ]; then
. "$i"
else
. "$i" >/dev/null
fi
fi
done
unset i
unset -f pathmunge
# Site customization (the only non-stock line): timestamp + user on each
# bash history entry.
export HISTTIMEFORMAT="%F %T `whoami` "
文件sysctl.conf
# Let bridged traffic traverse iptables/ip6tables — required for neutron
# linuxbridge security groups (needs the br_netfilter module loaded).
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
脚本openstack_node_script.sh
#!/bin/bash
# Provision an OpenStack (Stein) compute node from the archives/config files
# prepared on an existing node (yum/, nova-compute.tar, neutron-compute.tar,
# limits.conf, profile, sysctl.conf next to this script).

# Print a green status message.
gecho() {
    echo -e "\e[1;32m${1}\e[0m" && sleep 1
}
# Print a red error message.
recho() {
    echo -e "\e[1;31m${1}\e[0m" && sleep 1
}

vip=192.168.99.211
controller_ip=192.168.99.211

gecho "配置yum源"
# Absolute dir of this script; do not clobber the shell builtin PWD.
script_dir=$(cd "$(dirname "$0")" && pwd)
mkdir -p /etc/yum.repos.d/bak
# Move existing repos aside, skipping the bak dir itself.
for f in /etc/yum.repos.d/*; do
    [ "$f" = "/etc/yum.repos.d/bak" ] || mv "$f" /etc/yum.repos.d/bak/
done
mv "$script_dir"/yum/* /etc/yum.repos.d/

gecho "安装包..."
yum -y install centos-release-openstack-stein
yum -y install python-openstackclient openstack-selinux
yum -y install openstack-nova-compute
yum -y install openstack-neutron-linuxbridge ebtables ipset

# Install the prepared system tuning files (sysctl.conf already contains the
# bridge-nf lines, so they are not appended again below).
cat "$script_dir/limits.conf" > /etc/security/limits.conf
cat "$script_dir/profile" > /etc/profile
cat "$script_dir/sysctl.conf" > /etc/sysctl.conf

gecho "配置nova"
tar xvf "$script_dir/nova-compute.tar" -C /etc/nova/
# Rewrite my_ip with this host's eth0 address.
# NOTE(review): assumes the management NIC is eth0 — confirm per host.
myip=$(ifconfig eth0 | awk '/inet /{print $2}')
sed -i "/my_ip =/s#.*#my_ip = ${myip}#" /etc/nova/nova.conf

gecho "配置neutron"
# Was: 'tar xf neutron-compute.tar' — missing the script-dir prefix.
tar xf "$script_dir/neutron-compute.tar" -C /etc/neutron
sysctl -p
echo "${vip} openvip.com" >> /etc/hosts
echo "${controller_ip} controller" >> /etc/hosts

# Count CPUs with hardware virtualization.
# Was: vcpu=${egrep ...} — a bad-substitution error, not command substitution.
vcpu=$(grep -Ec '(vmx|svm)' /proc/cpuinfo)
# Was: [ vcpu -eq 0 ] — missing '$', so the comparison never worked.
if [ "$vcpu" -eq 0 ] ; then
    # No VT-x/AMD-V: fall back to plain qemu emulation.
    cat >> /etc/nova/nova.conf <<EOF
[libvirt]
virt_type = qemu
EOF
fi

gecho "启动服务..."
systemctl enable libvirtd.service openstack-nova-compute.service
systemctl restart libvirtd.service || recho "libvirtd启动失败"
systemctl restart openstack-nova-compute.service || recho "openstack-nova-compute启动失败"
systemctl enable neutron-linuxbridge-agent.service
systemctl restart neutron-linuxbridge-agent.service