Based on Ubuntu 14.04 LTS x86_64
configure neutron controller node:
1. on keystone node
mysql -uroot -p
mysql> create database neutron;
mysql> grant all privileges on neutron.* to 'neutron'@'localhost' identified by 'NEUTRON-DBPASS';
mysql> grant all privileges on neutron.* to 'neutron'@'%' identified by 'NEUTRON-DBPASS';
mysql> flush privileges;
# Create a neutron user
keystone user-create --tenant service --name neutron --pass NEUTRON-USER-PASSWORD
# Add role to the neutron user
keystone user-role-add --user neutron --tenant service --role admin
# Create the neutron service
keystone service-create --name=neutron --type=network --description="Neutron Network Service"
# Create a Networking endpoint
keystone endpoint-create --region RegionOne --service neutron --publicurl=http://NEUTRON-SERVER:9696 --internalurl=http://NEUTRON-SERVER:9696 --adminurl=http://NEUTRON-SERVER:9696
2. on neutron server node, here we use keystone node on it
for using Neutron Networking
aptitude update
aptitude -y install linux-image-generic-lts-trusty linux-headers-generic-lts-trusty
reboot
3. aptitude -y install neutron-server neutron-plugin-ml2
4. vi /etc/neutron/neutron.conf
[database]
connection=mysql://neutron:neutron@MYSQL-SERVER/neutron
[DEFAULT]
auth_strategy=keystone
rpc_backend=neutron.openstack.common.rpc.impl_kombu
rabbit_host = controller
rabbit_password = GUEST-PASS
notify_nova_on_port_status_changes=True
notify_nova_on_port_data_changes=True
nova_url=http://controller:8774/v2
nova_admin_username=nova
nova_admin_tenant_id=$(keystone tenant-list | awk '/service/ { print $2 }')
nova_admin_password=NOVA-USER-PASSWORD
nova_admin_auth_url=http://controller:35357/v2.0
core_plugin=ml2
service_plugins=router
allow_overlapping_ips = True
verbose = True
[keystone_authtoken]
auth_host=controller
auth_port = 35357
auth_protocol = http
auth_uri=http://controller:5000
admin_tenant_name=service
admin_user=neutron
admin_password=NEUTRON-USER-PASSWORD
Comment out any lines in the [service_providers] section
5. vi /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers=gre
tenant_network_types=gre
mechanism_drivers=openvswitch
[ml2_type_gre]
tunnel_id_ranges=1:1000
[securitygroup]
firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group=True
6. on nova controller node
vi /etc/nova/nova.conf
[DEFAULT]
network_api_class=nova.network.neutronv2.api.API
neutron_url=http://NEUTRON-SERVER:9696
neutron_auth_strategy=keystone
neutron_admin_tenant_name=service
neutron_admin_username=neutron
neutron_admin_password=NEUTRON-USER-PASSWORD
neutron_admin_auth_url=http://controller:35357/v2.0
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver=nova.virt.firewall.NoopFirewallDriver
security_group_api=neutron
vif_plugging_is_fatal=false
vif_plugging_timeout=0
7. service nova-api restart
service nova-scheduler restart
service nova-conductor restart
8. chown -R neutron:neutron /etc/neutron /var/log/neutron
service neutron-server restart
Neutron Network node
1. eth0 for management/public/floating (192.168.1.0/24), eth1 for internal/flat (192.168.30.0/24), eth2 for the external bridge (br-ex, configured below); it's recommended to use a separate NIC for the management network
vi /etc/network/interfaces
auto eth2
iface eth2 inet manual
up ip link set dev $IFACE up
down ip link set dev $IFACE down
2. vi /etc/hosts
# remove or comment the line beginning with 127.0.1.1
192.168.1.10 controller
192.168.1.11 node1
192.168.1.12 neutronnet
3. aptitude -y install ntp
vi /etc/ntp.conf
server 192.168.1.10
restrict 192.168.1.10
service ntp restart
4. aptitude -y install python-mysqldb
5. for using Neutron Networking
aptitude update
aptitude -y install linux-image-generic-lts-trusty linux-headers-generic-lts-trusty
reboot
6. Enable packet forwarding and disable packet destination filtering
vi /etc/sysctl.conf
net.ipv4.ip_forward=1
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
sysctl -p
7. aptitude -y install neutron-plugin-ml2 neutron-plugin-openvswitch-agent neutron-l3-agent neutron-dhcp-agent
8. vi /etc/neutron/neutron.conf
[DEFAULT]
auth_strategy=keystone
rpc_backend=neutron.openstack.common.rpc.impl_kombu
rabbit_host = controller
rabbit_password = GUEST-PASS
core_plugin=ml2
service_plugins=router
allow_overlapping_ips = True
verbose = True
[keystone_authtoken]
auth_host=controller
auth_port = 35357
auth_protocol = http
auth_uri=http://controller:5000
admin_tenant_name=service
admin_user=neutron
admin_password=NEUTRON-USER-PASSWORD
Comment out any lines in the [service_providers] section
9. vi /etc/neutron/l3_agent.ini
interface_driver=neutron.agent.linux.interface.OVSInterfaceDriver
use_namespaces=True
verbose = True
vi /etc/neutron/dhcp_agent.ini
interface_driver=neutron.agent.linux.interface.OVSInterfaceDriver
dhcp_driver=neutron.agent.linux.dhcp.Dnsmasq
use_namespaces=True
verbose = True
10. vi /etc/neutron/metadata_agent.ini
auth_url = http://controller:5000/v2.0
auth_region = RegionOne
admin_tenant_name = service
admin_user = neutron
admin_password = NEUTRON-USER-PASSWORD
nova_metadata_ip = controller
metadata_proxy_shared_secret = METADATA-PASSWORD
verbose = True
11. on nova controller node
vi /etc/nova/nova.conf
neutron_metadata_proxy_shared_secret=METADATA-PASSWORD
service_neutron_metadata_proxy=true
service nova-api restart
12. vi /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers=gre
tenant_network_types=gre
mechanism_drivers=openvswitch
[ml2_type_gre]
tunnel_id_ranges=1:1000
[ovs]
local_ip = INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS #192.168.30.12
tunnel_type = gre
enable_tunneling = True
[securitygroup]
firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group=True
13. service openvswitch-switch restart
ovs-vsctl add-br br-int
ovs-vsctl add-br br-ex
ovs-vsctl add-port br-ex eth2
ethtool -K eth2 gro off
ethtool -k eth2
14. chown -R neutron:neutron /etc/neutron /var/log/neutron
service neutron-plugin-openvswitch-agent restart
service neutron-l3-agent restart
service neutron-dhcp-agent restart
service neutron-metadata-agent restart
neutron compute node setup
1. eth0 for management/public/floating (192.168.1.0/24), eth1 for internal/flat (192.168.20.0/24); it's recommended to use a separate NIC for the management network
2. vi /etc/hosts
# remove or comment the line beginning with 127.0.1.1
192.168.1.10 controller
192.168.1.11 node1
192.168.1.12 neutronnet
3. aptitude -y install qemu-kvm libvirt-bin virtinst bridge-utils
modprobe vhost_net
echo vhost_net >> /etc/modules
4. aptitude -y install ntp
vi /etc/ntp.conf
server 192.168.1.10
restrict 192.168.1.10
service ntp restart
5. aptitude -y install python-mysqldb
6. aptitude -y install nova-compute-kvm python-guestfs
7. dpkg-statoverride --update --add root root 0644 /boot/vmlinuz-$(uname -r)
vi /etc/kernel/postinst.d/statoverride
#!/bin/sh
version="$1"
# passing the kernel version is required
[ -z "${version}" ] && exit 0
dpkg-statoverride --update --add root root 0644 /boot/vmlinuz-${version}
chmod +x /etc/kernel/postinst.d/statoverride
8. vi /etc/nova/nova.conf
[DEFAULT]
auth_strategy=keystone
rpc_backend = rabbit
rabbit_host = controller
rabbit_password = GUEST-PASS
my_ip=192.168.1.11
vnc_enabled=true
vncserver_listen=0.0.0.0
vncserver_proxyclient_address=192.168.1.11
novncproxy_base_url=http://controller:6080/vnc_auto.html
glance_host=controller
[keystone_authtoken]
auth_host=controller
auth_port=35357
auth_protocol=http
auth_uri=http://controller:5000
admin_user=nova
admin_password=NOVA-USER-PASSWORD
admin_tenant_name=service
[database]
connection=mysql://nova:NOVA-DATABASE-PASSWORD@MYSQL-SERVER/nova
rm -rf /var/lib/nova/nova.sqlite
9. chown -R nova:nova /etc/nova /var/log/nova
service nova-compute restart
now for neutron plugin agent:
10. disable packet destination filtering
vi /etc/sysctl.conf
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
sysctl -p
11. for using Neutron Networking
aptitude update
aptitude -y install linux-image-generic-lts-trusty linux-headers-generic-lts-trusty
reboot
12. aptitude -y install neutron-common neutron-plugin-ml2 neutron-plugin-openvswitch-agent
13. vi /etc/neutron/neutron.conf
[DEFAULT]
auth_strategy=keystone
rpc_backend=neutron.openstack.common.rpc.impl_kombu
rabbit_host = controller
rabbit_password = GUEST-PASS
core_plugin=ml2
service_plugins=router
allow_overlapping_ips = True
verbose = True
[keystone_authtoken]
auth_host=controller
auth_port = 35357
auth_protocol = http
auth_uri=http://controller:5000
admin_tenant_name=service
admin_user=neutron
admin_password=NEUTRON-USER-PASSWORD
Comment out any lines in the [service_providers] section
14. vi /etc/neutron/plugins/ml2/ml2_conf.ini
[ml2]
type_drivers=gre
tenant_network_types=gre
mechanism_drivers=openvswitch
[ml2_type_gre]
tunnel_id_ranges=1:1000
[ovs]
local_ip = INSTANCE_TUNNELS_INTERFACE_IP_ADDRESS #192.168.30.11
tunnel_type = gre
enable_tunneling = True
[securitygroup]
firewall_driver=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group=True
15. service openvswitch-switch restart
ovs-vsctl add-br br-int
16. vi /etc/nova/nova.conf
network_api_class=nova.network.neutronv2.api.API
neutron_url=http://NEUTRON-SERVER:9696
neutron_auth_strategy=keystone
neutron_admin_tenant_name=service
neutron_admin_username=neutron
neutron_admin_password=NEUTRON-USER-PASSWORD
neutron_admin_auth_url=http://controller:35357/v2.0
linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
firewall_driver=nova.virt.firewall.NoopFirewallDriver
security_group_api=neutron
vif_plugging_is_fatal=false
vif_plugging_timeout=0
17. service nova-compute restart
18. chown -R neutron:neutron /etc/neutron /var/log/neutron
service neutron-plugin-openvswitch-agent restart
creating neutron network
on controller node:
1. to check neutron-server is communicating with its agents
neutron agent-list
source ~/adminrc (through step 1~2)
# create external network
neutron net-create ext-net --shared --router:external=True [ --provider:network_type gre --provider:segmentation_id SEG_ID ]
Note: SEG_ID is the tunnel id.
2. # create subnet on external network
neutron subnet-create ext-net --name ext-subnet --allocation-pool start=FLOATING_IP_START,end=FLOATING_IP_END --disable-dhcp --gateway EXTERNAL_NETWORK_GATEWAY EXTERNAL_NETWORK_CIDR
neutron subnet-create ext-net --name ext-subnet --allocation-pool start=192.168.1.200,end=192.168.1.210 --disable-dhcp --dns-nameserver 210.22.84.3 --dns-nameserver 210.22.70.3 --gateway 192.168.1.1 192.168.1.0/24
3. # create tenant network
source ~/demo1rc (through step 3~7)
neutron net-create demo-net
4. # create subnet on tenant network
neutron subnet-create demo-net --name demo-subnet --gateway TENANT_NETWORK_GATEWAY TENANT_NETWORK_CIDR
neutron subnet-create demo-net --name demo-subnet --dns-nameserver x.x.x.x --gateway 10.10.10.1 10.10.10.0/24
5. # create virtual router to connect external and tenant network
neutron router-create demo-router
6. # Attach the router to the tenant subnet
neutron router-interface-add demo-router demo-subnet
7. # Attach the router to the external network by setting it as the gateway
neutron router-gateway-set demo-router ext-net
Note: the tenant router gateway should occupy the lowest IP address in the floating IP address range -- 192.168.1.200
neutron net-list
neutron subnet-list
neutron router-port-list demo-router
Launch Instances
for demo1 tenant:
source ~/demo1rc
neutron security-group-create --description "Test Security Group" test-sec
# permit ICMP
neutron security-group-rule-create --protocol icmp --direction ingress --remote-ip-prefix 0.0.0.0/0 test-sec
# permit ssh
neutron security-group-rule-create --protocol tcp --port-range-min 22 --port-range-max 22 --direction ingress --remote-ip-prefix 0.0.0.0/0 test-sec
neutron security-group-rule-list
nova keypair-add demokey > demokey.pem
nova keypair-list
nova flavor-list
nova image-list
neutron net-list
neutron subnet-list
demonet=`neutron net-list | grep demo-net | awk '{ print $2 }'`
nova boot --flavor 1 --image "CirrOS 0.3.2" --key-name demokey --security-groups test-sec --nic net-id=$demonet CirrOS
Notes: you should have enough memory on KVM nodes, or you will not get instances created.
1. you can use vmware workstation to build images, then upload to glance using dashboard
ubuntu
1). vi /etc/hosts to remove the 127.0.1.1 entry
2). enable ssh login
3). enable dhcp client on interface
4). enable normal username/password
5). set root password
centos/redhat
1). rm -rf /etc/ssh/ssh_host_*
2). vi /etc/sysconfig/network-scripts/ifcfg-ethX to remove HWADDR and UUID items
3). rm -rf /etc/udev/rules.d/70-persistent-net.rules
4). enable ssh login
5). enable dhcp client on interface (also vi /etc/sysconfig/network, /etc/resolv.conf)
6). enable normal username/password
7). set root password
2. launch instance without keypair
nova commands:
nova list; nova show CirrOS
nova stop CirrOS
nova start CirrOS
# get vnc console address via web browser like below:
nova get-vnc-console CirrOS novnc
# Create a floating IP address on the ext-net external network
neutron floatingip-create ext-net
neutron floatingip-list
# Associate the floating IP address with your instance, even while it's running
nova floating-ip-associate CirrOS 192.168.1.201
( nova floating-ip-disassociate cirros 192.168.1.201 )
nova list
ping 192.168.1.201 (floating ip)
using xshell or putty to ssh -i demokey.pem [email protected] (username: cirros, password: cubswin:))
[ for ubuntu cloud image: username is ubuntu, for fedora cloud image: username is fedora ]
now we can ping and ssh to 192.168.1.201, and CirrOS can access Internet now.
Notes: you should have enough space in /var/lib/nova/instances to store the VMs; you can mount a partition there (using local or shared storage).
Fixed IP addresses with OpenStack Neutron for tenant networks
neutron subnet-list
neutron subnet-show demo-subnet
neutron port-create demo-net --fixed-ip ip_address=10.10.10.10 --name VM-NAME
nova boot --flavor 1 --image "CirrOS 0.3.2" --key-name demokey --security-groups test-sec --nic port-id=xxx VM-NAME
Access novnc console from Internet method1
1. add another interface facing the Internet on the nova controller (normally the keystone+dashboard node)
2. assign a public ip address
3. on compute node, vi /etc/nova/nova.conf
novncproxy_base_url=http://public_ip_address_of_nova_controller:6080/vnc_auto.html
service nova-compute restart
4. nova get-vnc-console CirrOS novnc
http://public_ip_address_of_nova_controller:6080/vnc_auto.html?token=4f9c1f7e-4288-4fda-80ad-c1154a954673
Access novnc console from Internet method2
1. you can publish dashboard web site to Internet (normally keystone+dashboard node)
2. on compute node, vi /etc/nova/nova.conf
novncproxy_base_url=http://public_ip_address_of_firewall:6080/vnc_auto.html
service nova-compute restart
3. nova get-vnc-console CirrOS novnc
http://public_ip_address_of_firewall:6080/vnc_auto.html?token=4f9c1f7e-4288-4fda-80ad-c1154a954673