1. 安装环境
#yum -y install qemu-kvm libvirt python-virtinst bridge-utils
或者用如下脚本全部自动安装配置:
配置了kvm, 防火墙和snmp
#yum update -y;yum -y install qemu-kvm libvirt python-virtinst bridge-utils policycoreutils-python;mkdir /home/vhosts;semanage fcontext -a -t virt_image_t /home/vhosts;iptables -A INPUT -m state --state NEW -m tcp -p tcp --dport 5900:5920 -j ACCEPT;iptables -A INPUT -p udp --dport 161 -j ACCEPT;service iptables save;service iptables restart;sed -i s/192.168.122/10.1.0/g /etc/libvirt/qemu/networks/default.xml;chkconfig libvirtd on;service libvirtd start;virsh pool-define-as vmpool --type dir --target /home/vhosts;virsh pool-start vmpool;virsh pool-autostart vmpool;yum install net-snmp -y;sed -i s/'default public'/'0.0.0.0 zhxgsnmp'/g /etc/snmp/snmpd.conf;sed -i '54 aview systemview included .1' /etc/snmp/snmpd.conf;chkconfig snmpd on;service snmpd start;reboot
或者是多行版本:
yum update -y;\
yum -y install qemu-kvm libvirt python-virtinst bridge-utils policycoreutils-python;\
mkdir /home/vhosts;\
semanage fcontext -a -t virt_image_t /home/vhosts;iptables -A INPUT -m state --state NEW -m tcp -p tcp --dport 5900:5920 -j ACCEPT;\
iptables -A INPUT -p udp --dport 161 -j ACCEPT;service iptables save;\
service iptables restart;sed -i s/192.168.122/10.1.0/g /etc/libvirt/qemu/networks/default.xml;chkconfig libvirtd on;service libvirtd start;\
virsh pool-define-as vmpool --type dir --target /home/vhosts;virsh pool-start vmpool;\
virsh pool-autostart vmpool;\
yum install net-snmp -y;\
sed -i s/'default public'/'0.0.0.0 zhxgsnmp'/g /etc/snmp/snmpd.conf;sed -i '54 aview systemview included .1' /etc/snmp/snmpd.conf;chkconfig snmpd on;\
service snmpd start;\
reboot
注意:以上脚本中防火墙禁止icmp的那条规则要移到最后去,然后重启iptables并重启libvirtd
2. 配置selinux
方法一:禁止selinux
#vi /etc/selinux/config
状态改为:disabled
方法二:配置允许方式(推荐)
#yum -y install policycoreutils-python
#mkdir /home/vhosts 虚拟存储目录
#semanage fcontext -a -t virt_image_t /home/vhosts
3. 启动libvirtd
#chkconfig libvirtd on
#service libvirtd start
4. 基本命令
virt-install 创建新虚拟机
virsh start spider 启动虚拟机
virsh destroy spider 强制关机
virt-clone 复制虚拟机
virsh attach-interface 添加网卡
virsh change-media 更换光驱iso文件
virsh edit spider 编辑配置文件
virsh -c qemu+ssh://10.88.0.1:10809/system list 远程查看虚拟机列表
5. 新建虚拟机
raw格式:速度最快,但是不能实现内部快照,而且实际占用磁盘空间需要用qemu-img info spider.img查看
#virt-install --name spider --ram 4096 --vcpus=2 --disk path=/home/vhosts/spider.img,size=100 --accelerate --arch=x86_64 --os-type=linux --os-variant='rhel6' --cdrom=/home/software/CentOS-6.5-x86_64-minimal.iso --graphics vnc --network bridge=br0
size:为GB
qcow2格式:速度接近raw,但支持内部快照
#qemu-img create -f qcow2 /home/vhosts/openstack.qcow2 100G 必须通过qemu-img创建qcow2格式文件
chown qemu:qemu /home/vhosts/openstack.qcow2
#virt-install --name openstack --ram 16384 --vcpus=2 --arch=x86_64 --disk path=/home/vhosts/openstack.qcow2,size=200,format=qcow2 --os-type=linux --os-variant='rhel6' --accelerate --cdrom=/home/software/CentOS-6.5-x86_64-minimal.iso --graphic type=vnc,port=5904,listen=0.0.0.0,password='cqmygysdss' --network bridge=br0,bridge=br1
注意:逗号,之后不能有空格!
以上是桥接方式,如果是NAT方式,则网络参数为:
--network network=default,model=virtio
NAT模式的xml中相关部分为如下:
如果是桥接,则XML为:
6. raw格式转为qcow2
#qemu-img convert -O qcow2 test.img test.qcow2
转换完毕后,要修改xml
#virsh edit scrapy
注意 slot要选一个没用的
7. 修改VNC绑定地址
注意要使用UltraVNC,不要使用RealVNC,并且注意别选auto,而要full color,否则容易灰屏
修改的时候要先
virsh destroy centos6.5
然后在修改
vi /etc/libvirt/qemu/centos6.5.xml
在其中加入passwd的属性,更改为如下:
其中passwd的位置可以随意.
8. 更换光驱ISO
#virsh change-media centos6.5 hdc --source /home/software/CentOS-6.5-x86_64-minimal.iso
#virsh reset centos6.5
9. 修改bios启动顺序
#virsh destroy centos6.5
#vi /etc/libvirt/qemu/centos6.5.xml
添加cdrom为第一启动
#virsh define /etc/libvirt/qemu/centos6.5.xml
#virsh start centos6.5
10. 查看虚拟机所用VNC端口
#virsh vncdisplay scrapy0002
:6
则使用5900+6=5906端口
修改VNC端口
#virsh edit scrapy0002
11. 虚拟机改名
#virsh destroy centos6.5
#cd /etc/libvirt/qemu
#mv centos6.5.xml spider.xml
#vi spider.xml 修改name字段为spider
#virsh undefine centos6.5
#virsh define /etc/libvirt/qemu/spider.xml
#virsh start spider
12. 查看虚拟机文件信息
# qemu-img info monitor_cacti.img
13. 删除虚拟机
#virsh destroy $1
#virsh undefine $1
#RemoveFolder "/var/lib/libvirt/images/$1/"
#RemoveFile "/etc/libvirt/qemu/$1"
注意,如果vm用save命令备份过。
undefine需要加参数,比如:
virsh undefine --managed-save vm2
否则会失败。
14. 设置网卡
1) 桥接方式
需要对网卡进行设置一下 创建一个文件 /etc/sysconfig/network-scripts/ifcfg-br0
DEVICE="br0"
ONBOOT=yes
TYPE=Bridge
BOOTPROTO=static
IPADDR=110.249.208.110
NETMASK=255.255.255.0
#GATEWAY=10.1.199.1 (内网网关示例,注意GATEWAY只能配置一个,按实际环境保留其一)
GATEWAY=110.249.208.126
DNS1=202.106.0.20
修改 /etc/sysconfig/network-scripts/ifcfg-eth0
DEVICE=eth0
TYPE=Ethernet
BOOTPROTO=none
ONBOOT=yes
BRIDGE=br0
DNS1=202.106.0.20
DNS2=8.8.8.8
注:DNS必须配置在eth0中
重启网络服务
#ifdown eth0;ifup eth0 如果是双网卡,别用service network restart,否则起不来之后,都进不去!
查看网络
[root@cloud network-scripts]# ifconfig
br0 Link encap:Ethernet HWaddr 6C:AE:8B:77:2E:A4
inet addr:110.249.208.122 Bcast:110.249.208.127 Mask:255.255.255.128
inet6 addr: fe80::6eae:8bff:fe77:2ea4/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:319 errors:0 dropped:0 overruns:0 frame:0
TX packets:123 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:29902 (29.2 KiB) TX bytes:15123 (14.7 KiB)
eth0 Link encap:Ethernet HWaddr 6C:AE:8B:77:2E:A4
inet6 addr: fe80::6eae:8bff:fe77:2ea4/64 Scope:Link
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:328 errors:0 dropped:0 overruns:0 frame:0
TX packets:119 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:1000
RX bytes:37507 (36.6 KiB) TX bytes:15419 (15.0 KiB)
lo Link encap:Local Loopback
inet addr:127.0.0.1 Mask:255.0.0.0
inet6 addr: ::1/128 Scope:Host
UP LOOPBACK RUNNING MTU:16436 Metric:1
RX packets:0 errors:0 dropped:0 overruns:0 frame:0
TX packets:0 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:0 (0.0 b) TX bytes:0 (0.0 b)
virbr0 Link encap:Ethernet HWaddr 52:54:00:7D:4A:18
inet addr:192.168.122.1 Bcast:192.168.122.255 Mask:255.255.255.0
UP BROADCAST RUNNING MULTICAST MTU:1500 Metric:1
RX packets:58 errors:0 dropped:0 overruns:0 frame:0
TX packets:92 errors:0 dropped:0 overruns:0 carrier:0
collisions:0 txqueuelen:0
RX bytes:17878 (17.4 KiB) TX bytes:18870 (18.4 KiB)
桥接方式主机可以不用设置IP,这样主机不能访问,但是虚拟机可以设置
网桥查看
#brctl show
[root@cloud home]# brctl show
bridge name bridge id STP enabled interfaces
br0 8000.6cae8b772ea4 no eth0
vnet0
vnet10
vnet12
vnet14
vnet16
vnet18
vnet2
vnet20
vnet22
vnet24
vnet26
vnet28
vnet30
vnet32
vnet4
vnet40
vnet8
br1 8000.6cae8b772ea6 no eth1
vnet1
vnet11
vnet13
vnet15
vnet17
vnet19
vnet21
vnet23
vnet25
vnet27
vnet29
vnet3
vnet31
vnet33
vnet41
vnet5
vnet6
vnet7
vnet9
virbr0 8000.5254007d4a18 yes virbr0-nic
在虚拟机cacti中
#ifconfig -a -s
[root@cacti network-scripts]# ifconfig -a -s
Iface MTU Met RX-OK RX-ERR RX-DRP RX-OVR TX-OK TX-ERR TX-DRP TX-OVR Flg
eth0 1500 0 335 0 0 0 111 0 0 0 BMRU
eth1 1500 0 0 0 0 0 0 0 0 0 BM
eth2 1500 0 0 0 0 0 0 0 0 0 BM
lo 16436 0 0 0 0 0 0 0 0 0 LRU
修改MAC
查看mac:
#ifconfig eth1
根据这个MAC修改ifcfg-eth1,另外UUID直接删除掉,不受影响
修改为1000Mbps网卡
kvm默认是100Mbps网卡,如果物理主机是千兆,需要修改为虚拟千兆网卡:
#virsh edit scrapy
就是添加:
#ethtool eth1 查看速率
修改eth0名称(统一eth0为外网,eth1为内网,方便迁移)
#vi /etc/udev/rules.d/70-persistent-net.rules
PCI device 0x14e4:0x1680 (tg3)
SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="b8:ac:6f:65:31:e5", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="eth*", NAME="eth0"
?
直接修改Name="eth1"即可,然后把/etc/sysconfig/network-scripts/ifcfg-eth0重命名为ifcfg-eth1,并vi其中的name,reboot即可
2) NAT
首先配置Host机环境:
#vi /etc/sysctl.conf
net.ipv4.ip_forward = 1
#sysctl -p 使生效
查看网络
#brctl show
[root@cloud2 ~]# brctl show
bridge name bridge id STP enabled interfaces
br0 8000.001517627ef4 no eth0
br1 8000.001517627ef5 no eth1
vnet1
virbr0 8000.525400f98da4 yes virbr0-nic
vnet0
查看虚拟网络
#virsh net-list --all
Name State Autostart Persistent
--------------------------------------------------
default active yes yes
如果没有以上信息,则virsh net-define /etc/libvirt/qemu/networks/default.xml
如果要修改默认dhcp网段,直接修改default.xml配置文件:
#virsh net-destroy default
#virsh net-start default
#service libvirtd restart
再查看路由应该就生效了:iptables -t nat -L -nv
查看路由
#route
[root@cloud2 ~]# route
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
10.88.0.0 * 255.255.255.0 U 0 0 0 br1
110.249.208.0 * 255.255.255.0 U 0 0 0 br0
192.168.122.0 * 255.255.255.0 U 0 0 0 virbr0
10.88.0.0 10.88.0.254 255.255.0.0 UG 0 0 0 br1
link-local * 255.255.0.0 U 1006 0 0 br0
link-local * 255.255.0.0 U 1007 0 0 br1
default 110.249.208.126 0.0.0.0 UG 0 0 0 br0
查看防火墙NAT
default 110.249.208.126 0.0.0.0 UG 0 0 0 br0
[root@cloud2 ~]# iptables -t nat -L -nv
Chain PREROUTING (policy ACCEPT 854 packets, 43482 bytes)
pkts bytes target prot opt in out source destination
Chain POSTROUTING (policy ACCEPT 23 packets, 1825 bytes)
pkts bytes target prot opt in out source destination
0 0 MASQUERADE tcp -- * * 192.168.122.0/24 !192.168.122.0/24 masq ports: 1024-65535
3 228 MASQUERADE udp -- * * 192.168.122.0/24 !192.168.122.0/24 masq ports: 1024-65535
2 168 MASQUERADE all -- * * 192.168.122.0/24 !192.168.122.0/24
Chain OUTPUT (policy ACCEPT 23 packets, 1825 bytes)
pkts bytes target prot opt in out source destination
[root@cloud2 ~]# iptables -t filter -L -nv
Chain INPUT (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
16 1068 ACCEPT udp -- virbr0 * 0.0.0.0/0 0.0.0.0/0 udp dpt:53
0 0 ACCEPT tcp -- virbr0 * 0.0.0.0/0 0.0.0.0/0 tcp dpt:53
2 656 ACCEPT udp -- virbr0 * 0.0.0.0/0 0.0.0.0/0 udp dpt:67
0 0 ACCEPT tcp -- virbr0 * 0.0.0.0/0 0.0.0.0/0 tcp dpt:67
1851 115K ACCEPT all -- * * 0.0.0.0/0 0.0.0.0/0 state RELATED,ESTABLISHED
0 0 ACCEPT icmp -- * * 0.0.0.0/0 0.0.0.0/0
0 0 ACCEPT all -- lo * 0.0.0.0/0 0.0.0.0/0
1 52 ACCEPT tcp -- * * 0.0.0.0/0 0.0.0.0/0 state NEW tcp dpt:10809
1 52 ACCEPT tcp -- * * 0.0.0.0/0 0.0.0.0/0 state NEW tcp dpts:5900:5920
199 14833 ACCEPT udp -- * * 0.0.0.0/0 0.0.0.0/0 udp dpt:161
11 440 REJECT all -- * * 0.0.0.0/0 0.0.0.0/0 reject-with icmp-host-prohibited
Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
46 3544 ACCEPT all -- * virbr0 0.0.0.0/0 192.168.122.0/24 state RELATED,ESTABLISHED
47 3620 ACCEPT all -- virbr0 * 192.168.122.0/24 0.0.0.0/0
0 0 ACCEPT all -- virbr0 virbr0 0.0.0.0/0 0.0.0.0/0
0 0 REJECT all -- * virbr0 0.0.0.0/0 0.0.0.0/0 reject-with icmp-port-unreachable
0 0 REJECT all -- virbr0 * 0.0.0.0/0 0.0.0.0/0 reject-with icmp-port-unreachable
0 0 REJECT all -- * * 0.0.0.0/0 0.0.0.0/0 reject-with icmp-host-prohibited
Chain OUTPUT (policy ACCEPT 2018 packets, 1104K bytes)
pkts bytes target prot opt in out source destination
客户机配置
15. 查看虚拟机文件所占实际空间
#qemu-img info cacti.img
image: cacti.img
file format: raw
virtual size: 100G (107374182400 bytes)
disk size: 3.0G
raw格式虽然分配了100G,但是实际只占有了3G,所以df -h看到结果很少,但是ls -l看到是100GB
16. 克隆虚拟机
#virt-clone -o spider -n spider1 -f /home/vhosts/spider1.qcow2
spider1.xml中的uuid和mac地址都自动修改过了,无需手工修改
同时要删除掉:
rm /etc/udev/rules.d/70-persistent-net.rules
reboot后就可以了,注意如果是双网卡,如果不通,可能顺序可能反了
如果安装有guest agent, 注意修改virtio-serial路径
17. 开机加载虚拟机
#virsh list --autostart 查看自动加载的虚拟机
#virsh autostart spider 自动加载spider
#virsh autostart spider --disable 禁止加载spider
18. Guest Agent
主机设置:
#virsh edit openstack
添加一个字符串口与Guest沟通
虚拟机设置
#yum install qemu-guest-agent
然后虚拟机关机或者安装完后reboot虚拟机,主机virsh start openstack
现在可以用
virsh reboot openstack
virsh shutdown openstack
来控制虚拟机了,而无需virsh destroy openstack的暴力关机方法
19. 存储池
创建存储池
基于目录的:#virsh pool-define-as vmpool --type dir --target /home/vhosts
注意:如果用virsh pool-create-as --name vmpool --type dir --target /home/vhosts,创建出来是临时的没有生成xml文件,重启后会失效
基于文件系统
#virsh pool-create-as --name vmware_pool --type fs --source-dev /dev/vg_target/LogVol02 --source-format ext4 --target /virhost/vmware
查看存储池
#virsh pool-info vmpool
#virsh pool-list --all
启动存储池
#virsh pool-start vmpool
#virsh pool-autostart vmpool
销毁和删除存储池
#virsh pool-destroy vmpool
#virsh pool-undefine vmpool
创建存储卷
#virsh vol-create-as --pool vmware_pool --name node6.img --capacity 10G --allocation 1G --format qcow2
创建虚拟机
#virt-install --connect qemu:///system --name openstack --ram 16384 --vcpus=2 --arch=x86_64 --disk path=/home/vhosts/openstack.qcow2,size=200,format=qcow2 --os-type=linux --os-variant='rhel6' --accelerate --cdrom=/home/software/CentOS-6.5-x86_64-minimal.iso --graphic type=vnc,port=5904,listen=0.0.0.0,password='cqmygysdss' --network bridge=br0,bridge=br1
创建快照
#virsh snapshot-create-as openstack clean
查看快照
#virsh snapshot-list openstack
#virsh snapshot-info openstack clean
恢复到快照
#virsh snapshot-revert openstack clean
删除快照
#virsh snapshot-delete openstack clean
20. 电源管理模块
虚拟机上:yum install acpid -y
ACPI(全称 Advanced Configuration and Power Interface)服务是电源管理接口。建议所有的笔记本用户开启它。一些服务器可能不需要 acpi。支持的通用操作有:“电源开关“,”电池监视“,”笔记本 Lid 开关“,“笔记本显示屏亮度“,“休眠”, “挂起”,等等。
21. 增加CPU
#virsh vcpuinfo scrapy 查看CPU配置信息
#virsh setvcpus scrapy 4 --maximum --config 配置最大CPU数
#virsh shutdown scrapy 先关闭guest,当前不支持热插拔CPU
#virsh setvcpus scrapy 4 --current 设置当前的CPU数
22. 增加网卡
添加一块桥接网卡
# virsh attach-interface cacti bridge br1 --config
--config表示写入到xml中,如果不写,virsh destroy cacti之后,此网卡就没了
#virsh reset cacti
增加NAT网卡
#virsh attach-interface mysql network default --config
23. 迁移
1) 在目标机相同目录生成同样的文件
#virsh vol-create-as --pool vmpool --name ***.qcow2 --capacity 200G --allocation 1G --format qcow2
2) 在源机执行热迁移
virsh -c 'qemu:///system' migrate --live --persistent --copy-storage-all ovirt qemu+ssh://10.88.0.4/system
--copy-storage-all 表示非共享的存储
TCP方式(目标机操作):
a. 打开配置和--listen
#vi /etc/sysconfig/libvirtd
LIBVIRTD_CONFIG=/etc/libvirt/libvirtd.conf
LIBVIRTD_ARGS="--listen"
b. 修改/etc/libvirt/libvirtd.conf
listen_tls = 0
listen_tcp = 1
listen_addr = "10.88.0.4"
c. 重启libvirtd
#service libvirtd restart
d. 添加帐号
#saslpasswd2 -a libvirt admin
源机操作:
#virsh -c 'qemu:///system' migrate --live --persistent --copy-storage-all ovirt qemu+tcp://10.88.0.4/system
24. 常见问题
1) error: Unable to read from monitor: Connection reset by peer
#virsh managedsave-remove scrapy0005
#virsh start scrapy0005
2) error: unknown OS type hvm
qemu要放到/usr/bin目录下
3) 检查是否CPU开启虚拟化
#grep -E -o 'vmx|svm' /proc/cpuinfo
4) error: Refusing to undefine while domain managed save image exists
解决办法:virsh undefine $domain --managed-save
25. openstack, kvm, qemu-kvm以及libvirt之关系:
KVM是最底层的hypervisor,它是用来模拟CPU的运行,它缺少了对network和周边I/O的支持,所以我们是没法直接用它的。 QEMU-KVM就是一个完整的模拟器,它是建基于KVM上面的,它提供了完整的网络和I/O支持. Openstack不会直接控制qemu-kvm,它会用一个叫libvirt的库去间接控制qemu-kvm,libvirt提供了跨VM平台的功能, 它可以控制除了QEMU之外的其他模拟器,包括vmware, virtualbox xen等等。所以为了openstack的跨VM性,所以openstack只会用libvirt而不直接用qemu-kvm。libvirt还提供了一些高级的功能,例如pool/vol管理。