[root@openstack-con01 ~(keystone_admin)]# nova --debug resize 2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 2 --poll
DEBUG (session:195) REQ: curl -g -i -X GET http://10.10.10.250:35357/v2.0/ -H "Accept: application/json" -H "User-Agent: python-keystoneclient"
INFO (connectionpool:203) Starting new HTTP connection (1): 10.10.10.250
DEBUG (connectionpool:383) "GET /v2.0/ HTTP/1.1" 200 339
DEBUG (session:224) RESP: [200] content-length: 339 vary: X-Auth-Token keep-alive: timeout=5, max=100 server: Apache/2.4.6 (CentOS) mod_wsgi/3.4 Python/2.7.5 connection: Keep-Alive date: Thu, 30 Aug 2018 11:28:07 GMT content-type: application/json x-openstack-request-id: req-84389041-c201-4869-acb0-0ca92945582f
RESP BODY: {"version": {"status": "stable", "updated": "2014-04-17T00:00:00Z", "media-types": [{"base": "application/json", "type": "application/vnd.openstack.identity-v2.0+json"}], "id": "v2.0", "links": [{"href": "http://10.10.10.250:35357/v2.0/", "rel": "self"}, {"href": "http://docs.openstack.org/", "type": "text/html", "rel": "describedby"}]}}
DEBUG (v2:76) Making authentication request to http://10.10.10.250:35357/v2.0/tokens
DEBUG (connectionpool:383) "POST /v2.0/tokens HTTP/1.1" 200 3484
DEBUG (session:195) REQ: curl -g -i -X GET http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 -H "User-Agent: python-novaclient" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}c963302cb137a44f36fa62c50c39fff3462d7e34"
INFO (connectionpool:203) Starting new HTTP connection (1): 10.10.10.250
DEBUG (connectionpool:383) "GET /v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 HTTP/1.1" 200 1751
DEBUG (session:224) RESP: [200] date: Thu, 30 Aug 2018 11:28:08 GMT connection: keep-alive content-type: application/json content-length: 1751 x-compute-request-id: req-7e7ea726-5479-4c92-b3d5-173bc9651338
RESP BODY: {"server": {"OS-EXT-STS:task_state": null, "addresses": {"tstack-vxlan-network": [{"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:fd:06:74", "version": 4, "addr": "172.16.0.105", "OS-EXT-IPS:type": "fixed"}]}, "links": [{"href": "http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "rel": "self"}, {"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "rel": "bookmark"}], "image": {"id": "0be3adcf-181b-4c8a-a5f4-657e972497d8", "links": [{"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/images/0be3adcf-181b-4c8a-a5f4-657e972497d8", "rel": "bookmark"}]}, "OS-EXT-STS:vm_state": "active", "OS-EXT-SRV-ATTR:instance_name": "instance-00000009", "OS-SRV-USG:launched_at": "2018-08-30T10:35:43.000000", "flavor": {"id": "1", "links": [{"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/flavors/1", "rel": "bookmark"}]}, "id": "2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "security_groups": [{"name": "default"}], "user_id": "33f84664ac69496ea4cdfb7166fbf416", "OS-DCF:diskConfig": "MANUAL", "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "region_node_id": "REGION_FYD_1", "config_drive": "", "status": "ACTIVE", "updated": "2018-08-30T11:26:24Z", "hostId": "1b346db001405aa020eb63097240a58b546291e1464c4a3dad66e6b3", "OS-EXT-SRV-ATTR:host": "openstack-compute01", "OS-SRV-USG:terminated_at": null, "key_name": null, "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:hypervisor_hostname": "openstack-compute01", "name": "cirros_test", "created": "2018-08-30T10:35:32Z", "tenant_id": "b045010ed16147c680a368fd23d96272", "os-extended-volumes:volumes_attached": [], "metadata": {}}}
DEBUG (session:195) REQ: curl -g -i -X GET http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/flavors/2 -H "User-Agent: python-novaclient" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}c963302cb137a44f36fa62c50c39fff3462d7e34"
DEBUG (connectionpool:383) "GET /v2/b045010ed16147c680a368fd23d96272/flavors/2 HTTP/1.1" 200 425
DEBUG (session:224) RESP: [200] date: Thu, 30 Aug 2018 11:28:08 GMT connection: keep-alive content-type: application/json content-length: 425 x-compute-request-id: req-1ca47c75-1ab2-4d27-bec8-f92a03184fdf
RESP BODY: {"flavor": {"name": "m1.small", "links": [{"href": "http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/flavors/2", "rel": "self"}, {"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/flavors/2", "rel": "bookmark"}], "ram": 2048, "OS-FLV-DISABLED:disabled": false, "vcpus": 1, "swap": "", "os-flavor-access:is_public": true, "rxtx_factor": 1.0, "OS-FLV-EXT-DATA:ephemeral": 0, "disk": 20, "id": "2"}}
DEBUG (session:195) REQ: curl -g -i -X POST http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728/action -H "User-Agent: python-novaclient" -H "Content-Type: application/json" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}c963302cb137a44f36fa62c50c39fff3462d7e34" -d '{"resize": {"flavorRef": "2"}}'
DEBUG (connectionpool:383) "POST /v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728/action HTTP/1.1" 202 0
DEBUG (session:224) RESP: [202] date: Thu, 30 Aug 2018 11:28:09 GMT connection: keep-alive content-type: text/html; charset=UTF-8 content-length: 0 x-compute-request-id: req-b4aa00c0-0065-47d8-a3e8-9631fc0651f7
DEBUG (session:195) REQ: curl -g -i -X GET http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 -H "User-Agent: python-novaclient" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}c963302cb137a44f36fa62c50c39fff3462d7e34"
DEBUG (connectionpool:383) "GET /v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 HTTP/1.1" 200 1760
DEBUG (session:224) RESP: [200] date: Thu, 30 Aug 2018 11:28:09 GMT connection: keep-alive content-type: application/json content-length: 1760 x-compute-request-id: req-87854e93-4156-4291-92f8-b9b9cbd97822
RESP BODY: {"server": {"OS-EXT-STS:task_state": "resize_prep", "addresses": {"tstack-vxlan-network": [{"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:fd:06:74", "version": 4, "addr": "172.16.0.105", "OS-EXT-IPS:type": "fixed"}]}, "links": [{"href": "http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "rel": "self"}, {"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "rel": "bookmark"}], "image": {"id": "0be3adcf-181b-4c8a-a5f4-657e972497d8", "links": [{"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/images/0be3adcf-181b-4c8a-a5f4-657e972497d8", "rel": "bookmark"}]}, "OS-EXT-STS:vm_state": "active", "OS-EXT-SRV-ATTR:instance_name": "instance-00000009", "OS-SRV-USG:launched_at": "2018-08-30T10:35:43.000000", "flavor": {"id": "1", "links": [{"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/flavors/1", "rel": "bookmark"}]}, "id": "2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "security_groups": [{"name": "default"}], "user_id": "33f84664ac69496ea4cdfb7166fbf416", "OS-DCF:diskConfig": "MANUAL", "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "region_node_id": "REGION_FYD_1", "config_drive": "", "status": "RESIZE", "updated": "2018-08-30T11:28:09Z", "hostId": "1b346db001405aa020eb63097240a58b546291e1464c4a3dad66e6b3", "OS-EXT-SRV-ATTR:host": "openstack-compute01", "OS-SRV-USG:terminated_at": null, "key_name": null, "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:hypervisor_hostname": "openstack-compute01", "name": "cirros_test", "created": "2018-08-30T10:35:32Z", "tenant_id": "b045010ed16147c680a368fd23d96272", "os-extended-volumes:volumes_attached": [], "metadata": {}}}
Server resizing... 0% completeDEBUG (session:195) REQ: curl -g -i -X GET http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 -H "User-Agent: python-novaclient" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}c963302cb137a44f36fa62c50c39fff3462d7e34"
DEBUG (connectionpool:383) "GET /v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 HTTP/1.1" 200 1762
DEBUG (session:224) RESP: [200] date: Thu, 30 Aug 2018 11:28:14 GMT connection: keep-alive content-type: application/json content-length: 1762 x-compute-request-id: req-f2b70226-044b-4848-b5e3-ca1a3367ed8a
RESP BODY: {"server": {"OS-EXT-STS:task_state": "resize_finish", "addresses": {"tstack-vxlan-network": [{"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:fd:06:74", "version": 4, "addr": "172.16.0.105", "OS-EXT-IPS:type": "fixed"}]}, "links": [{"href": "http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "rel": "self"}, {"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "rel": "bookmark"}], "image": {"id": "0be3adcf-181b-4c8a-a5f4-657e972497d8", "links": [{"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/images/0be3adcf-181b-4c8a-a5f4-657e972497d8", "rel": "bookmark"}]}, "OS-EXT-STS:vm_state": "active", "OS-EXT-SRV-ATTR:instance_name": "instance-00000009", "OS-SRV-USG:launched_at": "2018-08-30T10:35:43.000000", "flavor": {"id": "2", "links": [{"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/flavors/2", "rel": "bookmark"}]}, "id": "2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "security_groups": [{"name": "default"}], "user_id": "33f84664ac69496ea4cdfb7166fbf416", "OS-DCF:diskConfig": "MANUAL", "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "region_node_id": "REGION_FYD_1", "config_drive": "", "status": "RESIZE", "updated": "2018-08-30T11:28:14Z", "hostId": "1b346db001405aa020eb63097240a58b546291e1464c4a3dad66e6b3", "OS-EXT-SRV-ATTR:host": "openstack-compute01", "OS-SRV-USG:terminated_at": null, "key_name": null, "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:hypervisor_hostname": "openstack-compute01", "name": "cirros_test", "created": "2018-08-30T10:35:32Z", "tenant_id": "b045010ed16147c680a368fd23d96272", "os-extended-volumes:volumes_attached": [], "metadata": {}}}
Server resizing... 0% completeDEBUG (session:195) REQ: curl -g -i -X GET http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 -H "User-Agent: python-novaclient" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}c963302cb137a44f36fa62c50c39fff3462d7e34"
DEBUG (connectionpool:383) "GET /v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 HTTP/1.1" 200 1759
DEBUG (session:224) RESP: [200] date: Thu, 30 Aug 2018 11:28:20 GMT connection: keep-alive content-type: application/json content-length: 1759 x-compute-request-id: req-c6701531-517a-4c76-a841-b3369cfd3e58
RESP BODY: {"server": {"OS-EXT-STS:task_state": null, "addresses": {"tstack-vxlan-network": [{"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:fd:06:74", "version": 4, "addr": "172.16.0.105", "OS-EXT-IPS:type": "fixed"}]}, "links": [{"href": "http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "rel": "self"}, {"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "rel": "bookmark"}], "image": {"id": "0be3adcf-181b-4c8a-a5f4-657e972497d8", "links": [{"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/images/0be3adcf-181b-4c8a-a5f4-657e972497d8", "rel": "bookmark"}]}, "OS-EXT-STS:vm_state": "resized", "OS-EXT-SRV-ATTR:instance_name": "instance-00000009", "OS-SRV-USG:launched_at": "2018-08-30T11:28:15.000000", "flavor": {"id": "2", "links": [{"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/flavors/2", "rel": "bookmark"}]}, "id": "2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "security_groups": [{"name": "default"}], "user_id": "33f84664ac69496ea4cdfb7166fbf416", "OS-DCF:diskConfig": "MANUAL", "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "region_node_id": "REGION_FYD_1", "config_drive": "", "status": "VERIFY_RESIZE", "updated": "2018-08-30T11:28:15Z", "hostId": "1b346db001405aa020eb63097240a58b546291e1464c4a3dad66e6b3", "OS-EXT-SRV-ATTR:host": "openstack-compute01", "OS-SRV-USG:terminated_at": null, "key_name": null, "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:hypervisor_hostname": "openstack-compute01", "name": "cirros_test", "created": "2018-08-30T10:35:32Z", "tenant_id": "b045010ed16147c680a368fd23d96272", "os-extended-volumes:volumes_attached": [], "metadata": {}}}
Server resizing... 100% complete
Finished
[root@openstack-con01 ~(keystone_admin)]# nova list
+--------------------------------------+-------------+---------------+------------+-------------+-----------------------------------+
| ID | Name | Status | Task State | Power State | Networks |
+--------------------------------------+-------------+---------------+------------+-------------+-----------------------------------+
| 2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 | cirros_test | VERIFY_RESIZE | - | Running | tstack-vxlan-network=172.16.0.105 |
+--------------------------------------+-------------+---------------+------------+-------------+-----------------------------------+
[root@openstack-con01 ~(keystone_admin)]# nova --debug resize-confirm 2001bdd5-8a2e-4ff4-b8fc-8a5eff066728
DEBUG (session:195) REQ: curl -g -i -X GET http://10.10.10.250:35357/v2.0/ -H "Accept: application/json" -H "User-Agent: python-keystoneclient"
INFO (connectionpool:203) Starting new HTTP connection (1): 10.10.10.250
DEBUG (connectionpool:383) "GET /v2.0/ HTTP/1.1" 200 339
DEBUG (session:224) RESP: [200] content-length: 339 vary: X-Auth-Token keep-alive: timeout=5, max=100 server: Apache/2.4.6 (CentOS) mod_wsgi/3.4 Python/2.7.5 connection: Keep-Alive date: Thu, 30 Aug 2018 11:41:52 GMT content-type: application/json x-openstack-request-id: req-bad6820b-800a-498f-b758-d81ae7cb2a3f
RESP BODY: {"version": {"status": "stable", "updated": "2014-04-17T00:00:00Z", "media-types": [{"base": "application/json", "type": "application/vnd.openstack.identity-v2.0+json"}], "id": "v2.0", "links": [{"href": "http://10.10.10.250:35357/v2.0/", "rel": "self"}, {"href": "http://docs.openstack.org/", "type": "text/html", "rel": "describedby"}]}}
DEBUG (v2:76) Making authentication request to http://10.10.10.250:35357/v2.0/tokens
DEBUG (connectionpool:383) "POST /v2.0/tokens HTTP/1.1" 200 3484
DEBUG (session:195) REQ: curl -g -i -X GET http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 -H "User-Agent: python-novaclient" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}ad997cb2eabbfb8ed4e469f296470180e21a4526"
INFO (connectionpool:203) Starting new HTTP connection (1): 10.10.10.250
DEBUG (connectionpool:383) "GET /v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 HTTP/1.1" 200 1759
DEBUG (session:224) RESP: [200] date: Thu, 30 Aug 2018 11:41:53 GMT connection: keep-alive content-type: application/json content-length: 1759 x-compute-request-id: req-cb70e06c-2364-4b01-966a-eb5de997d934
RESP BODY: {"server": {"OS-EXT-STS:task_state": null, "addresses": {"tstack-vxlan-network": [{"OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:fd:06:74", "version": 4, "addr": "172.16.0.105", "OS-EXT-IPS:type": "fixed"}]}, "links": [{"href": "http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "rel": "self"}, {"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "rel": "bookmark"}], "image": {"id": "0be3adcf-181b-4c8a-a5f4-657e972497d8", "links": [{"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/images/0be3adcf-181b-4c8a-a5f4-657e972497d8", "rel": "bookmark"}]}, "OS-EXT-STS:vm_state": "resized", "OS-EXT-SRV-ATTR:instance_name": "instance-00000009", "OS-SRV-USG:launched_at": "2018-08-30T11:40:48.000000", "flavor": {"id": "3", "links": [{"href": "http://10.10.10.250:8774/b045010ed16147c680a368fd23d96272/flavors/3", "rel": "bookmark"}]}, "id": "2001bdd5-8a2e-4ff4-b8fc-8a5eff066728", "security_groups": [{"name": "default"}], "user_id": "33f84664ac69496ea4cdfb7166fbf416", "OS-DCF:diskConfig": "MANUAL", "accessIPv4": "", "accessIPv6": "", "progress": 0, "OS-EXT-STS:power_state": 1, "region_node_id": "REGION_FYD_1", "config_drive": "", "status": "VERIFY_RESIZE", "updated": "2018-08-30T19:44:58Z", "hostId": "1b346db001405aa020eb63097240a58b546291e1464c4a3dad66e6b3", "OS-EXT-SRV-ATTR:host": "openstack-compute01", "OS-SRV-USG:terminated_at": null, "key_name": null, "OS-EXT-AZ:availability_zone": "nova", "OS-EXT-SRV-ATTR:hypervisor_hostname": "openstack-compute01", "name": "cirros_test", "created": "2018-08-30T10:35:32Z", "tenant_id": "b045010ed16147c680a368fd23d96272", "os-extended-volumes:volumes_attached": [], "metadata": {}}}
DEBUG (session:195) REQ: curl -g -i -X POST http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728/action -H "User-Agent: python-novaclient" -H "Content-Type: application/json" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}ad997cb2eabbfb8ed4e469f296470180e21a4526" -d '{"confirmResize": null}'
DEBUG (connectionpool:383) "POST /v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728/action HTTP/1.1" 204 0
DEBUG (session:224) RESP: [204] date: Thu, 30 Aug 2018 11:41:53 GMT connection: keep-alive content-type: application/json content-length: 0 x-compute-request-id: req-cf558e26-888b-4596-9af3-31b02905e4ba
### The resize request flow
curl -g -i -X GET http://10.10.10.250:35357/v2.0/ -H "Accept: application/json" -H "User-Agent: python-keystoneclient"
# (the client then POSTs to http://10.10.10.250:35357/v2.0/tokens to obtain a token)
curl -g -i -X GET http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 -H "User-Agent: python-novaclient" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}c963302cb137a44f36fa62c50c39fff3462d7e34"
# Format: /v2/{tenant_id}/servers/{server_id}
curl -g -i -X GET http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/flavors/2 -H "User-Agent: python-novaclient" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}c963302cb137a44f36fa62c50c39fff3462d7e34"
curl -g -i -X POST http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728/action -H "User-Agent: python-novaclient" -H "Content-Type: application/json" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}c963302cb137a44f36fa62c50c39fff3462d7e34" -d '{"resize": {"flavorRef": "2"}}'
# Format: /v2/{tenant_id}/servers/{server_id}/action
# The action JSON body is:
#{
#  "resize": {
#    "flavorRef": "2"
#  }
#}
# The server is now in VERIFY_RESIZE; a final poll shows the new state:
curl -g -i -X GET http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 -H "User-Agent: python-novaclient" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}c963302cb137a44f36fa62c50c39fff3462d7e34"
# `nova resize-confirm` then re-authenticates and confirms:
curl -g -i -X GET http://10.10.10.250:35357/v2.0/ -H "Accept: application/json" -H "User-Agent: python-keystoneclient"
curl -g -i -X GET http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728 -H "User-Agent: python-novaclient" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}ad997cb2eabbfb8ed4e469f296470180e21a4526"
curl -g -i -X POST http://10.10.10.250:8774/v2/b045010ed16147c680a368fd23d96272/servers/2001bdd5-8a2e-4ff4-b8fc-8a5eff066728/action -H "User-Agent: python-novaclient" -H "Content-Type: application/json" -H "Accept: application/json" -H "X-Auth-Token: {SHA1}ad997cb2eabbfb8ed4e469f296470180e21a4526" -d '{"confirmResize": null}'
# Request body JSON format:
#{
#  "confirmResize": null
#}
In the v2 API, essentially every external request first authenticates against Keystone, obtains a token from it, and then presents that token for every subsequent action.
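That token flow is easy to reproduce outside the nova client. Below is a minimal sketch using the requests library against the endpoints from the trace above; the tenant name and credentials are placeholders:

```python
import time
import requests

KEYSTONE = 'http://10.10.10.250:35357/v2.0'
NOVA = 'http://10.10.10.250:8774/v2'

# 1. Authenticate against Keystone v2 and obtain a scoped token
auth = {'auth': {'tenantName': 'admin',                           # placeholder
                 'passwordCredentials': {'username': 'admin',     # placeholder
                                         'password': 'secret'}}}  # placeholder
access = requests.post(KEYSTONE + '/tokens', json=auth).json()['access']
headers = {'X-Auth-Token': access['token']['id']}
tenant_id = access['token']['tenant']['id']

# 2. POST the resize action, then poll until the server reaches VERIFY_RESIZE
server = '2001bdd5-8a2e-4ff4-b8fc-8a5eff066728'
base = '%s/%s/servers/%s' % (NOVA, tenant_id, server)
requests.post(base + '/action', json={'resize': {'flavorRef': '2'}},
              headers=headers)
while requests.get(base, headers=headers).json()['server']['status'] != 'VERIFY_RESIZE':
    time.sleep(5)

# 3. Confirm the resize (or roll back with {"revertResize": null})
requests.post(base + '/action', json={'confirmResize': None}, headers=headers)
```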
From setup.cfg we can locate the nova-api implementation at nova/api/openstack/compute/servers.py:
(nova-api hands off to compute-api here; this is a plain in-process Python call, the RPC hops come later) def _resize(self, req, instance_id, flavor_id, **kwargs):
"""Begin the resize process with given instance/flavor.
Parameters:
req: Request object carrying this request's context
instance_id: instance id
flavor_id: flavor id
**kwargs: dict of additional arguments
"""
# Get the context, then use it to look up the instance details
context = req.environ["nova.context"]
instance = self._get_server(context, req, instance_id)
try:
self.compute_api.resize(context, instance, flavor_id, **kwargs)
# exception handling elided
...
return webob.Response(status_int=202)
nova/compute/api.py
def resize(self, context, instance, flavor_id=None, clean_shutdown=True,
**extra_instance_updates):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
context: request context
instance: instance object
flavor_id: target flavor for the resize
clean_shutdown: enable shutdown retries during cold migration; raises if the instance cannot be shut down cleanly
**extra_instance_updates: attributes for the target instance
"""
self._check_auto_disk_config(instance, **extra_instance_updates)
# Check the root disk's 'auto disk config' setting, raising if it is invalid; after the migration the instance must be able to auto-configure its root disk
current_instance_type = instance.get_flavor()
# Fetch the instance's current flavor
# If no flavor_id was passed in, this is a cold migration
if not flavor_id:
LOG.debug("flavor_id is None. Assuming migration.",
instance=instance)
new_instance_type = current_instance_type
else:
new_instance_type = flavors.get_flavor_by_flavor_id(
flavor_id, read_deleted="no")
if (new_instance_type.get('root_gb') == 0 and
current_instance_type.get('root_gb') != 0):
reason = _('Resize to zero disk flavor is not allowed.')
raise exception.CannotResizeDisk(reason=reason)
# Otherwise look up the target flavor, and raise if its root disk is 0 GB while the current one is not
if not new_instance_type:
raise exception.FlavorNotFound(flavor_id=flavor_id)
# Raise if the given flavor_id does not match any existing flavor
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s",
{'current_instance_type_name': current_instance_type_name,
'new_instance_type_name': new_instance_type_name},
instance=instance)
same_instance_type = (current_instance_type['id'] ==
new_instance_type['id'])
# Check whether the current flavor is the same as the target flavor
# NOTE(sirp): We don't want to force a customer to change their flavor
# when Ops is migrating off of a failed host.
if not same_instance_type and new_instance_type.get('disabled'):
raise exception.FlavorNotFound(flavor_id=flavor_id)
if same_instance_type and flavor_id and self.cell_type != 'compute':
raise exception.CannotResizeToSameFlavor()
# ensure there is sufficient headroom for upsizes
if flavor_id:
deltas = self._upsize_quota_delta(context, new_instance_type,
current_instance_type)
# Compute the vCPU and RAM deltas between the old and new flavors (see the sketch after this excerpt)
try:
quotas = self._reserve_quota_delta(context, deltas, instance)
# If the deltas are positive, reserve quota for the resized instance; on quota overflow, log a warning and raise
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
usages = exc.kwargs['usages']
headroom = self._get_headroom(quotas, usages, deltas)
resource = overs[0]
used = quotas[resource] - headroom[resource]
total_allowed = used + headroom[resource]
overs = ','.join(overs)
LOG.warning(_LW("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance."),
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=deltas[resource],
used=used,
allowed=total_allowed,
resource=resource)
# With no flavor_id there is nothing to reserve: a cold migration needs no extra quota
else:
quotas = objects.Quotas(context=context)
# Update the instance state: task_state becomes RESIZE_PREP (preparing to resize or migrate)
instance.task_state = task_states.RESIZE_PREP
instance.progress = 0
instance.update(extra_instance_updates)
instance.save(expected_task_state=[None])
filter_properties = {'ignore_hosts': []}
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance.host)
# Scheduler filtering: if allow_resize_to_same_host is false, the current
# host is added to the ignore list so the scheduler avoids it
# Here when flavor_id is None, the process is considered as migrate.
if (not flavor_id and not CONF.allow_migrate_to_same_host):
filter_properties['ignore_hosts'].append(instance.host)
if self.cell_type == 'api':
# Commit reservations early and create migration record.
self._resize_cells_support(context, quotas, instance,
current_instance_type,
new_instance_type)
# If cells are configured the resize goes through the cells path, which is comparatively simple; cell_type is unset by default
if not flavor_id:
self._record_action_start(context, instance,
instance_actions.MIGRATE)
else:
self._record_action_start(context, instance,
instance_actions.RESIZE)
# Record the action start in the database
scheduler_hint = {'filter_properties': filter_properties}
self.compute_task_api.resize_instance(context, instance,
extra_instance_updates, scheduler_hint=scheduler_hint,
flavor=new_instance_type,
reservations=quotas.reservations or [],
clean_shutdown=clean_shutdown)
# Forward the request to nova-conductor
def compute_task_api(self):
if self._compute_task_api is None:
# TODO(alaski): Remove calls into here from conductor manager so
# that this isn't necessary. #1180540
from nova import conductor
self._compute_task_api = conductor.ComputeTaskAPI()
return self._compute_task_api
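The headroom check referenced above reduces to simple flavor arithmetic: _upsize_quota_delta keeps only the positive vCPU/RAM deltas, and only those need a reservation. A simplified sketch of that arithmetic (field names follow the flavor dicts shown in the trace, not nova's internal memory_mb):

```python
def upsize_quota_delta(new_flavor, old_flavor):
    """Return only the extra quota a resize needs (simplified sketch).

    Shrinking a dimension frees quota, so negative deltas are dropped.
    """
    deltas = {'cores': new_flavor['vcpus'] - old_flavor['vcpus'],
              'ram': new_flavor['ram'] - old_flavor['ram']}
    return {k: v for k, v in deltas.items() if v > 0}

# Resizing from flavor 1 (m1.tiny, 512 MB RAM by default) to m1.small
# (1 vCPU, 2048 MB, from the trace) needs 1536 MB of extra RAM quota
# and no extra cores:
print(upsize_quota_delta({'vcpus': 1, 'ram': 2048},
                         {'vcpus': 1, 'ram': 512}))  # {'ram': 1536}
```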
nova/conductor/api.py
class ComputeTaskAPI(object):
"""ComputeTask API that queues up compute tasks for nova-conductor."""
def __init__(self):
self.conductor_compute_rpcapi = rpcapi.ComputeTaskAPI()
def resize_instance(self, context, instance, extra_instance_updates,
scheduler_hint, flavor, reservations,
clean_shutdown=True):
# NOTE(comstud): 'extra_instance_updates' is not used here but is
# needed for compatibility with the cells_rpcapi version of this
# method.
self.conductor_compute_rpcapi.migrate_server(
context, instance, scheduler_hint, live=False, rebuild=False,
flavor=flavor, block_migration=None, disk_over_commit=None,
reservations=reservations, clean_shutdown=clean_shutdown)
"""
Invokes the conductor rpcapi.
context: request context
instance: instance object
scheduler_hint: the dict built in nova/compute/api.py
extra_instance_updates: unused here; kept for compatibility with cells_rpcapi
flavor: target flavor
reservations: reserved-quota object
clean_shutdown: enable shutdown retries during cold migration; raises if the instance cannot be shut down cleanly
The remaining flags mark this as a plain cold migration/resize:
live: live migration off
rebuild: rebuild off
block_migration: block migration, off by default
disk_over_commit: disk overcommit, off by default
"""
nova/conductor/rpcapi.py
(one RPC call, from compute-api to nova-conductor) class ComputeTaskAPI(object):
"""Client side of the conductor 'compute' namespaced RPC API"""
def __init__(self):
super(ComputeTaskAPI, self).__init__()
target = messaging.Target(topic=CONF.conductor.topic,
namespace='compute_task',
version='1.0')
"""
oslo.messaging gives OpenStack projects a unified interface to RPC and
event notifications. To support different RPC backends it standardizes a
few objects, such as the Target used here. A Target encapsulates all the
information needed to address a message's final destination; its main
attributes are (a standalone sketch follows this excerpt):
topic: string; a topic identifies a set of interfaces exposed by a server
(an interface is a group of remotely callable methods). Multiple servers
may expose the same topic, in which case messages are delivered to one of
them in round-robin fashion.
namespace: identifies one particular interface (a set of remotely
callable methods) exposed by the server.
version: interfaces carry an M.N version number. A minor (N) bump means
the new interface is backward compatible; a major (M) bump means it is
not. An RPC server may implement several major versions of an interface.
"""
serializer = objects_base.NovaObjectSerializer()
# The serializer (de)serializes messages; any service that accepts or
# returns NovaObjects as arguments or return values should pass one to
# its RPCClient and RPCServer objects.
self.client = rpc.get_client(target, serializer=serializer)
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit,
reservations=None, clean_shutdown=True):
kw = {'instance': instance, 'scheduler_hint': scheduler_hint,
'live': live, 'rebuild': rebuild, 'flavor': flavor,
'block_migration': block_migration,
'disk_over_commit': disk_over_commit,
'reservations': reservations,
'clean_shutdown': clean_shutdown}
version = '1.11'
# The default RPC version is 1.11; the client falls back to the newest version the other side can accept, dropping or converting arguments as needed
if not self.client.can_send_version(version):
del kw['clean_shutdown']
version = '1.10'
if not self.client.can_send_version(version):
kw['flavor'] = objects_base.obj_to_primitive(flavor)
version = '1.6'
if not self.client.can_send_version(version):
kw['instance'] = jsonutils.to_primitive(
objects_base.obj_to_primitive(instance))
version = '1.4'
cctxt = self.client.prepare(version=version)
# prepare() returns a client whose Target attributes override those set
# at construction time; the overrides only apply to the object prepare()
# returns. In short, this yields an RPC client bound to the negotiated
# version for the remote call.
return cctxt.call(context, 'migrate_server', **kw)
# call() is a synchronous, blocking RPC: the caller waits here until the
# server finishes the task and returns a result. Where does the call go?
# See topic=CONF.conductor.topic above; the default value is 'conductor',
# so the nova-conductor service listens for this message.
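To see the client side of oslo.messaging in isolation, here is a minimal, self-contained sketch; the transport URL is a placeholder and the lines that would hit a live server are commented out:

```python
import oslo_messaging as messaging
from oslo_config import cfg

# The transport is the messaging backend (RabbitMQ by default).
transport = messaging.get_transport(
    cfg.CONF, url='rabbit://guest:guest@localhost:5672/')  # placeholder URL

# Address the conductor's compute_task interface, as in the code above.
target = messaging.Target(topic='conductor', namespace='compute_task',
                          version='1.11')
client = messaging.RPCClient(transport, target)

ctxt = {}  # the request context, serialized alongside each message
cctxt = client.prepare(version='1.11')  # pin the negotiated version

# call(): synchronous and blocking, returns the server's result --
# what migrate_server uses above.
# result = cctxt.call(ctxt, 'migrate_server', live=False, rebuild=False)

# cast(): asynchronous fire-and-forget -- what prep_resize uses later.
# cctxt.cast(ctxt, 'prep_resize', node='compute01')
```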
nova/conductor/manager.py
class ComputeTaskManager(base.Base):
target = messaging.Target(namespace='compute_task', version='1.11')
def __init__(self):
super(ComputeTaskManager, self).__init__()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.image_api = image.API()
self.scheduler_client = scheduler_client.SchedulerClient()
# All of these values come over RPC from the API side
def migrate_server(self, context, instance, scheduler_hint, live, rebuild,
flavor, block_migration, disk_over_commit, reservations=None,
clean_shutdown=True):
# If the instance passed in is not a NovaObject, rebuild it from the database
if instance and not isinstance(instance, nova_object.NovaObject):
# NOTE(danms): Until v2 of the RPC API, we need to tolerate
# old-world instance objects here
attrs = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
instance = objects.Instance._from_db_object(
context, objects.Instance(), instance,
expected_attrs=attrs)
# NOTE(melwitt): Remove this in version 2.0 of the RPC API
if flavor and not isinstance(flavor, objects.Flavor):
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
flavor = objects.Flavor.get_by_id(context, flavor['id'])
# live=True with rebuild false/empty and no flavor means live migration
if live and not rebuild and not flavor:
self._live_migrate(context, instance, scheduler_hint,
block_migration, disk_over_commit)
elif not live and not rebuild and flavor:
instance_uuid = instance['uuid']
# The with statement records a migration event in the database
# (nova.instance_actions_events) before migrating and updates the record afterwards
with compute_utils.EventReporter(context, 'cold_migrate',
instance_uuid):
# Invoke the cold-migration path
self._cold_migrate(context, instance, flavor,
scheduler_hint['filter_properties'],
reservations, clean_shutdown)
else:
raise NotImplementedError()
nova/conductor/manager.py
(same file as above) def _cold_migrate(self, context, instance, flavor, filter_properties,
reservations, clean_shutdown):
# Get the image ref from the instance object
image_ref = instance.image_ref
# Fetch the image metadata via the image API
image = compute_utils.get_image_metadata(
context, self.image_api, image_ref, instance)
# Build a request-spec dict for the scheduler:
"""
request_spec = {
'image': image or {},
'instance_properties': instance,
'instance_type': instance_type,
'num_instances': len(instances)}
# returns a JSON-serializable dict
"""
request_spec = scheduler_utils.build_request_spec(
context, image, [instance], instance_type=flavor)
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
# Pre-reserve quota
try:
# Select the target compute node according to the scheduling policy
scheduler_utils.setup_instance_group(context, request_spec,
filter_properties)
scheduler_utils.populate_retry(filter_properties, instance['uuid'])
hosts = self.scheduler_client.select_destinations(
context, request_spec, filter_properties)
host_state = hosts[0]
except exception.NoValidHost as ex:
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
quotas.rollback()
# if the flavor IDs match, it's migrate; otherwise resize
if flavor['id'] == instance['instance_type_id']:
msg = _("No valid host found for cold migrate")
else:
msg = _("No valid host found for resize")
raise exception.NoValidHost(reason=msg)
except exception.UnsupportedPolicyException as ex:
with excutils.save_and_reraise_exception():
vm_state = instance.vm_state
if not vm_state:
vm_state = vm_states.ACTIVE
updates = {'vm_state': vm_state, 'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
quotas.rollback()
try:
# Populate the filter properties with the chosen host's state
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
# context is not serializable
# Remove the context key (pop returns its value)
filter_properties.pop('context', None)
(host, node) = (host_state['host'], host_state['nodename'])
# Send the resize-preparation request through the compute rpcapi
self.compute_rpcapi.prep_resize(
context, image, instance,
flavor, host,
reservations, request_spec=request_spec,
filter_properties=filter_properties, node=node,
clean_shutdown=clean_shutdown)
except Exception as ex:
with excutils.save_and_reraise_exception():
updates = {'vm_state': instance.vm_state,
'task_state': None}
self._set_vm_state_and_notify(context, instance.uuid,
'migrate_server',
updates, ex, request_spec)
quotas.rollback()
nova/compute/rpcapi.py ComputeAPI.prep_resize
(one RPC cast, from nova-conductor to the target compute node's nova-compute) def prep_resize(self, ctxt, image, instance, instance_type, host,
reservations=None, request_spec=None,
filter_properties=None, node=None,
clean_shutdown=True):
instance_type_p = jsonutils.to_primitive(instance_type)
image_p = jsonutils.to_primitive(image)
msg_args = {'instance': instance,
'instance_type': instance_type_p,
'image': image_p,
'reservations': reservations,
'request_spec': request_spec,
'filter_properties': filter_properties,
'node': node,
'clean_shutdown': clean_shutdown}
if self.client.can_send_version('4.0'):
version = '4.0'
elif self.client.can_send_version('3.38'):
version = '3.38'
else:
del msg_args['clean_shutdown']
version = '3.0'
# Enter the RPC flow: cast the prep_resize request to the target compute node
cctxt = self.client.prepare(server=host, version=version)
cctxt.cast(ctxt, 'prep_resize', **msg_args)
Summary so far:
nova-api receives the resize request, checks the instance state and the environment the migration needs, and after initialization hands the request to nova-conductor.
nova-conductor performs some database bookkeeping.
nova-conductor has nova-scheduler select a target compute node,
then casts the prep_resize request to the target node's nova-compute.
nova/compute/manager.py ComputeManager.prep_resize
(the receiving end of that cast)
class ComputeManager(manager.Manager):
"""Manages the running instances from creation to destruction."""
...
def prep_resize(self, context, image, instance, instance_type,
reservations, request_spec, filter_properties, node,
clean_shutdown=True):
"""Initiates the process of moving a running instance to another host.
Possibly changes the RAM and disk size in the process.
The arguments here come from nova-conductor, most of them originally from nova-api.
request_spec: includes the image object, instance properties, instance type, and instance count
"""
# If conductor did not pass a node, get one from the virt driver backend (LibvirtDriver, configured in nova.conf)
if node is None:
node = self.driver.get_available_nodes(refresh=True)[0]
LOG.debug("No node specified, defaulting to %s", node,
instance=instance)
# Build a Quotas object from the reservations passed in
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
# If the resize fails, roll back the quota and the instance state
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
# Send a `compute.instance.exists` notification to ceilometer with the
# instance's full configuration; the default audit period is one month,
# and current_period=True adds the notification to the current period
compute_utils.notify_usage_exists(self.notifier, context, instance,
current_period=True)
# Send a `compute.instance.resize.prep.start` notification to ceilometer
# with the instance details
self._notify_about_instance_usage(
context, instance, "resize.prep.start")
try:
# Delegate the actual work to the private method _prep_resize
self._prep_resize(context, image, instance,
instance_type, quotas,
request_spec, filter_properties,
node, clean_shutdown)
# NOTE(dgenin): This is thrown in LibvirtDriver when the
# instance to be migrated is backed by LVM.
# Remove when LVM migration is implemented.
# With LVM as nova's storage backend, image-backed instances do not support migration
except exception.MigrationPreCheckError:
raise
except Exception:
# try to re-schedule the resize elsewhere:
exc_info = sys.exc_info()
# Reschedule: issue another RPC to conductor via
# compute_task_api.resize_instance (in effect
# conductor.ComputeTaskAPI().resize_instance). Unlike the first attempt,
# the retry carries the previous exception info and excludes the
# previously selected target host when scheduling.
self._reschedule_resize_or_reraise(context, image, instance,
exc_info, instance_type, quotas, request_spec,
filter_properties)
finally:
extra_usage_info = dict(
new_instance_type=instance_type['name'],
new_instance_type_id=instance_type['id'])
# Send a `compute.instance.resize.prep.end` notification to ceilometer
# with the instance details plus the new flavor name and id
self._notify_about_instance_usage(
context, instance, "resize.prep.end",
extra_usage_info=extra_usage_info)
nova/compute/manager.py _prep_resize
(this ends with one RPC cast, from the target node's nova-compute to the source node's nova-compute) def _prep_resize(self, context, image, instance, instance_type,
quotas, request_spec, filter_properties, node,
clean_shutdown=True):
if not filter_properties:
filter_properties = {}
if not instance.host:
self._set_instance_error_state(context, instance)
msg = _('Instance has no source host')
raise exception.MigrationError(reason=msg)
# Check whether the resize source and target hosts are the same
same_host = instance.host == self.host
# If they are the same host and same-host resize is not allowed, raise
# (nova.conf option allow_resize_to_same_host, false by default)
if same_host and not CONF.allow_resize_to_same_host:
self._set_instance_error_state(context, instance)
msg = _('destination same as source!')
raise exception.MigrationError(reason=msg)
# NOTE(danms): Stash the new instance_type to avoid having to
# look it up in the database later
instance.set_flavor(instance_type, 'new')
# NOTE(mriedem): Stash the old vm_state so we can set the
# resized/reverted instance back to the same state later.
vm_state = instance.vm_state
LOG.debug('Stashing vm_state: %s', vm_state, instance=instance)
instance.system_metadata['old_vm_state'] = vm_state
instance.save()
# Get the node's resource tracker (ResourceTracker)
limits = filter_properties.get('limits', {})
rt = self._get_resource_tracker(node)
# Save a MigrationContext onto the instance and add a record to the
# nova.migrations table. resize_claim then validates the target host's
# resources against the flavor (instance_type) -- the logs show lines
# like "Attempting claim", and it raises if resources are insufficient --
# and reserves the resources the migration needs on the target host
# (see the sketch after this method).
with rt.resize_claim(context, instance, instance_type,
image_meta=image, limits=limits) as claim:
LOG.info(_LI('Migrating'), context=context, instance=instance)
# Cast resize_instance to the source compute node
self.compute_rpcapi.resize_instance(
context, instance, claim.migration, image,
instance_type, quotas.reservations,
clean_shutdown)
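The claim in the with block above follows a reserve-then-rollback pattern. Here is an illustrative toy version (not nova's ResourceTracker code) that shows why wrapping the cast inside the claim's context manager keeps the reservation safe:

```python
from contextlib import contextmanager

class ComputeResourcesUnavailable(Exception):
    pass

@contextmanager
def toy_resize_claim(host, flavor):
    """Reserve flavor-sized resources on a host; release them on failure."""
    if flavor['ram'] > host['free_ram_mb']:
        # Mirrors the "Attempting claim" failure path described above
        raise ComputeResourcesUnavailable('insufficient RAM on target host')
    host['free_ram_mb'] -= flavor['ram']  # the claim itself
    try:
        yield
    except Exception:
        host['free_ram_mb'] += flavor['ram']  # abort the claim and re-raise
        raise

host = {'free_ram_mb': 4096}
with toy_resize_claim(host, {'ram': 2048}):
    pass  # this is where resize_instance would be cast to the source node
print(host['free_ram_mb'])  # 2048 -- the claim is held for the migration
```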
nova/compute/manager.py ComputeManager.resize_instance
(runs on the source node; ends with one RPC cast from the source nova-compute to the target nova-compute) def resize_instance(self, context, instance, image,
reservations, migration, instance_type,
clean_shutdown=True):
"""Starts the migration of a running instance to another host.
migration: the migration object delivered by the RPC above, carrying the details the migration needs
"""
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
# Exception context: on failure, roll back the quota and set the instance state
with self._error_out_instance_on_exception(context, instance,
quotas=quotas):
"""The objects above are as described for nova/compute/manager.py ComputeManager.prep_resize"""
# Code downstream may expect extra_specs to be populated since it
# is receiving an object, so lookup the flavor to ensure this.
# If instance_type is missing or not a Flavor object, fetch it via the
# migration parameters
if (not instance_type or
not isinstance(instance_type, objects.Flavor)):
instance_type = objects.Flavor.get_by_id(
context, migration['new_instance_type_id'])
# Fetch the instance's network info
network_info = self._get_instance_nw_info(context, instance)
# Update the migration status (nova.migrations table)
migration.status = 'migrating'
with migration.obj_as_admin():
migration.save()
# Update the instance: task_state becomes RESIZE_MIGRATING
instance.task_state = task_states.RESIZE_MIGRATING
instance.save(expected_task_state=task_states.RESIZE_PREP)
# Send a `compute.instance.resize.start` notification to ceilometer
# with the instance and NIC details
self._notify_about_instance_usage(
context, instance, "resize.start", network_info=network_info)
# Fetch the instance's block device mappings from the
# nova.block_device_mapping table
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
# Convert the block devices to the driver's block-device format, e.g.
"""
'swap': swap,
'root_device_name': root_device_name,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping
"""
block_device_info = self._get_instance_block_device_info(
context, instance, bdms=bdms)
# Get the power-off timeout and retry interval via instance.system_metadata.get(key, default)
timeout, retry_interval = self._get_power_off_values(context,
instance, clean_shutdown)
# Use libvirt to migrate the instance's disks: power the instance off
# first, then transfer the running instance's disks in stages; the next
# section looks at how the libvirt driver implements this
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration.dest_host,
instance_type, network_info,
block_device_info,
timeout, retry_interval)
# Terminate the block device connections via the volume API
self._terminate_volume_connections(context, instance, bdms)
migration_p = obj_base.obj_to_primitive(migration)
# Migrate the NICs (a no-op here; the NICs are reconfigured on the destination host during the `complete` phase)
self.network_api.migrate_instance_start(context,
instance,
migration_p)
# Update the migration status (nova.migrations table)
migration.status = 'post-migrating'
with migration.obj_as_admin():
migration.save()
# Update the instance's host fields and state: task_state becomes
# RESIZE_MIGRATED
instance.host = migration.dest_compute
instance.node = migration.dest_node
instance.task_state = task_states.RESIZE_MIGRATED
instance.save(expected_task_state=task_states.RESIZE_MIGRATING)
# Cast finish_resize to the target compute node
self.compute_rpcapi.finish_resize(context, instance,
migration, image, disk_info,
migration.dest_compute, reservations=quotas.reservations)
# The notification carries the instance and NIC details
self._notify_about_instance_usage(context, instance, "resize.end",
network_info=network_info)
# Clear all pending events for this instance
self.instance_events.clear_events_for_instance(instance)
nova/virt/libvirt/driver.py LibvirtDriver.migrate_disk_and_power_off
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
timeout=0, retry_interval=0):
LOG.debug("Starting migrate_disk_and_power_off",
instance=instance)
# Get the ephemeral disk info
ephemerals = driver.block_device_info_get_ephemerals(block_device_info)
# get_bdm_ephemeral_disk_size() will return 0 if the new
# instance's requested block device mapping contain no
# ephemeral devices. However, we still want to check if
# the original instance's ephemeral_gb property was set and
# ensure that the new requested flavor ephemeral size is greater
eph_size = (block_device.get_bdm_ephemeral_disk_size(ephemerals) or
instance.ephemeral_gb)
# Checks if the migration needs a disk resize down.
# Compare the target flavor's root and ephemeral sizes with the instance's current sizes
root_down = flavor['root_gb'] < instance.root_gb
ephemeral_down = flavor['ephemeral_gb'] < eph_size
# Get the disk info from the instance's XML (volumes are not returned)
disk_info_text = self.get_instance_disk_info(
instance, block_device_info=block_device_info)
# Determine whether the instance booted from a volume: no image_ref, or the root disk absent from disk_mapping
booted_from_volume = self._is_booted_from_volume(instance,
disk_info_text)
# Raise if the target flavor's root disk is smaller than the instance's (unless it booted from a volume), or the ephemeral disk would shrink
if (root_down and not booted_from_volume) or ephemeral_down:
reason = _("Unable to resize disk down.")
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
disk_info = jsonutils.loads(disk_info_text)
# NOTE(dgenin): Migration is not implemented for LVM backed instances.
# If the image backend is LVM and the instance did not boot from a volume, raise: LVM-backed instances do not support migration
if CONF.libvirt.images_type == 'lvm' and not booted_from_volume:
reason = _("Migration is not supported for LVM backed instances")
raise exception.InstanceFaultRollback(
exception.MigrationPreCheckError(reason=reason))
# copy disks to destination
# rename instance dir to +_resize at first for using
# shared storage for instance dir (eg. NFS).
# Get the instance's local directory
inst_base = libvirt_utils.get_instance_path(instance)
# Directory name used when renaming the local instance directory
inst_base_resize = inst_base + "_resize"
# Detect shared storage: either the source and destination IPs match, or a file created over ssh on the destination is also visible on the source (see the sketch after this method)
shared_storage = self._is_storage_shared_with(dest, inst_base)
# try to create the directory on the remote compute node
# if this fails we pass the exception up the stack so we can catch
# failures here earlier
# Without shared storage, ssh to the target node and create the base directory
if not shared_storage:
try:
utils.execute('ssh', dest, 'mkdir', '-p', inst_base)
except processutils.ProcessExecutionError as e:
reason = _("not able to execute ssh command: %s") % e
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
# Power off the instance within the given timeout and retry interval
self.power_off(instance, timeout, retry_interval)
# Get the block device mapping (all of the instance's block devices)
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
# Before migrating, disconnect volumes through the type-specific volume
# driver. For rbd (LibvirtNetVolumeDriver) this does nothing; for iscsi
# (LibvirtISCSIVolumeDriver) it does two things:
# 1. echo '1' > /sys/block/{dev_name}/device/delete
# 2. remove the related endpoint info via the iscsiadm tool
for vol in block_device_mapping:
connection_info = vol['connection_info']
disk_dev = vol['mount_device'].rpartition("/")[2]
self._disconnect_volume(connection_info, disk_dev)
try:
# Rename the instance directory
utils.execute('mv', inst_base, inst_base_resize)
# if we are migrating the instance with shared storage then
# create the directory. If it is a remote node the directory
# has already been created
# On shared storage, create the target instance directory locally
if shared_storage:
dest = None
utils.execute('mkdir', '-p', inst_base)
# Get the instance's current flavor
active_flavor = instance.get_flavor()
# For each disk, derive the image filename and its source path inside the renamed (_resize) directory
for info in disk_info:
# assume inst_base == dirname(info['path'])
img_path = info['path']
fname = os.path.basename(img_path)
from_path = os.path.join(inst_base_resize, fname)
if (fname == 'disk.swap' and
active_flavor.get('swap', 0) != flavor.get('swap', 0)):
# To properly resize the swap partition, it must be
# re-created with the proper size. This is acceptable
# because when an OS is shut down, the contents of the
# swap space are just garbage, the OS doesn't bother about
# what is in it.
# We will not copy over the swap disk here, and rely on
# finish_migration/_create_image to re-create it for us.
continue
# If the disk is qcow2 with a backing file, flatten the two into one image
if info['type'] == 'qcow2' and info['backing_file']:
tmp_path = from_path + "_rbase"
# merge backing file
utils.execute('qemu-img', 'convert', '-f', 'qcow2',
'-O', 'qcow2', from_path, tmp_path)
# On shared storage, just move the merged file into place
if shared_storage:
utils.execute('mv', tmp_path, img_path)
else:
# Otherwise copy the image over and delete the temporary file afterwards
libvirt_utils.copy_image(tmp_path, img_path, host=dest)
utils.execute('rm', '-f', tmp_path)
# raw, or qcow2 without a backing file: copy the image directly (cp/rsync/scp)
else: # raw or qcow2 with no backing file
libvirt_utils.copy_image(from_path, img_path, host=dest)
# On exception, roll back the rename and copy operations above
except Exception:
with excutils.save_and_reraise_exception():
self._cleanup_remote_migration(dest, inst_base,
inst_base_resize,
shared_storage)
return disk_info_text
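The shared-storage probe mentioned in the comments can be sketched as below. This is illustrative only (nova's _is_storage_shared_with also short-circuits when the destination is the local host):

```python
import os
import subprocess
import uuid

def is_storage_shared_with(dest, inst_base):
    """Probe whether inst_base lives on storage shared with host `dest`.

    Drop a uniquely named marker file locally; if the destination host can
    see the same path, both hosts mount the same storage.
    """
    marker = os.path.join(inst_base, '.shared-probe-%s' % uuid.uuid4().hex)
    with open(marker, 'w'):
        pass
    try:
        # `test -f` exits 0 only if the marker is visible on the destination
        rc = subprocess.call(['ssh', dest, 'test', '-f', marker])
        return rc == 0
    finally:
        os.unlink(marker)
```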
nova/compute/manager.py ComputeManager.finish_resize
def finish_resize(self, context, disk_info, image, instance,
reservations, migration):
"""Completes the migration process.
Sets up the newly transferred disk and turns on the instance at its
new host machine.
"""
# Build the quotas object
quotas = objects.Quotas.from_reservations(context,
reservations,
instance=instance)
try:
# Delegate to the private method _finish_resize
self._finish_resize(context, instance, migration,
disk_info, image)
# Commit the quota reservation (database update)
quotas.commit()
# On exception, roll back the quota and set the instance state to ERROR
except Exception:
LOG.exception(_LE('Setting instance vm_state to ERROR'),
instance=instance)
with excutils.save_and_reraise_exception():
try:
quotas.rollback()
except Exception:
LOG.exception(_LE("Failed to rollback quota for failed "
"finish_resize"),
instance=instance)
self._set_instance_error_state(context, instance)
nova/compute/manager.py ComputeManager._finish_resize
def _finish_resize(self, context, instance, migration, disk_info,
image):
# Default to plain migration (no flavor change)
resize_instance = False
# Get the old and new flavor ids from the migration object
old_instance_type_id = migration['old_instance_type_id']
new_instance_type_id = migration['new_instance_type_id']
# Get the old flavor from the instance object
old_instance_type = instance.get_flavor()
# NOTE(mriedem): Get the old_vm_state so we know if we should
# power on the instance. If old_vm_state is not set we need to default
# to ACTIVE for backwards compatibility
old_vm_state = instance.system_metadata.get('old_vm_state',
vm_states.ACTIVE)
# Stash the old flavor on the instance under the 'old' key
instance.set_flavor(old_instance_type, 'old')
# If the old and new flavor ids differ, use the new flavor
if old_instance_type_id != new_instance_type_id:
instance_type = instance.get_flavor('new')
# Apply the new flavor: flavor id, RAM, vCPUs, root disk size, ephemeral size
self._set_instance_info(instance, instance_type)
# A changed flavor id marks this as a resize
resize_instance = True
# NOTE(tr3buchet): setup networks on destination host
# Set up networking for the instance on the target host (a no-op; the
# real NIC configuration happens in migrate_instance_finish below)
self.network_api.setup_networks_on_host(context, instance,
migration['dest_compute'])
# Migrate the NICs and update the database (set 'binding:host_id' to the target host)
migration_p = obj_base.obj_to_primitive(migration)
self.network_api.migrate_instance_finish(context,
instance,
migration_p)
# Fetch the instance's network info
network_info = self._get_instance_nw_info(context, instance)
# Set the task state to RESIZE_FINISH (expecting the previous state to be RESIZE_MIGRATED)
instance.task_state = task_states.RESIZE_FINISH
instance.save(expected_task_state=task_states.RESIZE_MIGRATED)
# Send a `compute.instance.finish_resize.start` notification to
# ceilometer with the instance and NIC details
self._notify_about_instance_usage(
context, instance, "finish_resize.start",
network_info=network_info)
# Get the instance's block device info, refreshing the volumes' 'connection_info'
block_device_info = self._get_instance_block_device_info(
context, instance, refresh_conn_info=True)
# NOTE(mriedem): If the original vm_state was STOPPED, we don't
# automatically power on the instance after it's migrated
power_on = old_vm_state != vm_states.STOPPED
try:
# finish_migration boots the instance on the new host (covered below)
self.driver.finish_migration(context, migration, instance,
disk_info,
network_info,
image, resize_instance,
block_device_info, power_on)
# Roll back on exception
except Exception:
with excutils.save_and_reraise_exception():
if resize_instance:
self._set_instance_info(instance,
old_instance_type)
# Migration finished; update the database
migration.status = 'finished'
with migration.obj_as_admin():
migration.save()
# Update the instance state
instance.vm_state = vm_states.RESIZED
instance.task_state = None
instance.launched_at = timeutils.utcnow()
instance.save(expected_task_state=task_states.RESIZE_FINISH)
# Notify `nova-scheduler` so it refreshes the instance info for this host
self._update_scheduler_instance_info(context, instance)
# Send a `compute.instance.finish_resize.end` notification to ceilometer
# with the instance and NIC details
self._notify_about_instance_usage(
context, instance, "finish_resize.end",
network_info=network_info)
nova/virt/libvirt/driver.py LibvirtDriver.finish_migration
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
LOG.debug("Starting finish_migration", instance=instance)
# resize disks. only "disk" and "disk.local" are necessary.
disk_info = jsonutils.loads(disk_info)
for info in disk_info:
# Get each disk's target size (root or ephemeral) from the instance
size = self._disk_size_from_instance(instance, info)
if resize_instance:
# If resize_instance is set (the flavors differ), resize the disk; only
# growing is supported. A qcow2 disk that can be resized and whose image
# is extendable is converted to raw first, extended, then converted back
self._disk_resize(info, size)
# If the disk is raw and use_cow_images is true, convert it to qcow2;
# with the option disabled no backing file is used (COW = copy-on-write)
if info['type'] == 'raw' and CONF.use_cow_images:
self._disk_raw_to_qcow2(info['path'])
# Get the disk-mapping info as a dict:
"""
'disk_bus': disk_bus,
'cdrom_bus': cdrom_bus,
'mapping': mapping
"""
disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
instance,
image_meta,
block_device_info)
# assume _create_image do nothing if a target file exists.
# Create the images (detailed below)
self._create_image(context, instance, disk_info['mapping'],
network_info=network_info,
block_device_info=None, inject_files=False,
fallback_from_host=migration.source_compute)
# Generate the guest XML definition
xml = self._get_guest_xml(context, instance, network_info, disk_info,
image_meta,
block_device_info=block_device_info,
write_to_disk=True)
# NOTE(mriedem): vifs_already_plugged=True here, regardless of whether
# or not we've migrated to another host, because we unplug VIFs locally
# and the status change in the port might go undetected by the neutron
# L2 agent (or neutron server) so neutron may not know that the VIF was
# unplugged in the first place and never send an event.
# Boot the instance from the context, XML, instance object, network info and disk info (detailed below)
self._create_domain_and_network(context, xml, instance, network_info,
disk_info,
block_device_info=block_device_info,
power_on=power_on,
vifs_already_plugged=True)
if power_on:
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_running,
instance)
timer.start(interval=0.5).wait()
Continuing with image creation: nova/virt/libvirt/driver.py LibvirtDriver._create_image
def _create_image(self, context, instance,
disk_mapping, suffix='',
disk_images=None, network_info=None,
block_device_info=None, files=None,
admin_pass=None, inject_files=True,
fallback_from_host=None):
"""
context: request context
instance: instance object
disk_mapping, suffix='': disk mapping info; filename suffix for disk images
disk_images=None: image info containing image_id, kernel_id, ramdisk_id
network_info=None: network info
block_device_info=None: block device info
files=None: files to inject
admin_pass=None: admin password
inject_files=True: file-injection toggle
fallback_from_host=None: host to fall back to when fetching the image
"""
# Booted from a volume if the instance has no image_ref or the root disk is absent from disk_mapping
booted_from_volume = self._is_booted_from_volume(
instance, disk_mapping)
# Image factory helpers
def image(fname, image_type=CONF.libvirt.images_type):
return self.image_backend.image(instance,
fname + suffix, image_type)
def raw(fname):
return image(fname, image_type='raw')
# ensure directories exist and are writable
fileutils.ensure_tree(libvirt_utils.get_instance_path(instance))
# nova-compute logs 'Creating image' at INFO level
LOG.info(_LI('Creating image'), instance=instance)
# The next two calls fix file ownership
# NOTE(dprince): for rescue console.log may already exist... chown it.
self._chown_console_log_for_instance(instance)
# NOTE(yaguang): For evacuate disk.config already exist in shared
# storage, chown it.
self._chown_disk_config_for_instance(instance)
# NOTE(vish): No need add the suffix to console.log
# Build the console-log path, ensure the directory exists, and set permissions
libvirt_utils.write_to_file(
self._get_console_log_path(instance), '', 7)
# If disk_images was not passed in, build the dict from the instance object
if not disk_images:
disk_images = {'image_id': instance.image_ref,
'kernel_id': instance.kernel_id,
'ramdisk_id': instance.ramdisk_id}
if disk_images['kernel_id']:
# fname is hashlib.sha1(image_id).hexdigest(), a hash of the image id (see the check after this method)
fname = imagecache.get_cache_fname(disk_images, 'kernel_id')
# Cache the image locally
raw('kernel').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['kernel_id'],
user_id=instance.user_id,
project_id=instance.project_id)
"""
fetch_func: downloads the image via the glance API
context: request context
filename: local filename for the cached image
image_id: kernel_id from the disk_images dict
user_id: user id
project_id: project id
"""
if disk_images['ramdisk_id']:
fname = imagecache.get_cache_fname(disk_images, 'ramdisk_id')
raw('ramdisk').cache(fetch_func=libvirt_utils.fetch_image,
context=context,
filename=fname,
image_id=disk_images['ramdisk_id'],
user_id=instance.user_id,
project_id=instance.project_id)
inst_type = instance.get_flavor()
# NOTE(ndipanov): Even if disk_mapping was passed in, which
# currently happens only on rescue - we still don't want to
# create a base image.
if not booted_from_volume:
root_fname = imagecache.get_cache_fname(disk_images, 'image_id')
# Compute the root disk size
size = instance.root_gb * units.Gi
if size == 0 or suffix == '.rescue':
size = None
backend = image('disk')
# If the image backend supports cloning, create the image by cloning;
# most backends do not, but rbd does, via rbd.RBD().clone()
if backend.SUPPORTS_CLONE:
def clone_fallback_to_fetch(*args, **kwargs):
try:
backend.clone(context, disk_images['image_id'])
except exception.ImageUnacceptable:
libvirt_utils.fetch_image(*args, **kwargs)
fetch_func = clone_fallback_to_fetch
else:
# Same fetch_func as in the kernel/ramdisk caching above
fetch_func = libvirt_utils.fetch_image
# If the image is not in glance, copy it from a host that already has it (rsync/scp, etc.)
self._try_fetch_image_cache(backend, fetch_func, context,
root_fname, disk_images['image_id'],
instance, size, fallback_from_host)
# Lookup the filesystem type if required
os_type_with_default = disk.get_fs_type_for_os_type(instance.os_type)
# Generate a file extension based on the file system
# type and the mkfs commands configured if any
# File extension derived from the filesystem type and any configured mkfs commands
file_extension = disk.get_file_extension_for_os_type(
os_type_with_default)
ephemeral_gb = instance.ephemeral_gb
# The ephemeral and swap disk handling below parallels the cases above; skimmed here #########################
if 'disk.local' in disk_mapping:
disk_image = image('disk.local')
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral0',
os_type=instance.os_type,
is_block_dev=disk_image.is_block_dev)
fname = "ephemeral_%s_%s" % (ephemeral_gb, file_extension)
size = ephemeral_gb * units.Gi
disk_image.cache(fetch_func=fn,
context=context,
filename=fname,
size=size,
ephemeral_size=ephemeral_gb)
for idx, eph in enumerate(driver.block_device_info_get_ephemerals(
block_device_info)):
disk_image = image(blockinfo.get_eph_disk(idx))
specified_fs = eph.get('guest_format')
if specified_fs and not self.is_supported_fs_format(specified_fs):
msg = _("%s format is not supported") % specified_fs
raise exception.InvalidBDMFormat(details=msg)
fn = functools.partial(self._create_ephemeral,
fs_label='ephemeral%d' % idx,
os_type=instance.os_type,
is_block_dev=disk_image.is_block_dev)
size = eph['size'] * units.Gi
fname = "ephemeral_%s_%s" % (eph['size'], file_extension)
disk_image.cache(fetch_func=fn,
context=context,
filename=fname,
size=size,
ephemeral_size=eph['size'],
specified_fs=specified_fs)
if 'disk.swap' in disk_mapping:
mapping = disk_mapping['disk.swap']
swap_mb = 0
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
swap_mb = swap['swap_size']
elif (inst_type['swap'] > 0 and
not block_device.volume_in_mapping(
mapping['dev'], block_device_info)):
swap_mb = inst_type['swap']
if swap_mb > 0:
size = swap_mb * units.Mi
image('disk.swap').cache(fetch_func=self._create_swap,
context=context,
filename="swap_%s" % swap_mb,
size=size,
swap_mb=swap_mb)
#########################################################################
# Config drive
if configdrive.required_by(instance):
LOG.info(_LI('Using config drive'), instance=instance)
extra_md = {}
# Put the admin password into the extra metadata dict
if admin_pass:
extra_md['admin_pass'] = admin_pass
# Build the instance metadata object
inst_md = instance_metadata.InstanceMetadata(instance,
content=files, extra_md=extra_md, network_info=network_info)
# Instantiate the config-drive builder
with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
# Get the config-drive path for the instance
configdrive_path = self._get_disk_config_path(instance, suffix)
LOG.info(_LI('Creating config drive at %(path)s'),
{'path': configdrive_path}, instance=instance)
try:
# Build the config drive; the default format is iso9660, vfat is also supported
cdb.make_drive(configdrive_path)
except processutils.ProcessExecutionError as e:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Creating config drive failed '
'with error: %s'),
e, instance=instance)
# File injection only if needed
# File injection proceeds only if inject_partition is not -2 (the option is commented out by default)
elif inject_files and CONF.libvirt.inject_partition != -2:
# Injection into volume-backed instances is not supported
if booted_from_volume:
LOG.warn(_LW('File injection into a boot from volume '
'instance is not supported'), instance=instance)
self._inject_data(
instance, network_info, admin_pass, files, suffix)
"""
instance: instance spec dict
network_info: network spec dict
admin_pass: admin password string
files: list of files to inject
suffix: image suffix
Supports injecting custom dicts, the admin password, network info, metadata, etc.
"""
if CONF.libvirt.virt_type == 'uml':
libvirt_utils.chown(image('disk').path, 'root')
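The cache-filename rule noted above (sha1 of the image id) is easy to check by hand; a tiny sketch using the image id from the resize trace:

```python
import hashlib

# Image id seen in the resize trace above
image_id = '0be3adcf-181b-4c8a-a5f4-657e972497d8'

# get_cache_fname, as described in the comments, hashes the image id;
# this is typically the base-image filename under instances/_base
print(hashlib.sha1(image_id.encode()).hexdigest())
```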
Continuing with booting the instance: nova/virt/libvirt/driver.py LibvirtDriver._create_domain_and_network
def _create_domain_and_network(self, context, xml, instance, network_info,
disk_info, block_device_info=None,
power_on=True, reboot=False,
vifs_already_plugged=False):
"""Do required network setup and create domain."""
# Get the block device mapping from block_device_info
block_device_mapping = driver.block_device_info_get_mapping(
block_device_info)
# Get the image metadata dict
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
# If the block device mapping contains volumes, set up their encryption before boot
for vol in block_device_mapping:
connection_info = vol['connection_info']
if (not reboot and 'data' in connection_info and
'volume_id' in connection_info['data']):
volume_id = connection_info['data']['volume_id']
encryption = encryptors.get_encryption_metadata(
context, self._volume_api, volume_id, connection_info)
if encryption:
encryptor = self._get_volume_encryptor(connection_info,
encryption)
encryptor.attach_volume(context, **encryption)
timeout = CONF.vif_plugging_timeout
if (self._conn_supports_start_paused and
utils.is_neutron() and not
vifs_already_plugged and power_on and timeout):
events = self._get_neutron_events(network_info)
else:
events = []
launch_flags = events and libvirt.VIR_DOMAIN_START_PAUSED or 0
domain = None
try:
with self.virtapi.wait_for_instance_event(
instance, events, deadline=timeout,
error_callback=self._neutron_failed_callback):
self.plug_vifs(instance, network_info)
self.firewall_driver.setup_basic_filtering(instance,
network_info)
self.firewall_driver.prepare_instance_filter(instance,
network_info)
with self._lxc_disk_handler(instance, image_meta,
block_device_info, disk_info):
domain = self._create_domain(
xml, instance=instance,
launch_flags=launch_flags,
power_on=power_on)
self.firewall_driver.apply_instance_filter(instance,
network_info)
except exception.VirtualInterfaceCreateException:
# Neutron reported failure and we didn't swallow it, so
# bail here
with excutils.save_and_reraise_exception():
if domain:
domain.destroy()
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info)
except eventlet.timeout.Timeout:
# We never heard from Neutron
LOG.warn(_LW('Timeout waiting for vif plugging callback for '
'instance %(uuid)s'), {'uuid': instance.uuid})
if CONF.vif_plugging_is_fatal:
if domain:
domain.destroy()
self.cleanup(context, instance, network_info=network_info,
block_device_info=block_device_info)
raise exception.VirtualInterfaceCreateException()
# Resume only if domain has been paused
if launch_flags & libvirt.VIR_DOMAIN_START_PAUSED:
domain.resume()
return domain