This is a quick walkthrough of the code path that nova boot from volume takes in OpenStack Liberty.
nova boot from volume command line
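A typical boot-from-volume invocation looks roughly like the one below (the flavor name, volume UUID and server name are placeholders); the --block-device fields are what the client turns into block_device_mapping_v2:

    nova boot --flavor m1.small \
        --block-device source=volume,id=<volume-uuid>,dest=volume,bootindex=0 \
        bfv-test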
nova client
novaclient/v2/shell.py

# The novaclient side issues the request.
def do_boot(cs, args):
    """Boot a new server."""
    boot_args, boot_kwargs = _boot(cs, args)

    extra_boot_kwargs = utils.get_resource_manager_extra_kwargs(do_boot, args)
    boot_kwargs.update(extra_boot_kwargs)

    server = cs.servers.create(*boot_args, **boot_kwargs)  # jumps to novaclient/v2/servers.py
    _print_server(cs, args, server)

    if args.poll:
        _poll_for_status(cs.servers.get, server.id, 'building', ['active'])

novaclient/v2/servers.py

    def create(self, name, image, flavor, meta=None, files=None,
               reservation_id=None, min_count=None,
               max_count=None, security_groups=None, userdata=None,
               key_name=None, availability_zone=None,
               block_device_mapping=None, block_device_mapping_v2=None,
               nics=None, scheduler_hints=None,
               config_drive=None, disk_config=None, admin_pass=None,
               access_ip_v4=None, access_ip_v6=None, **kwargs):
        if not min_count:
            min_count = 1
        if not max_count:
            max_count = min_count
        if min_count > max_count:
            min_count = max_count

        boot_args = [name, image, flavor]

        descr_microversion = api_versions.APIVersion("2.19")
        if "description" in kwargs and self.api_version < descr_microversion:
            raise exceptions.UnsupportedAttribute("description", "2.19")

        boot_kwargs = dict(
            meta=meta, files=files, userdata=userdata,
            reservation_id=reservation_id, min_count=min_count,
            max_count=max_count, security_groups=security_groups,
            key_name=key_name, availability_zone=availability_zone,
            scheduler_hints=scheduler_hints,
            config_drive=config_drive, disk_config=disk_config,
            admin_pass=admin_pass, access_ip_v4=access_ip_v4,
            access_ip_v6=access_ip_v6, **kwargs)

        if block_device_mapping:
            resource_url = "/os-volumes_boot"
            boot_kwargs['block_device_mapping'] = block_device_mapping
        elif block_device_mapping_v2:
            # block_device_mapping_v2 is used by default
            resource_url = "/os-volumes_boot"  # the os-volumes_boot route
            boot_kwargs['block_device_mapping_v2'] = block_device_mapping_v2
        else:
            resource_url = "/servers"
        if nics:
            boot_kwargs['nics'] = nics

        response_key = "server"
        return self._boot(resource_url, response_key, *boot_args,
                          **boot_kwargs)  # the client wraps this into a RESTful API request
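The same path can be exercised directly from Python. The sketch below is only an illustration (the endpoint, credentials, flavor and volume UUID are placeholders): it builds a block_device_mapping_v2 entry, which is exactly what makes create() above choose the /os-volumes_boot resource URL.

# Illustrative only: boot from an existing Cinder volume through
# python-novaclient; credentials, endpoint and UUIDs are placeholders.
from novaclient import client

nova = client.Client('2', 'admin', 'secret', 'admin',
                     'http://controller:5000/v2.0')

flavor = nova.flavors.find(name='m1.small')
bdm_v2 = [{
    'uuid': 'VOLUME-UUID',           # pre-created bootable Cinder volume
    'source_type': 'volume',         # boot source is a volume
    'destination_type': 'volume',    # keep it attached as a volume
    'boot_index': 0,                 # use it as the boot disk
    'delete_on_termination': False,
}]

# image=None because the root disk comes from the volume; this takes the
# block_device_mapping_v2 branch in create() above.
server = nova.servers.create(name='bfv-test', image=None, flavor=flavor,
                             block_device_mapping_v2=bdm_v2)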
nova server
nova/api/openstack/compute/volumes.py

class Volumes(extensions.V21APIExtensionBase):

    def get_resources(self):
        resources = []

        res = extensions.ResourceExtension(
            ALIAS, VolumeController(), collection_actions={'detail': 'GET'})
        resources.append(res)

        res = extensions.ResourceExtension('os-volumes_boot',  # matches the os-volumes_boot route
                                           inherits='servers')  # presumably meaning it inherits from the servers route
        resources.append(res)

nova/api/openstack/compute/servers.py

class Servers(extensions.V21APIExtensionBase):
    """Servers."""

    name = "Servers"
    alias = ALIAS  # here ALIAS is 'servers'
    version = 1

    def get_resources(self):
        member_actions = {'action': 'POST'}
        collection_actions = {'detail': 'GET'}
        resources = [
            extensions.ResourceExtension(
                ALIAS,
                ServersController(extension_info=self.extension_info),  # the ServersController class
                member_name='server',
                collection_actions=collection_actions,
                member_actions=member_actions)]

        return resources

# The ServersController class
class ServersController(wsgi.Controller):

    @wsgi.response(202)
    @extensions.expected_errors((400, 403, 409, 413))
    @validation.schema(schema_server_create_v20, '2.0', '2.0')
    @validation.schema(schema_server_create, '2.1', '2.18')
    @validation.schema(schema_server_create_v219, '2.19')
    def create(self, req, body):
        """Creates a new server for a given user."""
        ......
        (instances, resv_id) = self.compute_api.create(context,  # jumps to nova/compute/api.py
                        inst_type,
                        image_uuid,
                        display_name=name,
                        display_description=description,
                        availability_zone=availability_zone,
                        forced_host=host, forced_node=node,
                        metadata=server_dict.get('metadata', {}),
                        admin_password=password,
                        requested_networks=requested_networks,
                        check_server_group_quota=True,
                        **create_kwargs)
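For context, the POST body that reaches ServersController.create() for a boot-from-volume request has roughly the following shape (all values are placeholders); it is what the schema decorators above validate:

# Rough shape of the request body handled by ServersController.create();
# all values here are placeholders.
request_body = {
    "server": {
        "name": "bfv-test",
        "flavorRef": "FLAVOR-ID",
        # no imageRef: the root disk is provided by the volume below
        "block_device_mapping_v2": [{
            "uuid": "VOLUME-UUID",
            "source_type": "volume",
            "destination_type": "volume",
            "boot_index": 0,
            "delete_on_termination": False,
        }],
    }
}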
""" instances = self._provision_instances(context, instance_type, min_count, max_count, base_options, boot_meta, security_groups, block_device_mapping, shutdown_terminate, instance_group, check_server_group_quota, filter_properties) for instance in instances: self._record_action_start(context, instance, instance_actions.CREATE) self.compute_task_api.build_instances(context, # 接下来会调用到conductor instances=instances, image=boot_meta, filter_properties=filter_properties, admin_password=admin_password, injected_files=injected_files, requested_networks=requested_networks, security_groups=security_groups, block_device_mapping=block_device_mapping, legacy_bdm=False) nova/conductor/api.py class ComputeTaskAPI(object): def build_instances(self, context, instances, image, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, legacy_bdm=True): self.conductor_compute_rpcapi.build_instances(context, instances=instances, image=image, filter_properties=filter_properties, admin_password=admin_password, injected_files=injected_files, requested_networks=requested_networks, security_groups=security_groups, block_device_mapping=block_device_mapping, legacy_bdm=legacy_bdm) # rpc操作 nova/conductor/rpcapi.py class ComputeTaskAPI(object): def build_instances(self, context, instances, image, filter_properties, admin_password, injected_files, requested_networks, security_groups, block_device_mapping, legacy_bdm=True): image_p = jsonutils.to_primitive(image) version = '1.10' if not self.client.can_send_version(version): version = '1.9' if 'instance_type' in filter_properties: flavor = filter_properties['instance_type'] flavor_p = objects_base.obj_to_primitive(flavor) filter_properties = dict(filter_properties, instance_type=flavor_p) kw = {'instances': instances, 'image': image_p, 'filter_properties': filter_properties, 'admin_password': admin_password, 'injected_files': injected_files, 'requested_networks': requested_networks, 'security_groups': security_groups} if not self.client.can_send_version(version): version = '1.8' kw['requested_networks'] = kw['requested_networks'].as_tuples() if not self.client.can_send_version('1.7'): version = '1.5' bdm_p = objects_base.obj_to_primitive(block_device_mapping) kw.update({'block_device_mapping': bdm_p, 'legacy_bdm': legacy_bdm}) cctxt = self.client.prepare(version=version) cctxt.cast(context, 'build_instances', **kw) nova/conductor/api.py(223)build_instances() nova/conductor/manager.py class ComputeTaskManager(base.Base): def build_instances(): self.compute_rpcapi.build_and_run_instance(context, instance=instance, host=host['host'], image=image, request_spec=request_spec, filter_properties=local_filter_props, admin_password=admin_password, injected_files=injected_files, requested_networks=requested_networks, security_groups=security_groups, block_device_mapping=bdms, node=host['nodename'], limits=host['limits']) nova/compute/rpcapi.py class ComputeAPI(object): def build_and_run_instance(self, ctxt, instance, host, image, request_spec, filter_properties, admin_password=None, injected_files=None, requested_networks=None, security_groups=None, block_device_mapping=None, node=None, limits=None): version = '4.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'build_and_run_instance', instance=instance, image=image, request_spec=request_spec, filter_properties=filter_properties, admin_password=admin_password, injected_files=injected_files, requested_networks=requested_networks, 
pdb frame: nova/conductor/api.py(223)build_instances()

nova/conductor/manager.py

class ComputeTaskManager(base.Base):

    def build_instances():
        self.compute_rpcapi.build_and_run_instance(context,
                instance=instance, host=host['host'], image=image,
                request_spec=request_spec,
                filter_properties=local_filter_props,
                admin_password=admin_password,
                injected_files=injected_files,
                requested_networks=requested_networks,
                security_groups=security_groups,
                block_device_mapping=bdms, node=host['nodename'],
                limits=host['limits'])

nova/compute/rpcapi.py

class ComputeAPI(object):

    def build_and_run_instance(self, ctxt, instance, host, image, request_spec,
            filter_properties, admin_password=None, injected_files=None,
            requested_networks=None, security_groups=None,
            block_device_mapping=None, node=None, limits=None):
        version = '4.0'
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'build_and_run_instance', instance=instance,
                   image=image, request_spec=request_spec,
                   filter_properties=filter_properties,
                   admin_password=admin_password,
                   injected_files=injected_files,
                   requested_networks=requested_networks,
                   security_groups=security_groups,
                   block_device_mapping=block_device_mapping, node=node,
                   limits=limits)

nova/compute/manager.py

class ComputeManager(manager.Manager):

    @wrap_exception()
    @reverts_task_state
    @wrap_instance_fault
    def build_and_run_instance(self, context, instance, image, request_spec,
                               filter_properties, admin_password=None,
                               injected_files=None, requested_networks=None,
                               security_groups=None, block_device_mapping=None,
                               node=None, limits=None):

        @utils.synchronized(instance.uuid)
        def _locked_do_build_and_run_instance(*args, **kwargs):
            # NOTE(danms): We grab the semaphore with the instance uuid
            # locked because we could wait in line to build this instance
            # for a while and we want to make sure that nothing else tries
            # to do anything with this instance while we wait.
            with self._build_semaphore:
                self._do_build_and_run_instance(*args, **kwargs)

        # NOTE(danms): We spawn here to return the RPC worker thread back to
        # the pool. Since what follows could take a really long time, we don't
        # want to tie up RPC workers.
        utils.spawn_n(_locked_do_build_and_run_instance,
                      context, instance, image, request_spec,
                      filter_properties, admin_password, injected_files,
                      requested_networks, security_groups,
                      block_device_mapping, node, limits)

    @hooks.add_hook('build_instance')
    @wrap_exception()
    @reverts_task_state
    @wrap_instance_event
    @wrap_instance_fault
    def _do_build_and_run_instance(self, context, instance, image,
            request_spec, filter_properties, admin_password, injected_files,
            requested_networks, security_groups, block_device_mapping,
            node=None, limits=None):

        try:
            LOG.debug('Starting instance...', context=context,
                      instance=instance)
            instance.vm_state = vm_states.BUILDING
            instance.task_state = None
            instance.save(expected_task_state=
                          (task_states.SCHEDULING, None))
        ......
        try:
            with timeutils.StopWatch() as timer:
                self._build_and_run_instance(context, instance, image,
                        decoded_files, admin_password, requested_networks,
                        security_groups, block_device_mapping, node, limits,
                        filter_properties)
            LOG.info(_LI('Took %0.2f seconds to build instance.'),
                     timer.elapsed(), instance=instance)
            return build_results.ACTIVE
        ......
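As an aside before continuing with _build_and_run_instance: the @utils.synchronized(instance.uuid) decorator above is a thin wrapper around oslo.concurrency's lock decorator, so the per-instance serialization boils down to the pattern sketched below (the lock name and function are illustrative, not nova code).

# Standalone sketch of the per-instance locking pattern; the lock name and
# function below are illustrative, not nova code.
from oslo_concurrency import lockutils

synchronized = lockutils.synchronized_with_prefix('nova-')


@synchronized('INSTANCE-UUID')   # one named lock per instance UUID
def _locked_build():
    # Only one build for this instance can run at a time on this host;
    # concurrent callers queue on the same lock.
    pass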
    def _build_and_run_instance(self, context, instance, image, injected_files,
            admin_password, requested_networks, security_groups,
            block_device_mapping, node, limits, filter_properties):

        with self._build_resources(context, instance,
                requested_networks, security_groups, image_meta,
                block_device_mapping) as resources:
            instance.vm_state = vm_states.BUILDING
            instance.task_state = task_states.SPAWNING
            # NOTE(JoshNang) This also saves the changes to the
            # instance from _allocate_network_async, as they aren't
            # saved in that function to prevent races.
            instance.save(expected_task_state=
                          task_states.BLOCK_DEVICE_MAPPING)
            block_device_info = resources['block_device_info']
            network_info = resources['network_info']
            LOG.debug('Start spawning the instance on the hypervisor.',
                      instance=instance)
            with timeutils.StopWatch() as timer:
                self.driver.spawn(context, instance, image_meta,  # the driver here is libvirt
                                  injected_files, admin_password,
                                  network_info=network_info,
                                  block_device_info=block_device_info)
            LOG.info(_LI('Took %0.2f seconds to spawn the instance on '
                         'the hypervisor.'), timer.elapsed(),
                     instance=instance)

nova/virt/libvirt/driver.py

class LibvirtDriver(driver.ComputeDriver):

    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info=None, block_device_info=None):
        disk_info = blockinfo.get_disk_info(CONF.libvirt.virt_type,
                                            instance,
                                            image_meta,
                                            block_device_info)
        self._create_image(context, instance,  # fetch/prepare the image
                           disk_info['mapping'],
                           network_info=network_info,
                           block_device_info=block_device_info,
                           files=injected_files,
                           admin_pass=admin_password)
        xml = self._get_guest_xml(context, instance, network_info,  # build the libvirt XML
                                  disk_info, image_meta,
                                  block_device_info=block_device_info,
                                  write_to_disk=True)
        self._create_domain_and_network(context, xml, instance, network_info,  # create the libvirt domain and its network
                                        disk_info,
                                        block_device_info=block_device_info)
        LOG.debug("Instance is running", instance=instance)

        def _wait_for_boot():
            """Called at an interval until the VM is running."""
            state = self.get_info(instance).state

            if state == power_state.RUNNING:
                LOG.info(_LI("Instance spawned successfully."),
                         instance=instance)
                raise loopingcall.LoopingCallDone()

        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
        timer.start(interval=0.5).wait()

nova/utils.py

def spawn_n(func, *args, **kwargs):
    """Passthrough method for eventlet.spawn_n.

    This utility exists so that it can be stubbed for testing without
    interfering with the service spawns.

    It will also grab the context from the threadlocal store and add it to
    the store on the new thread.  This allows for continuity in logging the
    context when using this method to spawn a new thread.
    """
    _context = common_context.get_current()

    @functools.wraps(func)
    def context_wrapper(*args, **kwargs):
        # NOTE: If update_store is not called after spawn_n it won't be
        # available for the logger to pull from threadlocal storage.
        if _context is not None:
            _context.update_store()
        func(*args, **kwargs)

    eventlet.spawn_n(context_wrapper, *args, **kwargs)  # takes advantage of eventlet's concurrency
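spawn_n hands the long-running build to an eventlet greenthread so the RPC worker is freed immediately. A toy example (not nova code) of that fire-and-forget behaviour:

# Toy example (not nova code): eventlet.spawn_n returns immediately and the
# work runs in a greenthread.
import eventlet
eventlet.monkey_patch()


def long_running_build(name):
    eventlet.sleep(1)                  # stands in for the slow instance build
    print('%s finished' % name)


eventlet.spawn_n(long_running_build, 'instance-1')  # caller does not wait
print('RPC worker is free again')
eventlet.sleep(2)                      # give the greenthread time to finish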
All of the above was traced and summarized with pdb; corrections for anything inaccurate or missing are welcome.