This article is in four parts: (1) KVM virtual machine creation, (2) VCPU creation, (3) the KVM_SET_USER_MEMORY_REGION flow, and (4) running the KVM virtual machine.
Part 1 --- KVM virtual machine creation
1. Basic principles
As analyzed previously, a KVM virtual machine is created through the KVM_CREATE_VM System ioctl on the /dev/kvm character device.
For a virtual machine (VM), the kvm structure is the key object: one VM corresponds to one kvm structure, so creating a VM is essentially creating and initializing a kvm structure.
This article briefly explains and analyzes the relevant flow in the 3.10 kernel code; the userspace qemu-kvm side is not covered.
2. Overall flow (a minimal userspace sketch of the triggering ioctl follows the call tree):
ioctl(fd, KVM_CREATE_VM, ...)                // userspace
  kvm_dev_ioctl()                            // kernel-side ioctl entry
    kvm_dev_ioctl_create_vm()
      kvm_create_vm()                        // main function implementing VM creation
        kvm_arch_alloc_vm()                  // allocate the kvm structure
        kvm_arch_init_vm()                   // initialize the arch-specific parts of kvm, e.g. interrupts
        hardware_enable_all()                // enable the hardware; arch-specific
          hardware_enable_nolock()
            kvm_arch_hardware_enable()
              kvm_x86_ops->hardware_enable()
        kzalloc()                            // allocate the memslots structure, zero-initialized
        kvm_init_memslots_id()               // initialize the id of each memory slot
        kvm_eventfd_init()                   // initialize eventfd support
        kvm_init_mmu_notifier()              // register the mmu notifier
        list_add(&kvm->vm_list, &vm_list)    // add the new VM's kvm structure to the global vm_list
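For orientation, here is a minimal, self-contained userspace sketch of the first step of this flow: opening /dev/kvm and issuing KVM_CREATE_VM. It uses only the standard KVM ioctl interface; the variable names are mine and error handling is reduced to the essentials.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/kvm.h>

int main(void)
{
    /* System ioctls (like KVM_CREATE_VM) go through the /dev/kvm fd. */
    int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
    if (kvm_fd < 0) {
        perror("open /dev/kvm");
        return EXIT_FAILURE;
    }

    /* Sanity check: the stable KVM API version is 12 (KVM_API_VERSION). */
    int api = ioctl(kvm_fd, KVM_GET_API_VERSION, 0);
    if (api != KVM_API_VERSION) {
        fprintf(stderr, "unexpected KVM API version %d\n", api);
        return EXIT_FAILURE;
    }

    /*
     * This is the call analyzed below: it enters kvm_dev_ioctl() and
     * kvm_create_vm() in the kernel and returns a new VM fd. The type
     * argument (0) selects the default VM type.
     */
    int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
    if (vm_fd < 0) {
        perror("KVM_CREATE_VM");
        return EXIT_FAILURE;
    }
    printf("created VM, fd = %d\n", vm_fd);

    close(vm_fd);
    close(kvm_fd);
    return 0;
}

The returned vm_fd is the handle used for all VM ioctls discussed later (KVM_CREATE_VCPU, KVM_SET_USER_MEMORY_REGION).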
3. Code analysis
The kvm structure:
/*
 * Global per-VM data structure; it holds the key KVM data of a VM,
 * including the memslots and so on.
 */
struct kvm {
    // spinlock protecting the mmu
    spinlock_t mmu_lock;
    struct mutex slots_lock;
    // points to the mm_struct of the userspace qemu process?
    struct mm_struct *mm; /* userspace tied to this vm */
    /*
     * kvm_memory_slot is the main KVM memory-management structure; it
     * describes the mapping between the VM's guest physical addresses
     * (GPA) and host virtual addresses (HVA). One kvm_memory_slot
     * describes the mapping of one memory region (slot). kvm_memslots
     * wraps an array of kvm_memory_slot entries covering all memory
     * regions (slots) used by this VM.
     */
    struct kvm_memslots *memslots;
    struct srcu_struct srcu;
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
    u32 bsp_vcpu_id;
#endif
    // array of the VCPUs contained in this VM; one array entry per VCPU
    struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
    // number of online vcpus
    atomic_t online_vcpus;
    int last_boosted_vcpu;
    struct list_head vm_list;
    struct mutex lock;
    // array of the I/O buses in this VM; one kvm_io_bus per bus, e.g. the ISA bus or the PCI bus
    struct kvm_io_bus *buses[KVM_NR_BUSES];
    // eventfd support
#ifdef CONFIG_HAVE_KVM_EVENTFD
    struct {
        spinlock_t lock;
        struct list_head items;
        struct list_head resampler_list;
        struct mutex resampler_lock;
    } irqfds;
    struct list_head ioeventfds;
#endif
    // runtime statistics of the VM, e.g. page-table and MMU related counters
    struct kvm_vm_stat stat;
    // arch-specific part
    struct kvm_arch arch;
    // reference count
    atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    spinlock_t ring_lock;
    struct list_head coalesced_zones;
#endif

    struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
    /*
     * Update side is protected by irq_lock and,
     * if configured, irqfds.lock.
     */
    // irq routing and notifiers
    struct kvm_irq_routing_table __rcu *irq_routing;
    struct hlist_head mask_notifier_list;
    struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
    // mmu notifier
    struct mmu_notifier mmu_notifier;
    unsigned long mmu_notifier_seq;
    long mmu_notifier_count;
#endif
    // number of dirty TLBs
    long tlbs_dirty;
    struct list_head devices;
};
kvm_dev_ioctl()-->kvm_dev_ioctl_create_vm()-->kvm_create_vm():
/*
 * Main function implementing VM creation
 */
static struct kvm *kvm_create_vm(unsigned long type)
{
    int r, i;
    /*
     * Allocate the kvm structure. One VM corresponds to one kvm
     * structure, which covers the VM's key subsystems: memory,
     * interrupts, VCPUs, buses, etc. It is one of KVM's central
     * structures.
     */
    struct kvm *kvm = kvm_arch_alloc_vm();

    if (!kvm)
        return ERR_PTR(-ENOMEM);
    // initialize the arch-specific parts of kvm, e.g. interrupts
    r = kvm_arch_init_vm(kvm, type);
    if (r)
        goto out_err_nodisable;
    // enable the hardware; ends up in the arch-specific kvm_x86_ops->hardware_enable()
    r = hardware_enable_all();
    if (r)
        goto out_err_nodisable;

#ifdef CONFIG_HAVE_KVM_IRQCHIP
    INIT_HLIST_HEAD(&kvm->mask_notifier_list);
    INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

    BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

    r = -ENOMEM;
    // allocate the memslots structure, zero-initialized
    kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
    if (!kvm->memslots)
        goto out_err_nosrcu;
    // initialize the id of each memory slot for later lookup
    kvm_init_memslots_id(kvm);
    if (init_srcu_struct(&kvm->srcu))
        goto out_err_nosrcu;
    // initialize the VM's buses
    for (i = 0; i < KVM_NR_BUSES; i++) {
        kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
                    GFP_KERNEL);
        if (!kvm->buses[i])
            goto out_err;
    }
    // initialize mmu_lock
    spin_lock_init(&kvm->mmu_lock);
    // set the VM's mm (mm_struct) to the current process's mm
    kvm->mm = current->mm;
    atomic_inc(&kvm->mm->mm_count);
    // initialize eventfd support
    kvm_eventfd_init(kvm);
    mutex_init(&kvm->lock);
    mutex_init(&kvm->irq_lock);
    mutex_init(&kvm->slots_lock);
    atomic_set(&kvm->users_count, 1);
    INIT_LIST_HEAD(&kvm->devices);

    // register the mmu notifier
    r = kvm_init_mmu_notifier(kvm);
    if (r)
        goto out_err;

    raw_spin_lock(&kvm_lock);
    // add the new VM's kvm structure to the global vm_list
    list_add(&kvm->vm_list, &vm_list);
    raw_spin_unlock(&kvm_lock);

    return kvm;

out_err:
    cleanup_srcu_struct(&kvm->srcu);
out_err_nosrcu:
    hardware_disable_all();
out_err_nodisable:
    for (i = 0; i < KVM_NR_BUSES; i++)
        kfree(kvm->buses[i]);
    kfree(kvm->memslots);
    kvm_arch_free_vm(kvm);
    return ERR_PTR(r);
}
Part 2 --- VCPU creation
1. Basic principles
As analyzed in earlier articles, in a KVM virtualization environment hardware virtualization describes a virtual CPU with a VCPU descriptor. The VCPU descriptor is analogous to a process descriptor in an OS; it is essentially a kvm_vcpu structure, which contains:
VCPU identification, e.g. the VCPU's id and which guest the VCPU belongs to.
Virtual register state; in a VT-x environment this is held in the VMCS.
VCPU run state, indicating the state the VCPU is currently in (sleeping, running, etc.), used mainly by the scheduler.
Additional register/unit state, meaning registers or CPU units not covered by the VMCS, e.g. floating-point registers and the virtual LAPIC.
Other information: fields the VMM uses for optimization or to store extra data, e.g. a pointer to the VCPU's private data.
When the VMM creates a virtual machine, it must first create VCPUs for it; the running of the whole VM can in fact be viewed as the VMM scheduling different VCPUs to run.
A VM's VCPUs are created through the KVM_CREATE_VCPU VM ioctl, which essentially creates a kvm_vcpu structure and performs the related initialization. This article briefly analyzes the VCPU creation process; the userspace qemu-kvm side is not covered.
2. Overall flow (a userspace sketch follows the call tree):
kvm_vm_ioctl()                     // entry for kvm VM ioctls
  kvm_vm_ioctl_create_vcpu()       // entry function of the ioctl that creates a VCPU for the VM
    kvm_arch_vcpu_create()         // create the vcpu structure; arch-specific, for Intel x86 this ends up in vmx_create_vcpu
    kvm_arch_vcpu_setup()          // set up the VCPU structure
    create_vcpu_fd()               // create an fd for the new vcpu, so it can be controlled via later ioctls
    kvm_arch_vcpu_postcreate()     // arch-specific follow-up work, e.g. calling vcpu_load again and TSC handling
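Continuing the userspace sketch from Part 1 at this step of the flow: a minimal helper (the function name create_vcpu0 and its parameters are hypothetical) that issues KVM_CREATE_VCPU on the VM fd and maps the vcpu's kvm_run area, which the KVM_RUN loop in Part 4 relies on.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

/* Create vcpu 0 for an existing VM and map its kvm_run area. */
static struct kvm_run *create_vcpu0(int kvm_fd, int vm_fd, int *vcpu_fd_out)
{
    /* This is the call handled by kvm_vm_ioctl_create_vcpu() below. */
    int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0 /* vcpu id */);
    if (vcpu_fd < 0) {
        perror("KVM_CREATE_VCPU");
        exit(EXIT_FAILURE);
    }

    /*
     * The kvm_run structure used by KVM_RUN (Part 4) is shared with the
     * kernel by mmap'ing the vcpu fd; its size is reported by a System
     * ioctl on the /dev/kvm fd.
     */
    long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
    struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                               MAP_SHARED, vcpu_fd, 0);
    if (run == MAP_FAILED) {
        perror("mmap kvm_run");
        exit(EXIT_FAILURE);
    }

    *vcpu_fd_out = vcpu_fd;
    return run;
}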
3. Code analysis
The kvm_vcpu structure:
struct kvm_vcpu {
    // points to the kvm structure of the VM this vcpu belongs to
    struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
    struct preempt_notifier preempt_notifier;
#endif
    int cpu;
    // vcpu id, uniquely identifying this vcpu
    int vcpu_id;
    int srcu_idx;
    int mode;
    unsigned long requests;
    unsigned long guest_debug;

    struct mutex mutex;
    // the kvm_run structure of this vcpu (shared with userspace via mmap)
    struct kvm_run *run;

    int fpu_active;
    int guest_fpu_loaded, guest_xcr0_loaded;
    wait_queue_head_t wq;
    struct pid *pid;
    int sigset_active;
    sigset_t sigset;
    // vcpu statistics
    struct kvm_vcpu_stat stat;
    // mmio handling
#ifdef CONFIG_HAS_IOMEM
    int mmio_needed;
    int mmio_read_completed;
    int mmio_is_write;
    int mmio_cur_fragment;
    int mmio_nr_fragments;
    struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
    struct {
        u32 queued;
        struct list_head queue;
        struct list_head done;
        spinlock_t lock;
    } async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
    /*
     * Cpu relax intercept or pause loop exit optimization
     * in_spin_loop: set when a vcpu does a pause loop exit
     * or cpu relax intercepted.
     * dy_eligible: indicates whether vcpu is eligible for directed yield.
     */
    struct {
        bool in_spin_loop;
        bool dy_eligible;
    } spin_loop;
#endif
    bool preempted;
    // arch-specific part: registers, apic, mmu and other arch-related state
    struct kvm_vcpu_arch arch;
};
kvm_vm_ioctl()-->kvm_vm_ioctl_create_vcpu():
/*
 * Entry function of the ioctl that creates a VCPU for a VM. It essentially
 * creates and initializes a vcpu structure and installs it into the kvm
 * structure.
 */
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
    int r;
    struct kvm_vcpu *vcpu, *v;

    // create the vcpu structure; arch-specific, for Intel x86 this ends up in vmx_create_vcpu
    vcpu = kvm_arch_vcpu_create(kvm, id);
    if (IS_ERR(vcpu))
        return PTR_ERR(vcpu);

    preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);

    /*
     * Set up the vcpu structure. This mainly calls kvm_x86_ops->vcpu_load
     * to load the KVM VCPU state onto the physical CPU, and performs the
     * VM's mmu-related setup, e.g. the initial EPT page-table work or the
     * shadow page-table setup.
     */
    r = kvm_arch_vcpu_setup(vcpu);
    if (r)
        goto vcpu_destroy;

    mutex_lock(&kvm->lock);
    if (!kvm_vcpu_compatible(vcpu)) {
        r = -EINVAL;
        goto unlock_vcpu_destroy;
    }
    if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
        r = -EINVAL;
        goto unlock_vcpu_destroy;
    }

    // check whether the requested vcpu id already exists
    kvm_for_each_vcpu(r, v, kvm)
        if (v->vcpu_id == id) {
            r = -EEXIST;
            goto unlock_vcpu_destroy;
        }
    /*
     * kvm->vcpus[] holds all vcpus of this VM; it is defined as an array
     * of size KVM_MAX_VCPUS. When the kvm structure is initialized all
     * its members are zeroed, so a non-NULL entry before the vcpu has
     * been installed would be a bug.
     */
    BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);

    /* Now it's all set up, let userspace reach it */
    // take a reference on kvm
    kvm_get_kvm(kvm);
    // create an fd for the new vcpu, so it can be controlled via later ioctls
    r = create_vcpu_fd(vcpu);
    if (r < 0) {
        kvm_put_kvm(kvm);
        goto unlock_vcpu_destroy;
    }

    // install the new vcpu into the kvm->vcpus[] array
    kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
    // write barrier: make the vcpus[] entry visible before online_vcpus is bumped
    smp_wmb();
    // increase the number of online vcpus
    atomic_inc(&kvm->online_vcpus);

    mutex_unlock(&kvm->lock);
    // arch-specific follow-up work, e.g. calling vcpu_load again and TSC handling
    kvm_arch_vcpu_postcreate(vcpu);
    return r;

unlock_vcpu_destroy:
    mutex_unlock(&kvm->lock);
vcpu_destroy:
    kvm_arch_vcpu_destroy(vcpu);
    return r;
}
kvm_vm_ioctl()-->kvm_vm_ioctl_create_vcpu()-->kvm_arch_vcpu_create()-->kvm_x86_ops->vcpu_create()-->vmx_create_vcpu():
/*
 * Create and initialize the arch-specific part of a VCPU on Intel x86
 */
static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
    int err;
    // allocate the vcpu_vmx structure from the slab; it holds the VMX hardware-related state
    struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
    int cpu;

    if (!vmx)
        return ERR_PTR(-ENOMEM);
    // allocate a vpid, the VCPU's unique identifier (used to tag TLB entries)
    allocate_vpid(vmx);
    // initialize the vcpu structure embedded in vmx
    err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
    if (err)
        goto free_vcpu;
    // allocate the save area for the guest's MSRs
    vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
    err = -ENOMEM;
    if (!vmx->guest_msrs) {
        goto uninit_vcpu;
    }

    vmx->loaded_vmcs = &vmx->vmcs01;
    /*
     * Allocate the VMCS, the structure holding the architectural state of
     * the guest and of the hypervisor. On VM exit and VM entry, VT-x
     * automatically switches between guest and hypervisor state according
     * to the contents of the VMCS.
     */
    vmx->loaded_vmcs->vmcs = alloc_vmcs();
    if (!vmx->loaded_vmcs->vmcs)
        goto free_msrs;
    // is vmm_exclusive unset?
    if (!vmm_exclusive)
        // the VMXON instruction enters VMX operation
        kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
    loaded_vmcs_init(vmx->loaded_vmcs);
    if (!vmm_exclusive)
        // the VMXOFF instruction leaves VMX operation
        kvm_cpu_vmxoff();
    // current cpu
    cpu = get_cpu();
    // load the KVM VCPU state onto the physical CPU
    vmx_vcpu_load(&vmx->vcpu, cpu);
    vmx->vcpu.cpu = cpu;
    // set up the vmx state
    err = vmx_vcpu_setup(vmx);
    vmx_vcpu_put(&vmx->vcpu);
    put_cpu();
    if (err)
        goto free_vmcs;
    if (vm_need_virtualize_apic_accesses(kvm)) {
        err = alloc_apic_access_page(kvm);
        if (err)
            goto free_vmcs;
    }
    // is EPT supported?
    if (enable_ept) {
        if (!kvm->arch.ept_identity_map_addr)
            kvm->arch.ept_identity_map_addr =
                VMX_EPT_IDENTITY_PAGETABLE_ADDR;
        err = -ENOMEM;
        // allocate the identity page table
        if (alloc_identity_pagetable(kvm) != 0)
            goto free_vmcs;
        // initialize the identity page table
        if (!init_rmode_identity_map(kvm))
            goto free_vmcs;
    }

    vmx->nested.current_vmptr = -1ull;
    vmx->nested.current_vmcs12 = NULL;

    return &vmx->vcpu;

free_vmcs:
    free_loaded_vmcs(vmx->loaded_vmcs);
free_msrs:
    kfree(vmx->guest_msrs);
uninit_vcpu:
    kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
    free_vpid(vmx);
    kmem_cache_free(kvm_vcpu_cache, vmx);
    return ERR_PTR(err);
}
kvm_vm_ioctl()-->kvm_vm_ioctl_create_vcpu()-->kvm_arch_vcpu_setup():
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
    int r;

    vcpu->arch.mtrr_state.have_fixed = 1;
    // load the KVM VCPU state onto the physical CPU
    r = vcpu_load(vcpu);
    if (r)
        return r;
    // reset the vcpu, including registers, clocks, pmu, etc.; ends up in vmx_vcpu_reset
    kvm_vcpu_reset(vcpu);
    /*
     * Do the VM's mmu-related setup, e.g. the initial EPT page-table
     * work or the shadow page-table setup.
     */
    r = kvm_mmu_setup(vcpu);
    vcpu_put(vcpu);

    return r;
}
kvm_vm_ioctl()-->kvm_vm_ioctl_create_vcpu()-->kvm_arch_vcpu_setup()-->kvm_mmu_setup()-->init_kvm_mmu():
static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
    // nested MMU initialization, used when the vcpu is running a nested (L2) guest
    if (mmu_is_nested(vcpu))
        return init_kvm_nested_mmu(vcpu);
    /*
     * Two-dimensional paging initialization (EPT on Intel, the counterpart
     * of AMD's NPT; hardware-assisted memory virtualization). This mostly
     * sets up function pointers, notably the page-fault handler.
     */
    else if (tdp_enabled)
        return init_kvm_tdp_mmu(vcpu);
    // shadow page table (software memory virtualization) initialization
    else
        return init_kvm_softmmu(vcpu);
}
Part 3 --- The KVM_SET_USER_MEMORY_REGION flow
1. Basic principles
As analyzed previously, a kvm virtual machine actually runs in the process context of qemu-kvm, so a mapping must be established between the VM's physical address space (GPA) and the virtual address space (HVA) of the qemu-kvm process.
The VM's physical address space is itself not contiguous; it is divided into different memory regions (slots), since a physical address space usually also contains BIOS, MMIO, video memory, ISA-reserved regions and so on.
qemu-kvm sets up the VM's memory through the KVM_SET_USER_MEMORY_REGION VM ioctl. It mainly maps memory regions of the guest physical address space to memory regions of the qemu-kvm virtual address space, thereby establishing the GPA-to-HVA correspondence. This correspondence is kept in kvm_memory_slot structures, so the ioctl essentially sets up kvm_memory_slot structures.
This article outlines the in-kernel execution flow of the KVM_SET_USER_MEMORY_REGION VM ioctl; the userspace qemu-kvm side is not covered.
2. Overall flow
The main in-kernel flow of the KVM_SET_USER_MEMORY_REGION VM ioctl is as follows (a userspace sketch of the triggering ioctl follows the call tree):
kvm_vm_ioctl()
  kvm_vm_ioctl_set_memory_region()
    kvm_set_memory_region()
      __kvm_set_memory_region()
        kvm_iommu_unmap_pages()    // the old slot is being deleted, so unmap the corresponding memory region
        install_new_memslots()     // install the newly allocated memslots into kvm->memslots
        kvm_free_physmem_slot()    // free the physical memory (HPA) backing the old memory region
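For reference, here is the userspace side of this ioctl: a minimal sketch (the slot number, size and GPA are arbitrary illustration values) that allocates anonymous memory in the qemu-kvm process as the HVA range and registers it as guest physical memory.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

#define GUEST_MEM_SIZE 0x100000 /* 1 MiB of guest RAM for illustration */

/* Register a 1 MiB region at GPA 0, backed by anonymous host memory. */
static void *setup_guest_memory(int vm_fd)
{
    /* This mapping is the HVA side of the GPA <-> HVA correspondence. */
    void *hva = mmap(NULL, GUEST_MEM_SIZE, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (hva == MAP_FAILED) {
        perror("mmap guest memory");
        exit(EXIT_FAILURE);
    }

    struct kvm_userspace_memory_region region = {
        .slot = 0,                     /* memory slot id */
        .flags = 0,                    /* e.g. KVM_MEM_LOG_DIRTY_PAGES */
        .guest_phys_addr = 0x0,        /* GPA where the region starts */
        .memory_size = GUEST_MEM_SIZE, /* must be page-aligned */
        .userspace_addr = (unsigned long)hva, /* HVA backing the region */
    };

    /* This is the call handled by __kvm_set_memory_region() below. */
    if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0) {
        perror("KVM_SET_USER_MEMORY_REGION");
        exit(EXIT_FAILURE);
    }
    return hva;
}

Deleting a slot goes through the same ioctl with memory_size set to 0, which corresponds to the KVM_MR_DELETE case analyzed below.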
3. Code analysis
The kvm_memory_slot structure:
/*
 * Since a GPA cannot be used directly by the physical MMU for address
 * translation, GPAs must be translated to HVAs. KVM uses the
 * kvm_memory_slot structure to record the GPA-to-HVA mapping of each
 * address range (a physical address range of the guest).
 */
struct kvm_memory_slot {
    // page frame number corresponding to the guest physical address (GPA)
    gfn_t base_gfn;
    // number of pages in this slot
    unsigned long npages;
    // dirty page bitmap
    unsigned long *dirty_bitmap;
    // arch-specific part
    struct kvm_arch_memory_slot arch;
    /*
     * Host virtual address (HVA) corresponding to the GPA. Since a VM
     * runs inside qemu's address space and qemu is a userspace program,
     * this is normally a root-mode userspace address.
     */
    unsigned long userspace_addr;
    u32 flags;
    short id;
};
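Given these fields, translating a guest frame number (GFN) into an HVA is plain arithmetic. A sketch in the spirit of the kernel's __gfn_to_hva_memslot() helper:

/*
 * Sketch: guest frame number -> HVA within one slot.
 * A gfn belongs to the slot if base_gfn <= gfn < base_gfn + npages;
 * the HVA is then an offset into the slot's userspace mapping.
 */
static unsigned long gfn_to_hva_slot(struct kvm_memory_slot *slot, gfn_t gfn)
{
    return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}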
kvm_vm_ioctl():
/*
 * Entry for kvm VM ioctls; the fd passed in is the one returned by
 * KVM_CREATE_VM. Used to control the VM, e.g. memory setup and VCPU
 * creation.
 */
static long kvm_vm_ioctl(struct file *filp,
               unsigned int ioctl, unsigned long arg)
{
    struct kvm *kvm = filp->private_data;
    void __user *argp = (void __user *)arg;
    int r;

    if (kvm->mm != current->mm)
        return -EIO;
    switch (ioctl) {
    // create a VCPU
    case KVM_CREATE_VCPU:
        r = kvm_vm_ioctl_create_vcpu(kvm, arg);
        break;
    // map a memory region of the guest physical address space to one of the qemu-kvm virtual address space
    case KVM_SET_USER_MEMORY_REGION: {
        // structure describing the memory region; the region itself is allocated from the qemu-kvm process's user address space
        struct kvm_userspace_memory_region kvm_userspace_mem;

        r = -EFAULT;
        // copy the data from userspace into the kernel; argp points to a userspace address
        if (copy_from_user(&kvm_userspace_mem, argp,
                        sizeof kvm_userspace_mem))
            goto out;
        // enter the actual handling flow
        r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
        break;
    }
    ...
kvm_vm_ioctl()-->kvm_vm_ioctl_set_memory_region()-->kvm_set_memory_region()-->__kvm_set_memory_region()
/*
 * Map a memory region of the guest physical address space to a memory
 * region of the qemu-kvm virtual address space. The region is described
 * by the userspace_memory_region argument, which originates from the
 * userspace qemu-kvm. Each call sets up one memory range. Memory regions
 * may be discontiguous (real physical address spaces are often
 * discontiguous too, e.g. because of reserved memory).
 */
int __kvm_set_memory_region(struct kvm *kvm,
                struct kvm_userspace_memory_region *mem)
{
    int r;
    gfn_t base_gfn;
    unsigned long npages;
    struct kvm_memory_slot *slot;
    struct kvm_memory_slot old, new;
    struct kvm_memslots *slots = NULL, *old_memslots;
    enum kvm_mr_change change;

    // flag check
    r = check_memory_region_flags(mem);
    if (r)
        goto out;

    r = -EINVAL;
    /* General sanity checks */
    // sanity checks, guarding against malicious userspace arguments and the resulting security holes
    if (mem->memory_size & (PAGE_SIZE - 1))
        goto out;
    if (mem->guest_phys_addr & (PAGE_SIZE - 1))
        goto out;
    /* We can read the guest memory with __xxx_user() later on. */
    if ((mem->slot < KVM_USER_MEM_SLOTS) &&
        ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
         !access_ok(VERIFY_WRITE,
            (void __user *)(unsigned long)mem->userspace_addr,
            mem->memory_size)))
        goto out;
    if (mem->slot >= KVM_MEM_SLOTS_NUM)
        goto out;
    if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
        goto out;
    // look up the kvm_memory_slot for kvm_userspace_memory_region->slot in kvm->memslots
    slot = id_to_memslot(kvm->memslots, mem->slot);
    // frame number of the region's start within the guest physical address space
    base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
    // region size converted to pages
    npages = mem->memory_size >> PAGE_SHIFT;

    r = -EINVAL;
    if (npages > KVM_MEM_MAX_NR_PAGES)
        goto out;

    if (!npages)
        mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;

    new = old = *slot;

    new.id = mem->slot;
    new.base_gfn = base_gfn;
    new.npages = npages;
    new.flags = mem->flags;

    r = -EINVAL;
    if (npages) {
        // does a new memory region need to be created?
        if (!old.npages)
            change = KVM_MR_CREATE;
        // or is an existing region being modified?
        else { /* Modify an existing slot. */
            // if the modified region differs in HVA, in size, or in the
            // KVM_MEM_READONLY flag, bail out immediately
            if ((mem->userspace_addr != old.userspace_addr) ||
                (npages != old.npages) ||
                ((new.flags ^ old.flags) & KVM_MEM_READONLY))
                goto out;
            /*
             * Reaching here means the modified region has the same HVA
             * and size. If the region's starting GFN differs, the region
             * is being moved within the guest physical address space:
             * set KVM_MR_MOVE.
             */
            if (base_gfn != old.base_gfn)
                change = KVM_MR_MOVE;
            // if only the flags differ, just update them: set KVM_MR_FLAGS_ONLY
            else if (new.flags != old.flags)
                change = KVM_MR_FLAGS_ONLY;
            // otherwise there is nothing to do
            else { /* Nothing to change. */
                r = 0;
                goto out;
            }
        }
    } else if (old.npages) { /* the new size is 0 but the old size is not: the existing region is being deleted */
        change = KVM_MR_DELETE;
    } else /* Modify a non-existent slot: disallowed. */
        goto out;

    if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
        /* Check for overlaps */
        r = -EEXIST;
        // check for overlap with existing regions
        kvm_for_each_memslot(slot, kvm->memslots) {
            if ((slot->id >= KVM_USER_MEM_SLOTS) ||
                (slot->id == mem->slot))
                continue;
            if (!((base_gfn + npages <= slot->base_gfn) ||
                  (base_gfn >= slot->base_gfn + slot->npages)))
                goto out;
        }
    }

    /* Free page dirty bitmap if unneeded */
    if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
        new.dirty_bitmap = NULL;

    r = -ENOMEM;
    // a new region needs to be created
    if (change == KVM_MR_CREATE) {
        new.userspace_addr = mem->userspace_addr;
        // set up the arch-specific part of the new memory region
        if (kvm_arch_create_memslot(&new, npages))
            goto out_free;
    }

    /* Allocate page dirty bitmap if needed */
    if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
        if (kvm_create_dirty_bitmap(&new) < 0)
            goto out_free;
    }
    // the region is being deleted or moved
    if ((change == KVM_MR_DELETE) || (change == KVM_MR_MOVE)) {
        r = -ENOMEM;
        // duplicate kvm->memslots
        slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
                GFP_KERNEL);
        if (!slots)
            goto out_free;
        slot = id_to_memslot(slots, mem->slot);
        slot->flags |= KVM_MEMSLOT_INVALID;
        // install the new memslots and get the old ones back
        old_memslots = install_new_memslots(kvm, slots, NULL);

        /* slot was deleted or moved, clear iommu mapping */
        // the old slot is going away, so unmap the corresponding memory region
        kvm_iommu_unmap_pages(kvm, &old);
        /* From this point no new shadow pages pointing to a deleted,
         * or moved, memslot will be created.
         *
         * validation of sp->gfn happens in:
         * - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
         * - kvm_is_visible_gfn (mmu_check_roots)
         */
        // flush the shadow page-table entries for this memslot
        kvm_arch_flush_shadow_memslot(kvm, slot);
        slots = old_memslots;
    }
    // handle private memory slots: allocate a userspace address (HVA) for them
    r = kvm_arch_prepare_memory_region(kvm, &new, mem, change);
    if (r)
        goto out_slots;

    r = -ENOMEM;
    /*
     * We can re-use the old_memslots from above, the only difference
     * from the currently installed memslots is the invalid flag. This
     * will get overwritten by update_memslots anyway.
     */
    if (!slots) {
        slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
                GFP_KERNEL);
        if (!slots)
            goto out_free;
    }

    /*
     * IOMMU mapping: New slots need to be mapped. Old slots need to be
     * un-mapped and re-mapped if their base changes. Since base change
     * unmapping is handled above with slot deletion, mapping alone is
     * needed here. Anything else the iommu might care about for existing
     * slots (size changes, userspace addr changes and read-only flag
     * changes) is disallowed above, so any other attribute changes getting
     * here can be skipped.
     */
    if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
        r = kvm_iommu_map_pages(kvm, &new);
        if (r)
            goto out_slots;
    }

    /* actual memory is freed via old in kvm_free_physmem_slot below */
    if (change == KVM_MR_DELETE) {
        new.dirty_bitmap = NULL;
        memset(&new.arch, 0, sizeof(new.arch));
    }
    // install the new memslots into kvm->memslots
    old_memslots = install_new_memslots(kvm, slots, &new);

    kvm_arch_commit_memory_region(kvm, mem, &old, change);
    // free the physical memory (HPA) backing the old memory region
    kvm_free_physmem_slot(&old, &new);
    kfree(old_memslots);

    return 0;

out_slots:
    kfree(slots);
out_free:
    kvm_free_physmem_slot(&new, &old);
out:
    return r;
}
Part 4 --- Running the virtual machine
1. Basic principles
A KVM virtual machine is created and run through the ioctl interface of the /dev/kvm character device; see earlier articles for the underlying principles.
The VM is run through the KVM_RUN VCPU ioctl on the /dev/kvm interface. Once the VM and its VCPUs have been created and initialized, the VM can be scheduled to run. Normally one VCPU corresponds to one thread, so running a VM essentially means scheduling the threads of that VM's VCPUs. Running a VM (VCPU) is mainly a matter of context switching; the context consists mainly of the relevant registers, APIC state, TLB, and so on. A context switch usually proceeds as follows:
1. Save the current context.
2. Load the context information from the kvm_vcpu structure onto the physical CPU.
3. Execute the run callback in kvm_x86_ops, which issues the relevant hardware instruction (e.g. VMLAUNCH) to enter the guest execution environment.
The VM runs in the process context of qemu-kvm. From the hardware's point of view, running a VM is simply executing instructions: the compiled VM is just a sequence of CPU instructions, and guest instructions execute much like host instructions. The key difference lies in "sensitive instructions" (typically critical I/O or memory operations), and this is the essence of virtualization: when the guest (in guest mode) executes a sensitive instruction, the hardware triggers a VM-exit, switching the CPU from guest (non-root) mode to root mode and handing control to the VMM (the hypervisor; in KVM, the host). The VMM handles the exit and then re-enters guest mode through the corresponding hardware instruction (e.g. VMLAUNCH), resuming execution inside the VM.
This article briefly explains and analyzes the relevant flow in the 3.10 kernel code; the userspace qemu-kvm side is not covered.
2. Overall flow:
qemu-kvm runs the VM via ioctl(KVM_RUN, ...), which eventually enters the kernel, where the KVM code handles it. The rough in-kernel flow:
kvm_vcpu_ioctl -->
  kvm_arch_vcpu_ioctl_run
The actual work is done by the kernel function kvm_arch_vcpu_ioctl_run. The main steps are the following (see the userspace sketch after this list):
1. sigprocmask() blocks signals, so the process is not disturbed by signals during this phase.
2. Set the current VCPU state to KVM_MP_STATE_UNINITIALIZED (this seems to happen during VCPU init instead, in kvm_arch_vcpu_init?).
3. Configure the APIC and mmio-related information (only the APIC creation could be found, in kvm_arch_vcpu_init; the MMIO part was not located??).
4. Write the context saved in the VCPU to the designated location (not located either; the switch presumably should not happen right away here).
5. The rest of the work is handed over to __vcpu_run.
6. __vcpu_run eventually calls vcpu_enter_guest, which implements entering the guest and executing the guest OS's instructions.
7. vcpu_enter_guest eventually calls the run callback in kvm_x86_ops. On Intel platforms this is vmx_vcpu_run (set the guest CR3 and other registers, do the EPT/shadow page-table related setup, then switch to non-root mode via the VMLAUNCH assembly and execute the guest code).
8. When the guest code hits a sensitive instruction, or for some other reason (e.g. an interrupt/exception), a VM-Exit leaves non-root mode and execution returns to vcpu_enter_guest.
9. vcpu_enter_guest determines the VM-Exit reason and handles it accordingly.
10. After handling, VM-Entry back into the guest to continue executing guest code, or wait for the next scheduling round.
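A minimal userspace sketch of this loop, assuming the vcpu_fd and the mmap'ed kvm_run area from the Part 2 sketch: every return from ioctl(KVM_RUN) corresponds to a VM-exit that the kernel forwarded to userspace, and run->exit_reason says why.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Run the vcpu until the guest halts or an error occurs. */
static void run_vcpu(int vcpu_fd, struct kvm_run *run)
{
    for (;;) {
        /* Enters the kernel; ends up in kvm_arch_vcpu_ioctl_run(). */
        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
            perror("KVM_RUN");
            exit(EXIT_FAILURE);
        }

        /* Back in userspace: a VM-exit was forwarded to us. */
        switch (run->exit_reason) {
        case KVM_EXIT_HLT:
            /* the guest executed HLT */
            return;
        case KVM_EXIT_IO:
            /* a port I/O access that the kernel did not emulate itself;
             * the data sits inside the mmap'ed kvm_run area */
            printf("guest port I/O: port 0x%x, %s\n",
                   run->io.port,
                   run->io.direction == KVM_EXIT_IO_OUT ? "out" : "in");
            break;
        case KVM_EXIT_MMIO:
            printf("guest MMIO at 0x%llx\n",
                   (unsigned long long)run->mmio.phys_addr);
            break;
        default:
            fprintf(stderr, "unhandled exit reason %d\n",
                    run->exit_reason);
            exit(EXIT_FAILURE);
        }
    }
}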
3. Code analysis
kvm_vcpu_ioctl():
/*
 * Entry for kvm VCPU ioctls; the fd passed in is the one returned by
 * KVM_CREATE_VCPU. Used for per-VCPU control, e.g. reading and writing
 * registers or controlling interrupts.
 */
static long kvm_vcpu_ioctl(struct file *filp,
               unsigned int ioctl, unsigned long arg)
{
    struct kvm_vcpu *vcpu = filp->private_data;
    void __user *argp = (void __user *)arg;
    int r;
    struct kvm_fpu *fpu = NULL;
    struct kvm_sregs *kvm_sregs = NULL;
    if (vcpu->kvm->mm != current->mm)
        return -EIO;
#if defined(CONFIG_S390) || defined(CONFIG_PPC) || defined(CONFIG_MIPS)
    /*
     * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
     * so vcpu_load() would break it.
     */
    if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
        return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif
    // load the KVM VCPU state onto the physical CPU
    r = vcpu_load(vcpu);
    if (r)
        return r;
    switch (ioctl) {
    /*
     * Run the VM: eventually the VMLAUNCH instruction enters non-root
     * mode and the VM starts running. When the VM executes a sensitive
     * instruction, the hardware triggers a VM-exit back to root mode.
     */
    case KVM_RUN:
        r = -EINVAL;
        // no argument allowed
        if (arg)
            goto out;
        // entry function for running the VCPU (i.e. running the VM)
        r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
        trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
        break;

...
kvm_vcpu_ioctl()-->kvm_arch_vcpu_ioctl_run()-->__vcpu_run():
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
    int r;
    struct kvm *kvm = vcpu->kvm;
    vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
    /* set up vcpu->arch.apic->vapic_page */
    r = vapic_enter(vcpu);
    if (r) {
        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
        return r;
    }
    r = 1;
    while (r > 0) {
        /* check the state */
        if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
            !vcpu->arch.apf.halted)
            /* enter guest mode, eventually via the VMLAUNCH instruction */
            r = vcpu_enter_guest(vcpu);
        else { /* under what circumstances do we get here? */
            srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
            /* block the VCPU: essentially schedule() away, except in special cases (e.g. a pending timer or signal), where it returns directly without scheduling */
            kvm_vcpu_block(vcpu);
            vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
            if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
                kvm_apic_accept_events(vcpu);
                switch(vcpu->arch.mp_state) {
                case KVM_MP_STATE_HALTED:
                    vcpu->arch.pv.pv_unhalted = false;
                    vcpu->arch.mp_state =
                        KVM_MP_STATE_RUNNABLE;
                case KVM_MP_STATE_RUNNABLE:
                    vcpu->arch.apf.halted = false;
                    break;
                case KVM_MP_STATE_INIT_RECEIVED:
                    break;
                default:
                    r = -EINTR;
                    break;
                }
            }
        }
        if (r <= 0)
            break;
        clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
        if (kvm_cpu_has_pending_timer(vcpu))
            kvm_inject_pending_timer_irqs(vcpu);
        if (dm_request_for_irq_injection(vcpu)) {
            r = -EINTR;
            vcpu->run->exit_reason = KVM_EXIT_INTR;
            ++vcpu->stat.request_irq_exits;
        }
        kvm_check_async_pf_completion(vcpu);
        if (signal_pending(current)) {
            r = -EINTR;
            vcpu->run->exit_reason = KVM_EXIT_INTR;
            ++vcpu->stat.signal_exits;
        }
        /* this is one of KVM's scheduling points, i.e. a point where a new VCPU may be picked to run */
        if (need_resched()) {
            srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
            kvm_resched(vcpu);
            vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
        }
    }
    srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
    vapic_exit(vcpu);
    return r;
}
kvm_vcpu_ioctl()-->kvm_arch_vcpu_ioctl_run()-->__vcpu_run()-->vcpu_enter_guest():
/* Enter guest mode, eventually via the VMLAUNCH instruction */
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
    int r;
    bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
        vcpu->run->request_interrupt_window;
    bool req_immediate_exit = false;
    /* before entering guest mode, handle pending requests */
    if (vcpu->requests) {
        /* unload the MMU */
        if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
            kvm_mmu_unload(vcpu);
        /* timer migration */
        if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
            __kvm_migrate_timers(vcpu);
        /* master clock update */
        if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
            kvm_gen_update_masterclock(vcpu->kvm);
        /* global clock update */
        if (kvm_check_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu))
            kvm_gen_kvmclock_update(vcpu);
        /* guest clock update */
        if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
            r = kvm_guest_time_update(vcpu);
            if (unlikely(r))
                goto out;
        }
        /* sync the mmu */
        if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
            kvm_mmu_sync_roots(vcpu);
        /* flush the TLB */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
            kvm_x86_ops->tlb_flush(vcpu);
        if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
            vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
            r = 0;
            goto out;
        }
        if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
            vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
            r = 0;
            goto out;
        }
        if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
            vcpu->fpu_active = 0;
            kvm_x86_ops->fpu_deactivate(vcpu);
        }
        if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
            /* Page is swapped out. Do synthetic halt */
            vcpu->arch.apf.halted = true;
            r = 1;
            goto out;
        }
        if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
            record_steal_time(vcpu);
        if (kvm_check_request(KVM_REQ_NMI, vcpu))
            process_nmi(vcpu);
        if (kvm_check_request(KVM_REQ_PMU, vcpu))
            kvm_handle_pmu_event(vcpu);
        if (kvm_check_request(KVM_REQ_PMI, vcpu))
            kvm_deliver_pmi(vcpu);
        if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
            vcpu_scan_ioapic(vcpu);
    }
    // check for event requests
    if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
        kvm_apic_accept_events(vcpu);
        if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
            r = 1;
            goto out;
        }
        // inject pending events: interrupts, exceptions, NMIs, etc.
        inject_pending_event(vcpu);
        /* enable NMI/IRQ window open exits if needed */
        /*
         * Enable the NMI/IRQ window; see the Intel 64 System Programming
         * Guide, section 25.3 (p. 366). When interrupt-window exiting or
         * NMI-window exiting is enabled (controlled by fields in the
         * VMCS), the VM exits right after entry because of the pending
         * or injected interrupt.
         */
        if (vcpu->arch.nmi_pending)
            req_immediate_exit =
                kvm_x86_ops->enable_nmi_window(vcpu) != 0;
        else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
            req_immediate_exit =
                kvm_x86_ops->enable_irq_window(vcpu) != 0;
        if (kvm_lapic_enabled(vcpu)) {
            /*
             * Update architecture specific hints for APIC
             * virtual interrupt delivery.
             */
            if (kvm_x86_ops->hwapic_irr_update)
                kvm_x86_ops->hwapic_irr_update(vcpu,
                    kvm_lapic_find_highest_irr(vcpu));
            update_cr8_intercept(vcpu);
            kvm_lapic_sync_to_vapic(vcpu);
        }
    }
    // reload the MMU (deserves deeper analysis)
    r = kvm_mmu_reload(vcpu);
    if (unlikely(r)) {
        goto cancel_injection;
    }
    preempt_disable();
    // arch-specific preparation before entering the guest
    kvm_x86_ops->prepare_guest_switch(vcpu);
    if (vcpu->fpu_active)
        kvm_load_guest_fpu(vcpu);
    kvm_load_guest_xcr0(vcpu);
    vcpu->mode = IN_GUEST_MODE;
    /* We should set ->mode before check ->requests,
     * see the comment in make_all_cpus_request.
     */
    smp_mb();
    local_irq_disable();
    /*
     * Give up if the VCPU is in EXITING_GUEST_MODE, if vcpu->requests is
     * set (?), if a reschedule is needed, or if a signal is pending.
     */
    if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
        || need_resched() || signal_pending(current)) {
        vcpu->mode = OUTSIDE_GUEST_MODE;
        smp_wmb();
        local_irq_enable();
        preempt_enable();
        r = 1;
        goto cancel_injection;
    }
    srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
    // req_immediate_exit was set above when enabling the NMI/IRQ window failed; exit right away and trigger a reschedule
    if (req_immediate_exit)
        smp_send_reschedule(vcpu->cpu);
    // account the guest entry time
    kvm_guest_enter();
    // debugging support
    if (unlikely(vcpu->arch.switch_db_regs)) {
        set_debugreg(0, 7);
        set_debugreg(vcpu->arch.eff_db[0], 0);
        set_debugreg(vcpu->arch.eff_db[1], 1);
        set_debugreg(vcpu->arch.eff_db[2], 2);
        set_debugreg(vcpu->arch.eff_db[3], 3);
    }
    trace_kvm_entry(vcpu->vcpu_id);
    // call the arch-specific run callback (vmx_vcpu_run) to enter guest mode
    kvm_x86_ops->run(vcpu);

    // from here on, a VM-exit has occurred and guest mode has been left
    /*
     * If the guest has used debug registers, at least dr7
     * will be disabled while returning to the host.
     * If we don't have active breakpoints in the host, we don't
     * care about the messed up debug address registers. But if
     * we have some of them active, restore the old state.
     */
    if (hw_breakpoint_active())
        hw_breakpoint_restore();
    /* record the TSC value from just before the guest exited */
    vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
                               native_read_tsc());
    // set the mode
    vcpu->mode = OUTSIDE_GUEST_MODE;
    smp_wmb();
    /* Interrupt is enabled by handle_external_intr() */
    kvm_x86_ops->handle_external_intr(vcpu);
    ++vcpu->stat.exits;
    /*
     * We must have an instruction between local_irq_enable() and
     * kvm_guest_exit(), so the timer interrupt isn't delayed by
     * the interrupt shadow. The stat.exits increment will do nicely.
     * But we need to prevent reordering, hence this barrier():
     */
    barrier();
    // account the guest exit time (interrupts get re-enabled along the way?)
    kvm_guest_exit();

    preempt_enable();
    vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
    /*
     * Profile KVM exit RIPs:
     */
    // profiling (sample counting, for performance analysis and tuning)
    if (unlikely(prof_on == KVM_PROFILING)) {
        unsigned long rip = kvm_rip_read(vcpu);
        profile_hit(KVM_PROFILING, (void *)rip);
    }
    if (unlikely(vcpu->arch.tsc_always_catchup))
        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
    if (vcpu->arch.apic_attention)
        kvm_lapic_sync_from_vapic(vcpu);
    /*
     * Call vmx_handle_exit() to handle the VM-exit; the exit reason and
     * other key information were already read out earlier.
     */
    r = kvm_x86_ops->handle_exit(vcpu);
    return r;
cancel_injection:
    kvm_x86_ops->cancel_injection(vcpu);
    if (unlikely(vcpu->arch.apic_attention))
        kvm_lapic_sync_from_vapic(vcpu);
out:
    return r;
}
kvm_vcpu_ioctl()-->kvm_arch_vcpu_ioctl_run()-->__vcpu_run()-->vcpu_enter_guest()-->vmx_vcpu_run():
/*
 * Run the VM: enter guest mode, i.e. non-root mode
 */
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
    struct vcpu_vmx *vmx = to_vmx(vcpu);
    unsigned long debugctlmsr;
    /* Record the guest's net vcpu time for enforced NMI injections. */
    // NMI injection? related to the nmi_watchdog?
    if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
        vmx->entry_time = ktime_get();
    /* Don't enter VMX if guest state is invalid, let the exit handler
       start emulation until we arrive back to a valid state */
    if (vmx->emulation_required)
        return;
    if (vmx->nested.sync_shadow_vmcs) {
        copy_vmcs12_to_shadow(vmx);
        vmx->nested.sync_shadow_vmcs = false;
    }
    // write the guest's RSP register into the corresponding VMCS field
    if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
        vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
    // write the guest's RIP register into the corresponding VMCS field
    if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
        vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
    /* When single-stepping over STI and MOV SS, we must clear the
     * corresponding interruptibility bits in the guest state. Otherwise
     * vmentry fails as it then expects bit 14 (BS) in pending debug
     * exceptions being set, but that's not correct for the guest debugging
     * case. */
    // when single-stepping, the guest's interrupt shadow must be cleared
    if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
        vmx_set_interrupt_shadow(vcpu, 0);
    atomic_switch_perf_msrs(vmx);
    debugctlmsr = get_debugctlmsr();
    // vmx->__launched records whether this VCPU has already done a VMLAUNCH
    vmx->__launched = vmx->loaded_vmcs->launched;
    // execute VMLAUNCH to enter guest mode; the VM starts running
    asm(
        /* Store host registers */
        /* push the relevant registers onto the stack */
        "push %%" _ASM_DX "; push %%" _ASM_BP ";" /* push BP */
        /* reserve a slot for the guest's rcx register, hence the double push */
        "push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
        "push %%" _ASM_CX " \n\t"
        /*
         * %c emits an immediate without immediate syntax: AT&T assembly
         * prefixes immediates with $, and %c drops that $. It is mainly
         * used for indirect addressing, where a $-prefixed immediate
         * would be a syntax error.
         * [host_rsp] is a tag defined in the input operands below and can
         * be referenced as %[tag]; %0 is the first operand of the
         * input/output lists, i.e. vmx, used here for indirect addressing.
         * Taken together, %c[host_rsp](%0) is the host_rsp member of vmx
         * (passed in register ecx).
         * So the statement below compares the current SP register with
         * vmx->host_rsp.
         */
        /* if the current RSP already equals vmx->host_rsp, skip the mov; otherwise store the current RSP into vmx */
        "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
        "je 1f \n\t"
        "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
        /* execute ASM_VMX_VMWRITE_RSP_RDX; faults here are handled across reboot by __ex() */
        __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
        "1: \n\t"
        /* Reload cr2 if changed */
        /* compare the current CR2 register with the CR2 value saved in vmx; if they differ, reload CR2 from vmx */
        "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
        "mov %%cr2, %%" _ASM_DX " \n\t"
        "cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
        "je 2f \n\t"
        "mov %%" _ASM_AX", %%cr2 \n\t"
        "2: \n\t"
        /* Check if vmlaunch or vmresume is needed */
        /* test vcpu_vmx->__launched to decide whether VMLAUNCH is needed */
        "cmpl $0, %c[launched](%0) \n\t"
        /* Load guest registers. Don't clobber flags. */
        /* load the guest registers, i.e. from vmx */
        "mov %c[rax](%0), %%" _ASM_AX " \n\t"
        "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
        "mov %c[rdx](%0), %%" _ASM_DX " \n\t"
        "mov %c[rsi](%0), %%" _ASM_SI " \n\t"
        "mov %c[rdi](%0), %%" _ASM_DI " \n\t"
        "mov %c[rbp](%0), %%" _ASM_BP " \n\t"
#ifdef CONFIG_X86_64
        "mov %c[r8](%0),  %%r8  \n\t"
        "mov %c[r9](%0),  %%r9  \n\t"
        "mov %c[r10](%0), %%r10 \n\t"
        "mov %c[r11](%0), %%r11 \n\t"
        "mov %c[r12](%0), %%r12 \n\t"
        "mov %c[r13](%0), %%r13 \n\t"
        "mov %c[r14](%0), %%r14 \n\t"
        "mov %c[r15](%0), %%r15 \n\t"
#endif
        "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
        /* Enter guest mode */
        "jne 1f \n\t"
        /* execute VMLAUNCH, entering guest mode */
        __ex(ASM_VMX_VMLAUNCH) "\n\t"
        "jmp 2f \n\t"
        /* if this VM has been launched before, execute VMRESUME for a fast re-entry */
        "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
        "2: "
        /* Save guest registers, load host registers, keep flags */
        "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
        "pop %0 \n\t"
        "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
        "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
        __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
        "mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
        "mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
        "mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
        "mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
#ifdef CONFIG_X86_64
        "mov %%r8,  %c[r8](%0) \n\t"
        "mov %%r9,  %c[r9](%0) \n\t"
        "mov %%r10, %c[r10](%0) \n\t"
        "mov %%r11, %c[r11](%0) \n\t"
        "mov %%r12, %c[r12](%0) \n\t"
        "mov %%r13, %c[r13](%0) \n\t"
        "mov %%r14, %c[r14](%0) \n\t"
        "mov %%r15, %c[r15](%0) \n\t"
#endif
        "mov %%cr2, %%" _ASM_AX "   \n\t"
        "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
        "pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
        "setbe %c[fail](%0) \n\t"
        ".pushsection .rodata \n\t"
        ".global vmx_return \n\t"
        "vmx_return: " _ASM_PTR " 2b \n\t"
        ".popsection"
          : : "c"(vmx), "d"((unsigned long)HOST_RSP),
        [launched]"i"(offsetof(struct vcpu_vmx, __launched)),
        [fail]"i"(offsetof(struct vcpu_vmx, fail)),
        /* [host_rsp] is a tag that can be referenced above as %[host_rsp] */
        [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
        [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
        [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
        [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
        [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
        [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
        [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
        [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
#ifdef CONFIG_X86_64
        [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
        [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
        [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
        [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
        [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
        [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
        [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
        [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
#endif
        [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
        [wordsize]"i"(sizeof(ulong))
          : "cc", "memory" /* clobber list: cc is the flags register, memory is memory */
#ifdef CONFIG_X86_64
        , "rax", "rbx", "rdi", "rsi"
        , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#else
        , "eax", "ebx", "edi", "esi"
#endif
          );
    // reaching this point means a VM-exit has occurred and we are back in root mode
    /* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
    if (debugctlmsr)
        update_debugctlmsr(debugctlmsr);
#ifndef CONFIG_X86_64
    /*
     * The sysexit path does not restore ds/es, so we must set them to
     * a reasonable value ourselves.
     *
     * We can't defer this to vmx_load_host_state() since that function
     * may be executed in interrupt context, which saves and restore segments
     * around it, nullifying its effect.
     */
    /* reload the ds/es segment registers, since a VM-exit does not reload them automatically */
    loadsegment(ds, __USER_DS);
    loadsegment(es, __USER_DS);
#endif
    vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
                  | (1 << VCPU_EXREG_RFLAGS)
                  | (1 << VCPU_EXREG_CPL)
                  | (1 << VCPU_EXREG_PDPTR)
                  | (1 << VCPU_EXREG_SEGMENTS)
                  | (1 << VCPU_EXREG_CR3));
    vcpu->arch.regs_dirty = 0;
    // read the IDT vectoring information from the hardware VMCS
    vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
    vmx->loaded_vmcs->launched = 1;
    // read the VM-exit reason from the hardware VMCS; it was written by the hardware during the VM-exit
    vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
    trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
    /* handle MCE exceptions and NMI interrupts */
    vmx_complete_atomic_exit(vmx);
    vmx_recover_nmi_blocking(vmx);
    vmx_complete_interrupts(vmx);
}