This patch implements VMRUN. VMRUN enters a virtual CPU and runs it
in the same context as the normal guest CPU would run.
So basically it is implemented the same way a normal CPU would do
it.
We also prepare all intercepts that get OR'ed with the original
intercepts, as we do not allow a level 2 guest to be intercepted less
than the first level guest.
v2 implements the following improvements:
- fixes the CPL check
- does not allocate iopm when not used
- remembers the host's IF in the HIF bit in the hflags
v3:
- make use of the new permission checking
- add support for V_INTR_MASKING_MASK
Signed-off-by: Alexander Graf <[EMAIL PROTECTED]>
---
arch/x86/kvm/kvm_svm.h | 9 ++
arch/x86/kvm/svm.c | 198 +++++++++++++++++++++++++++++++++++++++++++++-
include/asm-x86/kvm_host.h | 2 +
3 files changed, 207 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h
index 76ad107..2afe0ce 100644
--- a/arch/x86/kvm/kvm_svm.h
+++ b/arch/x86/kvm/kvm_svm.h
@@ -43,6 +43,15 @@ struct vcpu_svm {
u32 *msrpm;
u64 nested_hsave;
+ u64 nested_vmcb;
+
+ /* These are the merged vectors */
+ u32 *nested_msrpm;
+ u32 *nested_iopm;
+
+ /* gpa pointers to the real vectors */
+ u64 nested_vmcb_msrpm;
+ u64 nested_vmcb_iopm;
};
#endif
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0aa22e5..3601e75 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -51,6 +51,9 @@ MODULE_LICENSE("GPL");
/* Turn on to get debugging output*/
/* #define NESTED_DEBUG */
+/* Not needed until device passthrough */
+/* #define NESTED_KVM_MERGE_IOPM */
+
#ifdef NESTED_DEBUG
#define nsvm_printk(fmt, args...) printk(KERN_INFO fmt, ## args)
#else
@@ -76,6 +79,11 @@ static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct vcpu_svm, vcpu);
}
+static inline bool is_nested(struct vcpu_svm *svm)
+{
+ return svm->nested_vmcb;
+}
+
static unsigned long iopm_base;
struct kvm_ldttss_desc {
@@ -614,6 +622,7 @@ static void init_vmcb(struct vcpu_svm *svm)
force_new_asid(&svm->vcpu);
svm->nested_hsave = 0;
+ svm->nested_vmcb = 0;
svm->vcpu.arch.hflags = HF_GIF_MASK;
}
@@ -639,6 +648,10 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
struct vcpu_svm *svm;
struct page *page;
struct page *msrpm_pages;
+ struct page *nested_msrpm_pages;
+#ifdef NESTED_KVM_MERGE_IOPM
+ struct page *nested_iopm_pages;
+#endif
int err;
svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
@@ -661,9 +674,25 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
if (!msrpm_pages)
goto uninit;
+
+ nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+ if (!nested_msrpm_pages)
+ goto uninit;
+
+#ifdef NESTED_KVM_MERGE_IOPM
+ nested_iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
+ if (!nested_iopm_pages)
+ goto uninit;
+#endif
+
svm->msrpm = page_address(msrpm_pages);
svm_vcpu_init_msrpm(svm->msrpm);
+ svm->nested_msrpm = page_address(nested_msrpm_pages);
+#ifdef NESTED_KVM_MERGE_IOPM
+ svm->nested_iopm = page_address(nested_iopm_pages);
+#endif
+
svm->vmcb = page_address(page);
clear_page(svm->vmcb);
svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
@@ -693,6 +722,10 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
+ __free_pages(virt_to_page(svm->nested_msrpm), MSRPM_ALLOC_ORDER);
+#ifdef NESTED_KVM_MERGE_IOPM
+ __free_pages(virt_to_page(svm->nested_iopm), IOPM_ALLOC_ORDER);
+#endif
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, svm);
}
@@ -1230,6 +1263,138 @@ static int nested_svm_do(struct vcpu_svm *svm,
return retval;
}
+
+static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1,
+ void *arg2, void *opaque)
+{
+ int i;
+ u32 *nested_msrpm = (u32*)arg1;
+ for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
+ svm->nested_msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
+ svm->vmcb->control.msrpm_base_pa = __pa(svm->nested_msrpm);
+
+ return 0;
+}
+
+#ifdef NESTED_KVM_MERGE_IOPM
+static int nested_svm_vmrun_iopm(struct vcpu_svm *svm, void *arg1,
+ void *arg2, void *opaque)
+{
+ int i;
+ u32 *nested_iopm = (u32*)arg1;
+ u32 *iopm = (u32*)__va(iopm_base);
+ for (i=0; i< PAGE_SIZE * (1 << IOPM_ALLOC_ORDER) / 4; i++)
+ svm->nested_iopm[i] = iopm[i] | nested_iopm[i];
+ svm->vmcb->control.iopm_base_pa = __pa(svm->nested_iopm);
+
+ return 0;
+}
+#endif
+
+static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
+ void *arg2, void *opaque)
+{
+ struct vmcb *nested_vmcb = (struct vmcb *)arg1;
+ struct vmcb *hsave = (struct vmcb *)arg2;
+
+ /* nested_vmcb is our indicator if nested SVM is activated */
+ svm->nested_vmcb = svm->vmcb->save.rax;
+
+ /* Clear internal status */
+ svm->vcpu.arch.exception.pending = false;
+
+ /* Save the old vmcb, so we don't need to pick what we save, but
+ can restore everything when a VMEXIT occurs */
+ memcpy(hsave, svm->vmcb, sizeof(struct vmcb));