When KVM_X86_QUIRK_NESTED_SVM_SHARED_PAT is disabled and nested paging is
enabled in vmcb12, validate g_pat at emulated VMRUN and cause an immediate
VMEXIT with exit code VMEXIT_INVALID if it is invalid, as specified in the
APM, volume 2: "Nested Paging and VMRUN/VMEXIT."

Fixes: 3d6368ef580a ("KVM: SVM: Add VMRUN handler")
Signed-off-by: Jim Mattson <[email protected]>
---
 arch/x86/kvm/svm/nested.c | 23 +++++++++++++++++++----
 arch/x86/kvm/svm/svm.h    |  1 +
 2 files changed, 20 insertions(+), 4 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 32fa8e688c00..cb837842f2c3 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -410,7 +410,8 @@ static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
 
 /* Common checks that apply to both L1 and L2 state.  */
 static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu,
-                                  struct vmcb_save_area_cached *save)
+                                  struct vmcb_save_area_cached *save,
+                                  bool check_gpat)
 {
        if (CC(!(save->efer & EFER_SVME)))
                return false;
@@ -445,6 +446,15 @@ static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu,
        if (CC(!kvm_valid_efer(vcpu, save->efer)))
                return false;
 
+       /*
+        * If userspace contrives to get an invalid g_pat into vmcb02 by
+        * disabling KVM_X86_QUIRK_NESTED_SVM_SHARED_PAT in a race with
+        * this check, it should be prepared for the KVM_EXIT_FAIL_ENTRY
+        * that will follow.
+        */
+       if (check_gpat && CC(!kvm_pat_valid(save->g_pat)))
+               return false;
+
        return true;
 }
 
@@ -452,7 +462,8 @@ int nested_svm_check_cached_vmcb12(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       if (!nested_vmcb_check_save(vcpu, &svm->nested.save) ||
+       if (!nested_vmcb_check_save(vcpu, &svm->nested.save,
+                                   l2_has_separate_pat(svm)) ||
            !nested_vmcb_check_controls(vcpu, &svm->nested.ctl))
                return -EINVAL;
 
@@ -562,6 +573,7 @@ static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
 
        to->rax = from->rax;
        to->cr2 = from->cr2;
+       to->g_pat = from->g_pat;
 
        svm_copy_lbrs(to, from);
 }
@@ -1971,13 +1983,16 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 
        /*
         * Validate host state saved from before VMRUN (see
-        * nested_svm_check_permissions).
+        * nested_svm_check_permissions). Note that the g_pat field is not
+        * validated, because (a) it may have been clobbered by SMM before
+        * KVM_GET_NESTED_STATE, and (b) it is not loaded at emulated
+        * #VMEXIT.
         */
        __nested_copy_vmcb_save_to_cache(&save_cached, save);
        if (!(save->cr0 & X86_CR0_PG) ||
            !(save->cr0 & X86_CR0_PE) ||
            (save->rflags & X86_EFLAGS_VM) ||
-           !nested_vmcb_check_save(vcpu, &save_cached))
+           !nested_vmcb_check_save(vcpu, &save_cached, false))
                goto out_free;
 
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index a1d62c3cc3d6..b43e37b0448c 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -161,6 +161,7 @@ struct vmcb_save_area_cached {
        u64 isst_addr;
        u64 rax;
        u64 cr2;
+       u64 g_pat;
        u64 dbgctl;
        u64 br_from;
        u64 br_to;
-- 
2.53.0.1018.g2bb0e51243-goog


Reply via email to