From: Brijesh Singh <brijesh.si...@amd.com>

When a guest causes a page fault which requires emulation, the
vcpu->arch.gpa_available flag is set to indicate that cr2 contains a
valid GPA.

Currently, emulator_read_write_onepage() makes use of the gpa_available
flag to avoid a guest page walk for known MMIO regions. Let's not limit
the gpa_available optimization to just MMIO regions. This patch extends
the check to avoid the page walk whenever the gpa_available flag is set.

Signed-off-by: Brijesh Singh <brijesh.si...@amd.com>
[Fix EPT=0 according to Wanpeng Li's fix, plus ensure VMX also uses the
 new code. - Paolo]
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
        v1->v2: standardize on "nGPA" moniker, move gpa_available
        assignment to x86.c

 arch/x86/include/asm/kvm_host.h |  3 ++-
 arch/x86/kvm/mmu.c              |  6 ++++++
 arch/x86/kvm/svm.c              |  2 --
 arch/x86/kvm/vmx.c              |  4 ----
 arch/x86/kvm/x86.c              | 20 +++++++-------------
 5 files changed, 15 insertions(+), 20 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 9e4862e0e978..6db0ed9cf59e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -685,8 +685,9 @@ struct kvm_vcpu_arch {
        int pending_ioapic_eoi;
        int pending_external_vector;
 
-       /* GPA available (AMD only) */
+       /* GPA available */
        bool gpa_available;
+       gpa_t gpa_val;
 
        /* be preempted when it's in kernel-mode(cpl=0) */
        bool preempted_in_kernel;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f7598883920a..a2c592b14617 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4843,6 +4843,12 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, 
u64 error_code,
        enum emulation_result er;
        bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);
 
+       /* With shadow page tables, fault_address contains a GVA or nGPA.  */
+       if (vcpu->arch.mmu.direct_map) {
+               vcpu->arch.gpa_available = true;
+               vcpu->arch.gpa_val = cr2;
+       }
+
        if (unlikely(error_code & PFERR_RSVD_MASK)) {
                r = handle_mmio_page_fault(vcpu, cr2, direct);
                if (r == RET_MMIO_PF_EMULATE) {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1fa9ee5660f4..a603d0c14a5a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4236,8 +4236,6 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 
        trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
 
-       vcpu->arch.gpa_available = (exit_code == SVM_EXIT_NPF);
-
        if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
                vcpu->arch.cr0 = svm->vmcb->save.cr0;
        if (npt_enabled)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 45fb0ea78ee8..e2c8b33c35d1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6393,9 +6393,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
        error_code |= (exit_qualification & 0x100) != 0 ?
               PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK;
 
-       vcpu->arch.gpa_available = true;
        vcpu->arch.exit_qualification = exit_qualification;
-
        return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
 }
 
@@ -6410,7 +6408,6 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
                return kvm_skip_emulated_instruction(vcpu);
        }
 
-       vcpu->arch.gpa_available = true;
        ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
        if (ret >= 0)
                return ret;
@@ -8644,7 +8641,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
        u32 vectoring_info = vmx->idt_vectoring_info;
 
        trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
-       vcpu->arch.gpa_available = false;
 
        /*
         * Flush logged GPAs PML buffer, this will make dirty_bitmap more
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e10eda86bc7b..3f34d5f3db8d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4657,25 +4657,18 @@ static int emulator_read_write_onepage(unsigned long 
addr, void *val,
         */
        if (vcpu->arch.gpa_available &&
            emulator_can_use_gpa(ctxt) &&
-           vcpu_is_mmio_gpa(vcpu, addr, exception->address, write) &&
-           (addr & ~PAGE_MASK) == (exception->address & ~PAGE_MASK)) {
-               gpa = exception->address;
-               goto mmio;
+           (addr & ~PAGE_MASK) == (vcpu->arch.gpa_val & ~PAGE_MASK)) {
+               gpa = vcpu->arch.gpa_val;
+               ret = vcpu_is_mmio_gpa(vcpu, addr, gpa, write);
+       } else {
+               ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
        }
 
-       ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
-
        if (ret < 0)
                return X86EMUL_PROPAGATE_FAULT;
-
-       /* For APIC access vmexit */
-       if (ret)
-               goto mmio;
-
-       if (ops->read_write_emulate(vcpu, gpa, val, bytes))
+       if (!ret && ops->read_write_emulate(vcpu, gpa, val, bytes))
                return X86EMUL_CONTINUE;
 
-mmio:
        /*
         * Is this MMIO handled locally?
         */
@@ -7002,6 +6995,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        if (vcpu->arch.apic_attention)
                kvm_lapic_sync_from_vapic(vcpu);
 
+       vcpu->arch.gpa_available = false;
        r = kvm_x86_ops->handle_exit(vcpu);
        return r;
 
-- 
1.8.3.1


Reply via email to