From: Sean Christopherson <sean.j.christopher...@intel.com>

When adding pages prior to boot, TDX will need the host pfn resolved by
the page fault handler so that it can be passed to TDADDPAGE (TDX-SEAM
always works with physical addresses as it has its own page tables).
Start plumbing the pfn back up the page fault stack.
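
Purely illustrative sketch of the kind of caller the plumbed-out pfn is
headed for (nothing below is part of this patch; tdx_add_private_page()
and tdx_seamcall_tdaddpage() are made-up placeholders for the TDX-SEAM
glue a later patch would carry):

  static int tdx_add_private_page(struct kvm_vcpu *vcpu, gpa_t gpa,
                                  u32 error_code)
  {
          kvm_pfn_t pfn;
          int r;

          /* Resolve the fault and retrieve the backing host pfn. */
          r = direct_page_fault(vcpu, gpa, error_code, false, PG_LEVEL_4K,
                                true, &pfn);
          if (r != RET_PF_FIXED)
                  return r;

          /*
           * TDX-SEAM works on physical addresses, so hand TDADDPAGE the
           * hpa derived from the pfn (pinning/lifetime handling elided).
           */
          return tdx_seamcall_tdaddpage(vcpu->kvm, gpa,
                                        (hpa_t)pfn << PAGE_SHIFT);
  }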

Signed-off-by: Sean Christopherson <sean.j.christopher...@intel.com>
---
 arch/x86/kvm/mmu/mmu.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index e4e0c883b52d..474173bceb54 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3916,14 +3916,14 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
 }
 
 static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
-                            bool prefault, int max_level, bool is_tdp)
+                            bool prefault, int max_level, bool is_tdp,
+                            kvm_pfn_t *pfn)
 {
        bool write = error_code & PFERR_WRITE_MASK;
        bool map_writable;
 
        gfn_t gfn = vcpu_gpa_to_gfn_unalias(vcpu, gpa);
        unsigned long mmu_seq;
-       kvm_pfn_t pfn;
        int r;
 
        if (page_fault_handle_page_track(vcpu, error_code, gfn))
@@ -3942,10 +3942,10 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
 
-       if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, write, &map_writable))
+       if (try_async_pf(vcpu, prefault, gfn, gpa, pfn, write, &map_writable))
                return RET_PF_RETRY;
 
-       if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
+       if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, *pfn, ACC_ALL, &r))
                return r;
 
        r = RET_PF_RETRY;
@@ -3958,25 +3958,27 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
 
        if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
                r = kvm_tdp_mmu_map(vcpu, gpa, error_code, map_writable, max_level,
-                                   pfn, prefault);
+                                   *pfn, prefault);
        else
-               r = __direct_map(vcpu, gpa, error_code, map_writable, max_level, pfn,
-                                prefault, is_tdp);
+               r = __direct_map(vcpu, gpa, error_code, map_writable, max_level,
+                                *pfn, prefault, is_tdp);
 
 out_unlock:
        spin_unlock(&vcpu->kvm->mmu_lock);
-       kvm_release_pfn_clean(pfn);
+       kvm_release_pfn_clean(*pfn);
        return r;
 }
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa,
                                u32 error_code, bool prefault)
 {
+       kvm_pfn_t pfn;
+
        pgprintk("%s: gva %lx error %x\n", __func__, gpa, error_code);
 
        /* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
        return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
-                                PG_LEVEL_2M, false);
+                                PG_LEVEL_2M, false, &pfn);
 }
 
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
@@ -4015,6 +4017,7 @@ EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
 int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                       bool prefault)
 {
+       kvm_pfn_t pfn;
        int max_level;
 
        for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
@@ -4028,7 +4031,7 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        }
 
        return direct_page_fault(vcpu, gpa, error_code, prefault,
-                                max_level, true);
+                                max_level, true, &pfn);
 }
 
 static void nonpaging_init_context(struct kvm_vcpu *vcpu,
-- 
2.17.1
