Provide the "struct page" associated with a guest_memfd pfn as an output
from __kvm_gmem_get_pfn() so that KVM guest page fault handlers can
directly put the page instead of having to rely on
kvm_pfn_to_refcounted_page().

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/mmu/mmu.c   |  2 +-
 arch/x86/kvm/svm/sev.c   | 10 ++++++----
 include/linux/kvm_host.h |  6 ++++--
 virt/kvm/guest_memfd.c   | 19 +++++++++++--------
 4 files changed, 22 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 53555ea5e5bb..146e57c9c86d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4353,7 +4353,7 @@ static int kvm_mmu_faultin_pfn_private(struct kvm_vcpu *vcpu,
        }
 
        r = kvm_gmem_get_pfn(vcpu->kvm, fault->slot, fault->gfn, &fault->pfn,
-                            &max_order);
+                            &fault->refcounted_page, &max_order);
        if (r) {
                kvm_mmu_prepare_memory_fault_exit(vcpu, fault);
                return r;
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 62f63fd714df..5c125e4c1096 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -3847,6 +3847,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
        if (VALID_PAGE(svm->sev_es.snp_vmsa_gpa)) {
                gfn_t gfn = gpa_to_gfn(svm->sev_es.snp_vmsa_gpa);
                struct kvm_memory_slot *slot;
+               struct page *page;
                kvm_pfn_t pfn;
 
                slot = gfn_to_memslot(vcpu->kvm, gfn);
@@ -3857,7 +3858,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
                 * The new VMSA will be private memory guest memory, so
                 * retrieve the PFN from the gmem backend.
                 */
-               if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, NULL))
+               if (kvm_gmem_get_pfn(vcpu->kvm, slot, gfn, &pfn, &page, NULL))
                        return -EINVAL;
 
                /*
@@ -3886,7 +3887,7 @@ static int __sev_snp_update_protected_guest_state(struct kvm_vcpu *vcpu)
                 * changes then care should be taken to ensure
                 * svm->sev_es.vmsa is pinned through some other means.
                 */
-               kvm_release_pfn_clean(pfn);
+               kvm_release_page_clean(page);
        }
 
        /*
@@ -4686,6 +4687,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
        struct kvm_memory_slot *slot;
        struct kvm *kvm = vcpu->kvm;
        int order, rmp_level, ret;
+       struct page *page;
        bool assigned;
        kvm_pfn_t pfn;
        gfn_t gfn;
@@ -4712,7 +4714,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
                return;
        }
 
-       ret = kvm_gmem_get_pfn(kvm, slot, gfn, &pfn, &order);
+       ret = kvm_gmem_get_pfn(kvm, slot, gfn, &pfn, &page, &order);
        if (ret) {
                pr_warn_ratelimited("SEV: Unexpected RMP fault, no backing page for private GPA 0x%llx\n",
                                    gpa);
@@ -4770,7 +4772,7 @@ void sev_handle_rmp_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code)
 out:
        trace_kvm_rmp_fault(vcpu, gpa, pfn, error_code, rmp_level, ret);
 out_no_trace:
-       put_page(pfn_to_page(pfn));
+       kvm_release_page_unused(page);
 }
 
 static bool is_pfn_range_shared(kvm_pfn_t start, kvm_pfn_t end)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e0548ae92659..9d2a97eb30e4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2462,11 +2462,13 @@ static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
 
 #ifdef CONFIG_KVM_PRIVATE_MEM
 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
-                    gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
+                    gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
+                    int *max_order);
 #else
 static inline int kvm_gmem_get_pfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn,
-                                  kvm_pfn_t *pfn, int *max_order)
+                                  kvm_pfn_t *pfn, struct page **page,
+                                  int *max_order)
 {
        KVM_BUG_ON(1, kvm);
        return -EIO;
diff --git a/virt/kvm/guest_memfd.c b/virt/kvm/guest_memfd.c
index 1c509c351261..ad1f9e73cd13 100644
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -542,12 +542,12 @@ void kvm_gmem_unbind(struct kvm_memory_slot *slot)
 }
 
 static int __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
-                      gfn_t gfn, kvm_pfn_t *pfn, int *max_order, bool prepare)
+                             gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
+                             int *max_order, bool prepare)
 {
        pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;
        struct kvm_gmem *gmem = file->private_data;
        struct folio *folio;
-       struct page *page;
        int r;
 
        if (file != slot->gmem.file) {
@@ -571,9 +571,9 @@ static int __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
                return -EHWPOISON;
        }
 
-       page = folio_file_page(folio, index);
+       *page = folio_file_page(folio, index);
 
-       *pfn = page_to_pfn(page);
+       *pfn = page_to_pfn(*page);
        if (max_order)
                *max_order = 0;
 
@@ -585,7 +585,8 @@ static int __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
 }
 
 int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
-                    gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
+                    gfn_t gfn, kvm_pfn_t *pfn, struct page **page,
+                    int *max_order)
 {
        struct file *file = kvm_gmem_get_file(slot);
        int r;
@@ -593,7 +594,7 @@ int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
        if (!file)
                return -EFAULT;
 
-       r = __kvm_gmem_get_pfn(file, slot, gfn, pfn, max_order, true);
+       r = __kvm_gmem_get_pfn(file, slot, gfn, pfn, page, max_order, true);
        fput(file);
        return r;
 }
@@ -604,6 +605,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
 {
        struct file *file;
        struct kvm_memory_slot *slot;
+       struct page *page;
        void __user *p;
 
        int ret = 0, max_order;
@@ -633,7 +635,8 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
                        break;
                }
 
-               ret = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &max_order, false);
+               ret = __kvm_gmem_get_pfn(file, slot, gfn, &pfn, &page,
+                                         &max_order, false);
                if (ret)
                        break;
 
@@ -644,7 +647,7 @@ long kvm_gmem_populate(struct kvm *kvm, gfn_t start_gfn, void __user *src, long
                p = src ? src + i * PAGE_SIZE : NULL;
                ret = post_populate(kvm, gfn, pfn, p, max_order, opaque);
 
-               put_page(pfn_to_page(pfn));
+               put_page(page);
                if (ret)
                        break;
        }
-- 
2.46.0.rc1.232.g9752f9e123-goog

Reply via email to