Convert arm64's user_mem_abort() to __kvm_faultin_pfn() +
kvm_release_faultin_page(), replacing __gfn_to_pfn_memslot() and the
open coded kvm_set_pfn_dirty() vs. kvm_release_pfn_clean() release
logic.  Three down, six to go.

Signed-off-by: Sean Christopherson <sea...@google.com>
---
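A rough sketch of the resulting flow in user_mem_abort(), for reviewers
that haven't yet looked at the new helpers (names and arguments are
lifted from the call sites in the diff below; the hwpoison check and
most error handling are trimmed):

	unsigned long mmu_seq;
	struct page *page;
	bool writable;
	kvm_pfn_t pfn;
	int ret = 0;

	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
	mmap_read_unlock(current->mm);

	/* Fault in the pfn, grabbing a reference to the backing page. */
	pfn = __kvm_faultin_pfn(memslot, gfn, write_fault ? FOLL_WRITE : 0,
				&writable, &page);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	read_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq))
		goto out_unlock;

	/* ... map the pfn into the guest, setting ret on failure ... */

out_unlock:
	/*
	 * Put the reference acquired by __kvm_faultin_pfn(); the helper
	 * takes over the decision of whether to mark the page dirty,
	 * replacing the open coded kvm_set_pfn_dirty() vs.
	 * kvm_release_pfn_clean() logic.
	 */
	kvm_release_faultin_page(kvm, page, !!ret, writable);
	read_unlock(&kvm->mmu_lock);

Note that the release is keyed off the struct page filled in by
__kvm_faultin_pfn(), not the raw pfn.
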
 arch/arm64/kvm/mmu.c | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index ce13c3d884d5..756fc856ab44 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1439,6 +1439,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        long vma_pagesize, fault_granule;
        enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
        struct kvm_pgtable *pgt;
+       struct page *page;
 
        if (fault_is_perm)
                fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
@@ -1553,7 +1554,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 
        /*
         * Read mmu_invalidate_seq so that KVM can detect if the results of
-        * vma_lookup() or __gfn_to_pfn_memslot() become stale prior to
+        * vma_lookup() or __kvm_faultin_pfn() become stale prior to
         * acquiring kvm->mmu_lock.
         *
         * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
@@ -1562,8 +1563,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        mmu_seq = vcpu->kvm->mmu_invalidate_seq;
        mmap_read_unlock(current->mm);
 
-       pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
-                                  write_fault, &writable);
+       pfn = __kvm_faultin_pfn(memslot, gfn, write_fault ? FOLL_WRITE : 0,
+                               &writable, &page);
        if (pfn == KVM_PFN_ERR_HWPOISON) {
                kvm_send_hwpoison_signal(hva, vma_shift);
                return 0;
@@ -1576,7 +1577,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                 * If the page was identified as device early by looking at
                 * the VMA flags, vma_pagesize is already representing the
                 * largest quantity we can map.  If instead it was mapped
-                * via gfn_to_pfn_prot(), vma_pagesize is set to PAGE_SIZE
+                * via __kvm_faultin_pfn(), vma_pagesize is set to PAGE_SIZE
                 * and must not be upgraded.
                 *
                 * In both cases, we don't let transparent_hugepage_adjust()
@@ -1685,11 +1686,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        }
 
 out_unlock:
-       if (writable && !ret)
-               kvm_set_pfn_dirty(pfn);
-       else
-               kvm_release_pfn_clean(pfn);
-
+       kvm_release_faultin_page(kvm, page, !!ret, writable);
        read_unlock(&kvm->mmu_lock);
 
        /* Mark the page dirty only if the fault is handled successfully */
-- 
2.46.0.rc1.232.g9752f9e123-goog
