The page requested for page-in can sometimes have transient
references, and hence cannot be migrated immediately. Retry a few
times before returning an error.

The H_SVM_PAGE_IN interface is enhanced to return H_BUSY if the page
is not in a migratable state.

Cc: Paul Mackerras <pau...@ozlabs.org>
Cc: Benjamin Herrenschmidt <b...@kernel.crashing.org>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: Bharata B Rao <bhar...@linux.ibm.com>
Cc: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
Cc: Sukadev Bhattiprolu <suka...@linux.vnet.ibm.com>
Cc: Laurent Dufour <lduf...@linux.ibm.com>
Cc: Thiago Jung Bauermann <bauer...@linux.ibm.com>
Cc: David Gibson <da...@gibson.dropbear.id.au>
Cc: Claudio Carvalho <cclau...@linux.ibm.com>
Cc: kvm-...@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org

Signed-off-by: Ram Pai <linux...@us.ibm.com>
---
 Documentation/powerpc/ultravisor.rst |  1 +
 arch/powerpc/kvm/book3s_hv_uvmem.c   | 54 +++++++++++++++++++++---------------
 2 files changed, 33 insertions(+), 22 deletions(-)

diff --git a/Documentation/powerpc/ultravisor.rst 
b/Documentation/powerpc/ultravisor.rst
index d98fc85..638d1a7 100644
--- a/Documentation/powerpc/ultravisor.rst
+++ b/Documentation/powerpc/ultravisor.rst
@@ -1034,6 +1034,7 @@ Return values
        * H_PARAMETER   if ``guest_pa`` is invalid.
        * H_P2          if ``flags`` is invalid.
        * H_P3          if ``order`` of page is invalid.
+       * H_BUSY        if ``page`` is not in a state to pagein
 
 Description
 ~~~~~~~~~~~
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c 
b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 12ed52a..c9bdef6 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -843,7 +843,7 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, 
unsigned long gpa,
        struct vm_area_struct *vma;
        int srcu_idx;
        unsigned long gfn = gpa >> page_shift;
-       int ret;
+       int ret, repeat_count = REPEAT_COUNT;
 
        if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
                return H_UNSUPPORTED;
@@ -857,34 +857,44 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, 
unsigned long gpa,
        if (flags & H_PAGE_IN_SHARED)
                return kvmppc_share_page(kvm, gpa, page_shift);
 
-       ret = H_PARAMETER;
        srcu_idx = srcu_read_lock(&kvm->srcu);
-       down_write(&kvm->mm->mmap_sem);
 
-       start = gfn_to_hva(kvm, gfn);
-       if (kvm_is_error_hva(start))
-               goto out;
-
-       mutex_lock(&kvm->arch.uvmem_lock);
        /* Fail the page-in request of an already paged-in page */
-       if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
-               goto out_unlock;
+       mutex_lock(&kvm->arch.uvmem_lock);
+       ret = kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL);
+       mutex_unlock(&kvm->arch.uvmem_lock);
+       if (ret) {
+               srcu_read_unlock(&kvm->srcu, srcu_idx);
+               return H_PARAMETER;
+       }
 
-       end = start + (1UL << page_shift);
-       vma = find_vma_intersection(kvm->mm, start, end);
-       if (!vma || vma->vm_start > start || vma->vm_end < end)
-               goto out_unlock;
+       do {
+               ret = H_PARAMETER;
+               down_write(&kvm->mm->mmap_sem);
 
-       if (kvmppc_svm_migrate_page(vma, start, end, gpa, kvm, page_shift,
-                               true))
-               goto out_unlock;
+               start = gfn_to_hva(kvm, gfn);
+               if (kvm_is_error_hva(start)) {
+                       up_write(&kvm->mm->mmap_sem);
+                       break;
+               }
 
-       ret = H_SUCCESS;
+               end = start + (1UL << page_shift);
+               vma = find_vma_intersection(kvm->mm, start, end);
+               if (!vma || vma->vm_start > start || vma->vm_end < end) {
+                       up_write(&kvm->mm->mmap_sem);
+                       break;
+               }
+
+               mutex_lock(&kvm->arch.uvmem_lock);
+               ret = kvmppc_svm_migrate_page(vma, start, end, gpa, kvm, 
page_shift, true);
+               mutex_unlock(&kvm->arch.uvmem_lock);
+
+               up_write(&kvm->mm->mmap_sem);
+       } while (ret == -2 && repeat_count--);
+
+       if (ret == -2)
+               ret = H_BUSY;
 
-out_unlock:
-       mutex_unlock(&kvm->arch.uvmem_lock);
-out:
-       up_write(&kvm->mm->mmap_sem);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
 }
-- 
1.8.3.1

Reply via email to