On Fri, Jul 17, 2020 at 01:00:26AM -0700, Ram Pai wrote:
> @@ -812,7 +842,7 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
>       struct vm_area_struct *vma;
>       int srcu_idx;
>       unsigned long gfn = gpa >> page_shift;
> -     int ret;
> +     int ret, repeat_count = REPEAT_COUNT;
>  
>       if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
>               return H_UNSUPPORTED;
> @@ -826,34 +856,44 @@ unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
>       if (flags & H_PAGE_IN_SHARED)
>               return kvmppc_share_page(kvm, gpa, page_shift);
>  
> -     ret = H_PARAMETER;
>       srcu_idx = srcu_read_lock(&kvm->srcu);
> -     mmap_read_lock(kvm->mm);
>  
> -     start = gfn_to_hva(kvm, gfn);
> -     if (kvm_is_error_hva(start))
> -             goto out;
> -
> -     mutex_lock(&kvm->arch.uvmem_lock);
>       /* Fail the page-in request of an already paged-in page */
> -     if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
> -             goto out_unlock;
> +     mutex_lock(&kvm->arch.uvmem_lock);
> +     ret = kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL);
> +     mutex_unlock(&kvm->arch.uvmem_lock);

Same comment as for the previous patch: I don't think you can release
the lock here. Once uvmem_lock is dropped, another thread could page
this gfn in between the check above and the migration below, so the
check and the actual page-in need to sit in the same critical section.
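
Something along these lines perhaps (untested sketch, assuming the
helpers keep the signatures used in this patch) -- do the
already-paged-in check inside the retry loop, under the same
uvmem_lock that covers the migration:

	do {
		ret = H_PARAMETER;
		mmap_read_lock(kvm->mm);

		start = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(start)) {
			mmap_read_unlock(kvm->mm);
			break;
		}

		end = start + (1UL << page_shift);
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma || vma->vm_start > start || vma->vm_end < end) {
			mmap_read_unlock(kvm->mm);
			break;
		}

		mutex_lock(&kvm->arch.uvmem_lock);
		/*
		 * Fail the page-in request of an already paged-in page,
		 * but do the check under uvmem_lock together with the
		 * migration so nothing can slip in between the two.
		 */
		if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL)) {
			mutex_unlock(&kvm->arch.uvmem_lock);
			mmap_read_unlock(kvm->mm);
			break;
		}

		ret = kvmppc_svm_migrate_page(vma, start, end, gpa, kvm,
					      page_shift, true);
		mutex_unlock(&kvm->arch.uvmem_lock);
		mmap_read_unlock(kvm->mm);
	} while (ret == -2 && repeat_count--);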

> +     if (ret) {
> +             srcu_read_unlock(&kvm->srcu, srcu_idx);
> +             return H_PARAMETER;
> +     }
>  
> -     end = start + (1UL << page_shift);
> -     vma = find_vma_intersection(kvm->mm, start, end);
> -     if (!vma || vma->vm_start > start || vma->vm_end < end)
> -             goto out_unlock;
> +     do {
> +             ret = H_PARAMETER;
> +             mmap_read_lock(kvm->mm);
>  
> -     if (kvmppc_svm_migrate_page(vma, start, end, gpa, kvm, page_shift,
> -                             true))
> -             goto out_unlock;
> +             start = gfn_to_hva(kvm, gfn);
> +             if (kvm_is_error_hva(start)) {
> +                     mmap_read_unlock(kvm->mm);
> +                     break;
> +             }
>  
> -     ret = H_SUCCESS;
> +             end = start + (1UL << page_shift);
> +             vma = find_vma_intersection(kvm->mm, start, end);
> +             if (!vma || vma->vm_start > start || vma->vm_end < end) {
> +                     mmap_read_unlock(kvm->mm);
> +                     break;
> +             }
> +
> +             mutex_lock(&kvm->arch.uvmem_lock);
> +             ret = kvmppc_svm_migrate_page(vma, start, end, gpa, kvm,
> +                             page_shift, true);
> +             mutex_unlock(&kvm->arch.uvmem_lock);
> +
> +             mmap_read_unlock(kvm->mm);
> +     } while (ret == -2 && repeat_count--);
> +
> +     if (ret == -2)
> +             ret = H_BUSY;
>  
> -out_unlock:
> -     mutex_unlock(&kvm->arch.uvmem_lock);
> -out:
> -     mmap_read_unlock(kvm->mm);
>       srcu_read_unlock(&kvm->srcu, srcu_idx);
>       return ret;
>  }
> -- 
> 1.8.3.1
