Given that in kvm_create_vm() there is:
kvm->mm = current->mm;

And that on every kvm_*_ioctl we have:
if (kvm->mm != current->mm)
        return -EIO;

I see no reason to keep using current->mm instead of kvm->mm.

By doing so, we would reduce the use of 'global' variables in the code, relying
more on the contents of the kvm struct.

Signed-off-by: Leonardo Bras <leona...@linux.ibm.com>
---
 arch/powerpc/kvm/book3s_64_mmu_hv.c | 10 +++++-----
 arch/powerpc/kvm/book3s_64_vio.c    | 10 ++++++----
 arch/powerpc/kvm/book3s_hv.c        | 10 +++++-----
 3 files changed, 16 insertions(+), 14 deletions(-)

diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c 
b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 9a75f0e1933b..43b3cdf011bd 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -296,7 +296,7 @@ static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, 
unsigned long flags,
        /* Protect linux PTE lookup from page table destruction */
        rcu_read_lock_sched();  /* this disables preemption too */
        ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
-                               current->mm->pgd, false, pte_idx_ret);
+                               kvm->mm->pgd, false, pte_idx_ret);
        rcu_read_unlock_sched();
        if (ret == H_TOO_HARD) {
                /* this can't happen */
@@ -592,8 +592,8 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
        npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
        if (npages < 1) {
                /* Check if it's an I/O mapping */
-               down_read(&current->mm->mmap_sem);
-               vma = find_vma(current->mm, hva);
+               down_read(&kvm->mm->mmap_sem);
+               vma = find_vma(kvm->mm, hva);
                if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
                    (vma->vm_flags & VM_PFNMAP)) {
                        pfn = vma->vm_pgoff +
@@ -602,7 +602,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
                        is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
                        write_ok = vma->vm_flags & VM_WRITE;
                }
-               up_read(&current->mm->mmap_sem);
+               up_read(&kvm->mm->mmap_sem);
                if (!pfn)
                        goto out_put;
        } else {
@@ -621,7 +621,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct 
kvm_vcpu *vcpu,
                         * hugepage split and collapse.
                         */
                        local_irq_save(flags);
-                       ptep = find_current_mm_pte(current->mm->pgd,
+                       ptep = find_current_mm_pte(kvm->mm->pgd,
                                                   hva, NULL, NULL);
                        if (ptep) {
                                pte = kvmppc_read_update_linux_pte(ptep, 1);
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index a402ead833b6..308aa3a639a5 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -253,10 +253,11 @@ static int kvm_spapr_tce_release(struct inode *inode, 
struct file *filp)
                }
        }
 
+       account_locked_vm(kvm->mm,
+               kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
+
        kvm_put_kvm(stt->kvm);
 
-       account_locked_vm(current->mm,
-               kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
        call_rcu(&stt->rcu, release_spapr_tce_table);
 
        return 0;
@@ -272,6 +273,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 {
        struct kvmppc_spapr_tce_table *stt = NULL;
        struct kvmppc_spapr_tce_table *siter;
+       struct mm_struct *mm = kvm->mm;
        unsigned long npages, size = args->size;
        int ret = -ENOMEM;
 
@@ -280,7 +282,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                return -EINVAL;
 
        npages = kvmppc_tce_pages(size);
-       ret = account_locked_vm(current->mm, kvmppc_stt_pages(npages), true);
+       ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
        if (ret)
                return ret;
 
@@ -325,7 +327,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
        kvm_put_kvm(kvm);
        kfree(stt);
  fail_acct:
-       account_locked_vm(current->mm, kvmppc_stt_pages(npages), false);
+       account_locked_vm(mm, kvmppc_stt_pages(npages), false);
        return ret;
 }
 
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 709cf1fd4cf4..679008c511e4 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4280,7 +4280,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct 
kvm_vcpu *vcpu)
        user_vrsave = mfspr(SPRN_VRSAVE);
 
        vcpu->arch.wqp = &vcpu->arch.vcore->wq;
-       vcpu->arch.pgdir = current->mm->pgd;
+       vcpu->arch.pgdir = kvm->mm->pgd;
        vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
        do {
@@ -4612,14 +4612,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu 
*vcpu)
 
        /* Look up the VMA for the start of this memory slot */
        hva = memslot->userspace_addr;
-       down_read(&current->mm->mmap_sem);
-       vma = find_vma(current->mm, hva);
+       down_read(&kvm->mm->mmap_sem);
+       vma = find_vma(kvm->mm, hva);
        if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
                goto up_out;
 
        psize = vma_kernel_pagesize(vma);
 
-       up_read(&current->mm->mmap_sem);
+       up_read(&kvm->mm->mmap_sem);
 
        /* We can handle 4k, 64k or 16M pages in the VRMA */
        if (psize >= 0x1000000)
@@ -4652,7 +4652,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
        return err;
 
  up_out:
-       up_read(&current->mm->mmap_sem);
+       up_read(&kvm->mm->mmap_sem);
        goto out_srcu;
 }
 
-- 
2.23.0

Reply via email to