Convert from accounting pages against locked_vm to accounting them
against pinned_vm. This allows struct vm_account to be used to track
the mm_struct used to charge the pages. A future change will also use
this to track a cgroup for controlling pinned pages.
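
For context, the calling pattern this converts to looks roughly like the
sketch below. The helpers (vm_account_init_current(), vm_account_pinned(),
vm_unaccount_pinned() and vm_account_release()) are the ones introduced
earlier in this series; their signatures are inferred from the call sites
in this patch, and pin_example()/npages are made-up placeholders, so treat
it as illustrative only:

	#include <linux/vm_account.h>

	/* Illustrative only, not part of this patch */
	static int pin_example(unsigned long npages)
	{
		struct vm_account vm_account;
		int ret;

		/* Set up accounting against current->mm */
		vm_account_init_current(&vm_account);

		/* Charge npages to pinned_vm; may fail against the pin limit */
		ret = vm_account_pinned(&vm_account, npages);
		if (ret) {
			vm_account_release(&vm_account);
			return ret;
		}

		/* ... pin and use the pages ... */

		/* Uncharge the pages once they are unpinned */
		vm_unaccount_pinned(&vm_account, npages);

		/* Release the accounting context */
		vm_account_release(&vm_account);
		return 0;
	}

In this patch the struct vm_account is embedded in the long-lived
mm_iommu_table_group_mem_t and tce_container structures rather than
living on the stack, so the release is deferred until those objects
are freed.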

Signed-off-by: Alistair Popple <apop...@nvidia.com>
Cc: Michael Ellerman <m...@ellerman.id.au>
Cc: Nicholas Piggin <npig...@gmail.com>
Cc: Christophe Leroy <christophe.le...@csgroup.eu>
Cc: Alex Williamson <alex.william...@redhat.com>
Cc: Cornelia Huck <coh...@redhat.com>
Cc: Alexey Kardashevskiy <a...@ozlabs.ru>
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-ker...@vger.kernel.org
Cc: k...@vger.kernel.org
---
 arch/powerpc/mm/book3s64/iommu_api.c | 30 ++++++++++++++++++-----------
 drivers/vfio/vfio_iommu_spapr_tce.c  | 16 ++++++++++-----
 2 files changed, 30 insertions(+), 16 deletions(-)

diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index 7fcfba1..338b111 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -18,6 +18,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pte-walk.h>
 #include <linux/mm_inline.h>
+#include <linux/vm_account.h>
 
 static DEFINE_MUTEX(mem_list_mutex);
 
@@ -30,6 +31,7 @@ struct mm_iommu_table_group_mem_t {
        unsigned long used;
        atomic64_t mapped;
        unsigned int pageshift;
+       struct vm_account vm_account;
        u64 ua;                 /* userspace address */
        u64 entries;            /* number of entries in hpas/hpages[] */
        /*
@@ -62,20 +64,24 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
        unsigned int pageshift;
        unsigned long entry, chunk;
 
-       if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
-               ret = account_locked_vm(mm, entries, true);
-               if (ret)
-                       return ret;
-
-               locked_entries = entries;
-       }
-
        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem) {
                ret = -ENOMEM;
                goto unlock_exit;
        }
 
+       vm_account_init_current(&mem->vm_account);
+       if (dev_hpa == MM_IOMMU_TABLE_INVALID_HPA) {
+               ret = vm_account_pinned(&mem->vm_account, entries);
+               if (ret) {
+                       vm_account_release(&mem->vm_account);
+                       kfree(mem);
+                       return ret;
+               }
+
+               locked_entries = entries;
+       }
+
        if (dev_hpa != MM_IOMMU_TABLE_INVALID_HPA) {
                mem->pageshift = __ffs(dev_hpa | (entries << PAGE_SHIFT));
                mem->dev_hpa = dev_hpa;
@@ -175,10 +181,11 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
        unpin_user_pages(mem->hpages, pinned);
 
        vfree(mem->hpas);
-       kfree(mem);
 
 unlock_exit:
-       account_locked_vm(mm, locked_entries, false);
+       vm_unaccount_pinned(&mem->vm_account, locked_entries);
+       vm_account_release(&mem->vm_account);
+       kfree(mem);
 
        return ret;
 }
@@ -229,6 +236,7 @@ static void mm_iommu_do_free(struct mm_iommu_table_group_mem_t *mem)
 
        mm_iommu_unpin(mem);
        vfree(mem->hpas);
+       vm_account_release(&mem->vm_account);
        kfree(mem);
 }
 
@@ -279,7 +287,7 @@ long mm_iommu_put(struct mm_struct *mm, struct mm_iommu_table_group_mem_t *mem)
 unlock_exit:
        mutex_unlock(&mem_list_mutex);
 
-       account_locked_vm(mm, unlock_entries, false);
+       vm_unaccount_pinned(&mem->vm_account, unlock_entries);
 
        return ret;
 }
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 60a50ce..454ccc4 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -21,6 +21,7 @@
 #include <linux/sched/mm.h>
 #include <linux/sched/signal.h>
 #include <linux/mm.h>
+#include <linux/vm_account.h>
 #include "vfio.h"
 
 #include <asm/iommu.h>
@@ -67,6 +68,7 @@ struct tce_container {
        bool def_window_pending;
        unsigned long locked_pages;
        struct mm_struct *mm;
+       struct vm_account vm_account;
        struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
        struct list_head group_list;
        struct list_head prereg_list;
@@ -82,6 +84,7 @@ static long tce_iommu_mm_set(struct tce_container *container)
        BUG_ON(!current->mm);
        container->mm = current->mm;
        mmgrab(container->mm);
+       vm_account_init_current(&container->vm_account);
 
        return 0;
 }
@@ -291,7 +294,7 @@ static int tce_iommu_enable(struct tce_container *container)
                return ret;
 
        locked = table_group->tce32_size >> PAGE_SHIFT;
-       ret = account_locked_vm(container->mm, locked, true);
+       ret = vm_account_pinned(&container->vm_account, locked);
        if (ret)
                return ret;
 
@@ -310,7 +313,7 @@ static void tce_iommu_disable(struct tce_container *container)
        container->enabled = false;
 
        BUG_ON(!container->mm);
-       account_locked_vm(container->mm, container->locked_pages, false);
+       vm_unaccount_pinned(&container->vm_account, container->locked_pages);
 }
 
 static void *tce_iommu_open(unsigned long arg)
@@ -372,8 +375,10 @@ static void tce_iommu_release(void *iommu_data)
                WARN_ON(tce_iommu_prereg_free(container, tcemem));
 
        tce_iommu_disable(container);
-       if (container->mm)
+       if (container->mm) {
                mmdrop(container->mm);
+               vm_account_release(&container->vm_account);
+       }
        mutex_destroy(&container->lock);
 
        kfree(container);
@@ -619,7 +624,8 @@ static long tce_iommu_create_table(struct tce_container *container,
        if (!table_size)
                return -EINVAL;
 
-       ret = account_locked_vm(container->mm, table_size >> PAGE_SHIFT, true);
+       ret = vm_account_pinned(&container->vm_account,
+                               table_size >> PAGE_SHIFT);
        if (ret)
                return ret;
 
@@ -638,7 +644,7 @@ static void tce_iommu_free_table(struct tce_container *container,
        unsigned long pages = tbl->it_allocated_size >> PAGE_SHIFT;
 
        iommu_tce_table_put(tbl);
-       account_locked_vm(container->mm, pages, false);
+       vm_unaccount_pinned(&container->vm_account, pages);
 }
 
 static long tce_iommu_create_window(struct tce_container *container,
-- 
git-series 0.9.1
