There are helpers to account locked pages in the locked_vm counter; this
reuses them in the VFIO SPAPR IOMMU driver.

While we are here, update the comment explaining why RLIMIT_MEMLOCK
might need to be bigger than the entire guest RAM.

Signed-off-by: Alexey Kardashevskiy <a...@ozlabs.ru>
---
Changes:
v4:
* added comment explaining how big the ulimit should be
* used try_increment_locked_vm/decrement_locked_vm
---
 drivers/vfio/vfio_iommu_spapr_tce.c | 33 +++++++++++++++------------------
 1 file changed, 15 insertions(+), 18 deletions(-)

diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index d9845af..6ed0fc3 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -58,7 +58,6 @@ static void tce_iommu_take_ownership_notify(struct spapr_tce_iommu_group *data,
 static int tce_iommu_enable(struct tce_container *container)
 {
        int ret = 0;
-       unsigned long locked, lock_limit, npages;
        struct iommu_table *tbl;
        struct spapr_tce_iommu_group *data;
 
@@ -92,24 +91,24 @@ static int tce_iommu_enable(struct tce_container *container)
         * Also we don't have a nice way to fail on H_PUT_TCE due to ulimits,
         * that would effectively kill the guest at random points, much better
         * enforcing the limit based on the max that the guest can map.
+        *
+        * Unfortunately at the moment it counts whole tables, no matter how
+        * much memory the guest has. I.e. for a 4GB guest and 4 IOMMU groups,
+        * each with a 2GB DMA window, 8GB will be counted here. The reason
+        * for this is that we cannot tell here how much RAM the guest
+        * actually uses, as this information is only available to KVM and
+        * VFIO is KVM agnostic.
         */
        tbl = data->ops->get_table(data, TCE_DEFAULT_WINDOW);
        if (!tbl)
                return -ENXIO;
 
-       down_write(&current->mm->mmap_sem);
-       npages = (tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
-       locked = current->mm->locked_vm + npages;
-       lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-       if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
-               pr_warn("RLIMIT_MEMLOCK (%ld) exceeded\n",
-                               rlimit(RLIMIT_MEMLOCK));
-               ret = -ENOMEM;
-       } else {
-               current->mm->locked_vm += npages;
-               container->enabled = true;
-       }
-       up_write(&current->mm->mmap_sem);
+       ret = try_increment_locked_vm((tbl->it_size << IOMMU_PAGE_SHIFT_4K) >>
+                       PAGE_SHIFT);
+       if (ret)
+               return ret;
+
+       container->enabled = true;
 
        return ret;
 }
@@ -135,10 +134,8 @@ static void tce_iommu_disable(struct tce_container *container)
        if (!tbl)
                return;
 
-       down_write(&current->mm->mmap_sem);
-       current->mm->locked_vm -= (tbl->it_size <<
-                       IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT;
-       up_write(&current->mm->mmap_sem);
+       decrement_locked_vm((tbl->it_size << IOMMU_PAGE_SHIFT_4K) >>
+                       PAGE_SHIFT);
 }
 
 static void *tce_iommu_open(unsigned long arg)
-- 
2.0.0
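
For reference, below is a minimal sketch of what the locked_vm helpers
might look like, reconstructed from the open-coded accounting this patch
removes. The real helpers are introduced elsewhere in this series, so
their exact names, signatures, locking and error reporting may differ.

/*
 * Sketch only: account npages against the current process's locked_vm,
 * failing with -ENOMEM if RLIMIT_MEMLOCK would be exceeded and the task
 * does not have CAP_IPC_LOCK. npages is in system (PAGE_SIZE) pages.
 */
static long try_increment_locked_vm(long npages)
{
	long ret = 0, locked, lock_limit;

	if (!current || !current->mm)
		return -ESRCH; /* process exited */

	if (!npages)
		return 0;

	down_write(&current->mm->mmap_sem);
	locked = current->mm->locked_vm + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK))
		ret = -ENOMEM;
	else
		current->mm->locked_vm += npages;
	up_write(&current->mm->mmap_sem);

	return ret;
}

/* Sketch only: undo the accounting done by try_increment_locked_vm(). */
static void decrement_locked_vm(long npages)
{
	if (!current || !current->mm || !npages)
		return;

	down_write(&current->mm->mmap_sem);
	if (npages > current->mm->locked_vm)
		npages = current->mm->locked_vm;
	current->mm->locked_vm -= npages;
	up_write(&current->mm->mmap_sem);
}

With helpers of this shape, tce_iommu_enable() only has to convert the
TCE table size to system pages, i.e.
(tbl->it_size << IOMMU_PAGE_SHIFT_4K) >> PAGE_SHIFT, and let the helper
enforce RLIMIT_MEMLOCK, which is what the hunks above do.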
