Add an operation to populate a part of an mm_struct with device
private memory.

Signed-off-by: Thomas Hellström <thomas.hellst...@linux.intel.com>
---
 drivers/gpu/drm/drm_gpusvm.c  |  7 ++-----
 drivers/gpu/drm/drm_pagemap.c | 32 ++++++++++++++++++++++++++++++++
 include/drm/drm_pagemap.h     | 32 ++++++++++++++++++++++++++++++++
 3 files changed, 66 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index 4fade7018507..d84e27283768 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -175,11 +175,8 @@
  *             }
  *
  *             if (driver_migration_policy(range)) {
- *                     mmap_read_lock(mm);
- *                     devmem = driver_alloc_devmem();
- *                     err = drm_pagemap_migrate_to_devmem(devmem, gpusvm->mm, gpuva_start,
- *                                                          gpuva_end, driver_pgmap_owner());
- *                      mmap_read_unlock(mm);
+ *                     err = drm_pagemap_populate_mm(driver_choose_drm_pagemap(),
+ *                                                    gpuva_start, gpuva_end, gpusvm->mm);
  *                     if (err)        // CPU mappings may have changed
  *                             goto retry;
  *             }
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index c46bb4384444..27e3f90cf49a 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -6,6 +6,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/migrate.h>
 #include <linux/pagemap.h>
+#include <drm/drm_drv.h>
 #include <drm/drm_pagemap.h>
 
 /**
@@ -782,3 +783,34 @@ struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
        return zdd->devmem_allocation->dpagemap;
 }
 EXPORT_SYMBOL_GPL(drm_pagemap_page_to_dpagemap);
+
+/**
+ * drm_pagemap_populate_mm() - Populate a virtual range with device memory pages
+ * @dpagemap: Pointer to the drm_pagemap managing the device memory
+ * @start: Start of the virtual range to populate.
+ * @end: End of the virtual range to populate.
+ * @mm: Pointer to the virtual address space.
+ *
+ * Attempt to populate a virtual range with device memory pages,
+ * clearing them or migrating data from the existing pages if necessary.
+ * The function is best effort only, and implementations may vary
+ * in how hard they try to satisfy the request.
+ *
+ * Return: 0 on success, negative error code on error. If the hardware
+ * device was removed / unbound the function will return -ENODEV.
+ */
+int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+                           unsigned long start, unsigned long end,
+                           struct mm_struct *mm)
+{
+       int err;
+
+       if (!mmget_not_zero(mm))
+               return -EFAULT;
+       mmap_read_lock(mm);
+       err = dpagemap->ops->populate_mm(dpagemap, start, end, mm);
+       mmap_read_unlock(mm);
+       mmput(mm);
+
+       return err;
+}
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index 32f0d7f23075..c591736e7c48 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -92,6 +92,34 @@ struct drm_pagemap_ops {
                             struct device *dev,
                             struct drm_pagemap_device_addr addr);
 
+       /**
+        * @populate_mm: Populate part of the mm with @dpagemap memory,
+        * migrating existing data.
+        * @dpagemap: The struct drm_pagemap managing the memory.
+        * @start: The virtual start address in @mm
+        * @end: The virtual end address in @mm
+        * @mm: Pointer to a live mm. The caller must have an mmget()
+        * reference.
+        *
+        * The caller will have the mm lock at least in read mode.
+        * Note that there is no guarantee that the memory is resident
+        * after the function returns, it's best effort only.
+        * When the mm is not using the memory anymore,
+        * it will be released. The struct drm_pagemap might have a
+        * mechanism in place to reclaim the memory and the data will
+        * then be migrated, typically to system memory.
+        * The implementation should hold sufficient runtime power-
+        * references while pages are used in an address space and
+        * should ideally guard against hardware device unbind in
+        * a way such that device pages are migrated back to system
+        * followed by device page removal. The implementation should
+        * return -ENODEV after device removal.
+        *
+        * Return: 0 if successful. Negative error code on error.
+        */
+       int (*populate_mm)(struct drm_pagemap *dpagemap,
+                          unsigned long start, unsigned long end,
+                          struct mm_struct *mm);
 };
 
 /**
@@ -202,4 +230,8 @@ void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
                             const struct drm_pagemap_devmem_ops *ops,
                             struct drm_pagemap *dpagemap, size_t size);
 
+int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+                           unsigned long start, unsigned long end,
+                           struct mm_struct *mm);
+
 #endif
-- 
2.48.1

Reply via email to