Add timeslicing support to GPU SVM which guarantees the GPU a minimum
execution time on a piece of physical memory before it is migrated back
to the CPU. This is intended to help implement strict migration policies
which require memory to be in a certain placement for correct execution.
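
As a rough illustration of the intended use (hypothetical driver-side
code, not part of the diff below), a driver opts in by setting the new
timeslice_ms field in the drm_gpusvm_ctx it passes to
drm_gpusvm_migrate_to_devmem():

	/*
	 * Hypothetical driver-side sketch: request a 5 ms minimum
	 * residency in device memory before a CPU fault may migrate the
	 * pages back to system memory. Values are illustrative only.
	 */
	const struct drm_gpusvm_ctx ctx = {
		.devmem_possible = true,
		.check_pages_threshold = SZ_64K,	/* driver-chosen value */
		.timeslice_ms = 5,			/* new field in this patch */
	};

	/*
	 * ctx is then passed to drm_gpusvm_migrate_to_devmem(); while the
	 * recorded timeslice has not expired, __drm_gpusvm_migrate_to_ram()
	 * returns early and the CPU fault simply retries.
	 */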

Signed-off-by: Matthew Brost <matthew.br...@intel.com>
---
 drivers/gpu/drm/drm_gpusvm.c | 9 +++++++++
 include/drm/drm_gpusvm.h     | 5 +++++
 2 files changed, 14 insertions(+)

diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c
index edf107809d20..40a56f38ff8e 100644
--- a/drivers/gpu/drm/drm_gpusvm.c
+++ b/drivers/gpu/drm/drm_gpusvm.c
@@ -1770,6 +1770,8 @@ int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
                goto err_finalize;
 
        /* Upon success bind devmem allocation to range and zdd */
+       devmem_allocation->timeslice_expiration = get_jiffies_64() +
+               msecs_to_jiffies(ctx->timeslice_ms);
        zdd->devmem_allocation = devmem_allocation;     /* Owns ref */
 
 err_finalize:
@@ -1990,6 +1992,13 @@ static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas,
        void *buf;
        int i, err = 0;
 
+       if (page) {
+               zdd = page->zone_device_data;
+               if (time_before64(get_jiffies_64(),
+                                 zdd->devmem_allocation->timeslice_expiration))
+                       return 0;
+       }
+
        start = ALIGN_DOWN(fault_addr, size);
        end = ALIGN(fault_addr + 1, size);
 
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index 9fd25fc880a4..cce217bc136f 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -89,6 +89,7 @@ struct drm_gpusvm_devmem_ops {
  * @ops: Pointer to the operations structure for GPU SVM device memory
  * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
  * @size: Size of device memory allocation
+ * @timeslice_expiration: Timeslice expiration in jiffies
  */
 struct drm_gpusvm_devmem {
        struct device *dev;
@@ -97,6 +98,7 @@ struct drm_gpusvm_devmem {
        const struct drm_gpusvm_devmem_ops *ops;
        struct drm_pagemap *dpagemap;
        size_t size;
+       u64 timeslice_expiration;
 };
 
 /**
@@ -283,6 +285,8 @@ struct drm_gpusvm {
  * @check_pages_threshold: Check CPU pages for present if chunk is less than or
  *                         equal to threshold. If not present, reduce chunk
  *                         size.
+ * @timeslice_ms: The minimum time in milliseconds a piece of memory
+ *               remains with either exclusive GPU or CPU access.
  * @in_notifier: entering from a MMU notifier
  * @read_only: operating on read-only memory
  * @devmem_possible: possible to use device memory
@@ -292,6 +296,7 @@ struct drm_gpusvm {
  */
 struct drm_gpusvm_ctx {
        unsigned long check_pages_threshold;
+       unsigned long timeslice_ms;
        unsigned int in_notifier :1;
        unsigned int read_only :1;
        unsigned int devmem_possible :1;
-- 
2.34.1
