Register a driver-wide owner list, provide a callback to identify
fast interconnects, and use the drm_pagemap_util helpers to allocate
or reuse a suitable owner struct. For now, we consider pagemaps on
different tiles of the same device as having a fast interconnect.

Signed-off-by: Thomas Hellström <thomas.hellst...@linux.intel.com>
---
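Note for reviewers: the owner lifecycle used in this patch follows the
pattern sketched below. This is a minimal sketch assembled from the
calls visible in the diff; struct drm_pagemap_peer,
DRM_PAGEMAP_OWNER_LIST_DEFINE(), drm_pagemap_acquire_owner() and
drm_pagemap_release_owner() are the drm_pagemap_util interfaces
introduced earlier in this series, and the example_* names are
illustrative.

#include <drm/drm_pagemap_util.h>

/* Match callback: peers whose pagemaps sit on the same physical
 * device are considered to share a fast interconnect and may
 * therefore share an owner.
 */
static bool example_has_interconnect(struct drm_pagemap_peer *peer1,
				     struct drm_pagemap_peer *peer2)
{
	struct xe_pagemap *p1 = container_of(peer1, typeof(*p1), peer);
	struct xe_pagemap *p2 = container_of(peer2, typeof(*p2), peer);

	return p1->dpagemap.drm->dev == p2->dpagemap.drm->dev;
}

/* One owner list for the whole driver. */
static DRM_PAGEMAP_OWNER_LIST_DEFINE(example_owner_list);

	/* At pagemap creation: reuse the owner of the first registered
	 * peer the callback matches, otherwise allocate a fresh owner.
	 */
	err = drm_pagemap_acquire_owner(&xpagemap->peer, &example_owner_list,
					example_has_interconnect);
	if (err)
		goto out_err;
	pagemap->owner = xpagemap->peer.owner;

	/* At teardown: drop the peer's reference on the owner. */
	drm_pagemap_release_owner(&xpagemap->peer);

Two pagemaps whose peers match end up with the same owner pointer, so
hmm_range_fault() called with that owner treats device-private pages
from either pagemap as accessible in place rather than forcing a
migration back to system memory.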
 drivers/gpu/drm/xe/xe_svm.c | 40 +++++++++++++++++++++++++++----------
 drivers/gpu/drm/xe/xe_svm.h |  3 +++
 2 files changed, 32 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 20441da0aff7..25d49d0d7484 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -75,11 +75,6 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
        range_debug(range, operation);
 }
 
-static void *xe_svm_devm_owner(struct xe_device *xe)
-{
-       return xe;
-}
-
 static struct drm_gpusvm_range *
 xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
 {
@@ -751,7 +746,7 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
        /* Ensure the device has a pm ref while there are device pages active. */
        xe_pm_runtime_get_noresume(xe);
        err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm,
-                                           start, end, xe_svm_devm_owner(xe));
+                                           start, end, xpagemap->pagemap.owner);
        if (err)
                xe_svm_devmem_release(&bo->devmem_allocation);
 
@@ -791,6 +786,7 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
                .check_pages_threshold = IS_DGFX(vm->xe) &&
                        IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
        };
+       struct drm_pagemap *dpagemap;
        struct xe_svm_range *range;
        struct drm_gpusvm_range *r;
        struct drm_exec exec;
@@ -818,16 +814,14 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
                return 0;
 
        range_debug(range, "PAGE FAULT");
+       dpagemap = xe_tile_local_pagemap(tile);
 
        /* XXX: Add migration policy, for now migrate range once */
        if (!range->skip_migrate && range->base.flags.migrate_devmem &&
            xe_svm_range_size(range) >= SZ_64K) {
-               struct drm_pagemap *dpagemap;
-
                range->skip_migrate = true;
 
                range_debug(range, "ALLOCATE VRAM");
-               dpagemap = xe_tile_local_pagemap(tile);
                err = drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range),
                                              xe_svm_range_end(range),
                                              range->base.gpusvm->mm);
@@ -841,7 +835,8 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
        }
 
        range_debug(range, "GET PAGES");
-       ctx.device_private_page_owner = xe_svm_devm_owner(vm->xe);
+       ctx.device_private_page_owner = dpagemap ?
+               container_of(dpagemap, struct xe_pagemap, dpagemap)->pagemap.owner : NULL;
        err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, r, &ctx);
        /* Corner where CPU mappings have changed */
        if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
@@ -962,6 +957,11 @@ static void xe_pagemap_fini(struct xe_pagemap *xpagemap)
                xpagemap->hpa_base = 0;
        }
 
+       if (pagemap->owner) {
+               drm_pagemap_release_owner(&xpagemap->peer);
+               pagemap->owner = NULL;
+       }
+
        if (pagemap->range.start) {
                devm_release_mem_region(dev, pagemap->range.start,
                                        pagemap->range.end - pagemap->range.start + 1);
@@ -995,6 +995,19 @@ static void xe_pagemap_destroy(struct drm_pagemap *dpagemap)
        complete_all(&xpagemap->cache->queued);
 }
 
+static bool xe_has_interconnect(struct drm_pagemap_peer *peer1,
+                               struct drm_pagemap_peer *peer2)
+{
+       struct xe_pagemap *xpagemap1 = container_of(peer1, typeof(*xpagemap1), peer);
+       struct xe_pagemap *xpagemap2 = container_of(peer2, typeof(*xpagemap2), peer);
+       struct device *dev1 = xpagemap1->dpagemap.drm->dev;
+       struct device *dev2 = xpagemap2->dpagemap.drm->dev;
+
+       return dev1 == dev2;
+}
+
+static DRM_PAGEMAP_OWNER_LIST_DEFINE(xe_owner_list);
+
 static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
        .device_map = xe_drm_pagemap_device_map,
        .populate_mm = xe_drm_pagemap_populate_mm,
@@ -1046,11 +1059,16 @@ struct xe_pagemap *xe_pagemap_create(struct xe_device *xe, struct xe_pagemap_cac
                goto out_err;
        }
 
+       err = drm_pagemap_acquire_owner(&xpagemap->peer, &xe_owner_list,
+                                       xe_has_interconnect);
+       if (err)
+               goto out_err;
+
        pagemap->type = MEMORY_DEVICE_PRIVATE;
        pagemap->range.start = res->start;
        pagemap->range.end = res->end;
        pagemap->nr_range = 1;
-       pagemap->owner = xe_svm_devm_owner(xe);
+       pagemap->owner = xpagemap->peer.owner;
        pagemap->ops = drm_pagemap_pagemap_ops_get();
        addr = devm_memremap_pages(dev, pagemap);
        if (IS_ERR(addr)) {
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index 19469fd91666..3fd8fc125cba 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -10,6 +10,7 @@
 
 #include <drm/drm_pagemap.h>
 #include <drm/drm_gpusvm.h>
+#include <drm/drm_pagemap_util.h>
 
 #define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER
 
@@ -54,6 +55,7 @@ struct xe_svm_range {
  * @pagemap: The struct dev_pagemap providing the struct pages.
  * @dpagemap: The drm_pagemap managing allocation and migration.
  * @destroy_work: Handles asynchronous destruction and caching.
+ * @peer: Used for pagemap owner computation.
  * @hpa_base: The host physical address base for the managed memory.
  * @cache: Backpointer to the struct xe_pagemap_cache for the memory region.
  * @vr: Backpointer to the xe_vram region.
@@ -65,6 +67,7 @@ struct xe_pagemap {
        struct dev_pagemap pagemap;
        struct drm_pagemap dpagemap;
        struct delayed_work destroy_work;
+       struct drm_pagemap_peer peer;
        resource_size_t hpa_base;
        struct xe_pagemap_cache *cache;
        struct xe_vram_region *vr;
-- 
2.48.1
