From: Thomas Hellström <thomas.hellst...@linux.intel.com>

Add support for mapping device pages to Xe SVM by attaching a
drm_pagemap to a memory region, which is then linked to a GPU SVM
devmem allocation. This enables GPU SVM to derive the device physical
address of a device page.
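
For illustration, the translation implemented below
(xe_vram_region_page_to_dpa()) reduces to rebasing the page's host
physical address from the remapped region's host base onto the VRAM
base. A minimal standalone sketch of that arithmetic, assuming 4K
pages and made-up base values (the driver reads the real bases from
struct xe_vram_region; pfn_to_dpa() is a hypothetical name, not a
driver function):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12 /* assumed 4K pages */

    /* Rebase a host pfn backing a ZONE_DEVICE page onto the device's
     * VRAM physical address space. */
    static uint64_t pfn_to_dpa(uint64_t pfn, uint64_t hpa_base,
                               uint64_t dpa_base)
    {
            uint64_t offset = (pfn << PAGE_SHIFT) - hpa_base;

            return dpa_base + offset;
    }

    int main(void)
    {
            /* Hypothetical bases, for illustration only. */
            uint64_t hpa_base = 0x4000000000ull;
            uint64_t dpa_base = 0x0ull;
            uint64_t pfn = (hpa_base + 0x10000) >> PAGE_SHIFT;

            printf("dpa = 0x%llx\n",
                   (unsigned long long)pfn_to_dpa(pfn, hpa_base,
                                                  dpa_base));
            return 0;
    }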

v3:
 - Better commit message (Thomas)
 - New drm_pagemap.h location
v5:
 - s/xe_mem_region/xe_vram_region (Rebase)

Signed-off-by: Matthew Brost <matthew.br...@intel.com>
Signed-off-by: Thomas Hellström <thomas.hellst...@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.br...@intel.com>
---
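
Not part of the commit, a hedged sketch of how a consumer such as GPU
SVM might invoke the new .device_map hook added below. The helper name
example_query_daddr() is hypothetical, and the .addr field of struct
drm_pagemap_device_addr is an assumption; see drm_pagemap.h for the
authoritative layout:

    /* Query the device address of a (possibly migrated) page. For
     * local VRAM the callback returns a device physical address
     * tagged XE_INTERCONNECT_VRAM; a foreign device that cannot reach
     * this memory gets DMA_MAPPING_ERROR instead. */
    static dma_addr_t example_query_daddr(struct drm_pagemap *dpagemap,
                                          struct device *dev,
                                          struct page *page)
    {
            struct drm_pagemap_device_addr daddr =
                    dpagemap->ops->device_map(dpagemap, dev, page, 0,
                                              DMA_BIDIRECTIONAL);

            return daddr.addr; /* assumed field name */
    }

Order 0 requests a single-page mapping; a higher order would cover
2^order contiguous pages in one encoded address.
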
 drivers/gpu/drm/xe/xe_device_types.h |  6 +++
 drivers/gpu/drm/xe/xe_svm.c          | 57 ++++++++++++++++++++++++++++
 2 files changed, 63 insertions(+)

diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index 00873d80b10d..15399fcb2bd7 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -10,6 +10,7 @@
 
 #include <drm/drm_device.h>
 #include <drm/drm_file.h>
+#include <drm/drm_pagemap.h>
 #include <drm/ttm/ttm_device.h>
 
 #include "xe_devcoredump_types.h"
@@ -108,6 +109,11 @@ struct xe_vram_region {
        void __iomem *mapping;
        /** @pagemap: Used to remap device memory as ZONE_DEVICE */
        struct dev_pagemap pagemap;
+       /**
+        * @dpagemap: The struct drm_pagemap of the ZONE_DEVICE memory
+        * pages of this tile.
+        */
+       struct drm_pagemap dpagemap;
        /**
         * @hpa_base: base host physical address
         *
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index b1947c75c249..5746f0342906 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -292,6 +292,33 @@ static void xe_svm_garbage_collector_work_func(struct work_struct *w)
        up_write(&vm->lock);
 }
 
+static struct xe_vram_region *page_to_vr(struct page *page)
+{
+       return container_of(page->pgmap, struct xe_vram_region, pagemap);
+}
+
+static struct xe_tile *vr_to_tile(struct xe_vram_region *vr)
+{
+       return container_of(vr, struct xe_tile, mem.vram);
+}
+
+static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
+                                    struct page *page)
+{
+       u64 dpa;
+       struct xe_tile *tile = vr_to_tile(vr);
+       u64 pfn = page_to_pfn(page);
+       u64 offset;
+
+       xe_tile_assert(tile, is_device_private_page(page));
+       xe_tile_assert(tile, (pfn << PAGE_SHIFT) >= vr->hpa_base);
+
+       offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
+       dpa = vr->dpa_base + offset;
+
+       return dpa;
+}
+
 static const struct drm_gpusvm_ops gpusvm_ops = {
        .range_alloc = xe_svm_range_alloc,
        .range_free = xe_svm_range_free,
@@ -460,6 +487,32 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
 }
 
 #if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR)
+static struct drm_pagemap_device_addr
+xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
+                         struct device *dev,
+                         struct page *page,
+                         unsigned int order,
+                         enum dma_data_direction dir)
+{
+       struct device *pgmap_dev = dpagemap->dev;
+       enum drm_interconnect_protocol prot;
+       dma_addr_t addr;
+
+       if (pgmap_dev == dev) {
+               addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
+               prot = XE_INTERCONNECT_VRAM;
+       } else {
+               addr = DMA_MAPPING_ERROR;
+               prot = 0;
+       }
+
+       return drm_pagemap_device_addr_encode(addr, prot, order, dir);
+}
+
+static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
+       .device_map = xe_drm_pagemap_device_map,
+};
+
 /**
  * xe_devm_add: Remap and provide memmap backing for device memory
  * @tile: tile that the memory region belongs to
@@ -492,6 +545,10 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
        vr->pagemap.ops = drm_gpusvm_pagemap_ops_get();
        vr->pagemap.owner = xe_svm_devm_owner(xe);
        addr = devm_memremap_pages(dev, &vr->pagemap);
+
+       vr->dpagemap.dev = dev;
+       vr->dpagemap.ops = &xe_drm_pagemap_ops;
+
        if (IS_ERR(addr)) {
                devm_release_mem_region(dev, res->start, resource_size(res));
                ret = PTR_ERR(addr);
-- 
2.34.1
