On 01.11.24 00:43, Matthew Brost wrote:
On Thu, Oct 31, 2024 at 11:10:42AM -0700, Matthew Brost wrote:
Non-contiguous VRAM cannot easily be mapped in TTM, nor can non-visible
VRAM easily be accessed. Add ttm_bo_access, which is similar to
ttm_bo_vm_access, to access such memory.
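
For illustration, a minimal sketch of how a driver-side caller might use the
new helper (the wrapper name example_read_bo() and the devcoredump context
are assumptions for this example, not part of the patch):

  #include <linux/errno.h>
  #include <drm/ttm/ttm_bo.h>

  /*
   * Hypothetical caller: read @size bytes from @bo at @offset into a
   * caller-provided kernel buffer, e.g. from a devcoredump path where the
   * VRAM backing the BO is non-contiguous or not CPU-visible.
   */
  static int example_read_bo(struct ttm_buffer_object *bo,
                             unsigned long offset, void *buf, int size)
  {
          int ret;

          /*
           * ttm_bo_access() reserves the BO internally, kmaps system/TT
           * placements page by page and falls back to the driver's
           * access_memory() callback for other (e.g. VRAM) placements.
           */
          ret = ttm_bo_access(bo, offset, buf, size, 0 /* read */);
          if (ret < 0)
                  return ret;

          /* On success the helper returns the number of bytes accessed. */
          return ret == size ? 0 : -EIO;
  }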

v4:
  - Fix checkpatch warnings (CI)
v5:
  - Fix checkpatch warnings (CI)
v6:
  - Fix kernel doc (Auld)

Christian - Do you mind if I merge this patch along with the rest of the
series to drm-xe-next?

I don't see the original patch anywhere in my inbox; please make sure to CC me when sending things out.

Apart from that, I absolutely don't see any justification for this patch. You move stuff into ttm_bo_util.c which doesn't even remotely belong there.

Regards,
Christian.


Matt

Reported-by: Christoph Manszewski <christoph.manszew...@intel.com>
Suggested-by: Thomas Hellström <thomas.hellst...@linux.intel.com>
Signed-off-by: Matthew Brost <matthew.br...@intel.com>
Tested-by: Mika Kuoppala <mika.kuopp...@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.a...@intel.com>
---
  drivers/gpu/drm/ttm/ttm_bo_util.c | 86 +++++++++++++++++++++++++++++++
  drivers/gpu/drm/ttm/ttm_bo_vm.c   | 65 +----------------------
  include/drm/ttm/ttm_bo.h          |  2 +
  3 files changed, 89 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d939925efa81..77e760ea7193 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -919,3 +919,89 @@ s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
return progress;
  }
+
+static int ttm_bo_access_kmap(struct ttm_buffer_object *bo,
+                             unsigned long offset,
+                             void *buf, int len, int write)
+{
+       unsigned long page = offset >> PAGE_SHIFT;
+       unsigned long bytes_left = len;
+       int ret;
+
+       /* Copy a page at a time, that way no extra virtual address
+        * mapping is needed
+        */
+       offset -= page << PAGE_SHIFT;
+       do {
+               unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
+               struct ttm_bo_kmap_obj map;
+               void *ptr;
+               bool is_iomem;
+
+               ret = ttm_bo_kmap(bo, page, 1, &map);
+               if (ret)
+                       return ret;
+
+               ptr = (void *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
+               WARN_ON_ONCE(is_iomem);
+               if (write)
+                       memcpy(ptr, buf, bytes);
+               else
+                       memcpy(buf, ptr, bytes);
+               ttm_bo_kunmap(&map);
+
+               page++;
+               buf += bytes;
+               bytes_left -= bytes;
+               offset = 0;
+       } while (bytes_left);
+
+       return len;
+}
+
+/**
+ * ttm_bo_access - Helper to access a buffer object
+ *
+ * @bo: ttm buffer object
+ * @offset: access offset into buffer object
+ * @buf: pointer to caller memory to read into or write from
+ * @len: length of access
+ * @write: write access
+ *
+ * Utility function to access a buffer object. Useful when a buffer object
+ * cannot be easily mapped (non-contiguous, non-visible, etc...).
+ *
+ * Returns:
+ * @len if successful, negative error code on failure.
+ */
+int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
+                 void *buf, int len, int write)
+{
+       int ret;
+
+       if (len < 1 || (offset + len) > bo->base.size)
+               return -EIO;
+
+       ret = ttm_bo_reserve(bo, true, false, NULL);
+       if (ret)
+               return ret;
+
+       switch (bo->resource->mem_type) {
+       case TTM_PL_SYSTEM:
+               fallthrough;
+       case TTM_PL_TT:
+               ret = ttm_bo_access_kmap(bo, offset, buf, len, write);
+               break;
+       default:
+               if (bo->bdev->funcs->access_memory)
+                       ret = bo->bdev->funcs->access_memory
+                               (bo, offset, buf, len, write);
+               else
+                       ret = -EIO;
+       }
+
+       ttm_bo_unreserve(bo);
+
+       return ret;
+}
+EXPORT_SYMBOL(ttm_bo_access);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 2c699ed1963a..20b1e5f78684 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -366,45 +366,6 @@ void ttm_bo_vm_close(struct vm_area_struct *vma)
  }
  EXPORT_SYMBOL(ttm_bo_vm_close);
-static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
-                                unsigned long offset,
-                                uint8_t *buf, int len, int write)
-{
-       unsigned long page = offset >> PAGE_SHIFT;
-       unsigned long bytes_left = len;
-       int ret;
-
-       /* Copy a page at a time, that way no extra virtual address
-        * mapping is needed
-        */
-       offset -= page << PAGE_SHIFT;
-       do {
-               unsigned long bytes = min(bytes_left, PAGE_SIZE - offset);
-               struct ttm_bo_kmap_obj map;
-               void *ptr;
-               bool is_iomem;
-
-               ret = ttm_bo_kmap(bo, page, 1, &map);
-               if (ret)
-                       return ret;
-
-               ptr = (uint8_t *)ttm_kmap_obj_virtual(&map, &is_iomem) + offset;
-               WARN_ON_ONCE(is_iomem);
-               if (write)
-                       memcpy(ptr, buf, bytes);
-               else
-                       memcpy(buf, ptr, bytes);
-               ttm_bo_kunmap(&map);
-
-               page++;
-               buf += bytes;
-               bytes_left -= bytes;
-               offset = 0;
-       } while (bytes_left);
-
-       return len;
-}
-
  int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
                     void *buf, int len, int write)
  {
@@ -412,32 +373,8 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
        unsigned long offset = (addr) - vma->vm_start +
                ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
                 << PAGE_SHIFT);
-       int ret;
-
-       if (len < 1 || (offset + len) > bo->base.size)
-               return -EIO;
-
-       ret = ttm_bo_reserve(bo, true, false, NULL);
-       if (ret)
-               return ret;
-
-       switch (bo->resource->mem_type) {
-       case TTM_PL_SYSTEM:
-               fallthrough;
-       case TTM_PL_TT:
-               ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
-               break;
-       default:
-               if (bo->bdev->funcs->access_memory)
-                       ret = bo->bdev->funcs->access_memory(
-                               bo, offset, buf, len, write);
-               else
-                       ret = -EIO;
-       }
-
-       ttm_bo_unreserve(bo);
-
-       return ret;
+       return ttm_bo_access(bo, offset, buf, len, write);
  }
  EXPORT_SYMBOL(ttm_bo_vm_access);
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 5804408815be..8ea11cd8df39 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -421,6 +421,8 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo);
  int ttm_bo_evict_first(struct ttm_device *bdev,
                       struct ttm_resource_manager *man,
                       struct ttm_operation_ctx *ctx);
+int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
+                 void *buf, int len, int write);
  vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
                             struct vm_fault *vmf);
  vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
--
2.34.1

