On 19/10/2024 20:20, Matthew Brost wrote:
xe_bo_vmap() only works on contiguous BOs, so disallow xe_bo_vmap() on a BO
unless we are certain the BO is contiguous.

Signed-off-by: Matthew Brost <matthew.br...@intel.com>
---
  drivers/gpu/drm/xe/xe_bo.c | 23 ++++++++++++++---------
  1 file changed, 14 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 0a7b91df69c2..46c640f8db9e 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -162,6 +162,15 @@ static void try_add_system(struct xe_device *xe, struct xe_bo *bo,
        }
 }
 
+static bool force_contiguous(u32 bo_flags)
+{
+       /*
+        * For eviction / restore on suspend / resume objects pinned in VRAM
+        * must be contiguous, also only contiguous BOs support xe_bo_vmap.
+        */
+       return bo_flags & (XE_BO_FLAG_PINNED | XE_BO_FLAG_GGTT);
+}
+
 static void add_vram(struct xe_device *xe, struct xe_bo *bo,
                     struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
  {
@@ -175,12 +184,7 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
        xe_assert(xe, vram && vram->usable_size);
        io_size = vram->io_size;
 
-       /*
-        * For eviction / restore on suspend / resume objects
-        * pinned in VRAM must be contiguous
-        */
-       if (bo_flags & (XE_BO_FLAG_PINNED |
-                       XE_BO_FLAG_GGTT))
+       if (force_contiguous(bo_flags))
                place.flags |= TTM_PL_FLAG_CONTIGUOUS;
 
        if (io_size < vram->usable_size) {
@@ -212,8 +216,7 @@ static void try_add_stolen(struct xe_device *xe, struct xe_bo *bo,
 
                bo->placements[*c] = (struct ttm_place) {
                        .mem_type = XE_PL_STOLEN,
-                       .flags = bo_flags & (XE_BO_FLAG_PINNED |
-                                            XE_BO_FLAG_GGTT) ?
+                       .flags = force_contiguous(bo_flags) ?
                                TTM_PL_FLAG_CONTIGUOUS : 0,
                };
                *c += 1;
@@ -2024,13 +2027,15 @@ dma_addr_t xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
 
 int xe_bo_vmap(struct xe_bo *bo)
 {
+       struct xe_device *xe = ttm_to_xe_device(bo->ttm.bdev);
        void *virtual;
        bool is_iomem;
        int ret;
 
        xe_bo_assert_held(bo);
 
-       if (!(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS))
+       if (drm_WARN_ON(&xe->drm, !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS) ||
+                       !force_contiguous(bo->flags)))

Hmm, I was confused about why this is not popping in CI for the clear color case, but it looks like that path is calling ttm_bo_kmap() directly...
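Roughly what I mean, as a sketch rather than the actual display code (the function name and locals below are made up for illustration; only the ttm_bo_kmap()/ttm_kmap_obj_virtual()/ttm_bo_kunmap() calls are real):

    #include <drm/ttm/ttm_bo.h>

    /*
     * Illustrative sketch only: a path that maps a BO straight through
     * ttm_bo_kmap() never enters xe_bo_vmap(), so the new drm_WARN_ON()
     * there cannot fire for it.
     */
    static int kmap_one_page_like_clear_color(struct ttm_buffer_object *tbo)
    {
            struct ttm_bo_kmap_obj map;
            bool is_iomem;
            void *vaddr;
            int ret;

            ret = ttm_bo_kmap(tbo, 0, 1, &map);     /* bypasses xe_bo_vmap() */
            if (ret)
                    return ret;

            vaddr = ttm_kmap_obj_virtual(&map, &is_iomem);
            /* ... access the single mapped page through vaddr ... */

            ttm_bo_kunmap(&map);
            return 0;
    }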

Anyway, for this patch,
Reviewed-by: Matthew Auld <matthew.a...@intel.com>

                return -EINVAL;
 
        if (!iosys_map_is_null(&bo->vmap))
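As a side note, a minimal sketch of the caller-side contract this implies (not from the patch; xe_bo_create_pin_map() is assumed here to pin and CPU-map the object internally, and the flag names are taken from the current xe headers): a kernel-internal BO that wants a CPU mapping now needs flags that satisfy both the NEEDS_CPU_ACCESS check and force_contiguous(), e.g.:

    struct xe_bo *bo;

    /*
     * Sketch only, error handling trimmed: XE_BO_FLAG_GGTT (or
     * XE_BO_FLAG_PINNED) makes force_contiguous() true, and
     * XE_BO_FLAG_NEEDS_CPU_ACCESS is still required, so the vmap done
     * for this object passes the new check.
     */
    bo = xe_bo_create_pin_map(xe, tile, NULL, SZ_4K, ttm_bo_type_kernel,
                              XE_BO_FLAG_VRAM_IF_DGFX(tile) |
                              XE_BO_FLAG_GGTT |
                              XE_BO_FLAG_NEEDS_CPU_ACCESS);
    if (IS_ERR(bo))
            return PTR_ERR(bo);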
