Now that struct intel_dsb_buffer is opaque, each driver can have its own
definition of it, so we can drop the unnecessary struct i915_vma
indirection from the xe version. Only the struct xe_bo part is needed.

Signed-off-by: Jani Nikula <jani.nik...@intel.com>
---
 drivers/gpu/drm/xe/display/xe_dsb_buffer.c | 28 +++++++---------------
 1 file changed, 8 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index d55858705106..fa0acb11eaad 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -3,7 +3,6 @@
  * Copyright 2023, Intel Corporation.
  */
 
-#include "i915_vma.h"
 #include "intel_dsb_buffer.h"
 #include "xe_bo.h"
 #include "xe_device.h"
@@ -11,30 +10,30 @@
 
 struct intel_dsb_buffer {
        u32 *cmd_buf;
-       struct i915_vma *vma;
+       struct xe_bo *bo;
        size_t buf_size;
 };
 
 u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
 {
-       return xe_bo_ggtt_addr(dsb_buf->vma->bo);
+       return xe_bo_ggtt_addr(dsb_buf->bo);
 }
 
 void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
 {
-       iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val);
+       iosys_map_wr(&dsb_buf->bo->vmap, idx * 4, u32, val);
 }
 
 u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
 {
-       return iosys_map_rd(&dsb_buf->vma->bo->vmap, idx * 4, u32);
+       return iosys_map_rd(&dsb_buf->bo->vmap, idx * 4, u32);
 }
 
 void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
 {
        WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
 
-       iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size);
+       iosys_map_memset(&dsb_buf->bo->vmap, idx * 4, val, size);
 }
 
 struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t size)
@@ -42,19 +41,12 @@ struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t
        struct xe_device *xe = to_xe_device(drm);
        struct intel_dsb_buffer *dsb_buf;
        struct xe_bo *obj;
-       struct i915_vma *vma;
        int ret;
 
        dsb_buf = kzalloc(sizeof(*dsb_buf), GFP_KERNEL);
        if (!dsb_buf)
                return ERR_PTR(-ENOMEM);
 
-       vma = kzalloc(sizeof(*vma), GFP_KERNEL);
-       if (!vma) {
-               ret = -ENOMEM;
-               goto err_vma;
-       }
-
        /* Set scanout flag for WC mapping */
        obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
                                        PAGE_ALIGN(size),
@@ -66,15 +58,12 @@ struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t
                goto err_pin_map;
        }
 
-       vma->bo = obj;
-       dsb_buf->vma = vma;
+       dsb_buf->bo = obj;
        dsb_buf->buf_size = size;
 
        return dsb_buf;
 
 err_pin_map:
-       kfree(vma);
-err_vma:
        kfree(dsb_buf);
 
        return ERR_PTR(ret);
@@ -82,14 +71,13 @@ struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t
 
 void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
 {
-       xe_bo_unpin_map_no_vm(dsb_buf->vma->bo);
-       kfree(dsb_buf->vma);
+       xe_bo_unpin_map_no_vm(dsb_buf->bo);
        kfree(dsb_buf);
 }
 
 void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
 {
-       struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
+       struct xe_device *xe = dsb_buf->bo->tile->xe;
 
        /*
         * The memory barrier here is to ensure coherency of DSB vs MMIO,
-- 
2.47.3
