From: Asahi Lina <[email protected]>

drm_gpuva objects have a flags field. Currently, this can be managed by
drivers out-of-band, without any special handling in drm_gpuvm.

To be able to introduce flags that do affect the logic in the drm_gpuvm
core, we need to plumb it through the map calls. This will allow the
core to check the flags on map and alter the merge/split logic depending
on the requested flags and the flags of the existing drm_gpuva ranges
that are being split.

Signed-off-by: Asahi Lina <[email protected]>
Signed-off-by: Caterina Shablia <[email protected]>
---
 drivers/gpu/drm/drm_gpuvm.c | 14 ++++++++++++--
 include/drm/drm_gpuvm.h     | 18 ++++++++++++++++++
 2 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c
index 4af7b71abcb4..0d9c821d1b34 100644
--- a/drivers/gpu/drm/drm_gpuvm.c
+++ b/drivers/gpu/drm/drm_gpuvm.c
@@ -2386,6 +2386,7 @@ static bool can_merge(struct drm_gpuvm *gpuvm, const struct drm_gpuva *va,
                .va.range = va->va.range,
                .gem.offset = va->gem.offset,
                .gem.obj = va->gem.obj,
+               .flags = va->flags,
        };
        const struct drm_gpuva_op_map *a = new_map, *b = &existing_map;
 
@@ -2395,6 +2396,10 @@ static bool can_merge(struct drm_gpuvm *gpuvm, const struct drm_gpuva *va,
        if (a->gem.obj != b->gem.obj || !a->gem.obj)
                return false;
 
+       /* For two VAs to be merged, their flags must be compatible */
+       if ((a->flags & VA_MERGE_MUST_MATCH_FLAGS) != (b->flags & VA_MERGE_MUST_MATCH_FLAGS))
+               return false;
+
        /* Order VAs for the rest of the checks. */
        if (a->va.addr > b->va.addr)
                swap(a, b);
@@ -2459,6 +2464,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
                                        .va.range = range - req_range,
                                        .gem.obj = obj,
                                        .gem.offset = offset + req_range,
+                                       .flags = va->flags,
                                };
                                struct drm_gpuva_op_unmap u = {
                                        .va = va,
@@ -2480,6 +2486,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
                                .va.range = ls_range,
                                .gem.obj = obj,
                                .gem.offset = offset,
+                               .flags = va->flags,
                        };
                        struct drm_gpuva_op_unmap u = { .va = va };
 
@@ -2519,8 +2526,8 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
                                        .va.addr = req_end,
                                        .va.range = end - req_end,
                                        .gem.obj = obj,
-                                       .gem.offset = offset + ls_range +
-                                                     req_range,
+                                       .gem.offset = offset + ls_range + req_range,
+                                       .flags = va->flags,
                                };
 
                                ret = op_remap_cb(ops, priv, &p, &n, &u);
@@ -2554,6 +2561,7 @@ __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm,
                                        .va.range = end - req_end,
                                        .gem.obj = obj,
                                        .gem.offset = offset + req_end - addr,
+                                       .flags = va->flags,
                                };
                                struct drm_gpuva_op_unmap u = {
                                        .va = va,
@@ -2605,6 +2613,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
                        prev.va.range = req_addr - addr;
                        prev.gem.obj = obj;
                        prev.gem.offset = offset;
+                       prev.flags = va->flags;
 
                        prev_split = true;
                }
@@ -2614,6 +2623,7 @@ __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm,
                        next.va.range = end - req_end;
                        next.gem.obj = obj;
                        next.gem.offset = offset + (req_end - addr);
+                       next.flags = va->flags;
 
                        next_split = true;
                }
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 6a6e64cd2cce..5bf37deb282d 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -63,6 +63,8 @@ enum drm_gpuva_flags {
        DRM_GPUVA_USERBITS = (1 << 2),
 };
 
+#define VA_MERGE_MUST_MATCH_FLAGS (DRM_GPUVA_SPARSE)
+
 /**
  * struct drm_gpuva - structure to track a GPU VA mapping
  *
@@ -886,6 +888,11 @@ struct drm_gpuva_op_map {
                 */
                struct drm_gem_object *obj;
        } gem;
+
+       /**
+        * @flags: requested flags for the &drm_gpuva for this mapping
+        */
+       enum drm_gpuva_flags flags;
 };
 
 /**
@@ -1124,6 +1131,7 @@ void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
 static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
                                          const struct drm_gpuva_op_map *op)
 {
+       va->flags = op->flags;
        va->va.addr = op->va.addr;
        va->va.range = op->va.range;
        va->gem.obj = op->gem.obj;
@@ -1249,6 +1257,16 @@ struct drm_gpuvm_ops {
         * used.
         */
        int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
+
+       /**
+        * @sm_can_merge_flags: called during &drm_gpuvm_sm_map
+        *
+        * This callback is called to determine whether two va ranges can be merged,
+        * based on their flags.
+        *
+        * If NULL, va ranges can only be merged if their flags are equal.
+        */
+       bool (*sm_can_merge_flags)(enum drm_gpuva_flags a, enum drm_gpuva_flags b);
 };
 
 int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
-- 
2.53.0

Reply via email to