i915_gem_object_pin_map() needs the ww lock, so make sure we pin both the batch and scratch objects before submission.

Signed-off-by: Maarten Lankhorst <maarten.lankho...@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellst...@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |  3 +
 drivers/gpu/drm/i915/gem/i915_gem_pages.c     | 12 +++
 .../gpu/drm/i915/gt/selftest_workarounds.c    | 95 +++++++++++++------
 3 files changed, 80 insertions(+), 30 deletions(-)
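
For reference, a minimal sketch of the ww-lock retry pattern the selftests move to below (not part of the patch; obj/ce stand in for the batch or scratch object and the context being pinned, and the request-building and error paths are trimmed):

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);
retry:
	/* Take the object lock through the ww context first... */
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = intel_context_pin_ww(ce, &ww);
	if (!err) {
		/* ...so that pin_map runs with the ww lock held. */
		void *map = i915_gem_object_pin_map(obj, I915_MAP_WB);

		if (IS_ERR(map)) {
			err = PTR_ERR(map);
		} else {
			/* build and submit the request, read back results, ... */
			i915_gem_object_unpin_map(obj);
		}
		intel_context_unpin(ce);
	}
	if (err == -EDEADLK) {
		/* Deadlock avoidance: drop all held locks and retry. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
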

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 6c3f75adb53c..983f2d4b2a85 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -437,6 +437,9 @@ void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
 void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                                           enum i915_map_type type);
 
+void *__must_check i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
+                                                   enum i915_map_type type);
+
 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index e947d4c0da1f..a24617af3c93 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -400,6 +400,18 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
        goto out_unlock;
 }
 
+void *i915_gem_object_pin_map_unlocked(struct drm_i915_gem_object *obj,
+                                      enum i915_map_type type)
+{
+       void *ret;
+
+       i915_gem_object_lock(obj, NULL);
+       ret = i915_gem_object_pin_map(obj, type);
+       i915_gem_object_unlock(obj);
+
+       return ret;
+}
+
 void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
                                 unsigned long offset,
                                 unsigned long size)
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index de6136bd10ac..a508614b2fd5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -103,15 +103,13 @@ read_nonprivs(struct intel_context *ce, struct i915_vma *result)
        int err;
        int i;
 
-       rq = intel_context_create_request(ce);
+       rq = i915_request_create(ce);
        if (IS_ERR(rq))
                return rq;
 
-       i915_vma_lock(result);
        err = i915_request_await_object(rq, result->obj, true);
        if (err == 0)
                err = i915_vma_move_to_active(result, rq, EXEC_OBJECT_WRITE);
-       i915_vma_unlock(result);
        if (err)
                goto err_rq;
 
@@ -176,10 +174,11 @@ static int check_whitelist(struct intel_context *ce)
        u32 *vaddr;
        int i;
 
-       result = __vm_create_scratch_for_read(&engine->gt->ggtt->vm, PAGE_SIZE);
+       result = __vm_create_scratch_for_read_pinned(&engine->gt->ggtt->vm, PAGE_SIZE);
        if (IS_ERR(result))
                return PTR_ERR(result);
 
+       i915_gem_object_lock(result->obj, NULL);
        vaddr = i915_gem_object_pin_map(result->obj, I915_MAP_WB);
        if (IS_ERR(vaddr)) {
                err = PTR_ERR(vaddr);
@@ -219,6 +218,8 @@ static int check_whitelist(struct intel_context *ce)
 out_map:
        i915_gem_object_unpin_map(result->obj);
 out_put:
+       i915_vma_unpin(result);
+       i915_gem_object_unlock(result->obj);
        i915_vma_put(result);
        return err;
 }
@@ -279,10 +280,14 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
        if (IS_ERR(ce))
                return PTR_ERR(ce);
 
-       err = igt_spinner_init(&spin, engine->gt);
+       err = intel_context_pin(ce);
        if (err)
                goto out_ctx;
 
+       err = igt_spinner_init(&spin, engine->gt);
+       if (err)
+               goto out_unpin;
+
        err = check_whitelist(ce);
        if (err) {
                pr_err("Invalid whitelist *before* %s reset!\n", name);
@@ -315,6 +320,13 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
                err = PTR_ERR(tmp);
                goto out_spin;
        }
+       err = intel_context_pin(tmp);
+       if (err) {
+               intel_context_put(tmp);
+               goto out_spin;
+       }
+
+       intel_context_unpin(ce);
        intel_context_put(ce);
        ce = tmp;
 
@@ -327,6 +339,8 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
 
 out_spin:
        igt_spinner_fini(&spin);
+out_unpin:
+       intel_context_unpin(ce);
 out_ctx:
        intel_context_put(ce);
        return err;
@@ -475,6 +489,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
 
        for (i = 0; i < engine->whitelist.count; i++) {
                u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+               struct i915_gem_ww_ctx ww;
                u64 addr = scratch->node.start;
                struct i915_request *rq;
                u32 srm, lrm, rsvd;
@@ -490,6 +505,29 @@ static int check_dirty_whitelist(struct intel_context *ce)
 
                ro_reg = ro_register(reg);
 
+               i915_gem_ww_ctx_init(&ww, false);
+retry:
+               cs = NULL;
+               err = i915_gem_object_lock(scratch->obj, &ww);
+               if (!err)
+                       err = i915_gem_object_lock(batch->obj, &ww);
+               if (!err)
+                       err = intel_context_pin_ww(ce, &ww);
+               if (err)
+                       goto out;
+
+               cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+               if (IS_ERR(cs)) {
+                       err = PTR_ERR(cs);
+                       goto out_ctx;
+               }
+
+               results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+               if (IS_ERR(results)) {
+                       err = PTR_ERR(results);
+                       goto out_unmap_batch;
+               }
+
                /* Clear non priv flags */
                reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK;
 
@@ -501,12 +539,6 @@ static int check_dirty_whitelist(struct intel_context *ce)
                pr_debug("%s: Writing garbage to %x\n",
                         engine->name, reg);
 
-               cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
-               if (IS_ERR(cs)) {
-                       err = PTR_ERR(cs);
-                       goto out_batch;
-               }
-
                /* SRM original */
                *cs++ = srm;
                *cs++ = reg;
@@ -553,11 +585,12 @@ static int check_dirty_whitelist(struct intel_context *ce)
                i915_gem_object_flush_map(batch->obj);
                i915_gem_object_unpin_map(batch->obj);
                intel_gt_chipset_flush(engine->gt);
+               cs = NULL;
 
-               rq = intel_context_create_request(ce);
+               rq = i915_request_create(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
-                       goto out_batch;
+                       goto out_unmap_scratch;
                }
 
                if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
@@ -566,20 +599,16 @@ static int check_dirty_whitelist(struct intel_context *ce)
                                goto err_request;
                }
 
-               i915_vma_lock(batch);
                err = i915_request_await_object(rq, batch->obj, false);
                if (err == 0)
                        err = i915_vma_move_to_active(batch, rq, 0);
-               i915_vma_unlock(batch);
                if (err)
                        goto err_request;
 
-               i915_vma_lock(scratch);
                err = i915_request_await_object(rq, scratch->obj, true);
                if (err == 0)
                        err = i915_vma_move_to_active(scratch, rq,
                                                      EXEC_OBJECT_WRITE);
-               i915_vma_unlock(scratch);
                if (err)
                        goto err_request;
 
@@ -595,13 +624,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
                        pr_err("%s: Futzing %x timedout; cancelling test\n",
                               engine->name, reg);
                        intel_gt_set_wedged(engine->gt);
-                       goto out_batch;
-               }
-
-               results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
-               if (IS_ERR(results)) {
-                       err = PTR_ERR(results);
-                       goto out_batch;
+                       goto out_unmap_scratch;
                }
 
                GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
@@ -612,7 +635,7 @@ static int check_dirty_whitelist(struct intel_context *ce)
                                pr_err("%s: Unable to write to whitelisted register %x\n",
                                       engine->name, reg);
                                err = -EINVAL;
-                               goto out_unpin;
+                               goto out_unmap_scratch;
                        }
                } else {
                        rsvd = 0;
@@ -678,15 +701,27 @@ static int check_dirty_whitelist(struct intel_context *ce)
 
                        err = -EINVAL;
                }
-out_unpin:
+out_unmap_scratch:
                i915_gem_object_unpin_map(scratch->obj);
+out_unmap_batch:
+               if (cs)
+                       i915_gem_object_unpin_map(batch->obj);
+out_ctx:
+               intel_context_unpin(ce);
+out:
+               if (err == -EDEADLK) {
+                       err = i915_gem_ww_ctx_backoff(&ww);
+                       if (!err)
+                               goto retry;
+               }
+               i915_gem_ww_ctx_fini(&ww);
                if (err)
                        break;
        }
 
        if (igt_flush_test(engine->i915))
                err = -EIO;
-out_batch:
+
        i915_vma_unpin_and_release(&batch, 0);
 out_scratch:
        i915_vma_unpin_and_release(&scratch, 0);
@@ -820,7 +855,7 @@ static int scrub_whitelisted_registers(struct intel_context *ce)
        if (IS_ERR(batch))
                return PTR_ERR(batch);
 
-       cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+       cs = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC);
        if (IS_ERR(cs)) {
                err = PTR_ERR(cs);
                goto err_batch;
@@ -955,11 +990,11 @@ check_whitelisted_registers(struct intel_engine_cs *engine,
        u32 *a, *b;
        int i, err;
 
-       a = i915_gem_object_pin_map(A->obj, I915_MAP_WB);
+       a = i915_gem_object_pin_map_unlocked(A->obj, I915_MAP_WB);
        if (IS_ERR(a))
                return PTR_ERR(a);
 
-       b = i915_gem_object_pin_map(B->obj, I915_MAP_WB);
+       b = i915_gem_object_pin_map_unlocked(B->obj, I915_MAP_WB);
        if (IS_ERR(b)) {
                err = PTR_ERR(b);
                goto err_a;
-- 
2.31.0
