On Sat, 09 Mar 2019, Chris Wilson <ch...@chris-wilson.co.uk> wrote:
> In the next patch, we will want to update live state within a context.
> As this state may be in use by the GPU and we haven't been explicitly
> tracking its activity, we instead attach it to a request: we send the
> context setup down with its new state and, on retiring that request,
> clean up the old state, as we then know that it is no longer live.
>
> Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
> Reviewed-by: Tvrtko Ursulin <tvrtko.ursu...@intel.com>
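
For orientation, the barrier described above is meant to be driven by a
caller along these lines. This is a hypothetical sketch based only on the
quoted description: context_barrier_task() and ALL_ENGINES come from the
patch below, while swap_context_state(), free_old_state() and the
old_state argument are invented for illustration.

static void free_old_state(void *data)
{
	/* Runs from cb_retire() once the barrier request has retired,
	 * i.e. once the GPU can no longer be using the old state. */
	kfree(data);
}

static int swap_context_state(struct i915_gem_context *ctx, void *old_state)
{
	int err;

	/* Emit a barrier request on every engine the context is active
	 * on; the callback fires only after the last of them retires. */
	err = context_barrier_task(ctx, ALL_ENGINES,
				   free_old_state, old_state);
	if (err)
		kfree(old_state); /* on error the callback is not queued,
				   * so the caller unwinds itself */

	return err;
}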

This, or more precisely commit 85fddf0b0027 ("drm/i915: Introduce a
context barrier callback"), breaks the build for

CONFIG_DRM_I915_WERROR=y
CONFIG_DRM_I915_SELFTEST=n

with

  CC [M]  drivers/gpu/drm/i915/i915_gem_context.o
drivers/gpu/drm/i915/i915_gem_context.c:698:12: error: ‘context_barrier_task’ defined but not used [-Werror=unused-function]
 static int context_barrier_task(struct i915_gem_context *ctx,
            ^~~~~~~~~~~~~~~~~~~~

Please fix.
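
For what it's worth, one way to keep this point in the series building
with CONFIG_DRM_I915_SELFTEST=n would be to annotate the helper until the
next patch adds its first non-selftest caller. A sketch only, not
necessarily the fix that ends up being applied:

/*
 * Sketch: with CONFIG_DRM_I915_SELFTEST=n the selftest is the helper's
 * only caller and is compiled out, so -Werror=unused-function fires.
 * Marking it __maybe_unused silences that; the annotation would be
 * dropped again by the follow-up patch that adds a real user.
 */
static int __maybe_unused
context_barrier_task(struct i915_gem_context *ctx,
		     unsigned long engines,
		     void (*task)(void *data),
		     void *data)
{
	/* body as in the quoted patch */
}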


BR,
Jani.

> ---
>  drivers/gpu/drm/i915/i915_gem_context.c       |  74 ++++++++++++
>  .../gpu/drm/i915/selftests/i915_gem_context.c | 106 ++++++++++++++++++
>  2 files changed, 180 insertions(+)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index f9a21a891aa4..b6370225dcb5 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -677,6 +677,80 @@ last_request_on_engine(struct i915_timeline *timeline,
>       return NULL;
>  }
>  
> +struct context_barrier_task {
> +     struct i915_active base;
> +     void (*task)(void *data);
> +     void *data;
> +};
> +
> +static void cb_retire(struct i915_active *base)
> +{
> +     struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
> +
> +     if (cb->task)
> +             cb->task(cb->data);
> +
> +     i915_active_fini(&cb->base);
> +     kfree(cb);
> +}
> +
> +I915_SELFTEST_DECLARE(static unsigned long context_barrier_inject_fault);
> +static int context_barrier_task(struct i915_gem_context *ctx,
> +                             unsigned long engines,
> +                             void (*task)(void *data),
> +                             void *data)
> +{
> +     struct drm_i915_private *i915 = ctx->i915;
> +     struct context_barrier_task *cb;
> +     struct intel_context *ce;
> +     intel_wakeref_t wakeref;
> +     int err = 0;
> +
> +     lockdep_assert_held(&i915->drm.struct_mutex);
> +     GEM_BUG_ON(!task);
> +
> +     cb = kmalloc(sizeof(*cb), GFP_KERNEL);
> +     if (!cb)
> +             return -ENOMEM;
> +
> +     i915_active_init(i915, &cb->base, cb_retire);
> +     i915_active_acquire(&cb->base);
> +
> +     wakeref = intel_runtime_pm_get(i915);
> +     list_for_each_entry(ce, &ctx->active_engines, active_link) {
> +             struct intel_engine_cs *engine = ce->engine;
> +             struct i915_request *rq;
> +
> +             if (!(ce->engine->mask & engines))
> +                     continue;
> +
> +             if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
> +                                    engine->mask)) {
> +                     err = -ENXIO;
> +                     break;
> +             }
> +
> +             rq = i915_request_alloc(engine, ctx);
> +             if (IS_ERR(rq)) {
> +                     err = PTR_ERR(rq);
> +                     break;
> +             }
> +
> +             err = i915_active_ref(&cb->base, rq->fence.context, rq);
> +             i915_request_add(rq);
> +             if (err)
> +                     break;
> +     }
> +     intel_runtime_pm_put(i915, wakeref);
> +
> +     cb->task = err ? NULL : task; /* caller needs to unwind instead */
> +     cb->data = data;
> +
> +     i915_active_release(&cb->base);
> +
> +     return err;
> +}
> +
>  int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
>                                     unsigned long mask)
>  {
> diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
> index 5b8614b2fbe4..4399ef9ebf15 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
> @@ -1594,10 +1594,116 @@ static int igt_switch_to_kernel_context(void *arg)
>       return err;
>  }
>  
> +static void mock_barrier_task(void *data)
> +{
> +     unsigned int *counter = data;
> +
> +     ++*counter;
> +}
> +
> +static int mock_context_barrier(void *arg)
> +{
> +#undef pr_fmt
> +#define pr_fmt(x) "context_barrier_task():" # x
> +     struct drm_i915_private *i915 = arg;
> +     struct i915_gem_context *ctx;
> +     struct i915_request *rq;
> +     intel_wakeref_t wakeref;
> +     unsigned int counter;
> +     int err;
> +
> +     /*
> +      * The context barrier provides us with a callback after it emits
> +      * a request; useful for retiring old state after loading new.
> +      */
> +
> +     mutex_lock(&i915->drm.struct_mutex);
> +
> +     ctx = mock_context(i915, "mock");
> +     if (IS_ERR(ctx)) {
> +             err = PTR_ERR(ctx);
> +             goto unlock;
> +     }
> +
> +     counter = 0;
> +     err = context_barrier_task(ctx, 0, mock_barrier_task, &counter);
> +     if (err) {
> +             pr_err("Failed at line %d, err=%d\n", __LINE__, err);
> +             goto out;
> +     }
> +     if (counter == 0) {
> +             pr_err("Did not retire immediately with 0 engines\n");
> +             err = -EINVAL;
> +             goto out;
> +     }
> +
> +     counter = 0;
> +     err = context_barrier_task(ctx,
> +                                ALL_ENGINES, mock_barrier_task, &counter);
> +     if (err) {
> +             pr_err("Failed at line %d, err=%d\n", __LINE__, err);
> +             goto out;
> +     }
> +     if (counter == 0) {
> +             pr_err("Did not retire immediately for all inactive engines\n");
> +             err = -EINVAL;
> +             goto out;
> +     }
> +
> +     rq = ERR_PTR(-ENODEV);
> +     with_intel_runtime_pm(i915, wakeref)
> +             rq = i915_request_alloc(i915->engine[RCS0], ctx);
> +     if (IS_ERR(rq)) {
> +             err = PTR_ERR(rq);
> +             pr_err("Request allocation failed!\n");
> +             goto out;
> +     }
> +     i915_request_add(rq);
> +     GEM_BUG_ON(list_empty(&ctx->active_engines));
> +
> +     counter = 0;
> +     context_barrier_inject_fault = BIT(RCS0);
> +     err = context_barrier_task(ctx,
> +                                ALL_ENGINES, mock_barrier_task, &counter);
> +     context_barrier_inject_fault = 0;
> +     if (err == -ENXIO)
> +             err = 0;
> +     else
> +             pr_err("Did not hit fault injection!\n");
> +     if (counter != 0) {
> +             pr_err("Invoked callback on error!\n");
> +             err = -EIO;
> +     }
> +     if (err)
> +             goto out;
> +
> +     counter = 0;
> +     err = context_barrier_task(ctx,
> +                                ALL_ENGINES, mock_barrier_task, &counter);
> +     if (err) {
> +             pr_err("Failed at line %d, err=%d\n", __LINE__, err);
> +             goto out;
> +     }
> +     mock_device_flush(i915);
> +     if (counter == 0) {
> +             pr_err("Did not retire on each active engines\n");
> +             err = -EINVAL;
> +             goto out;
> +     }
> +
> +out:
> +     mock_context_close(ctx);
> +unlock:
> +     mutex_unlock(&i915->drm.struct_mutex);
> +     return err;
> +#undef pr_fmt
> +#define pr_fmt(x) x
> +}
> +
>  int i915_gem_context_mock_selftests(void)
>  {
>       static const struct i915_subtest tests[] = {
>               SUBTEST(igt_switch_to_kernel_context),
> +             SUBTEST(mock_context_barrier),
>       };
>       struct drm_i915_private *i915;
>       int err;

-- 
Jani Nikula, Intel Open Source Graphics Center