Chris Wilson <ch...@chris-wilson.co.uk> writes:

> The important condition that we need to check after enabling the
> interrupt for signaling is whether the request completed in the process
> (and so we missed that interrupt). A large cost in enabling the
> signaling (rather than waiters) is in waking up the auxiliary signaling
> thread, but we only need to do so to catch that missed interrupt. If we
> know we didn't miss any interrupts (because we didn't arm the interrupt)
> then we can skip waking the auxiliary thread.
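
For reference, my reading of the race this optimises for, as a toy
standalone C model (made-up names, not the i915 code): arm the
interrupt, then re-read the seqno; only if the request completed in
that window can an interrupt have been missed, and only then does the
auxiliary signaling thread need a manual kick.

	#include <stdbool.h>
	#include <stdio.h>

	static unsigned int hw_seqno;	/* pretend breadcrumb written by the GPU */
	static bool irq_enabled;

	/* Same wrap-around comparison as i915_seqno_passed(). */
	static bool seqno_passed(unsigned int cur, unsigned int target)
	{
		return (int)(cur - target) >= 0;
	}

	/* Returns true iff the caller must assume a completion interrupt was missed. */
	static bool arm_and_check(unsigned int target)
	{
		bool armed = !irq_enabled;

		if (armed)
			irq_enabled = true;	/* window in which the irq can be missed */

		/* If we did not arm the irq, nothing can have been missed. */
		return armed && seqno_passed(hw_seqno, target);
	}

	int main(void)
	{
		hw_seqno = 10;
		if (arm_and_check(10))
			printf("completed while arming: kick the signaling thread\n");
		else
			printf("no missed interrupt possible: skip the wakeup\n");
		return 0;
	}
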
>
> Signed-off-by: Chris Wilson <ch...@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahti...@linux.intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursu...@intel.com>
> ---
>  drivers/gpu/drm/i915/intel_breadcrumbs.c | 16 +++++++++++-----
>  1 file changed, 11 insertions(+), 5 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
> index 183afcb036aa..dbcad9f6b2d5 100644
> --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
> +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
> @@ -329,7 +329,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
>  {
>       struct intel_breadcrumbs *b = &engine->breadcrumbs;
>       struct rb_node **p, *parent, *completed;
> -     bool first;
> +     bool first, irq_armed;
>       u32 seqno;
>  
>       /* Insert the request into the retirement ordered list
> @@ -344,6 +344,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
>        * removing stale elements in the tree, we may be able to reduce the
>        * ping-pong between the old bottom-half and ourselves as first-waiter.
>        */
> +     irq_armed = false;
>       first = true;
>       parent = NULL;
>       completed = NULL;
> @@ -390,6 +391,7 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
>  
>       if (first) {
>               spin_lock(&b->irq_lock);
> +             irq_armed = !b->irq_armed;

This could use a comment.
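
Something along these lines would do, I think (wording just a
suggestion, to be adjusted to taste):

		/*
		 * Note whether this waiter is the one that will arm the
		 * user interrupt; only in that case can a completion
		 * interrupt be missed while we set up, so only then does
		 * the caller need to be told to recheck/wake the signaler.
		 */
		irq_armed = !b->irq_armed;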

>               b->irq_wait = wait;
>               /* After assigning ourselves as the new bottom-half, we must
>                * perform a cursory check to prevent a missed interrupt.
> @@ -426,20 +428,24 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
>       GEM_BUG_ON(!b->irq_armed);
>       GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);
>  
> -     return first;
> +     return irq_armed;
>  }
>  
>  bool intel_engine_add_wait(struct intel_engine_cs *engine,
>                          struct intel_wait *wait)
>  {
>       struct intel_breadcrumbs *b = &engine->breadcrumbs;
> -     bool first;
> +     bool irq_armed;
>  
>       spin_lock_irq(&b->rb_lock);
> -     first = __intel_engine_add_wait(engine, wait);
> +     irq_armed = __intel_engine_add_wait(engine, wait);
>       spin_unlock_irq(&b->rb_lock);
> +     if (irq_armed)
> +             return irq_armed;
>  
> -     return first;
> +     /* Make the caller recheck if its request has already started. */

This could be lifted to the function documentation to describe
what the return value actually means. But I am not insisting.
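
Something like this as kernel-doc, just a sketch based on my reading
of the patch:

/**
 * intel_engine_add_wait - queue a waiter for a breadcrumb on @engine
 * @engine: engine the request executes on
 * @wait: the waiter, carrying the target seqno
 *
 * Returns true if the caller should recheck the breadcrumb before
 * sleeping: either this waiter armed the user interrupt (and so may
 * have missed the completion interrupt while doing so), or the
 * request had already started executing when the waiter was queued.
 */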

Reviewed-by: Mika Kuoppala <mika.kuopp...@intel.com>

> +     return i915_seqno_passed(intel_engine_get_seqno(engine),
> +                              wait->seqno - 1);
>  }
>  
>  static inline bool chain_wakeup(struct rb_node *rb, int priority)
> -- 
> 2.11.0
>