On Wed, 2016-09-14 at 07:52 +0100, Chris Wilson wrote:
>  i915_next_seqno_get(void *data, u64 *val)
>  {
>       struct drm_i915_private *dev_priv = data;
> -     int ret;
> -
> -     ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
> -     if (ret)
> -             return ret;
> -
> -     *val = dev_priv->next_seqno;
> -     mutex_unlock(&dev_priv->drm.struct_mutex);
>  
> +     *val = dev_priv->gt.global_timeline.next_seqno;

This should be marked as an unlocked access somehow.
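
Something along these lines is what I mean; a minimal sketch, assuming a
plain READ_ONCE() annotation is enough documentation here:

static int
i915_next_seqno_get(void *data, u64 *val)
{
	struct drm_i915_private *dev_priv = data;

	/* Unlocked read of the timeline seqno: annotate it so readers
	 * (and race-detection tooling) can see the lockless access is
	 * intentional, rather than an accidentally dropped lock.
	 */
	*val = READ_ONCE(dev_priv->gt.global_timeline.next_seqno);

	return 0;
}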

> +static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
>  {
> -     struct intel_engine_cs *engine;
> -     int ret;
> +     int ret, i;
>  
> -     for_each_engine(engine, dev_priv) {
> -             if (engine->last_context == NULL)
> -                     continue;
> +     for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {

Let's untangle the headers so we don't have to hand-roll these loops?
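
To show the shape I'm after once the headers are untangled (the
for_each_timeline_engine() macro below does not exist, it is only a sketch
of the idea, in the spirit of for_each_engine()):

/* Hypothetical helper so callers do not open-code the ARRAY_SIZE()
 * loop over a timeline's per-engine slots.
 */
#define for_each_timeline_engine(it__, tl__) \
	for ((it__) = &(tl__)->engine[0]; \
	     (it__) < &(tl__)->engine[ARRAY_SIZE((tl__)->engine)]; \
	     (it__)++)

static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
{
	struct intel_timeline *it;

	for_each_timeline_engine(it, tl) {
		/* ... wait on it->last_request, as the patch does today ... */
	}

	return 0;
}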

> @@ -4475,13 +4489,24 @@ i915_gem_load_init(struct drm_device *dev)
>                                 SLAB_RECLAIM_ACCOUNT |
>                                 SLAB_DESTROY_BY_RCU,
>                                 NULL);
> +     if (!dev_priv->requests) {
> +             err = -ENOMEM;
> +             goto err_vmas;
> +     }
> +
> +     mutex_lock(&dev_priv->drm.struct_mutex);

Hngg, locking in an initializer; maybe we should have our own variant of
assert_held_struct_mutex (or, as you suggested, no struct_mutex at all!)
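
Something like this is the variant I have in mind (the ->gt.init_in_progress
flag is made up here, purely to illustrate asserting instead of locking
during load):

/* Hypothetical flag: true only while i915_gem_load_init() runs, so the
 * timeline init path can assert correctness without the caller taking
 * struct_mutex just to keep lockdep happy.
 */
#define assert_timeline_lock_held(i915) \
	WARN_ON(!(i915)->gt.init_in_progress && \
		!mutex_is_locked(&(i915)->drm.struct_mutex))

i915_gem_timeline_init() could then assert with that and we would drop the
mutex_lock() from the initializer.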

> +     INIT_LIST_HEAD(&dev_priv->gt.timelines);
> +     err = i915_gem_timeline_init(dev_priv,
> +                                  &dev_priv->gt.global_timeline,
> +                                  "[execution]");

"[global]" would not be clearer to give better mapping between
code/debug?

> 
> -static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
> +static int i915_gem_get_global_seqno(struct drm_i915_private *dev_priv,
> +                                  u32 *seqno)
>  {
> +     struct i915_gem_timeline *tl = &dev_priv->gt.global_timeline;

Perhaps *gtl, to indicate the global one?

> +struct intel_timeline {
> +     u64 fence_context;
> +     u32 last_submitted_seqno;
> +
> +     /**
> +      * List of breadcrumbs associated with GPU requests currently
> +      * outstanding.
> +      */
> +     struct list_head requests;
> +
> +     /* An RCU guarded pointer to the last request. No reference is
> +      * held to the request, users must carefully acquire a reference to
> +      * the request using i915_gem_active_get_request_rcu(), or hold the
> +      * struct_mutex.
> +      */
> +     struct i915_gem_active last_request;

"RCU guarded pointer" sounds off when we're speaking of an object rather
than a bare pointer.
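
Maybe phrase the comment in terms of the access pattern instead, e.g. given
a struct intel_timeline *tl (only a sketch, the exact accessor signature is
assumed from the comment above):

struct drm_i915_gem_request *rq;

/* last_request is not a bare pointer but an i915_gem_active; without
 * struct_mutex the request it tracks may be retired at any time, so
 * take our own reference under the RCU read lock before using it.
 */
rcu_read_lock();
rq = i915_gem_active_get_request_rcu(&tl->last_request);
rcu_read_unlock();

if (rq) {
	/* ... use rq, then drop our reference ... */
	i915_gem_request_put(rq);
}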
 
> 
> @@ -217,8 +217,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
>  
>  static void intel_engine_init_requests(struct intel_engine_cs *engine)
>  {
> -     init_request_active(&engine->last_request, NULL);
> -     INIT_LIST_HEAD(&engine->request_list);
> +     engine->timeline = &engine->i915->gt.global_timeline.engine[engine->id];

The function name no longer matches what it does, please update it.

> @@ -141,7 +142,6 @@ struct intel_engine_cs {
>               VCS2,   /* Keep instances of the same type engine together. */
>               VECS
>       } id;
> -#define I915_NUM_ENGINES 5
>  #define _VCS(n) (VCS + (n))
>       unsigned int exec_id;
>       enum intel_engine_hw_id {
> @@ -152,10 +152,10 @@ struct intel_engine_cs {
>               VCS2_HW
>       } hw_id;
>       enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
> -     u64 fence_context;
>       u32             mmio_base;
>       unsigned int irq_shift;
>       struct intel_ring *buffer;
> +     struct intel_timeline *timeline;

I don't see why this couldn't be a non-pointer (embedded) member?
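
I.e. roughly (sketch only; the ownership would then flip so the global
timeline references the engines rather than owning the per-engine slots):

struct intel_engine_cs {
	/* ... */
	struct intel_ring *buffer;

	/* Embedded per-engine timeline instead of a pointer into
	 * i915->gt.global_timeline.engine[engine->id]; it lives exactly
	 * as long as the engine, so no indirection is needed.
	 */
	struct intel_timeline timeline;
	/* ... */
};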

Again a rather big patch; with the above addressed:

Reviewed-by: Joonas Lahtinen <joonas.lahti...@linux.intel.com>

Regards, Joonas
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation