John Harrison <john.c.harri...@intel.com> writes:

> Please note that a lot of the issues with __i915_add_request are cleaned 
> up by my patch series to remove the outstanding_lazy_request: the add to 
> client in some random client context is fixed, the messy execlist vs 
> legacy ringbuf decisions are removed, the execlist vs legacy one-sided 
> context reference is removed, ...
>
> Also, I am in the process of converting the request structure to use 
> struct fence, which will possibly answer some of your locking concerns 
> in the subsequent patch.
>
> So can you hold off on merging these two patches at least until the dust 
> has settled on the anti-OLR series?
>
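
(Editorial aside on the struct fence conversion John mentions above: a
minimal sketch, assuming the linux/fence.h API of this era. The struct,
function and field names below are invented for illustration and are not
taken from John's actual series.)

#include <linux/fence.h>

/* Hypothetical request with an embedded fence; the "sketch_" names are
 * placeholders, not real i915 identifiers. */
struct i915_request_sketch {
	struct fence fence;	/* embedded; supplies refcounting and waiting */
	spinlock_t lock;	/* protects fence signalling state */
	u32 seqno;		/* assumed assigned before fence_init() */
	/* ... the rest of the existing request fields ... */
};

static const char *sketch_get_driver_name(struct fence *fence)
{
	return "i915";
}

static const char *sketch_get_timeline_name(struct fence *fence)
{
	return "ring";
}

static bool sketch_enable_signaling(struct fence *fence)
{
	/* a real driver would enable the ring's user interrupt here */
	return true;
}

static const struct fence_ops sketch_fence_ops = {
	.get_driver_name = sketch_get_driver_name,
	.get_timeline_name = sketch_get_timeline_name,
	.enable_signaling = sketch_enable_signaling,
	.wait = fence_default_wait,
};

/* At request creation; the fence context would be allocated once per
 * engine with fence_context_alloc(), not once per request. */
static void sketch_request_init(struct i915_request_sketch *req,
				unsigned int engine_fence_context)
{
	spin_lock_init(&req->lock);
	fence_init(&req->fence, &sketch_fence_ops, &req->lock,
		   engine_fence_context, req->seqno);
}

With the fence embedded, waiters can take a reference via fence_get() and
wait on the fence's own spinlock-protected state rather than on
driver-wide locks, which is presumably the locking benefit alluded to.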

This was just a quick stab at fixing hangcheck misreporting the ring as
idle when it is not.
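
(For context, a light paraphrase of the hangcheck-side idleness test from
i915_irq.c of roughly this era; not an exact quote. If the request
bookkeeping done in __i915_add_request goes wrong, request_list can look
empty, or the last seqno can look complete, while work is still in
flight, and hangcheck then misreports the ring as idle.)

static bool ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
	/* idle if nothing is queued, or if the newest queued request
	 * has already been completed by the GPU */
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}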

Daniel, please just ignore these two.

-Mika

> Thanks.
>
>
> On 19/02/2015 16:18, Mika Kuoppala wrote:
>> Clean __i915_add_request by splitting request submission into
>> preparation, actual submission and adding to client.
>>
>> While doing this we can remove the recording of the request start
>> (request->head), which was not used.
>>
>> Cc: Chris Wilson <ch...@chris-wilson.co.uk>
>> Signed-off-by: Mika Kuoppala <mika.kuopp...@intel.com>
>> ---
>>   drivers/gpu/drm/i915/i915_gem.c | 116 +++++++++++++++++++++++++++-------------
>>   1 file changed, 78 insertions(+), 38 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
>> index 61134ab..06265e7 100644
>> --- a/drivers/gpu/drm/i915/i915_gem.c
>> +++ b/drivers/gpu/drm/i915/i915_gem.c
>> @@ -2407,26 +2407,34 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
>>      return 0;
>>   }
>>   
>> -int __i915_add_request(struct intel_engine_cs *ring,
>> -                   struct drm_file *file,
>> -                   struct drm_i915_gem_object *obj)
>> +static struct intel_ringbuffer *
>> +__request_to_ringbuf(struct drm_i915_gem_request *request)
>> +{
>> +    if (i915.enable_execlists)
>> +            return request->ctx->engine[request->ring->id].ringbuf;
>> +
>> +    return request->ring->buffer;
>> +}
>> +
>> +static struct drm_i915_gem_request *
>> +i915_gem_request_prepare(struct intel_engine_cs *ring, struct drm_file *file)
>>   {
>> -    struct drm_i915_private *dev_priv = ring->dev->dev_private;
>>      struct drm_i915_gem_request *request;
>>      struct intel_ringbuffer *ringbuf;
>> -    u32 request_start;
>>      int ret;
>>   
>>      request = ring->outstanding_lazy_request;
>>      if (WARN_ON(request == NULL))
>> -            return -ENOMEM;
>> +            return ERR_PTR(-ENOMEM);
>>   
>> -    if (i915.enable_execlists) {
>> -            ringbuf = request->ctx->engine[ring->id].ringbuf;
>> -    } else
>> -            ringbuf = ring->buffer;
>> +    /* execlist submission has this already set */
>> +    if (!request->ctx)
>> +            request->ctx = ring->last_context;
>> +
>> +    ringbuf = __request_to_ringbuf(request);
>> +    if (WARN_ON(ringbuf == NULL))
>> +            return ERR_PTR(-EIO);
>>   
>> -    request_start = intel_ring_get_tail(ringbuf);
>>      /*
>>       * Emit any outstanding flushes - execbuf can fail to emit the flush
>>       * after having emitted the batchbuffer command. Hence we need to fix
>> @@ -2434,21 +2442,30 @@ int __i915_add_request(struct intel_engine_cs *ring,
>>       * is that the flush _must_ happen before the next request, no matter
>>       * what.
>>       */
>> -    if (i915.enable_execlists) {
>> +    if (i915.enable_execlists)
>>              ret = logical_ring_flush_all_caches(ringbuf, request->ctx);
>> -            if (ret)
>> -                    return ret;
>> -    } else {
>> +    else
>>              ret = intel_ring_flush_all_caches(ring);
>> -            if (ret)
>> -                    return ret;
>> -    }
>> +
>> +    if (ret)
>> +            return ERR_PTR(ret);
>> +
>> +    return request;
>> +}
>> +
>> +static int i915_gem_request_submit(struct drm_i915_gem_request *request,
>> +                               struct drm_i915_gem_object *batch)
>> +{
>> +    struct intel_ringbuffer *ringbuf = __request_to_ringbuf(request);
>> +    struct intel_engine_cs *ring = request->ring;
>> +    int ret;
>>   
>>      /* Record the position of the start of the request so that
>>       * should we detect the updated seqno part-way through the
>>       * GPU processing the request, we never over-estimate the
>>       * position of the head.
>>       */
>> +    request->batch_obj = batch;
>>      request->postfix = intel_ring_get_tail(ringbuf);
>>   
>>      if (i915.enable_execlists) {
>> @@ -2461,7 +2478,6 @@ int __i915_add_request(struct intel_engine_cs *ring,
>>                      return ret;
>>      }
>>   
>> -    request->head = request_start;
>>      request->tail = intel_ring_get_tail(ringbuf);
>>   
>>      /* Whilst this request exists, batch_obj will be on the
>> @@ -2470,35 +2486,59 @@ int __i915_add_request(struct intel_engine_cs *ring,
>>       * inactive_list and lose its active reference. Hence we do not need
>>       * to explicitly hold another reference here.
>>       */
>> -    request->batch_obj = obj;
>>   
>> -    if (!i915.enable_execlists) {
>> -            /* Hold a reference to the current context so that we can 
>> inspect
>> -             * it later in case a hangcheck error event fires.
>> +    if (!i915.enable_execlists && request->ctx) {
>> +            /* Hold a reference to the current context so that we can
>> +             * inspect it later in case a hangcheck error event fires.
>>               */
>> -            request->ctx = ring->last_context;
>> -            if (request->ctx)
>> -                    i915_gem_context_reference(request->ctx);
>> +            i915_gem_context_reference(request->ctx);
>>      }
>>   
>>      request->emitted_jiffies = jiffies;
>> +
>>      list_add_tail(&request->list, &ring->request_list);
>> -    request->file_priv = NULL;
>> +    ring->outstanding_lazy_request = NULL;
>>   
>> -    if (file) {
>> -            struct drm_i915_file_private *file_priv = file->driver_priv;
>> +    trace_i915_gem_request_add(request);
>>   
>> -            spin_lock(&file_priv->mm.lock);
>> -            request->file_priv = file_priv;
>> -            list_add_tail(&request->client_list,
>> -                          &file_priv->mm.request_list);
>> -            spin_unlock(&file_priv->mm.lock);
>> +    return 0;
>> +}
>>   
>> -            request->pid = get_pid(task_pid(current));
>> -    }
>> +static void i915_gem_request_add_to_client(struct drm_i915_gem_request *request)
>> +{
>> +    struct drm_i915_file_private *file_priv;
>>   
>> -    trace_i915_gem_request_add(request);
>> -    ring->outstanding_lazy_request = NULL;
>> +    if (!request->file_priv)
>> +            return;
>> +
>> +    file_priv = request->file_priv;
>> +
>> +    spin_lock(&file_priv->mm.lock);
>> +    request->file_priv = file_priv;
>> +    list_add_tail(&request->client_list,
>> +                  &file_priv->mm.request_list);
>> +    spin_unlock(&file_priv->mm.lock);
>> +
>> +    request->pid = get_pid(task_pid(current));
>> +}
>> +
>> +int __i915_add_request(struct intel_engine_cs *ring,
>> +                   struct drm_file *file,
>> +                   struct drm_i915_gem_object *batch)
>> +{
>> +    struct drm_i915_private *dev_priv = ring->dev->dev_private;
>> +    struct drm_i915_gem_request *request;
>> +    int ret;
>> +
>> +    request = i915_gem_request_prepare(ring, file);
>> +    if (IS_ERR(request))
>> +            return PTR_ERR(request);
>> +
>> +    ret = i915_gem_request_submit(request, batch);
>> +    if (ret)
>> +            return ret;
>> +
>> +    i915_gem_request_add_to_client(request);
>>   
>>      i915_queue_hangcheck(ring->dev);
>>   
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx
