On 02/18/2016 06:27 AM, john.c.harri...@intel.com wrote:
> From: John Harrison <john.c.harri...@intel.com>
> 
> The scheduler needs to track interdependencies between batch buffers.
> These are calculated by analysing the object lists of the buffers and
> looking for commonality. The scheduler also needs to keep those
> buffers locked long after the initial IOCTL call has returned to user
> land.
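
For context (not part of the patch): the commonality analysis described
above presumably boils down to comparing the saved object lists of two
queue entries, along the lines of the sketch below, using the
saved_objects/num_objs fields introduced further down. The helper name and
placement are illustrative only, not code from this series.

    static bool batches_share_object(struct i915_scheduler_queue_entry *a,
                                     struct i915_scheduler_queue_entry *b)
    {
            int i, j;

            /* A dependency exists if any object appears in both lists. */
            for (i = 0; i < a->num_objs; i++)
                    for (j = 0; j < b->num_objs; j++)
                            if (a->saved_objects[i].obj ==
                                b->saved_objects[j].obj)
                                    return true;

            return false;
    }
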
> 
> v3: Updated to support read-read optimisation.
> 
> v5: Updated due to changes to earlier patches in series for splitting
> bypass mode into a separate function and consolidating the cleanup code.
> 
> For: VIZ-1587
> Signed-off-by: John Harrison <john.c.harri...@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 48 ++++++++++++++++++++++++++++--
>  drivers/gpu/drm/i915/i915_scheduler.c      | 15 ++++++++++
>  2 files changed, 61 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 11bea8d..f45f4dc 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1428,7 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>       struct i915_execbuffer_params *params = &qe.params;
>       const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
>       u32 dispatch_flags;
> -     int ret;
> +     int ret, i;
>       bool need_relocs;
>  
>       if (!i915_gem_check_execbuffer(args))
> @@ -1543,6 +1543,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>               goto pre_mutex_err;
>       }
>  
> +     qe.saved_objects = kzalloc(
> +                     sizeof(*qe.saved_objects) * args->buffer_count,
> +                     GFP_KERNEL);
> +     if (!qe.saved_objects) {
> +             ret = -ENOMEM;
> +             goto err;
> +     }
> +
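
For anyone reading this without the rest of the series applied: judging
purely from the usage in this patch, the queue entry fields relied on here
look roughly like the following (struct and field names inferred from the
code, not copied from the actual header):

    struct i915_scheduler_obj_entry {
            struct drm_i915_gem_object *obj;
            bool read_only;
    };

    /* ...and in struct i915_scheduler_queue_entry: */
    struct i915_scheduler_obj_entry *saved_objects;
    int num_objs;
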
>       /* Look up object handles */
>       ret = eb_lookup_vmas(eb, exec, args, vm, file);
>       if (ret)
> @@ -1663,7 +1671,30 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>       params->args_DR1                = args->DR1;
>       params->args_DR4                = args->DR4;
>       params->batch_obj               = batch_obj;
> -     params->ctx                     = ctx;
> +
> +     /*
> +      * Save away the list of objects used by this batch buffer for the
> +      * purpose of tracking inter-buffer dependencies.
> +      */
> +     for (i = 0; i < args->buffer_count; i++) {
> +             struct drm_i915_gem_object *obj;
> +
> +             /*
> +              * NB: 'drm_gem_object_lookup()' increments the object's
> +              * reference count and so must be matched by a
> +              * 'drm_gem_object_unreference' call.
> +              */
> +             obj = to_intel_bo(drm_gem_object_lookup(dev, file,
> +                                                       exec[i].handle));
> +             qe.saved_objects[i].obj       = obj;
> +             qe.saved_objects[i].read_only = obj->base.pending_write_domain == 0;
> +
> +     }
> +     qe.num_objs = i;
> +
> +     /* Lock and save the context object as well. */
> +     i915_gem_context_reference(ctx);
> +     params->ctx = ctx;
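
The read_only flag recorded above is what enables the v3 read-read
optimisation: two batches that only read a shared object need no ordering
between them; a dependency is only required when at least one side writes.
Roughly (an illustrative sketch, not code from this patch):

    static bool objects_conflict(const struct i915_scheduler_obj_entry *a,
                                 const struct i915_scheduler_obj_entry *b)
    {
            if (a->obj != b->obj)
                    return false;

            /* Read vs read never needs ordering. */
            return !(a->read_only && b->read_only);
    }
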
>  
>       ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
>       if (ret)
> @@ -1696,6 +1727,19 @@ err:
>       i915_gem_context_unreference(ctx);
>       eb_destroy(eb);
>  
> +     /* Need to release the objects: */
> +     if (qe.saved_objects) {
> +             for (i = 0; i < qe.num_objs; i++)
> +                     drm_gem_object_unreference(
> +                                     &qe.saved_objects[i].obj->base);
> +
> +             kfree(qe.saved_objects);
> +     }
> +
> +     /* Context too */
> +     if (params->ctx)
> +             i915_gem_context_unreference(params->ctx);
> +
>       /*
>        * If the request was created but not successfully submitted then it
>        * must be freed again. If it was submitted then it is being tracked
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index a3ffd04..60a59d3 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -719,6 +719,8 @@ void i915_scheduler_wakeup(struct drm_device *dev)
>   */
>  void i915_scheduler_clean_node(struct i915_scheduler_queue_entry *node)
>  {
> +     int i;
> +
>       if (!I915_SQS_IS_COMPLETE(node)) {
>               WARN(!node->params.request->cancelled,
>                    "Cleaning active node: %d!\n", node->status);
> @@ -736,6 +738,19 @@ void i915_scheduler_clean_node(struct i915_scheduler_queue_entry *node)
>               node->params.batch_obj = NULL;
>       }
>  
> +     /* Release the locked buffers: */
> +     for (i = 0; i < node->num_objs; i++)
> +             drm_gem_object_unreference(&node->saved_objects[i].obj->base);
> +     kfree(node->saved_objects);
> +     node->saved_objects = NULL;
> +     node->num_objs = 0;
> +
> +     /* Context too: */
> +     if (node->params.ctx) {
> +             i915_gem_context_unreference(node->params.ctx);
> +             node->params.ctx = NULL;
> +     }
> +
>       /* And anything else owned by the node: */
>       if (node->params.cliprects) {
>               kfree(node->params.cliprects);
> 

Reviewed-by: Jesse Barnes <jbar...@virtuousgeek.org>