On Wed, Oct 12, 2011 at 03:56:22PM -0700, Ben Widawsky wrote:
> This refactor is useful for some future work I'll be doing on the
> execbuffer path. In addition to being a pretty easy prerequisite, it
> also helped me track down the bug uncovered in the first patch.
> 
> Signed-off-by: Ben Widawsky <b...@bwidawsk.net>
> ---
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |  102 ++++++++++++++--------------
>  1 files changed, 51 insertions(+), 51 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 182a2b9..b3beaae 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -40,6 +40,16 @@ struct change_domains {
>  	uint32_t flips;
>  };
>  
> +struct eb_objects {
> +	struct drm_i915_gem_object *batch_obj;
> +	struct list_head objects;
> +	int buffer_count;
> +	int mode;
> +	int and;

While you whack this code, can you do an s/and/end? I think that's just a
typo ...

> +	struct hlist_head buckets[0];
> +	/* DO NOT PUT ANYTHING HERE */

When you drop the array size, the compiler will enforce that for you (for
$compiler = gcc at least).
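I.e. just use a flexible array member and drop the comment; rough sketch of
what I mean, untested, members copied from your patch:

struct eb_objects {
	struct drm_i915_gem_object *batch_obj;
	struct list_head objects;
	int buffer_count;
	int mode;
	int and;
	/* C99 flexible array member must be the last member - gcc refuses
	 * to compile the struct if anything is added after it, so the
	 * "DO NOT PUT ANYTHING HERE" comment becomes unnecessary. */
	struct hlist_head buckets[];
};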
> +};
> +
>  /*
>   * Set the next domain for the specified object. This
>   * may not actually perform the necessary flushing/invaliding though,
> @@ -207,11 +217,6 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
>  	cd->flush_rings |= ring->id;
>  }
>  
> -struct eb_objects {
> -	int and;
> -	struct hlist_head buckets[0];
> -};
> -
>  static struct eb_objects *
>  eb_create(int size)
>  {
> @@ -225,6 +230,8 @@ eb_create(int size)
>  	if (eb == NULL)
>  		return eb;
>  
> +	INIT_LIST_HEAD(&eb->objects);
> +
>  	eb->and = count - 1;
>  	return eb;
>  }
> @@ -232,12 +239,22 @@ eb_create(int size)
>  static void
>  eb_reset(struct eb_objects *eb)
>  {
> +	while (!list_empty(&eb->objects)) {

list_for_each_entry_safe, please.

> +		struct drm_i915_gem_object *obj;
> +
> +		obj = list_first_entry(&eb->objects,
> +				       struct drm_i915_gem_object,
> +				       exec_list);
> +		list_del_init(&obj->exec_list);
> +		drm_gem_object_unreference(&obj->base);
> +	}
>  	memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
>  }
>  
>  static void
>  eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
>  {
> +	list_add_tail(&obj->exec_list, &eb->objects);
>  	hlist_add_head(&obj->exec_node,
>  		       &eb->buckets[obj->exec_handle & eb->and]);
>  }
> @@ -262,6 +279,16 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
>  static void
>  eb_destroy(struct eb_objects *eb)
>  {
> +	while (!list_empty(&eb->objects)) {

Ditto for this loop.
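I.e. something like this, untested sketch only (and the loop in eb_reset
above wants the same treatment):

static void
eb_destroy(struct eb_objects *eb)
{
	struct drm_i915_gem_object *obj, *next;

	/* safe variant since we unlink each object inside the loop */
	list_for_each_entry_safe(obj, next, &eb->objects, exec_list) {
		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	kfree(eb);
}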
> +		struct drm_i915_gem_object *obj;
> +
> +		obj = list_first_entry(&eb->objects,
> +				       struct drm_i915_gem_object,
> +				       exec_list);
> +		list_del_init(&obj->exec_list);
> +		drm_gem_object_unreference(&obj->base);
> +	}
> +
>  	kfree(eb);
>  }
>  
> @@ -436,8 +463,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
>  
>  static int
>  i915_gem_execbuffer_relocate(struct drm_device *dev,
> -			     struct eb_objects *eb,
> -			     struct list_head *objects)
> +			     struct eb_objects *eb)
>  {
>  	struct drm_i915_gem_object *obj;
>  	int ret = 0;
> @@ -450,7 +476,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
>  	 * lockdep complains vehemently.
>  	 */
>  	pagefault_disable();
> -	list_for_each_entry(obj, objects, exec_list) {
> +	list_for_each_entry(obj, &eb->objects, exec_list) {
>  		ret = i915_gem_execbuffer_relocate_object(obj, eb);
>  		if (ret)
>  			break;
> @@ -618,24 +644,18 @@ static int
>  i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
>  				  struct drm_file *file,
>  				  struct intel_ring_buffer *ring,
> -				  struct list_head *objects,
>  				  struct eb_objects *eb,
>  				  struct drm_i915_gem_exec_object2 *exec,
>  				  int count)
>  {
>  	struct drm_i915_gem_relocation_entry *reloc;
>  	struct drm_i915_gem_object *obj;
> +	struct list_head *objects = &eb->objects;
>  	int *reloc_offset;
>  	int i, total, ret;
>  
>  	/* We may process another execbuffer during the unlock... */
> -	while (!list_empty(objects)) {
> -		obj = list_first_entry(objects,
> -				       struct drm_i915_gem_object,
> -				       exec_list);
> -		list_del_init(&obj->exec_list);
> -		drm_gem_object_unreference(&obj->base);
> -	}
> +	eb_reset(eb);
>  
>  	mutex_unlock(&dev->struct_mutex);
>  
> @@ -676,7 +696,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
>  	}
>  
>  	/* reacquire the objects */
> -	eb_reset(eb);
>  	for (i = 0; i < count; i++) {
>  		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
>  							exec[i].handle));
> @@ -687,7 +706,6 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
>  			goto err;
>  		}
>  
> -		list_add_tail(&obj->exec_list, objects);
>  		obj->exec_handle = exec[i].handle;
>  		obj->exec_entry = &exec[i];
>  		eb_add_object(eb, obj);
> @@ -1004,14 +1022,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  		       struct drm_i915_gem_exec_object2 *exec)
>  {
>  	drm_i915_private_t *dev_priv = dev->dev_private;
> -	struct list_head objects;
> -	struct eb_objects *eb;
> -	struct drm_i915_gem_object *batch_obj;
> +	struct eb_objects *eb = NULL;
> +	struct drm_i915_gem_object *obj = NULL;
>  	struct drm_clip_rect *cliprects = NULL;
>  	struct intel_ring_buffer *ring;
>  	u32 exec_start, exec_len;
>  	u32 seqno;
> -	int ret, mode, i;
> +	int ret, i;
>  
>  	if (!i915_gem_check_execbuffer(args)) {
>  		DRM_ERROR("execbuf with invalid offset/length\n");
> @@ -1091,11 +1108,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  		goto pre_mutex_err;
>  	}
>  
> -	/* Look up object handles */
> -	INIT_LIST_HEAD(&objects);
>  	for (i = 0; i < args->buffer_count; i++) {
> -		struct drm_i915_gem_object *obj;
> -
>  		obj = to_intel_bo(drm_gem_object_lookup(dev, file,
>  							exec[i].handle));
>  		if (&obj->base == NULL) {
> @@ -1113,28 +1126,25 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  			goto err;
>  		}
>  
> -		list_add_tail(&obj->exec_list, &objects);
>  		obj->exec_handle = exec[i].handle;
>  		obj->exec_entry = &exec[i];
>  		eb_add_object(eb, obj);
>  	}
>  
> -	/* take note of the batch buffer before we might reorder the lists */
> -	batch_obj = list_entry(objects.prev,
> -			       struct drm_i915_gem_object,
> -			       exec_list);
> +	/* The last object is the batch object */
> +	eb->batch_obj = obj;
>  
>  	/* Move the objects en-masse into the GTT, evicting if necessary. */
> -	ret = i915_gem_execbuffer_reserve(ring, file, &objects);
> +	ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects);
>  	if (ret)
>  		goto err;
>  
>  	/* The objects are in their final locations, apply the relocations. */
> -	ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
> +	ret = i915_gem_execbuffer_relocate(dev, eb);
>  	if (ret) {
>  		if (ret == -EFAULT) {
>  			ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
> -								&objects, eb,
> +								eb,
>  								exec,
>  								args->buffer_count);

Looks like you planned to replace args->buffer_count with eb->buffer_count
here, but didn't see it through. Please do it, because I like it.

>  			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
> @@ -1143,20 +1153,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  		goto err;
>  	}
>  
> -	mode = args->flags & I915_EXEC_CONSTANTS_MASK;
> -	ret = i915_gem_set_constant_offset(ring, mode);
> +	eb->mode = args->flags & I915_EXEC_CONSTANTS_MASK;
> +	ret = i915_gem_set_constant_offset(ring, eb->mode);

The eb->mode refactor here otoh looks a bit superfluous. Is this needed for
a future patch of yours?
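If not, a plain local in do_execbuffer would do the job, i.e. roughly
(untested sketch):

	int mode = args->flags & I915_EXEC_CONSTANTS_MASK;

	ret = i915_gem_set_constant_offset(ring, mode);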
>  	if (ret)
>  		goto err;
>  
>  	/* Set the pending read domains for the batch buffer to COMMAND */
> -	if (batch_obj->base.pending_write_domain) {
> +	if (eb->batch_obj->base.pending_write_domain) {
>  		DRM_ERROR("Attempting to use self-modifying batch buffer\n");
>  		ret = -EINVAL;
>  		goto err;
>  	}
> -	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
> +	eb->batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
>  
> -	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
> +	ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
>  	if (ret)
>  		goto err;
>  
> @@ -1177,7 +1187,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  
>  	trace_i915_gem_ring_dispatch(ring, seqno);
>  
> -	exec_start = batch_obj->gtt_offset + args->batch_start_offset;
> +	exec_start = eb->batch_obj->gtt_offset + args->batch_start_offset;

Ditto for eb->batch_obj, it's only used in do_execbuffer, afaics. Again, if
you need this later on, maybe explain it in the commit msg?

>  	exec_len = args->batch_len;
>  	if (cliprects) {
>  		for (i = 0; i < args->num_cliprects; i++) {
> @@ -1197,21 +1207,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  			goto err;
>  	}
>  
> -	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
> +	i915_gem_execbuffer_move_to_active(&eb->objects, ring, seqno);
>  	i915_gem_execbuffer_retire_commands(dev, file, ring);
>  
>  err:
>  	eb_destroy(eb);
> -	while (!list_empty(&objects)) {
> -		struct drm_i915_gem_object *obj;
> -
> -		obj = list_first_entry(&objects,
> -				       struct drm_i915_gem_object,
> -				       exec_list);
> -		list_del_init(&obj->exec_list);
> -		drm_gem_object_unreference(&obj->base);
> -	}
> -
>  	mutex_unlock(&dev->struct_mutex);
>  
>  pre_mutex_err:
> -- 
> 1.7.7

-- 
Daniel Vetter
Mail: dan...@ffwll.ch
Mobile: +41 (0)79 365 57 48