Hi, just a few minor comments inline below.
+Usage > +----- > + > +Three different ways to acquire locks within the same w/w class. Common > +definitions for methods #1 and #2: > + > +static DEFINE_WW_CLASS(ww_class); > + > +struct obj { > + struct ww_mutex lock; > + /* obj data */ > +}; > + > +struct obj_entry { > + struct list_head *list; > + struct obj *obj; > +}; > + > +Method 1, using a list in execbuf->buffers that's not allowed to be > reordered. > +This is useful if a list of required objects is already tracked somewhere. > +Furthermore the lock helper can use propagate the -EALREADY return code > back to > +the caller as a signal that an object is twice on the list. This is > useful if > +the list is constructed from userspace input and the ABI requires > userspace to > +not have duplicate entries (e.g. for a gpu commandbuffer submission > ioctl). > + > +int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx) > +{ > + struct obj *res_obj = NULL; > + struct obj_entry *contended_entry = NULL; > + struct obj_entry *entry; > + > + ww_acquire_init(ctx, &ww_class); > + > +retry: > + list_for_each_entry (list, entry) { > + if (entry == res_obj) { > if (entry->obj == res_obj) { > + res_obj = NULL; > + continue; > + } > + ret = ww_mutex_lock(&entry->obj->lock, ctx); > + if (ret < 0) { > + contended_obj = entry; > contended_entry = entry; > + goto err; > + } > + } > + > + ww_acquire_done(ctx); > + return 0; > + > +err: > + list_for_each_entry_continue_reverse (list, contended_entry, entry) > list_for_each_entry_continue_reverse(entry, list, list)? > + ww_mutex_unlock(&entry->obj->lock); > + > + if (res_obj) > + ww_mutex_unlock(&res_obj->lock); > + > + if (ret == -EDEADLK) { > + /* we lost out in a seqno race, lock and retry.. */ > + ww_mutex_lock_slow(&contended_entry->obj->lock, ctx); > + res_obj = contended_entry->obj; > + goto retry; > + } > + ww_acquire_fini(ctx); > + > + return ret; > +} > + > +Method 2, using a list in execbuf->buffers that can be reordered. 
Same > semantics > +of duplicate entry detection using -EALREADY as method 1 above. But the > +list-reordering allows for a bit more idiomatic code. > + > +int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx) > +{ > + struct obj_entry *entry, *entry2; > + > + ww_acquire_init(ctx, &ww_class); > + > + list_for_each_entry (list, entry) { > + ret = ww_mutex_lock(&entry->obj->lock, ctx); > + if (ret < 0) { > + entry2 = entry; > + > + list_for_each_entry_continue_reverse (list, entry2) > list_for_each_entry_continue_reverse(entry, list, list)? > + ww_mutex_unlock(&entry->obj->lock); > + > + if (ret != -EDEADLK) { > + ww_acquire_fini(ctx); > + return ret; > + } > + > + /* we lost out in a seqno race, lock and retry.. */ > + ww_mutex_lock_slow(&entry->obj->lock, ctx); > shouldn't the wounded task acquire slowpath lock to the above dead locked entry like this, ww_mutex_lock_slow(&entry2->obj->lock, ctx)? > + > + /* > + * Move buf to head of the list, this will point > + * buf->next to the first unlocked entry, > + * restarting the for loop. > + */ > + list_del(&entry->list); > + list_add(&entry->list, list); > + } > + } > + > + ww_acquire_done(ctx); > + return 0; > +} > + > +Unlocking works the same way for both methods #1 and #2: > + > +void unlock_objs(struct list_head *list, struct ww_acquire_ctx *ctx) > +{ > + struct obj_entry *entry; > + > + list_for_each_entry (list, entry) > + ww_mutex_unlock(&entry->obj->lock); > + > + ww_acquire_fini(ctx); > +} > + > > _______________________________________________ > dri-devel mailing list > dri-devel at lists.freedesktop.org > http://lists.freedesktop.org/mailman/listinfo/dri-devel > -------------- next part -------------- An HTML attachment was scrubbed... URL: <http://lists.freedesktop.org/archives/dri-devel/attachments/20130529/3f2d4157/attachment-0001.html>