Commit-ID:  fd1a5b04dfb899f84ddeb8acdaea6b98283df1e5
Gitweb:     https://git.kernel.org/tip/fd1a5b04dfb899f84ddeb8acdaea6b98283df1e5
Author:     Byungchul Park <byungchul.p...@lge.com>
AuthorDate: Wed, 25 Oct 2017 17:56:04 +0900
Committer:  Ingo Molnar <mi...@kernel.org>
CommitDate: Wed, 25 Oct 2017 12:19:03 +0200

workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes

The workqueue code added manual lock acquisition annotations to catch
deadlocks.

After lockdep crossrelease was introduced, some of those annotations became
redundant, since wait_for_completion() already performs the acquisition and
dependency tracking.

Remove the duplicate annotations.
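
For illustration, the redundant pattern in flush_work() was roughly the
following (a sketch based on the diff below, not the exact kernel code):

	/*
	 * Manual annotation whose only purpose was to teach lockdep about
	 * the "flush waits on this work" dependency:
	 */
	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	if (start_flush_work(work, &barr)) {
		wait_for_completion(&barr.done);
		...
	}

With crossrelease, the barrier completion is initialized against the target
work's lockdep map (via init_completion_map() respectively
COMPLETION_INITIALIZER_ONSTACK_MAP()), so wait_for_completion() records the
same dependency itself and the manual acquire/release pair can be dropped.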

Signed-off-by: Byungchul Park <byungchul.p...@lge.com>
Cc: Linus Torvalds <torva...@linux-foundation.org>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: amir7...@gmail.com
Cc: ax...@kernel.dk
Cc: darrick.w...@oracle.com
Cc: da...@fromorbit.com
Cc: h...@infradead.org
Cc: idryo...@gmail.com
Cc: jo...@kernel.org
Cc: johannes.b...@intel.com
Cc: kernel-t...@lge.com
Cc: linux-bl...@vger.kernel.org
Cc: linux-fsde...@vger.kernel.org
Cc: linux...@kvack.org
Cc: linux-...@vger.kernel.org
Cc: o...@redhat.com
Cc: t...@kernel.org
Link: http://lkml.kernel.org/r/1508921765-15396-9-git-send-email-byungchul.p...@lge.com
Signed-off-by: Ingo Molnar <mi...@kernel.org>
---
 include/linux/workqueue.h |  4 ++--
 kernel/workqueue.c        | 19 +++----------------
 2 files changed, 5 insertions(+), 18 deletions(-)

diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 1c49431..c8a572c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -218,7 +218,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
                                                                        \
                __init_work((_work), _onstack);                         \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
-               lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
+               lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
                INIT_LIST_HEAD(&(_work)->entry);                        \
                (_work)->func = (_func);                                \
        } while (0)
@@ -398,7 +398,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
        static struct lock_class_key __key;                             \
        const char *__lock_name;                                        \
                                                                        \
-       __lock_name = #fmt#args;                                        \
+       __lock_name = "(wq_completion)"#fmt#args;                       \
                                                                        \
        __alloc_workqueue_key((fmt), (flags), (max_active),             \
                              &__key, __lock_name, ##args);             \
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 39831b2..160fdc6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2497,15 +2497,8 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
        INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
 
-       /*
-        * Explicitly init the crosslock for wq_barrier::done, make its lock
-        * key a subkey of the corresponding work. As a result we won't
-        * build a dependency between wq_barrier::done and unrelated work.
-        */
-       lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map,
-                                  "(complete)wq_barr::done",
-                                  target->lockdep_map.key, 1);
-       __init_completion(&barr->done);
+       init_completion_map(&barr->done, &target->lockdep_map);
+
        barr->task = current;
 
        /*
@@ -2611,16 +2604,13 @@ void flush_workqueue(struct workqueue_struct *wq)
        struct wq_flusher this_flusher = {
                .list = LIST_HEAD_INIT(this_flusher.list),
                .flush_color = -1,
-               .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
+               .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
        };
        int next_color;
 
        if (WARN_ON(!wq_online))
                return;
 
-       lock_map_acquire(&wq->lockdep_map);
-       lock_map_release(&wq->lockdep_map);
-
        mutex_lock(&wq->mutex);
 
        /*
@@ -2883,9 +2873,6 @@ bool flush_work(struct work_struct *work)
        if (WARN_ON(!wq_online))
                return false;
 
-       lock_map_acquire(&work->lockdep_map);
-       lock_map_release(&work->lockdep_map);
-
        if (start_flush_work(work, &barr)) {
                wait_for_completion(&barr.done);
                destroy_work_on_stack(&barr.work);
