The commit

        f831948 2016-11-30 locking/lockdep: Provide a type check for lock_is_held

didn't fully support rwsem. This change adds a lock_downgrade() lockdep
annotation so that downgrade_write() supports the added type check.

Originally-written-by: Peter Zijlstra <peterz@infradead.org>
See-also: http://marc.info/?l=linux-kernel&m=148581164003149&w=2
Signed-off-by: J. R. Okajima <hooanon05g@gmail.com>
---
 include/linux/lockdep.h  |  2 ++
 kernel/locking/lockdep.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++
 kernel/locking/rwsem.c   |  6 ++----
 3 files changed, 59 insertions(+), 4 deletions(-)

diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 0345cbf..22f304c 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -361,6 +361,8 @@ static inline void lock_set_subclass(struct lockdep_map 
*lock,
        lock_set_class(lock, lock->name, lock->key, subclass, ip);
 }
 
+extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
+
 extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
 extern void lockdep_clear_current_reclaim_state(void);
 extern void lockdep_trace_alloc(gfp_t mask);
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7dc8f8e..6a4a740 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3523,6 +3523,44 @@ __lock_set_class(struct lockdep_map *lock, const char 
*name,
        return 1;
 }
 
+static int __lock_downgrade(struct lockdep_map *lock, unsigned long ip)
+{
+       struct task_struct *curr = current;
+       struct held_lock *hlock;
+       unsigned int depth;
+       int i;
+
+       depth = curr->lockdep_depth;
+       /*
+        * This function is about (re)setting the class of a held lock,
+        * yet we're not actually holding any locks. Naughty user!
+        */
+       if (DEBUG_LOCKS_WARN_ON(!depth))
+               return 0;
+
+       hlock = find_held_lock(curr, lock, depth, &i);
+       if (!hlock)
+               return print_unlock_imbalance_bug(curr, lock, ip);
+
+       curr->lockdep_depth = i;
+       curr->curr_chain_key = hlock->prev_chain_key;
+
+       WARN(hlock->read, "downgrading a read lock");
+       hlock->read = 1;
+       hlock->acquire_ip = ip;
+
+       if (validate_held_lock(curr, depth, i))
+               return 0;
+
+       /*
+        * I took it apart and put it back together again, except now I have
+        * these 'spare' parts.. where shall I put them.
+        */
+       if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+               return 0;
+       return 1;
+}
+
 /*
  * Remove the lock to the list of currently held locks - this gets
  * called on mutex_unlock()/spin_unlock*() (or on a failed
@@ -3749,6 +3787,23 @@ void lock_set_class(struct lockdep_map *lock, const char 
*name,
 }
 EXPORT_SYMBOL_GPL(lock_set_class);
 
+void lock_downgrade(struct lockdep_map *lock, unsigned long ip)
+{
+       unsigned long flags;
+
+       if (unlikely(current->lockdep_recursion))
+               return;
+
+       raw_local_irq_save(flags);
+       current->lockdep_recursion = 1;
+       check_flags(flags);
+       if (__lock_downgrade(lock, ip))
+               check_chain_key(current);
+       current->lockdep_recursion = 0;
+       raw_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(lock_downgrade);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 45ba475..31db3ef 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -123,10 +123,8 @@ EXPORT_SYMBOL(up_write);
  */
 void downgrade_write(struct rw_semaphore *sem)
 {
-       /*
-        * lockdep: a downgraded write will live on as a write
-        * dependency.
-        */
+       lock_downgrade(&sem->dep_map, _RET_IP_);
+
        rwsem_set_reader_owned(sem);
        __downgrade_write(sem);
 }
-- 
2.1.4

Reply via email to