Since the bit_spin_lock() operations only manipulate bit 0 of the word
holding the bucket pointer and never dereference the RCU-protected
pointer itself, it's fine to forcefully drop the RCU annotation with a
__force cast.  This fixes 7 sparse warnings per include site.
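For context, a minimal standalone sketch (not part of the patch) of the
annotation pattern relied on here.  The stub definitions of __rcu and
__force and the helper name lock_bucket() are illustrative only,
loosely following what include/linux/compiler_types.h does:

/* Outside a sparse run (__CHECKER__ unset) the annotations vanish. */
#ifdef __CHECKER__
#define __rcu   __attribute__((noderef, address_space(__rcu)))
#define __force __attribute__((force))
#else
#define __rcu
#define __force
#endif

struct rhash_lock_head;

void lock_bucket(struct rhash_lock_head __rcu **bkt)
{
	/*
	 * bit_spin_lock() only sets/clears bit 0 of the word that holds
	 * the bucket pointer; it never dereferences the RCU-protected
	 * pointer itself, so casting the annotation away with __force is
	 * safe and stops sparse from flagging an address-space mismatch.
	 */
	unsigned long *addr = (unsigned long __force *)bkt;

	(void)addr;	/* bit_spin_lock(0, addr) in the real code */
}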

Fixes: 8f0db018006a ("rhashtable: use bit_spin_locks to protect hash bucket.")
Signed-off-by: Jakub Kicinski <jakub.kicin...@netronome.com>
Reviewed-by: Simon Horman <simon.hor...@netronome.com>
---
 include/linux/rhashtable.h | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index f7714d3b46bd..bea1e0440ab4 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -325,27 +325,27 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
  */
 
 static inline void rht_lock(struct bucket_table *tbl,
-                           struct rhash_lock_head **bkt)
+                           struct rhash_lock_head __rcu **bkt)
 {
        local_bh_disable();
-       bit_spin_lock(0, (unsigned long *)bkt);
+       bit_spin_lock(0, (unsigned long __force *)bkt);
        lock_map_acquire(&tbl->dep_map);
 }
 
 static inline void rht_lock_nested(struct bucket_table *tbl,
-                                  struct rhash_lock_head **bucket,
+                                  struct rhash_lock_head __rcu **bkt,
                                   unsigned int subclass)
 {
        local_bh_disable();
-       bit_spin_lock(0, (unsigned long *)bucket);
+       bit_spin_lock(0, (unsigned long __force *)bkt);
        lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
 }
 
 static inline void rht_unlock(struct bucket_table *tbl,
-                             struct rhash_lock_head **bkt)
+                             struct rhash_lock_head __rcu **bkt)
 {
        lock_map_release(&tbl->dep_map);
-       bit_spin_unlock(0, (unsigned long *)bkt);
+       bit_spin_unlock(0, (unsigned long __force *)bkt);
        local_bh_enable();
 }
 
-- 
2.21.0
