It would be nice to validate that the caller of rcu_sync_is_idle() holds the corresponding type of RCU read-side lock. Add the new rcu_sync_ops->held() method and change rcu_sync_is_idle() to WARN() if it returns false.
This obviously penalizes the readers (fast-path), but only if CONFIG_PROVE_RCU.

Suggested-by: "Paul E. McKenney" <paul...@linux.vnet.ibm.com>
Signed-off-by: Oleg Nesterov <o...@redhat.com>
---
 include/linux/rcusync.h |    6 ++++++
 kernel/rcusync.c        |    9 +++++++++
 2 files changed, 15 insertions(+), 0 deletions(-)

diff --git a/include/linux/rcusync.h b/include/linux/rcusync.h
index 30c6037..ab787c1 100644
--- a/include/linux/rcusync.h
+++ b/include/linux/rcusync.h
@@ -7,6 +7,9 @@
 struct rcu_sync_ops {
 	void (*sync)(void);
 	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
+#ifdef CONFIG_PROVE_RCU
+	int  (*held)(void);
+#endif
 };
 
 struct rcu_sync_struct {
@@ -22,6 +25,9 @@ struct rcu_sync_struct {
 
 static inline bool rcu_sync_is_idle(struct rcu_sync_struct *rss)
 {
+#ifdef CONFIG_PROVE_RCU
+	WARN_ON(!rss->ops->held());
+#endif
 	return !rss->gp_state; /* GP_IDLE */
 }
 
diff --git a/kernel/rcusync.c b/kernel/rcusync.c
index 1cefb83..21cde9b 100644
--- a/kernel/rcusync.c
+++ b/kernel/rcusync.c
@@ -6,18 +6,27 @@ enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
 
 #define	rss_lock	gp_wait.lock
 
+#ifdef CONFIG_PROVE_RCU
+#define	__INIT_HELD(func)	.held = func,
+#else
+#define	__INIT_HELD(func)
+#endif
+
 struct rcu_sync_ops rcu_sync_ops_array[] = {
 	[RCU_SYNC] = {
 		.sync = synchronize_rcu,
 		.call = call_rcu,
+		__INIT_HELD(rcu_read_lock_held)
 	},
 	[RCU_SCHED_SYNC] = {
 		.sync = synchronize_sched,
 		.call = call_rcu_sched,
+		__INIT_HELD(rcu_read_lock_sched_held)
 	},
 	[RCU_BH_SYNC] = {
 		.sync = synchronize_rcu_bh,
 		.call = call_rcu_bh,
+		__INIT_HELD(rcu_read_lock_bh_held)
 	},
 };
 
-- 
1.5.5.1
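
For illustration, here is a minimal reader-side sketch of the calling pattern this check enforces for the RCU_SYNC flavor. The rcu_sync_struct instance "rss", its initialization, and the surrounding function are assumptions for the example, not part of this patch:

	/*
	 * Hypothetical reader fast path; "rss" is an already-initialized
	 * RCU_SYNC-type rcu_sync_struct.
	 */
	static bool reader_fast_path(struct rcu_sync_struct *rss)
	{
		bool idle;

		rcu_read_lock();
		/*
		 * Under CONFIG_PROVE_RCU this now calls rss->ops->held(),
		 * i.e. rcu_read_lock_held() for RCU_SYNC, so no warning
		 * here.  Sampling rcu_sync_is_idle() after rcu_read_unlock()
		 * would trigger the new WARN_ON().
		 */
		idle = rcu_sync_is_idle(rss);
		rcu_read_unlock();

		return idle;
	}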