There now is only one rcu_state structure in a given build of the
Linux kernel, so there is no need to pass it as a parameter to RCU's
functions.  This commit therefore removes the rsp parameter from
rcu_report_qs_rsp().

Signed-off-by: Paul E. McKenney <paul...@linux.vnet.ibm.com>
---
 kernel/rcu/tree.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index c9f4d7f3de91..73dde7c661e7 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -138,7 +138,7 @@ static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
 static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
-static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
+static void invoke_rcu_callbacks(struct rcu_data *rdp);
 static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp);
 static void sync_sched_exp_online_cleanup(int cpu);
 
@@ -2188,9 +2188,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
  * just-completed grace period.  Note that the caller must hold rnp->lock,
  * which is released before return.
  */
-static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
+static void rcu_report_qs_rsp(unsigned long flags)
        __releases(rcu_get_root(rsp)->lock)
 {
+       struct rcu_state *rsp = &rcu_state;
+
        raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp));
        WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
        WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
@@ -2267,7 +2269,7 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
         * state for this grace period.  Invoke rcu_report_qs_rsp()
         * to clean up and start the next grace period if one is needed.
         */
-       rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
+       rcu_report_qs_rsp(flags); /* releases rnp->lock. */
 }
 
 /*
@@ -2301,7 +2303,7 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
                 * Only one rcu_node structure in the tree, so don't
                 * try to report up to its nonexistent parent!
                 */
-               rcu_report_qs_rsp(rsp, flags);
+               rcu_report_qs_rsp(flags);
                return;
        }
 
@@ -2760,7 +2762,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 
        /* If there are callbacks ready, invoke them. */
        if (rcu_segcblist_ready_cbs(&rdp->cblist))
-               invoke_rcu_callbacks(rsp, rdp);
+               invoke_rcu_callbacks(rdp);
 
        /* Do any needed deferred wakeups of rcuo kthreads. */
        do_nocb_deferred_wakeup(rdp);
@@ -2788,8 +2790,10 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
  * are running on the current CPU with softirqs disabled, the
  * rcu_cpu_kthread_task cannot disappear out from under us.
  */
-static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
+static void invoke_rcu_callbacks(struct rcu_data *rdp)
 {
+       struct rcu_state *rsp = &rcu_state;
+
        if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
                return;
        if (likely(!rsp->boost)) {
-- 
2.17.1

Reply via email to