A KCSAN build revealed we get explicit KCSAN annotations through atomic_*()
usage in noinstr code; switch to arch_atomic_*() for the respective functions.
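
For reference, the instrumented wrapper looks roughly like this (a minimal
sketch modeled on the generated include/linux/atomic-instrumented.h; KCSAN
hooks in via the instrument_atomic_*() helpers):

	static __always_inline int atomic_read(const atomic_t *v)
	{
		/* With CONFIG_KCSAN=y this ends up calling __kcsan_check_access(). */
		instrument_atomic_read(v, sizeof(*v));
		/* The bare architecture op; no instrumentation emitted. */
		return arch_atomic_read(v);
	}

Calling the instrumented atomic_*() from a noinstr function therefore emits
a call out of the .noinstr.text section, which objtool flags below;
arch_atomic_*() bypasses the wrapper entirely.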

vmlinux.o: warning: objtool: rcu_nmi_exit()+0x4d: call to __kcsan_check_access() leaves .noinstr.text section
vmlinux.o: warning: objtool: rcu_dynticks_eqs_enter()+0x25: call to __kcsan_check_access() leaves .noinstr.text section
vmlinux.o: warning: objtool: rcu_nmi_enter()+0x4f: call to __kcsan_check_access() leaves .noinstr.text section
vmlinux.o: warning: objtool: rcu_dynticks_eqs_exit()+0x2a: call to __kcsan_check_access() leaves .noinstr.text section
vmlinux.o: warning: objtool: __rcu_is_watching()+0x25: call to __kcsan_check_access() leaves .noinstr.text section
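
These all stem from instrumented atomic_*() calls inside noinstr functions.
Note the rcu_nmi_exit() hunks below also hoist instrumentation_begin() to
the top of the function, so the explicitly instrumented pieces
(WARN_ON_ONCE(), trace_rcu_dyntick()) sit inside a single begin/end section.
A minimal sketch of the resulting pattern (the helper function is
hypothetical; the annotations and identifiers are the real kernel ones):

	noinstr void example_helper(void)	/* hypothetical function */
	{
		struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
		/* Bare arch op: safe in noinstr, no KCSAN call emitted. */
		int snap = arch_atomic_read(&rdp->dynticks);

		instrumentation_begin();
		/* Instrumented code is only legal inside this section. */
		WARN_ON_ONCE(!(snap & RCU_DYNTICK_CTRL_CTR));	/* expect RCU watching */
		instrumentation_end();
	}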

Signed-off-by: Peter Zijlstra (Intel) <pet...@infradead.org>
Link: https://lkml.kernel.org/r/20200603084818.gb2...@hirez.programming.kicks-ass.net
---
 kernel/rcu/tree.c |   11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -250,7 +250,7 @@ static noinstr void rcu_dynticks_eqs_ent
         * next idle sojourn.
         */
        rcu_dynticks_task_trace_enter();  // Before ->dynticks update!
-       seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
        // RCU is no longer watching.  Better be in extended quiescent state!
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     (seq & RCU_DYNTICK_CTRL_CTR));
@@ -274,13 +274,13 @@ static noinstr void rcu_dynticks_eqs_exi
         * and we also must force ordering with the next RCU read-side
         * critical section.
         */
-       seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
+       seq = arch_atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdp->dynticks);
        // RCU is now watching.  Better not be in an extended quiescent state!
        rcu_dynticks_task_trace_exit();  // After ->dynticks update!
        WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
                     !(seq & RCU_DYNTICK_CTRL_CTR));
        if (seq & RCU_DYNTICK_CTRL_MASK) {
-               atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
+               arch_atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdp->dynticks);
                smp_mb__after_atomic(); /* _exit after clearing mask. */
        }
 }
@@ -313,7 +313,7 @@ static __always_inline bool rcu_dynticks
 {
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
-       return !(atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
+       return !(arch_atomic_read(&rdp->dynticks) & RCU_DYNTICK_CTRL_CTR);
 }
 
 /*
@@ -692,6 +692,7 @@ noinstr void rcu_nmi_exit(void)
 {
        struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
+       instrumentation_begin();
        /*
         * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
         * (We are exiting an NMI handler, so RCU better be paying attention
@@ -705,7 +706,6 @@ noinstr void rcu_nmi_exit(void)
         * leave it in non-RCU-idle state.
         */
        if (rdp->dynticks_nmi_nesting != 1) {
-               instrumentation_begin();
                trace_rcu_dyntick(TPS("--="), rdp->dynticks_nmi_nesting, 
rdp->dynticks_nmi_nesting - 2,
                                  atomic_read(&rdp->dynticks));
                WRITE_ONCE(rdp->dynticks_nmi_nesting, /* No store tearing. */
@@ -714,7 +714,6 @@ noinstr void rcu_nmi_exit(void)
                return;
        }
 
-       instrumentation_begin();
        /* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
        trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, 
atomic_read(&rdp->dynticks));
        WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */

