On Tue, Mar 03, 2026 at 03:45:58PM -0500, Joel Fernandes wrote:
> On Mon, 02 Mar 2026 11:04:04 +0100, Uladzislau Rezki (Sony) wrote:
> 
> >  * The latch is cleared only when the pending requests are fully
> >    drained (nr == 0);
> 
> > +static void rcu_sr_normal_add_req(struct rcu_synchronize *rs)
> > +{
> > +   long nr;
> > +
> > +   llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next);
> > +   nr = atomic_long_inc_return(&rcu_sr_normal_count);
> > +
> > +   /* Latch: only when flooded and if unlatched. */
> > +   if (nr >= RCU_SR_NORMAL_LATCH_THR)
> > +           (void)atomic_cmpxchg(&rcu_sr_normal_latched, 0, 1);
> > +}
> 
> I think there is a stuck-latch race here. Once llist_add() places the
> entry in srs_next, the GP kthread can pick it up and fire
> rcu_sr_normal_complete() before the latching cmpxchg runs. If the last
> in-flight completion drains count to zero in that window, the unlatch
> cmpxchg(latched, 1, 0) fails (latched is still 0 at that moment), and
> then the latching cmpxchg(latched, 0, 1) fires anyway, with count=0:
> 
>   CPU 0 (add_req, count just hit 64)       GP kthread
>   ----------------------------------       ----------
>   llist_add()    <-- entry now in srs_next
>   inc_return()   --> nr = 64
>   [preempted]
>                                             rcu_sr_normal_complete() x64:
>                                               dec_return -> count: 64..1..0
>                                               count==0:
>                                               cmpxchg(latched, 1, 0)
>                                                 --> FAILS (latched still 0)
>   [resumes]
>   cmpxchg(latched, 0, 1) --> latched = 1
> 
>   Final state: count=0, latched=1  -->  STUCK LATCH
> 
> All subsequent synchronize_rcu() callers see latched==1 and take the
> fallback path, which never touches the counter. With no new SR-normal
> callers, rcu_sr_normal_complete() is never reached again, so the
> unlatch cmpxchg(latched, 1, 0) never fires. The latch is permanently
> stuck.
> 
> This requires preemption for a full GP duration between llist_add() and
> the cmpxchg, which is probably more likely on PREEMPT_RT or heavily loaded
> systems.
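> 
> A minimal single-threaded userspace model (my simplification, not the
> kernel code) that replays the interleaving above deterministically and
> ends in the stuck state:
> 
> <snip>
> #include <stdatomic.h>
> #include <stdio.h>
> 
> #define THR 64	/* stands in for RCU_SR_NORMAL_LATCH_THR */
> 
> static atomic_long count;
> static atomic_int latched;
> 
> int main(void)
> {
> 	long nr = 0;
> 	int expected;
> 
> 	/* 64 callers: each llist_add() (modeled implicitly) is
> 	 * followed by the counter bump; the last one gets nr == 64. */
> 	for (int i = 0; i < THR; i++)
> 		nr = atomic_fetch_add(&count, 1) + 1;
> 
> 	/* CPU 0 is preempted here, before its latching cmpxchg. */
> 
> 	/* GP kthread: rcu_sr_normal_complete() x64 drains count to 0;
> 	 * the unlatch cmpxchg fails because latched is still 0. */
> 	for (int i = 0; i < THR; i++) {
> 		if (atomic_fetch_sub(&count, 1) - 1 == 0) {
> 			expected = 1;
> 			atomic_compare_exchange_strong(&latched, &expected, 0);
> 		}
> 	}
> 
> 	/* CPU 0 resumes and latches with count already at 0. */
> 	if (nr >= THR) {
> 		expected = 0;
> 		atomic_compare_exchange_strong(&latched, &expected, 1);
> 	}
> 
> 	/* Prints count=0 latched=1: the stuck latch. */
> 	printf("count=%ld latched=%d\n",
> 	       (long)atomic_load(&count), atomic_load(&latched));
> 	return 0;
> }
> <snip>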
> 
> The fix: move the cmpxchg *before* llist_add(), so the entry is not
> visible to the GP kthread until after the latch is already set.
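> 
> A sketch of what I mean (the increment has to move up with it so nr
> is available before the publish):
> 
> <snip>
> static void rcu_sr_normal_add_req(struct rcu_synchronize *rs)
> {
> 	long nr = atomic_long_inc_return(&rcu_sr_normal_count);
> 
> 	/* Latch before the entry is visible to the GP kthread. */
> 	if (nr >= RCU_SR_NORMAL_LATCH_THR)
> 		(void)atomic_cmpxchg(&rcu_sr_normal_latched, 0, 1);
> 
> 	/* Only now publish the entry. */
> 	llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next);
> }
> <snip>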
> 
> That should fix it, thoughts?
> 
Yes and thank you!

We can improve it further by removing the atomic_cmpxchg() from
rcu_sr_normal_add_req() altogether, because only one context can
observe the (nr == RCU_SR_NORMAL_LATCH_THR) condition:

<snip>
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 86dc88a70fd0..72b340940e11 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1640,7 +1640,7 @@ static struct workqueue_struct *sync_wq;
 
 /* Number of in-flight synchronize_rcu() calls queued on srs_next. */
 static atomic_long_t rcu_sr_normal_count;
-static atomic_t rcu_sr_normal_latched;
+static int rcu_sr_normal_latched; /* 0/1 */
 
 static void rcu_sr_normal_complete(struct llist_node *node)
 {
@@ -1662,7 +1662,7 @@ static void rcu_sr_normal_complete(struct llist_node *node)
         * drained and if it has been latched.
         */
        if (nr == 0)
-               (void)atomic_cmpxchg(&rcu_sr_normal_latched, 1, 0);
+               (void)cmpxchg(&rcu_sr_normal_latched, 1, 0);
 }
 
 static void rcu_sr_normal_gp_cleanup_work(struct work_struct *work)
@@ -1808,14 +1808,22 @@ static bool rcu_sr_normal_gp_init(void)
 
 static void rcu_sr_normal_add_req(struct rcu_synchronize *rs)
 {
-       long nr;
+       /*
+        * Increment before publishing the node to avoid
+        * a completion vs. enqueue race on the latch.
+        */
+       long nr = atomic_long_inc_return(&rcu_sr_normal_count);
 
-       llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next);
-       nr = atomic_long_inc_return(&rcu_sr_normal_count);
+       /*
+        * Latch on the threshold crossing. Only one context can
+        * observe (nr == RCU_SR_NORMAL_LATCH_THR), so a plain store
+        * avoids cmpxchg contention on the write path.
+        */
+       if (nr == RCU_SR_NORMAL_LATCH_THR)
+               WRITE_ONCE(rcu_sr_normal_latched, 1);
 
-       /* Latch: only when flooded and if unlatched. */
-       if (nr >= RCU_SR_NORMAL_LATCH_THR)
-               (void)atomic_cmpxchg(&rcu_sr_normal_latched, 0, 1);
+       /* Publish for the GP kthread/worker. */
+       llist_add((struct llist_node *) &rs->head, &rcu_state.srs_next);
 }
 
 /*
@@ -3302,7 +3310,7 @@ static void synchronize_rcu_normal(void)
        trace_rcu_sr_normal(rcu_state.name, &rs.head, TPS("request"));
 
        if (READ_ONCE(rcu_normal_wake_from_gp) < 1 ||
-                       atomic_read(&rcu_sr_normal_latched)) {
+                       READ_ONCE(rcu_sr_normal_latched)) {
                wait_rcu_gp(call_rcu_hurry);
                goto trace_complete_out;
        }
<snip>
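
As a quick sanity check of the "only one context" claim, a userspace
model (my simplification; increments only, so it exercises a single
crossing, though with drains the threshold can of course be re-crossed
later, again by exactly one incrementer):

<snip>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 128
#define THR 64	/* stands in for RCU_SR_NORMAL_LATCH_THR */

static atomic_long count;
static atomic_int crossings;

static void *adder(void *arg)
{
	(void)arg;
	/* fetch_add return values are unique, so exactly one
	 * thread observes the crossing to THR. */
	if (atomic_fetch_add(&count, 1) + 1 == THR)
		atomic_fetch_add(&crossings, 1);
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, adder, NULL);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);

	/* Always prints crossings=1. */
	printf("count=%ld crossings=%d\n",
	       (long)atomic_load(&count), atomic_load(&crossings));
	return 0;
}
<snip>

On ordering: atomic_long_inc_return() is fully ordered, and if I read
llist_add() right its internal try_cmpxchg() is too, so the WRITE_ONCE()
of the latch cannot become visible after the node does; the GP kthread
cannot drain the counter to zero without also seeing latched == 1.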

--
Uladzislau Rezki
