Under high load, tcp_wfree() performs an atomic operation on every call, trying to schedule a tasklet that is usually already pending.
We can schedule it only if our per-cpu list was empty: a non-empty list means a previous caller already scheduled the tasklet.

Signed-off-by: Eric Dumazet <eduma...@google.com>
---
 net/ipv4/tcp_output.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9143c52b3105..999f7d583092 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -880,6 +880,7 @@ void tcp_wfree(struct sk_buff *skb)
 
 	for (oval = READ_ONCE(tp->tsq_flags);; oval = nval) {
 		struct tsq_tasklet *tsq;
+		bool empty;
 
 		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
 			goto out;
@@ -892,8 +893,10 @@ void tcp_wfree(struct sk_buff *skb)
 		/* queue this socket to tasklet queue */
 		local_irq_save(flags);
 		tsq = this_cpu_ptr(&tsq_tasklet);
+		empty = list_empty(&tsq->head);
 		list_add(&tp->tsq_node, &tsq->head);
-		tasklet_schedule(&tsq->tasklet);
+		if (empty)
+			tasklet_schedule(&tsq->tasklet);
 		local_irq_restore(flags);
 		return;
 	}
-- 
2.8.0.rc3.226.g39d4020
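
For readers outside the kernel tree: tasklet_schedule() performs an atomic
test_and_set_bit() on TASKLET_STATE_SCHED even when the tasklet is already
pending, and that is the atomic operation this patch avoids. Below is a
minimal userspace sketch of the same pattern using C11 atomics in place of
tasklets; every name in it (worker, worker_schedule, queue_work_item) is
illustrative, not a kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct work_item {
	struct work_item *next;
};

struct worker {
	struct work_item *head;   /* list drained by the worker */
	atomic_bool scheduled;    /* analogue of TASKLET_STATE_SCHED */
};

/* Analogue of tasklet_schedule(): one atomic RMW on every call. */
static void worker_schedule(struct worker *w)
{
	if (!atomic_exchange(&w->scheduled, true)) {
		/* first caller: actually kick the worker here */
	}
}

/* Caller must already serialize list updates (irqs off in the kernel). */
static void queue_work_item(struct worker *w, struct work_item *item)
{
	bool empty = (w->head == NULL);

	item->next = w->head;
	w->head = item;

	/*
	 * Only the empty -> non-empty transition needs to schedule:
	 * a non-empty list means the worker is already pending, so
	 * the atomic exchange above can be skipped entirely.
	 */
	if (empty)
		worker_schedule(w);
}

The design choice mirrors the patch: emptiness is checked under the same
protection that serializes list updates, so the empty -> non-empty
transition is a reliable signal that the worker still needs scheduling.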