The preempt_disable/enable tracepoints fire only on the preemption enabled <-> disabled transition. As preempt_latency_start() and preempt_latency_stop() already implement this check, avoid code duplication by using these functions in the softirq code as well.
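For reference, a minimal sketch of the gating both helpers perform (abridged from the kernel/sched/core.c definitions touched below; the CONFIG_DEBUG_PREEMPT bookkeeping in preempt_latency_start() is omitted):

	void preempt_latency_start(int val)
	{
		/* Count just reached val: the disable transition. */
		if (preempt_count() == val)
			trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
	}

	void preempt_latency_stop(int val)
	{
		/* Count is about to drop below val: the enable transition. */
		if (preempt_count() == val)
			trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
	}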
RFC: Should we move preempt_latency_start/preempt_latency_stop to a
trace source file... or even a header?

Signed-off-by: Daniel Bristot de Oliveira <bris...@redhat.com>
Cc: "Steven Rostedt (VMware)" <rost...@goodmis.org>
Cc: Ingo Molnar <mi...@redhat.com>
Cc: Peter Zijlstra <pet...@infradead.org>
Cc: Thomas Gleixner <t...@linutronix.de>
Cc: "Paul E. McKenney" <paul...@linux.vnet.ibm.com>
Cc: Matthias Kaehlcke <m...@chromium.org>
Cc: "Joel Fernandes (Google)" <j...@joelfernandes.org>
Cc: Frederic Weisbecker <frede...@kernel.org>
Cc: Yangtao Li <tiny.win...@gmail.com>
Cc: Tommaso Cucinotta <tommaso.cucino...@santannapisa.it>
Cc: linux-kernel@vger.kernel.org
---
 kernel/sched/core.c |  4 ++--
 kernel/softirq.c    | 13 +++++--------
 2 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 874c427742a9..8c0b414e45dc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3152,7 +3152,7 @@ static inline void sched_tick_stop(int cpu) { }
  * If the value passed in is equal to the current preempt count
  * then we just disabled preemption. Start timing the latency.
  */
-static inline void preempt_latency_start(int val)
+void preempt_latency_start(int val)
 {
 	if (preempt_count() == val) {
 		unsigned long ip = get_lock_parent_ip();
@@ -3189,7 +3189,7 @@ NOKPROBE_SYMBOL(preempt_count_add);
  * If the value passed in equals to the current preempt count
  * then we just enabled preemption. Stop timing the latency.
  */
-static inline void preempt_latency_stop(int val)
+void preempt_latency_stop(int val)
 {
 	if (preempt_count() == val)
 		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 2c3382378d94..c9ad89c3dfed 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -108,6 +108,8 @@ static bool ksoftirqd_running(unsigned long pending)
  * where hardirqs are disabled legitimately:
  */
 #ifdef CONFIG_TRACE_IRQFLAGS
+extern void preempt_latency_start(int val);
+extern void preempt_latency_stop(int val);
 void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 {
 	unsigned long flags;
@@ -130,12 +132,8 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 		trace_softirqs_off(ip);
 	raw_local_irq_restore(flags);
 
-	if (preempt_count() == cnt) {
-#ifdef CONFIG_DEBUG_PREEMPT
-		current->preempt_disable_ip = get_lock_parent_ip();
-#endif
-		trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
-	}
+	preempt_latency_start(cnt);
+
 }
 EXPORT_SYMBOL(__local_bh_disable_ip);
 #endif /* CONFIG_TRACE_IRQFLAGS */
@@ -144,8 +142,7 @@ static void __local_bh_enable(unsigned int cnt)
 {
 	lockdep_assert_irqs_disabled();
 
-	if (preempt_count() == cnt)
-		trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+	preempt_latency_stop(cnt);
 
 	if (softirq_count() == (cnt & SOFTIRQ_MASK))
 		trace_softirqs_on(_RET_IP_);
-- 
2.20.1