When CONFIG_TRACE_PREEMPT_TOGGLE is enabled, preempt_count_add() and
preempt_count_sub() become out-of-line function calls (defined in
kernel/sched/core.c) rather than inlined operations. These functions
compare preempt_count() against val on every invocation and, on the
first preempt disable and last preempt enable, call
trace_preempt_on/off() even when no tracing consumer is attached.
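
For reference, a simplified sketch of the current out-of-line path
(the preempt_latency_start() logic folded in, debug checks omitted):

  void preempt_count_add(int val)
  {
          __preempt_count_add(val);
          if (preempt_count() == val)     /* first preempt disable */
                  trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
  }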

Reduce this overhead by splitting the #if logic in preempt.h into
three cases. When CONFIG_DEBUG_PREEMPT or CONFIG_PREEMPT_TRACER is
set, keep external function calls because DEBUG_PREEMPT needs runtime
validation checks, and PREEMPT_TRACER needs the preemptoff latency
tracer hooks (tracer_preempt_on/off, called via trace_preempt_on/off).
When CONFIG_TRACE_PREEMPT_TOGGLE alone is set, provide new inline
versions of preempt_count_add/sub() that check the tracepoint static
key via the __preempt_trace_enabled() macro before calling into the
tracing path. The macro evaluates to true when the preempt_enable or
preempt_disable tracepoint has subscribers AND the preempt count
equals val (indicating the first preempt disable or last preempt
enable), preserving the original preempt_latency_start/stop semantics.
When none of the above are set, use pure inline macros with no tracing
overhead.
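
The first-disable/last-enable semantics of the count == val check can
be illustrated with a small stand-alone model (user-space C, not
kernel code; the plain counter and the boolean are stand-ins for the
per-CPU preempt count and the tracepoint static key):

  #include <stdbool.h>
  #include <stdio.h>

  static int preempt_count;          /* stands in for the per-CPU preempt count */
  static bool trace_enabled = true;  /* stands in for the tracepoint static key */

  static void preempt_count_add(int val)
  {
          preempt_count += val;
          if (trace_enabled && preempt_count == val)  /* first disable */
                  printf("preempt off\n");
  }

  static void preempt_count_sub(int val)
  {
          if (trace_enabled && preempt_count == val)  /* last enable */
                  printf("preempt on\n");
          preempt_count -= val;
  }

  int main(void)
  {
          preempt_count_add(1);   /* outermost disable -> "preempt off" */
          preempt_count_add(1);   /* nested disable    -> silent        */
          preempt_count_sub(1);   /* nested enable     -> silent        */
          preempt_count_sub(1);   /* outermost enable  -> "preempt on"  */
          return 0;
  }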

The preempt_count_dec_and_test() macro is refactored out of the
three-way #if into a separate block shared by the first two cases,
since both need it to call the (potentially inline)
preempt_count_sub() before checking should_resched().
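
For context, under CONFIG_PREEMPTION preempt_enable() consumes the
macro roughly like this (simplified), which is why the decrement and
the should_resched() check have to stay folded together:

  #define preempt_enable() \
  do { \
          barrier(); \
          if (unlikely(preempt_count_dec_and_test())) \
                  __preempt_schedule(); \
  } while (0)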

The inline path calls thin __trace_preempt_on/off() wrappers (added
in trace_preemptirq.c) that invoke trace_preempt_on/off(), keeping
the full tracepoint machinery out of the inline code.

The #include <linux/tracepoint-defs.h> is placed inside the
CONFIG_TRACE_PREEMPT_TOGGLE block rather than at the top of the file
to avoid a circular include dependency on architectures where
asm/irqflags.h includes linux/preempt.h (e.g. m68k):

  preempt.h -> tracepoint-defs.h -> static_key.h -> jump_label.h ->
  atomic.h -> irqflags.h -> asm/irqflags.h -> preempt.h (guarded)

If the include were at the top, this chain would be traversed before
hardirq_count() is defined (at line 110), causing a build failure on
m68k. Placing it inside the #elif block means the chain is only
traversed after hardirq_count() is already defined, and it is not
pulled in at all for configurations that do not select
CONFIG_TRACE_PREEMPT_TOGGLE.
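
A stripped-down illustration of the failure mode, using hypothetical
headers rather than the kernel files:

  /* foo.h */
  #ifndef FOO_H
  #define FOO_H
  #include "bar.h"              /* traversed while FOO_H is already set    */
  #define foo_count()  (0)      /* only visible after the include returns  */
  #endif

  /* bar.h */
  #ifndef BAR_H
  #define BAR_H
  #include "foo.h"              /* no-op: the include guard is already set */
  static inline int bar(void)
  {
          return foo_count();   /* build error: not yet defined            */
  }
  #endif

Moving the include below the definition it depends on, as this patch
does for tracepoint-defs.h, avoids the problem.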

In core.c, narrow the compilation guard for the external
preempt_count_add/sub() from CONFIG_DEBUG_PREEMPT ||
CONFIG_TRACE_PREEMPT_TOGGLE to CONFIG_DEBUG_PREEMPT ||
CONFIG_PREEMPT_TRACER, since CONFIG_TRACE_PREEMPT_TOGGLE is now
handled inline.

Signed-off-by: Wander Lairson Costa <[email protected]>
Suggested-by: Steven Rostedt <[email protected]>
---
 include/linux/preempt.h         | 49 +++++++++++++++++++++++++++++++--
 kernel/sched/core.c             |  2 +-
 kernel/trace/trace_preemptirq.c | 19 +++++++++++++
 3 files changed, 66 insertions(+), 4 deletions(-)

diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index d964f965c8ffc..f59a92f930d81 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -189,17 +189,60 @@ static __always_inline unsigned char interrupt_context_level(void)
  */
 #define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
 
-#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
 extern void preempt_count_sub(int val);
-#define preempt_count_dec_and_test() \
-       ({ preempt_count_sub(1); should_resched(0); })
+#elif defined(CONFIG_TRACE_PREEMPT_TOGGLE)
+/*
+ * Avoid the circular dependency on architectures where asm/irqflags.h
+ * includes linux/preempt.h (e.g. m68k):
+ *
+ * preempt.h <--------------------+
+ *  tracepoint-defs.h             |
+ *   static_key.h                 |
+ *    jump_label.h                |
+ *     atomic.h                   |
+ *      irqflags.h                |
+ *       asm/irqflags.h           |
+ *        preempt.h --------------+
+ */
+#include <linux/tracepoint-defs.h>
+
+extern void __trace_preempt_on(void);
+extern void __trace_preempt_off(void);
+
+DECLARE_TRACEPOINT(preempt_enable);
+DECLARE_TRACEPOINT(preempt_disable);
+
+#define __preempt_trace_enabled(type, val) \
+       (tracepoint_enabled(preempt_##type) && preempt_count() == (val))
+
+static __always_inline void preempt_count_add(int val)
+{
+       __preempt_count_add(val);
+
+       if (__preempt_trace_enabled(disable, val))
+               __trace_preempt_off();
+}
+
+static __always_inline void preempt_count_sub(int val)
+{
+       if (__preempt_trace_enabled(enable, val))
+               __trace_preempt_on();
+
+       __preempt_count_sub(val);
+}
 #else
 #define preempt_count_add(val) __preempt_count_add(val)
 #define preempt_count_sub(val) __preempt_count_sub(val)
 #define preempt_count_dec_and_test() __preempt_count_dec_and_test()
 #endif
 
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
+#define preempt_count_dec_and_test() \
+       ({ preempt_count_sub(1); should_resched(0); })
+#endif
+
 #define __preempt_count_inc() __preempt_count_add(1)
 #define __preempt_count_dec() __preempt_count_sub(1)
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b7f77c165a6e0..125e5d71d1bd3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5733,7 +5733,7 @@ static inline void sched_tick_stop(int cpu) { }
 #endif /* !CONFIG_NO_HZ_FULL */
 
 #if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
-                               defined(CONFIG_TRACE_PREEMPT_TOGGLE))
+                                  defined(CONFIG_PREEMPT_TRACER))
 /*
  * If the value passed in is equal to the current preempt count
  * then we just disabled preemption. Start timing the latency.
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index 0c42b15c38004..9f098fcb28012 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -115,6 +115,25 @@ NOKPROBE_SYMBOL(trace_hardirqs_off);
 
 #ifdef CONFIG_TRACE_PREEMPT_TOGGLE
 
+#if !defined(CONFIG_DEBUG_PREEMPT) && !defined(CONFIG_PREEMPT_TRACER)
+EXPORT_TRACEPOINT_SYMBOL(preempt_disable);
+EXPORT_TRACEPOINT_SYMBOL(preempt_enable);
+
+void __trace_preempt_on(void)
+{
+       trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+}
+EXPORT_SYMBOL(__trace_preempt_on);
+NOKPROBE_SYMBOL(__trace_preempt_on);
+
+void __trace_preempt_off(void)
+{
+       trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip());
+}
+EXPORT_SYMBOL(__trace_preempt_off);
+NOKPROBE_SYMBOL(__trace_preempt_off);
+#endif /* !CONFIG_DEBUG_PREEMPT && !CONFIG_PREEMPT_TRACER */
+
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
        trace(preempt_enable, TP_ARGS(a0, a1));
-- 
2.53.0

