This flag simplifies debugging of NO_HZ_FULL kernels when processes
are running in PR_CPU_ISOLATED_ENABLE mode.  Such processes should
receive no interrupts from the kernel; when this boot flag is
specified and an interrupt does arrive anyway, a kernel stack dump
is generated on the console.
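
For context, a minimal userspace sketch of how a task might opt in
to this mode.  This is illustrative only and not part of the patch:
the PR_SET_CPU_ISOLATED prctl name and value below are assumed from
earlier patches in this series and are not in mainline.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_CPU_ISOLATED
#define PR_SET_CPU_ISOLATED	47	/* assumed value, from this series */
#define PR_CPU_ISOLATED_ENABLE	1
#endif

int main(void)
{
	cpu_set_t set;

	/* Pin to cpu 1, assumed to be in the nohz_full= set. */
	CPU_ZERO(&set);
	CPU_SET(1, &set);
	if (sched_setaffinity(0, sizeof(set), &set) != 0) {
		perror("sched_setaffinity");
		return 1;
	}

	/* Ask the kernel to keep this task interrupt-free from here on. */
	if (prctl(PR_SET_CPU_ISOLATED, PR_CPU_ISOLATED_ENABLE, 0, 0, 0) != 0) {
		perror("prctl");
		return 1;
	}

	/* Userspace-only busy loop; with cpu_isolated_debug set, any
	 * interrupt targeting this core now produces a backtrace. */
	for (;;)
		;
}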

It's possible to use ftrace simply to detect whether a cpu_isolated
core has unexpectedly entered the kernel.  But what this boot flag
does is allow the kernel to provide better diagnostics, e.g. by
reporting, from the IPI-generating code itself, which remote core
and context is preparing to deliver an interrupt to a cpu_isolated
core.
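
For comparison, a rough sketch of the ftrace-based detection, driven
from C via the tracefs files.  It assumes debugfs is mounted at
/sys/kernel/debug and that cpu 1 is the isolated core; any trace
output at all means that core entered the kernel.

#include <stdio.h>

#define TRACEFS "/sys/kernel/debug/tracing/"

static int write_file(const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), TRACEFS "%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	char line[512];
	FILE *pipe;

	write_file("tracing_cpumask", "2");	/* trace cpu 1 only */
	write_file("current_tracer", "function");
	write_file("tracing_on", "1");

	/* Every line read here is a kernel entry on the isolated core,
	 * but with no indication of which remote core caused it. */
	pipe = fopen(TRACEFS "trace_pipe", "r");
	if (!pipe)
		return 1;
	while (fgets(line, sizeof(line), pipe))
		fputs(line, stdout);
	return 0;
}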

It may be worth considering other ways to generate useful debugging
output rather than console spew, but for now console output is simple
and direct.

Signed-off-by: Chris Metcalf <[email protected]>
---
 Documentation/kernel-parameters.txt |  6 ++++++
 arch/tile/mm/homecache.c            |  5 ++++-
 include/linux/tick.h                |  2 ++
 kernel/irq_work.c                   |  4 +++-
 kernel/sched/core.c                 | 18 ++++++++++++++++++
 kernel/signal.c                     |  5 +++++
 kernel/smp.c                        |  4 ++++
 kernel/softirq.c                    |  6 ++++++
 8 files changed, 48 insertions(+), 2 deletions(-)

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1d6f0459cd7b..76e8e2ff4a0a 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -749,6 +749,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        /proc/<pid>/coredump_filter.
                        See also Documentation/filesystems/proc.txt.
 
+       cpu_isolated_debug      [KNL]
+                       In kernels built with CONFIG_NO_HZ_FULL and booted
+                       in nohz_full= mode, this setting will generate console
+                       backtraces when the kernel is about to interrupt a
+                       task that has requested PR_CPU_ISOLATED_ENABLE.
+
        cpuidle.off=1   [CPU_IDLE]
                        disable the cpuidle sub-system
 
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 40ca30a9fee3..f336880e1b01 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -31,6 +31,7 @@
 #include <linux/smp.h>
 #include <linux/module.h>
 #include <linux/hugetlb.h>
+#include <linux/tick.h>
 
 #include <asm/page.h>
 #include <asm/sections.h>
@@ -83,8 +84,10 @@ static void hv_flush_update(const struct cpumask *cache_cpumask,
         * Don't bother to update atomically; losing a count
         * here is not that critical.
         */
-       for_each_cpu(cpu, &mask)
+       for_each_cpu(cpu, &mask) {
                ++per_cpu(irq_stat, cpu).irq_hv_flush_count;
+               tick_nohz_cpu_isolated_debug(cpu);
+       }
 }
 
 /*
diff --git a/include/linux/tick.h b/include/linux/tick.h
index f79f6945f762..ed65551e2315 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -159,6 +159,7 @@ extern void __tick_nohz_task_switch(struct task_struct *tsk);
 extern void tick_nohz_cpu_isolated_enter(void);
 extern void tick_nohz_cpu_isolated_syscall(int nr);
 extern void tick_nohz_cpu_isolated_exception(void);
+extern void tick_nohz_cpu_isolated_debug(int cpu);
 #else
 static inline bool tick_nohz_full_enabled(void) { return false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
@@ -172,6 +173,7 @@ static inline bool tick_nohz_is_cpu_isolated(void) { return false; }
 static inline void tick_nohz_cpu_isolated_enter(void) { }
 static inline void tick_nohz_cpu_isolated_syscall(int nr) { }
 static inline void tick_nohz_cpu_isolated_exception(void) { }
+static inline void tick_nohz_cpu_isolated_debug(int cpu) { }
 #endif
 
 static inline bool is_housekeeping_cpu(int cpu)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index cbf9fb899d92..7f35c90346de 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -75,8 +75,10 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
        if (!irq_work_claim(work))
                return false;
 
-       if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+       if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) {
+               tick_nohz_cpu_isolated_debug(cpu);
                arch_send_call_function_single_ipi(cpu);
+       }
 
        return true;
 }
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 78b4bad10081..c8388f9206b2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -743,6 +743,24 @@ bool sched_can_stop_tick(void)
 
        return true;
 }
+
+/* Enable debugging of any interrupts of cpu_isolated cores. */
+static int cpu_isolated_debug;
+static int __init cpu_isolated_debug_func(char *str)
+{
+       cpu_isolated_debug = true;
+       return 1;
+}
+__setup("cpu_isolated_debug", cpu_isolated_debug_func);
+
+void tick_nohz_cpu_isolated_debug(int cpu)
+{
+       if (cpu_isolated_debug && tick_nohz_full_cpu(cpu) &&
+           (cpu_curr(cpu)->cpu_isolated_flags & PR_CPU_ISOLATED_ENABLE)) {
+               pr_err("Interrupt detected for cpu_isolated cpu %d\n", cpu);
+               dump_stack();
+       }
+}
 #endif /* CONFIG_NO_HZ_FULL */
 
 void sched_avg_update(struct rq *rq)
diff --git a/kernel/signal.c b/kernel/signal.c
index 836df8dac6cc..90ee460c2586 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -684,6 +684,11 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
  */
 void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
+#ifdef CONFIG_NO_HZ_FULL
+       /* If the task is being killed, don't complain about cpu_isolated. */
+       if (state & TASK_WAKEKILL)
+               t->cpu_isolated_flags = 0;
+#endif
        set_tsk_thread_flag(t, TIF_SIGPENDING);
        /*
         * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
diff --git a/kernel/smp.c b/kernel/smp.c
index 07854477c164..6b7d8e2c8af4 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -14,6 +14,7 @@
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/sched.h>
+#include <linux/tick.h>
 
 #include "smpboot.h"
 
@@ -178,6 +179,7 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
         * locking and barrier primitives. Generic code isn't really
         * equipped to do the right thing...
         */
+       tick_nohz_cpu_isolated_debug(cpu);
        if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
                arch_send_call_function_single_ipi(cpu);
 
@@ -457,6 +459,8 @@ void smp_call_function_many(const struct cpumask *mask,
        }
 
        /* Send a message to all CPUs in the map */
+       for_each_cpu(cpu, cfd->cpumask)
+               tick_nohz_cpu_isolated_debug(cpu);
        arch_send_call_function_ipi_mask(cfd->cpumask);
 
        if (wait) {
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 479e4436f787..333872925ff6 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -24,6 +24,7 @@
 #include <linux/ftrace.h>
 #include <linux/smp.h>
 #include <linux/smpboot.h>
+#include <linux/context_tracking.h>
 #include <linux/tick.h>
 #include <linux/irq.h>
 
@@ -335,6 +336,11 @@ void irq_enter(void)
                _local_bh_enable();
        }
 
+       if (context_tracking_cpu_is_enabled() &&
+           context_tracking_in_user() &&
+           !in_interrupt())
+               tick_nohz_cpu_isolated_debug(smp_processor_id());
+
        __irq_enter();
 }
 
-- 
2.1.2
