To support backtracing other CPUs in the system on lockups, implement arch_trigger_cpumask_backtrace() for arm64.
In this patch, we add an arm64 NMI-like, IPI-based backtracer, following the
arm implementation by Russell King and Chris Metcalf.

Signed-off-by: Wei Li <liwei...@huawei.com>
---
 arch/arm64/include/asm/hardirq.h |  2 +-
 arch/arm64/include/asm/irq.h     |  6 ++++++
 arch/arm64/kernel/smp.c          | 22 +++++++++++++++++++++-
 3 files changed, 28 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 89691c86640a..a5d94aa59c7c 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -24,7 +24,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/sysreg.h>
 
-#define NR_IPI	7
+#define NR_IPI	8
 
 typedef struct {
 	unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index b2b0c6405eb0..28471df488c0 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -13,5 +13,11 @@ static inline int nr_legacy_irqs(void)
 	return 0;
 }
 
+#ifdef CONFIG_SMP
+extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
+					   bool exclude_self);
+#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
+#endif
+
 #endif /* !__ASSEMBLER__ */
 #endif
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index bd8fdf6fcd8e..7e862f9124f3 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -35,6 +35,7 @@
 #include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/nmi.h>
 #include <linux/irqchip/arm-gic-v3.h>
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
@@ -83,7 +84,8 @@ enum ipi_msg_type {
 	IPI_CPU_CRASH_STOP,
 	IPI_TIMER,
 	IPI_IRQ_WORK,
-	IPI_WAKEUP
+	IPI_WAKEUP,
+	IPI_CPU_BACKTRACE
 };
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -787,6 +789,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 	S(IPI_TIMER, "Timer broadcast interrupts"),
 	S(IPI_IRQ_WORK, "IRQ work interrupts"),
 	S(IPI_WAKEUP, "CPU wake-up interrupts"),
+	S(IPI_CPU_BACKTRACE, "Backtrace interrupts"),
 };
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
@@ -946,6 +949,12 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		break;
 #endif
 
+	case IPI_CPU_BACKTRACE:
+		nmi_enter();
+		nmi_cpu_backtrace(regs);
+		nmi_exit();
+		break;
+
 	default:
 		pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
 		break;
@@ -1070,4 +1079,15 @@ bool cpus_are_stuck_in_kernel(void)
 
 void ipi_gic_nmi_setup(void __iomem *base)
 {
+	gic_sgi_set_prio(base, IPI_CPU_BACKTRACE, GICD_INT_NMI_PRI);
+}
+
+static void raise_nmi(cpumask_t *mask)
+{
+	smp_cross_call(mask, IPI_CPU_BACKTRACE);
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+{
+	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_nmi);
 }
-- 
2.17.1
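
For context, once the arch_trigger_cpumask_backtrace #define above is visible,
the generic wrappers in include/linux/nmi.h (trigger_all_cpu_backtrace(),
trigger_allbutself_cpu_backtrace(), ...) resolve to this IPI-based backtracer
instead of the no-op fallback, so existing callers such as the sysrq 'l'
handler should pick it up automatically. A minimal sketch of a caller follows;
debug_dump_other_cpus() is a hypothetical illustration, not part of this patch:

/* Hypothetical illustration only -- not part of this patch. */
#include <linux/nmi.h>
#include <linux/printk.h>

static void debug_dump_other_cpus(void)
{
	/*
	 * With arch_trigger_cpumask_backtrace() defined for arm64, this
	 * sends IPI_CPU_BACKTRACE to every online CPU except the calling
	 * one; each target dumps its stack from the NMI-like IPI handler
	 * via nmi_cpu_backtrace().
	 */
	if (!trigger_allbutself_cpu_backtrace())
		pr_warn("CPU backtrace not supported on this architecture\n");
}

The same path should be exercised by 'echo l > /proc/sysrq-trigger', which
goes through trigger_all_cpu_backtrace().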