When an NMI is raised while interrupts were disabled, IRQ tracing is
already in the correct state (i.e. hardirqs_off) and should be left as
such when returning to the interrupted context.

Check whether PMR was masking interrupts when the NMI was raised and
skip IRQ tracing if necessary.
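
For reference, the check added to el1_irq boils down to the following C
sketch. This is purely illustrative and not part of the patch:
should_trace_hardirqs() is a made-up helper, and saved_pmr stands for the
PMR value saved in pt_regs (S_PMR_SAVE), or GIC_PRIO_IRQON when priority
masking is not in use.

  /*
   * Illustrative only -- mirrors the "cmp x20, #GIC_PRIO_IRQOFF; b.ls 1f"
   * sequence added below.
   */
  static inline bool should_trace_hardirqs(u64 saved_pmr)
  {
          /*
           * A saved PMR at or below GIC_PRIO_IRQOFF means IRQs were masked
           * when the exception was taken, i.e. this is an NMI: leave the
           * hardirqs_off state untouched and skip the tracing calls.
           */
          return saved_pmr > GIC_PRIO_IRQOFF;
  }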

Signed-off-by: Julien Thierry <julien.thie...@arm.com>
Cc: Catalin Marinas <catalin.mari...@arm.com>
Cc: Will Deacon <will.dea...@arm.com>
Cc: Marc Zyngier <marc.zyng...@arm.com>
---
 arch/arm64/kernel/entry.S | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index a0b0a22..bd0b078 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -617,7 +617,18 @@ el1_irq:
        kernel_entry 1
        enable_da_f
 #ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+alternative_if ARM64_HAS_IRQ_PRIO_MASKING
+       ldr     x20, [sp, #S_PMR_SAVE]
+alternative_else
+       mov     x20, #GIC_PRIO_IRQON
+alternative_endif
+       cmp     x20, #GIC_PRIO_IRQOFF
+       /* IRQs were disabled, don't trace */
+       b.ls    1f
+#endif
        bl      trace_hardirqs_off
+1:
 #endif
 
        irq_handler
@@ -637,8 +648,18 @@ alternative_else_nop_endif
 1:
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_ARM64_PSEUDO_NMI
+       /*
+        * If IRQs were disabled when we received the interrupt, we have an NMI
+        * and we are not re-enabling interrupts upon eret. Skip tracing.
+        */
+       cmp     x20, #GIC_PRIO_IRQOFF
+       b.ls    1f
+#endif
        bl      trace_hardirqs_on
+1:
 #endif
+
        kernel_exit 1
 ENDPROC(el1_irq)
 
-- 
1.9.1
