Treat code below __end_soft_masked as soft-masked for the purpose of alternate return. 64s already mostly does this for scv entry.

This will be used to exit from interrupts without disabling MSR[EE].

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/include/asm/interrupt.h |  8 ++++++--
 arch/powerpc/kernel/exceptions-64e.S | 12 +++++++++++-
 arch/powerpc/kernel/exceptions-64s.S | 19 +++++++++++--------
 arch/powerpc/kernel/interrupt_64.S   |  6 +++++-
 4 files changed, 33 insertions(+), 12 deletions(-)

diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 49d9a6fd1bb9..fe02fbbd6b06 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -160,6 +160,10 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
 	 */
 	if (TRAP(regs) != INTERRUPT_PROGRAM)
 		CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
+	BUG_ON(regs->nip < (unsigned long)__end_soft_masked);
+	/* Move this under a debugging check */
+	if (arch_irq_disabled_regs(regs))
+		BUG_ON(search_kernel_restart_table(regs->nip));
 }
 #endif
 
@@ -254,8 +258,8 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
 		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 
 	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !(regs->msr & MSR_PR) &&
-	    regs->nip < (unsigned long)__end_interrupts) {
-		// Kernel code running below __end_interrupts is
+	    regs->nip < (unsigned long)__end_soft_masked) {
+		// Kernel code running below __end_soft_masked is
 		// implicitly soft-masked.
 		regs->softe = IRQS_ALL_DISABLED;
 	}
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 1b79f8a75298..22fcd95dd8dc 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -342,7 +342,17 @@ ret_from_mc_except:
 #define PROLOG_ADDITION_MASKABLE_GEN(n)					    \
 	lbz	r10,PACAIRQSOFTMASK(r13);	/* are irqs soft-masked? */ \
 	andi.	r10,r10,IRQS_DISABLED;	/* yes -> go out of line */	    \
-	bne	masked_interrupt_book3e_##n
+	bne	masked_interrupt_book3e_##n;				    \
+	/* Kernel code below __end_soft_masked is implicitly masked */	    \
+	andi.	r10,r11,MSR_PR;						    \
+	bne	1f;			/* user -> not masked */	    \
+	std	r14,PACA_EXGEN+EX_R14(r13);				    \
+	LOAD_REG_IMMEDIATE_SYM(r14, r10, __end_soft_masked);		    \
+	mfspr	r10,SPRN_SRR0;						    \
+	cmpld	r10,r14;						    \
+	ld	r14,PACA_EXGEN+EX_R14(r13);				    \
+	blt	masked_interrupt_book3e_##n;				    \
+1:
 
 /*
  * Additional regs must be re-loaded from paca before EXCEPTION_COMMON* is
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 17a213f25c92..2d980addc88c 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -508,10 +508,13 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
 	andi.	r10,r12,MSR_PR
 	bne	2f
 
-	/* Kernel code running below __end_interrupts is implicitly
-	 * soft-masked */
-	LOAD_HANDLER(r10, __end_interrupts)
+	/*
+	 * Kernel code running below __end_soft_masked is implicitly
+	 * soft-masked
+	 */
+	LOAD_HANDLER(r10, __end_soft_masked)
 	cmpld	r11,r10
+	li	r10,IMASK
 	blt-	1f
@@ -824,17 +827,17 @@ __start_interrupts:
  * scv instructions enter the kernel without changing EE, RI, ME, or HV.
  * In particular, this means we can take a maskable interrupt at any point
  * in the scv handler, which is unlike any other interrupt. This is solved
- * by treating the instruction addresses below __end_interrupts as being
+ * by treating the instruction addresses below __end_soft_masked as being
  * soft-masked.
  *
  * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
 * ensure scv is never executed with relocation off, which means AIL-0
 * should never happen.
 *
- * Before leaving the below __end_interrupts text, at least of the following
- * must be true:
+ * Before leaving the following inside-__end_soft_masked text, at least one of
+ * the following must be true:
 * - MSR[PR]=1 (i.e., return to userspace)
- * - MSR_EE|MSR_RI is set (no reentrant exceptions)
+ * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
 * - Standard kernel environment is set up (stack, paca, etc)
 *
 * Call convention:
@@ -3099,7 +3102,7 @@ kvmppc_skip_Hinterrupt:
 
 	USE_FIXED_SECTION(virt_trampolines)
 	/*
-	 * All code below __end_interrupts is treated as soft-masked. If
+	 * All code below __end_soft_masked is treated as soft-masked. If
 	 * any code runs here with MSR[EE]=1, it must then cope with pending
 	 * soft interrupt being raised (i.e., by ensuring it is replayed).
 	 *
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index 3038c831fc5f..cf53293c8498 100644
--- a/arch/powerpc/kernel/interrupt_64.S
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -633,4 +633,8 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 interrupt_return_macro srr
 #ifdef CONFIG_PPC_BOOK3S
 interrupt_return_macro hsrr
-#endif
+#endif /* CONFIG_PPC_BOOK3S */
+
+	.globl __end_soft_masked
+__end_soft_masked:
+DEFINE_FIXED_SYMBOL(__end_soft_masked)
-- 
2.23.0
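
For readers following along, the rule the patch enforces can be modelled in a
few lines of ordinary C. The sketch below is illustrative only:
implicitly_soft_masked(), struct regs, the MSR_PR value, and the example
end_soft_masked address are stand-ins invented for this note, not kernel
interfaces; the test itself mirrors the interrupt_nmi_enter_prepare() hunk
above.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_PR (1ULL << 14)	/* MSR problem-state (user mode) bit */

/* Stand-in for the kernel's linker symbol; the value is illustrative. */
static const uint64_t end_soft_masked = 0xc000000000008000ULL;

struct regs {
	uint64_t nip;	/* address of the interrupted instruction */
	uint64_t msr;	/* MSR at the time of the interrupt */
};

/*
 * Mirrors the test added to interrupt_nmi_enter_prepare(): a kernel-mode
 * interrupt whose NIP lies below __end_soft_masked must be handled as if
 * it had hit soft-masked code (the kernel then forces regs->softe to
 * IRQS_ALL_DISABLED; modelled here as a boolean result).
 */
static bool implicitly_soft_masked(const struct regs *regs)
{
	if (regs->msr & MSR_PR)
		return false;	/* user mode is never implicitly masked */

	return regs->nip < end_soft_masked;
}

int main(void)
{
	/* An interrupt taken in low kernel entry text, MSR[PR]=0. */
	struct regs r = { .nip = 0xc000000000004500ULL, .msr = 0 };

	printf("implicitly soft-masked: %d\n", implicitly_soft_masked(&r));
	return 0;
}

In the kernel proper the same condition additionally requires
CONFIG_PPC_BOOK3S_64, and its effect is that the interrupt exit path replays,
rather than directly delivers, anything that becomes pending while running
inside the implicitly soft-masked window.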