These will be used by subsequent patches.

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/include/asm/interrupt.h | 66 ++++++++++++++++++++++++++++
 1 file changed, 66 insertions(+)
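
[Not part of the patch, for illustration only: a minimal sketch of roughly
what DEFINE_INTERRUPT_HANDLER(do_example_handler) expands to once this
change is applied. "do_example_handler" is a placeholder name, and the
prepare hooks are still empty stubs in this patch.]

static __always_inline void ___do_example_handler(struct pt_regs *regs);

__visible noinstr void do_example_handler(struct pt_regs *regs)
{
	struct interrupt_state state;

	/* entry hook; an empty stub in this patch */
	interrupt_enter_prepare(regs, &state);

	/* the handler body written at the DEFINE_INTERRUPT_HANDLER() site */
	___do_example_handler (regs);

	/* exit hook; an empty stub in this patch */
	interrupt_exit_prepare(regs, &state);
}

static __always_inline void ___do_example_handler(struct pt_regs *regs)
{
	/* handler body supplied at the macro use site */
}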
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index 04b7bdf97a44..3a94153ff0d4 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -5,6 +5,50 @@
 #include <linux/context_tracking.h>
 #include <asm/ftrace.h>
 
+struct interrupt_state {
+};
+
+static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
+{
+}
+
+/*
+ * Care should be taken to note that interrupt_exit_prepare and
+ * interrupt_async_exit_prepare do not necessarily return immediately to
+ * regs context (e.g., if regs is usermode, we don't necessarily return to
+ * user mode). Other interrupts might be taken between here and return,
+ * context switch / preemption may occur in the exit path after this, or a
+ * signal may be delivered, etc.
+ *
+ * The real interrupt exit code is platform specific, e.g.,
+ * interrupt_exit_user_prepare / interrupt_exit_kernel_prepare for 64s.
+ *
+ * However interrupt_nmi_exit_prepare does return directly to regs, because
+ * NMIs do not do "exit work" or replay soft-masked interrupts.
+ */
+static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
+{
+}
+
+static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
+{
+}
+
+static inline void interrupt_async_exit_prepare(struct pt_regs *regs, struct interrupt_state *state)
+{
+}
+
+struct interrupt_nmi_state {
+};
+
+static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
+{
+}
+
+static inline void interrupt_nmi_exit_prepare(struct pt_regs *regs, struct interrupt_nmi_state *state)
+{
+}
+
 /**
  * DECLARE_INTERRUPT_HANDLER_RAW - Declare raw interrupt handler function
  * @func: Function name of the entry point
@@ -59,7 +103,13 @@ static __always_inline void ___##func(struct pt_regs *regs);		\
 									\
 __visible noinstr void func(struct pt_regs *regs)			\
 {									\
+	struct interrupt_state state;					\
+									\
+	interrupt_enter_prepare(regs, &state);				\
+									\
 	___##func (regs);						\
+									\
+	interrupt_exit_prepare(regs, &state);				\
 }									\
 									\
 static __always_inline void ___##func(struct pt_regs *regs)
@@ -87,10 +137,15 @@ static __always_inline long ___##func(struct pt_regs *regs);		\
 									\
 __visible noinstr long func(struct pt_regs *regs)			\
 {									\
+	struct interrupt_state state;					\
 	long ret;							\
 									\
+	interrupt_enter_prepare(regs, &state);				\
+									\
 	ret = ___##func (regs);						\
 									\
+	interrupt_exit_prepare(regs, &state);				\
+									\
 	return ret;							\
 }									\
 									\
@@ -117,7 +172,13 @@ static __always_inline void ___##func(struct pt_regs *regs);		\
 									\
 __visible noinstr void func(struct pt_regs *regs)			\
 {									\
+	struct interrupt_state state;					\
+									\
+	interrupt_async_enter_prepare(regs, &state);			\
+									\
 	___##func (regs);						\
+									\
+	interrupt_async_exit_prepare(regs, &state);			\
 }									\
 									\
 static __always_inline void ___##func(struct pt_regs *regs)
@@ -145,10 +206,15 @@ static __always_inline long ___##func(struct pt_regs *regs);		\
 									\
 __visible noinstr long func(struct pt_regs *regs)			\
 {									\
+	struct interrupt_nmi_state state;				\
 	long ret;							\
 									\
+	interrupt_nmi_enter_prepare(regs, &state);			\
+									\
 	ret = ___##func (regs);						\
 									\
+	interrupt_nmi_exit_prepare(regs, &state);			\
+									\
 	return ret;							\
 }									\
 									\
-- 
2.23.0