Add new soft_enabled_* manipulation functions (soft_enabled_return() and a register-argument soft_enabled_set_return()) and reimplement arch_local_save_flags() and arch_local_irq_disable() as thin wrappers around them.
Signed-off-by: Madhavan Srinivasan <ma...@linux.vnet.ibm.com> --- arch/powerpc/include/asm/hw_irq.h | 32 ++++++++++++++------------------ 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index f828b8f8df02..dc3c248f9244 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h @@ -53,21 +53,7 @@ static inline notrace void soft_enabled_set(unsigned long enable) : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); } -static inline notrace unsigned long soft_enabled_set_return(unsigned long enable) -{ - unsigned long flags; - - asm volatile( - "lbz %0,%1(13); stb %2,%1(13)" - : "=r" (flags) - : "i" (offsetof(struct paca_struct, soft_enabled)),\ - "r" (enable) - : "memory"); - - return flags; -} - -static inline unsigned long arch_local_save_flags(void) +static inline notrace unsigned long soft_enabled_return(void) { unsigned long flags; @@ -79,20 +65,30 @@ static inline unsigned long arch_local_save_flags(void) return flags; } -static inline unsigned long arch_local_irq_disable(void) +static inline notrace unsigned long soft_enabled_set_return(unsigned long enable) { unsigned long flags, zero; asm volatile( - "li %1,%3; lbz %0,%2(13); stb %1,%2(13)" + "mr %1,%3; lbz %0,%2(13); stb %1,%2(13)" : "=r" (flags), "=&r" (zero) : "i" (offsetof(struct paca_struct, soft_enabled)),\ - "i" (IRQ_DISABLE_MASK_LINUX) + "r" (enable) : "memory"); return flags; } +static inline unsigned long arch_local_save_flags(void) +{ + return soft_enabled_return(); +} + +static inline unsigned long arch_local_irq_disable(void) +{ + return soft_enabled_set_return(IRQ_DISABLE_MASK_LINUX); +} + extern void arch_local_irq_restore(unsigned long); static inline void arch_local_irq_enable(void) -- 2.7.4