Christophe asked about doing this; most of the code is still in asm, but maybe it's slightly nicer? I don't know if it's worthwhile. --- arch/powerpc/kernel/idle.c | 25 ++++++++++++++++++++----- arch/powerpc/kernel/idle_book3s.S | 22 ---------------------- 2 files changed, 20 insertions(+), 27 deletions(-)
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c index ae0e2632393d..849e77a45915 100644 --- a/arch/powerpc/kernel/idle.c +++ b/arch/powerpc/kernel/idle.c @@ -72,6 +72,9 @@ int powersave_nap; #ifdef CONFIG_PPC_970_NAP void power4_idle(void) { + unsigned long msr_idle = MSR_KERNEL|MSR_EE|MSR_POW; + unsigned long tmp1, tmp2; + if (!cpu_has_feature(CPU_FTR_CAN_NAP)) return; @@ -84,13 +87,25 @@ void power4_idle(void) if (cpu_has_feature(CPU_FTR_ALTIVEC)) asm volatile("DSSALL ; sync" ::: "memory"); - power4_idle_nap(); - + asm volatile( +" ld %0,PACA_THREAD_INFO(r13) \n" +" ld %1,TI_LOCAL_FLAGS(%0) \n" +" ori %1,%1,_TLF_NAPPING \n" +" std %1,TI_LOCAL_FLAGS(%0) \n" /* - * power4_idle_nap returns with interrupts enabled (soft and hard). - * to our caller with interrupts enabled (soft and hard). Our caller - * can cope with either interrupts disabled or enabled upon return. + * NAPPING bit is set, from this point onward nap_adjust_return() + * will cause interrupts to return to power4_idle_nap_return. */ +"1: sync \n" +" isync \n" +" mtmsrd %2 \n" +" isync \n" +" b 1b \n" +" .globl power4_idle_nap_return \n" +"power4_idle_nap_return: \n" + : "=r"(tmp1), "=r"(tmp2) + : "r"(msr_idle) + ); } #endif diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 27d2e6a72ec9..d4047f3c672e 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -184,25 +184,3 @@ _GLOBAL(isa206_idle_insn_mayloss) IDLE_STATE_ENTER_SEQ_NORET(PPC_SLEEP) 2: IDLE_STATE_ENTER_SEQ_NORET(PPC_WINKLE) #endif - -#ifdef CONFIG_PPC_970_NAP -_GLOBAL(power4_idle_nap) - LOAD_REG_IMMEDIATE(r7, MSR_KERNEL|MSR_EE|MSR_POW) - ld r9,PACA_THREAD_INFO(r13) - ld r8,TI_LOCAL_FLAGS(r9) - ori r8,r8,_TLF_NAPPING - std r8,TI_LOCAL_FLAGS(r9) - /* - * NAPPING bit is set, from this point onward power4_fixup_nap - * will cause exceptions to return to power4_idle_nap_return. 
- */ -1: sync - isync - mtmsrd r7 - isync - b 1b - - .globl power4_idle_nap_return -power4_idle_nap_return: - blr -#endif -- 2.23.0