Implement inline static calls:
- Put a 'bl' to the destination function ('b' if tail call)
- Put a 'nop' when the destination function is NULL ('blr' if tail call)
- Put a 'li r3,0' when the destination is the RET0 function and not a tail call

If the destination is too far (over the 32MB branch range limit), go via
the trampoline.
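For context, a minimal sketch of a typical static call user (illustration
only, not part of this patch; my_op, my_default and my_other are made-up
names):

  #include <linux/static_call.h>

  static int my_default(int arg)
  {
          return arg + 1;
  }

  DEFINE_STATIC_CALL(my_op, my_default);

  int do_op(int arg)
  {
          /*
           * With HAVE_STATIC_CALL_INLINE this call site itself is patched
           * ('bl', 'b', 'nop', 'blr' or 'li r3,0' as listed above) rather
           * than always branching through the out-of-line trampoline.
           */
          return static_call(my_op)(arg);
  }

  static int my_other(int arg)
  {
          return arg * 2;
  }

  void change_op(void)
  {
          /* Retargeting ends up in arch_static_call_transform() below. */
          static_call_update(my_op, &my_other);
  }

With the existing out-of-line implementation the call site always branches
to the trampoline, which then branches to the target; with inline static
calls the common cases patch the call site directly and only fall back to
the trampoline when the target is out of branch range.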
Signed-off-by: Christophe Leroy <christophe.le...@csgroup.eu>
---
 arch/powerpc/Kconfig                   |  1 +
 arch/powerpc/include/asm/static_call.h |  2 +
 arch/powerpc/kernel/static_call.c      | 56 +++++++++++++++++++-------
 3 files changed, 44 insertions(+), 15 deletions(-)

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 00a43eb26418..cb92887acc3f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -251,6 +251,7 @@ config PPC
 	select HAVE_STACKPROTECTOR		if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
 	select HAVE_STACKPROTECTOR		if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
 	select HAVE_STATIC_CALL			if PPC32
+	select HAVE_STATIC_CALL_INLINE		if PPC32
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_VIRT_CPU_ACCOUNTING
 	select HUGETLB_PAGE_SIZE_VARIABLE	if PPC_BOOK3S_64 && HUGETLB_PAGE
diff --git a/arch/powerpc/include/asm/static_call.h b/arch/powerpc/include/asm/static_call.h
index de1018cc522b..e3d5d3823dac 100644
--- a/arch/powerpc/include/asm/static_call.h
+++ b/arch/powerpc/include/asm/static_call.h
@@ -26,4 +26,6 @@
 #define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name)	__PPC_SCT(name, "blr")
 #define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name)	__PPC_SCT(name, "b .+20")
 
+#define CALL_INSN_SIZE 4
+
 #endif /* _ASM_POWERPC_STATIC_CALL_H */
diff --git a/arch/powerpc/kernel/static_call.c b/arch/powerpc/kernel/static_call.c
index 863a7aa24650..0093b471186d 100644
--- a/arch/powerpc/kernel/static_call.c
+++ b/arch/powerpc/kernel/static_call.c
@@ -8,26 +8,52 @@ void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
 {
 	int err;
 	bool is_ret0 = (func == __static_call_return0);
-	unsigned long target = (unsigned long)(is_ret0 ? tramp + PPC_SCT_RET0 : func);
-	bool is_short = is_offset_in_branch_range((long)target - (long)tramp);
-
-	if (!tramp)
-		return;
+	unsigned long _tramp = (unsigned long)tramp;
+	unsigned long _func = (unsigned long)func;
+	unsigned long _ret0 = _tramp + PPC_SCT_RET0;
+	bool is_short = is_offset_in_branch_range((long)func - (long)(site ? site : tramp));
 
 	mutex_lock(&text_mutex);
 
-	if (func && !is_short) {
-		err = patch_instruction(tramp + PPC_SCT_DATA, ppc_inst(target));
-		if (err)
-			goto out;
+	if (site && !tail) {
+		if (!func)
+			err = patch_instruction(site, ppc_inst(PPC_RAW_NOP()));
+		else if (is_ret0)
+			err = patch_instruction(site, ppc_inst(PPC_RAW_LI(_R3, 0)));
+		else if (is_short)
+			err = patch_branch(site, _func, BRANCH_SET_LINK);
+		else if (tramp)
+			err = patch_branch(site, _tramp, BRANCH_SET_LINK);
+		else
+			err = 0;
+	} else if (site) {
+		if (!func)
+			err = patch_instruction(site, ppc_inst(PPC_RAW_BLR()));
+		else if (is_ret0)
+			err = patch_branch(site, _ret0, 0);
+		else if (is_short)
+			err = patch_branch(site, _func, 0);
+		else if (tramp)
+			err = patch_branch(site, _tramp, 0);
+		else
+			err = 0;
+	} else if (tramp) {
+		if (func && !is_short) {
+			err = patch_instruction(tramp + PPC_SCT_DATA, ppc_inst(_func));
+			if (err)
+				goto out;
+		}
+
+		if (!func)
+			err = patch_instruction(tramp, ppc_inst(PPC_RAW_BLR()));
+		else if (is_ret0)
+			err = patch_branch(tramp, _ret0, 0);
+		else if (is_short)
+			err = patch_branch(tramp, _func, 0);
+		else
+			err = patch_instruction(tramp, ppc_inst(PPC_RAW_NOP()));
 	}
 
-	if (!func)
-		err = patch_instruction(tramp, ppc_inst(PPC_RAW_BLR()));
-	else if (is_short)
-		err = patch_branch(tramp, target, 0);
-	else
-		err = patch_instruction(tramp, ppc_inst(PPC_RAW_NOP()));
 
 out:
 	mutex_unlock(&text_mutex);
-- 
2.36.1