book3e/64 is the last platform still calling __bad_page_fault() from assembly. Save the non-volatile registers before calling do_page_fault(), and modify do_page_fault() to call __bad_page_fault() for all platforms.
It can then be refactored into a call to bad_page_fault(), which avoids duplicating the exception table search.

Signed-off-by: Christophe Leroy <christophe.le...@csgroup.eu>
---
 arch/powerpc/kernel/exceptions-64e.S |  8 +-------
 arch/powerpc/mm/fault.c              | 17 ++++-------------
 2 files changed, 5 insertions(+), 20 deletions(-)

diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index e8eb9992a270..b60f89078a3f 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1010,15 +1010,9 @@ storage_fault_common:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	ld	r14,PACA_EXGEN+EX_R14(r13)
 	ld	r15,PACA_EXGEN+EX_R15(r13)
+	bl	save_nvgprs
 	bl	do_page_fault
-	cmpdi	r3,0
-	bne-	1f
 	b	ret_from_except_lite
-1:	bl	save_nvgprs
-	mr	r4,r3
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	__bad_page_fault
-	b	ret_from_except
 
 /*
  * Alignment exception doesn't fit entirely in the 0x100 bytes so it
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 2e54bac99a22..7bcff3fca110 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -541,24 +541,15 @@ NOKPROBE_SYMBOL(___do_page_fault);
 
 static long __do_page_fault(struct pt_regs *regs)
 {
-	const struct exception_table_entry *entry;
 	long err;
 
 	err = ___do_page_fault(regs, regs->dar, regs->dsisr);
 	if (likely(!err))
-		return err;
-
-	entry = search_exception_tables(regs->nip);
-	if (likely(entry)) {
-		instruction_pointer_set(regs, extable_fixup(entry));
 		return 0;
-	} else if (!IS_ENABLED(CONFIG_PPC_BOOK3E_64)) {
-		__bad_page_fault(regs, err);
-		return 0;
-	} else {
-		/* 32 and 64e handle the bad page fault in asm */
-		return err;
-	}
+
+	bad_page_fault(regs, err);
+
+	return 0;
 }
 NOKPROBE_SYMBOL(__do_page_fault);
-- 
2.25.0
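For reference, here is a minimal sketch of the bad_page_fault() behaviour
this patch relies on, reconstructed from the fixup logic removed from
__do_page_fault() above; the actual implementation in arch/powerpc/mm/fault.c
may differ in detail:

/* Sketch only: lives in kernel context (arch/powerpc/mm/fault.c). */
void bad_page_fault(struct pt_regs *regs, int sig)
{
	const struct exception_table_entry *entry;

	/* Is there an exception table fixup for the faulting instruction? */
	entry = search_exception_tables(regs->nip);
	if (entry)
		/* Yes: resume execution at the registered fixup address. */
		instruction_pointer_set(regs, extable_fixup(entry));
	else
		/* No fixup: report the unrecoverable kernel page fault. */
		__bad_page_fault(regs, sig);
}

With the exception table search centralised in bad_page_fault(),
__do_page_fault() no longer duplicates it, and book3e/64 no longer
needs an error-return path into __bad_page_fault from assembly.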