Implement FTRACE_WITH_REGS for powerpc64, on ELF ABI v2.
Initial work started by Vojtech Pavlik, used with permission.

  * arch/powerpc/kernel/entry_64.S:
    - enhance _mcount with a stub to support call sites
      generated by -mprofile-kernel. This is backward-compatible.
    - Implement an effective ftrace_caller that works from
      within the kernel binary as well as from modules.
  * arch/powerpc/kernel/ftrace.c:
    - be prepared to deal with ppc64 ELF ABI v2, especially
      calls to _mcount that result from gcc -mprofile-kernel
    - a little more error verbosity
  * arch/powerpc/kernel/module_64.c:
    - do not save the TOC pointer on the trampoline when the
      destination is ftrace_caller. This trampoline jump happens from
      a function prologue before a new stack frame is set up, so bad
      things may happen otherwise...
    - relax is_module_trampoline() to recognise the modified
      trampoline.

Signed-off-by: Torsten Duwe <d...@suse.de>
---
 arch/powerpc/include/asm/ftrace.h |   5 ++
 arch/powerpc/kernel/entry_64.S    | 113 +++++++++++++++++++++++++++++++++++++-
 arch/powerpc/kernel/ftrace.c      |  72 +++++++++++++++++++++---
 arch/powerpc/kernel/module_64.c   |  39 ++++++++++++-
 4 files changed, 217 insertions(+), 12 deletions(-)

diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index ef89b14..6eb9fbc 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -46,6 +46,8 @@
 extern void _mcount(void);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
+# define FTRACE_ADDR ((unsigned long)ftrace_caller)
+# define FTRACE_REGS_ADDR FTRACE_ADDR
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
        /* reloction of mcount call site is the same as the address */
@@ -58,6 +60,9 @@ struct dyn_arch_ftrace {
 #endif /*  CONFIG_DYNAMIC_FTRACE */
 #endif /* __ASSEMBLY__ */
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#endif
 #endif
 
 #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index a94f155..b0dfbfe 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -1206,8 +1206,13 @@ _GLOBAL(enter_prom)
 #ifdef CONFIG_DYNAMIC_FTRACE
 _GLOBAL(mcount)
 _GLOBAL(_mcount)
-       blr
+       mflr    r0
+       mtctr   r0
+       ld      r0,LRSAVE(r1)
+       mtlr    r0
+       bctr
 
+#ifndef CC_USING_MPROFILE_KERNEL
 _GLOBAL_TOC(ftrace_caller)
        /* Taken from output of objdump from lib64/glibc */
        mflr    r3
@@ -1229,6 +1234,81 @@ _GLOBAL(ftrace_graph_stub)
        ld      r0, 128(r1)
        mtlr    r0
        addi    r1, r1, 112
+#else
+_GLOBAL(ftrace_caller)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+       mflr    r0
+       bl      2f
+2:     mflr    r12
+       mtlr    r0
+       mr      r0,r2   // save callee's TOC
+       addis   r2,r12,(.TOC.-ftrace_caller-8)@ha
+       addi    r2,r2,(.TOC.-ftrace_caller-8)@l
+#else
+       mr      r0,r2
+#endif
+       ld      r12,LRSAVE(r1)  // get caller's address
+
+       stdu    r1,-SWITCH_FRAME_SIZE(r1)
+
+       std     r12, _LINK(r1)
+       SAVE_8GPRS(0,r1)
+       std     r0, 24(r1)      // save TOC
+       SAVE_8GPRS(8,r1)
+       SAVE_8GPRS(16,r1)
+       SAVE_8GPRS(24,r1)
+
+       LOAD_REG_IMMEDIATE(r3,function_trace_op)
+       ld      r5,0(r3)
+
+       mflr    r3
+       std     r3, _NIP(r1)
+       std     r3, 16(r1)
+       subi    r3, r3, MCOUNT_INSN_SIZE
+       mfmsr   r4
+       std     r4, _MSR(r1)
+       mfctr   r4
+       std     r4, _CTR(r1)
+       mfxer   r4
+       std     r4, _XER(r1)
+       mr      r4, r12
+       addi    r6, r1 ,STACK_FRAME_OVERHEAD
+
+.globl ftrace_call
+ftrace_call:
+       bl      ftrace_stub
+       nop
+
+       ld      r3, _NIP(r1)
+       mtlr    r3
+
+       REST_8GPRS(0,r1)
+       REST_8GPRS(8,r1)
+       REST_8GPRS(16,r1)
+       REST_8GPRS(24,r1)
+
+       addi r1, r1, SWITCH_FRAME_SIZE
+
+       ld      r12, LRSAVE(r1)  // get caller's address
+       mtlr    r12
+       mr      r2,r0           // restore callee's TOC
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       stdu    r1, -112(r1)
+.globl ftrace_graph_call
+ftrace_graph_call:
+       b       ftrace_graph_stub
+_GLOBAL(ftrace_graph_stub)
+       addi    r1, r1, 112
+#endif
+
+       mflr    r0              // move this LR to CTR
+       mtctr   r0
+
+       ld      r0,LRSAVE(r1)   // restore callee's lr at _mcount site
+       mtlr    r0
+       bctr                    // jump after _mcount site
+#endif /* CC_USING_MPROFILE_KERNEL */
 _GLOBAL(ftrace_stub)
        blr
 #else
@@ -1262,6 +1342,19 @@ _GLOBAL(ftrace_stub)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 _GLOBAL(ftrace_graph_caller)
+#ifdef CC_USING_MPROFILE_KERNEL
+       // with -mprofile-kernel, parameter regs are still alive at _mcount
+       std     r10, 104(r1)
+       std     r9, 96(r1)
+       std     r8, 88(r1)
+       std     r7, 80(r1)
+       std     r6, 72(r1)
+       std     r5, 64(r1)
+       std     r4, 56(r1)
+       std     r3, 48(r1)
+       mflr    r0
+       std     r0, 40(r1)
+#endif
        /* load r4 with local address */
        ld      r4, 128(r1)
        subi    r4, r4, MCOUNT_INSN_SIZE
@@ -1280,10 +1373,28 @@ _GLOBAL(ftrace_graph_caller)
        ld      r11, 112(r1)
        std     r3, 16(r11)
 
+#ifdef CC_USING_MPROFILE_KERNEL
+       ld      r0, 40(r1)
+       mtctr   r0
+       ld      r10, 104(r1)
+       ld      r9, 96(r1)
+       ld      r8, 88(r1)
+       ld      r7, 80(r1)
+       ld      r6, 72(r1)
+       ld      r5, 64(r1)
+       ld      r4, 56(r1)
+       ld      r3, 48(r1)
+
+       addi    r1, r1, 112
+       ld      r0, LRSAVE(r1)
+       mtlr    r0
+       bctr
+#else
        ld      r0, 128(r1)
        mtlr    r0
        addi    r1, r1, 112
        blr
+#endif
 
 _GLOBAL(return_to_handler)
        /* need to save return values */
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 44d4d8e..310137f 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -61,8 +61,11 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
                return -EFAULT;
 
        /* Make sure it is what we expect it to be */
-       if (replaced != old)
+       if (replaced != old) {
+               pr_err("%p: replaced (%#x) != old (%#x)\n",
+                      (void *)ip, replaced, old);
                return -EINVAL;
+       }
 
        /* replace the text with the new text */
        if (patch_instruction((unsigned int *)ip, new))
@@ -106,14 +109,16 @@ static int
 __ftrace_make_nop(struct module *mod,
                  struct dyn_ftrace *rec, unsigned long addr)
 {
-       unsigned int op;
+       unsigned int op, op0, op1, pop;
        unsigned long entry, ptr;
        unsigned long ip = rec->ip;
        void *tramp;
 
        /* read where this goes */
-       if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
+       if (probe_kernel_read(&op, (void *)ip, sizeof(int))) {
+               pr_err("Fetching opcode failed.\n");
                return -EFAULT;
+       }
 
        /* Make sure that that this is still a 24bit jump */
        if (!is_bl_op(op)) {
@@ -158,10 +163,46 @@ __ftrace_make_nop(struct module *mod,
         *
         * Use a b +8 to jump over the load.
         */
-       op = 0x48000008;        /* b +8 */
 
-       if (patch_instruction((unsigned int *)ip, op))
+       pop = 0x48000008;       /* b +8 */
+
+       /*
+        * Check what is in the next instruction. We can see ld r2,40(r1), but
+        * on first pass after boot we will see mflr r0.
+        */
+       if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) {
+               pr_err("Fetching op failed.\n");
+               return -EFAULT;
+       }
+
+       if (op != 0xe8410028) { /* ld r2,STACK_OFFSET(r1) */
+
+               if (probe_kernel_read(&op0, (void *)(ip-8), MCOUNT_INSN_SIZE)) {
+                       pr_err("Fetching op0 failed.\n");
+                       return -EFAULT;
+               }
+
+               if (probe_kernel_read(&op1, (void *)(ip-4), MCOUNT_INSN_SIZE)) {
+                       pr_err("Fetching op1 failed.\n");
+                       return -EFAULT;
+               }
+
+               /* mflr r0 ; std r0,LRSAVE(r1) */
+               if (op0 != 0x7c0802a6 || op1 != 0xf8010010) {
+                       pr_err("Unexpected instructions around bl\n"
+                               "when enabling dynamic ftrace!\t"
+                               "(%08x,%08x,bl,%08x)\n", op0, op1, op);
+                       return -EINVAL;
+               }
+
+       /* When using -mprofile-kernel there is no load to jump over */
+               pop = PPC_INST_NOP;
+       }
+
+       if (patch_instruction((unsigned int *)ip, pop)) {
+               pr_err("Patching NOP failed.\n");
                return -EPERM;
+       }
 
        return 0;
 }
@@ -287,6 +328,13 @@ int ftrace_make_nop(struct module *mod,
 
 #ifdef CONFIG_MODULES
 #ifdef CONFIG_PPC64
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+                       unsigned long addr)
+{
+       return ftrace_make_call(rec, addr);
+}
+#endif
 static int
 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
@@ -306,11 +354,19 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
         * The load offset is different depending on the ABI. For simplicity
         * just mask it out when doing the compare.
         */
+#ifndef CC_USING_MPROFILE_KERNEL
        if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
-               pr_err("Unexpected call sequence: %x %x\n", op[0], op[1]);
+               pr_err("Unexpected call sequence at %p: %x %x\n",
+               ip, op[0], op[1]);
                return -EINVAL;
        }
-
+#else
+       /* look for patched "NOP" on ppc64 with -mprofile-kernel */
+       if (op[0] != 0x60000000) {
+               pr_err("Unexpected call at %p: %x\n", ip, op[0]);
+               return -EINVAL;
+       }
+#endif
        /* If we never set up a trampoline to ftrace_caller, then bail */
        if (!rec->arch.mod->arch.tramp) {
                pr_err("No ftrace trampoline\n");
@@ -330,7 +386,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
        return 0;
 }
-#else
+#else  /* !CONFIG_PPC64: */
 static int
 __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 {
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 6838451..e62c41f 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -138,12 +138,21 @@ static u32 ppc64_stub_insns[] = {
        0x4e800420                      /* bctr */
 };
 
+/* In case of _mcount calls or dynamic ftrace patching, do not save the
+ * current callee's TOC (in r2) again into the original caller's stack
+ * frame during this trampoline hop.  The stack frame already holds
+ * that of the original caller.  _mcount and ftrace_caller will take
+ * care of this TOC value themselves.
+ */
+#define SQUASH_TOC_SAVE_INSN(trampoline_addr) \
+       (((struct ppc64_stub_entry *)(trampoline_addr))->jump[2] = PPC_INST_NOP)
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static u32 ppc64_stub_mask[] = {
        0xffff0000,
        0xffff0000,
-       0xffffffff,
+       0x00000000,
        0xffffffff,
 #if !defined(_CALL_ELF) || _CALL_ELF != 2
        0xffffffff,
@@ -170,6 +179,9 @@ bool is_module_trampoline(u32 *p)
                if ((insna & mask) != (insnb & mask))
                        return false;
        }
+       if (insns[2] != ppc64_stub_insns[2] &&
+           insns[2] != PPC_INST_NOP)
+               return false;
 
        return true;
 }
@@ -475,6 +487,19 @@ static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
 static int restore_r2(u32 *instruction, struct module *me)
 {
        if (*instruction != PPC_INST_NOP) {
+
+               /* -mprofile_kernel sequence starting with
+                * mflr r0; std r0, LRSAVE(r1)
+                */
+               if (instruction[-3] == 0x7c0802a6 &&
+                   instruction[-2] == 0xf8010010) {
+                       /* Nothing to be done here, it's an _mcount
+                        * call location and r2 will have to be
+                        * restored in the _mcount function.
+                        */
+                       return 2;
+               }
+
                pr_err("%s: Expect noop after relocate, got %08x\n",
                       me->name, *instruction);
                return 0;
@@ -490,7 +515,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                       unsigned int relsec,
                       struct module *me)
 {
-       unsigned int i;
+       unsigned int i, r2;
        Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
        Elf64_Sym *sym;
        unsigned long *location;
@@ -603,8 +628,12 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
                                value = stub_for_addr(sechdrs, value, me);
                                if (!value)
                                        return -ENOENT;
-                               if (!restore_r2((u32 *)location + 1, me))
+                               r2 = restore_r2((u32 *)location + 1, me);
+                               if (!r2)
                                        return -ENOEXEC;
+                               /* Squash the TOC saver for profiler calls */
+                               if (!strcmp("_mcount", strtab+sym->st_name))
+                                       SQUASH_TOC_SAVE_INSN(value);
                        } else
                                value += local_entry_offset(sym);
 
@@ -665,6 +694,10 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
        me->arch.tramp = stub_for_addr(sechdrs,
                                       (unsigned long)ftrace_caller,
                                       me);
+       /* ftrace_caller will take care of the TOC;
+        * do not clobber original caller's value.
+        */
+       SQUASH_TOC_SAVE_INSN(me->arch.tramp);
 #endif
 
        return 0;
-- 
1.8.5.6

_______________________________________________
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev

Reply via email to