KPROBES_ON_FTRACE avoids much of the overhead of regular kprobes, as it
eliminates the need for a trap, as well as the need to emulate or
single-step the probed instruction.

Tested on a Berlin arm64 platform.

~ # mount -t debugfs debugfs /sys/kernel/debug/
~ # cd /sys/kernel/debug/
/sys/kernel/debug # echo 'p _do_fork' > tracing/kprobe_events

before the patch:

/sys/kernel/debug # cat kprobes/list
ffffff801009fe28  k  _do_fork+0x0    [DISABLED]

after the patch:

/sys/kernel/debug # cat kprobes/list
ffffff801009ff54  k  _do_fork+0x4    [DISABLED][FTRACE]
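
The probe can then be exercised and disarmed in the usual way; a usage
sketch follows (trace output omitted; the group-wide enable file is
used so nothing is assumed about the auto-generated event name):

/sys/kernel/debug # echo 1 > tracing/events/kprobes/enable
/sys/kernel/debug # ls > /dev/null
/sys/kernel/debug # cat tracing/trace
/sys/kernel/debug # echo 0 > tracing/events/kprobes/enable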

Signed-off-by: Jisheng Zhang <jisheng.zh...@synaptics.com>
---

This applies on top of the arm64 FTRACE_WITH_REGS series:
http://lists.infradead.org/pipermail/linux-arm-kernel/2019-August/674404.html

Changes since v3:
  - move kprobe_lookup_name() and arch_kprobe_on_func_entry to ftrace.c since
    we only want to choose the ftrace entry for KPROBES_ON_FTRACE.
  - only choose ftrace entry if (addr && !offset)

Changes since v2:
  - remove patch1, make it a single cleanup patch
  - remove "This patch" in the change log
  - implement arm64's kprobe_lookup_name() and arch_kprobe_on_func_entry instead
    of patching the common kprobes code

Changes since v1:
  - make "kprobes/x86: use instruction_pointer and instruction_pointer_set"
    patch 1 of the series
  - add Masami's ACK to patch1
  - add some description about KPROBES_ON_FTRACE and why we need it on
    arm64
  - correct the log before the patch
  - remove the consolidation patch and leave it as a TODO
  - only adjust kprobe's addr when KPROBE_FLAG_FTRACE is set
  - if KPROBES_ON_FTRACE, ftrace_call_adjust() the kprobe's addr before
    calling ftrace_location()
  - update the kprobes-on-ftrace/arch-support.txt in doc
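
For reference, with -fpatchable-function-entry=2 the entry of an
instrumented function looks roughly like the sketch below once ftrace
has patched in the call (both slots are NOPs while tracing is off);
this is why the probe address above moves from _do_fork+0x0 to
_do_fork+0x4:

<_do_fork>:
	mov	x9, x30			// +0x0: LR saver (first patched insn)
	bl	<ftrace trampoline>	// +0x4: the recorded ftrace location
	...				// original function body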


 .../debug/kprobes-on-ftrace/arch-support.txt  |  2 +-
 arch/arm64/Kconfig                            |  1 +
 arch/arm64/kernel/probes/Makefile             |  1 +
 arch/arm64/kernel/probes/ftrace.c             | 84 +++++++++++++++++++
 4 files changed, 87 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/kernel/probes/ftrace.c

diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 68f266944d5f..e8358a38981c 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -9,7 +9,7 @@
     |       alpha: | TODO |
     |         arc: | TODO |
     |         arm: | TODO |
-    |       arm64: | TODO |
+    |       arm64: |  ok  |
     |         c6x: | TODO |
     |        csky: | TODO |
     |       h8300: | TODO |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 663392d1eae2..928700f15e23 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -167,6 +167,7 @@ config ARM64
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
        select HAVE_KPROBES
+       select HAVE_KPROBES_ON_FTRACE
        select HAVE_KRETPROBES
        select HAVE_GENERIC_VDSO
        select IOMMU_DMA if IOMMU_SUPPORT
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
index 8e4be92e25b1..4020cfc66564 100644
--- a/arch/arm64/kernel/probes/Makefile
+++ b/arch/arm64/kernel/probes/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_KPROBES)           += kprobes.o decode-insn.o      \
                                   simulate-insn.o
 obj-$(CONFIG_UPROBES)          += uprobes.o decode-insn.o      \
                                   simulate-insn.o
+obj-$(CONFIG_KPROBES_ON_FTRACE)        += ftrace.o
diff --git a/arch/arm64/kernel/probes/ftrace.c b/arch/arm64/kernel/probes/ftrace.c
new file mode 100644
index 000000000000..5989c57660f3
--- /dev/null
+++ b/arch/arm64/kernel/probes/ftrace.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Dynamic Ftrace based Kprobes Optimization
+ *
+ * Copyright (C) Hitachi Ltd., 2012
+ * Copyright (C) 2019 Jisheng Zhang <jszh...@kernel.org>
+ *                   Synaptics Incorporated
+ */
+
+#include <linux/kprobes.h>
+
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+                          struct ftrace_ops *ops, struct pt_regs *regs)
+{
+       struct kprobe *p;
+       struct kprobe_ctlblk *kcb;
+
+       /* Preempt is disabled by ftrace */
+       p = get_kprobe((kprobe_opcode_t *)ip);
+       if (unlikely(!p) || kprobe_disabled(p))
+               return;
+
+       kcb = get_kprobe_ctlblk();
+       if (kprobe_running()) {
+               kprobes_inc_nmissed_count(p);
+       } else {
+               unsigned long orig_ip = instruction_pointer(regs);
+               /* Kprobe handler expects regs->pc = ip + 4, as if a breakpoint hit */
+               instruction_pointer_set(regs, ip + sizeof(kprobe_opcode_t));
+
+               __this_cpu_write(current_kprobe, p);
+               kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+               if (!p->pre_handler || !p->pre_handler(p, regs)) {
+                       /*
+                        * Emulate single-stepping (and recover regs->pc)
+                        * as if there were a NOP at the probe address
+                        */
+                       instruction_pointer_set(regs,
+                               (unsigned long)p->addr + MCOUNT_INSN_SIZE);
+                       if (unlikely(p->post_handler)) {
+                               kcb->kprobe_status = KPROBE_HIT_SSDONE;
+                               p->post_handler(p, regs, 0);
+                       }
+                       instruction_pointer_set(regs, orig_ip);
+               }
+               /*
+                * If pre_handler returns !0, it changes regs->pc. We have to
+                * skip emulating post_handler.
+                */
+               __this_cpu_write(current_kprobe, NULL);
+       }
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset)
+{
+       unsigned long addr = kallsyms_lookup_name(name);
+
+       if (addr && !offset) {
+               unsigned long faddr;
+               /*
+                * With -fpatchable-function-entry=2, the first 4 bytes are the
+                * LR saver, followed by the actual call insn, so the ftrace
+                * location is always 4 bytes past the symbol address.
+                */
+               faddr = ftrace_location_range(addr,
+                                             addr + AARCH64_INSN_SIZE);
+               if (faddr)
+                       return (kprobe_opcode_t *)faddr;
+       }
+       return (kprobe_opcode_t *)addr;
+}
+
+bool arch_kprobe_on_func_entry(unsigned long offset)
+{
+       return offset <= AARCH64_INSN_SIZE;
+}
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+       p->ainsn.api.insn = NULL;
+       return 0;
+}
-- 
2.23.0.rc1
