If the callchain is available, the BPF program can use bpf_probe_read_kernel()
to fetch the callchain, or pass it to a BPF helper.
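
For example, a perf_event BPF program could copy the first few entries out of
the callchain roughly as follows (an illustrative sketch only, not part of this
patch; the section name, the read_callchain/MAX_IPS identifiers and the
fixed-size copy are assumptions of the sketch):

    #include <linux/types.h>
    #include <linux/bpf_perf_event.h>
    #include <bpf/bpf_helpers.h>

    #define MAX_IPS 8    /* illustrative bound; assumes perf_event_max_stack >= 8 */

    SEC("perf_event")
    int read_callchain(struct bpf_perf_event_data *ctx)
    {
            struct perf_callchain_entry *cc = ctx->callchain;
            __u64 ips[MAX_IPS] = {};
            __u64 nr;

            if (!cc)
                    return 0;

            /* the callchain lives in kernel memory, so copy it out explicitly */
            if (bpf_probe_read_kernel(&nr, sizeof(nr), &cc->nr))
                    return 0;

            /* fixed-size copy keeps the verifier simple; only the first
             * nr entries are meaningful
             */
            bpf_probe_read_kernel(ips, sizeof(ips), cc->ip);

            /* a real program would now emit ips, e.g. via a perf ring buffer */
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";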

Signed-off-by: Song Liu <songliubrav...@fb.com>
---
 include/linux/perf_event.h                |  5 -----
 include/linux/trace_events.h              |  5 +++++
 include/uapi/linux/bpf_perf_event.h       |  7 ++++++
 kernel/bpf/btf.c                          |  5 +++++
 kernel/trace/bpf_trace.c                  | 27 +++++++++++++++++++++++
 tools/include/uapi/linux/bpf_perf_event.h |  8 +++++++
 6 files changed, 52 insertions(+), 5 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 00ab5efa38334..3a68c999f50d1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -59,11 +59,6 @@ struct perf_guest_info_callbacks {
 #include <linux/security.h>
 #include <asm/local.h>
 
-struct perf_callchain_entry {
-       __u64                           nr;
-       __u64                           ip[]; /* /proc/sys/kernel/perf_event_max_stack */
-};
-
 struct perf_callchain_entry_ctx {
        struct perf_callchain_entry *entry;
        u32                         max_stack;
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 5c69433540494..8e1e88f40eef9 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -631,6 +631,7 @@ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
                            u32 *fd_type, const char **buf,
                            u64 *probe_offset, u64 *probe_addr);
+int bpf_trace_init_btf_ids(struct btf *btf);
 #else
 static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
 {
@@ -672,6 +673,10 @@ static inline int bpf_get_perf_event_info(const struct perf_event *event,
 {
        return -EOPNOTSUPP;
 }
+static inline int bpf_trace_init_btf_ids(struct btf *btf)
+{
+       return -EOPNOTSUPP;
+}
 #endif
 
 enum {
diff --git a/include/uapi/linux/bpf_perf_event.h b/include/uapi/linux/bpf_perf_event.h
index eb1b9d21250c6..40f4df80ab4fa 100644
--- a/include/uapi/linux/bpf_perf_event.h
+++ b/include/uapi/linux/bpf_perf_event.h
@@ -9,11 +9,18 @@
 #define _UAPI__LINUX_BPF_PERF_EVENT_H__
 
 #include <asm/bpf_perf_event.h>
+#include <linux/bpf.h>
+
+struct perf_callchain_entry {
+       __u64                           nr;
+       __u64                           ip[]; /* /proc/sys/kernel/perf_event_max_stack */
+};
 
 struct bpf_perf_event_data {
        bpf_user_pt_regs_t regs;
        __u64 sample_period;
        __u64 addr;
+       __bpf_md_ptr(struct perf_callchain_entry *, callchain);
 };
 
 #endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 4c3007f428b16..cb122e14dba38 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -20,6 +20,7 @@
 #include <linux/btf.h>
 #include <linux/skmsg.h>
 #include <linux/perf_event.h>
+#include <linux/trace_events.h>
 #include <net/sock.h>
 
 /* BTF (BPF Type Format) is the meta data format which describes
@@ -3673,6 +3674,10 @@ struct btf *btf_parse_vmlinux(void)
        if (err < 0)
                goto errout;
 
+       err = bpf_trace_init_btf_ids(btf);
+       if (err < 0)
+               goto errout;
+
        bpf_struct_ops_init(btf, log);
        init_btf_sock_ids(btf);
 
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index e0b7775039ab9..c014846c2723c 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -6,6 +6,7 @@
 #include <linux/types.h>
 #include <linux/slab.h>
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/bpf_perf_event.h>
 #include <linux/filter.h>
 #include <linux/uaccess.h>
@@ -31,6 +32,20 @@ struct bpf_trace_module {
 static LIST_HEAD(bpf_trace_modules);
 static DEFINE_MUTEX(bpf_module_mutex);
 
+static u32 perf_callchain_entry_btf_id;
+
+int bpf_trace_init_btf_ids(struct btf *btf)
+{
+       s32 type_id;
+
+       type_id = btf_find_by_name_kind(btf, "perf_callchain_entry",
+                                       BTF_KIND_STRUCT);
+       if (type_id < 0)
+               return -EINVAL;
+       perf_callchain_entry_btf_id = type_id;
+       return 0;
+}
+
 static struct bpf_raw_event_map *bpf_get_raw_tracepoint_module(const char *name)
 {
        struct bpf_raw_event_map *btp, *ret = NULL;
@@ -1650,6 +1665,10 @@ static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type
                if (!bpf_ctx_narrow_access_ok(off, size, size_u64))
                        return false;
                break;
+       case bpf_ctx_range(struct bpf_perf_event_data, callchain):
+               info->reg_type = PTR_TO_BTF_ID;
+               info->btf_id = perf_callchain_entry_btf_id;
+               break;
        default:
                if (size != sizeof(long))
                        return false;
@@ -1682,6 +1701,14 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
                                      bpf_target_off(struct perf_sample_data, addr, 8,
                                                     target_size));
                break;
+       case offsetof(struct bpf_perf_event_data, callchain):
+               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
+                                                      data), si->dst_reg, si->src_reg,
+                                     offsetof(struct bpf_perf_event_data_kern, data));
+               *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
+                                     bpf_target_off(struct perf_sample_data, callchain,
+                                                    8, target_size));
+               break;
        default:
                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
                                                       regs), si->dst_reg, si->src_reg,
diff --git a/tools/include/uapi/linux/bpf_perf_event.h b/tools/include/uapi/linux/bpf_perf_event.h
index 8f95303f9d807..40f4df80ab4fa 100644
--- a/tools/include/uapi/linux/bpf_perf_event.h
+++ b/tools/include/uapi/linux/bpf_perf_event.h
@@ -9,10 +9,18 @@
 #define _UAPI__LINUX_BPF_PERF_EVENT_H__
 
 #include <asm/bpf_perf_event.h>
+#include <linux/bpf.h>
+
+struct perf_callchain_entry {
+       __u64                           nr;
+       __u64                           ip[]; /* /proc/sys/kernel/perf_event_max_stack */
+};
 
 struct bpf_perf_event_data {
        bpf_user_pt_regs_t regs;
        __u64 sample_period;
+       __u64 addr;
+       __bpf_md_ptr(struct perf_callchain_entry *, callchain);
 };
 
 #endif /* _UAPI__LINUX_BPF_PERF_EVENT_H__ */
-- 
2.24.1
