Function load_and_attach() is updated to be able to create kprobes with either the old text-based API or the new PERF_TYPE_KPROBE API.
A global flag use_perf_type_probe is added to select between the two APIs. Signed-off-by: Song Liu <songliubrav...@fb.com> Reviewed-by: Josef Bacik <jba...@fb.com> --- samples/bpf/bpf_load.c | 54 +++++++++++++++++++++++++++++++------------------- samples/bpf/bpf_load.h | 8 ++++++++ 2 files changed, 42 insertions(+), 20 deletions(-) diff --git a/samples/bpf/bpf_load.c b/samples/bpf/bpf_load.c index 2325d7a..872510e 100644 --- a/samples/bpf/bpf_load.c +++ b/samples/bpf/bpf_load.c @@ -8,7 +8,6 @@ #include <errno.h> #include <unistd.h> #include <string.h> -#include <stdbool.h> #include <stdlib.h> #include <linux/bpf.h> #include <linux/filter.h> @@ -42,6 +41,7 @@ int prog_array_fd = -1; struct bpf_map_data map_data[MAX_MAPS]; int map_data_count = 0; +bool use_perf_type_probe = true; static int populate_prog_array(const char *event, int prog_fd) { @@ -70,7 +70,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) size_t insns_cnt = size / sizeof(struct bpf_insn); enum bpf_prog_type prog_type; char buf[256]; - int fd, efd, err, id; + int fd, efd, err, id = -1; struct perf_event_attr attr = {}; attr.type = PERF_TYPE_TRACEPOINT; @@ -128,7 +128,7 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) return populate_prog_array(event, fd); } - if (is_kprobe || is_kretprobe) { + if (!use_perf_type_probe && (is_kprobe || is_kretprobe)) { if (is_kprobe) event += 7; else @@ -169,27 +169,41 @@ static int load_and_attach(const char *event, struct bpf_insn *prog, int size) strcat(buf, "/id"); } - efd = open(buf, O_RDONLY, 0); - if (efd < 0) { - printf("failed to open event %s\n", event); - return -1; - } - - err = read(efd, buf, sizeof(buf)); - if (err < 0 || err >= sizeof(buf)) { - printf("read from '%s' failed '%s'\n", event, strerror(errno)); - return -1; + if (use_perf_type_probe && (is_kprobe || is_kretprobe)) { + attr.type = PERF_TYPE_KPROBE; + attr.kprobe_func = ptr_to_u64( + event + strlen(is_kprobe ? 
"kprobe/" : "kretprobe/")); + attr.probe_offset = 0; + attr.config = !!is_kretprobe; + } else { + efd = open(buf, O_RDONLY, 0); + if (efd < 0) { + printf("failed to open event %s\n", event); + return -1; + } + err = read(efd, buf, sizeof(buf)); + if (err < 0 || err >= sizeof(buf)) { + printf("read from '%s' failed '%s'\n", event, + strerror(errno)); + return -1; + } + close(efd); + buf[err] = 0; + id = atoi(buf); + attr.config = id; } - close(efd); - - buf[err] = 0; - id = atoi(buf); - attr.config = id; - efd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0); if (efd < 0) { - printf("event %d fd %d err %s\n", id, efd, strerror(errno)); + if (use_perf_type_probe && (is_kprobe || is_kretprobe)) + printf("k%sprobe %s fd %d err %s\n", + is_kprobe ? "" : "ret", + event + strlen(is_kprobe ? "kprobe/" + : "kretprobe/"), + efd, strerror(errno)); + else + printf("event %d fd %d err %s\n", id, efd, + strerror(errno)); return -1; } event_fd[prog_cnt - 1] = efd; diff --git a/samples/bpf/bpf_load.h b/samples/bpf/bpf_load.h index 7d57a42..e7a8a21 100644 --- a/samples/bpf/bpf_load.h +++ b/samples/bpf/bpf_load.h @@ -2,6 +2,7 @@ #ifndef __BPF_LOAD_H #define __BPF_LOAD_H +#include <stdbool.h> #include "libbpf.h" #define MAX_MAPS 32 @@ -38,6 +39,8 @@ extern int map_fd[MAX_MAPS]; extern struct bpf_map_data map_data[MAX_MAPS]; extern int map_data_count; +extern bool use_perf_type_probe; + /* parses elf file compiled by llvm .c->.o * . parses 'maps' section and creates maps via BPF syscall * . parses 'license' section and passes it to syscall @@ -59,6 +62,11 @@ struct ksym { char *name; }; +static inline __u64 ptr_to_u64(const void *ptr) +{ + return (__u64) (unsigned long) ptr; +} + int load_kallsyms(void); struct ksym *ksym_search(long key); int set_link_xdp_fd(int ifindex, int fd, __u32 flags); -- 2.9.5