Commit d5a00528b58c ("syscalls/core, syscalls/x86: Rename struct pt_regs-based sys_*() to __x64_sys_*()") renamed a number of syscall functions from sys_*() to __x64_sys_*(). This caused several kprobe-based samples/bpf tests to fail, since on x86 the kprobes now attach to symbols that no longer exist under the old names.
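On x86, a kprobe that used to attach to sys_getuid, for example, now has to attach to __x64_sys_getuid. The sketch below shows the guard this patch applies to every affected program, using the sys_getuid kprobe from map_perf_test_kern.c as one instance; it assumes __TARGET_ARCH_x86 is defined by the build when compiling for x86 (e.g. via a -D__TARGET_ARCH_x86 flag on the clang command line), which is not something this patch adds:

  /* excerpt from samples/bpf/map_perf_test_kern.c; the includes and map
   * definitions are unchanged and omitted here
   */
  #ifdef __TARGET_ARCH_x86
  SEC("kprobe/__x64_sys_getuid")	/* x86 pt_regs-based syscall stub */
  #else
  SEC("kprobe/sys_getuid")		/* other architectures keep the old name */
  #endif
  int stress_hmap(struct pt_regs *ctx)
  {
  	u32 key = bpf_get_current_pid_tgid();
  	...
  }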
This patch fixes the problem by using __x64_sys_*() instead of sys_*() in the bpf program SEC() annotations when the target arch is __TARGET_ARCH_x86.

Fixes: d5a00528b58c ("syscalls/core, syscalls/x86: Rename struct pt_regs-based sys_*() to __x64_sys_*()")
Signed-off-by: Yonghong Song <y...@fb.com>
---
 samples/bpf/map_perf_test_kern.c                  | 32 +++++++++++++++++++++++
 samples/bpf/test_current_task_under_cgroup_kern.c |  4 +++
 samples/bpf/test_map_in_map_kern.c                |  4 +++
 samples/bpf/test_probe_write_user_kern.c          |  4 +++
 samples/bpf/trace_output_kern.c                   |  4 +++
 samples/bpf/tracex2_kern.c                        |  4 +++
 6 files changed, 52 insertions(+)

diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
index 2b2ffb9..79f4320 100644
--- a/samples/bpf/map_perf_test_kern.c
+++ b/samples/bpf/map_perf_test_kern.c
@@ -95,7 +95,11 @@ struct bpf_map_def SEC("maps") lru_hash_lookup_map = {
 	.max_entries = MAX_ENTRIES,
 };
 
+#ifdef __TARGET_ARCH_x86
+SEC("kprobe/__x64_sys_getuid")
+#else
 SEC("kprobe/sys_getuid")
+#endif
 int stress_hmap(struct pt_regs *ctx)
 {
 	u32 key = bpf_get_current_pid_tgid();
@@ -110,7 +114,11 @@ int stress_hmap(struct pt_regs *ctx)
 	return 0;
 }
 
+#ifdef __TARGET_ARCH_x86
+SEC("kprobe/__x64_sys_geteuid")
+#else
 SEC("kprobe/sys_geteuid")
+#endif
 int stress_percpu_hmap(struct pt_regs *ctx)
 {
 	u32 key = bpf_get_current_pid_tgid();
@@ -124,7 +132,11 @@ int stress_percpu_hmap(struct pt_regs *ctx)
 	return 0;
 }
 
+#ifdef __TARGET_ARCH_x86
+SEC("kprobe/__x64_sys_getgid")
+#else
 SEC("kprobe/sys_getgid")
+#endif
 int stress_hmap_alloc(struct pt_regs *ctx)
 {
 	u32 key = bpf_get_current_pid_tgid();
@@ -138,7 +150,11 @@ int stress_hmap_alloc(struct pt_regs *ctx)
 	return 0;
 }
 
+#ifdef __TARGET_ARCH_x86
+SEC("kprobe/__x64_sys_getegid")
+#else
 SEC("kprobe/sys_getegid")
+#endif
 int stress_percpu_hmap_alloc(struct pt_regs *ctx)
 {
 	u32 key = bpf_get_current_pid_tgid();
@@ -152,7 +168,11 @@ int stress_percpu_hmap_alloc(struct pt_regs *ctx)
 	return 0;
 }
 
+#ifdef __TARGET_ARCH_x86
+SEC("kprobe/__x64_sys_connect")
+#else
 SEC("kprobe/sys_connect")
+#endif
 int stress_lru_hmap_alloc(struct pt_regs *ctx)
 {
 	char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n";
@@ -231,7 +251,11 @@ int stress_lru_hmap_alloc(struct pt_regs *ctx)
 	return 0;
 }
 
+#ifdef __TARGET_ARCH_x86
+SEC("kprobe/__x64_sys_gettid")
+#else
 SEC("kprobe/sys_gettid")
+#endif
 int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
 {
 	union {
@@ -253,7 +277,11 @@ int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
 	return 0;
 }
 
+#ifdef __TARGET_ARCH_x86
+SEC("kprobe/__x64_sys_getpgid")
+#else
 SEC("kprobe/sys_getpgid")
+#endif
 int stress_hash_map_lookup(struct pt_regs *ctx)
 {
 	u32 key = 1, i;
@@ -266,7 +294,11 @@ int stress_hash_map_lookup(struct pt_regs *ctx)
 	return 0;
 }
 
+#ifdef __TARGET_ARCH_x86
+SEC("kprobe/__x64_sys_getppid")
+#else
 SEC("kprobe/sys_getppid")
+#endif
 int stress_array_map_lookup(struct pt_regs *ctx)
 {
 	u32 key = 1, i;
diff --git a/samples/bpf/test_current_task_under_cgroup_kern.c b/samples/bpf/test_current_task_under_cgroup_kern.c
index 86b28d7..eb375a2 100644
--- a/samples/bpf/test_current_task_under_cgroup_kern.c
+++ b/samples/bpf/test_current_task_under_cgroup_kern.c
@@ -26,7 +26,11 @@ struct bpf_map_def SEC("maps") perf_map = {
 };
 
 /* Writes the last PID that called sync to a map at index 0 */
+#ifdef __TARGET_ARCH_x86
+SEC("kprobe/__x64_sys_sync")
+#else
 SEC("kprobe/sys_sync")
+#endif
 int bpf_prog1(struct pt_regs *ctx)
 {
 	u64 pid = bpf_get_current_pid_tgid();
diff --git a/samples/bpf/test_map_in_map_kern.c b/samples/bpf/test_map_in_map_kern.c
index 42c44d0..89678a1 100644
--- a/samples/bpf/test_map_in_map_kern.c
+++ b/samples/bpf/test_map_in_map_kern.c
@@ -100,7 +100,11 @@ static __always_inline int do_inline_hash_lookup(void *inner_map, u32 port)
 	return result ? *result : -ENOENT;
 }
 
+#ifdef __TARGET_ARCH_x86
+SEC("kprobe/__x64_sys_connect")
+#else
 SEC("kprobe/sys_connect")
+#endif
 int trace_sys_connect(struct pt_regs *ctx)
 {
 	struct sockaddr_in6 *in6;
diff --git a/samples/bpf/test_probe_write_user_kern.c b/samples/bpf/test_probe_write_user_kern.c
index 3a677c8..da677f7 100644
--- a/samples/bpf/test_probe_write_user_kern.c
+++ b/samples/bpf/test_probe_write_user_kern.c
@@ -25,7 +25,11 @@ struct bpf_map_def SEC("maps") dnat_map = {
  * This example sits on a syscall, and the syscall ABI is relatively stable
  * of course, across platforms, and over time, the ABI may change.
  */
+#ifdef __TARGET_ARCH_x86
+SEC("kprobe/__x64_sys_connect")
+#else
 SEC("kprobe/sys_connect")
+#endif
 int bpf_prog1(struct pt_regs *ctx)
 {
 	struct sockaddr_in new_addr, orig_addr = {};
diff --git a/samples/bpf/trace_output_kern.c b/samples/bpf/trace_output_kern.c
index 9b96f4f..868153a 100644
--- a/samples/bpf/trace_output_kern.c
+++ b/samples/bpf/trace_output_kern.c
@@ -10,7 +10,11 @@ struct bpf_map_def SEC("maps") my_map = {
 	.max_entries = 2,
 };
 
+#ifdef __TARGET_ARCH_x86
+SEC("kprobe/__x64_sys_write")
+#else
 SEC("kprobe/sys_write")
+#endif
 int bpf_prog1(struct pt_regs *ctx)
 {
 	struct S {
diff --git a/samples/bpf/tracex2_kern.c b/samples/bpf/tracex2_kern.c
index 5e11c20..93ca2a4 100644
--- a/samples/bpf/tracex2_kern.c
+++ b/samples/bpf/tracex2_kern.c
@@ -76,7 +76,11 @@ struct bpf_map_def SEC("maps") my_hist_map = {
 	.max_entries = 1024,
 };
 
+#ifdef __TARGET_ARCH_x86
+SEC("kprobe/__x64_sys_write")
+#else
 SEC("kprobe/sys_write")
+#endif
 int bpf_prog3(struct pt_regs *ctx)
 {
 	long write_size = PT_REGS_PARM3(ctx);
-- 
2.9.5