Add a basic test for the ability to mmap /sys/kernel/btf/vmlinux. We do
some basic sanity checks ourselves: the mapping must be read-only and
private, the padding past the end of the BTF data must be zeroed, and
the mapped buffer must parse as valid BTF via btf__new_split().
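
For context, the usage being exercised boils down to roughly the sketch
below (error handling and cleanup elided; parse_vmlinux_btf_mmap() is
only an illustrative helper, not part of the patch):

  #include <bpf/btf.h>
  #include <fcntl.h>
  #include <sys/mman.h>
  #include <sys/stat.h>
  #include <unistd.h>

  /* Illustrative only: mmap the sysfs BTF read-only and hand the
   * mapped buffer to libbpf.
   */
  static struct btf *parse_vmlinux_btf_mmap(void)
  {
          struct stat st;
          void *data;
          int fd;

          fd = open("/sys/kernel/btf/vmlinux", O_RDONLY);
          fstat(fd, &st);
          data = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
          close(fd);
          return btf__new_split(data, st.st_size, NULL);
  }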

Signed-off-by: Lorenz Bauer <l...@isovalent.com>
---
 tools/testing/selftests/bpf/prog_tests/btf_sysfs.c | 83 ++++++++++++++++++++++
 1 file changed, 83 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/btf_sysfs.c b/tools/testing/selftests/bpf/prog_tests/btf_sysfs.c
new file mode 100644
index 0000000000000000000000000000000000000000..3319cf758897d46cefa8ca25e16acb162f4e9889
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/btf_sysfs.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright (c) 2025 Isovalent */
+
+#include <test_progs.h>
+#include <bpf/btf.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <unistd.h>
+
+static void test_btf_mmap_sysfs(const char *path, struct btf *base)
+{
+       struct stat st;
+       __u64 btf_size, end;
+       void *raw_data = NULL;
+       int fd = -1;
+       long page_size;
+       struct btf *btf = NULL;
+
+       page_size = sysconf(_SC_PAGESIZE);
+       if (!ASSERT_GE(page_size, 0, "get_page_size"))
+               goto cleanup;
+
+       if (!ASSERT_OK(stat(path, &st), "stat_btf"))
+               goto cleanup;
+
+       btf_size = st.st_size;
+       end = (btf_size + page_size - 1) / page_size * page_size;
+
+       fd = open(path, O_RDONLY);
+       if (!ASSERT_GE(fd, 0, "open_btf"))
+               goto cleanup;
+
+       raw_data = mmap(NULL, btf_size, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
+       if (!ASSERT_EQ(raw_data, MAP_FAILED, "mmap_btf_writable"))
+               goto cleanup;
+
+       raw_data = mmap(NULL, btf_size, PROT_READ, MAP_SHARED, fd, 0);
+       if (!ASSERT_EQ(raw_data, MAP_FAILED, "mmap_btf_shared"))
+               goto cleanup;
+
+       raw_data = mmap(NULL, end + 1, PROT_READ, MAP_PRIVATE, fd, 0);
+       if (!ASSERT_EQ(raw_data, MAP_FAILED, "mmap_btf_invalid_size"))
+               goto cleanup;
+
+       raw_data = mmap(NULL, end, PROT_READ, MAP_PRIVATE, fd, 0);
+       if (!ASSERT_NEQ(raw_data, MAP_FAILED, "mmap_btf"))
+               goto cleanup;
+
+       if (!ASSERT_EQ(mprotect(raw_data, btf_size, PROT_READ | PROT_WRITE), -1,
+           "mprotect_writable"))
+               goto cleanup;
+
+       if (!ASSERT_EQ(mprotect(raw_data, btf_size, PROT_READ | PROT_EXEC), -1,
+           "mprotect_executable"))
+               goto cleanup;
+
+       /* Check padding is zeroed */
+       for (int i = btf_size; i < end; i++) {
+               if (((__u8 *)raw_data)[i] != 0) {
+                       PRINT_FAIL("tail of BTF is not zero at page offset %d\n", i);
+                       goto cleanup;
+               }
+       }
+
+       btf = btf__new_split(raw_data, btf_size, base);
+       if (!ASSERT_NEQ(btf, NULL, "parse_btf"))
+               goto cleanup;
+
+cleanup:
+       if (raw_data && raw_data != MAP_FAILED)
+               munmap(raw_data, btf_size);
+       if (btf)
+               btf__free(btf);
+       if (fd >= 0)
+               close(fd);
+}
+
+void test_btf_sysfs(void)
+{
+       if (test__start_subtest("vmlinux"))
+               test_btf_mmap_sysfs("/sys/kernel/btf/vmlinux", NULL);
+}

-- 
2.49.0

