This patch enables the FENTRY/FEXIT/RAW_TP tracing program to use
the bpf_sk_storage_(get|delete) helper, so those tracing programs
can access the sk's bpf_local_storage and the later selftest
will show some examples.

The bpf_sk_storage is currently used in bpf-tcp-cc, tc,
cg sockops...etc which is running either in softirq or
task context.

This patch adds bpf_sk_storage_get_tracing_proto and
bpf_sk_storage_delete_tracing_proto.  They will check
at runtime that the helpers can only be called when serving
softirq or running in a task context.  That should enable
most common tracing use cases on sk.

At load time, the new tracing_allowed() function
will ensure the tracing prog using the bpf_sk_storage_(get|delete)
helper is not tracing any *sk_storage*() function itself.
The sk is passed as "void *" when calling into bpf_local_storage.

Signed-off-by: Martin KaFai Lau <ka...@fb.com>
---
 include/net/bpf_sk_storage.h |  2 +
 kernel/trace/bpf_trace.c     |  5 +++
 net/core/bpf_sk_storage.c    | 73 ++++++++++++++++++++++++++++++++++++
 3 files changed, 80 insertions(+)

diff --git a/include/net/bpf_sk_storage.h b/include/net/bpf_sk_storage.h
index 3c516dd07caf..0e85713f56df 100644
--- a/include/net/bpf_sk_storage.h
+++ b/include/net/bpf_sk_storage.h
@@ -20,6 +20,8 @@ void bpf_sk_storage_free(struct sock *sk);
 
 extern const struct bpf_func_proto bpf_sk_storage_get_proto;
 extern const struct bpf_func_proto bpf_sk_storage_delete_proto;
+extern const struct bpf_func_proto bpf_sk_storage_get_tracing_proto;
+extern const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto;
 
 struct bpf_local_storage_elem;
 struct bpf_sk_storage_diag;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index e4515b0f62a8..cfce60ad1cb5 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -16,6 +16,7 @@
 #include <linux/syscalls.h>
 #include <linux/error-injection.h>
 #include <linux/btf_ids.h>
+#include <net/bpf_sk_storage.h>
 
 #include <uapi/linux/bpf.h>
 #include <uapi/linux/btf.h>
@@ -1735,6 +1736,10 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_skc_to_tcp_request_sock_proto;
        case BPF_FUNC_skc_to_udp6_sock:
                return &bpf_skc_to_udp6_sock_proto;
+       case BPF_FUNC_sk_storage_get:
+               return &bpf_sk_storage_get_tracing_proto;
+       case BPF_FUNC_sk_storage_delete:
+               return &bpf_sk_storage_delete_tracing_proto;
 #endif
        case BPF_FUNC_seq_printf:
                return prog->expected_attach_type == BPF_TRACE_ITER ?
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 001eac65e40f..1a41c917e08d 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -6,6 +6,7 @@
 #include <linux/types.h>
 #include <linux/spinlock.h>
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/btf_ids.h>
 #include <linux/bpf_local_storage.h>
 #include <net/bpf_sk_storage.h>
@@ -378,6 +379,78 @@ const struct bpf_func_proto bpf_sk_storage_delete_proto = {
        .arg2_type      = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
 };
 
+/* Load-time gate for the tracing variants of the bpf_sk_storage helpers.
+ * Returns true only when the prog's attach point cannot re-enter the
+ * sk_storage code itself (which could recurse/deadlock on its internals).
+ */
+static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
+{
+       const struct btf *btf_vmlinux;
+       const struct btf_type *t;
+       const char *tname;
+       u32 btf_id;
+
+       /* An extension prog (dst_prog set) inherits another prog's attach
+        * point, which cannot be vetted here, so reject it.
+        */
+       if (prog->aux->dst_prog)
+               return false;
+
+       /* Ensure the tracing program is not tracing
+        * any *sk_storage*() function and also
+        * use the bpf_sk_storage_(get|delete) helper.
+        */
+       switch (prog->expected_attach_type) {
+       case BPF_TRACE_RAW_TP:
+               /* bpf_sk_storage has no trace point */
+               return true;
+       case BPF_TRACE_FENTRY:
+       case BPF_TRACE_FEXIT:
+               /* Resolve the attach target's name from vmlinux BTF and
+                * reject any function with "sk_storage" in its name.
+                */
+               btf_vmlinux = bpf_get_btf_vmlinux();
+               btf_id = prog->aux->attach_btf_id;
+               t = btf_type_by_id(btf_vmlinux, btf_id);
+               tname = btf_name_by_offset(btf_vmlinux, t->name_off);
+               return !strstr(tname, "sk_storage");
+       default:
+               return false;
+       }
+       /* Unreachable "return false" after the exhaustive switch removed:
+        * every case above already returns.
+        */
+}
+
+/* Tracing-prog variant of bpf_sk_storage_get().
+ *
+ * bpf_sk_storage is only exercised from softirq or task context today
+ * (bpf-tcp-cc, tc, cg sockops, ...), so reject any other context
+ * (e.g. hardirq) at runtime and return NULL to the prog.
+ * (Fixed here: the BPF_CALL_4 line was mail-wrapped mid-invocation.)
+ */
+BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
+          void *, value, u64, flags)
+{
+       if (!in_serving_softirq() && !in_task())
+               return (unsigned long)NULL;
+
+       return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags);
+}
+
+/* Tracing-prog variant of bpf_sk_storage_delete().
+ *
+ * Only proceed while serving softirq or running in task context;
+ * any other context gets -EPERM.
+ */
+BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
+          struct sock *, sk)
+{
+       if (in_serving_softirq() || in_task())
+               return ____bpf_sk_storage_delete(map, sk);
+
+       return -EPERM;
+}
+
+/* Proto for bpf_sk_storage_get() as seen by tracing progs: same helper
+ * semantics as bpf_sk_storage_get_proto, but .func adds a runtime context
+ * check and .allowed gates which progs may use it at load time.
+ * NOTE(review): arg2 uses ARG_PTR_TO_BTF_ID + sock_common btf_id rather
+ * than ARG_PTR_TO_BTF_ID_SOCK_COMMON — presumably because tracing progs
+ * carry BTF-typed sk pointers; confirm with verifier semantics.
+ */
+const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
+       .func           = bpf_sk_storage_get_tracing,
+       .gpl_only       = false,
+       .ret_type       = RET_PTR_TO_MAP_VALUE_OR_NULL,
+       .arg1_type      = ARG_CONST_MAP_PTR,
+       .arg2_type      = ARG_PTR_TO_BTF_ID,
+       .arg2_btf_id    = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+       .arg3_type      = ARG_PTR_TO_MAP_VALUE_OR_NULL,
+       .arg4_type      = ARG_ANYTHING,
+       .allowed        = bpf_sk_storage_tracing_allowed,
+};
+
+/* Proto for bpf_sk_storage_delete() as seen by tracing progs: mirrors
+ * bpf_sk_storage_delete_proto, with the runtime context check in .func
+ * and the same load-time .allowed gate as the get variant.
+ */
+const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
+       .func           = bpf_sk_storage_delete_tracing,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_CONST_MAP_PTR,
+       .arg2_type      = ARG_PTR_TO_BTF_ID,
+       .arg2_btf_id    = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
+       .allowed        = bpf_sk_storage_tracing_allowed,
+};
+
 struct bpf_sk_storage_diag {
        u32 nr_maps;
        struct bpf_map *maps[];
-- 
2.24.1

Reply via email to