The ID is generated by the existing idr_alloc_cyclic().

This patch also adds BPF_PROG_GET_NEXT_ID to allow userspace to iterate
over all bpf_prog IDs.  The API is kept consistent with the existing
BPF_MAP_GET_NEXT_KEY.
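
For example, userspace could walk all loaded program IDs with a loop like
the minimal sketch below (illustrative only; it assumes the uapi header
from this patch is installed so BPF_PROG_GET_NEXT_ID and the
start_id/next_id attr fields are visible):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int main(void)
{
	union bpf_attr attr;
	__u32 next_id = 0;	/* kernel writes the 32-bit ID through this pointer */
	__u32 start_id = 0;	/* 0: start before the lowest allocated ID */

	for (;;) {
		memset(&attr, 0, sizeof(attr));
		attr.start_id = start_id;
		attr.next_id = (uint64_t)(unsigned long)&next_id;

		if (syscall(__NR_bpf, BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr))) {
			if (errno != ENOENT)	/* ENOENT: no ID above start_id */
				perror("BPF_PROG_GET_NEXT_ID");
			break;
		}

		printf("prog id %u\n", next_id);
		start_id = next_id;	/* same pattern as BPF_MAP_GET_NEXT_KEY */
	}
	return 0;
}

Passing start_id == 0 starts before the lowest allocated ID; each returned
ID is fed back as the next start_id until the kernel returns -ENOENT.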

Signed-off-by: Martin KaFai Lau <ka...@fb.com>
---
 include/linux/filter.h   |  1 +
 include/uapi/linux/bpf.h |  6 ++++++
 kernel/bpf/syscall.c     | 47 ++++++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 53 insertions(+), 1 deletion(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 9a7786db14fa..6eeeb83c4013 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -417,6 +417,7 @@ struct bpf_prog {
        kmemcheck_bitfield_end(meta);
        enum bpf_prog_type      type;           /* Type of BPF program */
        u32                     len;            /* Number of filter blocks */
+       u32                     id;
        u8                      tag[BPF_TAG_SIZE];
        struct bpf_prog_aux     *aux;           /* Auxiliary fields */
        struct sock_fprog_kern  *orig_prog;     /* Original BPF program */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index e553529929f6..270f501c5597 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -82,6 +82,7 @@ enum bpf_cmd {
        BPF_PROG_ATTACH,
        BPF_PROG_DETACH,
        BPF_PROG_TEST_RUN,
+       BPF_PROG_GET_NEXT_ID,
 };
 
 enum bpf_map_type {
@@ -201,6 +202,11 @@ union bpf_attr {
                __u32           repeat;
                __u32           duration;
        } test;
+
+       struct { /* anonymous struct used by BPF_PROG_GET_NEXT_ID */
+               __u32           start_id;
+               __aligned_u64   next_id;
+       };
 } __attribute__((aligned(8)));
 
 /* BPF helper function descriptions:
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 13642c73dca0..6a654e17bd3c 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -22,8 +22,11 @@
 #include <linux/filter.h>
 #include <linux/version.h>
 #include <linux/kernel.h>
+#include <linux/idr.h>
 
 DEFINE_PER_CPU(int, bpf_prog_active);
+static DEFINE_IDR(prog_idr);
+static DEFINE_SPINLOCK(prog_idr_lock);
 
 int sysctl_unprivileged_bpf_disabled __read_mostly;
 
@@ -663,6 +666,9 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 void bpf_prog_put(struct bpf_prog *prog)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
+               spin_lock(&prog_idr_lock);
+               idr_remove(&prog_idr, prog->id);
+               spin_unlock(&prog_idr_lock);
                trace_bpf_prog_put_rcu(prog);
                bpf_prog_kallsyms_del(prog);
                call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
@@ -790,7 +796,7 @@ static int bpf_prog_load(union bpf_attr *attr)
 {
        enum bpf_prog_type type = attr->prog_type;
        struct bpf_prog *prog;
-       int err;
+       int err, id;
        char license[128];
        bool is_gpl;
 
@@ -848,6 +854,15 @@ static int bpf_prog_load(union bpf_attr *attr)
        if (err < 0)
                goto free_used_maps;
 
+       spin_lock(&prog_idr_lock);
+       id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
+       spin_unlock(&prog_idr_lock);
+       if (id < 0) {
+               err = id;
+               goto free_used_maps;
+       }
+       prog->id = id;
+
        /* eBPF program is ready to be JITed */
        prog = bpf_prog_select_runtime(prog, &err);
        if (err < 0)
@@ -995,6 +1010,33 @@ static int bpf_prog_test_run(const union bpf_attr *attr,
        return ret;
 }
 
+#define BPF_PROG_GET_NEXT_ID_LAST_FIELD next_id
+
+static int bpf_prog_get_next_id(union bpf_attr *attr)
+{
+       u64 __user *unext_id = u64_to_user_ptr(attr->next_id);
+       u32 next_id = attr->start_id;
+       struct bpf_prog *prog;
+
+       if (CHECK_ATTR(BPF_PROG_GET_NEXT_ID))
+               return -EINVAL;
+
+       if (next_id++ >= INT_MAX)
+               return -EINVAL;
+
+       spin_lock(&prog_idr_lock);
+       prog = idr_get_next(&prog_idr, &next_id);
+       spin_unlock(&prog_idr_lock);
+
+       if (!prog)
+               return -ENOENT;
+
+       if (copy_to_user(unext_id, &next_id, sizeof(next_id)))
+               return -EFAULT;
+
+       return 0;
+}
+
 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 {
        union bpf_attr attr = {};
@@ -1072,6 +1114,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_PROG_TEST_RUN:
                err = bpf_prog_test_run(&attr, uattr);
                break;
+       case BPF_PROG_GET_NEXT_ID:
+               err = bpf_prog_get_next_id(&attr);
+               break;
        default:
                err = -EINVAL;
                break;
-- 
2.9.3
