We will call sock_reuseport.prog for socket migration in the next commit,
so the eBPF program has to know which listener is closing in order to
select the new listener.

Currently, we can get a unique ID for each listener in userspace by
calling bpf_map_lookup_elem() on a BPF_MAP_TYPE_REUSEPORT_SOCKARRAY map.
This patch exposes that ID (the socket cookie) to the eBPF program.

Reviewed-by: Benjamin Herrenschmidt <b...@amazon.com>
Signed-off-by: Kuniyuki Iwashima <kun...@amazon.co.jp>
---
 include/linux/bpf.h            | 1 +
 include/uapi/linux/bpf.h       | 1 +
 net/core/filter.c              | 8 ++++++++
 tools/include/uapi/linux/bpf.h | 1 +
 4 files changed, 11 insertions(+)

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 581b2a2e78eb..c0646eceffa2 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1897,6 +1897,7 @@ struct sk_reuseport_kern {
        u32 hash;
        u32 reuseport_id;
        bool bind_inany;
+       u64 cookie;
 };
 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
                                  struct bpf_insn_access_aux *info);
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 162999b12790..3fcddb032838 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4403,6 +4403,7 @@ struct sk_reuseport_md {
        __u32 ip_protocol;      /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
        __u32 bind_inany;       /* Is sock bound to an INANY address? */
        __u32 hash;             /* A hash of the packet 4 tuples */
+       __u64 cookie;           /* ID of the listener in map */
 };
 
 #define BPF_TAG_SIZE   8
diff --git a/net/core/filter.c b/net/core/filter.c
index 2ca5eecebacf..01e28f283962 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -9862,6 +9862,7 @@ static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
        reuse_kern->hash = hash;
        reuse_kern->reuseport_id = reuse->reuseport_id;
        reuse_kern->bind_inany = reuse->bind_inany;
+       reuse_kern->cookie = sock_gen_cookie(sk);
 }
 
 struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
@@ -10010,6 +10011,9 @@ sk_reuseport_is_valid_access(int off, int size,
        case offsetof(struct sk_reuseport_md, hash):
                return size == size_default;
 
+       case bpf_ctx_range(struct sk_reuseport_md, cookie):
+               return size == sizeof(__u64);
+
        /* Fields that allow narrowing */
        case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
                if (size < sizeof_field(struct sk_buff, protocol))
@@ -10082,6 +10086,10 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
        case offsetof(struct sk_reuseport_md, bind_inany):
                SK_REUSEPORT_LOAD_FIELD(bind_inany);
                break;
+
+       case offsetof(struct sk_reuseport_md, cookie):
+               SK_REUSEPORT_LOAD_FIELD(cookie);
+               break;
        }
 
        return insn - insn_buf;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 162999b12790..3fcddb032838 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4403,6 +4403,7 @@ struct sk_reuseport_md {
        __u32 ip_protocol;      /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
        __u32 bind_inany;       /* Is sock bound to an INANY address? */
        __u32 hash;             /* A hash of the packet 4 tuples */
+       __u64 cookie;           /* ID of the listener in map */
 };
 
 #define BPF_TAG_SIZE   8
-- 
2.17.2 (Apple Git-113)

Reply via email to