From: Björn Töpel <bjorn.to...@intel.com>

This commit wires up the xskmap to the XDP_SKB layer.

Signed-off-by: Björn Töpel <bjorn.to...@intel.com>
---
 include/linux/filter.h |  2 +-
 net/core/dev.c         | 34 ++++++++++++++++++----------------
 net/core/filter.c      | 17 ++++++++++++++---
 3 files changed, 33 insertions(+), 20 deletions(-)

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 4da8b2308174..6ab9a6765b00 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -759,7 +759,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
  * This does not appear to be a real limitation for existing software.
  */
 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
-                           struct bpf_prog *prog);
+                           struct xdp_buff *xdp, struct bpf_prog *prog);
 int xdp_do_redirect(struct net_device *dev,
                    struct xdp_buff *xdp,
                    struct bpf_prog *prog);
diff --git a/net/core/dev.c b/net/core/dev.c
index c624a04dad1f..6e8e35af9a8b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3994,12 +3994,12 @@ static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
 }
 
 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+                                    struct xdp_buff *xdp,
                                     struct bpf_prog *xdp_prog)
 {
        struct netdev_rx_queue *rxqueue;
        void *orig_data, *orig_data_end;
        u32 metalen, act = XDP_DROP;
-       struct xdp_buff xdp;
        int hlen, off;
        u32 mac_len;
 
@@ -4034,19 +4034,19 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
         */
        mac_len = skb->data - skb_mac_header(skb);
        hlen = skb_headlen(skb) + mac_len;
-       xdp.data = skb->data - mac_len;
-       xdp.data_meta = xdp.data;
-       xdp.data_end = xdp.data + hlen;
-       xdp.data_hard_start = skb->data - skb_headroom(skb);
-       orig_data_end = xdp.data_end;
-       orig_data = xdp.data;
+       xdp->data = skb->data - mac_len;
+       xdp->data_meta = xdp->data;
+       xdp->data_end = xdp->data + hlen;
+       xdp->data_hard_start = skb->data - skb_headroom(skb);
+       orig_data_end = xdp->data_end;
+       orig_data = xdp->data;
 
        rxqueue = netif_get_rxqueue(skb);
-       xdp.rxq = &rxqueue->xdp_rxq;
+       xdp->rxq = &rxqueue->xdp_rxq;
 
-       act = bpf_prog_run_xdp(xdp_prog, &xdp);
+       act = bpf_prog_run_xdp(xdp_prog, xdp);
 
-       off = xdp.data - orig_data;
+       off = xdp->data - orig_data;
        if (off > 0)
                __skb_pull(skb, off);
        else if (off < 0)
@@ -4056,9 +4056,9 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
        /* check if bpf_xdp_adjust_tail was used. it can only "shrink"
         * pckt.
         */
-       off = orig_data_end - xdp.data_end;
+       off = orig_data_end - xdp->data_end;
        if (off != 0)
-               skb_set_tail_pointer(skb, xdp.data_end - xdp.data);
+               skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
 
        switch (act) {
        case XDP_REDIRECT:
@@ -4066,7 +4066,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
                __skb_push(skb, mac_len);
                break;
        case XDP_PASS:
-               metalen = xdp.data - xdp.data_meta;
+               metalen = xdp->data - xdp->data_meta;
                if (metalen)
                        skb_metadata_set(skb, metalen);
                break;
@@ -4116,17 +4116,19 @@ static struct static_key generic_xdp_needed __read_mostly;
 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
 {
        if (xdp_prog) {
-               u32 act = netif_receive_generic_xdp(skb, xdp_prog);
+               struct xdp_buff xdp;
+               u32 act;
                int err;
 
+               act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
                if (act != XDP_PASS) {
                        switch (act) {
                        case XDP_REDIRECT:
                                err = xdp_do_generic_redirect(skb->dev, skb,
-                                                             xdp_prog);
+                                                             &xdp, xdp_prog);
                                if (err)
                                        goto out_redir;
-                       /* fallthru to submit skb */
+                               break;
                        case XDP_TX:
                                generic_xdp_tx(skb, xdp_prog);
                                break;
diff --git a/net/core/filter.c b/net/core/filter.c
index f053cc799253..315bf3b8d576 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -58,6 +58,7 @@
 #include <net/busy_poll.h>
 #include <net/tcp.h>
 #include <linux/bpf_trace.h>
+#include <net/xdp_sock.h>
 
 /**
  *     sk_filter_trim_cap - run a packet through a socket filter
@@ -2972,13 +2973,14 @@ static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
 
 static int xdp_do_generic_redirect_map(struct net_device *dev,
                                       struct sk_buff *skb,
+                                      struct xdp_buff *xdp,
                                       struct bpf_prog *xdp_prog)
 {
        struct redirect_info *ri = this_cpu_ptr(&redirect_info);
        unsigned long map_owner = ri->map_owner;
        struct bpf_map *map = ri->map;
-       struct net_device *fwd = NULL;
        u32 index = ri->ifindex;
+       void *fwd = NULL;
        int err = 0;
 
        ri->ifindex = 0;
@@ -3000,6 +3002,14 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
                if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
                        goto err;
                skb->dev = fwd;
+               generic_xdp_tx(skb, xdp_prog);
+       } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
+               struct xdp_sock *xs = fwd;
+
+               err = xsk_generic_rcv(xs, xdp);
+               if (err)
+                       goto err;
+               consume_skb(skb);
        } else {
                /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
                err = -EBADRQC;
@@ -3014,7 +3024,7 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
 }
 
 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
-                           struct bpf_prog *xdp_prog)
+                           struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
 {
        struct redirect_info *ri = this_cpu_ptr(&redirect_info);
        u32 index = ri->ifindex;
@@ -3022,7 +3032,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
        int err = 0;
 
        if (ri->map)
-               return xdp_do_generic_redirect_map(dev, skb, xdp_prog);
+               return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog);
 
        ri->ifindex = 0;
        fwd = dev_get_by_index_rcu(dev_net(dev), index);
@@ -3036,6 +3046,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 
        skb->dev = fwd;
        _trace_xdp_redirect(dev, xdp_prog, index);
+       generic_xdp_tx(skb, xdp_prog);
        return 0;
 err:
        _trace_xdp_redirect_err(dev, xdp_prog, index, err);
-- 
2.14.1

Reply via email to