Vladimir Oltean <olte...@gmail.com> writes:

> From: Vladimir Oltean <vladimir.olt...@nxp.com>
>
> The driver implementation of the XDP_REDIRECT action reuses parts from
> XDP_TX, most notably the enetc_xdp_tx function, which transmits an array
> of TX software BDs. Only this time the buffers don't already have DMA
> mappings; we need to create them ourselves.
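Roadmap for the diff below: per frame, enetc_xdp_xmit() creates the DMA
mappings and then reuses the XDP_TX transmit path. The quote of that
function is trimmed before its loop body further down, so the following
is a sketch of that loop, not the literal patch code:

	/* map the frame (and any frags), then transmit via the XDP_TX path */
	xdp_tx_bd_cnt = enetc_xdp_frame_to_xdp_tx_swbd(tx_ring,
						       xdp_redirect_arr,
						       frames[k]);
	if (unlikely(xdp_tx_bd_cnt < 0))
		break;

	if (!enetc_xdp_tx(tx_ring, xdp_redirect_arr, xdp_tx_bd_cnt))
		break;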
>
> When a BPF program reaches the XDP_REDIRECT verdict for a frame, we can
> employ the same buffer reuse strategy as on the normal processing path
> and for XDP_PASS: flip to the other half of the page and seed it back to
> the RX ring.
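For readers unfamiliar with the trick: each RX page is split into two
halves, and the "flip" is an XOR on the buffer's page offset. A minimal
sketch, assuming the driver's existing enetc_rx_swbd fields and the 2K
ENETC_RXB_TRUESIZE buffer size:

	/* give one half away, keep receiving into the other half */
	rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
	page_ref_inc(rx_swbd->page);	/* given-away half is still in flight */
	/* ...then the rx_swbd is re-seeded into the RX ring as usual */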
>
> Note that scatter/gather support is there, but disabled due to the lack
> of multi-buffer support in XDP (which is being added by this series):
> https://patchwork.kernel.org/project/netdevbpf/cover/cover.1616179034.git.lore...@kernel.org/
>
> Signed-off-by: Vladimir Oltean <vladimir.olt...@nxp.com>
> ---
>  drivers/net/ethernet/freescale/enetc/enetc.c  | 212 +++++++++++++++++-
>  drivers/net/ethernet/freescale/enetc/enetc.h  |  11 +-
>  .../ethernet/freescale/enetc/enetc_ethtool.c  |   6 +
>  .../net/ethernet/freescale/enetc/enetc_pf.c   |   1 +
>  4 files changed, 218 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
> index ba5313a5d7a4..57049ae97201 100644
> --- a/drivers/net/ethernet/freescale/enetc/enetc.c
> +++ b/drivers/net/ethernet/freescale/enetc/enetc.c
> @@ -8,6 +8,23 @@
>  #include <linux/vmalloc.h>
>  #include <net/pkt_sched.h>
>  
> +static struct sk_buff *enetc_tx_swbd_get_skb(struct enetc_tx_swbd *tx_swbd)
> +{
> +     if (tx_swbd->is_xdp_tx || tx_swbd->is_xdp_redirect)
> +             return NULL;
> +
> +     return tx_swbd->skb;
> +}
> +
> +static struct xdp_frame *
> +enetc_tx_swbd_get_xdp_frame(struct enetc_tx_swbd *tx_swbd)
> +{
> +     if (tx_swbd->is_xdp_redirect)
> +             return tx_swbd->xdp_frame;
> +
> +     return NULL;
> +}
> +
>  static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
>                               struct enetc_tx_swbd *tx_swbd)
>  {
> @@ -25,14 +42,20 @@ static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
>       tx_swbd->dma = 0;
>  }
>  
> -static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
> -                           struct enetc_tx_swbd *tx_swbd)
> +static void enetc_free_tx_frame(struct enetc_bdr *tx_ring,
> +                             struct enetc_tx_swbd *tx_swbd)
>  {
> +     struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
> +     struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
> +
>       if (tx_swbd->dma)
>               enetc_unmap_tx_buff(tx_ring, tx_swbd);
>  
> -     if (tx_swbd->skb) {
> -             dev_kfree_skb_any(tx_swbd->skb);
> +     if (xdp_frame) {
> +             xdp_return_frame(tx_swbd->xdp_frame);
> +             tx_swbd->xdp_frame = NULL;
> +     } else if (skb) {
> +             dev_kfree_skb_any(skb);
>               tx_swbd->skb = NULL;
>       }
>  }
> @@ -183,7 +206,7 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
>  
>       do {
>               tx_swbd = &tx_ring->tx_swbd[i];
> -             enetc_free_tx_skb(tx_ring, tx_swbd);
> +             enetc_free_tx_frame(tx_ring, tx_swbd);
>               if (i == 0)
>                       i = tx_ring->bd_count;
>               i--;
> @@ -381,6 +404,9 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
>       do_tstamp = false;
>  
>       while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
> +             struct xdp_frame *xdp_frame = enetc_tx_swbd_get_xdp_frame(tx_swbd);
> +             struct sk_buff *skb = enetc_tx_swbd_get_skb(tx_swbd);
> +
>               if (unlikely(tx_swbd->check_wb)) {
>                       struct enetc_ndev_priv *priv = netdev_priv(ndev);
>                       union enetc_tx_bd *txbd;
> @@ -400,12 +426,15 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
>               else if (likely(tx_swbd->dma))
>                       enetc_unmap_tx_buff(tx_ring, tx_swbd);
>  
> -             if (tx_swbd->skb) {
> +             if (xdp_frame) {
> +                     xdp_return_frame(xdp_frame);
> +                     tx_swbd->xdp_frame = NULL;
> +             } else if (skb) {
>                       if (unlikely(do_tstamp)) {
> -                             enetc_tstamp_tx(tx_swbd->skb, tstamp);
> +                             enetc_tstamp_tx(skb, tstamp);
>                               do_tstamp = false;
>                       }
> -                     napi_consume_skb(tx_swbd->skb, napi_budget);
> +                     napi_consume_skb(skb, napi_budget);
>                       tx_swbd->skb = NULL;
>               }
>  
> @@ -827,6 +856,109 @@ static bool enetc_xdp_tx(struct enetc_bdr *tx_ring,
>       return true;
>  }
>  
> +static int enetc_xdp_frame_to_xdp_tx_swbd(struct enetc_bdr *tx_ring,
> +                                       struct enetc_tx_swbd *xdp_tx_arr,
> +                                       struct xdp_frame *xdp_frame)
> +{
> +     struct enetc_tx_swbd *xdp_tx_swbd = &xdp_tx_arr[0];
> +     struct skb_shared_info *shinfo;
> +     void *data = xdp_frame->data;
> +     int len = xdp_frame->len;
> +     skb_frag_t *frag;
> +     dma_addr_t dma;
> +     unsigned int f;
> +     int n = 0;
> +
> +     dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
> +     if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
> +             netdev_err(tx_ring->ndev, "DMA map error\n");
> +             return -1;
> +     }
> +
> +     xdp_tx_swbd->dma = dma;
> +     xdp_tx_swbd->dir = DMA_TO_DEVICE;
> +     xdp_tx_swbd->len = len;
> +     xdp_tx_swbd->is_xdp_redirect = true;
> +     xdp_tx_swbd->is_eof = false;
> +     xdp_tx_swbd->xdp_frame = NULL;
> +
> +     n++;
> +     xdp_tx_swbd = &xdp_tx_arr[n];
> +
> +     shinfo = xdp_get_shared_info_from_frame(xdp_frame);
> +
> +     for (f = 0, frag = &shinfo->frags[0]; f < shinfo->nr_frags;
> +          f++, frag++) {
> +             data = skb_frag_address(frag);
> +             len = skb_frag_size(frag);
> +
> +             dma = dma_map_single(tx_ring->dev, data, len, DMA_TO_DEVICE);
> +             if (unlikely(dma_mapping_error(tx_ring->dev, dma))) {
> +                     /* Undo the DMA mapping for all fragments */
> +                     while (--n >= 0)
> +                             enetc_unmap_tx_buff(tx_ring, &xdp_tx_arr[n]);
> +
> +                     netdev_err(tx_ring->ndev, "DMA map error\n");
> +                     return -1;
> +             }
> +
> +             xdp_tx_swbd->dma = dma;
> +             xdp_tx_swbd->dir = DMA_TO_DEVICE;
> +             xdp_tx_swbd->len = len;
> +             xdp_tx_swbd->is_xdp_redirect = true;
> +             xdp_tx_swbd->is_eof = false;
> +             xdp_tx_swbd->xdp_frame = NULL;
> +
> +             n++;
> +             xdp_tx_swbd = &xdp_tx_arr[n];
> +     }
> +
> +     xdp_tx_arr[n - 1].is_eof = true;
> +     xdp_tx_arr[n - 1].xdp_frame = xdp_frame;
> +
> +     return n;
> +}
> +
> +int enetc_xdp_xmit(struct net_device *ndev, int num_frames,
> +                struct xdp_frame **frames, u32 flags)
> +{
> +     struct enetc_tx_swbd xdp_redirect_arr[ENETC_MAX_SKB_FRAGS] = {0};
> +     struct enetc_ndev_priv *priv = netdev_priv(ndev);
> +     struct enetc_bdr *tx_ring;
> +     int xdp_tx_bd_cnt, i, k;
> +     int xdp_tx_frm_cnt = 0;
> +
> +     tx_ring = priv->tx_ring[smp_processor_id()];

What mechanism guarantees that this won't overflow the array? :)
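
If the number of TX rings can be smaller than the number of online CPUs,
I'd have expected either a guarantee to that effect or a clamp along
these lines (just a sketch, assuming priv->num_tx_rings is the ring
count):

	tx_ring = priv->tx_ring[smp_processor_id() % priv->num_tx_rings];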

-Toke
