On Fri, Oct 11, 2024 at 11:01:04AM +0200, Kurt Kanzenbach wrote:
> From: Sriram Yagnaraman <sriram.yagnara...@est.tech>
> 
> Add support for the AF_XDP zero-copy transmit path.
> 
> A new TX buffer type IGB_TYPE_XSK is introduced to indicate that the Tx
> frame was allocated from the xsk buff pool, so igb_clean_tx_ring() and
> igb_clean_tx_irq() can clean the buffers correctly based on type.
> 
> igb_xmit_zc() performs the actual packet transmit when AF_XDP zero-copy is
> enabled. We share the TX ring between the slow path, XDP and AF_XDP
> zero-copy, so we use the netdev queue lock to ensure mutual exclusion.
> 
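
For readers following along: the slow path already runs under that lock
(the core takes it around ndo_start_xmit()), so the XDP and AF_XDP paths
take the same lock explicitly. A minimal sketch of the pattern as it
appears in the hunks below, using the helpers from igb_main.c:

        struct netdev_queue *nq = txring_txq(tx_ring);

        __netif_tx_lock(nq, smp_processor_id());
        /* The queue is shared with the slow path, so refresh its
         * trans_start timestamp to keep the Tx watchdog quiet.
         */
        txq_trans_cond_update(nq);
        igb_xmit_zc(tx_ring);           /* or the XDP transmit routine */
        __netif_tx_unlock(nq);
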
> Signed-off-by: Sriram Yagnaraman <sriram.yagnara...@est.tech>
> [Kurt: Set olinfo_status in igb_xmit_zc() so that frames are transmitted,
>        Use READ_ONCE() for xsk_pool and check Tx disabled and carrier in
>        igb_xmit_zc(), Add FIXME for RS bit]
> Signed-off-by: Kurt Kanzenbach <k...@linutronix.de>
> Reviewed-by: Maciej Fijalkowski <maciej.fijalkow...@intel.com>
> ---
>  drivers/net/ethernet/intel/igb/igb.h      |  2 +
>  drivers/net/ethernet/intel/igb/igb_main.c | 61 ++++++++++++++++++++++++-----
>  drivers/net/ethernet/intel/igb/igb_xsk.c  | 64 +++++++++++++++++++++++++++++++
>  3 files changed, 117 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
> index e4a85867aa18..f6ac74327bb3 100644
> --- a/drivers/net/ethernet/intel/igb/igb.h
> +++ b/drivers/net/ethernet/intel/igb/igb.h
> @@ -258,6 +258,7 @@ enum igb_tx_flags {
>  enum igb_tx_buf_type {
>       IGB_TYPE_SKB = 0,
>       IGB_TYPE_XDP,
> +     IGB_TYPE_XSK
>  };
>  
>  /* wrapper around a pointer to a socket buffer,
> @@ -859,6 +860,7 @@ bool igb_alloc_rx_buffers_zc(struct igb_ring *rx_ring,
>  void igb_clean_rx_ring_zc(struct igb_ring *rx_ring);
>  int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
>                       struct xsk_buff_pool *xsk_pool, const int budget);
> +bool igb_xmit_zc(struct igb_ring *tx_ring);
>  int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags);
>  
>  #endif /* _IGB_H_ */
> diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
> index 711b60cab594..5f396c02e3b9 100644
> --- a/drivers/net/ethernet/intel/igb/igb_main.c
> +++ b/drivers/net/ethernet/intel/igb/igb_main.c
> @@ -2979,6 +2979,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
>       if (unlikely(!tx_ring))
>               return -ENXIO;
>  
> +     if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
> +             return -ENXIO;
> +
>       nq = txring_txq(tx_ring);
>       __netif_tx_lock(nq, cpu);
>  
> @@ -3326,7 +3329,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
>       netdev->priv_flags |= IFF_SUPP_NOFCS;
>  
>       netdev->priv_flags |= IFF_UNICAST_FLT;
> -     netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT;
> +     netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
> +                            NETDEV_XDP_ACT_XSK_ZEROCOPY;
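
Advertising NETDEV_XDP_ACT_XSK_ZEROCOPY is what lets a forced zero-copy
bind succeed; if I read xp_assign_dev() right, it refuses XDP_ZEROCOPY
binds on devices that lack the flag. For reference, the userspace side
of such a bind looks roughly like this (sketch only; xsk_fd, ifindex and
qid are placeholders):

        struct sockaddr_xdp sxdp = {
                .sxdp_family = AF_XDP,
                .sxdp_ifindex = ifindex,
                .sxdp_queue_id = qid,
                .sxdp_flags = XDP_ZEROCOPY,
        };

        /* Fails with -EOPNOTSUPP when the driver cannot do zero-copy */
        if (bind(xsk_fd, (struct sockaddr *)&sxdp, sizeof(sxdp)) < 0)
                perror("bind");
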
>  
>       /* MTU range: 68 - 9216 */
>       netdev->min_mtu = ETH_MIN_MTU;
> @@ -4900,15 +4904,20 @@ void igb_clean_tx_ring(struct igb_ring *tx_ring)
>  {
>       u16 i = tx_ring->next_to_clean;
>       struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
> +     u32 xsk_frames = 0;
>  
>       while (i != tx_ring->next_to_use) {
>               union e1000_adv_tx_desc *eop_desc, *tx_desc;
>  
>               /* Free all the Tx ring sk_buffs or xdp frames */
> -             if (tx_buffer->type == IGB_TYPE_SKB)
> +             if (tx_buffer->type == IGB_TYPE_SKB) {
>                       dev_kfree_skb_any(tx_buffer->skb);
> -             else
> +             } else if (tx_buffer->type == IGB_TYPE_XDP) {
>                       xdp_return_frame(tx_buffer->xdpf);
> +             } else if (tx_buffer->type == IGB_TYPE_XSK) {
> +                     xsk_frames++;
> +                     goto skip_for_xsk;
> +             }
>  
>               /* unmap skb header data */
>               dma_unmap_single(tx_ring->dev,
> @@ -4939,6 +4948,7 @@ void igb_clean_tx_ring(struct igb_ring *tx_ring)
>                                              DMA_TO_DEVICE);
>               }
>  
> +skip_for_xsk:
>               tx_buffer->next_to_watch = NULL;
>  
>               /* move us one more past the eop_desc for start of next pkt */
> @@ -4953,6 +4963,9 @@ void igb_clean_tx_ring(struct igb_ring *tx_ring)
>       /* reset BQL for queue */
>       netdev_tx_reset_queue(txring_txq(tx_ring));
>  
> +     if (tx_ring->xsk_pool && xsk_frames)
> +             xsk_tx_completed(tx_ring->xsk_pool, xsk_frames);
> +
>       /* reset next_to_use and next_to_clean */
>       tx_ring->next_to_use = 0;
>       tx_ring->next_to_clean = 0;
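
Nit: with three buffer types now, the if/else-if chains here and in
igb_clean_tx_irq() might read more naturally as a switch; sketch only:

        switch (tx_buffer->type) {
        case IGB_TYPE_SKB:
                dev_kfree_skb_any(tx_buffer->skb);
                break;
        case IGB_TYPE_XDP:
                xdp_return_frame(tx_buffer->xdpf);
                break;
        case IGB_TYPE_XSK:
                /* The xsk buff pool owns the DMA mapping, so there is
                 * nothing to unmap; just count the frame and report the
                 * whole batch via xsk_tx_completed() afterwards.
                 */
                xsk_frames++;
                goto skip_for_xsk;
        }

Not a blocker either way.
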
> @@ -6486,6 +6499,9 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
>               return NETDEV_TX_BUSY;
>       }
>  
> +     if (unlikely(test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags)))
> +             return NETDEV_TX_BUSY;
> +
>       /* record the location of the first descriptor for this packet */
>       first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
>       first->type = IGB_TYPE_SKB;
> @@ -8260,13 +8276,18 @@ static int igb_poll(struct napi_struct *napi, int budget)
>   **/
>  static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
>  {
> -     struct igb_adapter *adapter = q_vector->adapter;
> -     struct igb_ring *tx_ring = q_vector->tx.ring;
> -     struct igb_tx_buffer *tx_buffer;
> -     union e1000_adv_tx_desc *tx_desc;
>       unsigned int total_bytes = 0, total_packets = 0;
> +     struct igb_adapter *adapter = q_vector->adapter;
>       unsigned int budget = q_vector->tx.work_limit;
> +     struct igb_ring *tx_ring = q_vector->tx.ring;
>       unsigned int i = tx_ring->next_to_clean;
> +     union e1000_adv_tx_desc *tx_desc;
> +     struct igb_tx_buffer *tx_buffer;
> +     struct xsk_buff_pool *xsk_pool;
> +     int cpu = smp_processor_id();
> +     bool xsk_xmit_done = true;
> +     struct netdev_queue *nq;
> +     u32 xsk_frames = 0;
>  
>       if (test_bit(__IGB_DOWN, &adapter->state))
>               return true;
> @@ -8297,10 +8318,14 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
>               total_packets += tx_buffer->gso_segs;
>  
>               /* free the skb */
> -             if (tx_buffer->type == IGB_TYPE_SKB)
> +             if (tx_buffer->type == IGB_TYPE_SKB) {
>                       napi_consume_skb(tx_buffer->skb, napi_budget);
> -             else
> +             } else if (tx_buffer->type == IGB_TYPE_XDP) {
>                       xdp_return_frame(tx_buffer->xdpf);
> +             } else if (tx_buffer->type == IGB_TYPE_XSK) {
> +                     xsk_frames++;
> +                     goto skip_for_xsk;
> +             }
>  
>               /* unmap skb header data */
>               dma_unmap_single(tx_ring->dev,
> @@ -8332,6 +8357,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
>                       }
>               }
>  
> +skip_for_xsk:
>               /* move us one more past the eop_desc for start of next pkt */
>               tx_buffer++;
>               tx_desc++;
> @@ -8360,6 +8386,21 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
>       q_vector->tx.total_bytes += total_bytes;
>       q_vector->tx.total_packets += total_packets;
>  
> +     xsk_pool = READ_ONCE(tx_ring->xsk_pool);
> +     if (xsk_pool) {
> +             if (xsk_frames)
> +                     xsk_tx_completed(xsk_pool, xsk_frames);
> +             if (xsk_uses_need_wakeup(xsk_pool))
> +                     xsk_set_tx_need_wakeup(xsk_pool);
> +
> +             nq = txring_txq(tx_ring);
> +             __netif_tx_lock(nq, cpu);
> +             /* Avoid transmit queue timeout since we share it with the slow path */
> +             txq_trans_cond_update(nq);
> +             xsk_xmit_done = igb_xmit_zc(tx_ring);
> +             __netif_tx_unlock(nq);
> +     }
> +
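
For anyone wiring up the userspace side: xsk_set_tx_need_wakeup() above
is the driver half of the need_wakeup handshake. With the libbpf/libxdp
xsk helpers the application half looks roughly like this (sketch; "xsk"
is a hypothetical struct holding the socket and its Tx producer ring):

        /* After producing Tx descriptors, kick the kernel only when
         * the driver asked for a wakeup.
         */
        if (xsk_ring_prod__needs_wakeup(&xsk->tx))
                sendto(xsk_socket__fd(xsk->socket), NULL, 0,
                       MSG_DONTWAIT, NULL, 0);
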
>       if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
>               struct e1000_hw *hw = &adapter->hw;
>  
> @@ -8422,7 +8463,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
>               }
>       }
>  
> -     return !!budget;
> +     return !!budget && xsk_xmit_done;
>  }
>  
>  /**
> diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c
> index 22d234db0fab..d962c5e22b71 100644
> --- a/drivers/net/ethernet/intel/igb/igb_xsk.c
> +++ b/drivers/net/ethernet/intel/igb/igb_xsk.c
> @@ -465,6 +465,70 @@ int igb_clean_rx_irq_zc(struct igb_q_vector *q_vector,
>       return failure ? budget : (int)total_packets;
>  }
>  
> +bool igb_xmit_zc(struct igb_ring *tx_ring)
> +{
> +     unsigned int budget = igb_desc_unused(tx_ring);
> +     struct xsk_buff_pool *pool = tx_ring->xsk_pool;

Ughh, that's another read of the pool ptr; you should have passed it by
arg to igb_xmit_zc()...
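
Something like this (untested) is what I mean, reading the pool once in
igb_clean_tx_irq() and handing it down:

        -bool igb_xmit_zc(struct igb_ring *tx_ring)
        +bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool)
         {
                unsigned int budget = igb_desc_unused(tx_ring);
        -       struct xsk_buff_pool *pool = tx_ring->xsk_pool;

with the caller passing the pointer it already holds:

        -               xsk_xmit_done = igb_xmit_zc(tx_ring);
        +               xsk_xmit_done = igb_xmit_zc(tx_ring, xsk_pool);

plus the prototype in igb.h and the remaining uses of "pool" in the body
updated to match.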

> +     u32 cmd_type, olinfo_status, nb_pkts, i = 0;
> +     struct xdp_desc *descs = pool->tx_descs;
> +     union e1000_adv_tx_desc *tx_desc = NULL;
> +     struct igb_tx_buffer *tx_buffer_info;
> +     unsigned int total_bytes = 0;
> +     dma_addr_t dma;
> +
> +     if (!netif_carrier_ok(tx_ring->netdev))
> +             return true;
> +
> +     if (test_bit(IGB_RING_FLAG_TX_DISABLED, &tx_ring->flags))
> +             return true;
> +
> +     nb_pkts = xsk_tx_peek_release_desc_batch(pool, budget);
> +     if (!nb_pkts)
> +             return true;
> +
> +     while (nb_pkts-- > 0) {
> +             dma = xsk_buff_raw_get_dma(pool, descs[i].addr);
> +             xsk_buff_raw_dma_sync_for_device(pool, dma, descs[i].len);
> +
> +             tx_buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
> +             tx_buffer_info->bytecount = descs[i].len;
> +             tx_buffer_info->type = IGB_TYPE_XSK;
> +             tx_buffer_info->xdpf = NULL;
> +             tx_buffer_info->gso_segs = 1;
> +             tx_buffer_info->time_stamp = jiffies;
> +
> +             tx_desc = IGB_TX_DESC(tx_ring, tx_ring->next_to_use);
> +             tx_desc->read.buffer_addr = cpu_to_le64(dma);
> +
> +             /* put descriptor type bits */
> +             cmd_type = E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_DEXT |
> +                        E1000_ADVTXD_DCMD_IFCS;
> +             olinfo_status = descs[i].len << E1000_ADVTXD_PAYLEN_SHIFT;
> +
> +             /* FIXME: This sets the Report Status (RS) bit for every
> +              * descriptor. One nice to have optimization would be to set it
> +              * only for the last descriptor in the whole batch. See Intel
> +              * ice driver for an example on how to do it.
> +              */
> +             cmd_type |= descs[i].len | IGB_TXD_DCMD;
> +             tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
> +             tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
> +
> +             total_bytes += descs[i].len;
> +
> +             i++;
> +             tx_ring->next_to_use++;
> +             tx_buffer_info->next_to_watch = tx_desc;
> +             if (tx_ring->next_to_use == tx_ring->count)
> +                     tx_ring->next_to_use = 0;
> +     }
> +
> +     netdev_tx_sent_queue(txring_txq(tx_ring), total_bytes);
> +     igb_xdp_ring_update_tail(tx_ring);
> +
> +     return nb_pkts < budget;
> +}
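
On the RS FIXME: the ice-style optimization would be to drop RS from the
per-packet cmd_type (IGB_TXD_DCMD is EOP | RS today) and tag only the
tail of the batch, roughly (untested sketch):

        /* in the loop: EOP on every packet, RS deferred */
        cmd_type |= descs[i].len | E1000_ADVTXD_DCMD_EOP;

        /* after the loop: one writeback request for the whole batch;
         * completion of the last descriptor implies all earlier ones
         */
        if (tx_desc)
                tx_desc->read.cmd_type_len |=
                        cpu_to_le32(E1000_ADVTXD_DCMD_RS);

though the cleaning side would then have to cope with several packets
completing per DD writeback.

Also, unless I'm misreading, the final "return nb_pkts < budget;"
compares a clobbered value: the while loop post-decrements nb_pkts down
past zero, so it has wrapped to UINT_MAX by the time of the return and
the function claims "not done" whenever it transmitted anything.
Iterating over a copy, e.g.:

        u32 pkts = nb_pkts;

        while (pkts-- > 0) {
                ...
        }

        return nb_pkts < budget;

would keep the intended "done when we sent less than the budget"
semantics.
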
> +
>  int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
>  {
>       struct igb_adapter *adapter = netdev_priv(dev);
> 
> -- 
> 2.39.5
> 
