On Fri, Aug 16, 2024 at 11:24:00AM +0200, Kurt Kanzenbach wrote:
> From: Sriram Yagnaraman <sriram.yagnara...@est.tech>
> 
> Always call igb_xdp_ring_update_tail() under __netif_tx_lock() and add a
> comment to indicate that. This is needed to share the same TX ring between
> XDP, XSK and slow paths.

Sorry for being an a-hole here, but I think this should go to -net as a fix...
I should have brought it up earlier: the current igb XDP support is racy on
tail updates, which is exactly what you're fixing here.

Do you agree... or not? :)
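
To spell out the race as I understand it (a rough sketch, not lifted from
the code verbatim): before this patch, igb_xdp_xmit() bumps the tail only
after dropping the Tx queue lock, and the deferred XDP_TX flush in
igb_clean_rx_irq() bumps it with no lock at all. Two contexts sharing a
TX ring can then interleave like:

	CPU0 (ndo_xdp_xmit)              CPU1 (XDP_TX flush, same TX ring)
	__netif_tx_lock(nq)
	  queue frames, next_to_use = X
	__netif_tx_unlock(nq)
	                                 __netif_tx_lock(nq)
	                                   queue frames, next_to_use = Y
	                                 __netif_tx_unlock(nq)
	                                 igb_xdp_ring_update_tail() /* tail = Y */
	igb_xdp_ring_update_tail() /* tail = X, stale */

HW is then left unaware of the frames between X and Y until the next tail
kick. Doing the tail update under __netif_tx_lock(), as you do here,
closes that window.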

> 
> Signed-off-by: Sriram Yagnaraman <sriram.yagnara...@est.tech>
> [Kurt: Split patches]
> Signed-off-by: Kurt Kanzenbach <k...@linutronix.de>
> ---
>  drivers/net/ethernet/intel/igb/igb_main.c | 14 ++++++++++----
>  1 file changed, 10 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
> index 11be39f435f3..4d5e5691c9bd 100644
> --- a/drivers/net/ethernet/intel/igb/igb_main.c
> +++ b/drivers/net/ethernet/intel/igb/igb_main.c
> @@ -2914,6 +2914,7 @@ static int igb_xdp(struct net_device *dev, struct netdev_bpf *xdp)
>       }
>  }
>  
> +/* This function assumes __netif_tx_lock is held by the caller. */
>  static void igb_xdp_ring_update_tail(struct igb_ring *ring)
>  {
>       /* Force memory writes to complete before letting h/w know there
> @@ -3000,11 +3001,11 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
>               nxmit++;
>       }
>  
> -     __netif_tx_unlock(nq);
> -
>       if (unlikely(flags & XDP_XMIT_FLUSH))
>               igb_xdp_ring_update_tail(tx_ring);
>  
> +     __netif_tx_unlock(nq);
> +
>       return nxmit;
>  }
>  
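The reordering itself looks correct to me - with this change the flush
lands inside the critical section, i.e. (abbreviated sketch of the
resulting code):

	nq = txring_txq(tx_ring);
	__netif_tx_lock(nq, cpu);
	/* igb_xmit_xdp_ring() for each frame */
	if (unlikely(flags & XDP_XMIT_FLUSH))
		igb_xdp_ring_update_tail(tx_ring); /* still under the lock */
	__netif_tx_unlock(nq);

so the descriptor writes and the tail bump are atomic wrt anyone else
serializing on the same queue lock.
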
> @@ -8853,12 +8854,14 @@ static void igb_put_rx_buffer(struct igb_ring *rx_ring,
>  
>  static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
>  {
> +     unsigned int total_bytes = 0, total_packets = 0;
>       struct igb_adapter *adapter = q_vector->adapter;
>       struct igb_ring *rx_ring = q_vector->rx.ring;
> -     struct sk_buff *skb = rx_ring->skb;
> -     unsigned int total_bytes = 0, total_packets = 0;
>       u16 cleaned_count = igb_desc_unused(rx_ring);
> +     struct sk_buff *skb = rx_ring->skb;
> +     int cpu = smp_processor_id();
>       unsigned int xdp_xmit = 0;
> +     struct netdev_queue *nq;
>       struct xdp_buff xdp;
>       u32 frame_sz = 0;
>       int rx_buf_pgcnt;
> @@ -8986,7 +8989,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
>       if (xdp_xmit & IGB_XDP_TX) {
>               struct igb_ring *tx_ring = igb_xdp_tx_queue_mapping(adapter);
>  
> +             nq = txring_txq(tx_ring);
> +             __netif_tx_lock(nq, cpu);
>               igb_xdp_ring_update_tail(tx_ring);
> +             __netif_tx_unlock(nq);
>       }
>  
>       u64_stats_update_begin(&rx_ring->rx_syncp);
> 
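...and this hunk is the other half of the fix. If I remember the helper
right, igb_xdp_tx_queue_mapping() is just:

	static struct igb_ring *igb_xdp_tx_queue_mapping(struct igb_adapter *adapter)
	{
		unsigned int r_idx = smp_processor_id();

		if (r_idx >= adapter->num_tx_queues)
			r_idx = r_idx % adapter->num_tx_queues;

		return adapter->tx_ring[r_idx];
	}

so with more CPUs than TX queues several CPUs end up on the same TX ring,
which is also reachable via ndo_xdp_xmit. The unlocked tail bump this hunk
replaces really could race, hence my vote for -net.
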
> -- 
> 2.39.2
> 
