xsk_umem_peek_addr() returns chunk-aligned handles, so have the reuse
queue (rq) behave the same way.  Clean up callsites.

Signed-off-by: Jonathan Lemon <jonathan.le...@gmail.com>
Cc: Björn Töpel <bjorn.to...@intel.com>
Cc: Maxim Mikityanskiy <maxi...@mellanox.com>
Cc: netdev@vger.kernel.org
---
 drivers/net/ethernet/intel/i40e/i40e_xsk.c          | 2 --
 drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c        | 2 --
 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c | 2 +-
 include/net/xdp_sock.h                              | 2 +-
 4 files changed, 2 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index d28b629afc8a..d2e212d007c3 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -285,8 +285,6 @@ static bool i40e_alloc_buffer_slow_zc(struct i40e_ring *rx_ring,
                return false;
        }
 
-       handle &= rx_ring->xsk_umem->chunk_mask;
-
        hr = umem->headroom + XDP_PACKET_HEADROOM;
 
        bi->dma = xdp_umem_get_dma(umem, handle);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 582d83d35f7e..fc5ca35b4dbf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -343,8 +343,6 @@ static bool ixgbe_alloc_buffer_slow_zc(struct ixgbe_ring *rx_ring,
                return false;
        }
 
-       handle &= rx_ring->xsk_umem->chunk_mask;
-
        hr = umem->headroom + XDP_PACKET_HEADROOM;
 
        bi->dma = xdp_umem_get_dma(umem, handle);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index 61d75a7b3ce2..2f0b5fd1ee97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -309,7 +309,7 @@ int mlx5e_xsk_page_alloc_umem(struct mlx5e_rq *rq,
 
 static inline void mlx5e_xsk_recycle_frame(struct mlx5e_rq *rq, u64 handle)
 {
-       xsk_umem_fq_reuse(rq->umem, handle & rq->umem->chunk_mask);
+       xsk_umem_fq_reuse(rq->umem, handle);
 }
 
 /* XSKRQ uses pages from UMEM, they must not be released. They are returned to
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 7ebcb62fedf1..9d97ae73a287 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -118,7 +118,7 @@ static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
        if (!rq->length)
                return xsk_umem_peek_addr(umem, addr);
 
-       *addr = rq->handles[rq->length - 1];
+       *addr = rq->handles[rq->length - 1] & umem->chunk_mask;
        return addr;
 }
 
-- 
2.17.1

Reply via email to