The current calculation assumes that the mbufs are contiguous. However, this assumption does not hold when the memory spans across a huge page boundary. Fix this by reading the size directly from the mempool memory chunks.
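For context, a minimal sketch of where this chunk information comes from, assuming DPDK's rte_mempool API (the helper name dump_mempool_chunks is hypothetical and only for illustration): each mempool keeps a list of rte_mempool_memhdr entries describing the memory areas that actually back it, which can be walked with rte_mempool_mem_iter().

    #include <stdio.h>
    #include <rte_mempool.h>

    /* Callback for rte_mempool_mem_iter(): prints each memory chunk that
     * backs the mempool and accumulates the total mapped length. */
    static void
    chunk_cb(struct rte_mempool *mp __rte_unused, void *opaque,
             struct rte_mempool_memhdr *memhdr, unsigned int mem_idx)
    {
            size_t *total_len = opaque;

            printf("chunk %u: addr=%p len=%zu\n",
                   mem_idx, memhdr->addr, memhdr->len);
            *total_len += memhdr->len;
    }

    /* Hypothetical helper: report how large the pool's backing memory
     * really is, instead of assuming populated_size * frame_size bytes
     * of contiguous memory. */
    static size_t
    dump_mempool_chunks(struct rte_mempool *mp)
    {
            size_t total_len = 0;

            rte_mempool_mem_iter(mp, chunk_cb, &total_len);
            return total_len;
    }

The patch below takes the length of the first chunk (memhdr->len) when sizing the umem, rather than deriving the size from the populated object count and frame size.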
Signed-off-by: Frank Du <frank...@intel.com>
---
 drivers/net/af_xdp/rte_eth_af_xdp.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/drivers/net/af_xdp/rte_eth_af_xdp.c b/drivers/net/af_xdp/rte_eth_af_xdp.c
index 268a130c49..cb95d17d13 100644
--- a/drivers/net/af_xdp/rte_eth_af_xdp.c
+++ b/drivers/net/af_xdp/rte_eth_af_xdp.c
@@ -1039,7 +1039,7 @@ eth_link_update(struct rte_eth_dev *dev __rte_unused,
 }
 
 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
-static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
+static inline uintptr_t get_memhdr_info(struct rte_mempool *mp, uint64_t *align, size_t *len)
 {
 	struct rte_mempool_memhdr *memhdr;
 	uintptr_t memhdr_addr, aligned_addr;
@@ -1048,6 +1048,7 @@ static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
 	memhdr_addr = (uintptr_t)memhdr->addr;
 	aligned_addr = memhdr_addr & ~(getpagesize() - 1);
 	*align = memhdr_addr - aligned_addr;
+	*len = memhdr->len;
 
 	return aligned_addr;
 }
@@ -1125,6 +1126,7 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
 	void *base_addr = NULL;
 	struct rte_mempool *mb_pool = rxq->mb_pool;
 	uint64_t umem_size, align = 0;
+	size_t len = 0;
 
 	if (internals->shared_umem) {
 		if (get_shared_umem(rxq, internals->if_name, &umem) < 0)
@@ -1156,10 +1158,8 @@ xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
 		}
 
 		umem->mb_pool = mb_pool;
-		base_addr = (void *)get_base_addr(mb_pool, &align);
-		umem_size = (uint64_t)mb_pool->populated_size *
-				(uint64_t)usr_config.frame_size +
-				align;
+		base_addr = (void *)get_memhdr_info(mb_pool, &align, &len);
+		umem_size = (uint64_t)len + align;
 
 		ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
 				       &rxq->fq, &rxq->cq, &usr_config);
-- 
2.34.1