In vector mode, the DF bit is not programmed correctly because the return value of vsetq_lane_u64() — which contains the updated vector — is ignored, causing the hardware to free mbufs even though the NIX_TX_OFFLOAD_MBUF_NOFF_F flag is set.
Hence, save return value of vsetq_lane_u64() appropriately so that DF bit is programmed correctly. Signed-off-by: Hanumanth Pothula <hpoth...@marvell.com> --- drivers/net/cnxk/cn10k_tx.h | 8 ++++---- drivers/net/cnxk/cn9k_tx.h | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/cnxk/cn10k_tx.h b/drivers/net/cnxk/cn10k_tx.h index ea13866b20..fb33e7150b 100644 --- a/drivers/net/cnxk/cn10k_tx.h +++ b/drivers/net/cnxk/cn10k_tx.h @@ -2477,28 +2477,28 @@ cn10k_nix_xmit_pkts_vector(void *tx_queue, uint64_t *ws, mbuf3 = (uint64_t *)tx_pkts[3]; if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf0)) - vsetq_lane_u64(0x80000, xmask01, 0); + xmask01 = vsetq_lane_u64(0x80000, xmask01, 0); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf0)->pool, (void **)&mbuf0, 1, 0); if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf1)) - vsetq_lane_u64(0x80000, xmask01, 1); + xmask01 = vsetq_lane_u64(0x80000, xmask01, 1); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf1)->pool, (void **)&mbuf1, 1, 0); if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf2)) - vsetq_lane_u64(0x80000, xmask23, 0); + xmask23 = vsetq_lane_u64(0x80000, xmask23, 0); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf2)->pool, (void **)&mbuf2, 1, 0); if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf3)) - vsetq_lane_u64(0x80000, xmask23, 1); + xmask23 = vsetq_lane_u64(0x80000, xmask23, 1); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf3)->pool, diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h index 6ce81f5c96..a609814dfb 100644 --- a/drivers/net/cnxk/cn9k_tx.h +++ b/drivers/net/cnxk/cn9k_tx.h @@ -1705,28 +1705,28 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, mbuf3 = (uint64_t *)tx_pkts[3]; if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf0)) - vsetq_lane_u64(0x80000, xmask01, 0); + xmask01 = vsetq_lane_u64(0x80000, xmask01, 0); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf0)->pool, (void **)&mbuf0, 1, 0); 
if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf1)) - vsetq_lane_u64(0x80000, xmask01, 1); + xmask01 = vsetq_lane_u64(0x80000, xmask01, 1); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf1)->pool, (void **)&mbuf1, 1, 0); if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf2)) - vsetq_lane_u64(0x80000, xmask23, 0); + xmask23 = vsetq_lane_u64(0x80000, xmask23, 0); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf2)->pool, (void **)&mbuf2, 1, 0); if (cnxk_nix_prefree_seg((struct rte_mbuf *)mbuf3)) - vsetq_lane_u64(0x80000, xmask23, 1); + xmask23 = vsetq_lane_u64(0x80000, xmask23, 1); else RTE_MEMPOOL_CHECK_COOKIES( ((struct rte_mbuf *)mbuf3)->pool, -- 2.25.1