From: Beilei Xing <beilei.x...@intel.com>

Refine the Rx queue and Tx queue setup paths, and move the helper
functions for queue setup and queue release into the common module.

Signed-off-by: Beilei Xing <beilei.x...@intel.com>
---
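(Note for reviewers, ignored by git am: the threshold checks exported from
the common module keep their existing semantics. Below is a minimal
standalone sketch of how the check_tx_thresh() constraints interact; the
ring size and threshold values are illustrative assumptions, not driver
defaults.)

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint16_t nb_desc = 512;       /* TX ring size */
        uint16_t tx_rs_thresh = 32;   /* RS bit set every 32 used descriptors */
        uint16_t tx_free_thresh = 64; /* clean the ring once 64 are used */

        /* The four constraints enforced by check_tx_thresh() */
        assert(tx_rs_thresh < nb_desc - 2);  /* leave room for the sentinel */
        assert(tx_free_thresh < nb_desc - 3);
        assert(tx_rs_thresh <= tx_free_thresh);
        assert(nb_desc % tx_rs_thresh == 0); /* RS marks divide the ring */

        return 0;
}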
 drivers/common/idpf/idpf_common_rxtx.c  |  414 +++++++++
 drivers/common/idpf/idpf_common_rxtx.h  |   57 ++
 drivers/common/idpf/meson.build         |    1 +
 drivers/common/idpf/version.map         |   15 +
 drivers/net/idpf/idpf_rxtx.c            | 1029 ++++++-----------------
 drivers/net/idpf/idpf_rxtx.h            |    9 -
 drivers/net/idpf/idpf_rxtx_vec_avx512.c |    2 +-
 7 files changed, 762 insertions(+), 765 deletions(-)
 create mode 100644 drivers/common/idpf/idpf_common_rxtx.c
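(Note for reviewers, ignored by git am: the per-queue-type ring allocations
previously duplicated across the single- and split-queue setup paths are
folded into one idpf_dma_zone_reserve() helper. Below is a minimal sketch of
the size rounding it performs; ALIGN_UP stands in for RTE_ALIGN, and the
4096-byte alignment and 32-byte descriptor size are illustrative
assumptions, not values taken from the idpf headers.)

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_MEM_ALIGN 4096UL /* stand-in for IDPF_DMA_MEM_ALIGN */
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a)) /* like RTE_ALIGN */

static uint32_t
ring_size_bytes(uint32_t nb_desc, size_t desc_sz)
{
        /* Round up so the memzone covers whole descriptors plus padding,
         * mirroring RTE_ALIGN(len * sizeof(desc), IDPF_DMA_MEM_ALIGN).
         */
        return (uint32_t)ALIGN_UP(nb_desc * desc_sz, DMA_MEM_ALIGN);
}

int main(void)
{
        /* 512 descriptors plus IDPF_RX_MAX_BURST (32) extra for bulk
         * allocation, with an assumed 32-byte descriptor:
         * (512 + 32) * 32 = 17408, rounded up to 20480 bytes.
         */
        printf("%u\n", ring_size_bytes(512 + 32, 32));
        return 0;
}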

diff --git a/drivers/common/idpf/idpf_common_rxtx.c b/drivers/common/idpf/idpf_common_rxtx.c
new file mode 100644
index 0000000000..440acc55a6
--- /dev/null
+++ b/drivers/common/idpf/idpf_common_rxtx.c
@@ -0,0 +1,414 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ */
+
+#include <rte_mbuf_dyn.h>
+#include "idpf_common_rxtx.h"
+
+int
+check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
+{
+       /* The following constraints must be satisfied:
+        *   thresh < rxq->nb_rx_desc
+        */
+       if (thresh >= nb_desc) {
+               DRV_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
+                       thresh, nb_desc);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+int
+check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
+               uint16_t tx_free_thresh)
+{
+       /* TX descriptors will have their RS bit set after tx_rs_thresh
+        * descriptors have been used. The TX descriptor ring will be cleaned
+        * after tx_free_thresh descriptors are used or if the number of
+        * descriptors required to transmit a packet is greater than the
+        * number of free TX descriptors.
+        *
+        * The following constraints must be satisfied:
+        *  - tx_rs_thresh must be less than the size of the ring minus 2.
+        *  - tx_free_thresh must be less than the size of the ring minus 3.
+        *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
+        *  - tx_rs_thresh must be a divisor of the ring size.
+        *
+        * One descriptor in the TX ring is used as a sentinel to avoid a H/W
+        * race condition, hence the maximum threshold constraints. When set
+        * to zero use default values.
+        */
+       if (tx_rs_thresh >= (nb_desc - 2)) {
+               DRV_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
+                       "number of TX descriptors (%u) minus 2",
+                       tx_rs_thresh, nb_desc);
+               return -EINVAL;
+       }
+       if (tx_free_thresh >= (nb_desc - 3)) {
+               DRV_LOG(ERR, "tx_free_thresh (%u) must be less than the "
+                       "number of TX descriptors (%u) minus 3.",
+                       tx_free_thresh, nb_desc);
+               return -EINVAL;
+       }
+       if (tx_rs_thresh > tx_free_thresh) {
+               DRV_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
+                       "equal to tx_free_thresh (%u).",
+                       tx_rs_thresh, tx_free_thresh);
+               return -EINVAL;
+       }
+       if ((nb_desc % tx_rs_thresh) != 0) {
+               DRV_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
+                       "number of TX descriptors (%u).",
+                       tx_rs_thresh, nb_desc);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+void
+release_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+       uint16_t i;
+
+       if (rxq->sw_ring == NULL)
+               return;
+
+       for (i = 0; i < rxq->nb_rx_desc; i++) {
+               if (rxq->sw_ring[i] != NULL) {
+                       rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+                       rxq->sw_ring[i] = NULL;
+               }
+       }
+}
+
+void
+release_txq_mbufs(struct idpf_tx_queue *txq)
+{
+       uint16_t nb_desc, i;
+
+       if (txq == NULL || txq->sw_ring == NULL) {
+               DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
+               return;
+       }
+
+       if (txq->sw_nb_desc != 0) {
+               /* For split queue model, descriptor ring */
+               nb_desc = txq->sw_nb_desc;
+       } else {
+               /* For single queue model */
+               nb_desc = txq->nb_tx_desc;
+       }
+       for (i = 0; i < nb_desc; i++) {
+               if (txq->sw_ring[i].mbuf != NULL) {
+                       rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+                       txq->sw_ring[i].mbuf = NULL;
+               }
+       }
+}
+
+void
+reset_split_rx_descq(struct idpf_rx_queue *rxq)
+{
+       uint16_t len;
+       uint32_t i;
+
+       if (rxq == NULL)
+               return;
+
+       len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+       for (i = 0; i < len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3);
+            i++)
+               ((volatile char *)rxq->rx_ring)[i] = 0;
+
+       rxq->rx_tail = 0;
+       rxq->expected_gen_id = 1;
+}
+
+void
+reset_split_rx_bufq(struct idpf_rx_queue *rxq)
+{
+       uint16_t len;
+       uint32_t i;
+
+       if (rxq == NULL)
+               return;
+
+       len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+       for (i = 0; i < len * sizeof(struct virtchnl2_splitq_rx_buf_desc);
+            i++)
+               ((volatile char *)rxq->rx_ring)[i] = 0;
+
+       memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+       for (i = 0; i < IDPF_RX_MAX_BURST; i++)
+               rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+       /* The next descriptor id which can be received. */
+       rxq->rx_next_avail = 0;
+
+       /* The next descriptor id which can be refilled. */
+       rxq->rx_tail = 0;
+       /* The number of descriptors which can be refilled. */
+       rxq->nb_rx_hold = rxq->nb_rx_desc - 1;
+
+       rxq->bufq1 = NULL;
+       rxq->bufq2 = NULL;
+}
+
+void
+reset_split_rx_queue(struct idpf_rx_queue *rxq)
+{
+       reset_split_rx_descq(rxq);
+       reset_split_rx_bufq(rxq->bufq1);
+       reset_split_rx_bufq(rxq->bufq2);
+}
+
+void
+reset_single_rx_queue(struct idpf_rx_queue *rxq)
+{
+       uint16_t len;
+       uint32_t i;
+
+       if (rxq == NULL)
+               return;
+
+       len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
+
+       for (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);
+            i++)
+               ((volatile char *)rxq->rx_ring)[i] = 0;
+
+       memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
+
+       for (i = 0; i < IDPF_RX_MAX_BURST; i++)
+               rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
+
+       rxq->rx_tail = 0;
+       rxq->nb_rx_hold = 0;
+
+       rte_pktmbuf_free(rxq->pkt_first_seg);
+
+       rxq->pkt_first_seg = NULL;
+       rxq->pkt_last_seg = NULL;
+       rxq->rxrearm_start = 0;
+       rxq->rxrearm_nb = 0;
+}
+
+void
+reset_split_tx_descq(struct idpf_tx_queue *txq)
+{
+       struct idpf_tx_entry *txe;
+       uint32_t i, size;
+       uint16_t prev;
+
+       if (txq == NULL) {
+               DRV_LOG(DEBUG, "Pointer to txq is NULL");
+               return;
+       }
+
+       size = sizeof(struct idpf_flex_tx_sched_desc) * txq->nb_tx_desc;
+       for (i = 0; i < size; i++)
+               ((volatile char *)txq->desc_ring)[i] = 0;
+
+       txe = txq->sw_ring;
+       prev = (uint16_t)(txq->sw_nb_desc - 1);
+       for (i = 0; i < txq->sw_nb_desc; i++) {
+               txe[i].mbuf = NULL;
+               txe[i].last_id = i;
+               txe[prev].next_id = i;
+               prev = i;
+       }
+
+       txq->tx_tail = 0;
+       txq->nb_used = 0;
+
+       /* Use this as next to clean for split desc queue */
+       txq->last_desc_cleaned = 0;
+       txq->sw_tail = 0;
+       txq->nb_free = txq->nb_tx_desc - 1;
+}
+
+void
+reset_split_tx_complq(struct idpf_tx_queue *cq)
+{
+       uint32_t i, size;
+
+       if (cq == NULL) {
+               DRV_LOG(DEBUG, "Pointer to complq is NULL");
+               return;
+       }
+
+       size = sizeof(struct idpf_splitq_tx_compl_desc) * cq->nb_tx_desc;
+       for (i = 0; i < size; i++)
+               ((volatile char *)cq->compl_ring)[i] = 0;
+
+       cq->tx_tail = 0;
+       cq->expected_gen_id = 1;
+}
+
+void
+reset_single_tx_queue(struct idpf_tx_queue *txq)
+{
+       struct idpf_tx_entry *txe;
+       uint32_t i, size;
+       uint16_t prev;
+
+       if (txq == NULL) {
+               DRV_LOG(DEBUG, "Pointer to txq is NULL");
+               return;
+       }
+
+       txe = txq->sw_ring;
+       size = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc;
+       for (i = 0; i < size; i++)
+               ((volatile char *)txq->tx_ring)[i] = 0;
+
+       prev = (uint16_t)(txq->nb_tx_desc - 1);
+       for (i = 0; i < txq->nb_tx_desc; i++) {
+               txq->tx_ring[i].qw1.cmd_dtype =
+                       rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);
+               txe[i].mbuf =  NULL;
+               txe[i].last_id = i;
+               txe[prev].next_id = i;
+               prev = i;
+       }
+
+       txq->tx_tail = 0;
+       txq->nb_used = 0;
+
+       txq->last_desc_cleaned = txq->nb_tx_desc - 1;
+       txq->nb_free = txq->nb_tx_desc - 1;
+
+       txq->next_dd = txq->rs_thresh - 1;
+       txq->next_rs = txq->rs_thresh - 1;
+}
+
+void
+idpf_rx_queue_release(void *rxq)
+{
+       struct idpf_rx_queue *q = rxq;
+
+       if (q == NULL)
+               return;
+
+       /* Split queue */
+       if (q->bufq1 != NULL && q->bufq2 != NULL) {
+               q->bufq1->ops->release_mbufs(q->bufq1);
+               rte_free(q->bufq1->sw_ring);
+               rte_memzone_free(q->bufq1->mz);
+               rte_free(q->bufq1);
+               q->bufq2->ops->release_mbufs(q->bufq2);
+               rte_free(q->bufq2->sw_ring);
+               rte_memzone_free(q->bufq2->mz);
+               rte_free(q->bufq2);
+               rte_memzone_free(q->mz);
+               rte_free(q);
+               return;
+       }
+
+       /* Single queue */
+       q->ops->release_mbufs(q);
+       rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
+       rte_free(q);
+}
+
+void
+idpf_tx_queue_release(void *txq)
+{
+       struct idpf_tx_queue *q = txq;
+
+       if (q == NULL)
+               return;
+
+       if (q->complq) {
+               rte_memzone_free(q->complq->mz);
+               rte_free(q->complq);
+       }
+
+       q->ops->release_mbufs(q);
+       rte_free(q->sw_ring);
+       rte_memzone_free(q->mz);
+       rte_free(q);
+}
+
+int
+idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+       volatile struct virtchnl2_singleq_rx_buf_desc *rxd;
+       struct rte_mbuf *mbuf = NULL;
+       uint64_t dma_addr;
+       uint16_t i;
+
+       for (i = 0; i < rxq->nb_rx_desc; i++) {
+               mbuf = rte_mbuf_raw_alloc(rxq->mp);
+               if (unlikely(mbuf == NULL)) {
+                       DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+                       return -ENOMEM;
+               }
+
+               rte_mbuf_refcnt_set(mbuf, 1);
+               mbuf->next = NULL;
+               mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+               mbuf->nb_segs = 1;
+               mbuf->port = rxq->port_id;
+
+               dma_addr =
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+               rxd = &((volatile struct virtchnl2_singleq_rx_buf_desc 
*)(rxq->rx_ring))[i];
+               rxd->pkt_addr = dma_addr;
+               rxd->hdr_addr = 0;
+               rxd->rsvd1 = 0;
+               rxd->rsvd2 = 0;
+               rxq->sw_ring[i] = mbuf;
+       }
+
+       return 0;
+}
+
+int
+idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+       volatile struct virtchnl2_splitq_rx_buf_desc *rxd;
+       struct rte_mbuf *mbuf = NULL;
+       uint64_t dma_addr;
+       uint16_t i;
+
+       for (i = 0; i < rxq->nb_rx_desc; i++) {
+               mbuf = rte_mbuf_raw_alloc(rxq->mp);
+               if (unlikely(mbuf == NULL)) {
+                       DRV_LOG(ERR, "Failed to allocate mbuf for RX");
+                       return -ENOMEM;
+               }
+
+               rte_mbuf_refcnt_set(mbuf, 1);
+               mbuf->next = NULL;
+               mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+               mbuf->nb_segs = 1;
+               mbuf->port = rxq->port_id;
+
+               dma_addr =
+                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+
+               rxd = &((volatile struct virtchnl2_splitq_rx_buf_desc 
*)(rxq->rx_ring))[i];
+               rxd->qword0.buf_id = i;
+               rxd->qword0.rsvd0 = 0;
+               rxd->qword0.rsvd1 = 0;
+               rxd->pkt_addr = dma_addr;
+               rxd->hdr_addr = 0;
+               rxd->rsvd2 = 0;
+
+               rxq->sw_ring[i] = mbuf;
+       }
+
+       rxq->nb_rx_hold = 0;
+       rxq->rx_tail = rxq->nb_rx_desc - 1;
+
+       return 0;
+}
diff --git a/drivers/common/idpf/idpf_common_rxtx.h b/drivers/common/idpf/idpf_common_rxtx.h
index a9ed31c08a..9abf321519 100644
--- a/drivers/common/idpf/idpf_common_rxtx.h
+++ b/drivers/common/idpf/idpf_common_rxtx.h
@@ -5,11 +5,28 @@
 #ifndef _IDPF_COMMON_RXTX_H_
 #define _IDPF_COMMON_RXTX_H_
 
+#include <rte_mbuf.h>
 #include <rte_mbuf_ptype.h>
 #include <rte_mbuf_core.h>
 
 #include "idpf_common_device.h"
 
+#define IDPF_RX_MAX_BURST              32
+
+#define IDPF_RX_OFFLOAD_IPV4_CKSUM             RTE_BIT64(1)
+#define IDPF_RX_OFFLOAD_UDP_CKSUM              RTE_BIT64(2)
+#define IDPF_RX_OFFLOAD_TCP_CKSUM              RTE_BIT64(3)
+#define IDPF_RX_OFFLOAD_OUTER_IPV4_CKSUM       RTE_BIT64(6)
+#define IDPF_RX_OFFLOAD_TIMESTAMP              RTE_BIT64(14)
+
+#define IDPF_TX_OFFLOAD_IPV4_CKSUM       RTE_BIT64(1)
+#define IDPF_TX_OFFLOAD_UDP_CKSUM        RTE_BIT64(2)
+#define IDPF_TX_OFFLOAD_TCP_CKSUM        RTE_BIT64(3)
+#define IDPF_TX_OFFLOAD_SCTP_CKSUM       RTE_BIT64(4)
+#define IDPF_TX_OFFLOAD_TCP_TSO          RTE_BIT64(5)
+#define IDPF_TX_OFFLOAD_MULTI_SEGS       RTE_BIT64(15)
+#define IDPF_TX_OFFLOAD_MBUF_FAST_FREE   RTE_BIT64(16)
+
 struct idpf_rx_stats {
        uint64_t mbuf_alloc_failed;
 };
@@ -109,4 +126,44 @@ struct idpf_tx_queue {
        struct idpf_tx_queue *complq;
 };
 
+struct idpf_rxq_ops {
+       void (*release_mbufs)(struct idpf_rx_queue *rxq);
+};
+
+struct idpf_txq_ops {
+       void (*release_mbufs)(struct idpf_tx_queue *txq);
+};
+
+__rte_internal
+int check_rx_thresh(uint16_t nb_desc, uint16_t thresh);
+__rte_internal
+int check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
+                   uint16_t tx_free_thresh);
+__rte_internal
+void release_rxq_mbufs(struct idpf_rx_queue *rxq);
+__rte_internal
+void release_txq_mbufs(struct idpf_tx_queue *txq);
+__rte_internal
+void reset_split_rx_descq(struct idpf_rx_queue *rxq);
+__rte_internal
+void reset_split_rx_bufq(struct idpf_rx_queue *rxq);
+__rte_internal
+void reset_split_rx_queue(struct idpf_rx_queue *rxq);
+__rte_internal
+void reset_single_rx_queue(struct idpf_rx_queue *rxq);
+__rte_internal
+void reset_split_tx_descq(struct idpf_tx_queue *txq);
+__rte_internal
+void reset_split_tx_complq(struct idpf_tx_queue *cq);
+__rte_internal
+void reset_single_tx_queue(struct idpf_tx_queue *txq);
+__rte_internal
+void idpf_rx_queue_release(void *rxq);
+__rte_internal
+void idpf_tx_queue_release(void *txq);
+__rte_internal
+int idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq);
+__rte_internal
+int idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq);
+
 #endif /* _IDPF_COMMON_RXTX_H_ */
diff --git a/drivers/common/idpf/meson.build b/drivers/common/idpf/meson.build
index c6cc7a196b..5ee071fdb2 100644
--- a/drivers/common/idpf/meson.build
+++ b/drivers/common/idpf/meson.build
@@ -5,6 +5,7 @@ deps += ['mbuf']
 
 sources = files(
     'idpf_common_device.c',
+    'idpf_common_rxtx.c',
     'idpf_common_virtchnl.c',
 )
 
diff --git a/drivers/common/idpf/version.map b/drivers/common/idpf/version.map
index 0e3ed57b88..648f94bf16 100644
--- a/drivers/common/idpf/version.map
+++ b/drivers/common/idpf/version.map
@@ -22,6 +22,21 @@ INTERNAL {
        idpf_config_irq_map;
        idpf_config_irq_unmap;
        idpf_create_vport_info_init;
+       check_rx_thresh;
+       check_tx_thresh;
+       release_rxq_mbufs;
+       release_txq_mbufs;
+       reset_split_rx_descq;
+       reset_split_rx_bufq;
+       reset_split_rx_queue;
+       reset_single_rx_queue;
+       reset_split_tx_descq;
+       reset_split_tx_complq;
+       reset_single_tx_queue;
+       idpf_rx_queue_release;
+       idpf_tx_queue_release;
+       idpf_alloc_single_rxq_mbufs;
+       idpf_alloc_split_rxq_mbufs;
 
        local: *;
 };
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index f0eff493f8..6c693f4c3a 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -12,108 +12,44 @@
 
 static int idpf_timestamp_dynfield_offset = -1;
 
-static int
-check_rx_thresh(uint16_t nb_desc, uint16_t thresh)
-{
-       /* The following constraints must be satisfied:
-        *   thresh < rxq->nb_rx_desc
-        */
-       if (thresh >= nb_desc) {
-               PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be less than %u",
-                            thresh, nb_desc);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int
-check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
-               uint16_t tx_free_thresh)
-{
-       /* TX descriptors will have their RS bit set after tx_rs_thresh
-        * descriptors have been used. The TX descriptor ring will be cleaned
-        * after tx_free_thresh descriptors are used or if the number of
-        * descriptors required to transmit a packet is greater than the
-        * number of free TX descriptors.
-        *
-        * The following constraints must be satisfied:
-        *  - tx_rs_thresh must be less than the size of the ring minus 2.
-        *  - tx_free_thresh must be less than the size of the ring minus 3.
-        *  - tx_rs_thresh must be less than or equal to tx_free_thresh.
-        *  - tx_rs_thresh must be a divisor of the ring size.
-        *
-        * One descriptor in the TX ring is used as a sentinel to avoid a H/W
-        * race condition, hence the maximum threshold constraints. When set
-        * to zero use default values.
-        */
-       if (tx_rs_thresh >= (nb_desc - 2)) {
-               PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than the "
-                            "number of TX descriptors (%u) minus 2",
-                            tx_rs_thresh, nb_desc);
-               return -EINVAL;
-       }
-       if (tx_free_thresh >= (nb_desc - 3)) {
-               PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be less than the "
-                            "number of TX descriptors (%u) minus 3.",
-                            tx_free_thresh, nb_desc);
-               return -EINVAL;
-       }
-       if (tx_rs_thresh > tx_free_thresh) {
-               PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be less than or "
-                            "equal to tx_free_thresh (%u).",
-                            tx_rs_thresh, tx_free_thresh);
-               return -EINVAL;
-       }
-       if ((nb_desc % tx_rs_thresh) != 0) {
-               PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be a divisor of the "
-                            "number of TX descriptors (%u).",
-                            tx_rs_thresh, nb_desc);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static void
-release_rxq_mbufs(struct idpf_rx_queue *rxq)
+static uint64_t
+idpf_rx_offload_convert(uint64_t offload)
 {
-       uint16_t i;
-
-       if (rxq->sw_ring == NULL)
-               return;
-
-       for (i = 0; i < rxq->nb_rx_desc; i++) {
-               if (rxq->sw_ring[i] != NULL) {
-                       rte_pktmbuf_free_seg(rxq->sw_ring[i]);
-                       rxq->sw_ring[i] = NULL;
-               }
-       }
+       uint64_t ol = 0;
+
+       if ((offload & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) != 0)
+               ol |= IDPF_RX_OFFLOAD_IPV4_CKSUM;
+       if ((offload & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) != 0)
+               ol |= IDPF_RX_OFFLOAD_UDP_CKSUM;
+       if ((offload & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) != 0)
+               ol |= IDPF_RX_OFFLOAD_TCP_CKSUM;
+       if ((offload & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
+               ol |= IDPF_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+       if ((offload & RTE_ETH_RX_OFFLOAD_TIMESTAMP) != 0)
+               ol |= IDPF_RX_OFFLOAD_TIMESTAMP;
+
+       return ol;
 }
 
-static void
-release_txq_mbufs(struct idpf_tx_queue *txq)
+static uint64_t
+idpf_tx_offload_convert(uint64_t offload)
 {
-       uint16_t nb_desc, i;
-
-       if (txq == NULL || txq->sw_ring == NULL) {
-               PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL");
-               return;
-       }
-
-       if (txq->sw_nb_desc != 0) {
-               /* For split queue model, descriptor ring */
-               nb_desc = txq->sw_nb_desc;
-       } else {
-               /* For single queue model */
-               nb_desc = txq->nb_tx_desc;
-       }
-       for (i = 0; i < nb_desc; i++) {
-               if (txq->sw_ring[i].mbuf != NULL) {
-                       rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
-                       txq->sw_ring[i].mbuf = NULL;
-               }
-       }
+       uint64_t ol = 0;
+
+       if ((offload & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) != 0)
+               ol |= IDPF_TX_OFFLOAD_IPV4_CKSUM;
+       if ((offload & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) != 0)
+               ol |= IDPF_TX_OFFLOAD_UDP_CKSUM;
+       if ((offload & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) != 0)
+               ol |= IDPF_TX_OFFLOAD_TCP_CKSUM;
+       if ((offload & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) != 0)
+               ol |= IDPF_TX_OFFLOAD_SCTP_CKSUM;
+       if ((offload & RTE_ETH_TX_OFFLOAD_MULTI_SEGS) != 0)
+               ol |= IDPF_TX_OFFLOAD_MULTI_SEGS;
+       if ((offload & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) != 0)
+               ol |= IDPF_TX_OFFLOAD_MBUF_FAST_FREE;
+
+       return ol;
 }
 
 static const struct idpf_rxq_ops def_rxq_ops = {
@@ -124,246 +60,93 @@ static const struct idpf_txq_ops def_txq_ops = {
        .release_mbufs = release_txq_mbufs,
 };
 
-static void
-reset_split_rx_descq(struct idpf_rx_queue *rxq)
-{
-       uint16_t len;
-       uint32_t i;
-
-       if (rxq == NULL)
-               return;
-
-       len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
-
-       for (i = 0; i < len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3);
-            i++)
-               ((volatile char *)rxq->rx_ring)[i] = 0;
-
-       rxq->rx_tail = 0;
-       rxq->expected_gen_id = 1;
-}
-
-static void
-reset_split_rx_bufq(struct idpf_rx_queue *rxq)
+static const struct rte_memzone *
+idpf_dma_zone_reserve(struct rte_eth_dev *dev, uint16_t queue_idx,
+                     uint16_t len, uint16_t queue_type,
+                     unsigned int socket_id, bool splitq)
 {
-       uint16_t len;
-       uint32_t i;
-
-       if (rxq == NULL)
-               return;
-
-       len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
-
-       for (i = 0; i < len * sizeof(struct virtchnl2_splitq_rx_buf_desc);
-            i++)
-               ((volatile char *)rxq->rx_ring)[i] = 0;
-
-       memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
-
-       for (i = 0; i < IDPF_RX_MAX_BURST; i++)
-               rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
-
-       /* The next descriptor id which can be received. */
-       rxq->rx_next_avail = 0;
-
-       /* The next descriptor id which can be refilled. */
-       rxq->rx_tail = 0;
-       /* The number of descriptors which can be refilled. */
-       rxq->nb_rx_hold = rxq->nb_rx_desc - 1;
-
-       rxq->bufq1 = NULL;
-       rxq->bufq2 = NULL;
-}
-
-static void
-idpf_rx_queue_release(void *rxq)
-{
-       struct idpf_rx_queue *q = rxq;
-
-       if (q == NULL)
-               return;
-
-       /* Split queue */
-       if (q->bufq1 != NULL && q->bufq2 != NULL) {
-               q->bufq1->ops->release_mbufs(q->bufq1);
-               rte_free(q->bufq1->sw_ring);
-               rte_memzone_free(q->bufq1->mz);
-               rte_free(q->bufq1);
-               q->bufq2->ops->release_mbufs(q->bufq2);
-               rte_free(q->bufq2->sw_ring);
-               rte_memzone_free(q->bufq2->mz);
-               rte_free(q->bufq2);
-               rte_memzone_free(q->mz);
-               rte_free(q);
-               return;
-       }
-
-       /* Single queue */
-       q->ops->release_mbufs(q);
-       rte_free(q->sw_ring);
-       rte_memzone_free(q->mz);
-       rte_free(q);
-}
-
-static void
-idpf_tx_queue_release(void *txq)
-{
-       struct idpf_tx_queue *q = txq;
-
-       if (q == NULL)
-               return;
-
-       if (q->complq) {
-               rte_memzone_free(q->complq->mz);
-               rte_free(q->complq);
-       }
-
-       q->ops->release_mbufs(q);
-       rte_free(q->sw_ring);
-       rte_memzone_free(q->mz);
-       rte_free(q);
-}
-
-static inline void
-reset_split_rx_queue(struct idpf_rx_queue *rxq)
-{
-       reset_split_rx_descq(rxq);
-       reset_split_rx_bufq(rxq->bufq1);
-       reset_split_rx_bufq(rxq->bufq2);
-}
-
-static void
-reset_single_rx_queue(struct idpf_rx_queue *rxq)
-{
-       uint16_t len;
-       uint32_t i;
-
-       if (rxq == NULL)
-               return;
-
-       len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
-
-       for (i = 0; i < len * sizeof(struct virtchnl2_singleq_rx_buf_desc);
-            i++)
-               ((volatile char *)rxq->rx_ring)[i] = 0;
-
-       memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
-
-       for (i = 0; i < IDPF_RX_MAX_BURST; i++)
-               rxq->sw_ring[rxq->nb_rx_desc + i] = &rxq->fake_mbuf;
-
-       rxq->rx_tail = 0;
-       rxq->nb_rx_hold = 0;
-
-       rte_pktmbuf_free(rxq->pkt_first_seg);
-
-       rxq->pkt_first_seg = NULL;
-       rxq->pkt_last_seg = NULL;
-       rxq->rxrearm_start = 0;
-       rxq->rxrearm_nb = 0;
-}
-
-static void
-reset_split_tx_descq(struct idpf_tx_queue *txq)
-{
-       struct idpf_tx_entry *txe;
-       uint32_t i, size;
-       uint16_t prev;
-
-       if (txq == NULL) {
-               PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
-               return;
-       }
+       char ring_name[RTE_MEMZONE_NAMESIZE];
+       const struct rte_memzone *mz;
+       uint32_t ring_size;
 
-       size = sizeof(struct idpf_flex_tx_sched_desc) * txq->nb_tx_desc;
-       for (i = 0; i < size; i++)
-               ((volatile char *)txq->desc_ring)[i] = 0;
-
-       txe = txq->sw_ring;
-       prev = (uint16_t)(txq->sw_nb_desc - 1);
-       for (i = 0; i < txq->sw_nb_desc; i++) {
-               txe[i].mbuf = NULL;
-               txe[i].last_id = i;
-               txe[prev].next_id = i;
-               prev = i;
+       memset(ring_name, 0, RTE_MEMZONE_NAMESIZE);
+       switch (queue_type) {
+       case VIRTCHNL2_QUEUE_TYPE_TX:
+               if (splitq)
+                       ring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_sched_desc),
+                                             IDPF_DMA_MEM_ALIGN);
+               else
+                       ring_size = RTE_ALIGN(len * sizeof(struct idpf_flex_tx_desc),
+                                             IDPF_DMA_MEM_ALIGN);
+               rte_memcpy(ring_name, "idpf Tx ring", sizeof("idpf Tx ring"));
+               break;
+       case VIRTCHNL2_QUEUE_TYPE_RX:
+               if (splitq)
+                       ring_size = RTE_ALIGN(len * sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3),
+                                             IDPF_DMA_MEM_ALIGN);
+               else
+                       ring_size = RTE_ALIGN(len * sizeof(struct virtchnl2_singleq_rx_buf_desc),
+                                             IDPF_DMA_MEM_ALIGN);
+               rte_memcpy(ring_name, "idpf Rx ring", sizeof("idpf Rx ring"));
+               break;
+       case VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION:
+               ring_size = RTE_ALIGN(len * sizeof(struct idpf_splitq_tx_compl_desc),
+                                     IDPF_DMA_MEM_ALIGN);
+               rte_memcpy(ring_name, "idpf Tx compl ring", sizeof("idpf Tx 
compl ring"));
+               break;
+       case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
+               ring_size = RTE_ALIGN(len * sizeof(struct virtchnl2_splitq_rx_buf_desc),
+                                     IDPF_DMA_MEM_ALIGN);
+               rte_memcpy(ring_name, "idpf Rx buf ring", sizeof("idpf Rx buf 
ring"));
+               break;
+       default:
+               PMD_INIT_LOG(ERR, "Invalid queue type");
+               return NULL;
        }
 
-       txq->tx_tail = 0;
-       txq->nb_used = 0;
-
-       /* Use this as next to clean for split desc queue */
-       txq->last_desc_cleaned = 0;
-       txq->sw_tail = 0;
-       txq->nb_free = txq->nb_tx_desc - 1;
-}
-
-static void
-reset_split_tx_complq(struct idpf_tx_queue *cq)
-{
-       uint32_t i, size;
-
-       if (cq == NULL) {
-               PMD_DRV_LOG(DEBUG, "Pointer to complq is NULL");
-               return;
+       mz = rte_eth_dma_zone_reserve(dev, ring_name, queue_idx,
+                                     ring_size, IDPF_RING_BASE_ALIGN,
+                                     socket_id);
+       if (mz == NULL) {
+               PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for ring");
+               return NULL;
        }
 
-       size = sizeof(struct idpf_splitq_tx_compl_desc) * cq->nb_tx_desc;
-       for (i = 0; i < size; i++)
-               ((volatile char *)cq->compl_ring)[i] = 0;
+       /* Zero all the descriptors in the ring. */
+       memset(mz->addr, 0, ring_size);
 
-       cq->tx_tail = 0;
-       cq->expected_gen_id = 1;
+       return mz;
 }
 
 static void
-reset_single_tx_queue(struct idpf_tx_queue *txq)
+idpf_dma_zone_release(const struct rte_memzone *mz)
 {
-       struct idpf_tx_entry *txe;
-       uint32_t i, size;
-       uint16_t prev;
-
-       if (txq == NULL) {
-               PMD_DRV_LOG(DEBUG, "Pointer to txq is NULL");
-               return;
-       }
-
-       txe = txq->sw_ring;
-       size = sizeof(struct idpf_flex_tx_desc) * txq->nb_tx_desc;
-       for (i = 0; i < size; i++)
-               ((volatile char *)txq->tx_ring)[i] = 0;
-
-       prev = (uint16_t)(txq->nb_tx_desc - 1);
-       for (i = 0; i < txq->nb_tx_desc; i++) {
-               txq->tx_ring[i].qw1.cmd_dtype =
-                       rte_cpu_to_le_16(IDPF_TX_DESC_DTYPE_DESC_DONE);
-               txe[i].mbuf =  NULL;
-               txe[i].last_id = i;
-               txe[prev].next_id = i;
-               prev = i;
-       }
-
-       txq->tx_tail = 0;
-       txq->nb_used = 0;
-
-       txq->last_desc_cleaned = txq->nb_tx_desc - 1;
-       txq->nb_free = txq->nb_tx_desc - 1;
-
-       txq->next_dd = txq->rs_thresh - 1;
-       txq->next_rs = txq->rs_thresh - 1;
+       rte_memzone_free(mz);
 }
 
 static int
-idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
+idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *rxq,
                         uint16_t queue_idx, uint16_t rx_free_thresh,
                         uint16_t nb_desc, unsigned int socket_id,
-                        struct rte_mempool *mp)
+                        struct rte_mempool *mp, uint8_t bufq_id)
 {
        struct idpf_vport *vport = dev->data->dev_private;
        struct idpf_adapter *adapter = vport->adapter;
        struct idpf_hw *hw = &adapter->hw;
        const struct rte_memzone *mz;
-       uint32_t ring_size;
+       struct idpf_rx_queue *bufq;
        uint16_t len;
+       int ret;
+
+       bufq = rte_zmalloc_socket("idpf bufq",
+                                  sizeof(struct idpf_rx_queue),
+                                  RTE_CACHE_LINE_SIZE,
+                                  socket_id);
+       if (bufq == NULL) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer 
queue.");
+               ret = -ENOMEM;
+               goto err_bufq1_alloc;
+       }
 
        bufq->mp = mp;
        bufq->nb_rx_desc = nb_desc;
@@ -376,8 +159,21 @@ idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
        len = rte_pktmbuf_data_room_size(bufq->mp) - RTE_PKTMBUF_HEADROOM;
        bufq->rx_buf_len = len;
 
-       /* Allocate the software ring. */
+       /* Allocate a little more to support bulk allocate. */
        len = nb_desc + IDPF_RX_MAX_BURST;
+
+       mz = idpf_dma_zone_reserve(dev, queue_idx, len,
+                                  VIRTCHNL2_QUEUE_TYPE_RX_BUFFER,
+                                  socket_id, true);
+       if (mz == NULL) {
+               ret = -ENOMEM;
+               goto err_mz_reserve;
+       }
+
+       bufq->rx_ring_phys_addr = mz->iova;
+       bufq->rx_ring = mz->addr;
+       bufq->mz = mz;
+
        bufq->sw_ring =
                rte_zmalloc_socket("idpf rx bufq sw ring",
                                   sizeof(struct rte_mbuf *) * len,
@@ -385,55 +181,60 @@ idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
                                   socket_id);
        if (bufq->sw_ring == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
-               return -ENOMEM;
-       }
-
-       /* Allocate a liitle more to support bulk allocate. */
-       len = nb_desc + IDPF_RX_MAX_BURST;
-       ring_size = RTE_ALIGN(len *
-                             sizeof(struct virtchnl2_splitq_rx_buf_desc),
-                             IDPF_DMA_MEM_ALIGN);
-       mz = rte_eth_dma_zone_reserve(dev, "rx_buf_ring", queue_idx,
-                                     ring_size, IDPF_RING_BASE_ALIGN,
-                                     socket_id);
-       if (mz == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX buffer 
queue.");
-               rte_free(bufq->sw_ring);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err_sw_ring_alloc;
        }
 
-       /* Zero all the descriptors in the ring. */
-       memset(mz->addr, 0, ring_size);
-       bufq->rx_ring_phys_addr = mz->iova;
-       bufq->rx_ring = mz->addr;
-
-       bufq->mz = mz;
        reset_split_rx_bufq(bufq);
-       bufq->q_set = true;
        bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
                         queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
        bufq->ops = &def_rxq_ops;
+       bufq->q_set = true;
 
-       /* TODO: allow bulk or vec */
+       if (bufq_id == 1) {
+               rxq->bufq1 = bufq;
+       } else if (bufq_id == 2) {
+               rxq->bufq2 = bufq;
+       } else {
+               PMD_INIT_LOG(ERR, "Invalid buffer queue index.");
+               ret = -EINVAL;
+               goto err_bufq_id;
+       }
 
        return 0;
+
+err_bufq_id:
+       rte_free(bufq->sw_ring);
+err_sw_ring_alloc:
+       idpf_dma_zone_release(mz);
+err_mz_reserve:
+       rte_free(bufq);
+err_bufq1_alloc:
+       return ret;
 }
 
-static int
-idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
-                         uint16_t nb_desc, unsigned int socket_id,
-                         const struct rte_eth_rxconf *rx_conf,
-                         struct rte_mempool *mp)
+static void
+idpf_rx_split_bufq_release(struct idpf_rx_queue *bufq)
+{
+       rte_free(bufq->sw_ring);
+       idpf_dma_zone_release(bufq->mz);
+       rte_free(bufq);
+}
+
+int
+idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                   uint16_t nb_desc, unsigned int socket_id,
+                   const struct rte_eth_rxconf *rx_conf,
+                   struct rte_mempool *mp)
 {
        struct idpf_vport *vport = dev->data->dev_private;
        struct idpf_adapter *adapter = vport->adapter;
-       struct idpf_rx_queue *bufq1, *bufq2;
+       struct idpf_hw *hw = &adapter->hw;
        const struct rte_memzone *mz;
        struct idpf_rx_queue *rxq;
        uint16_t rx_free_thresh;
-       uint32_t ring_size;
        uint64_t offloads;
-       uint16_t qid;
+       bool is_splitq;
        uint16_t len;
        int ret;
 
@@ -452,16 +253,19 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                dev->data->rx_queues[queue_idx] = NULL;
        }
 
-       /* Setup Rx description queue */
+       /* Setup Rx queue */
        rxq = rte_zmalloc_socket("idpf rxq",
                                 sizeof(struct idpf_rx_queue),
                                 RTE_CACHE_LINE_SIZE,
                                 socket_id);
        if (rxq == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data 
structure");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err_rxq_alloc;
        }
 
+       is_splitq = !!(vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
+
        rxq->mp = mp;
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh = rx_free_thresh;
@@ -470,343 +274,129 @@ idpf_rx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        rxq->rx_deferred_start = rx_conf->rx_deferred_start;
        rxq->rx_hdr_len = 0;
        rxq->adapter = adapter;
-       rxq->offloads = offloads;
+       rxq->offloads = idpf_rx_offload_convert(offloads);
 
        len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
        rxq->rx_buf_len = len;
 
-       len = rxq->nb_rx_desc + IDPF_RX_MAX_BURST;
-       ring_size = RTE_ALIGN(len *
-                             sizeof(struct virtchnl2_rx_flex_desc_adv_nic_3),
-                             IDPF_DMA_MEM_ALIGN);
-       mz = rte_eth_dma_zone_reserve(dev, "rx_cpmpl_ring", queue_idx,
-                                     ring_size, IDPF_RING_BASE_ALIGN,
-                                     socket_id);
-
+       /* Allocate a little more to support bulk allocate. */
+       len = nb_desc + IDPF_RX_MAX_BURST;
+       mz = idpf_dma_zone_reserve(dev, queue_idx, len, VIRTCHNL2_QUEUE_TYPE_RX,
+                                  socket_id, is_splitq);
        if (mz == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX");
                ret = -ENOMEM;
-               goto free_rxq;
+               goto err_mz_reserve;
        }
-
-       /* Zero all the descriptors in the ring. */
-       memset(mz->addr, 0, ring_size);
        rxq->rx_ring_phys_addr = mz->iova;
        rxq->rx_ring = mz->addr;
-
        rxq->mz = mz;
-       reset_split_rx_descq(rxq);
 
-       /* TODO: allow bulk or vec */
+       if (!is_splitq) {
+               rxq->sw_ring = rte_zmalloc_socket("idpf rxq sw ring",
+                                                 sizeof(struct rte_mbuf *) * len,
+                                                 RTE_CACHE_LINE_SIZE,
+                                                 socket_id);
+               if (rxq->sw_ring == NULL) {
+                       PMD_INIT_LOG(ERR, "Failed to allocate memory for SW 
ring");
+                       ret = -ENOMEM;
+                       goto err_sw_ring_alloc;
+               }
 
-       /* setup Rx buffer queue */
-       bufq1 = rte_zmalloc_socket("idpf bufq1",
-                                  sizeof(struct idpf_rx_queue),
-                                  RTE_CACHE_LINE_SIZE,
-                                  socket_id);
-       if (bufq1 == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer 
queue 1.");
-               ret = -ENOMEM;
-               goto free_mz;
-       }
-       qid = 2 * queue_idx;
-       ret = idpf_rx_split_bufq_setup(dev, bufq1, qid, rx_free_thresh,
-                                      nb_desc, socket_id, mp);
-       if (ret != 0) {
-               PMD_INIT_LOG(ERR, "Failed to setup buffer queue 1");
-               ret = -EINVAL;
-               goto free_bufq1;
-       }
-       rxq->bufq1 = bufq1;
+               reset_single_rx_queue(rxq);
+               rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
+                               queue_idx * vport->chunks_info.rx_qtail_spacing);
+               rxq->ops = &def_rxq_ops;
+       } else {
+               reset_split_rx_descq(rxq);
 
-       bufq2 = rte_zmalloc_socket("idpf bufq2",
-                                  sizeof(struct idpf_rx_queue),
-                                  RTE_CACHE_LINE_SIZE,
-                                  socket_id);
-       if (bufq2 == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to allocate memory for rx buffer 
queue 2.");
-               rte_free(bufq1->sw_ring);
-               rte_memzone_free(bufq1->mz);
-               ret = -ENOMEM;
-               goto free_bufq1;
-       }
-       qid = 2 * queue_idx + 1;
-       ret = idpf_rx_split_bufq_setup(dev, bufq2, qid, rx_free_thresh,
-                                      nb_desc, socket_id, mp);
-       if (ret != 0) {
-               PMD_INIT_LOG(ERR, "Failed to setup buffer queue 2");
-               rte_free(bufq1->sw_ring);
-               rte_memzone_free(bufq1->mz);
-               ret = -EINVAL;
-               goto free_bufq2;
+               /* Setup Rx buffer queues */
+               ret = idpf_rx_split_bufq_setup(dev, rxq, 2 * queue_idx,
+                                              rx_free_thresh, nb_desc,
+                                              socket_id, mp, 1);
+               if (ret != 0) {
+                       PMD_INIT_LOG(ERR, "Failed to setup buffer queue 1");
+                       ret = -EINVAL;
+                       goto err_bufq1_setup;
+               }
+
+               ret = idpf_rx_split_bufq_setup(dev, rxq, 2 * queue_idx + 1,
+                                              rx_free_thresh, nb_desc,
+                                              socket_id, mp, 2);
+               if (ret != 0) {
+                       PMD_INIT_LOG(ERR, "Failed to setup buffer queue 2");
+                       ret = -EINVAL;
+                       goto err_bufq2_setup;
+               }
        }
-       rxq->bufq2 = bufq2;
 
        rxq->q_set = true;
        dev->data->rx_queues[queue_idx] = rxq;
 
        return 0;
 
-free_bufq2:
-       rte_free(bufq2);
-free_bufq1:
-       rte_free(bufq1);
-free_mz:
-       rte_memzone_free(mz);
-free_rxq:
+err_bufq2_setup:
+       idpf_rx_split_bufq_release(rxq->bufq1);
+err_bufq1_setup:
+err_sw_ring_alloc:
+       idpf_dma_zone_release(mz);
+err_mz_reserve:
        rte_free(rxq);
-
+err_rxq_alloc:
        return ret;
 }
 
 static int
-idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
-                          uint16_t nb_desc, unsigned int socket_id,
-                          const struct rte_eth_rxconf *rx_conf,
-                          struct rte_mempool *mp)
-{
-       struct idpf_vport *vport = dev->data->dev_private;
-       struct idpf_adapter *adapter = vport->adapter;
-       struct idpf_hw *hw = &adapter->hw;
-       const struct rte_memzone *mz;
-       struct idpf_rx_queue *rxq;
-       uint16_t rx_free_thresh;
-       uint32_t ring_size;
-       uint64_t offloads;
-       uint16_t len;
-
-       offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
-
-       /* Check free threshold */
-       rx_free_thresh = (rx_conf->rx_free_thresh == 0) ?
-               IDPF_DEFAULT_RX_FREE_THRESH :
-               rx_conf->rx_free_thresh;
-       if (check_rx_thresh(nb_desc, rx_free_thresh) != 0)
-               return -EINVAL;
-
-       /* Free memory if needed */
-       if (dev->data->rx_queues[queue_idx] != NULL) {
-               idpf_rx_queue_release(dev->data->rx_queues[queue_idx]);
-               dev->data->rx_queues[queue_idx] = NULL;
-       }
-
-       /* Setup Rx description queue */
-       rxq = rte_zmalloc_socket("idpf rxq",
-                                sizeof(struct idpf_rx_queue),
-                                RTE_CACHE_LINE_SIZE,
-                                socket_id);
-       if (rxq == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue data 
structure");
-               return -ENOMEM;
-       }
-
-       rxq->mp = mp;
-       rxq->nb_rx_desc = nb_desc;
-       rxq->rx_free_thresh = rx_free_thresh;
-       rxq->queue_id = vport->chunks_info.rx_start_qid + queue_idx;
-       rxq->port_id = dev->data->port_id;
-       rxq->rx_deferred_start = rx_conf->rx_deferred_start;
-       rxq->rx_hdr_len = 0;
-       rxq->adapter = adapter;
-       rxq->offloads = offloads;
-
-       len = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
-       rxq->rx_buf_len = len;
-
-       len = nb_desc + IDPF_RX_MAX_BURST;
-       rxq->sw_ring =
-               rte_zmalloc_socket("idpf rxq sw ring",
-                                  sizeof(struct rte_mbuf *) * len,
-                                  RTE_CACHE_LINE_SIZE,
-                                  socket_id);
-       if (rxq->sw_ring == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to allocate memory for SW ring");
-               rte_free(rxq);
-               return -ENOMEM;
-       }
-
-       /* Allocate a liitle more to support bulk allocate. */
-       len = nb_desc + IDPF_RX_MAX_BURST;
-       ring_size = RTE_ALIGN(len *
-                             sizeof(struct virtchnl2_singleq_rx_buf_desc),
-                             IDPF_DMA_MEM_ALIGN);
-       mz = rte_eth_dma_zone_reserve(dev, "rx ring", queue_idx,
-                                     ring_size, IDPF_RING_BASE_ALIGN,
-                                     socket_id);
-       if (mz == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for RX buffer 
queue.");
-               rte_free(rxq->sw_ring);
-               rte_free(rxq);
-               return -ENOMEM;
-       }
-
-       /* Zero all the descriptors in the ring. */
-       memset(mz->addr, 0, ring_size);
-       rxq->rx_ring_phys_addr = mz->iova;
-       rxq->rx_ring = mz->addr;
-
-       rxq->mz = mz;
-       reset_single_rx_queue(rxq);
-       rxq->q_set = true;
-       dev->data->rx_queues[queue_idx] = rxq;
-       rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
-                       queue_idx * vport->chunks_info.rx_qtail_spacing);
-       rxq->ops = &def_rxq_ops;
-
-       return 0;
-}
-
-int
-idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
-                   uint16_t nb_desc, unsigned int socket_id,
-                   const struct rte_eth_rxconf *rx_conf,
-                   struct rte_mempool *mp)
-{
-       struct idpf_vport *vport = dev->data->dev_private;
-
-       if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
-               return idpf_rx_single_queue_setup(dev, queue_idx, nb_desc,
-                                                 socket_id, rx_conf, mp);
-       else
-               return idpf_rx_split_queue_setup(dev, queue_idx, nb_desc,
-                                                socket_id, rx_conf, mp);
-}
-
-static int
-idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
-                         uint16_t nb_desc, unsigned int socket_id,
-                         const struct rte_eth_txconf *tx_conf)
+idpf_tx_complq_setup(struct rte_eth_dev *dev, struct idpf_tx_queue *txq,
+                    uint16_t queue_idx, uint16_t nb_desc,
+                    unsigned int socket_id)
 {
        struct idpf_vport *vport = dev->data->dev_private;
-       struct idpf_adapter *adapter = vport->adapter;
-       uint16_t tx_rs_thresh, tx_free_thresh;
-       struct idpf_hw *hw = &adapter->hw;
-       struct idpf_tx_queue *txq, *cq;
        const struct rte_memzone *mz;
-       uint32_t ring_size;
-       uint64_t offloads;
+       struct idpf_tx_queue *cq;
        int ret;
 
-       offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
-
-       tx_rs_thresh = (uint16_t)((tx_conf->tx_rs_thresh != 0) ?
-               tx_conf->tx_rs_thresh : IDPF_DEFAULT_TX_RS_THRESH);
-       tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh != 0) ?
-               tx_conf->tx_free_thresh : IDPF_DEFAULT_TX_FREE_THRESH);
-       if (check_tx_thresh(nb_desc, tx_rs_thresh, tx_free_thresh) != 0)
-               return -EINVAL;
-
-       /* Free memory if needed. */
-       if (dev->data->tx_queues[queue_idx] != NULL) {
-               idpf_tx_queue_release(dev->data->tx_queues[queue_idx]);
-               dev->data->tx_queues[queue_idx] = NULL;
-       }
-
-       /* Allocate the TX queue data structure. */
-       txq = rte_zmalloc_socket("idpf split txq",
-                                sizeof(struct idpf_tx_queue),
-                                RTE_CACHE_LINE_SIZE,
-                                socket_id);
-       if (txq == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue 
structure");
-               return -ENOMEM;
-       }
-
-       txq->nb_tx_desc = nb_desc;
-       txq->rs_thresh = tx_rs_thresh;
-       txq->free_thresh = tx_free_thresh;
-       txq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;
-       txq->port_id = dev->data->port_id;
-       txq->offloads = offloads;
-       txq->tx_deferred_start = tx_conf->tx_deferred_start;
-
-       /* Allocate software ring */
-       txq->sw_nb_desc = 2 * nb_desc;
-       txq->sw_ring =
-               rte_zmalloc_socket("idpf split tx sw ring",
-                                  sizeof(struct idpf_tx_entry) *
-                                  txq->sw_nb_desc,
-                                  RTE_CACHE_LINE_SIZE,
-                                  socket_id);
-       if (txq->sw_ring == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
-               ret = -ENOMEM;
-               goto err_txq_sw_ring;
-       }
-
-       /* Allocate TX hardware ring descriptors. */
-       ring_size = sizeof(struct idpf_flex_tx_sched_desc) * txq->nb_tx_desc;
-       ring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);
-       mz = rte_eth_dma_zone_reserve(dev, "split_tx_ring", queue_idx,
-                                     ring_size, IDPF_RING_BASE_ALIGN,
-                                     socket_id);
-       if (mz == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
-               ret = -ENOMEM;
-               goto err_txq_mz;
-       }
-       txq->tx_ring_phys_addr = mz->iova;
-       txq->desc_ring = mz->addr;
-
-       txq->mz = mz;
-       reset_split_tx_descq(txq);
-       txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
-                       queue_idx * vport->chunks_info.tx_qtail_spacing);
-       txq->ops = &def_txq_ops;
-
-       /* Allocate the TX completion queue data structure. */
-       txq->complq = rte_zmalloc_socket("idpf splitq cq",
-                                        sizeof(struct idpf_tx_queue),
-                                        RTE_CACHE_LINE_SIZE,
-                                        socket_id);
-       cq = txq->complq;
+       cq = rte_zmalloc_socket("idpf splitq cq",
+                               sizeof(struct idpf_tx_queue),
+                               RTE_CACHE_LINE_SIZE,
+                               socket_id);
        if (cq == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue 
structure");
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for Tx compl 
queue");
                ret = -ENOMEM;
-               goto err_cq;
+               goto err_cq_alloc;
        }
-       cq->nb_tx_desc = 2 * nb_desc;
+
+       cq->nb_tx_desc = nb_desc;
        cq->queue_id = vport->chunks_info.tx_compl_start_qid + queue_idx;
        cq->port_id = dev->data->port_id;
        cq->txqs = dev->data->tx_queues;
        cq->tx_start_qid = vport->chunks_info.tx_start_qid;
 
-       ring_size = sizeof(struct idpf_splitq_tx_compl_desc) * cq->nb_tx_desc;
-       ring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);
-       mz = rte_eth_dma_zone_reserve(dev, "tx_split_compl_ring", queue_idx,
-                                     ring_size, IDPF_RING_BASE_ALIGN,
-                                     socket_id);
+       mz = idpf_dma_zone_reserve(dev, queue_idx, nb_desc,
+                                  VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION,
+                                  socket_id, true);
        if (mz == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX 
completion queue");
                ret = -ENOMEM;
-               goto err_cq_mz;
+               goto err_mz_reserve;
        }
        cq->tx_ring_phys_addr = mz->iova;
        cq->compl_ring = mz->addr;
        cq->mz = mz;
        reset_split_tx_complq(cq);
 
-       txq->q_set = true;
-       dev->data->tx_queues[queue_idx] = txq;
+       txq->complq = cq;
 
        return 0;
 
-err_cq_mz:
+err_mz_reserve:
        rte_free(cq);
-err_cq:
-       rte_memzone_free(txq->mz);
-err_txq_mz:
-       rte_free(txq->sw_ring);
-err_txq_sw_ring:
-       rte_free(txq);
-
+err_cq_alloc:
        return ret;
 }
 
-static int
-idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
-                          uint16_t nb_desc, unsigned int socket_id,
-                          const struct rte_eth_txconf *tx_conf)
+int
+idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                   uint16_t nb_desc, unsigned int socket_id,
+                   const struct rte_eth_txconf *tx_conf)
 {
        struct idpf_vport *vport = dev->data->dev_private;
        struct idpf_adapter *adapter = vport->adapter;
@@ -814,8 +404,10 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        struct idpf_hw *hw = &adapter->hw;
        const struct rte_memzone *mz;
        struct idpf_tx_queue *txq;
-       uint32_t ring_size;
        uint64_t offloads;
+       uint16_t len;
+       bool is_splitq;
+       int ret;
 
        offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
 
@@ -839,71 +431,74 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                 socket_id);
        if (txq == NULL) {
                PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue 
structure");
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err_txq_alloc;
        }
 
-       /* TODO: vlan offload */
+       is_splitq = !!(vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT);
 
        txq->nb_tx_desc = nb_desc;
        txq->rs_thresh = tx_rs_thresh;
        txq->free_thresh = tx_free_thresh;
        txq->queue_id = vport->chunks_info.tx_start_qid + queue_idx;
        txq->port_id = dev->data->port_id;
-       txq->offloads = offloads;
+       txq->offloads = idpf_tx_offload_convert(offloads);
        txq->tx_deferred_start = tx_conf->tx_deferred_start;
 
-       /* Allocate software ring */
-       txq->sw_ring =
-               rte_zmalloc_socket("idpf tx sw ring",
-                                  sizeof(struct idpf_tx_entry) * nb_desc,
-                                  RTE_CACHE_LINE_SIZE,
-                                  socket_id);
-       if (txq->sw_ring == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
-               rte_free(txq);
-               return -ENOMEM;
-       }
+       if (is_splitq)
+               len = 2 * nb_desc;
+       else
+               len = nb_desc;
+       txq->sw_nb_desc = len;
 
        /* Allocate TX hardware ring descriptors. */
-       ring_size = sizeof(struct idpf_flex_tx_desc) * nb_desc;
-       ring_size = RTE_ALIGN(ring_size, IDPF_DMA_MEM_ALIGN);
-       mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
-                                     ring_size, IDPF_RING_BASE_ALIGN,
-                                     socket_id);
+       mz = idpf_dma_zone_reserve(dev, queue_idx, nb_desc, VIRTCHNL2_QUEUE_TYPE_TX,
+                                  socket_id, is_splitq);
        if (mz == NULL) {
-               PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
-               rte_free(txq->sw_ring);
-               rte_free(txq);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto err_mz_reserve;
        }
-
        txq->tx_ring_phys_addr = mz->iova;
-       txq->tx_ring = mz->addr;
-
        txq->mz = mz;
-       reset_single_tx_queue(txq);
-       txq->q_set = true;
-       dev->data->tx_queues[queue_idx] = txq;
+
+       txq->sw_ring = rte_zmalloc_socket("idpf tx sw ring",
+                                         sizeof(struct idpf_tx_entry) * len,
+                                         RTE_CACHE_LINE_SIZE, socket_id);
+       if (txq->sw_ring == NULL) {
+               PMD_INIT_LOG(ERR, "Failed to allocate memory for SW TX ring");
+               ret = -ENOMEM;
+               goto err_sw_ring_alloc;
+       }
+
+       if (!is_splitq) {
+               txq->tx_ring = mz->addr;
+               reset_single_tx_queue(txq);
+       } else {
+               txq->desc_ring = mz->addr;
+               reset_split_tx_descq(txq);
+
+               /* Setup tx completion queue if split model */
+               ret = idpf_tx_complq_setup(dev, txq, queue_idx,
+                                          2 * nb_desc, socket_id);
+               if (ret != 0)
+                       goto err_complq_setup;
+       }
+
        txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
                        queue_idx * vport->chunks_info.tx_qtail_spacing);
        txq->ops = &def_txq_ops;
+       txq->q_set = true;
+       dev->data->tx_queues[queue_idx] = txq;
 
        return 0;
-}
 
-int
-idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
-                   uint16_t nb_desc, unsigned int socket_id,
-                   const struct rte_eth_txconf *tx_conf)
-{
-       struct idpf_vport *vport = dev->data->dev_private;
-
-       if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)
-               return idpf_tx_single_queue_setup(dev, queue_idx, nb_desc,
-                                                 socket_id, tx_conf);
-       else
-               return idpf_tx_split_queue_setup(dev, queue_idx, nb_desc,
-                                                socket_id, tx_conf);
+err_complq_setup:
+err_sw_ring_alloc:
+       idpf_dma_zone_release(mz);
+err_mz_reserve:
+       rte_free(txq);
+err_txq_alloc:
+       return ret;
 }
 
 static int
@@ -916,89 +511,13 @@ idpf_register_ts_mbuf(struct idpf_rx_queue *rxq)
                                                         &idpf_timestamp_dynflag);
                if (err != 0) {
                        PMD_DRV_LOG(ERR,
-                               "Cannot register mbuf field/flag for 
timestamp");
+                                   "Cannot register mbuf field/flag for 
timestamp");
                        return -EINVAL;
                }
        }
        return 0;
 }
 
-static int
-idpf_alloc_single_rxq_mbufs(struct idpf_rx_queue *rxq)
-{
-       volatile struct virtchnl2_singleq_rx_buf_desc *rxd;
-       struct rte_mbuf *mbuf = NULL;
-       uint64_t dma_addr;
-       uint16_t i;
-
-       for (i = 0; i < rxq->nb_rx_desc; i++) {
-               mbuf = rte_mbuf_raw_alloc(rxq->mp);
-               if (unlikely(mbuf == NULL)) {
-                       PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
-                       return -ENOMEM;
-               }
-
-               rte_mbuf_refcnt_set(mbuf, 1);
-               mbuf->next = NULL;
-               mbuf->data_off = RTE_PKTMBUF_HEADROOM;
-               mbuf->nb_segs = 1;
-               mbuf->port = rxq->port_id;
-
-               dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
-
-               rxd = &((volatile struct virtchnl2_singleq_rx_buf_desc 
*)(rxq->rx_ring))[i];
-               rxd->pkt_addr = dma_addr;
-               rxd->hdr_addr = 0;
-               rxd->rsvd1 = 0;
-               rxd->rsvd2 = 0;
-               rxq->sw_ring[i] = mbuf;
-       }
-
-       return 0;
-}
-
-static int
-idpf_alloc_split_rxq_mbufs(struct idpf_rx_queue *rxq)
-{
-       volatile struct virtchnl2_splitq_rx_buf_desc *rxd;
-       struct rte_mbuf *mbuf = NULL;
-       uint64_t dma_addr;
-       uint16_t i;
-
-       for (i = 0; i < rxq->nb_rx_desc; i++) {
-               mbuf = rte_mbuf_raw_alloc(rxq->mp);
-               if (unlikely(mbuf == NULL)) {
-                       PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX");
-                       return -ENOMEM;
-               }
-
-               rte_mbuf_refcnt_set(mbuf, 1);
-               mbuf->next = NULL;
-               mbuf->data_off = RTE_PKTMBUF_HEADROOM;
-               mbuf->nb_segs = 1;
-               mbuf->port = rxq->port_id;
-
-               dma_addr =
-                       rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
-
-               rxd = &((volatile struct virtchnl2_splitq_rx_buf_desc 
*)(rxq->rx_ring))[i];
-               rxd->qword0.buf_id = i;
-               rxd->qword0.rsvd0 = 0;
-               rxd->qword0.rsvd1 = 0;
-               rxd->pkt_addr = dma_addr;
-               rxd->hdr_addr = 0;
-               rxd->rsvd2 = 0;
-
-               rxq->sw_ring[i] = mbuf;
-       }
-
-       rxq->nb_rx_hold = 0;
-       rxq->rx_tail = rxq->nb_rx_desc - 1;
-
-       return 0;
-}
-
 int
 idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index b8325f9b96..4efbf10295 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -51,7 +51,6 @@
 /* Base address of the HW descriptor ring should be 128B aligned. */
 #define IDPF_RING_BASE_ALIGN   128
 
-#define IDPF_RX_MAX_BURST              32
 #define IDPF_DEFAULT_RX_FREE_THRESH    32
 
 /* used for Vector PMD */
@@ -101,14 +100,6 @@ union idpf_tx_offload {
        };
 };
 
-struct idpf_rxq_ops {
-       void (*release_mbufs)(struct idpf_rx_queue *rxq);
-};
-
-struct idpf_txq_ops {
-       void (*release_mbufs)(struct idpf_tx_queue *txq);
-};
-
 int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                        uint16_t nb_desc, unsigned int socket_id,
                        const struct rte_eth_rxconf *rx_conf,
diff --git a/drivers/net/idpf/idpf_rxtx_vec_avx512.c b/drivers/net/idpf/idpf_rxtx_vec_avx512.c
index fb2b6bb53c..71a6c59823 100644
--- a/drivers/net/idpf/idpf_rxtx_vec_avx512.c
+++ b/drivers/net/idpf/idpf_rxtx_vec_avx512.c
@@ -562,7 +562,7 @@ idpf_tx_free_bufs_avx512(struct idpf_tx_queue *txq)
        txep = (void *)txq->sw_ring;
        txep += txq->next_dd - (n - 1);
 
-       if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) 
{
+       if (txq->offloads & IDPF_TX_OFFLOAD_MBUF_FAST_FREE && (n & 31) == 0) {
                struct rte_mempool *mp = txep[0].mbuf->pool;
                struct rte_mempool_cache *cache = rte_mempool_default_cache(mp,
                                                                rte_lcore_id());
-- 
2.26.2
