Split out the data structures and logic of NFD3 into a new file. The
code is moved verbatim; there is no functional change.

Signed-off-by: Chaoyong He <chaoyong...@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderl...@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower.c           |   1 +
 drivers/net/nfp/flower/nfp_flower_ctrl.c      |   1 +
 .../net/nfp/flower/nfp_flower_representor.c   |   1 +
 drivers/net/nfp/meson.build                   |   1 +
 drivers/net/nfp/nfd3/nfp_nfd3.h               | 166 +++++++++
 drivers/net/nfp/nfd3/nfp_nfd3_dp.c            | 346 ++++++++++++++++++
 drivers/net/nfp/nfp_common.c                  |   2 +
 drivers/net/nfp/nfp_ethdev.c                  |   1 +
 drivers/net/nfp/nfp_ethdev_vf.c               |   1 +
 drivers/net/nfp/nfp_rxtx.c                    | 336 +----------------
 drivers/net/nfp/nfp_rxtx.h                    | 153 +-------
 11 files changed, 526 insertions(+), 483 deletions(-)
 create mode 100644 drivers/net/nfp/nfd3/nfp_nfd3.h
 create mode 100644 drivers/net/nfp/nfd3/nfp_nfd3_dp.c

diff --git a/drivers/net/nfp/flower/nfp_flower.c b/drivers/net/nfp/flower/nfp_flower.c
index 4af1900bde..9212e6606b 100644
--- a/drivers/net/nfp/flower/nfp_flower.c
+++ b/drivers/net/nfp/flower/nfp_flower.c
@@ -15,6 +15,7 @@
 #include "../nfp_ctrl.h"
 #include "../nfp_cpp_bridge.h"
 #include "../nfp_rxtx.h"
+#include "../nfd3/nfp_nfd3.h"
 #include "../nfpcore/nfp_mip.h"
 #include "../nfpcore/nfp_rtsym.h"
 #include "../nfpcore/nfp_nsp.h"
diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.c b/drivers/net/nfp/flower/nfp_flower_ctrl.c
index 3e083d948e..7f9dc5683b 100644
--- a/drivers/net/nfp/flower/nfp_flower_ctrl.c
+++ b/drivers/net/nfp/flower/nfp_flower_ctrl.c
@@ -11,6 +11,7 @@
 #include "../nfp_logs.h"
 #include "../nfp_ctrl.h"
 #include "../nfp_rxtx.h"
+#include "../nfd3/nfp_nfd3.h"
 #include "nfp_flower.h"
 #include "nfp_flower_ctrl.h"
 #include "nfp_flower_cmsg.h"
diff --git a/drivers/net/nfp/flower/nfp_flower_representor.c b/drivers/net/nfp/flower/nfp_flower_representor.c
index 362c67f7b5..3eb76cb489 100644
--- a/drivers/net/nfp/flower/nfp_flower_representor.c
+++ b/drivers/net/nfp/flower/nfp_flower_representor.c
@@ -10,6 +10,7 @@
 #include "../nfp_logs.h"
 #include "../nfp_ctrl.h"
 #include "../nfp_rxtx.h"
+#include "../nfd3/nfp_nfd3.h"
 #include "../nfpcore/nfp_mip.h"
 #include "../nfpcore/nfp_rtsym.h"
 #include "../nfpcore/nfp_nsp.h"
diff --git a/drivers/net/nfp/meson.build b/drivers/net/nfp/meson.build
index 6d122f5ce9..697a1479c8 100644
--- a/drivers/net/nfp/meson.build
+++ b/drivers/net/nfp/meson.build
@@ -10,6 +10,7 @@ sources = files(
         'flower/nfp_flower_cmsg.c',
         'flower/nfp_flower_ctrl.c',
         'flower/nfp_flower_representor.c',
+        'nfd3/nfp_nfd3_dp.c',
         'nfpcore/nfp_cpp_pcie_ops.c',
         'nfpcore/nfp_nsp.c',
         'nfpcore/nfp_cppcore.c',
diff --git a/drivers/net/nfp/nfd3/nfp_nfd3.h b/drivers/net/nfp/nfd3/nfp_nfd3.h
new file mode 100644
index 0000000000..5c6162aada
--- /dev/null
+++ b/drivers/net/nfp/nfd3/nfp_nfd3.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#ifndef _NFP_NFD3_H_
+#define _NFP_NFD3_H_
+
+/* TX descriptor format */
+#define PCIE_DESC_TX_EOP                (1 << 7)
+#define PCIE_DESC_TX_OFFSET_MASK        (0x7f)
+
+/* Flags in the host TX descriptor */
+#define PCIE_DESC_TX_CSUM               (1 << 7)
+#define PCIE_DESC_TX_IP4_CSUM           (1 << 6)
+#define PCIE_DESC_TX_TCP_CSUM           (1 << 5)
+#define PCIE_DESC_TX_UDP_CSUM           (1 << 4)
+#define PCIE_DESC_TX_VLAN               (1 << 3)
+#define PCIE_DESC_TX_LSO                (1 << 2)
+#define PCIE_DESC_TX_ENCAP_NONE         (0)
+#define PCIE_DESC_TX_ENCAP              (1 << 1)
+#define PCIE_DESC_TX_O_IP4_CSUM         (1 << 0)
+
+#define NFD3_TX_DESC_PER_SIMPLE_PKT     1
+
+struct nfp_net_nfd3_tx_desc {
+       union {
+               struct {
+                       uint8_t dma_addr_hi; /* High bits of host buf address */
+                       __le16 dma_len;     /* Length to DMA for this desc */
+                       uint8_t offset_eop; /* Offset in buf where pkt starts +
+                                            * highest bit is eop flag, low 7bit is meta_len.
+                                            */
+                       __le32 dma_addr_lo; /* Low 32bit of host buf addr */
+
+                       __le16 mss;         /* MSS to be used for LSO */
+                       uint8_t lso_hdrlen; /* LSO, where the data starts */
+                       uint8_t flags;      /* TX Flags, see @PCIE_DESC_TX_* */
+
+                       union {
+                               struct {
+                                       /*
+                                        * L3 and L4 header offsets required
+                                        * for TSOv2
+                                        */
+                                       uint8_t l3_offset;
+                                       uint8_t l4_offset;
+                               };
+                               __le16 vlan; /* VLAN tag to add if indicated */
+                       };
+                       __le16 data_len;    /* Length of frame + meta data */
+               } __rte_packed;
+               __le32 vals[4];
+       };
+};
+
+/* Leaving always free descriptors for avoiding wrapping confusion */
+static inline uint32_t
+nfp_net_nfd3_free_tx_desc(struct nfp_net_txq *txq)
+{
+       if (txq->wr_p >= txq->rd_p)
+               return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
+       else
+               return txq->rd_p - txq->wr_p - 8;
+}
+
+/*
+ * nfp_net_nfd3_txq_full() - Check if the TX queue free descriptors
+ * is below tx_free_threshold for firmware of nfd3
+ *
+ * @txq: TX queue to check
+ *
+ * This function uses the host copy* of read/write pointers.
+ */
+static inline uint32_t
+nfp_net_nfd3_txq_full(struct nfp_net_txq *txq)
+{
+       return (nfp_net_nfd3_free_tx_desc(txq) < txq->tx_free_thresh);
+}
+
+/* nfp_net_nfd3_tx_tso() - Set NFD3 TX descriptor for TSO */
+static inline void
+nfp_net_nfd3_tx_tso(struct nfp_net_txq *txq,
+               struct nfp_net_nfd3_tx_desc *txd,
+               struct rte_mbuf *mb)
+{
+       uint64_t ol_flags;
+       struct nfp_net_hw *hw = txq->hw;
+
+       if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
+               goto clean_txd;
+
+       ol_flags = mb->ol_flags;
+
+       if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG))
+               goto clean_txd;
+
+       txd->l3_offset = mb->l2_len;
+       txd->l4_offset = mb->l2_len + mb->l3_len;
+       txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
+
+       if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
+               txd->l3_offset += mb->outer_l2_len + mb->outer_l3_len;
+               txd->l4_offset += mb->outer_l2_len + mb->outer_l3_len;
+               txd->lso_hdrlen += mb->outer_l2_len + mb->outer_l3_len;
+       }
+
+       txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
+       txd->flags = PCIE_DESC_TX_LSO;
+       return;
+
+clean_txd:
+       txd->flags = 0;
+       txd->l3_offset = 0;
+       txd->l4_offset = 0;
+       txd->lso_hdrlen = 0;
+       txd->mss = 0;
+}
+
+/* nfp_net_nfd3_tx_cksum() - Set TX CSUM offload flags in NFD3 TX descriptor */
+static inline void
+nfp_net_nfd3_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_nfd3_tx_desc *txd,
+                struct rte_mbuf *mb)
+{
+       uint64_t ol_flags;
+       struct nfp_net_hw *hw = txq->hw;
+
+       if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
+               return;
+
+       ol_flags = mb->ol_flags;
+
+       /* Set TCP csum offload if TSO enabled. */
+       if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+               txd->flags |= PCIE_DESC_TX_TCP_CSUM;
+
+       /* IPv6 does not need checksum */
+       if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+               txd->flags |= PCIE_DESC_TX_IP4_CSUM;
+
+       if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+               txd->flags |= PCIE_DESC_TX_ENCAP;
+
+       switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+       case RTE_MBUF_F_TX_UDP_CKSUM:
+               txd->flags |= PCIE_DESC_TX_UDP_CSUM;
+               break;
+       case RTE_MBUF_F_TX_TCP_CKSUM:
+               txd->flags |= PCIE_DESC_TX_TCP_CSUM;
+               break;
+       }
+
+       if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK))
+               txd->flags |= PCIE_DESC_TX_CSUM;
+}
+
+uint16_t nfp_net_nfd3_xmit_pkts(void *tx_queue,
+               struct rte_mbuf **tx_pkts,
+               uint16_t nb_pkts);
+int nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev,
+               uint16_t queue_idx,
+               uint16_t nb_desc,
+               unsigned int socket_id,
+               const struct rte_eth_txconf *tx_conf);
+
+#endif /* _NFP_NFD3_H_ */
diff --git a/drivers/net/nfp/nfd3/nfp_nfd3_dp.c b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
new file mode 100644
index 0000000000..88bcd26ad8
--- /dev/null
+++ b/drivers/net/nfp/nfd3/nfp_nfd3_dp.c
@@ -0,0 +1,346 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2023 Corigine, Inc.
+ * All rights reserved.
+ */
+
+#include <ethdev_driver.h>
+#include <bus_pci_driver.h>
+#include <rte_malloc.h>
+
+#include "../nfp_logs.h"
+#include "../nfp_common.h"
+#include "../nfp_rxtx.h"
+#include "nfp_nfd3.h"
+
+/*
+ * nfp_net_nfd3_tx_vlan() - Set vlan info in the nfd3 tx desc
+ *
+ * If enable NFP_NET_CFG_CTRL_TXVLAN_V2
+ *     Vlan_info is stored in the meta and
+ *     is handled in the nfp_net_nfd3_set_meta_vlan
+ * else if enable NFP_NET_CFG_CTRL_TXVLAN
+ *     Vlan_info is stored in the tx_desc and
+ *     is handled in the nfp_net_nfd3_tx_vlan
+ */
+static void
+nfp_net_nfd3_tx_vlan(struct nfp_net_txq *txq,
+               struct nfp_net_nfd3_tx_desc *txd,
+               struct rte_mbuf *mb)
+{
+       struct nfp_net_hw *hw = txq->hw;
+
+       if ((hw->cap & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0 ||
+               (hw->cap & NFP_NET_CFG_CTRL_TXVLAN) == 0)
+               return;
+
+       if ((mb->ol_flags & RTE_MBUF_F_TX_VLAN) != 0) {
+               txd->flags |= PCIE_DESC_TX_VLAN;
+               txd->vlan = mb->vlan_tci;
+       }
+}
+
+static void
+nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data,
+               struct nfp_net_txq *txq,
+               struct rte_mbuf *pkt)
+{
+       uint8_t vlan_layer = 0;
+       struct nfp_net_hw *hw;
+       uint32_t meta_info;
+       uint8_t layer = 0;
+       char *meta;
+
+       hw = txq->hw;
+
+       if ((pkt->ol_flags & RTE_MBUF_F_TX_VLAN) != 0 &&
+                       (hw->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0) {
+               if (meta_data->length == 0)
+                       meta_data->length = NFP_NET_META_HEADER_SIZE;
+               meta_data->length += NFP_NET_META_FIELD_SIZE;
+               meta_data->header |= NFP_NET_META_VLAN;
+       }
+
+       if (meta_data->length == 0)
+               return;
+
+       meta_info = meta_data->header;
+       meta_data->header = rte_cpu_to_be_32(meta_data->header);
+       meta = rte_pktmbuf_prepend(pkt, meta_data->length);
+       memcpy(meta, &meta_data->header, sizeof(meta_data->header));
+       meta += NFP_NET_META_HEADER_SIZE;
+
+       for (; meta_info != 0; meta_info >>= NFP_NET_META_FIELD_SIZE, layer++,
+                       meta += NFP_NET_META_FIELD_SIZE) {
+               switch (meta_info & NFP_NET_META_FIELD_MASK) {
+               case NFP_NET_META_VLAN:
+                       if (vlan_layer > 0) {
+                               PMD_DRV_LOG(ERR, "At most 1 layers of vlan is supported");
+                               return;
+                       }
+                       nfp_net_set_meta_vlan(meta_data, pkt, layer);
+                       vlan_layer++;
+                       break;
+               default:
+                       PMD_DRV_LOG(ERR, "The metadata type not supported");
+                       return;
+               }
+
+               memcpy(meta, &meta_data->data[layer], sizeof(meta_data->data[layer]));
+       }
+}
+
+uint16_t
+nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       struct nfp_net_txq *txq;
+       struct nfp_net_hw *hw;
+       struct nfp_net_nfd3_tx_desc *txds, txd;
+       struct nfp_net_meta_raw meta_data;
+       struct rte_mbuf *pkt;
+       uint64_t dma_addr;
+       int pkt_size, dma_size;
+       uint16_t free_descs, issued_descs;
+       struct rte_mbuf **lmbuf;
+       int i;
+
+       txq = tx_queue;
+       hw = txq->hw;
+       txds = &txq->txds[txq->wr_p];
+
+       PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
+                  txq->qidx, txq->wr_p, nb_pkts);
+
+       if (nfp_net_nfd3_free_tx_desc(txq) < NFD3_TX_DESC_PER_SIMPLE_PKT * nb_pkts ||
+           nfp_net_nfd3_txq_full(txq))
+               nfp_net_tx_free_bufs(txq);
+
+       free_descs = (uint16_t)nfp_net_nfd3_free_tx_desc(txq);
+       if (unlikely(free_descs == 0))
+               return 0;
+
+       pkt = *tx_pkts;
+
+       issued_descs = 0;
+       PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets",
+                  txq->qidx, nb_pkts);
+       /* Sending packets */
+       for (i = 0; i < nb_pkts && free_descs > 0; i++) {
+               memset(&meta_data, 0, sizeof(meta_data));
+               /* Grabbing the mbuf linked to the current descriptor */
+               lmbuf = &txq->txbufs[txq->wr_p].mbuf;
+               /* Warming the cache for releasing the mbuf later on */
+               RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
+
+               pkt = *(tx_pkts + i);
+
+               nfp_net_nfd3_set_meta_data(&meta_data, txq, pkt);
+
+               if (unlikely(pkt->nb_segs > 1 &&
+                            !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
+                       PMD_INIT_LOG(ERR, "Multisegment packet not supported");
+                       goto xmit_end;
+               }
+
+               /* Checking if we have enough descriptors */
+               if (unlikely(pkt->nb_segs > free_descs))
+                       goto xmit_end;
+
+               /*
+                * Checksum and VLAN flags just in the first descriptor for a
+                * multisegment packet, but TSO info needs to be in all of them.
+                */
+               txd.data_len = pkt->pkt_len;
+               nfp_net_nfd3_tx_tso(txq, &txd, pkt);
+               nfp_net_nfd3_tx_cksum(txq, &txd, pkt);
+               nfp_net_nfd3_tx_vlan(txq, &txd, pkt);
+
+               /*
+                * mbuf data_len is the data in one segment and pkt_len data
+                * in the whole packet. When the packet is just one segment,
+                * then data_len = pkt_len
+                */
+               pkt_size = pkt->pkt_len;
+
+               while (pkt != NULL && free_descs > 0) {
+                       /* Copying TSO, VLAN and cksum info */
+                       *txds = txd;
+
+                       /* Releasing mbuf used by this descriptor previously*/
+                       if (*lmbuf)
+                               rte_pktmbuf_free_seg(*lmbuf);
+
+                       /*
+                        * Linking mbuf with descriptor for being released
+                        * next time descriptor is used
+                        */
+                       *lmbuf = pkt;
+
+                       dma_size = pkt->data_len;
+                       dma_addr = rte_mbuf_data_iova(pkt);
+                       PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
+                                  "%" PRIx64 "", dma_addr);
+
+                       /* Filling descriptors fields */
+                       txds->dma_len = dma_size;
+                       txds->data_len = txd.data_len;
+                       txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
+                       txds->dma_addr_lo = (dma_addr & 0xffffffff);
+                       free_descs--;
+
+                       txq->wr_p++;
+                       if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
+                               txq->wr_p = 0;
+
+                       pkt_size -= dma_size;
+
+                       /*
+                        * Making the EOP, packets with just one segment
+                        * the priority
+                        */
+                       if (likely(pkt_size == 0))
+                               txds->offset_eop = PCIE_DESC_TX_EOP;
+                       else
+                               txds->offset_eop = 0;
+
+                       /* Set the meta_len */
+                       txds->offset_eop |= meta_data.length;
+
+                       pkt = pkt->next;
+                       /* Referencing next free TX descriptor */
+                       txds = &txq->txds[txq->wr_p];
+                       lmbuf = &txq->txbufs[txq->wr_p].mbuf;
+                       issued_descs++;
+               }
+       }
+
+xmit_end:
+       /* Increment write pointers. Force memory write before we let HW know */
+       rte_wmb();
+       nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
+
+       return i;
+}
+
+int
+nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+                      uint16_t nb_desc, unsigned int socket_id,
+                      const struct rte_eth_txconf *tx_conf)
+{
+       int ret;
+       uint16_t min_tx_desc;
+       uint16_t max_tx_desc;
+       const struct rte_memzone *tz;
+       struct nfp_net_txq *txq;
+       uint16_t tx_free_thresh;
+       struct nfp_net_hw *hw;
+       uint32_t tx_desc_sz;
+
+       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       PMD_INIT_FUNC_TRACE();
+
+       ret = nfp_net_tx_desc_limits(hw, &min_tx_desc, &max_tx_desc);
+       if (ret != 0)
+               return ret;
+
+       /* Validating number of descriptors */
+       tx_desc_sz = nb_desc * sizeof(struct nfp_net_nfd3_tx_desc);
+       if ((NFD3_TX_DESC_PER_SIMPLE_PKT * tx_desc_sz) % NFP_ALIGN_RING_DESC != 0 ||
+            nb_desc > max_tx_desc || nb_desc < min_tx_desc) {
+               PMD_DRV_LOG(ERR, "Wrong nb_desc value");
+               return -EINVAL;
+       }
+
+       tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
+                                   tx_conf->tx_free_thresh :
+                                   DEFAULT_TX_FREE_THRESH);
+
+       if (tx_free_thresh > (nb_desc)) {
+               PMD_DRV_LOG(ERR,
+                       "tx_free_thresh must be less than the number of TX "
+                       "descriptors. (tx_free_thresh=%u port=%d "
+                       "queue=%d)", (unsigned int)tx_free_thresh,
+                       dev->data->port_id, (int)queue_idx);
+               return -(EINVAL);
+       }
+
+       /*
+        * Free memory prior to re-allocation if needed. This is the case after
+        * calling nfp_net_stop
+        */
+       if (dev->data->tx_queues[queue_idx]) {
+               PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
+                          queue_idx);
+               nfp_net_tx_queue_release(dev, queue_idx);
+               dev->data->tx_queues[queue_idx] = NULL;
+       }
+
+       /* Allocating tx queue data structure */
+       txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
+                                RTE_CACHE_LINE_SIZE, socket_id);
+       if (txq == NULL) {
+               PMD_DRV_LOG(ERR, "Error allocating tx dma");
+               return -ENOMEM;
+       }
+
+       dev->data->tx_queues[queue_idx] = txq;
+
+       /*
+        * Allocate TX ring hardware descriptors. A memzone large enough to
+        * handle the maximum ring size is allocated in order to allow for
+        * resizing in later calls to the queue setup function.
+        */
+       tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+                                  sizeof(struct nfp_net_nfd3_tx_desc) *
+                                  NFD3_TX_DESC_PER_SIMPLE_PKT *
+                                  max_tx_desc, NFP_MEMZONE_ALIGN,
+                                  socket_id);
+       if (tz == NULL) {
+               PMD_DRV_LOG(ERR, "Error allocating tx dma");
+               nfp_net_tx_queue_release(dev, queue_idx);
+               dev->data->tx_queues[queue_idx] = NULL;
+               return -ENOMEM;
+       }
+
+       txq->tx_count = nb_desc * NFD3_TX_DESC_PER_SIMPLE_PKT;
+       txq->tx_free_thresh = tx_free_thresh;
+       txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
+       txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
+       txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
+
+       /* queue mapping based on firmware configuration */
+       txq->qidx = queue_idx;
+       txq->tx_qcidx = queue_idx * hw->stride_tx;
+       txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
+
+       txq->port_id = dev->data->port_id;
+
+       /* Saving physical and virtual addresses for the TX ring */
+       txq->dma = (uint64_t)tz->iova;
+       txq->txds = (struct nfp_net_nfd3_tx_desc *)tz->addr;
+
+       /* mbuf pointers array for referencing mbufs linked to TX descriptors */
+       txq->txbufs = rte_zmalloc_socket("txq->txbufs",
+                                        sizeof(*txq->txbufs) * txq->tx_count,
+                                        RTE_CACHE_LINE_SIZE, socket_id);
+       if (txq->txbufs == NULL) {
+               nfp_net_tx_queue_release(dev, queue_idx);
+               dev->data->tx_queues[queue_idx] = NULL;
+               return -ENOMEM;
+       }
+       PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
+                  txq->txbufs, txq->txds, (unsigned long)txq->dma);
+
+       nfp_net_reset_tx_queue(txq);
+
+       txq->hw = hw;
+
+       /*
+        * Telling the HW about the physical address of the TX ring and number
+        * of descriptors in log2 format
+        */
+       nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
+       nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(txq->tx_count));
+
+       return 0;
+}
diff --git a/drivers/net/nfp/nfp_common.c b/drivers/net/nfp/nfp_common.c
index f300d6d892..d1b6ef3bc9 100644
--- a/drivers/net/nfp/nfp_common.c
+++ b/drivers/net/nfp/nfp_common.c
@@ -44,6 +44,8 @@
 #include "nfp_logs.h"
 #include "nfp_cpp_bridge.h"
 
+#include "nfd3/nfp_nfd3.h"
+
 #include <sys/types.h>
 #include <sys/socket.h>
 #include <sys/un.h>
diff --git a/drivers/net/nfp/nfp_ethdev.c b/drivers/net/nfp/nfp_ethdev.c
index 26cf9cd01c..f212a4a10e 100644
--- a/drivers/net/nfp/nfp_ethdev.c
+++ b/drivers/net/nfp/nfp_ethdev.c
@@ -38,6 +38,7 @@
 #include "nfp_logs.h"
 #include "nfp_cpp_bridge.h"
 
+#include "nfd3/nfp_nfd3.h"
 #include "flower/nfp_flower.h"
 
 static int
diff --git a/drivers/net/nfp/nfp_ethdev_vf.c b/drivers/net/nfp/nfp_ethdev_vf.c
index d69ac8cd37..80a8983deb 100644
--- a/drivers/net/nfp/nfp_ethdev_vf.c
+++ b/drivers/net/nfp/nfp_ethdev_vf.c
@@ -22,6 +22,7 @@
 #include "nfp_ctrl.h"
 #include "nfp_rxtx.h"
 #include "nfp_logs.h"
+#include "nfd3/nfp_nfd3.h"
 
 static void
 nfp_netvf_read_mac(struct nfp_net_hw *hw)
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index 16a124fd7d..76021b64ee 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -20,6 +20,7 @@
 #include "nfp_ctrl.h"
 #include "nfp_rxtx.h"
 #include "nfp_logs.h"
+#include "nfd3/nfp_nfd3.h"
 #include "nfpcore/nfp_mip.h"
 #include "nfpcore/nfp_rtsym.h"
 
@@ -749,158 +750,7 @@ nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
        txq->rd_p = 0;
 }
 
-static int
-nfp_net_nfd3_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
-                      uint16_t nb_desc, unsigned int socket_id,
-                      const struct rte_eth_txconf *tx_conf)
-{
-       int ret;
-       uint16_t min_tx_desc;
-       uint16_t max_tx_desc;
-       const struct rte_memzone *tz;
-       struct nfp_net_txq *txq;
-       uint16_t tx_free_thresh;
-       struct nfp_net_hw *hw;
-       uint32_t tx_desc_sz;
-
-       hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       PMD_INIT_FUNC_TRACE();
-
-       ret = nfp_net_tx_desc_limits(hw, &min_tx_desc, &max_tx_desc);
-       if (ret != 0)
-               return ret;
-
-       /* Validating number of descriptors */
-       tx_desc_sz = nb_desc * sizeof(struct nfp_net_nfd3_tx_desc);
-       if ((NFD3_TX_DESC_PER_SIMPLE_PKT * tx_desc_sz) % NFP_ALIGN_RING_DESC != 0 ||
-            nb_desc > max_tx_desc || nb_desc < min_tx_desc) {
-               PMD_DRV_LOG(ERR, "Wrong nb_desc value");
-               return -EINVAL;
-       }
-
-       tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
-                                   tx_conf->tx_free_thresh :
-                                   DEFAULT_TX_FREE_THRESH);
-
-       if (tx_free_thresh > (nb_desc)) {
-               PMD_DRV_LOG(ERR,
-                       "tx_free_thresh must be less than the number of TX "
-                       "descriptors. (tx_free_thresh=%u port=%d "
-                       "queue=%d)", (unsigned int)tx_free_thresh,
-                       dev->data->port_id, (int)queue_idx);
-               return -(EINVAL);
-       }
-
-       /*
-        * Free memory prior to re-allocation if needed. This is the case after
-        * calling nfp_net_stop
-        */
-       if (dev->data->tx_queues[queue_idx]) {
-               PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
-                          queue_idx);
-               nfp_net_tx_queue_release(dev, queue_idx);
-               dev->data->tx_queues[queue_idx] = NULL;
-       }
-
-       /* Allocating tx queue data structure */
-       txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
-                                RTE_CACHE_LINE_SIZE, socket_id);
-       if (txq == NULL) {
-               PMD_DRV_LOG(ERR, "Error allocating tx dma");
-               return -ENOMEM;
-       }
-
-       dev->data->tx_queues[queue_idx] = txq;
-
-       /*
-        * Allocate TX ring hardware descriptors. A memzone large enough to
-        * handle the maximum ring size is allocated in order to allow for
-        * resizing in later calls to the queue setup function.
-        */
-       tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
-                                  sizeof(struct nfp_net_nfd3_tx_desc) *
-                                  NFD3_TX_DESC_PER_SIMPLE_PKT *
-                                  max_tx_desc, NFP_MEMZONE_ALIGN,
-                                  socket_id);
-       if (tz == NULL) {
-               PMD_DRV_LOG(ERR, "Error allocating tx dma");
-               nfp_net_tx_queue_release(dev, queue_idx);
-               dev->data->tx_queues[queue_idx] = NULL;
-               return -ENOMEM;
-       }
-
-       txq->tx_count = nb_desc * NFD3_TX_DESC_PER_SIMPLE_PKT;
-       txq->tx_free_thresh = tx_free_thresh;
-       txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
-       txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
-       txq->tx_wthresh = tx_conf->tx_thresh.wthresh;
-
-       /* queue mapping based on firmware configuration */
-       txq->qidx = queue_idx;
-       txq->tx_qcidx = queue_idx * hw->stride_tx;
-       txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
-
-       txq->port_id = dev->data->port_id;
-
-       /* Saving physical and virtual addresses for the TX ring */
-       txq->dma = (uint64_t)tz->iova;
-       txq->txds = (struct nfp_net_nfd3_tx_desc *)tz->addr;
-
-       /* mbuf pointers array for referencing mbufs linked to TX descriptors */
-       txq->txbufs = rte_zmalloc_socket("txq->txbufs",
-                                        sizeof(*txq->txbufs) * txq->tx_count,
-                                        RTE_CACHE_LINE_SIZE, socket_id);
-       if (txq->txbufs == NULL) {
-               nfp_net_tx_queue_release(dev, queue_idx);
-               dev->data->tx_queues[queue_idx] = NULL;
-               return -ENOMEM;
-       }
-       PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
-                  txq->txbufs, txq->txds, (unsigned long)txq->dma);
-
-       nfp_net_reset_tx_queue(txq);
-
-       txq->hw = hw;
-
-       /*
-        * Telling the HW about the physical address of the TX ring and number
-        * of descriptors in log2 format
-        */
-       nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
-       nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(txq->tx_count));
-
-       return 0;
-}
-
-/*
- * nfp_net_nfd3_tx_vlan() - Set vlan info in the nfd3 tx desc
- *
- * If enable NFP_NET_CFG_CTRL_TXVLAN_V2
- *     Vlan_info is stored in the meta and
- *     is handled in the nfp_net_nfd3_set_meta_vlan
- * else if enable NFP_NET_CFG_CTRL_TXVLAN
- *     Vlan_info is stored in the tx_desc and
- *     is handled in the nfp_net_nfd3_tx_vlan
- */
-static void
-nfp_net_nfd3_tx_vlan(struct nfp_net_txq *txq,
-               struct nfp_net_nfd3_tx_desc *txd,
-               struct rte_mbuf *mb)
-{
-       struct nfp_net_hw *hw = txq->hw;
-
-       if ((hw->cap & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0 ||
-               (hw->cap & NFP_NET_CFG_CTRL_TXVLAN) == 0)
-               return;
-
-       if ((mb->ol_flags & RTE_MBUF_F_TX_VLAN) != 0) {
-               txd->flags |= PCIE_DESC_TX_VLAN;
-               txd->vlan = mb->vlan_tci;
-       }
-}
-
-static void
+void
 nfp_net_set_meta_vlan(struct nfp_net_meta_raw *meta_data,
                struct rte_mbuf *pkt,
                uint8_t layer)
@@ -914,188 +764,6 @@ nfp_net_set_meta_vlan(struct nfp_net_meta_raw *meta_data,
        meta_data->data[layer] = rte_cpu_to_be_32(tpid << 16 | vlan_tci);
 }
 
-static void
-nfp_net_nfd3_set_meta_data(struct nfp_net_meta_raw *meta_data,
-               struct nfp_net_txq *txq,
-               struct rte_mbuf *pkt)
-{
-       uint8_t vlan_layer = 0;
-       struct nfp_net_hw *hw;
-       uint32_t meta_info;
-       uint8_t layer = 0;
-       char *meta;
-
-       hw = txq->hw;
-
-       if ((pkt->ol_flags & RTE_MBUF_F_TX_VLAN) != 0 &&
-                       (hw->ctrl & NFP_NET_CFG_CTRL_TXVLAN_V2) != 0) {
-               if (meta_data->length == 0)
-                       meta_data->length = NFP_NET_META_HEADER_SIZE;
-               meta_data->length += NFP_NET_META_FIELD_SIZE;
-               meta_data->header |= NFP_NET_META_VLAN;
-       }
-
-       if (meta_data->length == 0)
-               return;
-
-       meta_info = meta_data->header;
-       meta_data->header = rte_cpu_to_be_32(meta_data->header);
-       meta = rte_pktmbuf_prepend(pkt, meta_data->length);
-       memcpy(meta, &meta_data->header, sizeof(meta_data->header));
-       meta += NFP_NET_META_HEADER_SIZE;
-
-       for (; meta_info != 0; meta_info >>= NFP_NET_META_FIELD_SIZE, layer++,
-                       meta += NFP_NET_META_FIELD_SIZE) {
-               switch (meta_info & NFP_NET_META_FIELD_MASK) {
-               case NFP_NET_META_VLAN:
-                       if (vlan_layer > 0) {
-                               PMD_DRV_LOG(ERR, "At most 1 layers of vlan is supported");
-                               return;
-                       }
-                       nfp_net_set_meta_vlan(meta_data, pkt, layer);
-                       vlan_layer++;
-                       break;
-               default:
-                       PMD_DRV_LOG(ERR, "The metadata type not supported");
-                       return;
-               }
-
-               memcpy(meta, &meta_data->data[layer], sizeof(meta_data->data[layer]));
-       }
-}
-
-uint16_t
-nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
-{
-       struct nfp_net_txq *txq;
-       struct nfp_net_hw *hw;
-       struct nfp_net_nfd3_tx_desc *txds, txd;
-       struct nfp_net_meta_raw meta_data;
-       struct rte_mbuf *pkt;
-       uint64_t dma_addr;
-       int pkt_size, dma_size;
-       uint16_t free_descs, issued_descs;
-       struct rte_mbuf **lmbuf;
-       int i;
-
-       txq = tx_queue;
-       hw = txq->hw;
-       txds = &txq->txds[txq->wr_p];
-
-       PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
-                  txq->qidx, txq->wr_p, nb_pkts);
-
-       if (nfp_net_nfd3_free_tx_desc(txq) < NFD3_TX_DESC_PER_SIMPLE_PKT * nb_pkts ||
-           nfp_net_nfd3_txq_full(txq))
-               nfp_net_tx_free_bufs(txq);
-
-       free_descs = (uint16_t)nfp_net_nfd3_free_tx_desc(txq);
-       if (unlikely(free_descs == 0))
-               return 0;
-
-       pkt = *tx_pkts;
-
-       issued_descs = 0;
-       PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets",
-                  txq->qidx, nb_pkts);
-       /* Sending packets */
-       for (i = 0; i < nb_pkts && free_descs > 0; i++) {
-               memset(&meta_data, 0, sizeof(meta_data));
-               /* Grabbing the mbuf linked to the current descriptor */
-               lmbuf = &txq->txbufs[txq->wr_p].mbuf;
-               /* Warming the cache for releasing the mbuf later on */
-               RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
-
-               pkt = *(tx_pkts + i);
-
-               nfp_net_nfd3_set_meta_data(&meta_data, txq, pkt);
-
-               if (unlikely(pkt->nb_segs > 1 &&
-                            !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
-                       PMD_INIT_LOG(ERR, "Multisegment packet not supported");
-                       goto xmit_end;
-               }
-
-               /* Checking if we have enough descriptors */
-               if (unlikely(pkt->nb_segs > free_descs))
-                       goto xmit_end;
-
-               /*
-                * Checksum and VLAN flags just in the first descriptor for a
-                * multisegment packet, but TSO info needs to be in all of them.
-                */
-               txd.data_len = pkt->pkt_len;
-               nfp_net_nfd3_tx_tso(txq, &txd, pkt);
-               nfp_net_nfd3_tx_cksum(txq, &txd, pkt);
-               nfp_net_nfd3_tx_vlan(txq, &txd, pkt);
-
-               /*
-                * mbuf data_len is the data in one segment and pkt_len data
-                * in the whole packet. When the packet is just one segment,
-                * then data_len = pkt_len
-                */
-               pkt_size = pkt->pkt_len;
-
-               while (pkt != NULL && free_descs > 0) {
-                       /* Copying TSO, VLAN and cksum info */
-                       *txds = txd;
-
-                       /* Releasing mbuf used by this descriptor previously*/
-                       if (*lmbuf)
-                               rte_pktmbuf_free_seg(*lmbuf);
-
-                       /*
-                        * Linking mbuf with descriptor for being released
-                        * next time descriptor is used
-                        */
-                       *lmbuf = pkt;
-
-                       dma_size = pkt->data_len;
-                       dma_addr = rte_mbuf_data_iova(pkt);
-                       PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
-                                  "%" PRIx64 "", dma_addr);
-
-                       /* Filling descriptors fields */
-                       txds->dma_len = dma_size;
-                       txds->data_len = txd.data_len;
-                       txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
-                       txds->dma_addr_lo = (dma_addr & 0xffffffff);
-                       free_descs--;
-
-                       txq->wr_p++;
-                       if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
-                               txq->wr_p = 0;
-
-                       pkt_size -= dma_size;
-
-                       /*
-                        * Making the EOP, packets with just one segment
-                        * the priority
-                        */
-                       if (likely(pkt_size == 0))
-                               txds->offset_eop = PCIE_DESC_TX_EOP;
-                       else
-                               txds->offset_eop = 0;
-
-                       /* Set the meta_len */
-                       txds->offset_eop |= meta_data.length;
-
-                       pkt = pkt->next;
-                       /* Referencing next free TX descriptor */
-                       txds = &txq->txds[txq->wr_p];
-                       lmbuf = &txq->txbufs[txq->wr_p].mbuf;
-                       issued_descs++;
-               }
-       }
-
-xmit_end:
-       /* Increment write pointers. Force memory write before we let HW know */
-       rte_wmb();
-       nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
-
-       return i;
-}
-
 static void
 nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt,
                struct nfp_net_txq *txq,
diff --git a/drivers/net/nfp/nfp_rxtx.h b/drivers/net/nfp/nfp_rxtx.h
index f016bf732c..6c81a98ae0 100644
--- a/drivers/net/nfp/nfp_rxtx.h
+++ b/drivers/net/nfp/nfp_rxtx.h
@@ -96,26 +96,10 @@ struct nfp_meta_parsed {
 /* Descriptor alignment */
 #define NFP_ALIGN_RING_DESC 128
 
-/* TX descriptor format */
-#define PCIE_DESC_TX_EOP                (1 << 7)
-#define PCIE_DESC_TX_OFFSET_MASK        (0x7f)
-
-/* Flags in the host TX descriptor */
-#define PCIE_DESC_TX_CSUM               (1 << 7)
-#define PCIE_DESC_TX_IP4_CSUM           (1 << 6)
-#define PCIE_DESC_TX_TCP_CSUM           (1 << 5)
-#define PCIE_DESC_TX_UDP_CSUM           (1 << 4)
-#define PCIE_DESC_TX_VLAN               (1 << 3)
-#define PCIE_DESC_TX_LSO                (1 << 2)
-#define PCIE_DESC_TX_ENCAP_NONE         (0)
-#define PCIE_DESC_TX_ENCAP              (1 << 1)
-#define PCIE_DESC_TX_O_IP4_CSUM         (1 << 0)
-
 #define NFDK_TX_MAX_DATA_PER_HEAD       0x00001000
 #define NFDK_DESC_TX_DMA_LEN_HEAD       0x0fff
 #define NFDK_DESC_TX_TYPE_HEAD          0xf000
 #define NFDK_DESC_TX_DMA_LEN            0x3fff
-#define NFD3_TX_DESC_PER_SIMPLE_PKT     1
 #define NFDK_TX_DESC_PER_SIMPLE_PKT     2
 #define NFDK_DESC_TX_TYPE_TSO           2
 #define NFDK_DESC_TX_TYPE_SIMPLE        8
@@ -139,37 +123,6 @@ struct nfp_meta_parsed {
                                        (idx) % NFDK_TX_DESC_BLOCK_CNT)
 #define D_IDX(ring, idx)               ((idx) & ((ring)->tx_count - 1))
 
-struct nfp_net_nfd3_tx_desc {
-       union {
-               struct {
-                       uint8_t dma_addr_hi; /* High bits of host buf address */
-                       __le16 dma_len;     /* Length to DMA for this desc */
-                       uint8_t offset_eop; /* Offset in buf where pkt starts +
-                                            * highest bit is eop flag, low 7bit is meta_len.
-                                            */
-                       __le32 dma_addr_lo; /* Low 32bit of host buf addr */
-
-                       __le16 mss;         /* MSS to be used for LSO */
-                       uint8_t lso_hdrlen; /* LSO, where the data starts */
-                       uint8_t flags;      /* TX Flags, see @PCIE_DESC_TX_* */
-
-                       union {
-                               struct {
-                                       /*
-                                        * L3 and L4 header offsets required
-                                        * for TSOv2
-                                        */
-                                       uint8_t l3_offset;
-                                       uint8_t l4_offset;
-                               };
-                               __le16 vlan; /* VLAN tag to add if indicated */
-                       };
-                       __le16 data_len;    /* Length of frame + meta data */
-               } __rte_packed;
-               __le32 vals[4];
-       };
-};
-
 struct nfp_net_nfdk_tx_desc {
        union {
                struct {
@@ -397,30 +350,6 @@ nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
        rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
 }
 
-/* Leaving always free descriptors for avoiding wrapping confusion */
-static inline uint32_t
-nfp_net_nfd3_free_tx_desc(struct nfp_net_txq *txq)
-{
-       if (txq->wr_p >= txq->rd_p)
-               return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
-       else
-               return txq->rd_p - txq->wr_p - 8;
-}
-
-/*
- * nfp_net_nfd3_txq_full() - Check if the TX queue free descriptors
- * is below tx_free_threshold for firmware of nfd3
- *
- * @txq: TX queue to check
- *
- * This function uses the host copy* of read/write pointers.
- */
-static inline uint32_t
-nfp_net_nfd3_txq_full(struct nfp_net_txq *txq)
-{
-       return (nfp_net_nfd3_free_tx_desc(txq) < txq->tx_free_thresh);
-}
-
 /* set mbuf checksum flags based on RX descriptor flags */
 static inline void
 nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
@@ -449,82 +378,6 @@ nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
                mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
 }
 
-/* nfp_net_nfd3_tx_tso() - Set NFD3 TX descriptor for TSO */
-static inline void
-nfp_net_nfd3_tx_tso(struct nfp_net_txq *txq,
-               struct nfp_net_nfd3_tx_desc *txd,
-               struct rte_mbuf *mb)
-{
-       uint64_t ol_flags;
-       struct nfp_net_hw *hw = txq->hw;
-
-       if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
-               goto clean_txd;
-
-       ol_flags = mb->ol_flags;
-
-       if (!(ol_flags & RTE_MBUF_F_TX_TCP_SEG))
-               goto clean_txd;
-
-       txd->l3_offset = mb->l2_len;
-       txd->l4_offset = mb->l2_len + mb->l3_len;
-       txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
-
-       if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
-               txd->l3_offset += mb->outer_l2_len + mb->outer_l3_len;
-               txd->l4_offset += mb->outer_l2_len + mb->outer_l3_len;
-               txd->lso_hdrlen += mb->outer_l2_len + mb->outer_l3_len;
-       }
-
-       txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
-       txd->flags = PCIE_DESC_TX_LSO;
-       return;
-
-clean_txd:
-       txd->flags = 0;
-       txd->l3_offset = 0;
-       txd->l4_offset = 0;
-       txd->lso_hdrlen = 0;
-       txd->mss = 0;
-}
-
-/* nfp_net_nfd3_tx_cksum() - Set TX CSUM offload flags in NFD3 TX descriptor */
-static inline void
-nfp_net_nfd3_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_nfd3_tx_desc *txd,
-                struct rte_mbuf *mb)
-{
-       uint64_t ol_flags;
-       struct nfp_net_hw *hw = txq->hw;
-
-       if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
-               return;
-
-       ol_flags = mb->ol_flags;
-
-       /* Set TCP csum offload if TSO enabled. */
-       if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
-               txd->flags |= PCIE_DESC_TX_TCP_CSUM;
-
-       /* IPv6 does not need checksum */
-       if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
-               txd->flags |= PCIE_DESC_TX_IP4_CSUM;
-
-       if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
-               txd->flags |= PCIE_DESC_TX_ENCAP;
-
-       switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
-       case RTE_MBUF_F_TX_UDP_CKSUM:
-               txd->flags |= PCIE_DESC_TX_UDP_CSUM;
-               break;
-       case RTE_MBUF_F_TX_TCP_CKSUM:
-               txd->flags |= PCIE_DESC_TX_TCP_CSUM;
-               break;
-       }
-
-       if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK))
-               txd->flags |= PCIE_DESC_TX_CSUM;
-}
-
 int nfp_net_rx_freelist_setup(struct rte_eth_dev *dev);
 uint32_t nfp_net_rx_queue_count(void *rx_queue);
 uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -537,8 +390,7 @@ int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                  struct rte_mempool *mp);
 void nfp_net_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx);
 void nfp_net_reset_tx_queue(struct nfp_net_txq *txq);
-uint16_t nfp_net_nfd3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
-                                 uint16_t nb_pkts);
+
 int nfp_net_tx_queue_setup(struct rte_eth_dev *dev,
                uint16_t queue_idx,
                uint16_t nb_desc,
@@ -548,6 +400,9 @@ uint16_t nfp_net_nfdk_xmit_pkts(void *tx_queue,
                struct rte_mbuf **tx_pkts,
                uint16_t nb_pkts);
 int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
+void nfp_net_set_meta_vlan(struct nfp_net_meta_raw *meta_data,
+               struct rte_mbuf *pkt,
+               uint8_t layer);
 
 #endif /* _NFP_RXTX_H_ */
 /*
-- 
2.39.1
