Adjust the coding style of the NFDK structs and logic.
Delete some unneeded debug log messages.

Signed-off-by: Chaoyong He <chaoyong...@corigine.com>
Reviewed-by: Niklas Söderlund <niklas.soderl...@corigine.com>
---
 drivers/net/nfp/nfdk/nfp_nfdk.h    |  69 +++++++-------
 drivers/net/nfp/nfdk/nfp_nfdk_dp.c | 140 +++++++++++++++--------------
 2 files changed, 103 insertions(+), 106 deletions(-)

diff --git a/drivers/net/nfp/nfdk/nfp_nfdk.h b/drivers/net/nfp/nfdk/nfp_nfdk.h
index 43e4d75432..9af9176eb7 100644
--- a/drivers/net/nfp/nfdk/nfp_nfdk.h
+++ b/drivers/net/nfp/nfdk/nfp_nfdk.h
@@ -43,27 +43,27 @@
 struct nfp_net_nfdk_tx_desc {
        union {
                struct {
-                       __le16 dma_addr_hi;  /* High bits of host buf address */
-                       __le16 dma_len_type; /* Length to DMA for this desc */
-                       __le32 dma_addr_lo;  /* Low 32bit of host buf addr */
+                       uint16_t dma_addr_hi;  /* High bits of host buf address */
+                       uint16_t dma_len_type; /* Length to DMA for this desc */
+                       uint32_t dma_addr_lo;  /* Low 32bit of host buf addr */
                };
 
                struct {
-                       __le16 mss;     /* MSS to be used for LSO */
-                       uint8_t lso_hdrlen;  /* LSO, TCP payload offset */
-                       uint8_t lso_totsegs; /* LSO, total segments */
-                       uint8_t l3_offset;   /* L3 header offset */
-                       uint8_t l4_offset;   /* L4 header offset */
-                       __le16 lso_meta_res; /* Rsvd bits in TSO metadata */
+                       uint16_t mss;          /* MSS to be used for LSO */
+                       uint8_t lso_hdrlen;    /* LSO, TCP payload offset */
+                       uint8_t lso_totsegs;   /* LSO, total segments */
+                       uint8_t l3_offset;     /* L3 header offset */
+                       uint8_t l4_offset;     /* L4 header offset */
+                       uint16_t lso_meta_res; /* Rsvd bits in TSO metadata */
                };
 
                struct {
-                       uint8_t flags;  /* TX Flags, see @NFDK_DESC_TX_* */
-                       uint8_t reserved[7];    /* meta byte placeholder */
+                       uint8_t flags;         /* TX Flags, see @NFDK_DESC_TX_* */
+                       uint8_t reserved[7];   /* meta byte placeholder */
                };
 
-               __le32 vals[2];
-               __le64 raw;
+               uint32_t vals[2];
+               uint64_t raw;
        };
 };
 
@@ -89,7 +89,7 @@ nfp_net_nfdk_free_tx_desc(struct nfp_net_txq *txq)
  *
  * This function uses the host copy* of read/write pointers.
  */
-static inline uint32_t
+static inline bool
 nfp_net_nfdk_txq_full(struct nfp_net_txq *txq)
 {
        return (nfp_net_nfdk_free_tx_desc(txq) < txq->tx_free_thresh);
@@ -97,7 +97,8 @@ nfp_net_nfdk_txq_full(struct nfp_net_txq *txq)
 
 /* nfp_net_nfdk_tx_cksum() - Set TX CSUM offload flags in TX descriptor of nfdk */
 static inline uint64_t
-nfp_net_nfdk_tx_cksum(struct nfp_net_txq *txq, struct rte_mbuf *mb,
+nfp_net_nfdk_tx_cksum(struct nfp_net_txq *txq,
+               struct rte_mbuf *mb,
                uint64_t flags)
 {
        uint64_t ol_flags;
@@ -109,17 +110,17 @@ nfp_net_nfdk_tx_cksum(struct nfp_net_txq *txq, struct rte_mbuf *mb,
        ol_flags = mb->ol_flags;
 
        /* Set TCP csum offload if TSO enabled. */
-       if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+       if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
                flags |= NFDK_DESC_TX_L4_CSUM;
 
-       if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
+       if ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) != 0)
                flags |= NFDK_DESC_TX_ENCAP;
 
        /* IPv6 does not need checksum */
-       if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
+       if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0)
                flags |= NFDK_DESC_TX_L3_CSUM;
 
-       if (ol_flags & RTE_MBUF_F_TX_L4_MASK)
+       if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != 0)
                flags |= NFDK_DESC_TX_L4_CSUM;
 
        return flags;
@@ -127,19 +128,22 @@ nfp_net_nfdk_tx_cksum(struct nfp_net_txq *txq, struct rte_mbuf *mb,
 
 /* nfp_net_nfdk_tx_tso() - Set TX descriptor for TSO of nfdk */
 static inline uint64_t
-nfp_net_nfdk_tx_tso(struct nfp_net_txq *txq, struct rte_mbuf *mb)
+nfp_net_nfdk_tx_tso(struct nfp_net_txq *txq,
+               struct rte_mbuf *mb)
 {
+       uint8_t outer_len;
        uint64_t ol_flags;
        struct nfp_net_nfdk_tx_desc txd;
        struct nfp_net_hw *hw = txq->hw;
 
+       txd.raw = 0;
+
        if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) == 0)
-               goto clean_txd;
+               return txd.raw;
 
        ol_flags = mb->ol_flags;
-
        if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0)
-               goto clean_txd;
+               return txd.raw;
 
        txd.l3_offset = mb->l2_len;
        txd.l4_offset = mb->l2_len + mb->l3_len;
@@ -148,22 +152,13 @@ nfp_net_nfdk_tx_tso(struct nfp_net_txq *txq, struct rte_mbuf *mb)
        txd.lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
        txd.lso_totsegs = (mb->pkt_len + mb->tso_segsz) / mb->tso_segsz;
 
-       if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
-               txd.l3_offset += mb->outer_l2_len + mb->outer_l3_len;
-               txd.l4_offset += mb->outer_l2_len + mb->outer_l3_len;
-               txd.lso_hdrlen += mb->outer_l2_len + mb->outer_l3_len;
+       if ((ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) != 0) {
+               outer_len = mb->outer_l2_len + mb->outer_l3_len;
+               txd.l3_offset += outer_len;
+               txd.l4_offset += outer_len;
+               txd.lso_hdrlen += outer_len;
        }
 
-       return txd.raw;
-
-clean_txd:
-       txd.l3_offset = 0;
-       txd.l4_offset = 0;
-       txd.lso_hdrlen = 0;
-       txd.mss = 0;
-       txd.lso_totsegs = 0;
-       txd.lso_meta_res = 0;
-
        return txd.raw;
 }
 
diff --git a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
index ec937c1f50..013f369b55 100644
--- a/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
+++ b/drivers/net/nfp/nfdk/nfp_nfdk_dp.c
@@ -24,14 +24,18 @@ nfp_net_nfdk_headlen_to_segs(unsigned int headlen)
 }
 
 static int
-nfp_net_nfdk_tx_maybe_close_block(struct nfp_net_txq *txq, struct rte_mbuf *pkt)
+nfp_net_nfdk_tx_maybe_close_block(struct nfp_net_txq *txq,
+               struct rte_mbuf *pkt)
 {
-       unsigned int n_descs, wr_p, i, nop_slots;
+       uint32_t i;
+       uint32_t wr_p;
+       uint16_t n_descs;
+       uint32_t nop_slots;
        struct rte_mbuf *pkt_temp;
 
        pkt_temp = pkt;
        n_descs = nfp_net_nfdk_headlen_to_segs(pkt_temp->data_len);
-       while (pkt_temp->next) {
+       while (pkt_temp->next != NULL) {
                pkt_temp = pkt_temp->next;
                n_descs += DIV_ROUND_UP(pkt_temp->data_len, NFDK_TX_MAX_DATA_PER_DESC);
        }
@@ -39,14 +43,14 @@ nfp_net_nfdk_tx_maybe_close_block(struct nfp_net_txq *txq, struct rte_mbuf *pkt)
        if (unlikely(n_descs > NFDK_TX_DESC_GATHER_MAX))
                return -EINVAL;
 
-       /* Under count by 1 (don't count meta) for the round down to work out */
-       n_descs += !!(pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG);
+       if ((pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0)
+               n_descs++;
 
        if (round_down(txq->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
                        round_down(txq->wr_p + n_descs, NFDK_TX_DESC_BLOCK_CNT))
                goto close_block;
 
-       if ((uint32_t)txq->data_pending + pkt->pkt_len > NFDK_TX_MAX_DATA_PER_BLOCK)
+       if (txq->data_pending + pkt->pkt_len > NFDK_TX_MAX_DATA_PER_BLOCK)
                goto close_block;
 
        return 0;
@@ -126,39 +130,47 @@ nfp_net_nfdk_set_meta_data(struct rte_mbuf *pkt,
 }
 
 uint16_t
-nfp_net_nfdk_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+nfp_net_nfdk_xmit_pkts(void *tx_queue,
+               struct rte_mbuf **tx_pkts,
+               uint16_t nb_pkts)
 {
        uint32_t buf_idx;
        uint64_t dma_addr;
-       uint16_t free_descs;
+       uint32_t free_descs;
        uint32_t npkts = 0;
        uint64_t metadata = 0;
-       uint16_t issued_descs = 0;
-       struct nfp_net_txq *txq;
+       struct rte_mbuf *pkt;
        struct nfp_net_hw *hw;
-       struct nfp_net_nfdk_tx_desc *ktxds;
-       struct rte_mbuf *pkt, *temp_pkt;
        struct rte_mbuf **lmbuf;
+       struct nfp_net_txq *txq;
+       uint32_t issued_descs = 0;
+       struct rte_mbuf *temp_pkt;
+       struct nfp_net_nfdk_tx_desc *ktxds;
 
        txq = tx_queue;
        hw = txq->hw;
 
        PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
-               txq->qidx, txq->wr_p, nb_pkts);
+                       txq->qidx, txq->wr_p, nb_pkts);
 
-       if ((nfp_net_nfdk_free_tx_desc(txq) < NFDK_TX_DESC_PER_SIMPLE_PKT *
-                       nb_pkts) || (nfp_net_nfdk_txq_full(txq)))
+       if (nfp_net_nfdk_free_tx_desc(txq) < NFDK_TX_DESC_PER_SIMPLE_PKT * nb_pkts ||
+                       nfp_net_nfdk_txq_full(txq))
                nfp_net_tx_free_bufs(txq);
 
-       free_descs = (uint16_t)nfp_net_nfdk_free_tx_desc(txq);
+       free_descs = nfp_net_nfdk_free_tx_desc(txq);
        if (unlikely(free_descs == 0))
                return 0;
 
        PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets", txq->qidx, nb_pkts);
+
        /* Sending packets */
-       while ((npkts < nb_pkts) && free_descs) {
-               uint32_t type, dma_len, dlen_type, tmp_dlen;
-               int nop_descs, used_descs;
+       while (npkts < nb_pkts && free_descs > 0) {
+               int nop_descs;
+               uint32_t type;
+               uint32_t dma_len;
+               uint32_t tmp_dlen;
+               uint32_t dlen_type;
+               uint32_t used_descs;
 
                pkt = *(tx_pkts + npkts);
                nop_descs = nfp_net_nfdk_tx_maybe_close_block(txq, pkt);
@@ -167,6 +179,7 @@ nfp_net_nfdk_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk
 
                issued_descs += nop_descs;
                ktxds = &txq->ktxds[txq->wr_p];
+
                /* Grabbing the mbuf linked to the current descriptor */
                buf_idx = txq->wr_p;
                lmbuf = &txq->txbufs[buf_idx++].mbuf;
@@ -177,8 +190,8 @@ nfp_net_nfdk_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk
                nfp_net_nfdk_set_meta_data(pkt, txq, &metadata);
 
                if (unlikely(pkt->nb_segs > 1 &&
-                               !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
-                       PMD_INIT_LOG(ERR, "Multisegment packet not supported");
+                               (hw->cap & NFP_NET_CFG_CTRL_GATHER) == 0)) {
+                       PMD_TX_LOG(ERR, "Multisegment packet not supported");
                        goto xmit_end;
                }
 
@@ -186,10 +199,9 @@ nfp_net_nfdk_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk
                 * Checksum and VLAN flags just in the first descriptor for a
                 * multisegment packet, but TSO info needs to be in all of them.
                 */
-
                dma_len = pkt->data_len;
-               if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) &&
-                               (pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+               if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) != 0 &&
+                               (pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0) {
                        type = NFDK_DESC_TX_TYPE_TSO;
                } else if (pkt->next == NULL && dma_len <= NFDK_TX_MAX_DATA_PER_HEAD) {
                        type = NFDK_DESC_TX_TYPE_SIMPLE;
@@ -207,13 +219,11 @@ nfp_net_nfdk_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk
                 * value of 'dma_len & NFDK_DESC_TX_DMA_LEN_HEAD' will be less
                 * than packet head len.
                 */
-               dlen_type = (dma_len > NFDK_DESC_TX_DMA_LEN_HEAD ?
-                               NFDK_DESC_TX_DMA_LEN_HEAD : dma_len) |
-                       (NFDK_DESC_TX_TYPE_HEAD & (type << 12));
+               if (dma_len > NFDK_DESC_TX_DMA_LEN_HEAD)
+                       dma_len = NFDK_DESC_TX_DMA_LEN_HEAD;
+               dlen_type = dma_len | (NFDK_DESC_TX_TYPE_HEAD & (type << 12));
                ktxds->dma_len_type = rte_cpu_to_le_16(dlen_type);
                dma_addr = rte_mbuf_data_iova(pkt);
-               PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
-                               "%" PRIx64 "", dma_addr);
                ktxds->dma_addr_hi = rte_cpu_to_le_16(dma_addr >> 32);
                ktxds->dma_addr_lo = rte_cpu_to_le_32(dma_addr & 0xffffffff);
                ktxds++;
@@ -230,8 +240,8 @@ nfp_net_nfdk_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk
                 * The rest of the data (if any) will be in larger DMA descriptors
                 * and is handled with the dma_len loop.
                 */
-               while (pkt) {
-                       if (*lmbuf)
+               while (pkt != NULL) {
+                       if (*lmbuf != NULL)
                                rte_pktmbuf_free_seg(*lmbuf);
                        *lmbuf = pkt;
                        while (dma_len > 0) {
@@ -253,8 +263,6 @@ nfp_net_nfdk_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk
                        pkt = pkt->next;
                        dma_len = pkt->data_len;
                        dma_addr = rte_mbuf_data_iova(pkt);
-                       PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
-                               "%" PRIx64 "", dma_addr);
 
                        lmbuf = &txq->txbufs[buf_idx++].mbuf;
                }
@@ -264,16 +272,16 @@ nfp_net_nfdk_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk
                ktxds->raw = rte_cpu_to_le_64(nfp_net_nfdk_tx_cksum(txq, temp_pkt, metadata));
                ktxds++;
 
-               if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) &&
-                               (temp_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG)) {
+               if ((hw->cap & NFP_NET_CFG_CTRL_LSO_ANY) != 0 &&
+                               (temp_pkt->ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0) {
                        ktxds->raw = rte_cpu_to_le_64(nfp_net_nfdk_tx_tso(txq, temp_pkt));
                        ktxds++;
                }
 
                used_descs = ktxds - txq->ktxds - txq->wr_p;
                if (round_down(txq->wr_p, NFDK_TX_DESC_BLOCK_CNT) !=
-                       round_down(txq->wr_p + used_descs - 1, NFDK_TX_DESC_BLOCK_CNT)) {
-                       PMD_INIT_LOG(INFO, "Used descs cross block boundary");
+                               round_down(txq->wr_p + used_descs - 1, NFDK_TX_DESC_BLOCK_CNT)) {
+                       PMD_TX_LOG(INFO, "Used descs cross block boundary");
                        goto xmit_end;
                }
 
@@ -285,7 +293,7 @@ nfp_net_nfdk_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pk
 
                issued_descs += used_descs;
                npkts++;
-               free_descs = (uint16_t)nfp_net_nfdk_free_tx_desc(txq);
+               free_descs = nfp_net_nfdk_free_tx_desc(txq);
        }
 
 xmit_end:
@@ -304,13 +312,14 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
                const struct rte_eth_txconf *tx_conf)
 {
        int ret;
+       size_t size;
+       uint32_t tx_desc_sz;
        uint16_t min_tx_desc;
        uint16_t max_tx_desc;
-       const struct rte_memzone *tz;
-       struct nfp_net_txq *txq;
-       uint16_t tx_free_thresh;
        struct nfp_net_hw *hw;
-       uint32_t tx_desc_sz;
+       uint16_t tx_free_thresh;
+       struct nfp_net_txq *txq;
+       const struct rte_memzone *tz;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
@@ -323,30 +332,27 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
        /* Validating number of descriptors */
        tx_desc_sz = nb_desc * sizeof(struct nfp_net_nfdk_tx_desc);
        if ((NFDK_TX_DESC_PER_SIMPLE_PKT * tx_desc_sz) % NFP_ALIGN_RING_DESC != 0 ||
-           (NFDK_TX_DESC_PER_SIMPLE_PKT * nb_desc) % NFDK_TX_DESC_BLOCK_CNT != 0 ||
-            nb_desc > max_tx_desc || nb_desc < min_tx_desc) {
+                       (NFDK_TX_DESC_PER_SIMPLE_PKT * nb_desc) % NFDK_TX_DESC_BLOCK_CNT != 0 ||
+                       nb_desc > max_tx_desc || nb_desc < min_tx_desc) {
                PMD_DRV_LOG(ERR, "Wrong nb_desc value");
                return -EINVAL;
        }
 
-       tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
-                               tx_conf->tx_free_thresh :
-                               DEFAULT_TX_FREE_THRESH);
-
-       if (tx_free_thresh > (nb_desc)) {
-               PMD_DRV_LOG(ERR,
-                       "tx_free_thresh must be less than the number of TX "
-                       "descriptors. (tx_free_thresh=%u port=%d "
-                       "queue=%d)", (unsigned int)tx_free_thresh,
-                       dev->data->port_id, (int)queue_idx);
-               return -(EINVAL);
+       tx_free_thresh = tx_conf->tx_free_thresh;
+       if (tx_free_thresh == 0)
+               tx_free_thresh = DEFAULT_TX_FREE_THRESH;
+       if (tx_free_thresh > nb_desc) {
+               PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the number of TX "
+                               "descriptors. (tx_free_thresh=%u port=%d queue=%d)",
+                               tx_free_thresh, dev->data->port_id, queue_idx);
+               return -EINVAL;
        }
 
        /*
         * Free memory prior to re-allocation if needed. This is the case after
         * calling nfp_net_stop
         */
-       if (dev->data->tx_queues[queue_idx]) {
+       if (dev->data->tx_queues[queue_idx] != NULL) {
                PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
                                queue_idx);
                nfp_net_tx_queue_release(dev, queue_idx);
@@ -366,11 +372,10 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
         * handle the maximum ring size is allocated in order to allow for
         * resizing in later calls to the queue setup function.
         */
-       tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
-                               sizeof(struct nfp_net_nfdk_tx_desc) *
-                               NFDK_TX_DESC_PER_SIMPLE_PKT *
-                               max_tx_desc, NFP_MEMZONE_ALIGN,
-                               socket_id);
+       size = sizeof(struct nfp_net_nfdk_tx_desc) * max_tx_desc *
+                       NFDK_TX_DESC_PER_SIMPLE_PKT;
+       tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
+                       NFP_MEMZONE_ALIGN, socket_id);
        if (tz == NULL) {
                PMD_DRV_LOG(ERR, "Error allocating tx dma");
                nfp_net_tx_queue_release(dev, queue_idx);
@@ -387,29 +392,26 @@ nfp_net_nfdk_tx_queue_setup(struct rte_eth_dev *dev,
        txq->qidx = queue_idx;
        txq->tx_qcidx = queue_idx * hw->stride_tx;
        txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);
-
        txq->port_id = dev->data->port_id;
 
        /* Saving physical and virtual addresses for the TX ring */
-       txq->dma = (uint64_t)tz->iova;
-       txq->ktxds = (struct nfp_net_nfdk_tx_desc *)tz->addr;
+       txq->dma = tz->iova;
+       txq->ktxds = tz->addr;
 
        /* mbuf pointers array for referencing mbufs linked to TX descriptors */
        txq->txbufs = rte_zmalloc_socket("txq->txbufs",
-                               sizeof(*txq->txbufs) * txq->tx_count,
-                               RTE_CACHE_LINE_SIZE, socket_id);
-
+                       sizeof(*txq->txbufs) * txq->tx_count,
+                       RTE_CACHE_LINE_SIZE, socket_id);
        if (txq->txbufs == NULL) {
                nfp_net_tx_queue_release(dev, queue_idx);
                return -ENOMEM;
        }
-       PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
-               txq->txbufs, txq->ktxds, (unsigned long)txq->dma);
 
        nfp_net_reset_tx_queue(txq);
 
        dev->data->tx_queues[queue_idx] = txq;
        txq->hw = hw;
+
        /*
         * Telling the HW about the physical address of the TX ring and number
         * of descriptors in log2 format
-- 
2.39.1

Reply via email to