The data endianness of the Rx descriptor should be little endian, and the
related logic should be modified accordingly.

Fixes: 3745dd9dd86f ("net/nfp: adjust coding style for NFD3")
Cc: stable@dpdk.org

Signed-off-by: Chaoyong He <chaoyong.he@corigine.com>
Reviewed-by: Long Wu <long.wu@corigine.com>
Reviewed-by: Peng Zhang <peng.zhang@corigine.com>
---
 drivers/net/nfp/flower/nfp_flower_ctrl.c | 10 ++++---
 drivers/net/nfp/nfp_net_meta.c           |  8 ++++--
 drivers/net/nfp/nfp_rxtx.c               | 36 ++++++++++++++----------
 drivers/net/nfp/nfp_rxtx.h               | 16 +++++------
 drivers/net/nfp/nfp_rxtx_vec_avx2.c      |  6 ++--
 5 files changed, 45 insertions(+), 31 deletions(-)

diff --git a/drivers/net/nfp/flower/nfp_flower_ctrl.c b/drivers/net/nfp/flower/nfp_flower_ctrl.c
index 21bf26b738..23d1b770d8 100644
--- a/drivers/net/nfp/flower/nfp_flower_ctrl.c
+++ b/drivers/net/nfp/flower/nfp_flower_ctrl.c
@@ -22,6 +22,7 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,
                struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
 {
+       uint16_t data_len;
        uint64_t dma_addr;
        uint16_t avail = 0;
        struct rte_mbuf *mb;
@@ -78,9 +79,10 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,
                 */
                mb = rxb->mbuf;
                rxb->mbuf = new_mb;
+               data_len = rte_le_to_cpu_16(rxds->rxd.data_len);
 
                /* Size of this segment */
-               mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
+               mb->data_len = data_len - NFP_DESC_META_LEN(rxds);
                /* Size of the whole packet. We just support 1 segment */
                mb->pkt_len = mb->data_len;
 
@@ -111,10 +113,10 @@ nfp_flower_ctrl_vnic_recv(void *rx_queue,
                /* Now resetting and updating the descriptor */
                rxds->vals[0] = 0;
                rxds->vals[1] = 0;
-               dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb));
+               dma_addr = rte_mbuf_data_iova_default(new_mb);
                rxds->fld.dd = 0;
-               rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xffff;
-               rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
+               rxds->fld.dma_addr_hi = rte_cpu_to_le_16((dma_addr >> 32) & 0xffff);
+               rxds->fld.dma_addr_lo = rte_cpu_to_le_32(dma_addr & 0xffffffff);
                nb_hold++;
 
                rxq->rd_p++;
diff --git a/drivers/net/nfp/nfp_net_meta.c b/drivers/net/nfp/nfp_net_meta.c
index 70169eba6b..6dfe7f0eb2 100644
--- a/drivers/net/nfp/nfp_net_meta.c
+++ b/drivers/net/nfp/nfp_net_meta.c
@@ -111,6 +111,7 @@ nfp_net_meta_parse_vlan(const struct nfp_net_meta_parsed *meta,
                struct nfp_net_rxq *rxq,
                struct rte_mbuf *mb)
 {
+       uint16_t flags;
        uint32_t ctrl = rxq->hw->super.ctrl;
 
        /* Skip if hardware don't support setting vlan. */
@@ -131,7 +132,8 @@ nfp_net_meta_parse_vlan(const struct nfp_net_meta_parsed *meta,
                        mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
                }
        } else if ((ctrl & NFP_NET_CFG_CTRL_RXVLAN) != 0) {
-               if ((rxd->rxd.flags & PCIE_DESC_RX_VLAN) != 0) {
+               flags = rte_le_to_cpu_16(rxd->rxd.flags);
+               if ((flags & PCIE_DESC_RX_VLAN) != 0) {
                        mb->vlan_tci = rte_cpu_to_le_32(rxd->rxd.offload_info);
                        mb->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
                }
@@ -234,10 +236,12 @@ nfp_net_meta_parse(struct nfp_net_rx_desc *rxds,
                struct rte_mbuf *mb,
                struct nfp_net_meta_parsed *meta)
 {
+       uint16_t flags;
        uint8_t *meta_base;
        rte_be32_t meta_header;
 
        meta->flags = 0;
+       flags = rte_le_to_cpu_16(rxds->rxd.flags);
 
        if (unlikely(NFP_DESC_META_LEN(rxds) == 0))
                return;
@@ -258,7 +262,7 @@ nfp_net_meta_parse(struct nfp_net_rx_desc *rxds,
                }
                break;
        case NFP_NET_METAFORMAT_SINGLE:
-               if ((rxds->rxd.flags & PCIE_DESC_RX_RSS) != 0) {
+               if ((flags & PCIE_DESC_RX_RSS) != 0) {
                        nfp_net_meta_parse_single(meta_base, meta_header, meta);
                        nfp_net_meta_parse_hash(meta, rxq, mb);
                }
diff --git a/drivers/net/nfp/nfp_rxtx.c b/drivers/net/nfp/nfp_rxtx.c
index e0c1fb0987..c86bba9d87 100644
--- a/drivers/net/nfp/nfp_rxtx.c
+++ b/drivers/net/nfp/nfp_rxtx.c
@@ -122,24 +122,27 @@ nfp_net_rx_cksum(struct nfp_net_rxq *rxq,
                struct nfp_net_rx_desc *rxd,
                struct rte_mbuf *mb)
 {
+       uint16_t flags;
        struct nfp_net_hw *hw = rxq->hw;
 
        if ((hw->super.ctrl & NFP_NET_CFG_CTRL_RXCSUM) == 0)
                return;
 
+       flags = rte_le_to_cpu_16(rxd->rxd.flags);
+
        /* If IPv4 and IP checksum error, fail */
-       if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) != 0 &&
-                       (rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK) == 0))
+       if (unlikely((flags & PCIE_DESC_RX_IP4_CSUM) != 0 &&
+                       (flags & PCIE_DESC_RX_IP4_CSUM_OK) == 0))
                mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
        else
                mb->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
 
        /* If neither UDP nor TCP return */
-       if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) == 0 &&
-                       (rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) == 0)
+       if ((flags & PCIE_DESC_RX_TCP_CSUM) == 0 &&
+                       (flags & PCIE_DESC_RX_UDP_CSUM) == 0)
                return;
 
-       if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK) != 0)
+       if (likely(flags & PCIE_DESC_RX_L4_CSUM_OK) != 0)
                mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
        else
                mb->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
@@ -165,12 +168,12 @@ nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
                        return -ENOMEM;
                }
 
-               dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
+               dma_addr = rte_mbuf_data_iova_default(mbuf);
 
                rxd = &rxq->rxds[i];
                rxd->fld.dd = 0;
-               rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xffff;
-               rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
+               rxd->fld.dma_addr_hi = rte_cpu_to_le_16((dma_addr >> 32) & 0xffff);
+               rxd->fld.dma_addr_lo = rte_cpu_to_le_32(dma_addr & 0xffffffff);
 
                rxe[i].mbuf = mbuf;
        }
@@ -356,13 +359,14 @@ nfp_net_parse_ptype(struct nfp_net_rxq *rxq,
                struct nfp_net_rx_desc *rxds,
                struct rte_mbuf *mb)
 {
+       uint16_t rxd_ptype;
        struct nfp_net_hw *hw = rxq->hw;
        struct nfp_ptype_parsed nfp_ptype;
-       uint16_t rxd_ptype = rxds->rxd.offload_info;
 
        if ((hw->super.ctrl_ext & NFP_NET_CFG_CTRL_PKT_TYPE) == 0)
                return;
 
+       rxd_ptype = rte_le_to_cpu_16(rxds->rxd.offload_info);
        if (rxd_ptype == 0 || (rxds->rxd.flags & PCIE_DESC_RX_VLAN) != 0)
                return;
 
@@ -410,6 +414,7 @@ nfp_net_recv_pkts(void *rx_queue,
                struct rte_mbuf **rx_pkts,
                uint16_t nb_pkts)
 {
+       uint16_t data_len;
        uint64_t dma_addr;
        uint16_t avail = 0;
        struct rte_mbuf *mb;
@@ -470,14 +475,15 @@ nfp_net_recv_pkts(void *rx_queue,
                 */
                mb = rxb->mbuf;
                rxb->mbuf = new_mb;
+               data_len = rte_le_to_cpu_16(rxds->rxd.data_len);
 
                PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u.",
-                               rxds->rxd.data_len, rxq->mbuf_size);
+                               data_len, rxq->mbuf_size);
 
                /* Size of this segment */
-               mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
+               mb->data_len = data_len - NFP_DESC_META_LEN(rxds);
                /* Size of the whole packet. We just support 1 segment */
-               mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
+               mb->pkt_len = data_len - NFP_DESC_META_LEN(rxds);
 
                if (unlikely((mb->data_len + hw->rx_offset) > rxq->mbuf_size)) {
                        /*
@@ -512,10 +518,10 @@ nfp_net_recv_pkts(void *rx_queue,
                /* Now resetting and updating the descriptor */
                rxds->vals[0] = 0;
                rxds->vals[1] = 0;
-               dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(new_mb));
+               dma_addr = rte_mbuf_data_iova_default(new_mb);
                rxds->fld.dd = 0;
-               rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xffff;
-               rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;
+               rxds->fld.dma_addr_hi = rte_cpu_to_le_16((dma_addr >> 32) & 0xffff);
+               rxds->fld.dma_addr_lo = rte_cpu_to_le_32(dma_addr & 0xffffffff);
                nb_hold++;
 
                rxq->rd_p++;
diff --git a/drivers/net/nfp/nfp_rxtx.h b/drivers/net/nfp/nfp_rxtx.h
index 4e0ed9da38..cbb6df201f 100644
--- a/drivers/net/nfp/nfp_rxtx.h
+++ b/drivers/net/nfp/nfp_rxtx.h
@@ -118,24 +118,24 @@ struct nfp_net_rx_desc {
        union {
                /** Freelist descriptor. */
                struct __rte_packed_begin {
-                       uint16_t dma_addr_hi;  /**< High bits of buffer address. */
-                       uint8_t spare;         /**< Reserved, must be zero. */
-                       uint8_t dd;            /**< Whether descriptor available. */
-                       uint32_t dma_addr_lo;  /**< Low bits of buffer address. */
+                       rte_le16_t dma_addr_hi;  /**< High bits of buffer address. */
+                       uint8_t spare;           /**< Reserved, must be zero. */
+                       uint8_t dd;              /**< Whether descriptor available. */
+                       rte_le32_t dma_addr_lo;  /**< Low bits of buffer address. */
                } __rte_packed_end fld;
 
                /** RX descriptor. */
                struct __rte_packed_begin {
-                       uint16_t data_len;     /**< Length of frame + metadata. */
+                       rte_le16_t data_len;     /**< Length of frame + metadata. */
                        uint8_t reserved;      /**< Reserved, must be zero. */
                        uint8_t meta_len_dd;   /**< Length of metadata + done flag. */
 
-                       uint16_t flags;        /**< RX flags. */
-                       uint16_t offload_info; /**< Offloading info. */
+                       rte_le16_t flags;        /**< RX flags. */
+                       rte_le16_t offload_info; /**< Offloading info. */
                } __rte_packed_end rxd;
 
                /** Reserved. */
-               uint32_t vals[2];
+               rte_le32_t vals[2];
        };
 };
 
diff --git a/drivers/net/nfp/nfp_rxtx_vec_avx2.c b/drivers/net/nfp/nfp_rxtx_vec_avx2.c
index 66d003f64d..faf3d167d9 100644
--- a/drivers/net/nfp/nfp_rxtx_vec_avx2.c
+++ b/drivers/net/nfp/nfp_rxtx_vec_avx2.c
@@ -92,12 +92,14 @@ nfp_vec_avx2_recv_set_rxpkt1(struct nfp_net_rxq *rxq,
                struct nfp_net_rx_desc *rxds,
                struct rte_mbuf *rx_pkt)
 {
+       uint16_t data_len;
        struct nfp_net_hw *hw = rxq->hw;
        struct nfp_net_meta_parsed meta;
 
-       rx_pkt->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
+       data_len = rte_le_to_cpu_16(rxds->rxd.data_len);
+       rx_pkt->data_len = data_len - NFP_DESC_META_LEN(rxds);
        /* Size of the whole packet. We just support 1 segment */
-       rx_pkt->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
+       rx_pkt->pkt_len = data_len - NFP_DESC_META_LEN(rxds);
 
        /* Filling the received mbuf with packet info */
        if (hw->rx_offset)
-- 
2.43.5

Reply via email to