From: Xuelin Shi <xuelin....@freescale.com>

Enforce the byte-order rules for data exchanged between the CPU and the
ixgbe hardware:
1. the CPU must use rte_le_to_cpu_xx(...) when it reads data owned by ixgbe
2. the CPU must use rte_cpu_to_le_xx(...) when it fills data for ixgbe
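
As an illustration of the two rules, a minimal sketch follows. The
descriptor struct below is a made-up stand-in, not the real
ixgbe_adv_rx_desc/ixgbe_adv_tx_desc layout; the conversion helpers are the
real ones from rte_byteorder.h.

#include <stdint.h>
#include <rte_byteorder.h>

/* Stand-in descriptor, illustrative only (not the real ixgbe layout). */
struct toy_desc {
	volatile uint32_t status;   /* written by the NIC, read by the CPU */
	volatile uint32_t cmd_len;  /* filled by the CPU, read by the NIC */
};

/* Rule 1: descriptor fields are little-endian, so convert on read. */
static inline int
toy_desc_done(volatile struct toy_desc *d, uint32_t dd_bit)
{
	return (rte_le_to_cpu_32(d->status) & dd_bit) != 0;
}

/* Rule 2: convert to little-endian before handing data to the NIC. */
static inline void
toy_desc_fill(volatile struct toy_desc *d, uint32_t cmd, uint32_t len)
{
	d->cmd_len = rte_cpu_to_le_32(cmd | len);
}

On a little-endian CPU both helpers compile to no-ops; on a big-endian CPU
they byte-swap. Note that flag constants such as IXGBE_RXDADV_STAT_DD are
CPU-order values, so either the descriptor field is converted before
masking (as above), or the constant itself is converted, as the RS-bit
hunks below do with rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS).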

Signed-off-by: Xuelin Shi <xuelin.shi@freescale.com>
---
changes for v2:
  rebased on latest head.
  fixed some style issues detected by checkpatch.pl

 lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 106 +++++++++++++++++++++++++---------------
 1 file changed, 67 insertions(+), 39 deletions(-)

diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
index 9da2c7e..961ac08 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
@@ -128,7 +128,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
        int i;

        /* check DD bit on threshold descriptor */
-       status = txq->tx_ring[txq->tx_next_dd].wb.status;
+       status = rte_le_to_cpu_32(txq->tx_ring[txq->tx_next_dd].wb.status);
        if (! (status & IXGBE_ADVTXD_STAT_DD))
                return 0;

@@ -174,11 +174,14 @@ tx4(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
                pkt_len = (*pkts)->data_len;

                /* write data to descriptor */
-               txdp->read.buffer_addr = buf_dma_addr;
+               txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
                txdp->read.cmd_type_len =
-                               ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+                       rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
                txdp->read.olinfo_status =
-                               (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+                       rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
                rte_prefetch0(&(*pkts)->pool);
        }
 }
@@ -194,11 +197,14 @@ tx1(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkts)
        pkt_len = (*pkts)->data_len;

        /* write data to descriptor */
-       txdp->read.buffer_addr = buf_dma_addr;
+       txdp->read.buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
+
        txdp->read.cmd_type_len =
-                       ((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+                       rte_cpu_to_le_32((uint32_t)DCMD_DTYP_FLAGS | pkt_len);
+
        txdp->read.olinfo_status =
-                       (pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+                       rte_cpu_to_le_32(pkt_len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
        rte_prefetch0(&(*pkts)->pool);
 }

@@ -285,7 +291,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                 * a divisor of the ring size
                 */
                tx_r[txq->tx_next_rs].read.cmd_type_len |=
-                       rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+                               rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
                txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);

                txq->tx_tail = 0;
@@ -304,7 +310,7 @@ tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
         */
        if (txq->tx_tail > txq->tx_next_rs) {
                tx_r[txq->tx_next_rs].read.cmd_type_len |=
-                       rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
+                               rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS);
                txq->tx_next_rs = (uint16_t)(txq->tx_next_rs +
                                                txq->tx_rs_thresh);
                if (txq->tx_next_rs >= txq->nb_tx_desc)
@@ -505,6 +511,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
        uint16_t nb_tx_desc = txq->nb_tx_desc;
        uint16_t desc_to_clean_to;
        uint16_t nb_tx_to_clean;
+       uint32_t stat;

        /* Determine the last descriptor needing to be cleaned */
        desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
@@ -513,7 +520,9 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)

        /* Check to make sure the last descriptor to clean is done */
        desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
-       if (! (txr[desc_to_clean_to].wb.status & IXGBE_TXD_STAT_DD))
+
+       stat = rte_le_to_cpu_32(txr[desc_to_clean_to].wb.status);
+       if (!(stat & IXGBE_TXD_STAT_DD))
        {
                PMD_TX_FREE_LOG(DEBUG,
                                "TX descriptor %4u is not done"
@@ -544,7 +553,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq)
         * up to the last descriptor with the RS bit set
         * are done. Only reset the threshold descriptor.
         */
-       txr[desc_to_clean_to].wb.status = 0;
+       txr[desc_to_clean_to].wb.status = rte_cpu_to_le_32(0);

        /* Update the txq to reflect the last descriptor that was cleaned */
        txq->last_desc_cleaned = desc_to_clean_to;
@@ -801,12 +810,14 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                         */
                        slen = m_seg->data_len;
                        buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+
                        txd->read.buffer_addr =
-                               rte_cpu_to_le_64(buf_dma_addr);
+                                       rte_cpu_to_le_64(buf_dma_addr);
                        txd->read.cmd_type_len =
-                               rte_cpu_to_le_32(cmd_type_len | slen);
+                                       rte_cpu_to_le_32(cmd_type_len | slen);
                        txd->read.olinfo_status =
-                               rte_cpu_to_le_32(olinfo_status);
+                                       rte_cpu_to_le_32(olinfo_status);
+
                        txe->last_id = tx_last;
                        tx_id = txe->next_id;
                        txe = txn;
@@ -946,14 +957,16 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
        uint64_t pkt_flags;
        int s[LOOK_AHEAD], nb_dd;
        int i, j, nb_rx = 0;
+       uint32_t stat;


        /* get references to current descriptor and S/W ring entry */
        rxdp = &rxq->rx_ring[rxq->rx_tail];
        rxep = &rxq->sw_ring[rxq->rx_tail];

+       stat = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
        /* check to make sure there is at least 1 packet to receive */
-       if (! (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD))
+       if (!(stat & IXGBE_RXDADV_STAT_DD))
                return 0;

        /*
@@ -965,7 +978,7 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
        {
                /* Read desc statuses backwards to avoid race condition */
                for (j = LOOK_AHEAD-1; j >= 0; --j)
-                       s[j] = rxdp[j].wb.upper.status_error;
+                       s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error);

                /* Compute how many status bits were set */
                nb_dd = 0;
@@ -976,28 +989,36 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)

                /* Translate descriptor info to mbuf format */
                for (j = 0; j < nb_dd; ++j) {
+                       uint16_t tmp16;
+                       uint32_t tmp32;
+
                        mb = rxep[j].mbuf;
-                       pkt_len = (uint16_t)(rxdp[j].wb.upper.length - rxq->crc_len);
+                       tmp16 = rte_le_to_cpu_16(rxdp[j].wb.upper.length);
+                       pkt_len = tmp16 - rxq->crc_len;
                        mb->data_len = pkt_len;
                        mb->pkt_len = pkt_len;
-                       mb->vlan_tci = rxdp[j].wb.upper.vlan;
                        mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan);

                        /* convert descriptor fields to rte mbuf flags */
-                       pkt_flags  = rx_desc_hlen_type_rss_to_pkt_flags(
+                       tmp32 = rte_le_to_cpu_32(
                                        rxdp[j].wb.lower.lo_dword.data);
+                       pkt_flags  = rx_desc_hlen_type_rss_to_pkt_flags(tmp32);
+
                        /* reuse status field from scan list */
                        pkt_flags |= rx_desc_status_to_pkt_flags(s[j]);
                        pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
                        mb->ol_flags = pkt_flags;

                        if (likely(pkt_flags & PKT_RX_RSS_HASH))
-                               mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
+                               mb->hash.rss = rte_le_to_cpu_32(
+                                               rxdp[j].wb.lower.hi_dword.rss);
                        else if (pkt_flags & PKT_RX_FDIR) {
-                               mb->hash.fdir.hash =
-                                       (uint16_t)((rxdp[j].wb.lower.hi_dword.csum_ip.csum)
-                                               & IXGBE_ATR_HASH_MASK);
-                               mb->hash.fdir.id = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+                               tmp16 = rte_le_to_cpu_16(
+                                               rxdp[j].wb.lower.hi_dword.csum_ip.csum);
+                               mb->hash.fdir.hash = tmp16 & IXGBE_ATR_HASH_MASK;
+
+                               tmp16 = rxdp[j].wb.lower.hi_dword.csum_ip.ip_id;
+                               mb->hash.fdir.id = rte_le_to_cpu_16(tmp16);
                        }
                }

@@ -1051,8 +1072,8 @@ ixgbe_rx_alloc_bufs(struct ixgbe_rx_queue *rxq)

                /* populate the descriptors */
-               dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
-               rxdp[i].read.hdr_addr = dma_addr;
-               rxdp[i].read.pkt_addr = dma_addr;
+               dma_addr = RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb);
+               rxdp[i].read.hdr_addr = rte_cpu_to_le_64(dma_addr);
+               rxdp[i].read.pkt_addr = rte_cpu_to_le_64(dma_addr);
        }

        /* update tail pointer */
@@ -1220,8 +1241,8 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 * using invalid descriptor fields when read from rxd.
                 */
                rxdp = &rx_ring[rx_id];
-               staterr = rxdp->wb.upper.status_error;
-               if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+               staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
+               if (!(staterr & IXGBE_RXDADV_STAT_DD))
                        break;
                rxd = *rxdp;

@@ -1325,12 +1346,17 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                rxm->ol_flags = pkt_flags;

                if (likely(pkt_flags & PKT_RX_RSS_HASH))
-                       rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
+                       rxm->hash.rss = rte_le_to_cpu_32(
+                                               rxd.wb.lower.hi_dword.rss);
                else if (pkt_flags & PKT_RX_FDIR) {
-                       rxm->hash.fdir.hash =
-                               (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
-                                          & IXGBE_ATR_HASH_MASK);
-                       rxm->hash.fdir.id = rxd.wb.lower.hi_dword.csum_ip.ip_id;
+                       uint16_t tmp16;
+
+                       tmp16 = rte_le_to_cpu_16(
+                                       rxd.wb.lower.hi_dword.csum_ip.csum);
+                       rxm->hash.fdir.hash = tmp16 & IXGBE_ATR_HASH_MASK;
+
+                       rxm->hash.fdir.id = rte_le_to_cpu_16(
+                                       rxd.wb.lower.hi_dword.csum_ip.ip_id);
                }
                /*
                 * Store the mbuf address into the next entry of the array
@@ -1412,8 +1438,8 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                 * using invalid descriptor fields when read from rxd.
                 */
                rxdp = &rx_ring[rx_id];
-               staterr = rxdp->wb.upper.status_error;
-               if (! (staterr & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD)))
+               staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
+               if (!(staterr & IXGBE_RXDADV_STAT_DD))
                        break;
                rxd = *rxdp;

@@ -1742,7 +1768,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
        prev = (uint16_t) (txq->nb_tx_desc - 1);
        for (i = 0; i < txq->nb_tx_desc; i++) {
                volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i];
-               txd->wb.status = IXGBE_TXD_STAT_DD;
+               txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD);
                txe[i].mbuf = NULL;
                txe[i].last_id = i;
                txe[prev].next_id = i;
@@ -2293,7 +2319,8 @@ ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        rxdp = &(rxq->rx_ring[rxq->rx_tail]);

        while ((desc < rxq->nb_rx_desc) &&
-               (rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD)) {
+               (rte_le_to_cpu_32(rxdp->wb.upper.status_error) &
+                                 IXGBE_RXDADV_STAT_DD)) {
                desc += IXGBE_RXQ_SCAN_INTERVAL;
                rxdp += IXGBE_RXQ_SCAN_INTERVAL;
                if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
@@ -2318,7 +2345,8 @@ ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
                desc -= rxq->nb_rx_desc;

        rxdp = &rxq->rx_ring[desc];
-       return !!(rxdp->wb.upper.status_error & IXGBE_RXDADV_STAT_DD);
+       return !!(rte_le_to_cpu_32(rxdp->wb.upper.status_error) &
+                       IXGBE_RXDADV_STAT_DD);
 }

 void
-- 
1.9.1
