Hi Vlad,

> -----Original Message-----
> From: dev [mailto:dev-bounces at dpdk.org] On Behalf Of Vlad Zolotarov
> Sent: Monday, March 09, 2015 10:13 AM
> To: dev at dpdk.org
> Subject: [dpdk-dev] [PATCH v1 1/3] ixgbe: Use the 
> rte_le_to_cpu_xx()/rte_cpu_to_le_xx() when reading/setting HW ring descriptor
> fields
> 
> Fixed the above in ixgbe_rx_alloc_bufs() and in ixgbe_recv_scattered_pkts().
> 
> Signed-off-by: Vlad Zolotarov <vladz at cloudius-systems.com>
> ---
>  lib/librte_pmd_ixgbe/ixgbe_rxtx.c | 13 +++++++------
>  1 file changed, 7 insertions(+), 6 deletions(-)
> 
> diff --git a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c 
> b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> index 9ecf3e5..b033e04 100644
> --- a/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> +++ b/lib/librte_pmd_ixgbe/ixgbe_rxtx.c
> @@ -1028,7 +1028,7 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
>       struct igb_rx_entry *rxep;
>       struct rte_mbuf *mb;
>       uint16_t alloc_idx;
> -     uint64_t dma_addr;
> +     __le64 dma_addr;

Wonder why you changed from uint64_t to __le64 here?
Effectively __le64 is the same as uint64_t, and I think it is better to
always use the same type across all PMD code, for consistency.
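Just to illustrate the point, a minimal sketch (the u64/__le64 definitions
below are assumptions modelled on the ixgbe_osdep.h shim, not copied from
it): to the compiler the two declarations are identical, the only
difference is the documented byte-order intent.

    #include <stdint.h>

    typedef uint64_t u64;   /* assumed: as in ixgbe_osdep.h           */
    #define __le64 u64      /* assumed: alias only, no LE semantics   */

    void example(void)
    {
            uint64_t addr_cpu = 0; /* reads as "CPU byte order" */
            __le64   addr_le  = 0; /* reads as "little endian"  */

            /* Both are plain 64-bit integers; no conversion is implied
             * or enforced by the type name itself. */
            addr_cpu = addr_le;
            (void)addr_cpu;
    }

So the actual endianness handling still comes from the
rte_cpu_to_le_64()/rte_le_to_cpu_64() calls, not from the type name.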
Konstantin 


>       int diag, i;
> 
>       /* allocate buffers in bulk directly into the S/W ring */
> @@ -1051,7 +1051,7 @@ ixgbe_rx_alloc_bufs(struct igb_rx_queue *rxq)
>               mb->port = rxq->port_id;
> 
>               /* populate the descriptors */
> -             dma_addr = (uint64_t)mb->buf_physaddr + RTE_PKTMBUF_HEADROOM;
> +             dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb));
>               rxdp[i].read.hdr_addr = dma_addr;
>               rxdp[i].read.pkt_addr = dma_addr;
>       }
> @@ -1559,13 +1559,14 @@ ixgbe_recv_scattered_pkts(void *rx_queue, struct 
> rte_mbuf **rx_pkts,
>               first_seg->ol_flags = pkt_flags;
> 
>               if (likely(pkt_flags & PKT_RX_RSS_HASH))
> -                     first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
> +                     first_seg->hash.rss =
> +                                 rte_le_to_cpu_32(rxd.wb.lower.hi_dword.rss);
>               else if (pkt_flags & PKT_RX_FDIR) {
>                       first_seg->hash.fdir.hash =
> -                             (uint16_t)((rxd.wb.lower.hi_dword.csum_ip.csum)
> -                                        & IXGBE_ATR_HASH_MASK);
> +                         rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.csum)
> +                                        & IXGBE_ATR_HASH_MASK;
>                       first_seg->hash.fdir.id =
> -                             rxd.wb.lower.hi_dword.csum_ip.ip_id;
> +                       rte_le_to_cpu_16(rxd.wb.lower.hi_dword.csum_ip.ip_id);
>               }
> 
>               /* Prefetch data of first segment, if configured to do so. */
> --
> 2.1.0
