From: Chengwen Feng <fengcheng...@huawei.com>

The network engine requires the Rx DMA address to be aligned; if this
requirement is violated, Rx does not work correctly. The detailed
requirements are:
1) The HIP08 platform requires 64-byte alignment.
2) Later platforms require 128-byte alignment.

The Rx DMA address is set up on both the control plane and the data
plane. To preserve performance, the alignment check is added only on
the control plane.
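
For reference, a minimal standalone sketch of the power-of-two alignment
test that the new hns3_check_rx_dma_addr() helper below performs; the
helper name rx_dma_addr_is_aligned() and the example values are
illustrative only, not part of the driver:

#include <stdbool.h>
#include <stdint.h>

/*
 * Return true when dma_addr meets the required alignment.
 * align must be a power of two (64 or 128 here), so the low
 * bits of the address hold the remainder directly.
 */
static bool
rx_dma_addr_is_aligned(uint64_t dma_addr, uint16_t align)
{
	return (dma_addr & (uint64_t)(align - 1)) == 0;
}

E.g. rx_dma_addr_is_aligned(0x1040, 64) is true, while
rx_dma_addr_is_aligned(0x1040, 128) is false.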

Fixes: bba636698316 ("net/hns3: support Rx/Tx and related operations")
Cc: sta...@dpdk.org

Signed-off-by: Chengwen Feng <fengcheng...@huawei.com>
Signed-off-by: Jie Hai <haij...@huawei.com>
---
 drivers/net/hns3/hns3_ethdev.c    |  2 ++
 drivers/net/hns3/hns3_ethdev.h    |  8 ++++++++
 drivers/net/hns3/hns3_ethdev_vf.c |  2 ++
 drivers/net/hns3/hns3_rxtx.c      | 21 +++++++++++++++++++++
 4 files changed, 33 insertions(+)

diff --git a/drivers/net/hns3/hns3_ethdev.c b/drivers/net/hns3/hns3_ethdev.c
index 9730b9a7e9f6..2340fb21b1ad 100644
--- a/drivers/net/hns3/hns3_ethdev.c
+++ b/drivers/net/hns3/hns3_ethdev.c
@@ -2738,6 +2738,7 @@ hns3_get_capability(struct hns3_hw *hw)
                hw->rss_info.ipv6_sctp_offload_supported = false;
                hw->udp_cksum_mode = HNS3_SPECIAL_PORT_SW_CKSUM_MODE;
                pf->support_multi_tc_pause = false;
+               hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_64;
                return 0;
        }
 
@@ -2758,6 +2759,7 @@ hns3_get_capability(struct hns3_hw *hw)
        hw->rss_info.ipv6_sctp_offload_supported = true;
        hw->udp_cksum_mode = HNS3_SPECIAL_PORT_HW_CKSUM_MODE;
        pf->support_multi_tc_pause = true;
+       hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_128;
 
        return 0;
 }
diff --git a/drivers/net/hns3/hns3_ethdev.h b/drivers/net/hns3/hns3_ethdev.h
index e70c5fff2a45..c190d5109b91 100644
--- a/drivers/net/hns3/hns3_ethdev.h
+++ b/drivers/net/hns3/hns3_ethdev.h
@@ -487,6 +487,9 @@ struct hns3_queue_intr {
 #define HNS3_PKTS_DROP_STATS_MODE1             0
 #define HNS3_PKTS_DROP_STATS_MODE2             1
 
+#define HNS3_RX_DMA_ADDR_ALIGN_128     128
+#define HNS3_RX_DMA_ADDR_ALIGN_64      64
+
 struct hns3_hw {
        struct rte_eth_dev_data *data;
        void *io_base;
@@ -554,6 +557,11 @@ struct hns3_hw {
         * direction.
         */
        uint8_t min_tx_pkt_len;
+       /*
+        * The required alignment of the DMA address of the RX buffer.
+        * See HNS3_RX_DMA_ADDR_ALIGN_XXX for available values.
+        */
+       uint16_t rx_dma_addr_align;
 
        struct hns3_queue_intr intr;
        /*
diff --git a/drivers/net/hns3/hns3_ethdev_vf.c b/drivers/net/hns3/hns3_ethdev_vf.c
index 4eeb46a34448..465280dce40c 100644
--- a/drivers/net/hns3/hns3_ethdev_vf.c
+++ b/drivers/net/hns3/hns3_ethdev_vf.c
@@ -707,6 +707,7 @@ hns3vf_get_capability(struct hns3_hw *hw)
                hw->min_tx_pkt_len = HNS3_HIP08_MIN_TX_PKT_LEN;
                hw->rss_info.ipv6_sctp_offload_supported = false;
                hw->promisc_mode = HNS3_UNLIMIT_PROMISC_MODE;
+               hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_64;
                return 0;
        }
 
@@ -724,6 +725,7 @@ hns3vf_get_capability(struct hns3_hw *hw)
        hw->drop_stats_mode = HNS3_PKTS_DROP_STATS_MODE2;
        hw->rss_info.ipv6_sctp_offload_supported = true;
        hw->promisc_mode = HNS3_LIMIT_PROMISC_MODE;
+       hw->rx_dma_addr_align = HNS3_RX_DMA_ADDR_ALIGN_128;
 
        return 0;
 }
diff --git a/drivers/net/hns3/hns3_rxtx.c b/drivers/net/hns3/hns3_rxtx.c
index f4ec1f8e5823..2354091e1dd6 100644
--- a/drivers/net/hns3/hns3_rxtx.c
+++ b/drivers/net/hns3/hns3_rxtx.c
@@ -274,12 +274,27 @@ hns3_free_all_queues(struct rte_eth_dev *dev)
        hns3_free_tx_queues(dev);
 }
 
+static int
+hns3_check_rx_dma_addr(struct hns3_hw *hw, uint64_t dma_addr)
+{
+       uint64_t rem;
+
+       rem = dma_addr & (hw->rx_dma_addr_align - 1);
+       if (rem > 0) {
+               hns3_err(hw, "The IO address of the beginning of the mbuf data "
+                        "must be %u-byte aligned", hw->rx_dma_addr_align);
+               return -EINVAL;
+       }
+       return 0;
+}
+
 static int
 hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
 {
        struct rte_mbuf *mbuf;
        uint64_t dma_addr;
        uint16_t i;
+       int ret;
 
        for (i = 0; i < rxq->nb_rx_desc; i++) {
                mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
@@ -300,6 +315,12 @@ hns3_alloc_rx_queue_mbufs(struct hns3_hw *hw, struct hns3_rx_queue *rxq)
                dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
                rxq->rx_ring[i].addr = dma_addr;
                rxq->rx_ring[i].rx.bd_base_info = 0;
+
+               ret = hns3_check_rx_dma_addr(hw, dma_addr);
+               if (ret != 0) {
+                       hns3_rx_queue_release_mbufs(rxq);
+                       return ret;
+               }
        }
 
        return 0;
-- 
2.33.0
