From: Beilei Xing <beilei.x...@intel.com>

This patch supports Rx/Tx hairpin queue configuration.

Signed-off-by: Xiao Wang <xiao.w.w...@intel.com>
Signed-off-by: Mingxia Liu <mingxia....@intel.com>
Signed-off-by: Beilei Xing <beilei.x...@intel.com>
---
 drivers/net/cpfl/cpfl_ethdev.c | 136 +++++++++++++++++++++++++++++++--
 drivers/net/cpfl/cpfl_rxtx.c   |  80 +++++++++++++++++++
 drivers/net/cpfl/cpfl_rxtx.h   |   7 ++
 3 files changed, 217 insertions(+), 6 deletions(-)

diff --git a/drivers/net/cpfl/cpfl_ethdev.c b/drivers/net/cpfl/cpfl_ethdev.c
index b17c538ec2..a06def06d0 100644
--- a/drivers/net/cpfl/cpfl_ethdev.c
+++ b/drivers/net/cpfl/cpfl_ethdev.c
@@ -742,33 +742,157 @@ cpfl_config_rx_queues_irqs(struct rte_eth_dev *dev)
        return idpf_vport_irq_map_config(vport, nb_rx_queues);
 }
 
+/* Update hairpin_info for dev's tx hairpin queue */
+static int
+cpfl_txq_hairpin_info_update(struct rte_eth_dev *dev, uint16_t rx_port)
+{
+       struct cpfl_vport *cpfl_tx_vport = dev->data->dev_private;
+       struct rte_eth_dev *peer_dev = &rte_eth_devices[rx_port];
+       struct cpfl_vport *cpfl_rx_vport = peer_dev->data->dev_private;
+       struct cpfl_txq_hairpin_info *hairpin_info;
+       struct cpfl_tx_queue *cpfl_txq;
+       int i;
+
+       for (i = cpfl_tx_vport->nb_data_txq; i < dev->data->nb_tx_queues; i++) {
+               cpfl_txq = dev->data->tx_queues[i];
+               hairpin_info = &cpfl_txq->hairpin_info;
+               if (hairpin_info->peer_rxp != rx_port) {
+                       PMD_DRV_LOG(ERR, "port %d is not the peer port", rx_port);
+                       return -EINVAL;
+               }
+               hairpin_info->peer_rxq_id =
+                       cpfl_hw_qid_get(cpfl_rx_vport->p2p_q_chunks_info->rx_start_qid,
+                                       hairpin_info->peer_rxq_id - cpfl_rx_vport->nb_data_rxq);
+       }
+
+       return 0;
+}
+
+/* Bind Rx hairpin queue's memory zone to peer Tx hairpin queue's memory zone */
+static void
+cpfl_rxq_hairpin_mz_bind(struct rte_eth_dev *dev)
+{
+       struct cpfl_vport *cpfl_rx_vport = dev->data->dev_private;
+       struct idpf_vport *vport = &cpfl_rx_vport->base;
+       struct idpf_adapter *adapter = vport->adapter;
+       struct idpf_hw *hw = &adapter->hw;
+       struct cpfl_rx_queue *cpfl_rxq;
+       struct cpfl_tx_queue *cpfl_txq;
+       struct rte_eth_dev *peer_dev;
+       const struct rte_memzone *mz;
+       uint16_t peer_tx_port;
+       uint16_t peer_tx_qid;
+       int i;
+
+       for (i = cpfl_rx_vport->nb_data_rxq; i < dev->data->nb_rx_queues; i++) {
+               cpfl_rxq = dev->data->rx_queues[i];
+               peer_tx_port = cpfl_rxq->hairpin_info.peer_txp;
+               peer_tx_qid = cpfl_rxq->hairpin_info.peer_txq_id;
+               peer_dev = &rte_eth_devices[peer_tx_port];
+               cpfl_txq = peer_dev->data->tx_queues[peer_tx_qid];
+
+               /* bind rx queue */
+               mz = cpfl_txq->base.mz;
+               cpfl_rxq->base.rx_ring_phys_addr = mz->iova;
+               cpfl_rxq->base.rx_ring = mz->addr;
+               cpfl_rxq->base.mz = mz;
+
+               /* bind rx buffer queue */
+               mz = cpfl_txq->base.complq->mz;
+               cpfl_rxq->base.bufq1->rx_ring_phys_addr = mz->iova;
+               cpfl_rxq->base.bufq1->rx_ring = mz->addr;
+               cpfl_rxq->base.bufq1->mz = mz;
+               cpfl_rxq->base.bufq1->qrx_tail = hw->hw_addr +
+                       cpfl_hw_qtail_get(cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_start,
+                                       0, cpfl_rx_vport->p2p_q_chunks_info->rx_buf_qtail_spacing);
+       }
+}
+
 static int
 cpfl_start_queues(struct rte_eth_dev *dev)
 {
+       struct cpfl_vport *cpfl_vport = dev->data->dev_private;
+       struct idpf_vport *vport = &cpfl_vport->base;
        struct cpfl_rx_queue *cpfl_rxq;
        struct cpfl_tx_queue *cpfl_txq;
+       int update_flag = 0;
        int err = 0;
        int i;
 
+       /* For normal data queues, configure, init and enable Txq.
+        * For non-manual bind hairpin queues, configure Txq.
+        */
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                cpfl_txq = dev->data->tx_queues[i];
                if (cpfl_txq == NULL || cpfl_txq->base.tx_deferred_start)
                        continue;
-               err = cpfl_tx_queue_start(dev, i);
+               if (!cpfl_txq->hairpin_info.hairpin_q) {
+                       err = cpfl_tx_queue_start(dev, i);
+                       if (err != 0) {
+                               PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+                               return err;
+                       }
+               } else if (!cpfl_vport->p2p_manual_bind) {
+                       if (update_flag == 0) {
+                               err = cpfl_txq_hairpin_info_update(dev,
+                                                                  cpfl_txq->hairpin_info.peer_rxp);
+                               if (err != 0) {
+                                       PMD_DRV_LOG(ERR, "Fail to update Tx hairpin queue info");
+                                       return err;
+                               }
+                               update_flag = 1;
+                       }
+                       err = cpfl_hairpin_txq_config(vport, cpfl_txq);
+                       if (err != 0) {
+                               PMD_DRV_LOG(ERR, "Fail to configure hairpin Tx queue %u", i);
+                               return err;
+                       }
+               }
+       }
+
+       /* For non-manual bind hairpin queues, configure Tx completion queue first. */
+       if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_tx_complq != NULL) {
+               err = cpfl_hairpin_tx_complq_config(cpfl_vport);
                if (err != 0) {
-                       PMD_DRV_LOG(ERR, "Fail to start Tx queue %u", i);
+                       PMD_DRV_LOG(ERR, "Fail to config Tx completion queue");
                        return err;
                }
        }
 
+       /* For non-manual bind hairpin queues, configure Rx buffer queue.*/
+       if (!cpfl_vport->p2p_manual_bind && cpfl_vport->p2p_rx_bufq != NULL) {
+               cpfl_rxq_hairpin_mz_bind(dev);
+               err = cpfl_hairpin_rx_bufq_config(cpfl_vport);
+               if (err != 0) {
+                       PMD_DRV_LOG(ERR, "Fail to config Rx buffer queue");
+                       return err;
+               }
+       }
+
+       /* For normal data queues, configure, init and enable Rxq.
+        * For non-manual bind hairpin queues, configure Rxq, and then init Rxq.
+        */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                cpfl_rxq = dev->data->rx_queues[i];
                if (cpfl_rxq == NULL || cpfl_rxq->base.rx_deferred_start)
                        continue;
-               err = cpfl_rx_queue_start(dev, i);
-               if (err != 0) {
-                       PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
-                       return err;
+               if (!cpfl_rxq->hairpin_info.hairpin_q) {
+                       err = cpfl_rx_queue_start(dev, i);
+                       if (err != 0) {
+                               PMD_DRV_LOG(ERR, "Fail to start Rx queue %u", i);
+                               return err;
+                       }
+               } else if (!cpfl_vport->p2p_manual_bind) {
+                       err = cpfl_hairpin_rxq_config(vport, cpfl_rxq);
+                       if (err != 0) {
+                               PMD_DRV_LOG(ERR, "Fail to configure hairpin Rx queue %u", i);
+                               return err;
+                       }
+                       err = cpfl_rx_queue_init(dev, i);
+                       if (err != 0) {
+                               PMD_DRV_LOG(ERR, "Fail to init hairpin Rx queue %u", i);
+                               return err;
+                       }
                }
        }
 
diff --git a/drivers/net/cpfl/cpfl_rxtx.c b/drivers/net/cpfl/cpfl_rxtx.c
index 90b408d1f4..9408c6e1a4 100644
--- a/drivers/net/cpfl/cpfl_rxtx.c
+++ b/drivers/net/cpfl/cpfl_rxtx.c
@@ -922,6 +922,86 @@ cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        return ret;
 }
 
+int
+cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport)
+{
+       struct idpf_rx_queue *rx_bufq = cpfl_vport->p2p_rx_bufq;
+       struct virtchnl2_rxq_info rxq_info[1] = {0};
+
+       rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+       rxq_info[0].queue_id = rx_bufq->queue_id;
+       rxq_info[0].ring_len = rx_bufq->nb_rx_desc;
+       rxq_info[0].dma_ring_addr = rx_bufq->rx_ring_phys_addr;
+       rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+       rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+       rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+       rxq_info[0].data_buffer_size = rx_bufq->rx_buf_len;
+       rxq_info[0].buffer_notif_stride = CPFL_RX_BUF_STRIDE;
+
+       return idpf_vc_rxq_config_by_info(&cpfl_vport->base, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq)
+{
+       struct virtchnl2_rxq_info rxq_info[1] = {0};
+       struct idpf_rx_queue *rxq = &cpfl_rxq->base;
+
+       rxq_info[0].type = VIRTCHNL2_QUEUE_TYPE_RX;
+       rxq_info[0].queue_id = rxq->queue_id;
+       rxq_info[0].ring_len = rxq->nb_rx_desc;
+       rxq_info[0].dma_ring_addr = rxq->rx_ring_phys_addr;
+       rxq_info[0].rx_bufq1_id = rxq->bufq1->queue_id;
+       rxq_info[0].max_pkt_size = vport->max_pkt_len;
+       rxq_info[0].desc_ids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
+       rxq_info[0].qflags |= VIRTCHNL2_RX_DESC_SIZE_16BYTE;
+
+       rxq_info[0].data_buffer_size = rxq->rx_buf_len;
+       rxq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+       rxq_info[0].rx_buffer_low_watermark = CPFL_RXBUF_LOW_WATERMARK;
+
+       PMD_DRV_LOG(NOTICE, "hairpin: vport %u, Rxq id 0x%x",
+               vport->vport_id, rxq_info[0].queue_id);
+
+       return idpf_vc_rxq_config_by_info(vport, rxq_info, 1);
+}
+
+int
+cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport)
+{
+       struct idpf_tx_queue *tx_complq = cpfl_vport->p2p_tx_complq;
+       struct virtchnl2_txq_info txq_info[1] = {0};
+
+       txq_info[0].dma_ring_addr = tx_complq->tx_ring_phys_addr;
+       txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+       txq_info[0].queue_id = tx_complq->queue_id;
+       txq_info[0].ring_len = tx_complq->nb_tx_desc;
+       txq_info[0].peer_rx_queue_id = cpfl_vport->p2p_rx_bufq->queue_id;
+       txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+       txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+       return idpf_vc_txq_config_by_info(&cpfl_vport->base, txq_info, 1);
+}
+
+int
+cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq)
+{
+       struct idpf_tx_queue *txq = &cpfl_txq->base;
+       struct virtchnl2_txq_info txq_info[1] = {0};
+
+       txq_info[0].dma_ring_addr = txq->tx_ring_phys_addr;
+       txq_info[0].type = VIRTCHNL2_QUEUE_TYPE_TX;
+       txq_info[0].queue_id = txq->queue_id;
+       txq_info[0].ring_len = txq->nb_tx_desc;
+       txq_info[0].tx_compl_queue_id = txq->complq->queue_id;
+       txq_info[0].relative_queue_id = txq->queue_id;
+       txq_info[0].peer_rx_queue_id = cpfl_txq->hairpin_info.peer_rxq_id;
+       txq_info[0].model = VIRTCHNL2_QUEUE_MODEL_SPLIT;
+       txq_info[0].sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
+
+       return idpf_vc_txq_config_by_info(vport, txq_info, 1);
+}
+
 int
 cpfl_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
diff --git a/drivers/net/cpfl/cpfl_rxtx.h b/drivers/net/cpfl/cpfl_rxtx.h
index 06198d4aad..872ebc1bfd 100644
--- a/drivers/net/cpfl/cpfl_rxtx.h
+++ b/drivers/net/cpfl/cpfl_rxtx.h
@@ -32,12 +32,15 @@
 #define CPFL_RING_BASE_ALIGN   128
 
 #define CPFL_DEFAULT_RX_FREE_THRESH    32
+#define CPFL_RXBUF_LOW_WATERMARK       64
 
 #define CPFL_DEFAULT_TX_RS_THRESH      32
 #define CPFL_DEFAULT_TX_FREE_THRESH    32
 
 #define CPFL_SUPPORT_CHAIN_NUM 5
 
+#define CPFL_RX_BUF_STRIDE 64
+
 struct cpfl_rxq_hairpin_info {
        bool hairpin_q;         /* if rx queue is a hairpin queue */
        uint16_t peer_txp;
@@ -95,4 +98,8 @@ int cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 int cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                uint16_t nb_desc,
                                const struct rte_eth_hairpin_conf *conf);
+int cpfl_hairpin_tx_complq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_txq_config(struct idpf_vport *vport, struct cpfl_tx_queue *cpfl_txq);
+int cpfl_hairpin_rx_bufq_config(struct cpfl_vport *cpfl_vport);
+int cpfl_hairpin_rxq_config(struct idpf_vport *vport, struct cpfl_rx_queue *cpfl_rxq);
 #endif /* _CPFL_RXTX_H_ */
-- 
2.26.2

Reply via email to