> -----Original Message-----
> From: Liu, Mingxia
> Sent: Tuesday, May 30, 2023 10:27 AM
> To: Xing, Beilei <beilei.x...@intel.com>; Wu, Jingjing <jingjing...@intel.com>
> Cc: dev@dpdk.org; Wang, Xiao W <xiao.w.w...@intel.com>
> Subject: RE: [PATCH v4 05/13] net/cpfl: support hairpin queue setup and 
> release
> 
> 
> 
> > -----Original Message-----
> > From: Xing, Beilei <beilei.x...@intel.com>
> > Sent: Friday, May 26, 2023 3:39 PM
> > To: Wu, Jingjing <jingjing...@intel.com>
> > Cc: dev@dpdk.org; Liu, Mingxia <mingxia....@intel.com>; Xing, Beilei
> > <beilei.x...@intel.com>; Wang, Xiao W <xiao.w.w...@intel.com>
> > Subject: [PATCH v4 05/13] net/cpfl: support hairpin queue setup and
> > release
> >
> > From: Beilei Xing <beilei.x...@intel.com>
> >
> > Support hairpin Rx/Tx queue setup and release.
> >
> > Signed-off-by: Xiao Wang <xiao.w.w...@intel.com>
> > Signed-off-by: Mingxia Liu <mingxia....@intel.com>
> > Signed-off-by: Beilei Xing <beilei.x...@intel.com>
> > ---
> >  drivers/net/cpfl/cpfl_ethdev.c          |   6 +
> >  drivers/net/cpfl/cpfl_ethdev.h          |  11 +
> >  drivers/net/cpfl/cpfl_rxtx.c            | 353 +++++++++++++++++++++++-
> >  drivers/net/cpfl/cpfl_rxtx.h            |  36 +++
> >  drivers/net/cpfl/cpfl_rxtx_vec_common.h |   4 +
> >  5 files changed, 409 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/net/cpfl/cpfl_ethdev.c
> > b/drivers/net/cpfl/cpfl_ethdev.c index
> > 40b4515539..b17c538ec2 100644
> > --- a/drivers/net/cpfl/cpfl_ethdev.c
> > +++ b/drivers/net/cpfl/cpfl_ethdev.c
> > @@ -879,6 +879,10 @@ cpfl_dev_close(struct rte_eth_dev *dev)
> >     struct cpfl_adapter_ext *adapter = CPFL_ADAPTER_TO_EXT(vport-
> > >adapter);
> >
> >     cpfl_dev_stop(dev);
> > +   if (cpfl_vport->p2p_mp) {
> > +           rte_mempool_free(cpfl_vport->p2p_mp);
> > +           cpfl_vport->p2p_mp = NULL;
> > +   }
> >
> >     if (!adapter->base.is_rx_singleq && !adapter->base.is_tx_singleq)
> >             cpfl_p2p_queue_grps_del(vport);
> > @@ -922,6 +926,8 @@ static const struct eth_dev_ops cpfl_eth_dev_ops = {
> >     .xstats_get_names               = cpfl_dev_xstats_get_names,
> >     .xstats_reset                   = cpfl_dev_xstats_reset,
> >     .hairpin_cap_get                = cpfl_hairpin_cap_get,
> > +   .rx_hairpin_queue_setup         = cpfl_rx_hairpin_queue_setup,
> > +   .tx_hairpin_queue_setup         = cpfl_tx_hairpin_queue_setup,
> >  };
> >
> > +int
> > +cpfl_rx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> > +                       uint16_t nb_desc,
> > +                       const struct rte_eth_hairpin_conf *conf) {
> > +   struct cpfl_vport *cpfl_vport = (struct cpfl_vport *)dev->data-
> > >dev_private;
> > +   struct idpf_vport *vport = &cpfl_vport->base;
> > +   struct idpf_adapter *adapter_base = vport->adapter;
> > +   uint16_t logic_qid = cpfl_vport->nb_p2p_rxq;
> > +   struct cpfl_rxq_hairpin_info *hairpin_info;
> > +   struct cpfl_rx_queue *cpfl_rxq;
> > +   struct idpf_rx_queue *bufq1 = NULL;
> > +   struct idpf_rx_queue *rxq;
> > +   uint16_t peer_port, peer_q;
> > +   uint16_t qid;
> > +   int ret;
> > +
> > +   if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> > +           PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin
> > queue.");
> > +           return -EINVAL;
> > +   }
> > +
> > +   if (conf->peer_count != 1) {
> > +           PMD_INIT_LOG(ERR, "Can't support Rx hairpin queue peer
> > count %d", conf->peer_count);
> > +           return -EINVAL;
> > +   }
> > +
> > +   peer_port = conf->peers[0].port;
> > +   peer_q = conf->peers[0].queue;
> > +
> > +   if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> > +       nb_desc > CPFL_MAX_RING_DESC ||
> > +       nb_desc < CPFL_MIN_RING_DESC) {
> > +           PMD_INIT_LOG(ERR, "Number (%u) of receive descriptors is
> > invalid", nb_desc);
> > +           return -EINVAL;
> > +   }
> > +
> > +   /* Free memory if needed */
> > +   if (dev->data->rx_queues[queue_idx]) {
> > +           cpfl_rx_queue_release(dev->data->rx_queues[queue_idx]);
> > +           dev->data->rx_queues[queue_idx] = NULL;
> > +   }
> > +
> > +   /* Setup Rx description queue */
> > +   cpfl_rxq = rte_zmalloc_socket("cpfl hairpin rxq",
> > +                            sizeof(struct cpfl_rx_queue),
> > +                            RTE_CACHE_LINE_SIZE,
> > +                            SOCKET_ID_ANY);
> > +   if (!cpfl_rxq) {
> > +           PMD_INIT_LOG(ERR, "Failed to allocate memory for rx queue
> > data structure");
> > +           return -ENOMEM;
> > +   }
> > +
> > +   rxq = &cpfl_rxq->base;
> > +   hairpin_info = &cpfl_rxq->hairpin_info;
> > +   rxq->nb_rx_desc = nb_desc * 2;
> > +   rxq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info-
> > >rx_start_qid, logic_qid);
> > +   rxq->port_id = dev->data->port_id;
> > +   rxq->adapter = adapter_base;
> > +   rxq->rx_buf_len = CPFL_P2P_MBUF_SIZE - RTE_PKTMBUF_HEADROOM;
> > +   hairpin_info->hairpin_q = true;
> > +   hairpin_info->peer_txp = peer_port;
> > +   hairpin_info->peer_txq_id = peer_q;
> > +
> > +   if (conf->manual_bind != 0)
> > +           cpfl_vport->p2p_manual_bind = true;
> > +   else
> > +           cpfl_vport->p2p_manual_bind = false;
> > +
> > +   if (cpfl_vport->p2p_rx_bufq == NULL) {
> > +           bufq1 = rte_zmalloc_socket("hairpin rx bufq1",
> > +                                      sizeof(struct idpf_rx_queue),
> > +                                      RTE_CACHE_LINE_SIZE,
> > +                                      SOCKET_ID_ANY);
> > +           if (!bufq1) {
> > +                   PMD_INIT_LOG(ERR, "Failed to allocate memory for
> > hairpin Rx buffer queue 1.");
> > +                   ret = -ENOMEM;
> > +                   goto err_alloc_bufq1;
> > +           }
> > +           qid = 2 * logic_qid;
> > +           ret = cpfl_rx_hairpin_bufq_setup(dev, bufq1, qid, nb_desc);
> > +           if (ret) {
> > +                   PMD_INIT_LOG(ERR, "Failed to setup hairpin Rx buffer
> > queue 1");
> > +                   ret = -EINVAL;
> > +                   goto err_setup_bufq1;
> > +           }
> > +           cpfl_vport->p2p_rx_bufq = bufq1;
> > +   }
> > +
> > +   rxq->bufq1 = cpfl_vport->p2p_rx_bufq;
> > +   rxq->bufq2 = NULL;
> > +
> > +   cpfl_vport->nb_p2p_rxq++;
> > +   rxq->q_set = true;
> > +   dev->data->rx_queues[queue_idx] = cpfl_rxq;
> > +
> > +   return 0;
> > +
> > +err_setup_bufq1:
> > +   rte_free(bufq1);
> > +err_alloc_bufq1:
> > +   rte_free(rxq);
> [Liu, Mingxia] Shouldn't this free cpfl_rxq instead of rxq? rxq points into the cpfl_rxq allocation, so freeing rxq is not the allocated pointer.
> > +
> > +   return ret;
> > +}
> > +
> > +int
> > +cpfl_tx_hairpin_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
> > +                       uint16_t nb_desc,
> > +                       const struct rte_eth_hairpin_conf *conf) {
> > +   struct cpfl_vport *cpfl_vport =
> > +       (struct cpfl_vport *)dev->data->dev_private;
> > +
> > +   struct idpf_vport *vport = &cpfl_vport->base;
> > +   struct idpf_adapter *adapter_base = vport->adapter;
> > +   uint16_t logic_qid = cpfl_vport->nb_p2p_txq;
> > +   struct cpfl_txq_hairpin_info *hairpin_info;
> > +   struct idpf_hw *hw = &adapter_base->hw;
> > +   struct cpfl_tx_queue *cpfl_txq;
> > +   struct idpf_tx_queue *txq, *cq;
> > +   const struct rte_memzone *mz;
> > +   uint32_t ring_size;
> > +   uint16_t peer_port, peer_q;
> > +
> > +   if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
> > +           PMD_INIT_LOG(ERR, "Only spilt queue model supports hairpin
> > queue.");
> > +           return -EINVAL;
> > +   }
> > +
> > +   if (conf->peer_count != 1) {
> > +           PMD_INIT_LOG(ERR, "Can't support Tx hairpin queue peer
> > count %d", conf->peer_count);
> > +           return -EINVAL;
> > +   }
> > +
> > +   peer_port = conf->peers[0].port;
> > +   peer_q = conf->peers[0].queue;
> > +
> > +   if (nb_desc % CPFL_ALIGN_RING_DESC != 0 ||
> > +       nb_desc > CPFL_MAX_RING_DESC ||
> > +       nb_desc < CPFL_MIN_RING_DESC) {
> > +           PMD_INIT_LOG(ERR, "Number (%u) of transmit descriptors is
> > invalid",
> > +                        nb_desc);
> > +           return -EINVAL;
> > +   }
> > +
> > +   /* Free memory if needed. */
> > +   if (dev->data->tx_queues[queue_idx]) {
> > +           cpfl_tx_queue_release(dev->data->tx_queues[queue_idx]);
> > +           dev->data->tx_queues[queue_idx] = NULL;
> > +   }
> > +
> > +   /* Allocate the TX queue data structure. */
> > +   cpfl_txq = rte_zmalloc_socket("cpfl hairpin txq",
> > +                            sizeof(struct cpfl_tx_queue),
> > +                            RTE_CACHE_LINE_SIZE,
> > +                            SOCKET_ID_ANY);
> > +   if (!cpfl_txq) {
> > +           PMD_INIT_LOG(ERR, "Failed to allocate memory for tx queue
> > structure");
> > +           return -ENOMEM;
> > +   }
> > +
> > +   txq = &cpfl_txq->base;
> > +   hairpin_info = &cpfl_txq->hairpin_info;
> > +   /* Txq ring length should be 2 times of Tx completion queue size. */
> > +   txq->nb_tx_desc = nb_desc * 2;
> > +   txq->queue_id = cpfl_hw_qid_get(cpfl_vport->p2p_q_chunks_info-
> > >tx_start_qid, logic_qid);
> > +   txq->port_id = dev->data->port_id;
> > +   hairpin_info->hairpin_q = true;
> > +   hairpin_info->peer_rxp = peer_port;
> > +   hairpin_info->peer_rxq_id = peer_q;
> > +
> > +   if (conf->manual_bind != 0)
> > +           cpfl_vport->p2p_manual_bind = true;
> > +   else
> > +           cpfl_vport->p2p_manual_bind = false;
> > +
> > +   /* Always Tx hairpin queue allocates Tx HW ring */
> > +   ring_size = RTE_ALIGN(txq->nb_tx_desc * CPFL_P2P_DESC_LEN,
> > +                         CPFL_DMA_MEM_ALIGN);
> > +   mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_ring", logic_qid,
> > +                                 ring_size + CPFL_P2P_RING_BUF,
> > +                                 CPFL_RING_BASE_ALIGN,
> > +                                 dev->device->numa_node);
> > +   if (!mz) {
> > +           PMD_INIT_LOG(ERR, "Failed to reserve DMA memory for TX");
> > +           rte_free(txq);
> [Liu, Mingxia] Shouldn't this free cpfl_txq instead of txq? txq points into the cpfl_txq allocation.
> > +           return -ENOMEM;
> > +   }
> > +
> > +   txq->tx_ring_phys_addr = mz->iova;
> > +   txq->desc_ring = mz->addr;
> > +   txq->mz = mz;
> > +
> > +   cpfl_tx_hairpin_descq_reset(txq);
> > +   txq->qtx_tail = hw->hw_addr +
> > +           cpfl_hw_qtail_get(cpfl_vport->p2p_q_chunks_info-
> > >tx_qtail_start,
> > +                             logic_qid, cpfl_vport->p2p_q_chunks_info-
> > >tx_qtail_spacing);
> > +   txq->ops = &def_txq_ops;
> > +
> > +   if (cpfl_vport->p2p_tx_complq == NULL) {
> > +           cq = rte_zmalloc_socket("cpfl hairpin cq",
> > +                                   sizeof(struct idpf_tx_queue),
> > +                                   RTE_CACHE_LINE_SIZE,
> > +                                   dev->device->numa_node);
> > +           if (!cq) {
> > +                   PMD_INIT_LOG(ERR, "Failed to allocate memory for tx
> > queue structure");
> [Liu, Mingxia] Before returning here, shouldn't the previously acquired
> resources be released — e.g. free cpfl_txq (and the tx ring memzone
> reserved above)?
[Liu, Mingxia] In addition, should txq->mz be freed before releasing cpfl_txq?
> > +                   return -ENOMEM;
> > +           }
> > +
> > +           cq->nb_tx_desc = nb_desc;
> > +           cq->queue_id = cpfl_hw_qid_get(cpfl_vport-
> > >p2p_q_chunks_info->tx_compl_start_qid,
> > +                                          0);
> > +           cq->port_id = dev->data->port_id;
> > +
> > +           /* Tx completion queue always allocates the HW ring */
> > +           ring_size = RTE_ALIGN(cq->nb_tx_desc * CPFL_P2P_DESC_LEN,
> > +                                 CPFL_DMA_MEM_ALIGN);
> > +           mz = rte_eth_dma_zone_reserve(dev, "hairpin_tx_compl_ring",
> > logic_qid,
> > +                                         ring_size + CPFL_P2P_RING_BUF,
> > +                                         CPFL_RING_BASE_ALIGN,
> > +                                         dev->device->numa_node);
> > +           if (!mz) {
> > +                   PMD_INIT_LOG(ERR, "Failed to reserve DMA memory
> > for TX completion queue");
> > +                   rte_free(txq);
> 
> [Liu, Mingxia] Shouldn't this free cpfl_txq instead of txq? In addition,
> shouldn't the cq allocation also be released on this error path?
[Liu, Mingxia] In addition, should txq->mz be freed before releasing cpfl_txq?
> 
> 

Reply via email to