> -----Original Message-----
> From: Ananyev, Konstantin
> Sent: Wednesday, March 14, 2018 8:36 PM
> To: Zhang, Qi Z <qi.z.zh...@intel.com>; tho...@monjalon.net
> Cc: dev@dpdk.org; Xing, Beilei <beilei.x...@intel.com>; Wu, Jingjing
> <jingjing...@intel.com>; Lu, Wenzhuo <wenzhuo...@intel.com>; Zhang, Qi Z
> <qi.z.zh...@intel.com>
> Subject: RE: [dpdk-dev] [PATCH v2 4/4] net/i40e: enable deferred queue
> setup
> 
> 
> 
> > -----Original Message-----
> > From: dev [mailto:dev-boun...@dpdk.org] On Behalf Of Qi Zhang
> > Sent: Friday, March 2, 2018 4:13 AM
> > To: tho...@monjalon.net
> > Cc: dev@dpdk.org; Xing, Beilei <beilei.x...@intel.com>; Wu, Jingjing
> > <jingjing...@intel.com>; Lu, Wenzhuo <wenzhuo...@intel.com>; Zhang,
> Qi
> > Z <qi.z.zh...@intel.com>
> > Subject: [dpdk-dev] [PATCH v2 4/4] net/i40e: enable deferred queue
> > setup
> >
> > Expose the deferred queue configuration capability and enhance
> > i40e_dev_[rx|tx]_queue_[setup|release] to handle the situation when
> > device already started.
> >
> > Signed-off-by: Qi Zhang <qi.z.zh...@intel.com>
> > ---
> >  drivers/net/i40e/i40e_ethdev.c |  6 ++++
> >  drivers/net/i40e/i40e_rxtx.c   | 62
> ++++++++++++++++++++++++++++++++++++++++--
> >  2 files changed, 66 insertions(+), 2 deletions(-)
> >
> > diff --git a/drivers/net/i40e/i40e_ethdev.c
> > b/drivers/net/i40e/i40e_ethdev.c index 06b0f03a1..843a0c42a 100644
> > --- a/drivers/net/i40e/i40e_ethdev.c
> > +++ b/drivers/net/i40e/i40e_ethdev.c
> > @@ -3195,6 +3195,12 @@ i40e_dev_info_get(struct rte_eth_dev *dev,
> struct rte_eth_dev_info *dev_info)
> >             DEV_TX_OFFLOAD_GRE_TNL_TSO |
> >             DEV_TX_OFFLOAD_IPIP_TNL_TSO |
> >             DEV_TX_OFFLOAD_GENEVE_TNL_TSO;
> > +   dev_info->deferred_queue_config_capa =
> > +           DEV_DEFERRED_RX_QUEUE_SETUP |
> > +           DEV_DEFERRED_TX_QUEUE_SETUP |
> > +           DEV_DEFERRED_RX_QUEUE_RELEASE |
> > +           DEV_DEFERRED_TX_QUEUE_RELEASE;
> > +
> >     dev_info->hash_key_size = (I40E_PFQF_HKEY_MAX_INDEX + 1) *
> >                                             sizeof(uint32_t);
> >     dev_info->reta_size = pf->hash_lut_size; diff --git
> > a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index
> > 1217e5a61..e5f532cf7 100644
> > --- a/drivers/net/i40e/i40e_rxtx.c
> > +++ b/drivers/net/i40e/i40e_rxtx.c
> > @@ -1712,6 +1712,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev
> *dev,
> >     uint16_t len, i;
> >     uint16_t reg_idx, base, bsf, tc_mapping;
> >     int q_offset, use_def_burst_func = 1;
> > +   int ret = 0;
> >
> >     if (hw->mac.type == I40E_MAC_VF || hw->mac.type ==
> I40E_MAC_X722_VF) {
> >             vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
> > @@ -1841,6 +1842,25 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev
> *dev,
> >                     rxq->dcb_tc = i;
> >     }
> >
> > +   if (dev->data->dev_started) {
> > +           ret = i40e_rx_queue_init(rxq);
> > +           if (ret != I40E_SUCCESS) {
> > +                   PMD_DRV_LOG(ERR,
> > +                               "Failed to do RX queue initialization");
> > +                   return ret;
> > +           }
> > +           if (ad->rx_vec_allowed)
> 
> Better to check what rx function is installed right now.
Yes, it should be fixed; we need to return a failure if there is any conflict.
> 
> > +                   i40e_rxq_vec_setup(rxq);
> > +           if (!rxq->rx_deferred_start) {
> > +                   ret = i40e_dev_rx_queue_start(dev, queue_idx);
> 
> I don't think it is a good idea to start/stop queue inside
> queue_setup/queue_release.
> There is special API (queue_start/queue_stop) to do this.

The idea is that if the device is already started, the queue is supposed to be
started automatically after queue_setup.
The deferred_start flag can be used if the application doesn't want this.
But maybe it's better to call dev_ops->rx_queue_stop in the ethdev layer. (The
same applies to queue_stop in the previous patch.)

Thanks
Qi

> Konstantin
> 
> > +                   if (ret != I40E_SUCCESS) {
> > +                           PMD_DRV_LOG(ERR,
> > +                                       "Failed to start RX queue");
> > +                           return ret;
> > +                   }
> > +           }
> > +   }
> > +
> >     return 0;
> >  }
> >
> > @@ -1848,13 +1868,21 @@ void
> >  i40e_dev_rx_queue_release(void *rxq)
> >  {
> >     struct i40e_rx_queue *q = (struct i40e_rx_queue *)rxq;
> > +   struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
> >
> >     if (!q) {
> >             PMD_DRV_LOG(DEBUG, "Pointer to rxq is NULL");
> >             return;
> >     }
> >
> > -   i40e_rx_queue_release_mbufs(q);
> > +   if (dev->data->dev_started) {
> > +           if (dev->data->rx_queue_state[q->queue_id] ==
> > +                   RTE_ETH_QUEUE_STATE_STARTED)
> > +                   i40e_dev_rx_queue_stop(dev, q->queue_id);
> > +   } else {
> > +           i40e_rx_queue_release_mbufs(q);
> > +   }
> > +
> >     rte_free(q->sw_ring);
> >     rte_free(q);
> >  }
> > @@ -1980,6 +2008,8 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev
> *dev,
> >                     const struct rte_eth_txconf *tx_conf)  {
> >     struct i40e_hw *hw =
> I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
> > +   struct i40e_adapter *ad =
> > +           I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
> >     struct i40e_vsi *vsi;
> >     struct i40e_pf *pf = NULL;
> >     struct i40e_vf *vf = NULL;
> > @@ -1989,6 +2019,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev
> *dev,
> >     uint16_t tx_rs_thresh, tx_free_thresh;
> >     uint16_t reg_idx, i, base, bsf, tc_mapping;
> >     int q_offset;
> > +   int ret = 0;
> >
> >     if (hw->mac.type == I40E_MAC_VF || hw->mac.type ==
> I40E_MAC_X722_VF) {
> >             vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
> > @@ -2162,6 +2193,25 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev
> *dev,
> >                     txq->dcb_tc = i;
> >     }
> >
> > +   if (dev->data->dev_started) {
> > +           ret = i40e_tx_queue_init(txq);
> > +           if (ret != I40E_SUCCESS) {
> > +                   PMD_DRV_LOG(ERR,
> > +                               "Failed to do TX queue initialization");
> > +                   return ret;
> > +           }
> > +           if (ad->tx_vec_allowed)
> > +                   i40e_txq_vec_setup(txq);
> > +           if (!txq->tx_deferred_start) {
> > +                   ret = i40e_dev_tx_queue_start(dev, queue_idx);
> > +                   if (ret != I40E_SUCCESS) {
> > +                           PMD_DRV_LOG(ERR,
> > +                                       "Failed to start TX queue");
> > +                           return ret;
> > +                   }
> > +           }
> > +   }
> > +
> >     return 0;
> >  }
> >
> > @@ -2169,13 +2219,21 @@ void
> >  i40e_dev_tx_queue_release(void *txq)
> >  {
> >     struct i40e_tx_queue *q = (struct i40e_tx_queue *)txq;
> > +   struct rte_eth_dev *dev = &rte_eth_devices[q->port_id];
> >
> >     if (!q) {
> >             PMD_DRV_LOG(DEBUG, "Pointer to TX queue is NULL");
> >             return;
> >     }
> >
> > -   i40e_tx_queue_release_mbufs(q);
> > +   if (dev->data->dev_started) {
> > +           if (dev->data->tx_queue_state[q->queue_id] ==
> > +                   RTE_ETH_QUEUE_STATE_STARTED)
> > +                   i40e_dev_tx_queue_stop(dev, q->queue_id);
> > +   } else {
> > +           i40e_tx_queue_release_mbufs(q);
> > +   }
> > +
> >     rte_free(q->sw_ring);
> >     rte_free(q);
> >  }
> > --
> > 2.13.6

Reply via email to