Add support for these device ops:
 - rx_queue_stop
 - tx_queue_stop

Stopping a queue disables it through VIRTCHNL2_OP_DISABLE_QUEUES,
releases the mbufs held in its SW ring and resets the descriptor
ring(s). idpf_dev_stop() now stops all queues, and idpf_dev_start()
rolls back already-started queues if enabling the vport fails.

Signed-off-by: Beilei Xing <beilei.x...@intel.com>
Signed-off-by: Xiaoyun Li <xiaoyun...@intel.com>
Signed-off-by: Junfeng Guo <junfeng....@intel.com>
---
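Note for reviewers (kept below the '---' so it stays out of the commit
message): a minimal sketch of how an application exercises the new ops
through the generic ethdev API. The port id, queue id and the helper
name are placeholders, not part of this patch.

/* Stop and later restart one Rx/Tx queue pair; ethdev dispatches these
 * calls to the idpf rx/tx_queue_stop/start ops added here.
 */
#include <rte_ethdev.h>

static int
pause_queue_pair(uint16_t port_id, uint16_t queue_id)
{
        int ret;

        ret = rte_eth_dev_rx_queue_stop(port_id, queue_id);
        if (ret != 0)
                return ret;

        ret = rte_eth_dev_tx_queue_stop(port_id, queue_id);
        if (ret != 0)
                return ret;

        /* ... queue-level reconfiguration could happen here ... */

        ret = rte_eth_dev_rx_queue_start(port_id, queue_id);
        if (ret != 0)
                return ret;

        return rte_eth_dev_tx_queue_start(port_id, queue_id);
}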
 doc/guides/nics/features/idpf.ini |   1 +
 drivers/net/idpf/idpf_ethdev.c    |  14 ++-
 drivers/net/idpf/idpf_rxtx.c      | 140 ++++++++++++++++++++++++++++++
 drivers/net/idpf/idpf_rxtx.h      |  11 +++
 drivers/net/idpf/idpf_vchnl.c     |  69 +++++++++++++++
 5 files changed, 232 insertions(+), 3 deletions(-)
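A second reviewer note, also outside the commit message: in
idpf_vc_ena_dis_queues() the message is sized for num_chunks entries and
each chunk slot is addressed by its VIRTCHNL2_QUEUE_TYPE_* value, relying
on the virtchnl2 struct ending in a one-element chunk array. The sketch
below restates that sizing/indexing idiom with simplified stand-in
structures; the real layouts live in the idpf base virtchnl2.h.

#include <stdint.h>
#include <stdlib.h>

struct demo_queue_chunk {
        uint32_t type;
        uint32_t start_queue_id;
        uint32_t num_queues;
};

struct demo_del_ena_dis_queues {
        uint32_t vport_id;
        uint16_t num_chunks;
        struct demo_queue_chunk chunks[1];      /* grows at the tail */
};

/* Allocate room for num_chunks entries; the struct already carries one. */
static struct demo_del_ena_dis_queues *
demo_alloc_queue_select(uint16_t num_chunks)
{
        size_t len = sizeof(struct demo_del_ena_dis_queues) +
                     sizeof(struct demo_queue_chunk) * (num_chunks - 1);

        return calloc(1, len);
}

/* Each chunk slot is picked by the queue-type constant itself. */
static void
demo_fill_chunk(struct demo_queue_chunk *chunks, uint32_t type,
                uint32_t start_qid, uint32_t num)
{
        chunks[type].type = type;
        chunks[type].start_queue_id = start_qid;
        chunks[type].num_queues = num;
}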

diff --git a/doc/guides/nics/features/idpf.ini b/doc/guides/nics/features/idpf.ini
index 7a44b8b5e4..30e1c0831e 100644
--- a/doc/guides/nics/features/idpf.ini
+++ b/doc/guides/nics/features/idpf.ini
@@ -7,4 +7,5 @@
 ; is selected.
 ;
 [Features]
+Queue start/stop     = Y
 Linux                = Y
diff --git a/drivers/net/idpf/idpf_ethdev.c b/drivers/net/idpf/idpf_ethdev.c
index 954bd0bf4b..6040050dd9 100644
--- a/drivers/net/idpf/idpf_ethdev.c
+++ b/drivers/net/idpf/idpf_ethdev.c
@@ -62,7 +62,9 @@ static const struct eth_dev_ops idpf_eth_dev_ops = {
        .dev_stop                       = idpf_dev_stop,
        .dev_close                      = idpf_dev_close,
        .rx_queue_start                 = idpf_rx_queue_start,
+       .rx_queue_stop                  = idpf_rx_queue_stop,
        .tx_queue_start                 = idpf_tx_queue_start,
+       .tx_queue_stop                  = idpf_tx_queue_stop,
        .rx_queue_setup                 = idpf_rx_queue_setup,
        .tx_queue_setup                 = idpf_tx_queue_setup,
        .dev_infos_get                  = idpf_dev_info_get,
@@ -300,22 +302,26 @@ idpf_dev_start(struct rte_eth_dev *dev)
 
        if (dev->data->mtu > vport->max_mtu) {
                PMD_DRV_LOG(ERR, "MTU should be less than %d", vport->max_mtu);
-               return -1;
+               goto err_mtu;
        }
 
        vport->max_pkt_len = dev->data->mtu + IDPF_ETH_OVERHEAD;
 
        if (idpf_start_queues(dev) != 0) {
                PMD_DRV_LOG(ERR, "Failed to start queues");
-               return -1;
+               goto err_mtu;
        }
 
        if (idpf_vc_ena_dis_vport(vport, true) != 0) {
                PMD_DRV_LOG(ERR, "Failed to enable vport");
-               return -1;
+               goto err_vport;
        }
 
        return 0;
+err_vport:
+       idpf_stop_queues(dev);
+err_mtu:
+       return -1;
 }
 
 static int
@@ -325,6 +331,8 @@ idpf_dev_stop(struct rte_eth_dev *dev)
 
        idpf_vc_ena_dis_vport(vport, false);
 
+       idpf_stop_queues(dev);
+
        return 0;
 }
 
diff --git a/drivers/net/idpf/idpf_rxtx.c b/drivers/net/idpf/idpf_rxtx.c
index b71c1ac6db..c14fd4324e 100644
--- a/drivers/net/idpf/idpf_rxtx.c
+++ b/drivers/net/idpf/idpf_rxtx.c
@@ -71,6 +71,55 @@ check_tx_thresh(uint16_t nb_desc, uint16_t tx_rs_thresh,
        return 0;
 }
 
+static inline void
+release_rxq_mbufs(struct idpf_rx_queue *rxq)
+{
+       uint16_t i;
+
+       if (rxq->sw_ring == NULL)
+               return;
+
+       for (i = 0; i < rxq->nb_rx_desc; i++) {
+               if (rxq->sw_ring[i] != NULL) {
+                       rte_pktmbuf_free_seg(rxq->sw_ring[i]);
+                       rxq->sw_ring[i] = NULL;
+               }
+       }
+}
+
+static inline void
+release_txq_mbufs(struct idpf_tx_queue *txq)
+{
+       uint16_t nb_desc, i;
+
+       if (txq == NULL || txq->sw_ring == NULL) {
+               PMD_DRV_LOG(DEBUG, "Pointer to txq or sw_ring is NULL");
+               return;
+       }
+
+       if (txq->sw_nb_desc != 0) {
+               /* For split queue model, descriptor ring */
+               nb_desc = txq->sw_nb_desc;
+       } else {
+               /* For single queue model */
+               nb_desc = txq->nb_tx_desc;
+       }
+       for (i = 0; i < nb_desc; i++) {
+               if (txq->sw_ring[i].mbuf != NULL) {
+                       rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+                       txq->sw_ring[i].mbuf = NULL;
+               }
+       }
+}
+
+static const struct idpf_rxq_ops def_rxq_ops = {
+       .release_mbufs = release_rxq_mbufs,
+};
+
+static const struct idpf_txq_ops def_txq_ops = {
+       .release_mbufs = release_txq_mbufs,
+};
+
 static inline void
 reset_split_rx_descq(struct idpf_rx_queue *rxq)
 {
@@ -311,6 +360,7 @@ idpf_rx_split_bufq_setup(struct rte_eth_dev *dev, struct idpf_rx_queue *bufq,
        bufq->q_set = true;
        bufq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_buf_qtail_start +
                         queue_idx * vport->chunks_info.rx_buf_qtail_spacing);
+       bufq->ops = &def_rxq_ops;
 
        /* TODO: allow bulk or vec */
 
@@ -552,6 +602,7 @@ idpf_rx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        dev->data->rx_queues[queue_idx] = rxq;
        rxq->qrx_tail = hw->hw_addr + (vport->chunks_info.rx_qtail_start +
                        queue_idx * vport->chunks_info.rx_qtail_spacing);
+       rxq->ops = &def_rxq_ops;
 
        return 0;
 }
@@ -654,6 +705,7 @@ idpf_tx_split_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        reset_split_tx_descq(txq);
        txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
                        queue_idx * vport->chunks_info.tx_qtail_spacing);
+       txq->ops = &def_txq_ops;
 
        /* Allocate the TX completion queue data structure. */
        txq->complq = rte_zmalloc_socket("idpf splitq cq",
@@ -778,6 +830,7 @@ idpf_tx_single_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        dev->data->tx_queues[queue_idx] = txq;
        txq->qtx_tail = hw->hw_addr + (vport->chunks_info.tx_qtail_start +
                        queue_idx * vport->chunks_info.tx_qtail_spacing);
+       txq->ops = &def_txq_ops;
 
        return 0;
 }
@@ -1010,3 +1063,90 @@ idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
        return err;
 }
 
+int
+idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+       struct idpf_vport *vport = dev->data->dev_private;
+       struct idpf_rx_queue *rxq;
+       int err;
+
+       if (rx_queue_id >= dev->data->nb_rx_queues)
+               return -EINVAL;
+
+       err = idpf_switch_queue(vport, rx_queue_id, true, false);
+       if (err != 0) {
+               PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+                           rx_queue_id);
+               return err;
+       }
+
+       rxq = dev->data->rx_queues[rx_queue_id];
+       if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+               rxq->ops->release_mbufs(rxq);
+               reset_single_rx_queue(rxq);
+       } else {
+               rxq->bufq1->ops->release_mbufs(rxq->bufq1);
+               rxq->bufq2->ops->release_mbufs(rxq->bufq2);
+               reset_split_rx_queue(rxq);
+       }
+       dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+       return 0;
+}
+
+int
+idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+       struct idpf_vport *vport = dev->data->dev_private;
+       struct idpf_tx_queue *txq;
+       int err;
+
+       if (tx_queue_id >= dev->data->nb_tx_queues)
+               return -EINVAL;
+
+       err = idpf_switch_queue(vport, tx_queue_id, false, false);
+       if (err != 0) {
+               PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+                           tx_queue_id);
+               return err;
+       }
+
+       txq = dev->data->tx_queues[tx_queue_id];
+       txq->ops->release_mbufs(txq);
+       if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE) {
+               reset_single_tx_queue(txq);
+       } else {
+               reset_split_tx_descq(txq);
+               reset_split_tx_complq(txq->complq);
+       }
+       dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+       return 0;
+}
+
+void
+idpf_stop_queues(struct rte_eth_dev *dev)
+{
+       struct idpf_rx_queue *rxq;
+       struct idpf_tx_queue *txq;
+       int i;
+
+       for (i = 0; i < dev->data->nb_rx_queues; i++) {
+               rxq = dev->data->rx_queues[i];
+               if (rxq == NULL)
+                       continue;
+
+               if (idpf_rx_queue_stop(dev, i) != 0)
+                       PMD_DRV_LOG(WARNING, "Failed to stop Rx queue %d", i);
+       }
+
+       for (i = 0; i < dev->data->nb_tx_queues; i++) {
+               txq = dev->data->tx_queues[i];
+               if (txq == NULL)
+                       continue;
+
+               if (idpf_tx_queue_stop(dev, i) != 0)
+                       PMD_DRV_LOG(WARNING, "Failed to stop Tx queue %d", i);
+       }
+}
+
diff --git a/drivers/net/idpf/idpf_rxtx.h b/drivers/net/idpf/idpf_rxtx.h
index d076903e3c..e1de436a4a 100644
--- a/drivers/net/idpf/idpf_rxtx.h
+++ b/drivers/net/idpf/idpf_rxtx.h
@@ -111,16 +111,27 @@ struct idpf_tx_queue {
        struct idpf_tx_queue *complq;
 };
 
+struct idpf_rxq_ops {
+       void (*release_mbufs)(struct idpf_rx_queue *rxq);
+};
+
+struct idpf_txq_ops {
+       void (*release_mbufs)(struct idpf_tx_queue *txq);
+};
+
 int idpf_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                        uint16_t nb_desc, unsigned int socket_id,
                        const struct rte_eth_rxconf *rx_conf,
                        struct rte_mempool *mp);
 int idpf_rx_queue_init(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int idpf_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int idpf_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int idpf_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                        uint16_t nb_desc, unsigned int socket_id,
                        const struct rte_eth_txconf *tx_conf);
 int idpf_tx_queue_init(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int idpf_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int idpf_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 
+void idpf_stop_queues(struct rte_eth_dev *dev);
 #endif /* _IDPF_RXTX_H_ */
diff --git a/drivers/net/idpf/idpf_vchnl.c b/drivers/net/idpf/idpf_vchnl.c
index 5e5508e999..9138799989 100644
--- a/drivers/net/idpf/idpf_vchnl.c
+++ b/drivers/net/idpf/idpf_vchnl.c
@@ -891,6 +891,75 @@ idpf_switch_queue(struct idpf_vport *vport, uint16_t qid,
        return err;
 }
 
+#define IDPF_RXTX_QUEUE_CHUNKS_NUM     2
+int
+idpf_vc_ena_dis_queues(struct idpf_vport *vport, bool enable)
+{
+       struct idpf_adapter *adapter = vport->adapter;
+       struct virtchnl2_del_ena_dis_queues *queue_select;
+       struct virtchnl2_queue_chunk *queue_chunk;
+       uint32_t type;
+       struct idpf_cmd_info args;
+       uint16_t num_chunks;
+       int err, len;
+
+       num_chunks = IDPF_RXTX_QUEUE_CHUNKS_NUM;
+       if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+               num_chunks++;
+       if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT)
+               num_chunks++;
+
+       len = sizeof(struct virtchnl2_del_ena_dis_queues) +
+               sizeof(struct virtchnl2_queue_chunk) * (num_chunks - 1);
+       queue_select = rte_zmalloc("queue_select", len, 0);
+       if (queue_select == NULL)
+               return -ENOMEM;
+
+       queue_chunk = queue_select->chunks.chunks;
+       queue_select->chunks.num_chunks = num_chunks;
+       queue_select->vport_id = vport->vport_id;
+
+       type = VIRTCHNL2_QUEUE_TYPE_RX;
+       queue_chunk[type].type = type;
+       queue_chunk[type].start_queue_id = vport->chunks_info.rx_start_qid;
+       queue_chunk[type].num_queues = vport->num_rx_q;
+
+       type = VIRTCHNL2_QUEUE_TYPE_TX;
+       queue_chunk[type].type = type;
+       queue_chunk[type].start_queue_id = vport->chunks_info.tx_start_qid;
+       queue_chunk[type].num_queues = vport->num_tx_q;
+
+       if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+               type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
+               queue_chunk[type].type = type;
+               queue_chunk[type].start_queue_id =
+                       vport->chunks_info.rx_buf_start_qid;
+               queue_chunk[type].num_queues = vport->num_rx_bufq;
+       }
+
+       if (vport->txq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+               type = VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
+               queue_chunk[type].type = type;
+               queue_chunk[type].start_queue_id =
+                       vport->chunks_info.tx_compl_start_qid;
+               queue_chunk[type].num_queues = vport->num_tx_complq;
+       }
+
+       args.ops = enable ? VIRTCHNL2_OP_ENABLE_QUEUES :
+               VIRTCHNL2_OP_DISABLE_QUEUES;
+       args.in_args = (u8 *)queue_select;
+       args.in_args_size = len;
+       args.out_buffer = adapter->mbx_resp;
+       args.out_size = IDPF_DFLT_MBX_BUF_SIZE;
+       err = idpf_execute_vc_cmd(adapter, &args);
+       if (err != 0)
+               PMD_DRV_LOG(ERR, "Failed to execute command of VIRTCHNL2_OP_%s_QUEUES",
+                           enable ? "ENABLE" : "DISABLE");
+
+       rte_free(queue_select);
+       return err;
+}
+
 int
 idpf_vc_ena_dis_vport(struct idpf_vport *vport, bool enable)
 {
-- 
2.34.1
