virtio_dev_start/stop support switching the traffic flow on and off.
When the device is stopped, the rx/tx burst functions return
immediately without touching the virtqueues; when it is started again,
queuing is re-enabled.

Signed-off-by: Zhiyong Yang <zhiyong.y...@intel.com>
---
 drivers/net/virtio/virtio_ethdev.c           | 37 +++++++++++++++++++++++++++-
 drivers/net/virtio/virtio_rxtx.c             | 12 +++++++++
 drivers/net/virtio/virtio_rxtx_simple.c      |  4 +++
 drivers/net/virtio/virtio_rxtx_simple_neon.c |  5 +++-
 drivers/net/virtio/virtio_rxtx_simple_sse.c  |  5 +++-
 drivers/net/virtio/virtio_user_ethdev.c      |  1 +
 6 files changed, 61 insertions(+), 3 deletions(-)
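Not part of the patch, just context for review: a minimal standalone
sketch of the gating pattern the diff below introduces. It uses C11
atomics instead of DPDK's rte_atomic32_* helpers so it builds without
DPDK, and every name in it is invented for illustration.

/*
 * Sketch only. dev_start/dev_stop toggle a per-queue allow_queuing
 * flag, and every rx/tx burst function checks it before touching the
 * device; this mimics that flow with made-up types and names.
 */
#include <stdatomic.h>
#include <stdio.h>

struct demo_queue {
	atomic_int allow_queuing;	/* stands in for virtnet_rx/tx->allow_queuing */
};

/* datapath: like virtio_recv_pkts()/virtio_xmit_pkts(), bail out when stopped */
static int
demo_burst(struct demo_queue *q)
{
	if (atomic_load(&q->allow_queuing) == 0)
		return 0;		/* device stopped: process nothing */
	return 1;			/* pretend one packet was handled */
}

/* control path: what update_queuing_status() does for each queue */
static void
demo_update_queuing_status(struct demo_queue *q, int started)
{
	atomic_store(&q->allow_queuing, started);
}

int
main(void)
{
	struct demo_queue q;

	atomic_init(&q.allow_queuing, 0);
	printf("before start: %d pkt(s)\n", demo_burst(&q));	/* 0 */
	demo_update_queuing_status(&q, 1);			/* dev_start */
	printf("after start:  %d pkt(s)\n", demo_burst(&q));	/* 1 */
	demo_update_queuing_status(&q, 0);			/* dev_stop */
	printf("after stop:   %d pkt(s)\n", demo_burst(&q));	/* 0 */
	return 0;
}
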
diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index d9986ab..5955443 100644
--- a/drivers/net/virtio/virtio_ethdev.c
+++ b/drivers/net/virtio/virtio_ethdev.c
@@ -1707,6 +1707,33 @@ virtio_dev_configure(struct rte_eth_dev *dev)
 	return 0;
 }
 
+static void
+update_queuing_status(struct rte_eth_dev *dev)
+{
+	struct virtio_hw *hw = dev->data->dev_private;
+	unsigned int i;
+	struct virtnet_rx *vnet_rx;
+	struct virtnet_tx *vnet_tx;
+	int allow_queuing = 1;
+
+	if (rte_atomic32_read(&hw->started) == 0)
+		allow_queuing = 0;
+
+	/* Wait until rx/tx_pkt_burst stops accessing virtio device */
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		vnet_rx = dev->data->rx_queues[i];
+		if (!vnet_rx)
+			continue;
+		rte_atomic32_set(&vnet_rx->allow_queuing, allow_queuing);
+	}
+
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		vnet_tx = dev->data->tx_queues[i];
+		if (!vnet_tx)
+			continue;
+		rte_atomic32_set(&vnet_tx->allow_queuing, allow_queuing);
+	}
+}
 
 static int
 virtio_dev_start(struct rte_eth_dev *dev)
@@ -1758,6 +1785,9 @@ virtio_dev_start(struct rte_eth_dev *dev)
 		virtqueue_notify(rxvq->vq);
 	}
 
+	rte_atomic32_set(&hw->started, 1);
+	update_queuing_status(dev);
+
 	PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
@@ -1819,13 +1849,15 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev)
 }
 
 /*
- * Stop device: disable interrupt and mark link down
+ * Stop device: disable interrupt, mark link down
+ * and stop the traffic flow.
  */
 static void
 virtio_dev_stop(struct rte_eth_dev *dev)
 {
 	struct rte_eth_link link;
 	struct rte_intr_conf *intr_conf = &dev->data->dev_conf.intr_conf;
+	struct virtio_hw *hw = dev->data->dev_private;
 
 	PMD_INIT_LOG(DEBUG, "stop");
 
@@ -1834,6 +1866,9 @@ virtio_dev_stop(struct rte_eth_dev *dev)
 
 	memset(&link, 0, sizeof(link));
 	virtio_dev_atomic_write_link_status(dev, &link);
+
+	rte_atomic32_set(&hw->started, 0);
+	update_queuing_status(dev);
 }
 
 static int
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index fcd9e93..3ef9678 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -736,6 +736,10 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	int offload;
 	struct virtio_net_hdr *hdr;
 
+	nb_rx = 0;
+	if (unlikely(rte_atomic32_read(&rxvq->allow_queuing) == 0))
+		return nb_rx;
+
 	nb_used = VIRTQUEUE_NUSED(vq);
 
 	virtio_rmb();
@@ -850,6 +854,10 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 	uint32_t hdr_size;
 	int offload;
 
+	nb_rx = 0;
+	if (unlikely(rte_atomic32_read(&rxvq->allow_queuing) == 0))
+		return nb_rx;
+
 	nb_used = VIRTQUEUE_NUSED(vq);
 
 	virtio_rmb();
@@ -1012,6 +1020,10 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	uint16_t nb_used, nb_tx;
 	int error;
 
+	nb_tx = 0;
+	if (unlikely(rte_atomic32_read(&txvq->allow_queuing) == 0))
+		return nb_tx;
+
 	if (unlikely(nb_pkts < 1))
 		return nb_pkts;
 
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index b651e53..d061e72 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -95,6 +95,10 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_tail, nb_commit;
 	int i;
 	uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1;
+	uint16_t nb_tx = 0;
+
+	if (unlikely(rte_atomic32_read(&txvq->allow_queuing) == 0))
+		return nb_tx;
 
 	nb_used = VIRTQUEUE_NUSED(vq);
 	rte_compiler_barrier();
diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
index 793eefb..5aa7e2f 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_neon.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_neon.c
@@ -77,7 +77,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct vring_used_elem *rused;
 	struct rte_mbuf **sw_ring;
 	struct rte_mbuf **sw_ring_end;
-	uint16_t nb_pkts_received;
+	uint16_t nb_pkts_received = 0;
 	uint8x16_t shuf_msk1 = {
 		0xFF, 0xFF, 0xFF, 0xFF, /* packet type */
@@ -106,6 +106,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		0, 0
 	};
 
+	if (unlikely(rte_atomic32_read(&rxvq->allow_queuing) == 0))
+		return nb_pkts_received;
+
 	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
 		return 0;
 
diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c
index 87bb5c6..39fe557 100644
--- a/drivers/net/virtio/virtio_rxtx_simple_sse.c
+++ b/drivers/net/virtio/virtio_rxtx_simple_sse.c
@@ -79,7 +79,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 	struct vring_used_elem *rused;
 	struct rte_mbuf **sw_ring;
 	struct rte_mbuf **sw_ring_end;
-	uint16_t nb_pkts_received;
+	uint16_t nb_pkts_received = 0;
 	__m128i shuf_msk1, shuf_msk2, len_adjust;
 
 	shuf_msk1 = _mm_set_epi8(
@@ -109,6 +109,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		0, (uint16_t)-vq->hw->vtnet_hdr_size,
 		0, 0);
 
+	if (unlikely(rte_atomic32_read(&rxvq->allow_queuing) == 0))
+		return nb_pkts_received;
+
 	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
 		return 0;
 
diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c
index d3a5cac..00b35e9 100644
--- a/drivers/net/virtio/virtio_user_ethdev.c
+++ b/drivers/net/virtio/virtio_user_ethdev.c
@@ -318,6 +318,7 @@ virtio_user_eth_dev_alloc(const char *name)
 	hw->modern = 0;
 	hw->use_simple_rxtx = 0;
 	hw->virtio_user_dev = dev;
+	rte_atomic32_set(&hw->started, 0);
 	data->dev_private = hw;
 	data->drv_name = virtio_user_driver.driver.name;
 	data->numa_node = SOCKET_ID_ANY;
-- 
2.7.4