.dev_start()/.dev_stop() roughly correspond to the local device's port being up or down. This is distinct from the remote client being connected, which roughly corresponds to link up or down. Emulate this behavior by tracking the local start/stop state separately from the connection state, and allow packets to be queued to the remote client only when both indicate the device is usable.
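
Put differently, queuing is permitted only while both flags are up; the
helper added below implements this as connected && ready (inverted, since
the fast path tests for the stopped case). The resulting states, summarized
from the hunks that follow:

/*
 * Event                            connected  ready  queuing allowed?
 * driver probe                         0        0        no
 * eth_dev_start()   ("port up")        0        1        no  (no client yet)
 * new_device()      ("link up")        1        1        yes
 * eth_dev_stop()    ("port down")      1        0        no  (client still attached)
 * destroy_device()  ("link down")      0        0        no
 */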
Signed-off-by: Chas Williams <ciwil...@brocade.com>
---
 drivers/net/vhost/rte_eth_vhost.c | 65 ++++++++++++++++++++++++++++++++-------
 1 file changed, 54 insertions(+), 11 deletions(-)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 6b11e40..d5a4540 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -100,7 +100,8 @@ struct vhost_stats {
 struct vhost_queue {
 	int vid;
-	rte_atomic32_t allow_queuing;
+	rte_atomic32_t connected;
+	rte_atomic32_t ready;
 	rte_atomic32_t while_queuing;
 	struct pmd_internal *internal;
 	struct rte_mempool *mb_pool;
 	uint8_t port;
@@ -383,18 +384,25 @@ vhost_update_packet_xstats(struct vhost_queue *vq,
 	}
 }
 
+static inline bool
+queuing_stopped(struct vhost_queue *r)
+{
+	return unlikely(rte_atomic32_read(&r->connected) == 0 ||
+			rte_atomic32_read(&r->ready) == 0);
+}
+
 static uint16_t
 eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 {
 	struct vhost_queue *r = q;
 	uint16_t i, nb_rx = 0;
 
-	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
+	if (queuing_stopped(r))
 		return 0;
 
 	rte_atomic32_set(&r->while_queuing, 1);
 
-	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
+	if (queuing_stopped(r))
 		goto out;
 
 	/* Dequeue packets from guest TX queue */
@@ -422,12 +430,12 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs)
 	struct vhost_queue *r = q;
 	uint16_t i, nb_tx = 0;
 
-	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
+	if (queuing_stopped(r))
 		return 0;
 
 	rte_atomic32_set(&r->while_queuing, 1);
 
-	if (unlikely(rte_atomic32_read(&r->allow_queuing) == 0))
+	if (queuing_stopped(r))
 		goto out;
 
 	/* Enqueue packets to guest RX queue */
@@ -546,13 +554,13 @@ new_device(int vid)
 		vq = eth_dev->data->rx_queues[i];
 		if (vq == NULL)
 			continue;
-		rte_atomic32_set(&vq->allow_queuing, 1);
+		rte_atomic32_set(&vq->connected, 1);
 	}
 	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
 		vq = eth_dev->data->tx_queues[i];
 		if (vq == NULL)
 			continue;
-		rte_atomic32_set(&vq->allow_queuing, 1);
+		rte_atomic32_set(&vq->connected, 1);
 	}
 
 	RTE_LOG(INFO, PMD, "New connection established\n");
@@ -585,7 +593,7 @@ destroy_device(int vid)
 		vq = eth_dev->data->rx_queues[i];
 		if (vq == NULL)
 			continue;
-		rte_atomic32_set(&vq->allow_queuing, 0);
+		rte_atomic32_set(&vq->connected, 0);
 		while (rte_atomic32_read(&vq->while_queuing))
 			rte_pause();
 	}
@@ -593,7 +601,7 @@ destroy_device(int vid)
 		vq = eth_dev->data->tx_queues[i];
 		if (vq == NULL)
 			continue;
-		rte_atomic32_set(&vq->allow_queuing, 0);
+		rte_atomic32_set(&vq->connected, 0);
 		while (rte_atomic32_read(&vq->while_queuing))
 			rte_pause();
 	}
@@ -770,14 +778,49 @@ vhost_driver_session_stop(void)
 }
 
 static int
-eth_dev_start(struct rte_eth_dev *dev __rte_unused)
+eth_dev_start(struct rte_eth_dev *dev)
 {
+	struct vhost_queue *vq;
+	unsigned int i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		vq = dev->data->rx_queues[i];
+		if (vq == NULL)
+			continue;
+		rte_atomic32_set(&vq->ready, 1);
+	}
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		vq = dev->data->tx_queues[i];
+		if (vq == NULL)
+			continue;
+		rte_atomic32_set(&vq->ready, 1);
+	}
+
 	return 0;
 }
 
 static void
-eth_dev_stop(struct rte_eth_dev *dev __rte_unused)
+eth_dev_stop(struct rte_eth_dev *dev)
 {
+	struct vhost_queue *vq;
+	unsigned int i;
+
+	for (i = 0; i < dev->data->nb_rx_queues; i++) {
+		vq = dev->data->rx_queues[i];
+		if (vq == NULL)
+			continue;
+		rte_atomic32_set(&vq->ready, 0);
+		while (rte_atomic32_read(&vq->while_queuing))
+			rte_pause();
+	}
+	for (i = 0; i < dev->data->nb_tx_queues; i++) {
+		vq = dev->data->tx_queues[i];
+		if (vq == NULL)
+			continue;
+		rte_atomic32_set(&vq->ready, 0);
+		while (rte_atomic32_read(&vq->while_queuing))
+			rte_pause();
+	}
 }
 
 static int
-- 
2.1.4