update_queuing_status prevents PMD queue operations from affecting
the data plane by waiting for rx/tx_pkt_burst to stop accessing the
vhost device.
In fact, waiting is only necessary when destroying or stopping the
device; the new/start device and vring_state_changed cases do not need it.

Since the vring is locked while its state changes, unconditional
waiting may also cause deadlocks.

To avoid deadlocks and unnecessary waiting, this patch adds a flag to
control whether waiting is required.

Fixes: 9dc6bb0682 (net/vhost: fix access to freed memory)
Fixes: 1ce3c7fe14 (net/vhost: emulate device start/stop behavior)
Cc: sta...@dpdk.org

Signed-off-by: Yuan Wang <yuanx.w...@intel.com>
---
 drivers/net/vhost/rte_eth_vhost.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index a248a65df4..a280e788fb 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -716,7 +716,7 @@ eth_vhost_install_intr(struct rte_eth_dev *dev)
 }
 
 static void
-update_queuing_status(struct rte_eth_dev *dev)
+update_queuing_status(struct rte_eth_dev *dev, bool wait_queuing)
 {
        struct pmd_internal *internal = dev->data->dev_private;
        struct vhost_queue *vq;
@@ -742,7 +742,7 @@ update_queuing_status(struct rte_eth_dev *dev)
                        rte_atomic32_set(&vq->allow_queuing, 1);
                else
                        rte_atomic32_set(&vq->allow_queuing, 0);
-               while (rte_atomic32_read(&vq->while_queuing))
+               while (wait_queuing && rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }
 
@@ -754,7 +754,7 @@ update_queuing_status(struct rte_eth_dev *dev)
                        rte_atomic32_set(&vq->allow_queuing, 1);
                else
                        rte_atomic32_set(&vq->allow_queuing, 0);
-               while (rte_atomic32_read(&vq->while_queuing))
+               while (wait_queuing && rte_atomic32_read(&vq->while_queuing))
                        rte_pause();
        }
 }
@@ -836,7 +836,7 @@ new_device(int vid)
        eth_dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
 
        rte_atomic32_set(&internal->dev_attached, 1);
-       update_queuing_status(eth_dev);
+       update_queuing_status(eth_dev, false);
 
        VHOST_LOG(INFO, "Vhost device %d created\n", vid);
 
@@ -866,7 +866,7 @@ destroy_device(int vid)
        internal = eth_dev->data->dev_private;
 
        rte_atomic32_set(&internal->dev_attached, 0);
-       update_queuing_status(eth_dev);
+       update_queuing_status(eth_dev, true);
 
        eth_dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
 
@@ -976,7 +976,7 @@ vring_state_changed(int vid, uint16_t vring, int enable)
        state->max_vring = RTE_MAX(vring, state->max_vring);
        rte_spinlock_unlock(&state->lock);
 
-       update_queuing_status(eth_dev);
+       update_queuing_status(eth_dev, false);
 
        VHOST_LOG(INFO, "vring%u is %s\n",
                        vring, enable ? "enabled" : "disabled");
@@ -1163,7 +1163,7 @@ eth_dev_start(struct rte_eth_dev *eth_dev)
        }
 
        rte_atomic32_set(&internal->started, 1);
-       update_queuing_status(eth_dev);
+       update_queuing_status(eth_dev, false);
 
        return 0;
 }
@@ -1175,7 +1175,7 @@ eth_dev_stop(struct rte_eth_dev *dev)
 
        dev->data->dev_started = 0;
        rte_atomic32_set(&internal->started, 0);
-       update_queuing_status(dev);
+       update_queuing_status(dev, true);
 
        return 0;
 }
-- 
2.25.1

Reply via email to