In the async data path, call the rte_vhost_async_get_inflight_thread_unsafe() API to get the number of inflight packets directly, instead of maintaining a pkts_inflight counter in the application.
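For reference, a minimal sketch of the drain pattern this change enables. The drain_inflight_pkts() wrapper name is made up for illustration, rte_pktmbuf_free_bulk() stands in for the example's free_pkts() helper, and the remaining count is decremented locally so the loop terminates:

/*
 * Hypothetical helper: query the inflight count from the vhost
 * library and clear the queue until it drains, instead of reading
 * a counter kept by the application.
 */
#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_vhost_async.h>

static void
drain_inflight_pkts(int vid, uint16_t queue_id)
{
	uint16_t n_pkt;
	int pkts_inflight;

	/* Ask the library how many async packets are still inflight. */
	pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, queue_id);
	if (pkts_inflight <= 0)
		return;

	struct rte_mbuf *m_cpl[pkts_inflight];

	while (pkts_inflight > 0) {
		/* Retrieve completed packets and free them. */
		n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
					m_cpl, pkts_inflight);
		rte_pktmbuf_free_bulk(m_cpl, n_pkt);
		pkts_inflight -= n_pkt;
	}
}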
Signed-off-by: Xuan Ding <xuan.d...@intel.com>
---
 examples/vhost/main.c | 26 +++++++++++---------------
 examples/vhost/main.h |  1 -
 2 files changed, 11 insertions(+), 16 deletions(-)

diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index bc3d71c898..f0b74b5086 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -851,11 +851,8 @@ complete_async_pkts(struct vhost_dev *vdev)
 
 	complete_count = rte_vhost_poll_enqueue_completed(vdev->vid,
 					VIRTIO_RXQ, p_cpl, MAX_PKT_BURST);
-	if (complete_count) {
+	if (complete_count)
 		free_pkts(p_cpl, complete_count);
-		__atomic_sub_fetch(&vdev->pkts_inflight, complete_count, __ATOMIC_SEQ_CST);
-	}
-
 }
 
 static __rte_always_inline void
@@ -898,7 +895,6 @@ drain_vhost(struct vhost_dev *vdev)
 		complete_async_pkts(vdev);
 		ret = rte_vhost_submit_enqueue_burst(vdev->vid, VIRTIO_RXQ,
 						m, nr_xmit, m_cpu_cpl, &cpu_cpl_nr);
-		__atomic_add_fetch(&vdev->pkts_inflight, ret - cpu_cpl_nr, __ATOMIC_SEQ_CST);
 
 		if (cpu_cpl_nr)
 			free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1230,8 +1226,6 @@ drain_eth_rx(struct vhost_dev *vdev)
 		enqueue_count = rte_vhost_submit_enqueue_burst(vdev->vid,
 					VIRTIO_RXQ, pkts, rx_count,
 					m_cpu_cpl, &cpu_cpl_nr);
-		__atomic_add_fetch(&vdev->pkts_inflight, enqueue_count - cpu_cpl_nr,
-					__ATOMIC_SEQ_CST);
 
 		if (cpu_cpl_nr)
 			free_pkts(m_cpu_cpl, cpu_cpl_nr);
@@ -1360,6 +1354,7 @@ destroy_device(int vid)
 	struct vhost_dev *vdev = NULL;
 	int lcore;
 	uint16_t i;
+	int pkts_inflight;
 
 	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
 		if (vdev->vid == vid)
@@ -1406,13 +1401,13 @@ destroy_device(int vid)
 
 	if (async_vhost_driver) {
 		uint16_t n_pkt = 0;
-		struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+		pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, VIRTIO_RXQ);
+		struct rte_mbuf *m_cpl[pkts_inflight];
 
-		while (vdev->pkts_inflight) {
+		while (pkts_inflight) {
 			n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, VIRTIO_RXQ,
-						m_cpl, vdev->pkts_inflight);
+						m_cpl, pkts_inflight);
 			free_pkts(m_cpl, n_pkt);
-			__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
 		}
 
 		rte_vhost_async_channel_unregister(vid, VIRTIO_RXQ);
@@ -1509,6 +1504,7 @@ static int
 vring_state_changed(int vid, uint16_t queue_id, int enable)
 {
 	struct vhost_dev *vdev = NULL;
+	int pkts_inflight;
 
 	TAILQ_FOREACH(vdev, &vhost_dev_list, global_vdev_entry) {
 		if (vdev->vid == vid)
@@ -1523,13 +1519,13 @@ vring_state_changed(int vid, uint16_t queue_id, int enable)
 	if (async_vhost_driver) {
 		if (!enable) {
 			uint16_t n_pkt = 0;
-			struct rte_mbuf *m_cpl[vdev->pkts_inflight];
+			pkts_inflight = rte_vhost_async_get_inflight_thread_unsafe(vid, queue_id);
+			struct rte_mbuf *m_cpl[pkts_inflight];
 
-			while (vdev->pkts_inflight) {
+			while (pkts_inflight) {
 				n_pkt = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
-							m_cpl, vdev->pkts_inflight);
+							m_cpl, pkts_inflight);
 				free_pkts(m_cpl, n_pkt);
-				__atomic_sub_fetch(&vdev->pkts_inflight, n_pkt, __ATOMIC_SEQ_CST);
 			}
 		}
 	}
diff --git a/examples/vhost/main.h b/examples/vhost/main.h
index e7b1ac60a6..0ccdce4b4a 100644
--- a/examples/vhost/main.h
+++ b/examples/vhost/main.h
@@ -51,7 +51,6 @@ struct vhost_dev {
 	uint64_t features;
 	size_t hdr_len;
 	uint16_t nr_vrings;
-	uint16_t pkts_inflight;
 	struct rte_vhost_memory *mem;
 	struct device_statistics stats;
 	TAILQ_ENTRY(vhost_dev) global_vdev_entry;
-- 
2.17.1