A vhost-enabled vSwitch can implement its own thread-safe vring enqueue policy.
Add the RTE_LIBRTE_VHOST_LOCKLESS_ENQ macro to make the vhost lockless enqueue
path optional. It is turned off by default.
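
With CONFIG_RTE_LIBRTE_VHOST_LOCKLESS_ENQ=n the library no longer reserves
used-ring slots with cmpset, so an application that enqueues to the same
virtqueue from several cores must serialize those calls itself. A minimal
sketch of such an application-level policy, assuming the existing
rte_vhost_enqueue_burst() API and the VIRTIO_RXQ queue id from
rte_virtio_net.h (the lock and wrapper names below are made up for
illustration, not part of this patch):

    #include <rte_mbuf.h>
    #include <rte_spinlock.h>
    #include <rte_virtio_net.h>

    /* Per-queue lock owned by the vSwitch, not by librte_vhost. */
    static rte_spinlock_t vq_lock = RTE_SPINLOCK_INITIALIZER;

    static uint16_t
    vswitch_enqueue(struct virtio_net *dev, struct rte_mbuf **pkts,
                    uint16_t count)
    {
            uint16_t sent;

            /*
             * With RTE_LIBRTE_VHOST_LOCKLESS_ENQ disabled, concurrent
             * enqueues to one vring are not safe; serialize them here.
             */
            rte_spinlock_lock(&vq_lock);
            sent = rte_vhost_enqueue_burst(dev, VIRTIO_RXQ, pkts, count);
            rte_spinlock_unlock(&vq_lock);

            return sent;
    }

A run-to-completion setup with a single enqueue core can call
rte_vhost_enqueue_burst() directly and skip the lock entirely, which is the
case this option is meant for.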

Signed-off-by: Huawei Xie <huawei.xie@intel.com>
---
 config/common_linuxapp        |  1 +
 lib/librte_vhost/vhost_rxtx.c | 24 +++++++++++++++++++++++-
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/config/common_linuxapp b/config/common_linuxapp
index 0078dc9..7f59499 100644
--- a/config/common_linuxapp
+++ b/config/common_linuxapp
@@ -421,6 +421,7 @@ CONFIG_RTE_KNI_VHOST_DEBUG_TX=n
 #
 CONFIG_RTE_LIBRTE_VHOST=n
 CONFIG_RTE_LIBRTE_VHOST_USER=y
+CONFIG_RTE_LIBRTE_VHOST_LOCKLESS_ENQ=n
 CONFIG_RTE_LIBRTE_VHOST_DEBUG=n

 #
diff --git a/lib/librte_vhost/vhost_rxtx.c b/lib/librte_vhost/vhost_rxtx.c
index 510ffe8..475be6e 100644
--- a/lib/librte_vhost/vhost_rxtx.c
+++ b/lib/librte_vhost/vhost_rxtx.c
@@ -80,7 +80,11 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
         * they need to be reserved.
         */
        do {
+#ifdef RTE_LIBRTE_VHOST_LOCKLESS_ENQ
                res_base_idx = vq->last_used_idx_res;
+#else
+               res_base_idx = vq->last_used_idx;
+#endif
                avail_idx = *((volatile uint16_t *)&vq->avail->idx);

                free_entries = (avail_idx - res_base_idx);
@@ -92,10 +96,15 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
                        return 0;

                res_end_idx = res_base_idx + count;
+
+#ifdef RTE_LIBRTE_VHOST_LOCKLESS_ENQ
                /* vq->last_used_idx_res is atomically updated. */
-               /* TODO: Allow to disable cmpset if no concurrency in application. */
                success = rte_atomic16_cmpset(&vq->last_used_idx_res,
                                res_base_idx, res_end_idx);
+#else
+               /* last_used_idx_res isn't used. */
+               success = 1;
+#endif
        } while (unlikely(success == 0));
        res_cur_idx = res_base_idx;
        LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
@@ -171,9 +180,11 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,

        rte_compiler_barrier();

+#ifdef RTE_LIBRTE_VHOST_LOCKLESS_ENQ
        /* Wait until it's our turn to add our buffer to the used ring. */
        while (unlikely(vq->last_used_idx != res_base_idx))
                rte_pause();
+#endif

        *(volatile uint16_t *)&vq->used->idx += count;
        vq->last_used_idx = res_end_idx;
@@ -422,11 +433,15 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
                uint16_t i, id;

                do {
+#ifdef RTE_LIBRTE_VHOST_LOCKLESS_ENQ
                        /*
                         * As many data cores may want access to available
                         * buffers, they need to be reserved.
                         */
                        res_base_idx = vq->last_used_idx_res;
+#else
+                       res_base_idx = vq->last_used_idx;
+#endif
                        res_cur_idx = res_base_idx;

                        do {
@@ -459,10 +474,15 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
                                }
                        } while (pkt_len > secure_len);

+#ifdef RTE_LIBRTE_VHOST_LOCKLESS_ENQ
                        /* vq->last_used_idx_res is atomically updated. */
                        success = rte_atomic16_cmpset(&vq->last_used_idx_res,
                                                        res_base_idx,
                                                        res_cur_idx);
+#else
+                       /* last_used_idx_res isn't used. */
+                       success = 1;
+#endif
                } while (success == 0);

                id = res_base_idx;
@@ -495,12 +515,14 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,

                rte_compiler_barrier();

+#ifdef RTE_LIBRTE_VHOST_LOCKLESS_ENQ
                /*
                 * Wait until it's our turn to add our buffer
                 * to the used ring.
                 */
                while (unlikely(vq->last_used_idx != res_base_idx))
                        rte_pause();
+#endif

                *(volatile uint16_t *)&vq->used->idx += entry_success;
                vq->last_used_idx = res_end_idx;
-- 
1.8.1.4
