Fix a possible race when setting the burst size from the enqueue
thread: write burst_sz to the thread parameters before enqueueing
operations, so the dequeue interrupt callback cannot read a stale
value.

Fixes: b2e2aec3239e ("app/bbdev: enhance interrupt test")
Cc: stable@dpdk.org

Signed-off-by: Hernan Vargas <hernan.vargas@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
---
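For context, a minimal standalone sketch of the corrected ordering. It uses
plain C11 atomics instead of the rte_* wrappers, and enqueue_ops() is a
hypothetical stand-in for the rte_bbdev_enqueue_*_ops() calls; this is not
the actual test code, only an illustration of the store-before-enqueue
pattern applied below.

  #include <stdatomic.h>
  #include <stdint.h>

  struct thread_params {
  	/* Read by the dequeue interrupt callback. */
  	_Atomic uint16_t burst_sz;
  };

  /* Stand-in for rte_bbdev_enqueue_*_ops(); trivially accepts all ops here. */
  static uint16_t
  enqueue_ops(void **ops, uint16_t num)
  {
  	(void)ops;
  	return num;
  }

  static void
  enqueue_batch(struct thread_params *tp, void **ops, uint16_t enqueued,
  		uint16_t num_to_enq)
  {
  	uint16_t enq = 0;

  	/* Publish the burst size before enqueueing, so the interrupt
  	 * callback raised by the enqueued descriptors never reads a
  	 * stale value when deciding how many ops to dequeue.
  	 */
  	atomic_store_explicit(&tp->burst_sz, num_to_enq,
  			memory_order_relaxed);

  	do {
  		enq += enqueue_ops(&ops[enqueued + enq], num_to_enq - enq);
  	} while (enq != num_to_enq);
  }

The hunks below apply this same reordering to each interrupt throughput
enqueue loop (ldpc_dec, dec, enc, ldpc_enc, fft and mldts).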
 app/test-bbdev/test_bbdev_perf.c | 98 ++++++++++++++++----------------
 1 file changed, 49 insertions(+), 49 deletions(-)

diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index 9841464922ac..20cd8df19be7 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -3419,15 +3419,6 @@ throughput_intr_lcore_ldpc_dec(void *arg)
                        if (unlikely(num_to_process - enqueued < num_to_enq))
                                num_to_enq = num_to_process - enqueued;
 
-                       enq = 0;
-                       do {
-                               enq += rte_bbdev_enqueue_ldpc_dec_ops(
-                                               tp->dev_id,
-                                               queue_id, &ops[enqueued],
-                                               num_to_enq);
-                       } while (unlikely(num_to_enq != enq));
-                       enqueued += enq;
-
                        /* Write to thread burst_sz current number of enqueued
                         * descriptors. It ensures that proper number of
                         * descriptors will be dequeued in callback
@@ -3438,6 +3429,15 @@ throughput_intr_lcore_ldpc_dec(void *arg)
                        rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
                                        rte_memory_order_relaxed);
 
+                       enq = 0;
+                       do {
+                               enq += rte_bbdev_enqueue_ldpc_dec_ops(
+                                               tp->dev_id,
+                                               queue_id, &ops[enqueued],
+                                               num_to_enq);
+                       } while (unlikely(num_to_enq != enq));
+                       enqueued += enq;
+
                        /* Wait until processing of previous batch is
                         * completed
                         */
@@ -3514,14 +3514,6 @@ throughput_intr_lcore_dec(void *arg)
                        if (unlikely(num_to_process - enqueued < num_to_enq))
                                num_to_enq = num_to_process - enqueued;
 
-                       enq = 0;
-                       do {
-                               enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
-                                               queue_id, &ops[enqueued],
-                                               num_to_enq);
-                       } while (unlikely(num_to_enq != enq));
-                       enqueued += enq;
-
                        /* Write to thread burst_sz current number of enqueued
                         * descriptors. It ensures that proper number of
                         * descriptors will be dequeued in callback
@@ -3532,6 +3524,14 @@ throughput_intr_lcore_dec(void *arg)
                        rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
                                        rte_memory_order_relaxed);
 
+                       enq = 0;
+                       do {
+                               enq += rte_bbdev_enqueue_dec_ops(tp->dev_id,
+                                               queue_id, &ops[enqueued],
+                                               num_to_enq);
+                       } while (unlikely(num_to_enq != enq));
+                       enqueued += enq;
+
                        /* Wait until processing of previous batch is
                         * completed
                         */
@@ -3603,14 +3603,6 @@ throughput_intr_lcore_enc(void *arg)
                        if (unlikely(num_to_process - enqueued < num_to_enq))
                                num_to_enq = num_to_process - enqueued;
 
-                       enq = 0;
-                       do {
-                               enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
-                                               queue_id, &ops[enqueued],
-                                               num_to_enq);
-                       } while (unlikely(enq != num_to_enq));
-                       enqueued += enq;
-
                        /* Write to thread burst_sz current number of enqueued
                         * descriptors. It ensures that proper number of
                         * descriptors will be dequeued in callback
@@ -3621,6 +3613,14 @@ throughput_intr_lcore_enc(void *arg)
                        rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
                                        rte_memory_order_relaxed);
 
+                       enq = 0;
+                       do {
+                               enq += rte_bbdev_enqueue_enc_ops(tp->dev_id,
+                                               queue_id, &ops[enqueued],
+                                               num_to_enq);
+                       } while (unlikely(enq != num_to_enq));
+                       enqueued += enq;
+
                        /* Wait until processing of previous batch is
                         * completed
                         */
@@ -3694,15 +3694,6 @@ throughput_intr_lcore_ldpc_enc(void *arg)
                        if (unlikely(num_to_process - enqueued < num_to_enq))
                                num_to_enq = num_to_process - enqueued;
 
-                       enq = 0;
-                       do {
-                               enq += rte_bbdev_enqueue_ldpc_enc_ops(
-                                               tp->dev_id,
-                                               queue_id, &ops[enqueued],
-                                               num_to_enq);
-                       } while (unlikely(enq != num_to_enq));
-                       enqueued += enq;
-
                        /* Write to thread burst_sz current number of enqueued
                         * descriptors. It ensures that proper number of
                         * descriptors will be dequeued in callback
@@ -3713,6 +3704,15 @@ throughput_intr_lcore_ldpc_enc(void *arg)
                        rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
                                        rte_memory_order_relaxed);
 
+                       enq = 0;
+                       do {
+                               enq += rte_bbdev_enqueue_ldpc_enc_ops(
+                                               tp->dev_id,
+                                               queue_id, &ops[enqueued],
+                                               num_to_enq);
+                       } while (unlikely(enq != num_to_enq));
+                       enqueued += enq;
+
                        /* Wait until processing of previous batch is
                         * completed
                         */
@@ -3786,14 +3786,6 @@ throughput_intr_lcore_fft(void *arg)
                        if (unlikely(num_to_process - enqueued < num_to_enq))
                                num_to_enq = num_to_process - enqueued;
 
-                       enq = 0;
-                       do {
-                               enq += rte_bbdev_enqueue_fft_ops(tp->dev_id,
-                                               queue_id, &ops[enqueued],
-                                               num_to_enq);
-                       } while (unlikely(enq != num_to_enq));
-                       enqueued += enq;
-
                        /* Write to thread burst_sz current number of enqueued
                         * descriptors. It ensures that proper number of
                         * descriptors will be dequeued in callback
@@ -3804,6 +3796,14 @@ throughput_intr_lcore_fft(void *arg)
                        rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
                                        rte_memory_order_relaxed);
 
+                       enq = 0;
+                       do {
+                               enq += rte_bbdev_enqueue_fft_ops(tp->dev_id,
+                                               queue_id, &ops[enqueued],
+                                               num_to_enq);
+                       } while (unlikely(enq != num_to_enq));
+                       enqueued += enq;
+
                        /* Wait until processing of previous batch is
                         * completed
                         */
@@ -3872,13 +3872,6 @@ throughput_intr_lcore_mldts(void *arg)
                        if (unlikely(num_to_process - enqueued < num_to_enq))
                                num_to_enq = num_to_process - enqueued;
 
-                       enq = 0;
-                       do {
-                               enq += rte_bbdev_enqueue_mldts_ops(tp->dev_id,
-                               queue_id, &ops[enqueued], num_to_enq);
-                       } while (unlikely(enq != num_to_enq));
-                       enqueued += enq;
-
                        /* Write to thread burst_sz current number of enqueued
                         * descriptors. It ensures that proper number of
                         * descriptors will be dequeued in callback
@@ -3889,6 +3882,13 @@ throughput_intr_lcore_mldts(void *arg)
                        rte_atomic_store_explicit(&tp->burst_sz, num_to_enq,
                                        rte_memory_order_relaxed);
 
+                       enq = 0;
+                       do {
+                               enq += rte_bbdev_enqueue_mldts_ops(tp->dev_id,
+                               queue_id, &ops[enqueued], num_to_enq);
+                       } while (unlikely(enq != num_to_enq));
+                       enqueued += enq;
+
                        /* Wait until processing of previous batch is
                         * completed
                         */
-- 
2.37.1
