For the asymmetric crypto producer, the event type check in
`process_crypto_request` does not pass when the pipeline has multiple
stages, because the event type is overwritten when the event is
forwarded. Dispatch based on the producer type instead.

Fixes: 8f5b549502d1 ("app/eventdev: support asym ops for crypto adapter")
Cc: sta...@dpdk.org

Signed-off-by: Volodymyr Fialko <vfia...@marvell.com>
---
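Reviewer note (outside the commit message): below is a minimal,
self-contained sketch of the failure mode, using hypothetical stand-in
types rather than the real rte_event/rte_crypto_op structures. It
assumes the app's per-stage forward helpers (atq_fwd_event()/fwd_event())
rewrite ev.event_type to RTE_EVENT_TYPE_CPU on every forward, so with
more than one stage the old RTE_EVENT_TYPE_CRYPTODEV check can no longer
match at the last stage, while the prod_crypto_type flag chosen at setup
stays valid across stages.

  #include <stdio.h>

  /* Hypothetical stand-ins for the relevant rte_event fields. */
  enum { EVENT_TYPE_CRYPTODEV, EVENT_TYPE_CPU };

  struct fake_event {
          int event_type;
          int sub_event_type;
  };

  /* Mimics the per-stage forward: the event type is rewritten, so the
   * CRYPTODEV marker set by the crypto adapter is lost. */
  static void fwd(struct fake_event *ev)
  {
          ev->sub_event_type++;
          ev->event_type = EVENT_TYPE_CPU;
  }

  int main(void)
  {
          const int prod_crypto_type = 1; /* producer is the crypto adapter */
          const int nb_stages = 3;
          struct fake_event ev = { EVENT_TYPE_CRYPTODEV, 0 };
          int stage;

          for (stage = 0; stage < nb_stages - 1; stage++)
                  fwd(&ev);

          /* Old dispatch: misses asym crypto ops when nb_stages > 1. */
          printf("event_type check:       %s\n",
                 ev.event_type == EVENT_TYPE_CRYPTODEV ? "crypto" : "missed");
          /* New dispatch: producer type does not change across stages. */
          printf("prod_crypto_type check: %s\n",
                 prod_crypto_type ? "crypto" : "missed");
          return 0;
  }
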
 app/test-eventdev/test_perf_atq.c    | 10 +++++-----
 app/test-eventdev/test_perf_common.h | 11 +++++------
 app/test-eventdev/test_perf_queue.c  | 10 +++++-----
 3 files changed, 15 insertions(+), 16 deletions(-)

diff --git a/app/test-eventdev/test_perf_atq.c b/app/test-eventdev/test_perf_atq.c
index 8326f54045..2b71f30b66 100644
--- a/app/test-eventdev/test_perf_atq.c
+++ b/app/test-eventdev/test_perf_atq.c
@@ -74,10 +74,10 @@ perf_atq_worker(void *arg, const int enable_fwd_latency)
                /* last stage in pipeline */
                if (unlikely((ev.sub_event_type % nb_stages) == laststage)) {
                        if (enable_fwd_latency)
-                               cnt = perf_process_last_stage_latency(pool,
+                               cnt = perf_process_last_stage_latency(pool, prod_crypto_type,
                                        &ev, w, bufs, sz, cnt);
                        else
-                               cnt = perf_process_last_stage(pool, &ev, w,
+                               cnt = perf_process_last_stage(pool, prod_crypto_type, &ev, w,
                                         bufs, sz, cnt);
                } else {
                        atq_fwd_event(&ev, sched_type_list, nb_stages);
@@ -141,10 +141,10 @@ perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
                        if (unlikely((ev[i].sub_event_type % nb_stages)
                                                == laststage)) {
                                if (enable_fwd_latency)
-                                       cnt = perf_process_last_stage_latency(
-                                               pool, &ev[i], w, bufs, sz, cnt);
+                                       cnt = perf_process_last_stage_latency(pool,
+                                               prod_crypto_type, &ev[i], w, bufs, sz, cnt);
                                else
-                                       cnt = perf_process_last_stage(pool,
+                                       cnt = perf_process_last_stage(pool, prod_crypto_type,
                                                &ev[i], w, bufs, sz, cnt);
 
                                ev[i].op = RTE_EVENT_OP_RELEASE;
diff --git a/app/test-eventdev/test_perf_common.h b/app/test-eventdev/test_perf_common.h
index d06d52cdf8..5b075bfbc4 100644
--- a/app/test-eventdev/test_perf_common.h
+++ b/app/test-eventdev/test_perf_common.h
@@ -108,7 +108,7 @@ struct perf_elt {
                                rte_lcore_id(), dev, port)
 
 static __rte_always_inline int
-perf_process_last_stage(struct rte_mempool *const pool,
+perf_process_last_stage(struct rte_mempool *const pool, uint8_t prod_crypto_type,
                struct rte_event *const ev, struct worker_data *const w,
                void *bufs[], int const buf_sz, uint8_t count)
 {
@@ -119,7 +119,7 @@ perf_process_last_stage(struct rte_mempool *const pool,
        rte_atomic_thread_fence(__ATOMIC_RELEASE);
        w->processed_pkts++;
 
-       if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV &&
+       if (prod_crypto_type &&
                        ((struct rte_crypto_op *)ev->event_ptr)->type ==
                                RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
                struct rte_crypto_op *op = ev->event_ptr;
@@ -137,7 +137,7 @@ perf_process_last_stage(struct rte_mempool *const pool,
 }
 
 static __rte_always_inline uint8_t
-perf_process_last_stage_latency(struct rte_mempool *const pool,
+perf_process_last_stage_latency(struct rte_mempool *const pool, uint8_t prod_crypto_type,
                struct rte_event *const ev, struct worker_data *const w,
                void *bufs[], int const buf_sz, uint8_t count)
 {
@@ -151,9 +151,8 @@ perf_process_last_stage_latency(struct rte_mempool *const pool,
        rte_atomic_thread_fence(__ATOMIC_RELEASE);
        w->processed_pkts++;
 
-       if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV &&
-                       ((struct rte_crypto_op *)m)->type ==
-                               RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
+       if (prod_crypto_type &&
+                       ((struct rte_crypto_op *)m)->type == RTE_CRYPTO_OP_TYPE_ASYMMETRIC) {
                rte_free(((struct rte_crypto_op *)m)->asym->modex.result.data);
                rte_crypto_op_free((struct rte_crypto_op *)m);
        } else {
diff --git a/app/test-eventdev/test_perf_queue.c b/app/test-eventdev/test_perf_queue.c
index 814ab9f9bd..38509eddbb 100644
--- a/app/test-eventdev/test_perf_queue.c
+++ b/app/test-eventdev/test_perf_queue.c
@@ -76,10 +76,10 @@ perf_queue_worker(void *arg, const int enable_fwd_latency)
                /* last stage in pipeline */
                if (unlikely((ev.queue_id % nb_stages) == laststage)) {
                        if (enable_fwd_latency)
-                               cnt = perf_process_last_stage_latency(pool,
+                               cnt = perf_process_last_stage_latency(pool, prod_crypto_type,
                                        &ev, w, bufs, sz, cnt);
                        else
-                               cnt = perf_process_last_stage(pool,
+                               cnt = perf_process_last_stage(pool, prod_crypto_type,
                                        &ev, w, bufs, sz, cnt);
                } else {
                        fwd_event(&ev, sched_type_list, nb_stages);
@@ -143,10 +143,10 @@ perf_queue_worker_burst(void *arg, const int enable_fwd_latency)
                        if (unlikely((ev[i].queue_id % nb_stages) ==
                                                 laststage)) {
                                if (enable_fwd_latency)
-                                       cnt = perf_process_last_stage_latency(
-                                               pool, &ev[i], w, bufs, sz, cnt);
+                                       cnt = perf_process_last_stage_latency(pool,
+                                               prod_crypto_type, &ev[i], w, bufs, sz, cnt);
                                else
-                                       cnt = perf_process_last_stage(pool,
+                                       cnt = perf_process_last_stage(pool, prod_crypto_type,
                                                &ev[i], w, bufs, sz, cnt);
 
                                ev[i].op = RTE_EVENT_OP_RELEASE;
-- 
2.25.1
