The enqueue/dequeue loop exits only when (ops_enqd_total >= total_ops). If the PMD cannot process the ops (the enqueue returns zero), ops_enqd_total never increases and the loop spins forever. Add a single-op enqueue/dequeue check before the loop to verify that the PMD can process the packet, and fail the test early if it cannot.
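For context, a minimal sketch of the loop shape that can hang. This is illustrative only; the function and parameter names (runner_loop_sketch, dev_id, qp_id, ops, burst_size, total_ops) are placeholders, not the exact cperf runner code:

	#include <rte_cryptodev.h>

	/* Illustrative only: if every enqueue burst returns 0, the counter
	 * never reaches total_ops and this loop never exits.
	 */
	static void
	runner_loop_sketch(uint8_t dev_id, uint16_t qp_id,
			struct rte_crypto_op **ops, uint16_t burst_size,
			uint64_t total_ops)
	{
		uint64_t ops_enqd_total = 0;

		while (ops_enqd_total < total_ops) {
			/* returns 0 when the PMD cannot process the ops */
			uint16_t ops_enqd = rte_cryptodev_enqueue_burst(dev_id,
					qp_id, ops, burst_size);
			ops_enqd_total += ops_enqd;
			/* dequeue side omitted for brevity */
		}
	}

The single-op check added by this patch runs before entering such a loop and fails the test up front when the first enqueue is rejected or the dequeue times out.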
Signed-off-by: Rupesh Chiluka <rchil...@marvell.com>
---
 app/test-crypto-perf/cperf_test_latency.c    | 54 +++++++++++++++++++
 app/test-crypto-perf/cperf_test_throughput.c | 55 ++++++++++++++++++++
 2 files changed, 109 insertions(+)

diff --git a/app/test-crypto-perf/cperf_test_latency.c b/app/test-crypto-perf/cperf_test_latency.c
index 2dd504b434..b501618d46 100644
--- a/app/test-crypto-perf/cperf_test_latency.c
+++ b/app/test-crypto-perf/cperf_test_latency.c
@@ -140,6 +140,56 @@ store_timestamp(struct rte_crypto_op *op, uint64_t timestamp)
 	priv_data->result->tsc_end = timestamp;
 }
 
+static int
+cperf_check_single_op(struct cperf_latency_ctx *ctx, uint16_t iv_offset)
+{
+	struct rte_crypto_op *ops[1];
+	uint32_t imix_idx = 0;
+	uint64_t tsc_start = 0;
+	uint64_t ops_enqd = 0, ops_deqd = 0;
+
+	/* Allocate object containing crypto operations and mbufs */
+	if (rte_mempool_get(ctx->pool, (void **)&ops[0]) != 0) {
+		RTE_LOG(ERR, USER1,
+			"Failed to allocate crypto operation "
+			"from the crypto operation pool.\n"
+			"Consider increasing the pool size "
+			"with --pool-sz\n");
+		return -1;
+	}
+
+	/* Setup crypto op, attach mbuf etc */
+	(ctx->populate_ops)(ops, ctx->src_buf_offset,
+			ctx->dst_buf_offset,
+			1, ctx->sess, ctx->options,
+			ctx->test_vector, iv_offset,
+			&imix_idx, &tsc_start);
+
+	ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, ops, 1);
+	if (ops_enqd != 1) {
+		RTE_LOG(ERR, USER1, "PMD cannot process the packet.\n");
+		return -1;
+	}
+
+	/* Dequeue processed burst of ops from crypto device */
+	tsc_start = rte_rdtsc_precise();
+	while (1) {
+		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+				ops, 1);
+
+		if (ops_deqd == 1) {
+			rte_mempool_put(ctx->pool, ops[0]);
+			return 1;
+		}
+
+		/* Check if 1 second timeout has been reached */
+		if ((rte_rdtsc_precise() - tsc_start) > rte_get_tsc_hz()) {
+			RTE_LOG(ERR, USER1, "Dequeue operation timed out.\n");
+			return -1;
+		}
+	}
+}
+
 int
 cperf_latency_test_runner(void *arg)
 {
@@ -190,6 +240,10 @@ cperf_latency_test_runner(void *arg)
 			sizeof(struct rte_crypto_sym_op) +
 			sizeof(struct cperf_op_result *);
 
+	/* Enqueue just one operation to check whether PMD returns error */
+	if (cperf_check_single_op(ctx, iv_offset) < 1)
+		return -1;
+
 	while (test_burst_size <= ctx->options->max_burst_size) {
 		uint64_t ops_enqd = 0, ops_deqd = 0;
 		uint64_t b_idx = 0;
diff --git a/app/test-crypto-perf/cperf_test_throughput.c b/app/test-crypto-perf/cperf_test_throughput.c
index c8960ab855..13b16d9670 100644
--- a/app/test-crypto-perf/cperf_test_throughput.c
+++ b/app/test-crypto-perf/cperf_test_throughput.c
@@ -128,6 +128,57 @@ cperf_verify_init_ops(struct rte_mempool *mp __rte_unused,
 	cperf_mbuf_set(op->sym->m_src, ctx->options, ctx->test_vector);
 }
 
+static int
+cperf_check_single_op(struct cperf_throughput_ctx *ctx, uint16_t iv_offset)
+{
+	struct rte_crypto_op *ops[1];
+	uint32_t imix_idx = 0;
+	uint64_t tsc_start = 0;
+	uint64_t ops_enqd = 0, ops_deqd = 0;
+
+	/* Allocate object containing crypto operations and mbufs */
+	if (rte_mempool_get(ctx->pool, (void **)&ops[0]) != 0) {
+		RTE_LOG(ERR, USER1,
+			"Failed to allocate crypto operation "
+			"from the crypto operation pool.\n"
+			"Consider increasing the pool size "
+			"with --pool-sz\n");
+		return -1;
+	}
+
+	/* Setup crypto op, attach mbuf etc */
+	if (!ctx->options->out_of_place)
+		(ctx->populate_ops)(ops, ctx->src_buf_offset,
+				ctx->dst_buf_offset,
+				1, ctx->sess,
+				ctx->options, ctx->test_vector,
+				iv_offset, &imix_idx, &tsc_start);
+
+	ops_enqd = rte_cryptodev_enqueue_burst(ctx->dev_id, ctx->qp_id, ops, 1);
+	if (ops_enqd != 1) {
+		RTE_LOG(ERR, USER1, "PMD cannot process the packet.\n");
+		return -1;
+	}
+
+	/* Dequeue processed burst of ops from crypto device */
+	tsc_start = rte_rdtsc_precise();
+	while (1) {
+		ops_deqd = rte_cryptodev_dequeue_burst(ctx->dev_id, ctx->qp_id,
+				ops, 1);
+
+		if (ops_deqd == 1) {
+			rte_mempool_put(ctx->pool, ops[0]);
+			return 1;
+		}
+
+		/* Check if 1 second timeout has been reached */
+		if ((rte_rdtsc_precise() - tsc_start) > rte_get_tsc_hz()) {
+			RTE_LOG(ERR, USER1, "Dequeue operation timed out.\n");
+			return -1;
+		}
+	}
+}
+
 int
 cperf_throughput_test_runner(void *test_ctx)
 {
@@ -176,6 +227,10 @@ cperf_throughput_test_runner(void *test_ctx)
 	if (ctx->options->out_of_place)
 		rte_mempool_obj_iter(ctx->pool, cperf_verify_init_ops, (void *)ctx);
 
+	/* Enqueue just one operation to check whether PMD returns error */
+	if (cperf_check_single_op(ctx, iv_offset) < 1)
+		return -1;
+
 	while (test_burst_size <= ctx->options->max_burst_size) {
 		uint64_t ops_enqd = 0, ops_enqd_total = 0, ops_enqd_failed = 0;
 		uint64_t ops_deqd = 0, ops_deqd_total = 0, ops_deqd_failed = 0;
-- 
2.48.1