From: Pavan Nikhilesh <pbhagavat...@marvell.com>

Add enqueue/dequeue operations tests.
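The tests exercise the rte_dma_enqueue_ops()/rte_dma_dequeue_ops()
pair. For reviewers, the basic flow they cover is sketched below;
this is an illustrative snippet only, not part of the patch. It
assumes a device already configured with RTE_DMA_CFG_FLAG_ENQ_DEQ, a
started vchan, and a populated struct rte_dma_op; the helper name
copy_one_op() is hypothetical:

	#include <rte_dmadev.h>
	#include <rte_pause.h>

	/* Submit one op, then busy-poll the dequeue side for it. */
	static int
	copy_one_op(int16_t dev_id, uint16_t vchan, struct rte_dma_op *op)
	{
		struct rte_dma_op *done = NULL;

		/* Both calls take an array of op pointers and a count,
		 * and return the number of ops actually processed.
		 */
		if (rte_dma_enqueue_ops(dev_id, vchan, &op, 1) != 1)
			return -1;

		/* Completed ops are handed back on the dequeue side. */
		while (rte_dma_dequeue_ops(dev_id, vchan, &done, 1) != 1)
			rte_pause();

		return 0;
	}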
Signed-off-by: Pavan Nikhilesh <pbhagavat...@marvell.com>
---
 app/test/test_dmadev.c     | 160 +++++++++++++++++++++++++++++++++++++
 app/test/test_dmadev_api.c |  76 ++++++++++++++++--
 2 files changed, 228 insertions(+), 8 deletions(-)

diff --git a/app/test/test_dmadev.c b/app/test/test_dmadev.c
index 9cbb9a6552..e9a62a0ddf 100644
--- a/app/test/test_dmadev.c
+++ b/app/test/test_dmadev.c
@@ -1052,6 +1052,147 @@ prepare_m2d_auto_free(int16_t dev_id, uint16_t vchan)
 	return 0;
 }
 
+static int
+test_enq_deq_ops(int16_t dev_id, uint16_t vchan)
+{
+#define BURST_SIZE 16
+#define ROUNDS 2E7
+#define CPY_LEN 64
+	struct rte_mempool *ops_pool, *pkt_pool;
+	struct rte_mbuf *mbufs[BURST_SIZE * 2];
+	struct rte_dma_op *ops[BURST_SIZE];
+	uint64_t enq_lat, deq_lat, start;
+	int ret, i, j, enq, deq, n, max;
+	struct rte_dma_sge ssg, dsg;
+	struct rte_dma_info info;
+	uint64_t tenq, tdeq;
+
+	memset(&info, 0, sizeof(info));
+	ret = rte_dma_info_get(dev_id, &info);
+	if (ret != 0)
+		ERR_RETURN("Error with rte_dma_info_get()\n");
+
+	pkt_pool = rte_pktmbuf_pool_create("pkt_pool", info.max_desc * 2, 0, 0,
+					   CPY_LEN + RTE_PKTMBUF_HEADROOM, rte_socket_id());
+	if (pkt_pool == NULL)
+		ERR_RETURN("Error creating pkt pool\n");
+
+	ops_pool = rte_mempool_create("ops_pool", info.max_desc,
+				      sizeof(struct rte_dma_op) + (sizeof(struct rte_dma_sge) * 2),
+				      0, 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
+	if (ops_pool == NULL)
+		ERR_RETURN("Error creating ops pool\n");
+
+	max = info.max_desc - BURST_SIZE;
+	tenq = 0;
+	tdeq = 0;
+	enq_lat = 0;
+	deq_lat = 0;
+
+	for (i = 0; i < ROUNDS / max; i++) {
+		n = 0;
+		while (n != max) {
+			if (rte_mempool_get_bulk(ops_pool, (void **)ops, BURST_SIZE) != 0)
+				continue;
+
+			if (rte_pktmbuf_alloc_bulk(pkt_pool, mbufs, BURST_SIZE * 2) != 0)
+				ERR_RETURN("Error allocating mbufs %d\n", n);
+
+			for (j = 0; j < BURST_SIZE; j++) {
+				ops[j]->src_dst_seg[0].addr = rte_pktmbuf_iova(mbufs[j]);
+				ops[j]->src_dst_seg[1].addr =
+					rte_pktmbuf_iova(mbufs[j + BURST_SIZE]);
+				ops[j]->src_dst_seg[0].length = CPY_LEN;
+				ops[j]->src_dst_seg[1].length = CPY_LEN;
+
+				ops[j]->nb_src = 1;
+				ops[j]->nb_dst = 1;
+				ops[j]->user_meta = (uint64_t)mbufs[j];
+				ops[j]->event_meta = (uint64_t)mbufs[j + BURST_SIZE];
+
+				memset((void *)(uintptr_t)ops[j]->src_dst_seg[0].addr,
+				       rte_rand() & 0xFF, CPY_LEN);
+				memset((void *)(uintptr_t)ops[j]->src_dst_seg[1].addr, 0, CPY_LEN);
+			}
+
+			start = rte_rdtsc_precise();
+			enq = rte_dma_enqueue_ops(dev_id, vchan, ops, BURST_SIZE);
+			while (enq != BURST_SIZE) {
+				enq += rte_dma_enqueue_ops(dev_id, vchan, ops + enq,
+							   BURST_SIZE - enq);
+			}
+
+			enq_lat += rte_rdtsc_precise() - start;
+			n += enq;
+		}
+		tenq += n;
+
+		memset(ops, 0, sizeof(ops));
+		n = 0;
+		while (n != max) {
+			start = rte_rdtsc_precise();
+			deq = rte_dma_dequeue_ops(dev_id, vchan, ops, BURST_SIZE);
+			while (deq != BURST_SIZE) {
+				deq += rte_dma_dequeue_ops(dev_id, vchan, ops + deq,
+							   BURST_SIZE - deq);
+			}
+			n += deq;
+			deq_lat += rte_rdtsc_precise() - start;
+
+			for (j = 0; j < deq; j++) {
+				/* check the data is correct */
+				ssg = ops[j]->src_dst_seg[0];
+				dsg = ops[j]->src_dst_seg[1];
+				if (memcmp((void *)(uintptr_t)ssg.addr, (void *)(uintptr_t)dsg.addr,
+					   ssg.length) != 0)
+					ERR_RETURN("Error with copy operation\n");
+				rte_pktmbuf_free((struct rte_mbuf *)(uintptr_t)ops[j]->user_meta);
+				rte_pktmbuf_free((struct rte_mbuf *)(uintptr_t)ops[j]->event_meta);
+			}
+			rte_mempool_put_bulk(ops_pool, (void **)ops, BURST_SIZE);
+		}
+		tdeq += n;
+
+		printf("\rEnqueued %" PRIu64 " Latency %.3f Dequeued %" PRIu64 " Latency %.3f",
+		       tenq, (double)enq_lat / tenq, tdeq, (double)deq_lat / tdeq);
+	}
+	printf("\n");
+
+	rte_mempool_free(pkt_pool);
+	rte_mempool_free(ops_pool);
+
+	return 0;
+}
+
+static int
+prepare_enq_deq_ops(int16_t dev_id, uint16_t vchan)
+{
+	const struct rte_dma_conf conf = {.nb_vchans = 1, .flags = RTE_DMA_CFG_FLAG_ENQ_DEQ};
+	struct rte_dma_vchan_conf qconf;
+	struct rte_dma_info info;
+
+	memset(&qconf, 0, sizeof(qconf));
+	memset(&info, 0, sizeof(info));
+
+	int ret = rte_dma_info_get(dev_id, &info);
+	if (ret != 0)
+		ERR_RETURN("Error with rte_dma_info_get()\n");
+
+	qconf.direction = RTE_DMA_DIR_MEM_TO_MEM;
+	qconf.nb_desc = info.max_desc;
+
+	if (rte_dma_stop(dev_id) < 0)
+		ERR_RETURN("Error stopping device %u\n", dev_id);
+	if (rte_dma_configure(dev_id, &conf) != 0)
+		ERR_RETURN("Error with rte_dma_configure()\n");
+	if (rte_dma_vchan_setup(dev_id, vchan, &qconf) < 0)
+		ERR_RETURN("Error with queue configuration\n");
+	if (rte_dma_start(dev_id) != 0)
+		ERR_RETURN("Error with rte_dma_start()\n");
+
+	return 0;
+}
+
 static int
 test_dmadev_sg_copy_setup(void)
 {
@@ -1129,6 +1270,20 @@ test_dmadev_autofree_setup(void)
 	return ret;
 }
 
+static int
+test_dmadev_enq_deq_setup(void)
+{
+	int ret = TEST_SKIPPED;
+
+	if ((info.dev_capa & RTE_DMA_CAPA_OPS_ENQ_DEQ)) {
+		if (prepare_enq_deq_ops(test_dev_id, vchan) != 0)
+			return ret;
+		ret = TEST_SUCCESS;
+	}
+
+	return ret;
+}
+
 static int
 test_dmadev_setup(void)
 {
@@ -1210,6 +1365,7 @@ test_dmadev_instance(int16_t dev_id)
 		TEST_ERR,
 		TEST_FILL,
 		TEST_M2D,
+		TEST_ENQ_DEQ,
 		TEST_END
 	};
 
@@ -1221,6 +1377,7 @@ test_dmadev_instance(int16_t dev_id)
 		{"error_handling", test_completion_handling, 1},
 		{"fill", test_enqueue_fill, 1},
 		{"m2d_auto_free", test_m2d_auto_free, 128},
+		{"dma_enq_deq", test_enq_deq_ops, 1},
 	};
 
 	static struct unit_test_suite ts = {
@@ -1249,6 +1406,9 @@ test_dmadev_instance(int16_t dev_id)
 			TEST_CASE_NAMED_WITH_DATA("m2d_autofree", test_dmadev_autofree_setup, NULL,
 						  runtest, &param[TEST_M2D]),
+			TEST_CASE_NAMED_WITH_DATA("dma_enq_deq",
+						  test_dmadev_enq_deq_setup, NULL,
+						  runtest, &param[TEST_ENQ_DEQ]),
 			TEST_CASES_END()
 		}
 	};

diff --git a/app/test/test_dmadev_api.c b/app/test/test_dmadev_api.c
index 1ae85a9a29..1ba053696b 100644
--- a/app/test/test_dmadev_api.c
+++ b/app/test/test_dmadev_api.c
@@ -289,7 +289,7 @@ test_dma_vchan_setup(void)
 }
 
 static int
-setup_vchan(int nb_vchans)
+setup_vchan(int nb_vchans, bool ena_enq_deq)
 {
 	struct rte_dma_vchan_conf vchan_conf = { 0 };
 	struct rte_dma_info dev_info = { 0 };
@@ -299,6 +299,7 @@ setup_vchan(int nb_vchans)
 	ret = rte_dma_info_get(test_dev_id, &dev_info);
 	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
 	dev_conf.nb_vchans = nb_vchans;
+	dev_conf.flags = ena_enq_deq ? RTE_DMA_CFG_FLAG_ENQ_DEQ : 0;
 	ret = rte_dma_configure(test_dev_id, &dev_conf);
 	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to configure, %d", ret);
 	vchan_conf.direction = RTE_DMA_DIR_MEM_TO_MEM;
@@ -325,7 +326,7 @@ test_dma_start_stop(void)
 	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
 
 	/* Setup one vchan for later test */
-	ret = setup_vchan(1);
+	ret = setup_vchan(1, 0);
 	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
 
 	ret = rte_dma_start(test_dev_id);
@@ -359,7 +360,7 @@ test_dma_reconfigure(void)
 		return TEST_SKIPPED;
 
 	/* Setup one vchan for later test */
-	ret = setup_vchan(1);
+	ret = setup_vchan(1, 0);
 	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
 
 	ret = rte_dma_start(test_dev_id);
@@ -371,7 +372,7 @@ test_dma_reconfigure(void)
 
 	/* Check reconfigure and vchan setup after device stopped */
 	cfg_vchans = dev_conf.nb_vchans = (dev_info.max_vchans - 1);
-	ret = setup_vchan(cfg_vchans);
+	ret = setup_vchan(cfg_vchans, 0);
 	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
 
 	ret = rte_dma_start(test_dev_id);
@@ -403,7 +404,7 @@ test_dma_stats(void)
 	RTE_TEST_ASSERT(ret == -EINVAL, "Expected -EINVAL, %d", ret);
 
 	/* Setup one vchan for later test */
-	ret = setup_vchan(1);
+	ret = setup_vchan(1, 0);
 	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
 
 	/* Check for invalid vchan */
@@ -506,7 +507,7 @@ test_dma_completed(void)
 	int ret;
 
 	/* Setup one vchan for later test */
-	ret = setup_vchan(1);
+	ret = setup_vchan(1, 0);
 	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
 
 	ret = rte_dma_start(test_dev_id);
@@ -569,7 +570,7 @@ test_dma_completed_status(void)
 	int ret;
 
 	/* Setup one vchan for later test */
-	ret = setup_vchan(1);
+	ret = setup_vchan(1, 0);
 	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
 
 	ret = rte_dma_start(test_dev_id);
@@ -637,7 +638,7 @@ test_dma_sg(void)
 
 	n_sge = RTE_MIN(dev_info.max_sges, TEST_SG_MAX);
 
-	ret = setup_vchan(1);
+	ret = setup_vchan(1, 0);
 	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
 
 	ret = rte_dma_start(test_dev_id);
@@ -699,6 +700,64 @@ test_dma_sg(void)
 	return TEST_SUCCESS;
 }
 
+static int
+test_dma_ops_enq_deq(void)
+{
+	struct rte_dma_info dev_info = {0};
+	struct rte_dma_op *ops;
+	int n_sge, i, ret;
+
+	ret = rte_dma_info_get(test_dev_id, &dev_info);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to obtain device info, %d", ret);
+	if ((dev_info.dev_capa & RTE_DMA_CAPA_OPS_ENQ_DEQ) == 0)
+		return TEST_SKIPPED;
+
+	n_sge = RTE_MIN(dev_info.max_sges, TEST_SG_MAX);
+
+	ret = setup_vchan(1, 1);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to setup one vchan, %d", ret);
+
+	ret = rte_dma_start(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to start, %d", ret);
+
+	ops = rte_zmalloc(
+		"ops", sizeof(struct rte_dma_op) + ((2 * n_sge) * sizeof(struct rte_dma_sge)), 0);
+
+	for (i = 0; i < n_sge; i++) {
+		ops->src_dst_seg[i].addr = rte_malloc_virt2iova(src_sg[i]);
+		ops->src_dst_seg[i].length = TEST_MEMCPY_SIZE;
+		ops->src_dst_seg[n_sge + i].addr = rte_malloc_virt2iova(dst_sg[i]);
+		ops->src_dst_seg[n_sge + i].length = TEST_MEMCPY_SIZE;
+	}
+
+	ops->nb_src = n_sge;
+	ops->nb_dst = n_sge;
+	sg_memory_setup(n_sge);
+
+	/* Enqueue operations */
+	ret = rte_dma_enqueue_ops(test_dev_id, 0, &ops, 1);
+	RTE_TEST_ASSERT(ret == 1, "Failed to enqueue DMA operations, %d", ret);
+
+	rte_delay_us_sleep(TEST_WAIT_US_VAL);
+
+	ops = NULL;
+	/* Dequeue operations */
+	ret = rte_dma_dequeue_ops(test_dev_id, 0, &ops, 1);
+	RTE_TEST_ASSERT(ret == 1, "Failed to dequeue DMA operations, %d", ret);
+	RTE_TEST_ASSERT(ops != NULL, "Failed to dequeue DMA operations %p", ops);
+	/* Free allocated memory for ops */
+	rte_free(ops);
+
+	ret = sg_memory_verify(n_sge);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to verify memory");
+
+	/* Stop dmadev to bring it back to a known state */
+	ret = rte_dma_stop(test_dev_id);
+	RTE_TEST_ASSERT_SUCCESS(ret, "Failed to stop, %d", ret);
+
+	return TEST_SUCCESS;
+}
+
 static struct unit_test_suite dma_api_testsuite = {
 	.suite_name = "DMA API Test Suite",
 	.setup = testsuite_setup,
@@ -717,6 +776,7 @@ static struct unit_test_suite dma_api_testsuite = {
 		TEST_CASE(test_dma_completed),
 		TEST_CASE(test_dma_completed_status),
 		TEST_CASE(test_dma_sg),
+		TEST_CASE(test_dma_ops_enq_deq),
 		TEST_CASES_END()
 	}
 };
-- 
2.43.0