Added a test case to interleave inference requests from multiple
models. The interleave test loads and starts all models, then
launches inference requests for the models using the available
queue-pairs.

Operation sequence when testing with N models and R repetitions:

(load + start) x N -> (enqueue + dequeue) x N x R ...
        -> (stop + unload) x N

The test can be executed by selecting the "inference_interleave" test.
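
For illustration, a hypothetical two-model invocation could look as
below (one --filelist entry per model); the core mask, device ID and
file names are placeholders and platform dependent:

    dpdk-test-mldev -c 0xf -a <ML_DEVICE_ID> -- \
        --test=inference_interleave \
        --filelist model_A.bin,input_A.bin,output_A.bin \
        --filelist model_B.bin,input_B.bin,output_B.bin \
        --repetitions=100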

Signed-off-by: Srikanth Yalavarthi <syalavar...@marvell.com>
---
 app/test-mldev/meson.build                 |   1 +
 app/test-mldev/ml_options.c                |   3 +-
 app/test-mldev/test_inference_common.c     |  12 +--
 app/test-mldev/test_inference_interleave.c | 118 +++++++++++++++++++++
 4 files changed, 127 insertions(+), 7 deletions(-)
 create mode 100644 app/test-mldev/test_inference_interleave.c

diff --git a/app/test-mldev/meson.build b/app/test-mldev/meson.build
index 475d76d126..41d22fb22c 100644
--- a/app/test-mldev/meson.build
+++ b/app/test-mldev/meson.build
@@ -18,6 +18,7 @@ sources = files(
         'test_model_ops.c',
         'test_inference_common.c',
         'test_inference_ordered.c',
+        'test_inference_interleave.c',
 )
 
 deps += ['mldev']
diff --git a/app/test-mldev/ml_options.c b/app/test-mldev/ml_options.c
index 10dad18fff..01ea050ee7 100644
--- a/app/test-mldev/ml_options.c
+++ b/app/test-mldev/ml_options.c
@@ -162,7 +162,8 @@ ml_dump_test_options(const char *testname)
                printf("\n");
        }
 
-       if (strcmp(testname, "inference_ordered") == 0) {
+       if ((strcmp(testname, "inference_ordered") == 0) ||
+           (strcmp(testname, "inference_interleave") == 0)) {
                printf("\t\t--filelist         : comma separated list of model, 
input and output\n"
                       "\t\t--repetitions      : number of inference 
repetitions\n");
                printf("\n");
diff --git a/app/test-mldev/test_inference_common.c b/app/test-mldev/test_inference_common.c
index ff25c056a0..ae0f4489f7 100644
--- a/app/test-mldev/test_inference_common.c
+++ b/app/test-mldev/test_inference_common.c
@@ -115,7 +115,7 @@ ml_dequeue_single(void *arg)
                total_deq += burst_deq;
                if (unlikely(op->status == RTE_ML_OP_STATUS_ERROR)) {
                        rte_ml_op_error_get(t->cmn.opt->dev_id, op, &error);
-                       ml_err("error_code = 0x%016lx, error_message = %s\n", error.errcode,
+                       ml_err("error_code = 0x%" PRIx64 ", error_message = %s\n", error.errcode,
                               error.message);
                }
                req = (struct ml_request *)op->user_ptr;
@@ -334,10 +334,10 @@ ml_request_initialize(struct rte_mempool *mp, void *opaque, void *obj, unsigned
        RTE_SET_USED(mp);
        RTE_SET_USED(obj_idx);
 
-       req->input = RTE_PTR_ADD(
-               obj, RTE_ALIGN_CEIL(sizeof(struct ml_request), t->cmn.dev_info.min_align_size));
-       req->output = RTE_PTR_ADD(req->input, RTE_ALIGN_CEIL(t->model[t->fid].inp_qsize,
-                                                            t->cmn.dev_info.min_align_size));
+       req->input = (uint8_t *)obj +
+                    RTE_ALIGN_CEIL(sizeof(struct ml_request), t->cmn.dev_info.min_align_size);
+       req->output = req->input +
+                     RTE_ALIGN_CEIL(t->model[t->fid].inp_qsize, t->cmn.dev_info.min_align_size);
        req->niters = 0;
 
        /* quantize data */
@@ -387,7 +387,7 @@ ml_inference_iomem_setup(struct ml_test *test, struct ml_options *opt, uint16_t
        }
 
        t->model[fid].input = mz->addr;
-       t->model[fid].output = RTE_PTR_ADD(t->model[fid].input, t->model[fid].inp_dsize);
+       t->model[fid].output = t->model[fid].input + t->model[fid].inp_dsize;
 
        /* load input file */
        fp = fopen(opt->filelist[fid].input, "r");
diff --git a/app/test-mldev/test_inference_interleave.c b/app/test-mldev/test_inference_interleave.c
new file mode 100644
index 0000000000..eca67f0e62
--- /dev/null
+++ b/app/test-mldev/test_inference_interleave.c
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2022 Marvell.
+ */
+
+#include <stdint.h>
+
+#include <rte_common.h>
+#include <rte_launch.h>
+
+#include "ml_common.h"
+#include "ml_test.h"
+#include "test_inference_common.h"
+#include "test_model_common.h"
+
+static int
+test_inference_interleave_driver(struct ml_test *test, struct ml_options *opt)
+{
+       struct test_inference *t;
+       uint16_t fid = 0;
+       int ret = 0;
+
+       t = ml_test_priv(test);
+
+       ret = ml_inference_mldev_setup(test, opt);
+       if (ret != 0)
+               return ret;
+
+       ret = ml_inference_mem_setup(test, opt);
+       if (ret != 0)
+               return ret;
+
+       /* load and start all models */
+       for (fid = 0; fid < opt->nb_filelist; fid++) {
+               ret = ml_model_load(test, opt, &t->model[fid], fid);
+               if (ret != 0)
+                       goto error;
+
+               ret = ml_model_start(test, opt, &t->model[fid], fid);
+               if (ret != 0)
+                       goto error;
+
+               ret = ml_inference_iomem_setup(test, opt, fid);
+               if (ret != 0)
+                       goto error;
+       }
+
+       /* launch inference requests */
+       ret = ml_inference_launch_cores(test, opt, 0, opt->nb_filelist - 1);
+       if (ret != 0) {
+               ml_err("failed to launch cores");
+               goto error;
+       }
+
+       rte_eal_mp_wait_lcore();
+
+       /* stop and unload all models */
+       for (fid = 0; fid < opt->nb_filelist; fid++) {
+               ret = ml_inference_result(test, opt, fid);
+               if (ret != ML_TEST_SUCCESS)
+                       goto error;
+
+               ml_inference_iomem_destroy(test, opt, fid);
+
+               ret = ml_model_stop(test, opt, &t->model[fid], fid);
+               if (ret != 0)
+                       goto error;
+
+               ret = ml_model_unload(test, opt, &t->model[fid], fid);
+               if (ret != 0)
+                       goto error;
+       }
+
+       ml_inference_mem_destroy(test, opt);
+
+       ret = ml_inference_mldev_destroy(test, opt);
+       if (ret != 0)
+               return ret;
+
+       t->cmn.result = ML_TEST_SUCCESS;
+
+       return 0;
+
+error:
+       ml_inference_mem_destroy(test, opt);
+       for (fid = 0; fid < opt->nb_filelist; fid++) {
+               ml_inference_iomem_destroy(test, opt, fid);
+               ml_model_stop(test, opt, &t->model[fid], fid);
+               ml_model_unload(test, opt, &t->model[fid], fid);
+       }
+
+       t->cmn.result = ML_TEST_FAILED;
+
+       return ret;
+}
+
+static int
+test_inference_interleave_result(struct ml_test *test, struct ml_options *opt)
+{
+       struct test_inference *t;
+
+       RTE_SET_USED(opt);
+
+       t = ml_test_priv(test);
+
+       return t->cmn.result;
+}
+
+static const struct ml_test_ops inference_interleave = {
+       .cap_check = test_inference_cap_check,
+       .opt_check = test_inference_opt_check,
+       .opt_dump = test_inference_opt_dump,
+       .test_setup = test_inference_setup,
+       .test_destroy = test_inference_destroy,
+       .test_driver = test_inference_interleave_driver,
+       .test_result = test_inference_interleave_result,
+};
+
+ML_TEST_REGISTER(inference_interleave);
-- 
2.17.1
