Enable support for executing multiple batches of inferences
per enqueue request. Input and reference data for the test
should be provided appropriately for a multi-batch run. The
number of batches can be specified through the "--batches" option.

Signed-off-by: Srikanth Yalavarthi <syalavar...@marvell.com>
---
 app/test-mldev/ml_options.c            | 15 ++++++++++++---
 app/test-mldev/ml_options.h            |  2 ++
 app/test-mldev/test_inference_common.c | 23 ++++++++++++++---------
 app/test-mldev/test_model_common.c     |  6 ++++++
 app/test-mldev/test_model_common.h     |  1 +
 5 files changed, 35 insertions(+), 12 deletions(-)

diff --git a/app/test-mldev/ml_options.c b/app/test-mldev/ml_options.c
index c81dec6e30..499bfde899 100644
--- a/app/test-mldev/ml_options.c
+++ b/app/test-mldev/ml_options.c
@@ -27,6 +27,7 @@ ml_options_default(struct ml_options *opt)
        opt->burst_size = 1;
        opt->queue_pairs = 1;
        opt->queue_size = 1;
+       opt->batches = 0;
        opt->debug = false;
 }
 
@@ -170,6 +171,12 @@ ml_parse_queue_size(struct ml_options *opt, const char 
*arg)
        return parser_read_uint16(&opt->queue_size, arg);
 }
 
+static int
+ml_parse_batches(struct ml_options *opt, const char *arg)
+{
+       return parser_read_uint16(&opt->batches, arg);
+}
+
 static void
 ml_dump_test_options(const char *testname)
 {
@@ -190,7 +197,8 @@ ml_dump_test_options(const char *testname)
                       "\t\t--repetitions      : number of inference 
repetitions\n"
                       "\t\t--burst_size       : inference burst size\n"
                       "\t\t--queue_pairs      : number of queue pairs to 
create\n"
-                      "\t\t--queue_size       : size fo queue-pair\n");
+                      "\t\t--queue_size       : size of queue-pair\n"
+                      "\t\t--batches          : number of batches of input\n");
                printf("\n");
        }
 }
@@ -214,7 +222,8 @@ static struct option lgopts[] = {
        {ML_TEST, 1, 0, 0},       {ML_DEVICE_ID, 1, 0, 0},   {ML_SOCKET_ID, 1, 
0, 0},
        {ML_MODELS, 1, 0, 0},     {ML_FILELIST, 1, 0, 0},    {ML_REPETITIONS, 
1, 0, 0},
        {ML_BURST_SIZE, 1, 0, 0}, {ML_QUEUE_PAIRS, 1, 0, 0}, {ML_QUEUE_SIZE, 1, 
0, 0},
-       {ML_DEBUG, 0, 0, 0},      {ML_HELP, 0, 0, 0},        {NULL, 0, 0, 0}};
+       {ML_BATCHES, 1, 0, 0},    {ML_DEBUG, 0, 0, 0},       {ML_HELP, 0, 0, 0},
+       {NULL, 0, 0, 0}};
 
 static int
 ml_opts_parse_long(int opt_idx, struct ml_options *opt)
@@ -226,7 +235,7 @@ ml_opts_parse_long(int opt_idx, struct ml_options *opt)
                {ML_SOCKET_ID, ml_parse_socket_id},   {ML_MODELS, 
ml_parse_models},
                {ML_FILELIST, ml_parse_filelist},     {ML_REPETITIONS, 
ml_parse_repetitions},
                {ML_BURST_SIZE, ml_parse_burst_size}, {ML_QUEUE_PAIRS, 
ml_parse_queue_pairs},
-               {ML_QUEUE_SIZE, ml_parse_queue_size},
+               {ML_QUEUE_SIZE, ml_parse_queue_size}, {ML_BATCHES, 
ml_parse_batches},
        };
 
        for (i = 0; i < RTE_DIM(parsermap); i++) {
diff --git a/app/test-mldev/ml_options.h b/app/test-mldev/ml_options.h
index c4018ee9d1..48fe064150 100644
--- a/app/test-mldev/ml_options.h
+++ b/app/test-mldev/ml_options.h
@@ -21,6 +21,7 @@
 #define ML_BURST_SIZE  ("burst_size")
 #define ML_QUEUE_PAIRS ("queue_pairs")
 #define ML_QUEUE_SIZE  ("queue_size")
+#define ML_BATCHES     ("batches")
 #define ML_DEBUG       ("debug")
 #define ML_HELP               ("help")
 
@@ -40,6 +41,7 @@ struct ml_options {
        uint16_t burst_size;
        uint16_t queue_pairs;
        uint16_t queue_size;
+       uint16_t batches;
        bool debug;
 };
 
diff --git a/app/test-mldev/test_inference_common.c 
b/app/test-mldev/test_inference_common.c
index 7637d0833e..3c8c3694e8 100644
--- a/app/test-mldev/test_inference_common.c
+++ b/app/test-mldev/test_inference_common.c
@@ -50,7 +50,7 @@ ml_enqueue_single(void *arg)
                goto retry;
 
        op->model_id = t->model[fid].id;
-       op->nb_batches = t->model[fid].info.batch_size;
+       op->nb_batches = t->model[fid].nb_batches;
        op->mempool = t->op_pool;
 
        op->input.addr = req->input;
@@ -162,7 +162,7 @@ ml_enqueue_burst(void *arg)
 
        for (i = 0; i < ops_count; i++) {
                args->enq_ops[i]->model_id = t->model[fid].id;
-               args->enq_ops[i]->nb_batches = t->model[fid].info.batch_size;
+               args->enq_ops[i]->nb_batches = t->model[fid].nb_batches;
                args->enq_ops[i]->mempool = t->op_pool;
 
                args->enq_ops[i]->input.addr = args->reqs[i]->input;
@@ -357,6 +357,11 @@ test_inference_opt_dump(struct ml_options *opt)
        ml_dump("queue_pairs", "%u", opt->queue_pairs);
        ml_dump("queue_size", "%u", opt->queue_size);
 
+       if (opt->batches == 0)
+               ml_dump("batches", "%u (default)", opt->batches);
+       else
+               ml_dump("batches", "%u", opt->batches);
+
        ml_dump_begin("filelist");
        for (i = 0; i < opt->nb_filelist; i++) {
                ml_dump_list("model", i, opt->filelist[i].model);
@@ -525,8 +530,8 @@ ml_request_initialize(struct rte_mempool *mp, void *opaque, 
void *obj, unsigned
        req->niters = 0;
 
        /* quantize data */
-       rte_ml_io_quantize(t->cmn.opt->dev_id, t->model[t->fid].id,
-                          t->model[t->fid].info.batch_size, 
t->model[t->fid].input, req->input);
+       rte_ml_io_quantize(t->cmn.opt->dev_id, t->model[t->fid].id, 
t->model[t->fid].nb_batches,
+                          t->model[t->fid].input, req->input);
 }
 
 int
@@ -544,7 +549,7 @@ ml_inference_iomem_setup(struct ml_test *test, struct 
ml_options *opt, uint16_t
        int ret;
 
        /* get input buffer size */
-       ret = rte_ml_io_input_size_get(opt->dev_id, t->model[fid].id, 
t->model[fid].info.batch_size,
+       ret = rte_ml_io_input_size_get(opt->dev_id, t->model[fid].id, 
t->model[fid].nb_batches,
                                       (uint64_t *)&t->model[fid].inp_qsize,
                                       (uint64_t *)&t->model[fid].inp_dsize);
        if (ret != 0) {
@@ -553,9 +558,9 @@ ml_inference_iomem_setup(struct ml_test *test, struct 
ml_options *opt, uint16_t
        }
 
        /* get output buffer size */
-       ret = rte_ml_io_output_size_get(
-               opt->dev_id, t->model[fid].id, t->model[fid].info.batch_size,
-               (uint64_t *)&t->model[fid].out_qsize, (uint64_t 
*)&t->model[fid].out_dsize);
+       ret = rte_ml_io_output_size_get(opt->dev_id, t->model[fid].id, 
t->model[fid].nb_batches,
+                                       (uint64_t *)&t->model[fid].out_qsize,
+                                       (uint64_t *)&t->model[fid].out_dsize);
        if (ret != 0) {
                ml_err("Failed to get input size, model : %s\n", 
opt->filelist[fid].model);
                return ret;
@@ -700,7 +705,7 @@ ml_request_finish(struct rte_mempool *mp, void *opaque, 
void *obj, unsigned int
                return;
 
        t->nb_used++;
-       rte_ml_io_dequantize(t->cmn.opt->dev_id, model->id, 
t->model[req->fid].info.batch_size,
+       rte_ml_io_dequantize(t->cmn.opt->dev_id, model->id, 
t->model[req->fid].nb_batches,
                             req->output, model->output);
 }
 
diff --git a/app/test-mldev/test_model_common.c 
b/app/test-mldev/test_model_common.c
index b94d46154d..c28e452f29 100644
--- a/app/test-mldev/test_model_common.c
+++ b/app/test-mldev/test_model_common.c
@@ -71,6 +71,12 @@ ml_model_load(struct ml_test *test, struct ml_options *opt, 
struct ml_model *mod
                return ret;
        }
 
+       /* Update number of batches */
+       if (opt->batches == 0)
+               model->nb_batches = model->info.batch_size;
+       else
+               model->nb_batches = opt->batches;
+
        model->state = MODEL_LOADED;
 
        return 0;
diff --git a/app/test-mldev/test_model_common.h 
b/app/test-mldev/test_model_common.h
index 1c89ef83aa..4ee5e26b1e 100644
--- a/app/test-mldev/test_model_common.h
+++ b/app/test-mldev/test_model_common.h
@@ -30,6 +30,7 @@ struct ml_model {
        uint8_t *output;
 
        struct rte_mempool *io_pool;
+       uint32_t nb_batches;
 };
 
 int ml_model_load(struct ml_test *test, struct ml_options *opt, struct 
ml_model *model,
-- 
2.17.1

Reply via email to