Enabled support to validate inference output with reference
output provided by the user. Validation would be successful
only when the inference outputs are within the 'tolerance'
specified through command line option "--tolerance".

Signed-off-by: Srikanth Yalavarthi <syalavar...@marvell.com>
Acked-by: Anup Prabhu <apra...@marvell.com>
---
 app/test-mldev/meson.build             |   2 +-
 app/test-mldev/ml_options.c            |  24 ++-
 app/test-mldev/ml_options.h            |   3 +
 app/test-mldev/test_inference_common.c | 216 ++++++++++++++++++++++++-
 app/test-mldev/test_inference_common.h |   1 +
 app/test-mldev/test_model_common.h     |   1 +
 doc/guides/tools/testmldev.rst         |  39 +++++
 7 files changed, 281 insertions(+), 5 deletions(-)

diff --git a/app/test-mldev/meson.build b/app/test-mldev/meson.build
index 41d22fb22c..15db534dc2 100644
--- a/app/test-mldev/meson.build
+++ b/app/test-mldev/meson.build
@@ -21,4 +21,4 @@ sources = files(
         'test_inference_interleave.c',
 )
 
-deps += ['mldev']
+deps += ['mldev', 'hash']
diff --git a/app/test-mldev/ml_options.c b/app/test-mldev/ml_options.c
index 44df44991b..da30796a6b 100644
--- a/app/test-mldev/ml_options.c
+++ b/app/test-mldev/ml_options.c
@@ -4,6 +4,7 @@
 
 #include <errno.h>
 #include <getopt.h>
+#include <math.h>
 
 #include <rte_memory.h>
 #include <rte_mldev.h>
@@ -28,6 +29,7 @@ ml_options_default(struct ml_options *opt)
        opt->queue_pairs = 1;
        opt->queue_size = 1;
        opt->batches = 0;
+       opt->tolerance = 0.0;
        opt->debug = false;
 }
 
@@ -133,6 +135,13 @@ ml_parse_filelist(struct ml_options *opt, const char *arg)
        }
        strlcpy(opt->filelist[opt->nb_filelist].output, token, PATH_MAX);
 
+       /* reference - optional */
+       token = strtok(NULL, delim);
+       if (token != NULL)
+               strlcpy(opt->filelist[opt->nb_filelist].reference, token, 
PATH_MAX);
+       else
+               memset(opt->filelist[opt->nb_filelist].reference, 0, PATH_MAX);
+
        opt->nb_filelist++;
 
        if (opt->nb_filelist == 0) {
@@ -177,6 +186,14 @@ ml_parse_batches(struct ml_options *opt, const char *arg)
        return parser_read_uint16(&opt->batches, arg);
 }
 
+static int
+ml_parse_tolerance(struct ml_options *opt, const char *arg)
+{
+       opt->tolerance = fabs(atof(arg));
+
+       return 0;
+}
+
 static void
 ml_dump_test_options(const char *testname)
 {
@@ -193,12 +210,13 @@ ml_dump_test_options(const char *testname)
 
        if ((strcmp(testname, "inference_ordered") == 0) ||
            (strcmp(testname, "inference_interleave") == 0)) {
-               printf("\t\t--filelist         : comma separated list of model, 
input and output\n"
+               printf("\t\t--filelist         : comma separated list of model, 
input, output and reference\n"
                       "\t\t--repetitions      : number of inference 
repetitions\n"
                       "\t\t--burst_size       : inference burst size\n"
                       "\t\t--queue_pairs      : number of queue pairs to 
create\n"
                       "\t\t--queue_size       : size fo queue-pair\n"
-                      "\t\t--batches          : number of batches of input\n");
+                      "\t\t--batches          : number of batches of input\n"
+                      "\t\t--tolerance        : maximum tolerance (%%) for 
output validation\n");
                printf("\n");
        }
 }
@@ -229,6 +247,7 @@ static struct option lgopts[] = {
        {ML_QUEUE_PAIRS, 1, 0, 0},
        {ML_QUEUE_SIZE, 1, 0, 0},
        {ML_BATCHES, 1, 0, 0},
+       {ML_TOLERANCE, 1, 0, 0},
        {ML_DEBUG, 0, 0, 0},
        {ML_HELP, 0, 0, 0},
        {NULL, 0, 0, 0}};
@@ -249,6 +268,7 @@ ml_opts_parse_long(int opt_idx, struct ml_options *opt)
                {ML_QUEUE_PAIRS, ml_parse_queue_pairs},
                {ML_QUEUE_SIZE, ml_parse_queue_size},
                {ML_BATCHES, ml_parse_batches},
+               {ML_TOLERANCE, ml_parse_tolerance},
        };
 
        for (i = 0; i < RTE_DIM(parsermap); i++) {
diff --git a/app/test-mldev/ml_options.h b/app/test-mldev/ml_options.h
index 48fe064150..7f3db29656 100644
--- a/app/test-mldev/ml_options.h
+++ b/app/test-mldev/ml_options.h
@@ -22,6 +22,7 @@
 #define ML_QUEUE_PAIRS ("queue_pairs")
 #define ML_QUEUE_SIZE  ("queue_size")
 #define ML_BATCHES     ("batches")
+#define ML_TOLERANCE   ("tolerance")
 #define ML_DEBUG       ("debug")
 #define ML_HELP               ("help")
 
@@ -29,6 +30,7 @@ struct ml_filelist {
        char model[PATH_MAX];
        char input[PATH_MAX];
        char output[PATH_MAX];
+       char reference[PATH_MAX];
 };
 
 struct ml_options {
@@ -42,6 +44,7 @@ struct ml_options {
        uint16_t queue_pairs;
        uint16_t queue_size;
        uint16_t batches;
+       float tolerance;
        bool debug;
 };
 
diff --git a/app/test-mldev/test_inference_common.c 
b/app/test-mldev/test_inference_common.c
index 0f281aed6c..b605c1f5d3 100644
--- a/app/test-mldev/test_inference_common.c
+++ b/app/test-mldev/test_inference_common.c
@@ -6,6 +6,7 @@
 #include <unistd.h>
 
 #include <rte_common.h>
+#include <rte_hash_crc.h>
 #include <rte_launch.h>
 #include <rte_lcore.h>
 #include <rte_malloc.h>
@@ -15,6 +16,27 @@
 #include "ml_common.h"
 #include "test_inference_common.h"
 
+#define ML_TEST_READ_TYPE(buffer, type) (*((type *)buffer))
+
+#define ML_TEST_CHECK_OUTPUT(output, reference, tolerance)                                         \
+       ((((float)output - (float)reference) * ((float)output - (float)reference)) <=              \
+        (((float)reference * (tolerance)) / 100.0) * (((float)reference * (tolerance)) / 100.0))
+
+#define ML_OPEN_WRITE_GET_ERR(name, buffer, size, err)                         
                    \
+       do {                                                                    
                   \
+               FILE *fp = fopen(name, "w+");                                   
                   \
+               if (fp == NULL) {                                               
                   \
+                       ml_err("Unable to create file: %s, error: %s", name, 
strerror(errno));     \
+                       err = true;                                             
                   \
+               } else {                                                        
                   \
+                       if (fwrite(buffer, 1, size, fp) != size) {              
                   \
+                               ml_err("Error writing output, file: %s, error: 
%s", name,          \
+                                      strerror(errno));                        
                   \
+                               err = true;                                     
                   \
+                       }                                                       
                   \
+                       fclose(fp);                                             
                   \
+               }                                                               
                   \
+       } while (0)
+
 /* Enqueue inference requests with burst size equal to 1 */
 static int
 ml_enqueue_single(void *arg)
@@ -358,6 +380,7 @@ test_inference_opt_dump(struct ml_options *opt)
        ml_dump("burst_size", "%u", opt->burst_size);
        ml_dump("queue_pairs", "%u", opt->queue_pairs);
        ml_dump("queue_size", "%u", opt->queue_size);
+       ml_dump("tolerance", "%-7.3f", opt->tolerance);
 
        if (opt->batches == 0)
                ml_dump("batches", "%u (default)", opt->batches);
@@ -369,6 +392,8 @@ test_inference_opt_dump(struct ml_options *opt)
                ml_dump_list("model", i, opt->filelist[i].model);
                ml_dump_list("input", i, opt->filelist[i].input);
                ml_dump_list("output", i, opt->filelist[i].output);
+               if (strcmp(opt->filelist[i].reference, "\0") != 0)
+                       ml_dump_list("reference", i, 
opt->filelist[i].reference);
        }
        ml_dump_end;
 }
@@ -393,6 +418,7 @@ test_inference_setup(struct ml_test *test, struct 
ml_options *opt)
        t = ml_test_priv(test);
 
        t->nb_used = 0;
+       t->nb_valid = 0;
        t->cmn.result = ML_TEST_FAILED;
        t->cmn.opt = opt;
        memset(t->error_count, 0, RTE_MAX_LCORE * sizeof(uint64_t));
@@ -569,6 +595,9 @@ ml_inference_iomem_setup(struct ml_test *test, struct 
ml_options *opt, uint16_t
 
        /* allocate buffer for user data */
        mz_size = t->model[fid].inp_dsize + t->model[fid].out_dsize;
+       if (strcmp(opt->filelist[fid].reference, "\0") != 0)
+               mz_size += t->model[fid].out_dsize;
+
        sprintf(mz_name, "ml_user_data_%d", fid);
        mz = rte_memzone_reserve(mz_name, mz_size, opt->socket_id, 0);
        if (mz == NULL) {
@@ -579,6 +608,10 @@ ml_inference_iomem_setup(struct ml_test *test, struct 
ml_options *opt, uint16_t
 
        t->model[fid].input = mz->addr;
        t->model[fid].output = t->model[fid].input + t->model[fid].inp_dsize;
+       if (strcmp(opt->filelist[fid].reference, "\0") != 0)
+               t->model[fid].reference = t->model[fid].output + 
t->model[fid].out_dsize;
+       else
+               t->model[fid].reference = NULL;
 
        /* load input file */
        fp = fopen(opt->filelist[fid].input, "r");
@@ -607,6 +640,27 @@ ml_inference_iomem_setup(struct ml_test *test, struct 
ml_options *opt, uint16_t
        }
        fclose(fp);
 
+       /* load reference file */
+       if (t->model[fid].reference != NULL) {
+               fp = fopen(opt->filelist[fid].reference, "r");
+               if (fp == NULL) {
+                       ml_err("Failed to open reference file : %s\n",
+                              opt->filelist[fid].reference);
+                       ret = -errno;
+                       goto error;
+               }
+
+               if (fread(t->model[fid].reference, 1, t->model[fid].out_dsize, 
fp) !=
+                   t->model[fid].out_dsize) {
+                       ml_err("Failed to read reference file : %s\n",
+                              opt->filelist[fid].reference);
+                       ret = -errno;
+                       fclose(fp);
+                       goto error;
+               }
+               fclose(fp);
+       }
+
        /* create mempool for quantized input and output buffers. 
ml_request_initialize is
         * used as a callback for object creation.
         */
@@ -691,6 +745,121 @@ ml_inference_mem_destroy(struct ml_test *test, struct 
ml_options *opt)
                rte_mempool_free(t->op_pool);
 }
 
+static bool
+ml_inference_validation(struct ml_test *test, struct ml_request *req)
+{
+       struct test_inference *t = ml_test_priv((struct ml_test *)test);
+       struct ml_model *model;
+       uint32_t nb_elements;
+       uint8_t *reference;
+       uint8_t *output;
+       bool match;
+       uint32_t i;
+       uint32_t j;
+
+       model = &t->model[req->fid];
+
+       /* compare crc when tolerance is 0 */
+       if (t->cmn.opt->tolerance == 0.0) {
+               match = (rte_hash_crc(model->output, model->out_dsize, 0) ==
+                        rte_hash_crc(model->reference, model->out_dsize, 0));
+       } else {
+               output = model->output;
+               reference = model->reference;
+
+               i = 0;
+next_output:
+               nb_elements =
+                       model->info.output_info[i].shape.w * 
model->info.output_info[i].shape.x *
+                       model->info.output_info[i].shape.y * 
model->info.output_info[i].shape.z;
+               j = 0;
+next_element:
+               match = false;
+               switch (model->info.output_info[i].dtype) {
+               case RTE_ML_IO_TYPE_INT8:
+                       if (ML_TEST_CHECK_OUTPUT(ML_TEST_READ_TYPE(output, 
int8_t),
+                                                ML_TEST_READ_TYPE(reference, 
int8_t),
+                                                t->cmn.opt->tolerance))
+                               match = true;
+
+                       output += sizeof(int8_t);
+                       reference += sizeof(int8_t);
+                       break;
+               case RTE_ML_IO_TYPE_UINT8:
+                       if (ML_TEST_CHECK_OUTPUT(ML_TEST_READ_TYPE(output, 
uint8_t),
+                                                ML_TEST_READ_TYPE(reference, 
uint8_t),
+                                                t->cmn.opt->tolerance))
+                               match = true;
+
+                       output += sizeof(uint8_t);
+                       reference += sizeof(uint8_t);
+                       break;
+               case RTE_ML_IO_TYPE_INT16:
+                       if (ML_TEST_CHECK_OUTPUT(ML_TEST_READ_TYPE(output, 
int16_t),
+                                                ML_TEST_READ_TYPE(reference, 
int16_t),
+                                                t->cmn.opt->tolerance))
+                               match = true;
+
+                       output += sizeof(int16_t);
+                       reference += sizeof(int16_t);
+                       break;
+               case RTE_ML_IO_TYPE_UINT16:
+                       if (ML_TEST_CHECK_OUTPUT(ML_TEST_READ_TYPE(output, 
uint16_t),
+                                                ML_TEST_READ_TYPE(reference, 
uint16_t),
+                                                t->cmn.opt->tolerance))
+                               match = true;
+
+                       output += sizeof(uint16_t);
+                       reference += sizeof(uint16_t);
+                       break;
+               case RTE_ML_IO_TYPE_INT32:
+                       if (ML_TEST_CHECK_OUTPUT(ML_TEST_READ_TYPE(output, 
int32_t),
+                                                ML_TEST_READ_TYPE(reference, 
int32_t),
+                                                t->cmn.opt->tolerance))
+                               match = true;
+
+                       output += sizeof(int32_t);
+                       reference += sizeof(int32_t);
+                       break;
+               case RTE_ML_IO_TYPE_UINT32:
+                       if (ML_TEST_CHECK_OUTPUT(ML_TEST_READ_TYPE(output, 
uint32_t),
+                                                ML_TEST_READ_TYPE(reference, 
uint32_t),
+                                                t->cmn.opt->tolerance))
+                               match = true;
+
+                       output += sizeof(uint32_t);
+                       reference += sizeof(uint32_t);
+                       break;
+               case RTE_ML_IO_TYPE_FP32:
+                       if (ML_TEST_CHECK_OUTPUT(ML_TEST_READ_TYPE(output, 
float),
+                                                ML_TEST_READ_TYPE(reference, 
float),
+                                                t->cmn.opt->tolerance))
+                               match = true;
+
+                       output += sizeof(float);
+                       reference += sizeof(float);
+                       break;
+               default: /* other types, fp8, fp16, bfloat16 */
+                       match = true;
+               }
+
+               if (!match)
+                       goto done;
+               j++;
+               if (j < nb_elements)
+                       goto next_element;
+
+               i++;
+               if (i < model->info.nb_outputs)
+                       goto next_output;
+       }
+done:
+       if (match)
+               t->nb_valid++;
+
+       return match;
+}
+
 /* Callback for mempool object iteration. This call would dequantize output 
data. */
 static void
 ml_request_finish(struct rte_mempool *mp, void *opaque, void *obj, unsigned 
int obj_idx)
@@ -698,9 +867,10 @@ ml_request_finish(struct rte_mempool *mp, void *opaque, 
void *obj, unsigned int
        struct test_inference *t = ml_test_priv((struct ml_test *)opaque);
        struct ml_request *req = (struct ml_request *)obj;
        struct ml_model *model = &t->model[req->fid];
+       char str[PATH_MAX];
+       bool error = false;
 
        RTE_SET_USED(mp);
-       RTE_SET_USED(obj_idx);
 
        if (req->niters == 0)
                return;
@@ -708,6 +878,48 @@ ml_request_finish(struct rte_mempool *mp, void *opaque, 
void *obj, unsigned int
        t->nb_used++;
        rte_ml_io_dequantize(t->cmn.opt->dev_id, model->id, 
t->model[req->fid].nb_batches,
                             req->output, model->output);
+
+       if (model->reference == NULL) {
+               t->nb_valid++;
+               goto dump_output_pass;
+       }
+
+       if (!ml_inference_validation(opaque, req))
+               goto dump_output_fail;
+       else
+               goto dump_output_pass;
+
+dump_output_pass:
+       if (obj_idx == 0) {
+               /* write quantized output */
+               snprintf(str, PATH_MAX, "%s.q", 
t->cmn.opt->filelist[req->fid].output);
+               ML_OPEN_WRITE_GET_ERR(str, req->output, model->out_qsize, 
error);
+               if (error)
+                       return;
+
+               /* write dequantized output */
+               snprintf(str, PATH_MAX, "%s", 
t->cmn.opt->filelist[req->fid].output);
+               ML_OPEN_WRITE_GET_ERR(str, model->output, model->out_dsize, 
error);
+               if (error)
+                       return;
+       }
+
+       return;
+
+dump_output_fail:
+       if (t->cmn.opt->debug) {
+               /* dump quantized output buffer */
+               snprintf(str, PATH_MAX, "%s.q.%d", 
t->cmn.opt->filelist[req->fid].output, obj_idx);
+               ML_OPEN_WRITE_GET_ERR(str, req->output, model->out_qsize, 
error);
+               if (error)
+                       return;
+
+               /* dump dequantized output buffer */
+               snprintf(str, PATH_MAX, "%s.%d", 
t->cmn.opt->filelist[req->fid].output, obj_idx);
+               ML_OPEN_WRITE_GET_ERR(str, model->output, model->out_dsize, 
error);
+               if (error)
+                       return;
+       }
 }
 
 int
@@ -725,7 +937,7 @@ ml_inference_result(struct ml_test *test, struct ml_options 
*opt, uint16_t fid)
 
        rte_mempool_obj_iter(t->model[fid].io_pool, ml_request_finish, test);
 
-       if ((t->nb_used > 0) && (error_count == 0))
+       if ((t->nb_used == t->nb_valid) && (error_count == 0))
                t->cmn.result = ML_TEST_SUCCESS;
        else
                t->cmn.result = ML_TEST_FAILED;
diff --git a/app/test-mldev/test_inference_common.h 
b/app/test-mldev/test_inference_common.h
index 81d9b07d41..2e4889e1f7 100644
--- a/app/test-mldev/test_inference_common.h
+++ b/app/test-mldev/test_inference_common.h
@@ -38,6 +38,7 @@ struct test_inference {
        struct rte_mempool *op_pool;
 
        uint64_t nb_used;
+       uint64_t nb_valid;
        uint16_t fid;
 
        int (*enqueue)(void *arg);
diff --git a/app/test-mldev/test_model_common.h 
b/app/test-mldev/test_model_common.h
index 19429ce142..b2a75a3261 100644
--- a/app/test-mldev/test_model_common.h
+++ b/app/test-mldev/test_model_common.h
@@ -28,6 +28,7 @@ struct ml_model {
 
        uint8_t *input;
        uint8_t *output;
+       uint8_t *reference;
 
        struct rte_mempool *io_pool;
        uint32_t nb_batches;
diff --git a/doc/guides/tools/testmldev.rst b/doc/guides/tools/testmldev.rst
index 6632025fdf..25dc878f25 100644
--- a/doc/guides/tools/testmldev.rst
+++ b/doc/guides/tools/testmldev.rst
@@ -111,6 +111,11 @@ The following are the command-line options supported by 
the test application.
         Set the number batches in the input file provided for inference run. 
When not specified
         the test would assume the number of batches is equal to the batch size 
of the model.
 
+* ``--tolerance <n>``
+
+        Set the tolerance value in percentage to be used for output validation. The default
+        value is ``0``.
+
 * ``--debug``
 
         Enable the tests to run in debug mode.
@@ -273,6 +278,7 @@ Supported command line options for inference tests are 
following::
         --queue_pairs
         --queue_size
         --batches
+        --tolerance
 
 
 List of files to be used for the inference tests can be specified through the 
option
@@ -287,10 +293,20 @@ try to enqueue or dequeue ``num`` number of inferences 
per each call respectivel
 In the inference test, a pair of lcores are mapped to each queue pair. Minimum 
number of lcores
 required for the tests is equal to ``(queue_pairs * 2 + 1)``.
 
+Output validation of inference is enabled only when a reference file is specified through
+the ``--filelist`` option. The application additionally considers the tolerance value provided
+through the ``--tolerance`` option during validation. When the tolerance value is 0, the CRC32
+hashes of the inference output and the reference output are compared. When the tolerance is
+non-zero, an element-wise comparison of the output is performed. Validation is considered
+successful only when all the elements of the output tensor are within the specified tolerance.
+
 .. Note::
 
     * The ``--filelist <file_list>`` is a mandatory option for running 
inference tests.
     * Options not supported by the tests are ignored if specified.
+    * Element-wise comparison is not supported when the output dtype is fp8, fp16
+      or bfloat16. This is applicable only when the tolerance is greater than zero,
+      and only for pre-quantized models.
 
 
 INFERENCE_ORDERED Test
@@ -339,6 +355,14 @@ Example command to run inference_ordered test with 
multiple queue-pairs and queu
         --test=inference_ordered --filelist model.bin,input.bin,output.bin \
         --queue_pairs 4 --queue_size 16
 
+Example command to run inference_ordered test with output validation using a tolerance of ``1%``:
+
+.. code-block:: console
+
+    sudo <build_dir>/app/dpdk-test-mldev -c 0xf -a <PCI_ID> -- \
+        --test=inference_ordered --filelist 
model.bin,input.bin,output.bin,reference.bin \
+        --tolerance 1.0
+
 
 INFERENCE_INTERLEAVE Test
 ~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -388,9 +412,24 @@ and queue size:
         --test=inference_interleave --filelist model.bin,input.bin,output.bin \
         --queue_pairs 8 --queue_size 12 --burst_size 16
 
+Example command to run inference_interleave test with multiple models and output validation
+using a tolerance of ``2.0%``:
+
+.. code-block:: console
+
+    sudo <build_dir>/app/dpdk-test-mldev -c 0xf -a <PCI_ID> -- \
+        --test=inference_interleave \
+        --filelist model_A.bin,input_A.bin,output_A.bin,reference_A.bin \
+        --filelist model_B.bin,input_B.bin,output_B.bin,reference_B.bin \
+        --tolerance 2.0
+
 
 Debug mode
 ----------
 
 ML tests can be executed in debug mode by enabling the option ``--debug``. 
Execution of tests in
 debug mode would enable additional prints.
+
+When a validation failure is observed, the output from that buffer is written to the disk,
+with filenames following a convention similar to the one used when the test passes.
+Additionally, the index of the buffer is appended to the filenames.
-- 
2.17.1

Reply via email to