Added scale and zero point to the I/O information structure.
This enables sharing the recommended scale factor and zero
point with the user for the quantization process.

Signed-off-by: Srikanth Yalavarthi <syalavar...@marvell.com>
---
 drivers/ml/cnxk/cn10k_ml_model.c | 4 ++++
 drivers/ml/cnxk/cnxk_ml_io.h     | 3 +++
 drivers/ml/cnxk/mvtvm_ml_model.c | 4 ++++
 lib/mldev/rte_mldev.h            | 4 ++++
 4 files changed, 15 insertions(+)
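
For reference, a minimal sketch of how an application could consume the
reported values, assuming the conventional affine quantization scheme and
an INT8 quantized type, with scale and zero_point read from
struct rte_ml_io_info; the helper names below are hypothetical and not
part of the mldev API:

#include <math.h>
#include <stdint.h>

/* Quantize one float element using the reported scale and zero point;
 * the clamp range assumes an INT8 quantized type.
 */
static inline int8_t
quantize_fp32_to_i8(float x, float scale, int64_t zero_point)
{
	int64_t q = (int64_t)lroundf(x / scale) + zero_point;

	if (q < INT8_MIN)
		q = INT8_MIN;
	else if (q > INT8_MAX)
		q = INT8_MAX;

	return (int8_t)q;
}

/* Dequantize an output element back to float: x ~= (q - zero_point) * scale */
static inline float
dequantize_i8_to_fp32(int8_t q, float scale, int64_t zero_point)
{
	return ((float)q - (float)zero_point) * scale;
}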

diff --git a/drivers/ml/cnxk/cn10k_ml_model.c b/drivers/ml/cnxk/cn10k_ml_model.c
index 0325cd54f1f..12a2dda800e 100644
--- a/drivers/ml/cnxk/cn10k_ml_model.c
+++ b/drivers/ml/cnxk/cn10k_ml_model.c
@@ -586,6 +586,8 @@ cn10k_ml_model_info_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *mo
                input[i].nb_elements = io_info->input[i].nb_elements;
                input[i].size = io_info->input[i].nb_elements *
                                rte_ml_io_type_size_get(io_info->input[i].qtype);
+               input[i].scale = 1.0 / io_info->input[i].scale;
+               input[i].zero_point = 0;
        }
 
        /* Set output info */
@@ -597,6 +599,8 @@ cn10k_ml_model_info_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *mo
                output[i].nb_elements = io_info->output[i].nb_elements;
                output[i].size = io_info->output[i].nb_elements *
                                 rte_ml_io_type_size_get(io_info->output[i].qtype);
+               output[i].scale = io_info->output[i].scale;
+               output[i].zero_point = 0;
        }
 }
 
diff --git a/drivers/ml/cnxk/cnxk_ml_io.h b/drivers/ml/cnxk/cnxk_ml_io.h
index c33a9c23a11..83329c237aa 100644
--- a/drivers/ml/cnxk/cnxk_ml_io.h
+++ b/drivers/ml/cnxk/cnxk_ml_io.h
@@ -55,6 +55,9 @@ struct cnxk_ml_io {
 
        /* Scale */
        float scale;
+
+       /* Zero point */
+       int64_t zero_point;
 };
 
 /* Model / Layer IO structure */
diff --git a/drivers/ml/cnxk/mvtvm_ml_model.c b/drivers/ml/cnxk/mvtvm_ml_model.c
index e3234ae4422..88b0089295d 100644
--- a/drivers/ml/cnxk/mvtvm_ml_model.c
+++ b/drivers/ml/cnxk/mvtvm_ml_model.c
@@ -334,6 +334,8 @@ mvtvm_ml_model_info_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *mo
                input[i].nb_elements = model->mvtvm.info.input[i].nb_elements;
                input[i].size = model->mvtvm.info.input[i].nb_elements *
                                rte_ml_io_type_size_get(model->mvtvm.info.input[i].qtype);
+               input[i].scale = model->mvtvm.info.input[i].scale;
+               input[i].zero_point = 0;
        }
 
        /* Set output info */
@@ -345,6 +347,8 @@ mvtvm_ml_model_info_set(struct cnxk_ml_dev *cnxk_mldev, struct cnxk_ml_model *mo
                output[i].nb_elements = model->mvtvm.info.output[i].nb_elements;
                output[i].size = model->mvtvm.info.output[i].nb_elements *
                                 rte_ml_io_type_size_get(model->mvtvm.info.output[i].qtype);
+               output[i].scale = model->mvtvm.info.output[i].scale;
+               output[i].zero_point = 0;
        }
 
        return;
diff --git a/lib/mldev/rte_mldev.h b/lib/mldev/rte_mldev.h
index 634af3d5e1a..e2f715f140d 100644
--- a/lib/mldev/rte_mldev.h
+++ b/lib/mldev/rte_mldev.h
@@ -937,6 +937,10 @@ struct rte_ml_io_info {
        /** Number of elements in tensor */
        uint64_t size;
        /** Size of tensor in bytes */
+       float scale;
+       /** Scale factor */
+       int64_t zero_point;
+       /** Zero point */
 };
 
 /** Model information structure */
-- 
2.45.1
