Update model metadata to v2301. The revised metadata adds fields to
support up to 32 inputs/outputs per model and scratch space
relocation, and renames several existing fields. Update the driver
files to match the renamed metadata fields.

Signed-off-by: Srikanth Yalavarthi <[email protected]>
---
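Note for reviewers (below the cut line, not part of the commit message):
the revised layout keeps the first 8 I/O sections in input1[]/output1[]
and provisions the additional 24 in input2[]/output2[]. The sketch below
only illustrates how a consumer of the metadata could index across the
split arrays; the helper name cn10k_ml_model_get_input() is hypothetical
and is not added by this patch.

/* Illustration only, assuming the v2301 layout introduced below: return
 * a pointer to input section 'i', taken from input1[] for the first
 * MRVL_ML_NUM_INPUT_OUTPUT_1 entries and from input2[] for the rest.
 */
static inline struct cn10k_ml_model_metadata_input_section *
cn10k_ml_model_get_input(struct cn10k_ml_model_metadata *metadata, uint8_t i)
{
	if (i < MRVL_ML_NUM_INPUT_OUTPUT_1)
		return &metadata->input1[i];

	return &metadata->input2[i - MRVL_ML_NUM_INPUT_OUTPUT_1];
}

The same split applies to the output sections via output1[]/output2[].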
 drivers/ml/cnxk/cn10k_ml_model.c | 111 ++++++++++++++++---------------
 drivers/ml/cnxk/cn10k_ml_model.h |  36 +++++++---
 drivers/ml/cnxk/cn10k_ml_ops.c   |  50 +++++++-------
 3 files changed, 106 insertions(+), 91 deletions(-)

diff --git a/drivers/ml/cnxk/cn10k_ml_model.c b/drivers/ml/cnxk/cn10k_ml_model.c
index c0b7b061f5..a15df700aa 100644
--- a/drivers/ml/cnxk/cn10k_ml_model.c
+++ b/drivers/ml/cnxk/cn10k_ml_model.c
@@ -83,11 +83,11 @@ cn10k_ml_model_metadata_check(uint8_t *buffer, uint64_t size)
 
        /* Header version */
        rte_memcpy(version, metadata->header.version, 4 * sizeof(uint8_t));
-       if (version[0] * 1000 + version[1] * 100 < MRVL_ML_MODEL_VERSION) {
+       if (version[0] * 1000 + version[1] * 100 < MRVL_ML_MODEL_VERSION_MIN) {
                plt_err("Metadata version = %u.%u.%u.%u (< %u.%u.%u.%u) not supported", version[0],
-                       version[1], version[2], version[3], (MRVL_ML_MODEL_VERSION / 1000) % 10,
-                       (MRVL_ML_MODEL_VERSION / 100) % 10, (MRVL_ML_MODEL_VERSION / 10) % 10,
-                       MRVL_ML_MODEL_VERSION % 10);
+                       version[1], version[2], version[3], (MRVL_ML_MODEL_VERSION_MIN / 1000) % 10,
+                       (MRVL_ML_MODEL_VERSION_MIN / 100) % 10,
+                       (MRVL_ML_MODEL_VERSION_MIN / 10) % 10, MRVL_ML_MODEL_VERSION_MIN % 10);
                return -ENOTSUP;
        }
 
@@ -125,36 +125,36 @@ cn10k_ml_model_metadata_check(uint8_t *buffer, uint64_t size)
        }
 
        /* Check input count */
-       if (metadata->model.num_input > MRVL_ML_INPUT_OUTPUT_SIZE) {
+       if (metadata->model.num_input > MRVL_ML_NUM_INPUT_OUTPUT_1) {
                plt_err("Invalid metadata, num_input  = %u (> %u)", 
metadata->model.num_input,
-                       MRVL_ML_INPUT_OUTPUT_SIZE);
+                       MRVL_ML_NUM_INPUT_OUTPUT_1);
                return -EINVAL;
        }
 
        /* Check output count */
-       if (metadata->model.num_output > MRVL_ML_INPUT_OUTPUT_SIZE) {
+       if (metadata->model.num_output > MRVL_ML_NUM_INPUT_OUTPUT_1) {
                plt_err("Invalid metadata, num_output  = %u (> %u)", 
metadata->model.num_output,
-                       MRVL_ML_INPUT_OUTPUT_SIZE);
+                       MRVL_ML_NUM_INPUT_OUTPUT_1);
                return -EINVAL;
        }
 
        /* Inputs */
        for (i = 0; i < metadata->model.num_input; i++) {
-               if (rte_ml_io_type_size_get(cn10k_ml_io_type_map(metadata->input[i].input_type)) <=
+               if (rte_ml_io_type_size_get(cn10k_ml_io_type_map(metadata->input1[i].input_type)) <=
                    0) {
                        plt_err("Invalid metadata, input[%u] : input_type = 
%u", i,
-                               metadata->input[i].input_type);
+                               metadata->input1[i].input_type);
                        return -EINVAL;
                }
 
                if (rte_ml_io_type_size_get(
-                           cn10k_ml_io_type_map(metadata->input[i].model_input_type)) <= 0) {
+                           cn10k_ml_io_type_map(metadata->input1[i].model_input_type)) <= 0) {
                        plt_err("Invalid metadata, input[%u] : model_input_type = %u", i,
-                               metadata->input[i].model_input_type);
+                               metadata->input1[i].model_input_type);
                        return -EINVAL;
                }
 
-               if (metadata->input[i].relocatable != 1) {
+               if (metadata->input1[i].relocatable != 1) {
                        plt_err("Model not supported, non-relocatable input: 
%u", i);
                        return -ENOTSUP;
                }
@@ -163,20 +163,20 @@ cn10k_ml_model_metadata_check(uint8_t *buffer, uint64_t size)
        /* Outputs */
        for (i = 0; i < metadata->model.num_output; i++) {
                if (rte_ml_io_type_size_get(
-                           cn10k_ml_io_type_map(metadata->output[i].output_type)) <= 0) {
+                           cn10k_ml_io_type_map(metadata->output1[i].output_type)) <= 0) {
                        plt_err("Invalid metadata, output[%u] : output_type = %u", i,
-                               metadata->output[i].output_type);
+                               metadata->output1[i].output_type);
                        return -EINVAL;
                }
 
                if (rte_ml_io_type_size_get(
-                           cn10k_ml_io_type_map(metadata->output[i].model_output_type)) <= 0) {
+                           cn10k_ml_io_type_map(metadata->output1[i].model_output_type)) <= 0) {
                        plt_err("Invalid metadata, output[%u] : model_output_type = %u", i,
-                               metadata->output[i].model_output_type);
+                               metadata->output1[i].model_output_type);
                        return -EINVAL;
                }
 
-               if (metadata->output[i].relocatable != 1) {
+               if (metadata->output1[i].relocatable != 1) {
                        plt_err("Model not supported, non-relocatable output: 
%u", i);
                        return -ENOTSUP;
                }
@@ -191,28 +191,29 @@ cn10k_ml_model_metadata_update(struct cn10k_ml_model_metadata *metadata)
        uint8_t i;
 
        for (i = 0; i < metadata->model.num_input; i++) {
-               metadata->input[i].input_type = cn10k_ml_io_type_map(metadata->input[i].input_type);
-               metadata->input[i].model_input_type =
-                       cn10k_ml_io_type_map(metadata->input[i].model_input_type);
+               metadata->input1[i].input_type =
+                       cn10k_ml_io_type_map(metadata->input1[i].input_type);
+               metadata->input1[i].model_input_type =
+                       cn10k_ml_io_type_map(metadata->input1[i].model_input_type);
 
-               if (metadata->input[i].shape.w == 0)
-                       metadata->input[i].shape.w = 1;
+               if (metadata->input1[i].shape.w == 0)
+                       metadata->input1[i].shape.w = 1;
 
-               if (metadata->input[i].shape.x == 0)
-                       metadata->input[i].shape.x = 1;
+               if (metadata->input1[i].shape.x == 0)
+                       metadata->input1[i].shape.x = 1;
 
-               if (metadata->input[i].shape.y == 0)
-                       metadata->input[i].shape.y = 1;
+               if (metadata->input1[i].shape.y == 0)
+                       metadata->input1[i].shape.y = 1;
 
-               if (metadata->input[i].shape.z == 0)
-                       metadata->input[i].shape.z = 1;
+               if (metadata->input1[i].shape.z == 0)
+                       metadata->input1[i].shape.z = 1;
        }
 
        for (i = 0; i < metadata->model.num_output; i++) {
-               metadata->output[i].output_type =
-                       cn10k_ml_io_type_map(metadata->output[i].output_type);
-               metadata->output[i].model_output_type =
-                       cn10k_ml_io_type_map(metadata->output[i].model_output_type);
+               metadata->output1[i].output_type =
+                       cn10k_ml_io_type_map(metadata->output1[i].output_type);
+               metadata->output1[i].model_output_type =
+                       cn10k_ml_io_type_map(metadata->output1[i].model_output_type);
        }
 }
 
@@ -272,31 +273,31 @@ cn10k_ml_model_addr_update(struct cn10k_ml_model *model, uint8_t *buffer, uint8_
        addr->total_input_sz_q = 0;
        for (i = 0; i < metadata->model.num_input; i++) {
                addr->input[i].nb_elements =
-                       metadata->input[i].shape.w * metadata->input[i].shape.x *
-                       metadata->input[i].shape.y * metadata->input[i].shape.z;
+                       metadata->input1[i].shape.w * metadata->input1[i].shape.x *
+                       metadata->input1[i].shape.y * metadata->input1[i].shape.z;
                addr->input[i].sz_d = addr->input[i].nb_elements *
-                                     rte_ml_io_type_size_get(metadata->input[i].input_type);
+                                     rte_ml_io_type_size_get(metadata->input1[i].input_type);
                addr->input[i].sz_q = addr->input[i].nb_elements *
-                                     rte_ml_io_type_size_get(metadata->input[i].model_input_type);
+                                     rte_ml_io_type_size_get(metadata->input1[i].model_input_type);
                addr->total_input_sz_d += addr->input[i].sz_d;
                addr->total_input_sz_q += addr->input[i].sz_q;
 
                plt_ml_dbg("model_id = %u, input[%u] - w:%u x:%u y:%u z:%u, 
sz_d = %u sz_q = %u",
-                          model->model_id, i, metadata->input[i].shape.w,
-                          metadata->input[i].shape.x, metadata->input[i].shape.y,
-                          metadata->input[i].shape.z, addr->input[i].sz_d, addr->input[i].sz_q);
+                          model->model_id, i, metadata->input1[i].shape.w,
+                          metadata->input1[i].shape.x, metadata->input1[i].shape.y,
+                          metadata->input1[i].shape.z, addr->input[i].sz_d, addr->input[i].sz_q);
        }
 
        /* Outputs */
        addr->total_output_sz_q = 0;
        addr->total_output_sz_d = 0;
        for (i = 0; i < metadata->model.num_output; i++) {
-               addr->output[i].nb_elements = metadata->output[i].size;
+               addr->output[i].nb_elements = metadata->output1[i].size;
                addr->output[i].sz_d = addr->output[i].nb_elements *
-                                      rte_ml_io_type_size_get(metadata->output[i].output_type);
+                                      rte_ml_io_type_size_get(metadata->output1[i].output_type);
                addr->output[i].sz_q =
                        addr->output[i].nb_elements *
-                       rte_ml_io_type_size_get(metadata->output[i].model_output_type);
+                       rte_ml_io_type_size_get(metadata->output1[i].model_output_type);
                addr->total_output_sz_q += addr->output[i].sz_q;
                addr->total_output_sz_d += addr->output[i].sz_d;
 
@@ -388,24 +389,24 @@ cn10k_ml_model_info_set(struct rte_ml_dev *dev, struct cn10k_ml_model *model)
 
        /* Set input info */
        for (i = 0; i < info->nb_inputs; i++) {
-               rte_memcpy(input[i].name, metadata->input[i].input_name, MRVL_ML_INPUT_NAME_LEN);
-               input[i].dtype = metadata->input[i].input_type;
-               input[i].qtype = metadata->input[i].model_input_type;
-               input[i].shape.format = metadata->input[i].shape.format;
-               input[i].shape.w = metadata->input[i].shape.w;
-               input[i].shape.x = metadata->input[i].shape.x;
-               input[i].shape.y = metadata->input[i].shape.y;
-               input[i].shape.z = metadata->input[i].shape.z;
+               rte_memcpy(input[i].name, metadata->input1[i].input_name, MRVL_ML_INPUT_NAME_LEN);
+               input[i].dtype = metadata->input1[i].input_type;
+               input[i].qtype = metadata->input1[i].model_input_type;
+               input[i].shape.format = metadata->input1[i].shape.format;
+               input[i].shape.w = metadata->input1[i].shape.w;
+               input[i].shape.x = metadata->input1[i].shape.x;
+               input[i].shape.y = metadata->input1[i].shape.y;
+               input[i].shape.z = metadata->input1[i].shape.z;
        }
 
        /* Set output info */
        for (i = 0; i < info->nb_outputs; i++) {
-               rte_memcpy(output[i].name, metadata->output[i].output_name,
+               rte_memcpy(output[i].name, metadata->output1[i].output_name,
                           MRVL_ML_OUTPUT_NAME_LEN);
-               output[i].dtype = metadata->output[i].output_type;
-               output[i].qtype = metadata->output[i].model_output_type;
+               output[i].dtype = metadata->output1[i].output_type;
+               output[i].qtype = metadata->output1[i].model_output_type;
                output[i].shape.format = RTE_ML_IO_FORMAT_1D;
-               output[i].shape.w = metadata->output[i].size;
+               output[i].shape.w = metadata->output1[i].size;
                output[i].shape.x = 1;
                output[i].shape.y = 1;
                output[i].shape.z = 1;
diff --git a/drivers/ml/cnxk/cn10k_ml_model.h b/drivers/ml/cnxk/cn10k_ml_model.h
index b30ad5a981..bd863a8c12 100644
--- a/drivers/ml/cnxk/cn10k_ml_model.h
+++ b/drivers/ml/cnxk/cn10k_ml_model.h
@@ -21,14 +21,15 @@ enum cn10k_ml_model_state {
        ML_CN10K_MODEL_STATE_UNKNOWN,
 };
 
-/* Model Metadata : v 2.1.0.2 */
+/* Model Metadata : v 2.3.0.1 */
 #define MRVL_ML_MODEL_MAGIC_STRING "MRVL"
 #define MRVL_ML_MODEL_TARGET_ARCH  128
-#define MRVL_ML_MODEL_VERSION     2100
+#define MRVL_ML_MODEL_VERSION_MIN  2100
 #define MRVL_ML_MODEL_NAME_LEN    64
 #define MRVL_ML_INPUT_NAME_LEN    16
 #define MRVL_ML_OUTPUT_NAME_LEN           16
-#define MRVL_ML_INPUT_OUTPUT_SIZE  8
+#define MRVL_ML_NUM_INPUT_OUTPUT_1 8
+#define MRVL_ML_NUM_INPUT_OUTPUT_2 24
 
 /* Header (256-byte) */
 struct cn10k_ml_model_metadata_header {
@@ -101,10 +102,10 @@ struct cn10k_ml_model_metadata_model {
        /* Inference batch size */
        uint8_t batch_size;
 
-       /* Number of input tensors (Max 8) */
+       /* Number of input tensors (Max 32) */
        uint8_t num_input;
 
-       /* Number of output tensors (Max 8) */
+       /* Number of output tensors (Max 32) */
        uint8_t num_output;
        uint8_t reserved_1;
 
@@ -159,7 +160,14 @@ struct cn10k_ml_model_metadata_model {
         * 1 - Yes
         */
        uint8_t supports_lower_batch_size_optimization;
-       uint8_t reserved_3[59];
+       uint8_t reserved_3[3];
+
+       /* Relative DDR start address of scratch space */
+       uint64_t ddr_scratch_range_start;
+
+       /* Relative DDR end address of scratch space */
+       uint64_t ddr_scratch_range_end;
+       uint8_t reserved_4[40];
 };
 
 /* Init section (64-byte) */
@@ -303,7 +311,7 @@ struct cn10k_ml_model_metadata_output_section {
 
 /* Model data */
 struct cn10k_ml_model_metadata_data_section {
-       uint8_t reserved[4068];
+       uint8_t reserved[996];
 
        /* Beta: xx.xx.xx.xx,
         * Later: YYYYMM.xx.xx
@@ -337,13 +345,19 @@ struct cn10k_ml_model_metadata {
        struct cn10k_ml_model_metadata_weights_bias_section weights_bias;
 
        /* Input (512-bytes, 64-byte per input) provisioned for 8 inputs */
-       struct cn10k_ml_model_metadata_input_section input[MRVL_ML_INPUT_OUTPUT_SIZE];
+       struct cn10k_ml_model_metadata_input_section input1[MRVL_ML_NUM_INPUT_OUTPUT_1];
 
        /* Output (512-bytes, 64-byte per output) provisioned for 8 outputs */
-       struct cn10k_ml_model_metadata_output_section output[MRVL_ML_INPUT_OUTPUT_SIZE];
+       struct cn10k_ml_model_metadata_output_section output1[MRVL_ML_NUM_INPUT_OUTPUT_1];
 
        uint8_t reserved_2[1792];
 
+       /* Input (1536-bytes, 64-byte per input) provisioned for 24 inputs */
+       struct cn10k_ml_model_metadata_input_section input2[MRVL_ML_NUM_INPUT_OUTPUT_2];
+
+       /* Output (1536-bytes, 64-byte per output) provisioned for 24 outputs */
+       struct cn10k_ml_model_metadata_output_section output2[MRVL_ML_NUM_INPUT_OUTPUT_2];
+
        /* Model data */
        struct cn10k_ml_model_metadata_data_section data;
 
@@ -399,7 +413,7 @@ struct cn10k_ml_model_addr {
 
                /* Quantized input size */
                uint32_t sz_q;
-       } input[MRVL_ML_INPUT_OUTPUT_SIZE];
+       } input[MRVL_ML_NUM_INPUT_OUTPUT_1];
 
        /* Output address and size */
        struct {
@@ -411,7 +425,7 @@ struct cn10k_ml_model_addr {
 
                /* Quantized output size */
                uint32_t sz_q;
-       } output[MRVL_ML_INPUT_OUTPUT_SIZE];
+       } output[MRVL_ML_NUM_INPUT_OUTPUT_1];
 
        /* Total size of quantized input */
        uint32_t total_input_sz_q;
diff --git a/drivers/ml/cnxk/cn10k_ml_ops.c b/drivers/ml/cnxk/cn10k_ml_ops.c
index b5eaa24e83..aecc6e74ad 100644
--- a/drivers/ml/cnxk/cn10k_ml_ops.c
+++ b/drivers/ml/cnxk/cn10k_ml_ops.c
@@ -325,13 +325,13 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)
        print_line(fp, LINE_LEN);
        for (i = 0; i < model->metadata.model.num_input; i++) {
                fprintf(fp, "%8u  ", i);
-               fprintf(fp, "%*s  ", 16, model->metadata.input[i].input_name);
-               rte_ml_io_type_to_str(model->metadata.input[i].input_type, str, STR_LEN);
+               fprintf(fp, "%*s  ", 16, model->metadata.input1[i].input_name);
+               rte_ml_io_type_to_str(model->metadata.input1[i].input_type, str, STR_LEN);
                fprintf(fp, "%*s  ", 12, str);
-               rte_ml_io_type_to_str(model->metadata.input[i].model_input_type, str, STR_LEN);
+               rte_ml_io_type_to_str(model->metadata.input1[i].model_input_type, str, STR_LEN);
                fprintf(fp, "%*s  ", 18, str);
-               fprintf(fp, "%*s", 12, (model->metadata.input[i].quantize == 1 ? "Yes" : "No"));
-               rte_ml_io_format_to_str(model->metadata.input[i].shape.format, str, STR_LEN);
+               fprintf(fp, "%*s", 12, (model->metadata.input1[i].quantize == 1 ? "Yes" : "No"));
+               rte_ml_io_format_to_str(model->metadata.input1[i].shape.format, str, STR_LEN);
                fprintf(fp, "%*s", 16, str);
                fprintf(fp, "\n");
        }
@@ -343,12 +343,12 @@ cn10k_ml_model_print(struct rte_ml_dev *dev, uint16_t model_id, FILE *fp)
        print_line(fp, LINE_LEN);
        for (i = 0; i < model->metadata.model.num_output; i++) {
                fprintf(fp, "%8u  ", i);
-               fprintf(fp, "%*s  ", 16, model->metadata.output[i].output_name);
-               rte_ml_io_type_to_str(model->metadata.output[i].output_type, str, STR_LEN);
+               fprintf(fp, "%*s  ", 16, model->metadata.output1[i].output_name);
+               rte_ml_io_type_to_str(model->metadata.output1[i].output_type, str, STR_LEN);
                fprintf(fp, "%*s  ", 12, str);
-               rte_ml_io_type_to_str(model->metadata.output[i].model_output_type, str, STR_LEN);
+               rte_ml_io_type_to_str(model->metadata.output1[i].model_output_type, str, STR_LEN);
                fprintf(fp, "%*s  ", 18, str);
-               fprintf(fp, "%*s", 12, (model->metadata.output[i].dequantize == 1 ? "Yes" : "No"));
+               fprintf(fp, "%*s", 12, (model->metadata.output1[i].dequantize == 1 ? "Yes" : "No"));
                fprintf(fp, "\n");
        }
        fprintf(fp, "\n");
@@ -1882,28 +1882,28 @@ cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_batc
 
 next_batch:
        for (i = 0; i < model->metadata.model.num_input; i++) {
-               if (model->metadata.input[i].input_type ==
-                   model->metadata.input[i].model_input_type) {
+               if (model->metadata.input1[i].input_type ==
+                   model->metadata.input1[i].model_input_type) {
                        rte_memcpy(lcl_qbuffer, lcl_dbuffer, model->addr.input[i].sz_d);
                } else {
-                       switch (model->metadata.input[i].model_input_type) {
+                       switch (model->metadata.input1[i].model_input_type) {
                        case RTE_ML_IO_TYPE_INT8:
-                               ret = rte_ml_io_float32_to_int8(model->metadata.input[i].qscale,
+                               ret = rte_ml_io_float32_to_int8(model->metadata.input1[i].qscale,
                                                                model->addr.input[i].nb_elements,
                                                                lcl_dbuffer, lcl_qbuffer);
                                break;
                        case RTE_ML_IO_TYPE_UINT8:
-                               ret = rte_ml_io_float32_to_uint8(model->metadata.input[i].qscale,
+                               ret = rte_ml_io_float32_to_uint8(model->metadata.input1[i].qscale,
                                                                 model->addr.input[i].nb_elements,
                                                                 lcl_dbuffer, lcl_qbuffer);
                                break;
                        case RTE_ML_IO_TYPE_INT16:
-                               ret = rte_ml_io_float32_to_int16(model->metadata.input[i].qscale,
+                               ret = rte_ml_io_float32_to_int16(model->metadata.input1[i].qscale,
                                                                 model->addr.input[i].nb_elements,
                                                                 lcl_dbuffer, lcl_qbuffer);
                                break;
                        case RTE_ML_IO_TYPE_UINT16:
-                               ret = rte_ml_io_float32_to_uint16(model->metadata.input[i].qscale,
+                               ret = rte_ml_io_float32_to_uint16(model->metadata.input1[i].qscale,
                                                                  model->addr.input[i].nb_elements,
                                                                  lcl_dbuffer, lcl_qbuffer);
                                break;
@@ -1913,7 +1913,7 @@ cn10k_ml_io_quantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_batc
                                break;
                        default:
                                plt_err("Unsupported model_input_type[%u] : 
%u", i,
-                                       
model->metadata.input[i].model_input_type);
+                                       
model->metadata.input1[i].model_input_type);
                                ret = -ENOTSUP;
                        }
                        if (ret < 0)
@@ -1955,28 +1955,28 @@ cn10k_ml_io_dequantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_ba
 
 next_batch:
        for (i = 0; i < model->metadata.model.num_output; i++) {
-               if (model->metadata.output[i].output_type ==
-                   model->metadata.output[i].model_output_type) {
+               if (model->metadata.output1[i].output_type ==
+                   model->metadata.output1[i].model_output_type) {
                        rte_memcpy(lcl_dbuffer, lcl_qbuffer, model->addr.output[i].sz_q);
                } else {
-                       switch (model->metadata.output[i].model_output_type) {
+                       switch (model->metadata.output1[i].model_output_type) {
                        case RTE_ML_IO_TYPE_INT8:
-                               ret = rte_ml_io_int8_to_float32(model->metadata.output[i].dscale,
+                               ret = rte_ml_io_int8_to_float32(model->metadata.output1[i].dscale,
                                                                model->addr.output[i].nb_elements,
                                                                lcl_qbuffer, lcl_dbuffer);
                                break;
                        case RTE_ML_IO_TYPE_UINT8:
-                               ret = rte_ml_io_uint8_to_float32(model->metadata.output[i].dscale,
+                               ret = rte_ml_io_uint8_to_float32(model->metadata.output1[i].dscale,
                                                                 model->addr.output[i].nb_elements,
                                                                 lcl_qbuffer, lcl_dbuffer);
                                break;
                        case RTE_ML_IO_TYPE_INT16:
-                               ret = rte_ml_io_int16_to_float32(model->metadata.output[i].dscale,
+                               ret = rte_ml_io_int16_to_float32(model->metadata.output1[i].dscale,
                                                                 model->addr.output[i].nb_elements,
                                                                 lcl_qbuffer, lcl_dbuffer);
                                break;
                        case RTE_ML_IO_TYPE_UINT16:
-                               ret = rte_ml_io_uint16_to_float32(model->metadata.output[i].dscale,
+                               ret = rte_ml_io_uint16_to_float32(model->metadata.output1[i].dscale,
                                                                  model->addr.output[i].nb_elements,
                                                                  lcl_qbuffer, lcl_dbuffer);
                                break;
@@ -1987,7 +1987,7 @@ cn10k_ml_io_dequantize(struct rte_ml_dev *dev, uint16_t model_id, uint16_t nb_ba
                                break;
                        default:
                                plt_err("Unsupported model_output_type[%u] : 
%u", i,
-                                       
model->metadata.output[i].model_output_type);
+                                       
model->metadata.output1[i].model_output_type);
                                ret = -ENOTSUP;
                        }
                        if (ret < 0)
-- 
2.17.1
