On 11/4/23 00:34, Nicolas Chautru wrote:
From: Hernan Vargas <hernan.var...@intel.com>

Adding test-bbdev support for the MLD-TS processing specific to the VRB2
variant.

Signed-off-by: Hernan Vargas <hernan.var...@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coque...@redhat.com>
---
  app/test-bbdev/test_bbdev_perf.c   | 519 +++++++++++++++++++++++++++++
  app/test-bbdev/test_bbdev_vector.c | 132 ++++++++
  app/test-bbdev/test_bbdev_vector.h |   1 +
  3 files changed, 652 insertions(+)

diff --git a/app/test-bbdev/test_bbdev_perf.c b/app/test-bbdev/test_bbdev_perf.c
index 5f1e5de002..d2e3542356 100644
--- a/app/test-bbdev/test_bbdev_perf.c
+++ b/app/test-bbdev/test_bbdev_perf.c
@@ -139,6 +139,7 @@ struct test_op_params {
        struct rte_bbdev_dec_op *ref_dec_op;
        struct rte_bbdev_enc_op *ref_enc_op;
        struct rte_bbdev_fft_op *ref_fft_op;
+       struct rte_bbdev_mldts_op *ref_mldts_op;
        uint16_t burst_sz;
        uint16_t num_to_process;
        uint16_t num_lcores;
@@ -165,6 +166,7 @@ struct thread_params {
        struct rte_bbdev_dec_op *dec_ops[MAX_BURST];
        struct rte_bbdev_enc_op *enc_ops[MAX_BURST];
        struct rte_bbdev_fft_op *fft_ops[MAX_BURST];
+       struct rte_bbdev_mldts_op *mldts_ops[MAX_BURST];
  };
/* Stores time statistics */
@@ -472,6 +474,18 @@ check_dev_cap(const struct rte_bbdev_info *dev_info)
                                return TEST_FAILED;
                        }
                        return TEST_SUCCESS;
+               } else if (op_cap->type == RTE_BBDEV_OP_MLDTS) {
+                       const struct rte_bbdev_op_cap_mld *cap = &op_cap->cap.mld;
+                       if (!flags_match(test_vector.mldts.op_flags, cap->capability_flags)) {
+                               printf("Flag Mismatch\n");
+                               return TEST_FAILED;
+                       }
+                       if (nb_inputs > cap->num_buffers_src) {
+                               printf("Too many inputs defined: %u, max: %u\n",
+                                       nb_inputs, cap->num_buffers_src);
+                               return TEST_FAILED;
+                       }
+                       return TEST_SUCCESS;
                }
        }
@@ -822,6 +836,9 @@ add_bbdev_dev(uint8_t dev_id, struct rte_bbdev_info *info,
                        conf.arb_fft[i].gbr_threshold1 = VRB_QOS_GBR;
                        conf.arb_fft[i].gbr_threshold1 = VRB_QOS_GBR;
                        conf.arb_fft[i].round_robin_weight = VRB_QMGR_RR;
+                       conf.arb_mld[i].gbr_threshold1 = VRB_QOS_GBR;
+                       conf.arb_mld[i].gbr_threshold1 = VRB_QOS_GBR;
+                       conf.arb_mld[i].round_robin_weight = VRB_QMGR_RR;
                }
conf.input_pos_llr_1_bit = true;
@@ -847,6 +864,10 @@ add_bbdev_dev(uint8_t dev_id, struct rte_bbdev_info *info,
                conf.q_fft.num_qgroups = VRB_QMGR_NUM_QGS;
                conf.q_fft.first_qgroup_index = VRB_QMGR_INVALID_IDX;
                conf.q_fft.num_aqs_per_groups = VRB_QMGR_NUM_AQS;
+               conf.q_mld.num_qgroups = VRB_QMGR_NUM_QGS;
+               conf.q_mld.first_qgroup_index = VRB_QMGR_INVALID_IDX;
+               conf.q_mld.num_aqs_per_groups = VRB_QMGR_NUM_AQS;
+               conf.q_mld.aq_depth_log2 = VRB_QMGR_AQ_DEPTH;
/* setup PF with configuration information */
                ret = rte_acc_configure(info->dev_name, &conf);
@@ -1979,6 +2000,31 @@ copy_reference_fft_op(struct rte_bbdev_fft_op **ops, unsigned int n,
        }
  }
+static void
+copy_reference_mldts_op(struct rte_bbdev_mldts_op **ops, unsigned int n,
+               unsigned int start_idx,
+               struct rte_bbdev_op_data *q_inputs,
+               struct rte_bbdev_op_data *r_inputs,
+               struct rte_bbdev_op_data *outputs,
+               struct rte_bbdev_mldts_op *ref_op)
+{
+       unsigned int i, j;
+       struct rte_bbdev_op_mldts *mldts = &ref_op->mldts;
+       for (i = 0; i < n; i++) {
+               ops[i]->mldts.c_rep = mldts->c_rep;
+               ops[i]->mldts.num_layers = mldts->num_layers;
+               ops[i]->mldts.num_rbs = mldts->num_rbs;
+               ops[i]->mldts.op_flags = mldts->op_flags;
+               for (j = 0; j < RTE_BBDEV_MAX_MLD_LAYERS; j++)
+                       ops[i]->mldts.q_m[j] = mldts->q_m[j];
+               ops[i]->mldts.r_rep = mldts->r_rep;
+               ops[i]->mldts.c_rep = mldts->c_rep;
+               ops[i]->mldts.r_input = r_inputs[start_idx + i];
+               ops[i]->mldts.qhy_input = q_inputs[start_idx + i];
+               ops[i]->mldts.output = outputs[start_idx + i];
+       }
+}
+
  static int
  check_dec_status_and_ordering(struct rte_bbdev_dec_op *op,
                unsigned int order_idx, const int expected_status)
@@ -2039,6 +2085,21 @@ check_fft_status_and_ordering(struct rte_bbdev_fft_op *op,
        return TEST_SUCCESS;
  }
+static int
+check_mldts_status_and_ordering(struct rte_bbdev_mldts_op *op,
+               unsigned int order_idx, const int expected_status)
+{
+       TEST_ASSERT(op->status == expected_status,
+                       "op_status (%d) != expected_status (%d)",
+                       op->status, expected_status);
+
+       TEST_ASSERT((void *)(uintptr_t)order_idx == op->opaque_data,
+                       "Ordering error, expected %p, got %p",
+                       (void *)(uintptr_t)order_idx, op->opaque_data);
+
+       return TEST_SUCCESS;
+}
+
  static inline int
  validate_op_chain(struct rte_bbdev_op_data *op,
                struct op_data_entries *orig_op)
@@ -2554,6 +2615,57 @@ validate_op_fft_chain(struct rte_bbdev_op_data *op, struct op_data_entries *orig
        return TEST_SUCCESS;
  }
+static inline int
+validate_op_mldts_chain(struct rte_bbdev_op_data *op,
+               struct op_data_entries *orig_op)
+{
+       uint8_t i;
+       struct rte_mbuf *m = op->data;
+       uint8_t nb_dst_segments = orig_op->nb_segments;
+       /*the result is not bit exact*/
+       int16_t thres_hold = 3;
+       int16_t delt, abs_delt;
+       uint32_t j, data_len_iq;
+       uint32_t error_num;
+       int8_t *ref_out;
+       int8_t *op_out;
+
+       TEST_ASSERT(nb_dst_segments == m->nb_segs,
+                       "Number of segments differ in original (%u) and filled (%u) op mldts",
+                       nb_dst_segments, m->nb_segs);
+
+       /* Due to size limitation of mbuf, MLDTS doesn't use real mbuf. */
+       for (i = 0; i < nb_dst_segments; ++i) {
+               uint16_t offset = (i == 0) ? op->offset : 0;
+               uint32_t data_len = op->length;
+
+               TEST_ASSERT(orig_op->segments[i].length == data_len,
+                               "Length of segment differ in original (%u) and filled (%u) op mldts",
+                               orig_op->segments[i].length, data_len);
+               data_len_iq = data_len;
+               ref_out = (int8_t *)(orig_op->segments[i].addr);
+               op_out = rte_pktmbuf_mtod_offset(m, int8_t *, offset),

I missed it while reviewing, but checkpatch did:

### [PATCH 08/13] test/bbdev: add MLD support

WARNING:SUSPECT_COMMA_SEMICOLON: Possible comma where semicolon could be used
#164: FILE: app/test-bbdev/test_bbdev_perf.c:2647:
+               op_out = rte_pktmbuf_mtod_offset(m, int8_t *, offset),
+               error_num = 0;

total: 0 errors, 1 warnings, 856 lines checked

I'll fix it directly in my tree, no need to submit a new version.
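
For reference, the fix is just replacing the trailing comma with a semicolon, so that statement becomes:

               op_out = rte_pktmbuf_mtod_offset(m, int8_t *, offset);
               error_num = 0;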


+               error_num = 0;
+               for (j = 0; j < data_len_iq; j++) {
+
+                       delt = ref_out[j] - op_out[j];
+                       abs_delt = delt > 0 ? delt : -delt;
+                       error_num += (abs_delt > thres_hold ? 1 : 0);
+                       if (error_num > 0)
+                               printf("MLD Error %d: Exp %x %d Actual %x %d Diff %d\n",
+                                               j, ref_out[j], ref_out[j], op_out[j], op_out[j],
+                                               delt);
+               }
+               TEST_ASSERT(error_num == 0,
+                       "MLDTS Output are not matched total (%u) errors (%u)",
+                       data_len_iq, error_num);
+
+               m = m->next;
+       }
+
+       return TEST_SUCCESS;
+}
+
  static int
  validate_fft_op(struct rte_bbdev_fft_op **ops, const uint16_t n,
                struct rte_bbdev_fft_op *ref_op)
@@ -2578,6 +2690,28 @@ validate_fft_op(struct rte_bbdev_fft_op **ops, const uint16_t n,
        return TEST_SUCCESS;
  }
+static int
+validate_mldts_op(struct rte_bbdev_mldts_op **ops, const uint16_t n,
+               struct rte_bbdev_mldts_op *ref_op)
+{
+       unsigned int i;
+       int ret;
+       struct op_data_entries *mldts_data_orig =
+                       &test_vector.entries[DATA_HARD_OUTPUT];
+       for (i = 0; i < n; ++i) {
+               ret = check_mldts_status_and_ordering(ops[i], i, ref_op->status);
+               TEST_ASSERT_SUCCESS(ret,
+                               "Checking status and ordering for MLDTS failed");
+               TEST_ASSERT_SUCCESS(validate_op_mldts_chain(
+                               &ops[i]->mldts.output,
+                               mldts_data_orig),
+                               "MLDTS Output buffers (op=%u) are not matched",
+                               i);
+       }
+
+       return TEST_SUCCESS;
+}
+
  static void
  create_reference_dec_op(struct rte_bbdev_dec_op *op)
  {
@@ -2622,6 +2756,20 @@ create_reference_fft_op(struct rte_bbdev_fft_op *op)
                op->fft.base_input.length += entry->segments[i].length;
  }
+static void
+create_reference_mldts_op(struct rte_bbdev_mldts_op *op)
+{
+       unsigned int i;
+       struct op_data_entries *entry;
+       op->mldts = test_vector.mldts;
+       entry = &test_vector.entries[DATA_INPUT];
+       for (i = 0; i < entry->nb_segments; ++i)
+               op->mldts.qhy_input.length += entry->segments[i].length;
+       entry = &test_vector.entries[DATA_HARQ_INPUT];
+       for (i = 0; i < entry->nb_segments; ++i)
+               op->mldts.r_input.length += entry->segments[i].length;
+}
+
  static void
  create_reference_enc_op(struct rte_bbdev_enc_op *op)
  {
@@ -2730,6 +2878,14 @@ calc_fft_size(struct rte_bbdev_fft_op *op)
        return output_size;
  }
+static uint32_t
+calc_mldts_size(struct rte_bbdev_mldts_op *op)
+{
+       uint32_t output_size;
+       output_size = op->mldts.num_layers * op->mldts.num_rbs * op->mldts.c_rep;
+       return output_size;
+}
+
  static int
  init_test_op_params(struct test_op_params *op_params,
                enum rte_bbdev_op_type op_type, const int expected_status,
@@ -2744,6 +2900,9 @@ init_test_op_params(struct test_op_params *op_params,
        else if (op_type == RTE_BBDEV_OP_FFT)
                ret = rte_bbdev_fft_op_alloc_bulk(ops_mp,
                                &op_params->ref_fft_op, 1);
+       else if (op_type == RTE_BBDEV_OP_MLDTS)
+               ret = rte_bbdev_mldts_op_alloc_bulk(ops_mp,
+                               &op_params->ref_mldts_op, 1);
        else
                ret = rte_bbdev_enc_op_alloc_bulk(ops_mp,
                                &op_params->ref_enc_op, 1);
@@ -2763,6 +2922,8 @@ init_test_op_params(struct test_op_params *op_params,
                op_params->ref_enc_op->status = expected_status;
        else if (op_type == RTE_BBDEV_OP_FFT)
                op_params->ref_fft_op->status = expected_status;
+       else if (op_type == RTE_BBDEV_OP_MLDTS)
+               op_params->ref_mldts_op->status = expected_status;
        return 0;
  }
@@ -2831,6 +2992,8 @@ run_test_case_on_device(test_case_function *test_case_func, uint8_t dev_id,
                create_reference_ldpc_dec_op(op_params->ref_dec_op);
        else if (test_vector.op_type == RTE_BBDEV_OP_FFT)
                create_reference_fft_op(op_params->ref_fft_op);
+       else if (test_vector.op_type == RTE_BBDEV_OP_MLDTS)
+               create_reference_mldts_op(op_params->ref_mldts_op);
for (i = 0; i < ad->nb_queues; ++i) {
                f_ret = fill_queue_buffers(op_params,
@@ -3047,6 +3210,11 @@ dequeue_event_callback(uint16_t dev_id,
                                &tp->fft_ops[
                                        __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
                                burst_sz);
+       else if (test_vector.op_type == RTE_BBDEV_OP_MLDTS)
+               deq = rte_bbdev_dequeue_mldts_ops(dev_id, queue_id,
+                               &tp->mldts_ops[
+                                       __atomic_load_n(&tp->nb_dequeued, __ATOMIC_RELAXED)],
+                               burst_sz);
        else /*RTE_BBDEV_OP_TURBO_ENC*/
                deq = rte_bbdev_dequeue_enc_ops(dev_id, queue_id,
                                &tp->enc_ops[
@@ -3093,6 +3261,10 @@ dequeue_event_callback(uint16_t dev_id,
                struct rte_bbdev_fft_op *ref_op = tp->op_params->ref_fft_op;
                ret = validate_fft_op(tp->fft_ops, num_ops, ref_op);
                rte_bbdev_fft_op_free_bulk(tp->fft_ops, deq);
+       } else if (test_vector.op_type == RTE_BBDEV_OP_MLDTS) {
+               struct rte_bbdev_mldts_op *ref_op = tp->op_params->ref_mldts_op;
+               ret = validate_mldts_op(tp->mldts_ops, num_ops, ref_op);
+               rte_bbdev_mldts_op_free_bulk(tp->mldts_ops, deq);
        } else if (test_vector.op_type == RTE_BBDEV_OP_LDPC_DEC) {
                struct rte_bbdev_dec_op *ref_op = tp->op_params->ref_dec_op;
                ret = validate_ldpc_dec_op(tp->dec_ops, num_ops, ref_op,
@@ -3118,6 +3290,9 @@ dequeue_event_callback(uint16_t dev_id,
        case RTE_BBDEV_OP_FFT:
                tb_len_bits = calc_fft_size(tp->op_params->ref_fft_op);
                break;
+       case RTE_BBDEV_OP_MLDTS:
+               tb_len_bits = calc_mldts_size(tp->op_params->ref_mldts_op);
+               break;
        case RTE_BBDEV_OP_LDPC_ENC:
                tb_len_bits = calc_ldpc_enc_TB_size(tp->op_params->ref_enc_op);
                break;
@@ -3593,6 +3768,88 @@ throughput_intr_lcore_fft(void *arg)
        return TEST_SUCCESS;
  }
+static int
+throughput_intr_lcore_mldts(void *arg)
+{
+       struct thread_params *tp = arg;
+       unsigned int enqueued;
+       const uint16_t queue_id = tp->queue_id;
+       const uint16_t burst_sz = tp->op_params->burst_sz;
+       const uint16_t num_to_process = tp->op_params->num_to_process;
+       struct rte_bbdev_mldts_op *ops[num_to_process];
+       struct test_buffers *bufs = NULL;
+       struct rte_bbdev_info info;
+       int ret, i, j;
+       uint16_t num_to_enq, enq;
+
+       TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST), "BURST_SIZE should be <= %u", MAX_BURST);
+
+       TEST_ASSERT_SUCCESS(rte_bbdev_queue_intr_enable(tp->dev_id, queue_id),
+                       "Failed to enable interrupts for dev: %u, queue_id: %u",
+                       tp->dev_id, queue_id);
+
+       rte_bbdev_info_get(tp->dev_id, &info);
+
+       TEST_ASSERT_SUCCESS((num_to_process > info.drv.queue_size_lim),
+                       "NUM_OPS cannot exceed %u for this device",
+                       info.drv.queue_size_lim);
+
+       bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
+
+       __atomic_store_n(&tp->processing_status, 0, __ATOMIC_RELAXED);
+       __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+
+       rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+
+       ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops, num_to_process);
+       TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_to_process);
+       if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+               copy_reference_mldts_op(ops, num_to_process, 0, bufs->inputs, bufs->harq_inputs,
+                               bufs->hard_outputs, tp->op_params->ref_mldts_op);
+
+       /* Set counter to validate the ordering */
+       for (j = 0; j < num_to_process; ++j)
+               ops[j]->opaque_data = (void *)(uintptr_t)j;
+
+       for (j = 0; j < TEST_REPETITIONS; ++j) {
+               for (i = 0; i < num_to_process; ++i)
+                       mbuf_reset(ops[i]->mldts.output.data);
+
+               tp->start_time = rte_rdtsc_precise();
+               for (enqueued = 0; enqueued < num_to_process;) {
+                       num_to_enq = burst_sz;
+
+                       if (unlikely(num_to_process - enqueued < num_to_enq))
+                               num_to_enq = num_to_process - enqueued;
+
+                       enq = 0;
+                       do {
+                               enq += rte_bbdev_enqueue_mldts_ops(tp->dev_id,
+                                               queue_id, &ops[enqueued], num_to_enq);
+                       } while (unlikely(enq != num_to_enq));
+                       enqueued += enq;
+
+                       /* Write to thread burst_sz current number of enqueued
+                        * descriptors. It ensures that proper number of
+                        * descriptors will be dequeued in callback
+                        * function - needed for last batch in case where
+                        * the number of operations is not a multiple of
+                        * burst size.
+                        */
+                       __atomic_store_n(&tp->burst_sz, num_to_enq, __ATOMIC_RELAXED);
+
+                       /* Wait until processing of previous batch is
+                        * completed
+                        */
+                       rte_wait_until_equal_16(&tp->nb_dequeued, enqueued, __ATOMIC_RELAXED);
+               }
+               if (j != TEST_REPETITIONS - 1)
+                       __atomic_store_n(&tp->nb_dequeued, 0, __ATOMIC_RELAXED);
+       }
+
+       return TEST_SUCCESS;
+}
+
  static int
  throughput_pmd_lcore_dec(void *arg)
  {
@@ -4403,6 +4660,104 @@ throughput_pmd_lcore_fft(void *arg)
        return TEST_SUCCESS;
  }
+static int
+throughput_pmd_lcore_mldts(void *arg)
+{
+       struct thread_params *tp = arg;
+       uint16_t enq, deq;
+       uint64_t total_time = 0, start_time;
+       const uint16_t queue_id = tp->queue_id;
+       const uint16_t burst_sz = tp->op_params->burst_sz;
+       const uint16_t num_ops = tp->op_params->num_to_process;
+       struct rte_bbdev_mldts_op *ops_enq[num_ops];
+       struct rte_bbdev_mldts_op *ops_deq[num_ops];
+       struct rte_bbdev_mldts_op *ref_op = tp->op_params->ref_mldts_op;
+       struct test_buffers *bufs = NULL;
+       int i, j, ret;
+       struct rte_bbdev_info info;
+       uint16_t num_to_enq;
+
+       TEST_ASSERT_SUCCESS((burst_sz > MAX_BURST), "BURST_SIZE should be <= %u", MAX_BURST);
+
+       rte_bbdev_info_get(tp->dev_id, &info);
+
+       TEST_ASSERT_SUCCESS((num_ops > info.drv.queue_size_lim),
+                       "NUM_OPS cannot exceed %u for this device",
+                       info.drv.queue_size_lim);
+
+       bufs = &tp->op_params->q_bufs[GET_SOCKET(info.socket_id)][queue_id];
+
+       rte_wait_until_equal_16(&tp->op_params->sync, SYNC_START, __ATOMIC_RELAXED);
+
+       ret = rte_bbdev_mldts_op_alloc_bulk(tp->op_params->mp, ops_enq, num_ops);
+       TEST_ASSERT_SUCCESS(ret, "Allocation failed for %d ops", num_ops);
+
+       if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+               copy_reference_mldts_op(ops_enq, num_ops, 0, bufs->inputs, bufs->harq_inputs,
+                               bufs->hard_outputs, ref_op);
+
+       /* Set counter to validate the ordering */
+       for (j = 0; j < num_ops; ++j)
+               ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+       for (i = 0; i < TEST_REPETITIONS; ++i) {
+               uint32_t time_out = 0;
+               for (j = 0; j < num_ops; ++j)
+                       mbuf_reset(ops_enq[j]->mldts.output.data);
+
+               start_time = rte_rdtsc_precise();
+
+               for (enq = 0, deq = 0; enq < num_ops;) {
+                       num_to_enq = burst_sz;
+
+                       if (unlikely(num_ops - enq < num_to_enq))
+                               num_to_enq = num_ops - enq;
+
+                       enq += rte_bbdev_enqueue_mldts_ops(tp->dev_id,
+                                       queue_id, &ops_enq[enq], num_to_enq);
+
+                       deq += rte_bbdev_dequeue_mldts_ops(tp->dev_id,
+                                       queue_id, &ops_deq[deq], enq - deq);
+                       time_out++;
+                       if (time_out >= TIME_OUT_POLL) {
+                               timeout_exit(tp->dev_id);
+                               TEST_ASSERT_SUCCESS(TEST_FAILED, "Enqueue timeout!");
+                       }
+               }
+
+               /* dequeue the remaining */
+               time_out = 0;
+               while (deq < enq) {
+                       deq += rte_bbdev_dequeue_mldts_ops(tp->dev_id,
+                                       queue_id, &ops_deq[deq], enq - deq);
+                       time_out++;
+                       if (time_out >= TIME_OUT_POLL) {
+                               timeout_exit(tp->dev_id);
+                               TEST_ASSERT_SUCCESS(TEST_FAILED, "Dequeue timeout!");
+                       }
+               }
+
+               total_time += rte_rdtsc_precise() - start_time;
+       }
+
+       if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
+               ret = validate_mldts_op(ops_deq, num_ops, ref_op);
+               TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+       }
+
+       rte_bbdev_mldts_op_free_bulk(ops_enq, num_ops);
+
+       double tb_len_bits = calc_mldts_size(ref_op);
+
+       tp->ops_per_sec = ((double)num_ops * TEST_REPETITIONS) /
+                       ((double)total_time / (double)rte_get_tsc_hz());
+       tp->mbps = (((double)(num_ops * TEST_REPETITIONS * tb_len_bits)) /
+                       1000000.0) / ((double)total_time /
+                       (double)rte_get_tsc_hz());
+
+       return TEST_SUCCESS;
+}
+
  static void
  print_enc_throughput(struct thread_params *t_params, unsigned int used_cores)
  {
@@ -4624,6 +4979,8 @@ throughput_test(struct active_device *ad,
                        throughput_function = throughput_intr_lcore_ldpc_enc;
                else if (test_vector.op_type == RTE_BBDEV_OP_FFT)
                        throughput_function = throughput_intr_lcore_fft;
+               else if (test_vector.op_type == RTE_BBDEV_OP_MLDTS)
+                       throughput_function = throughput_intr_lcore_mldts;
                else
                        throughput_function = throughput_intr_lcore_enc;
@@ -4646,6 +5003,8 @@ throughput_test(struct active_device *ad,
                        throughput_function = throughput_pmd_lcore_ldpc_enc;
                else if (test_vector.op_type == RTE_BBDEV_OP_FFT)
                        throughput_function = throughput_pmd_lcore_fft;
+               else if (test_vector.op_type == RTE_BBDEV_OP_MLDTS)
+                       throughput_function = throughput_pmd_lcore_mldts;
                else
                        throughput_function = throughput_pmd_lcore_enc;
        }
@@ -5139,6 +5498,77 @@ latency_test_fft(struct rte_mempool *mempool,
        return i;
  }
+static int
+latency_test_mldts(struct rte_mempool *mempool,
+               struct test_buffers *bufs, struct rte_bbdev_mldts_op *ref_op,
+               uint16_t dev_id, uint16_t queue_id,
+               const uint16_t num_to_process, uint16_t burst_sz,
+               uint64_t *total_time, uint64_t *min_time, uint64_t *max_time)
+{
+       int ret = TEST_SUCCESS;
+       uint16_t i, j, dequeued;
+       struct rte_bbdev_mldts_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+       uint64_t start_time = 0, last_time = 0;
+
+       for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
+               uint16_t enq = 0, deq = 0;
+               uint32_t time_out = 0;
+               bool first_time = true;
+               last_time = 0;
+
+               if (unlikely(num_to_process - dequeued < burst_sz))
+                       burst_sz = num_to_process - dequeued;
+
+               ret = rte_bbdev_mldts_op_alloc_bulk(mempool, ops_enq, burst_sz);
+               TEST_ASSERT_SUCCESS(ret, "rte_bbdev_mldts_op_alloc_bulk() failed");
+               if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+                       copy_reference_mldts_op(ops_enq, burst_sz, dequeued,
+                                       bufs->inputs, bufs->harq_inputs,
+                                       bufs->hard_outputs,
+                                       ref_op);
+
+               /* Set counter to validate the ordering */
+               for (j = 0; j < burst_sz; ++j)
+                       ops_enq[j]->opaque_data = (void *)(uintptr_t)j;
+
+               start_time = rte_rdtsc_precise();
+
+               enq = rte_bbdev_enqueue_mldts_ops(dev_id, queue_id, &ops_enq[enq], burst_sz);
+               TEST_ASSERT(enq == burst_sz,
+                               "Error enqueueing burst, expected %u, got %u",
+                               burst_sz, enq);
+
+               /* Dequeue */
+               do {
+                       deq += rte_bbdev_dequeue_mldts_ops(dev_id, queue_id,
+                                       &ops_deq[deq], burst_sz - deq);
+                       if (likely(first_time && (deq > 0))) {
+                               last_time += rte_rdtsc_precise() - start_time;
+                               first_time = false;
+                       }
+                       time_out++;
+                       if (time_out >= TIME_OUT_POLL) {
+                               timeout_exit(dev_id);
+                               TEST_ASSERT_SUCCESS(TEST_FAILED, "Dequeue timeout!");
+                       }
+               } while (unlikely(burst_sz != deq));
+
+               *max_time = RTE_MAX(*max_time, last_time);
+               *min_time = RTE_MIN(*min_time, last_time);
+               *total_time += last_time;
+
+               if (test_vector.op_type != RTE_BBDEV_OP_NONE) {
+                       ret = validate_mldts_op(ops_deq, burst_sz, ref_op);
+                       TEST_ASSERT_SUCCESS(ret, "Validation failed!");
+               }
+
+               rte_bbdev_mldts_op_free_bulk(ops_enq, deq);
+               dequeued += deq;
+       }
+
+       return i;
+}
+
  /* Common function for running validation and latency test cases */
  static int
  validation_latency_test(struct active_device *ad,
@@ -5196,6 +5626,12 @@ validation_latency_test(struct active_device *ad,
                                ad->dev_id, queue_id,
                                num_to_process, burst_sz, &total_time,
                                &min_time, &max_time);
+       else if (op_type == RTE_BBDEV_OP_MLDTS)
+               iter = latency_test_mldts(op_params->mp, bufs,
+                               op_params->ref_mldts_op,
+                               ad->dev_id, queue_id,
+                               num_to_process, burst_sz, &total_time,
+                               &min_time, &max_time);
        else /* RTE_BBDEV_OP_TURBO_ENC */
                iter = latency_test_enc(op_params->mp, bufs,
                                op_params->ref_enc_op,
@@ -5337,6 +5773,85 @@ offload_latency_test_fft(struct rte_mempool *mempool, struct test_buffers *bufs,
        return i;
  }
+static int
+offload_latency_test_mldts(struct rte_mempool *mempool, struct test_buffers *bufs,
+               struct rte_bbdev_mldts_op *ref_op, uint16_t dev_id,
+               uint16_t queue_id, const uint16_t num_to_process,
+               uint16_t burst_sz, struct test_time_stats *time_st)
+{
+       int i, dequeued, ret;
+       struct rte_bbdev_mldts_op *ops_enq[MAX_BURST], *ops_deq[MAX_BURST];
+       uint64_t enq_start_time, deq_start_time;
+       uint64_t enq_sw_last_time, deq_last_time;
+       struct rte_bbdev_stats stats;
+
+       for (i = 0, dequeued = 0; dequeued < num_to_process; ++i) {
+               uint16_t enq = 0, deq = 0;
+
+               if (unlikely(num_to_process - dequeued < burst_sz))
+                       burst_sz = num_to_process - dequeued;
+
+               ret = rte_bbdev_mldts_op_alloc_bulk(mempool, ops_enq, burst_sz);
+               TEST_ASSERT_SUCCESS(ret, "rte_bbdev_mldts_op_alloc_bulk() failed");
+               if (test_vector.op_type != RTE_BBDEV_OP_NONE)
+                       copy_reference_mldts_op(ops_enq, burst_sz, dequeued,
+                                       bufs->inputs, bufs->harq_inputs,
+                                       bufs->hard_outputs,
+                                       ref_op);
+
+               /* Start time meas for enqueue function offload latency */
+               enq_start_time = rte_rdtsc_precise();
+               do {
+                       enq += rte_bbdev_enqueue_mldts_ops(dev_id, queue_id,
+                                       &ops_enq[enq], burst_sz - enq);
+               } while (unlikely(burst_sz != enq));
+
+               ret = get_bbdev_queue_stats(dev_id, queue_id, &stats);
+               TEST_ASSERT_SUCCESS(ret,
+                               "Failed to get stats for queue (%u) of device (%u)",
+                               queue_id, dev_id);
+
+               enq_sw_last_time = rte_rdtsc_precise() - enq_start_time -
+                               stats.acc_offload_cycles;
+               time_st->enq_sw_max_time = RTE_MAX(time_st->enq_sw_max_time,
+                               enq_sw_last_time);
+               time_st->enq_sw_min_time = RTE_MIN(time_st->enq_sw_min_time,
+                               enq_sw_last_time);
+               time_st->enq_sw_total_time += enq_sw_last_time;
+
+               time_st->enq_acc_max_time = RTE_MAX(time_st->enq_acc_max_time,
+                               stats.acc_offload_cycles);
+               time_st->enq_acc_min_time = RTE_MIN(time_st->enq_acc_min_time,
+                               stats.acc_offload_cycles);
+               time_st->enq_acc_total_time += stats.acc_offload_cycles;
+
+               /* give time for device to process ops */
+               rte_delay_us(WAIT_OFFLOAD_US);
+
+               /* Start time meas for dequeue function offload latency */
+               deq_start_time = rte_rdtsc_precise();
+               /* Dequeue one operation */
+               do {
+                       deq += rte_bbdev_dequeue_mldts_ops(dev_id, queue_id, &ops_deq[deq], enq);
+               } while (unlikely(deq == 0));
+
+               deq_last_time = rte_rdtsc_precise() - deq_start_time;
+               time_st->deq_max_time = RTE_MAX(time_st->deq_max_time, deq_last_time);
+               time_st->deq_min_time = RTE_MIN(time_st->deq_min_time, deq_last_time);
+               time_st->deq_total_time += deq_last_time;
+
+               /* Dequeue remaining operations if needed*/
+               while (burst_sz != deq)
+                       deq += rte_bbdev_dequeue_mldts_ops(dev_id, queue_id,
+                                       &ops_deq[deq], burst_sz - deq);
+
+               rte_bbdev_mldts_op_free_bulk(ops_enq, deq);
+               dequeued += deq;
+       }
+
+       return i;
+}
+
  static int
  offload_latency_test_dec(struct rte_mempool *mempool, struct test_buffers *bufs,
                struct rte_bbdev_dec_op *ref_op, uint16_t dev_id,
@@ -5734,6 +6249,10 @@ offload_cost_test(struct active_device *ad,
                iter = offload_latency_test_fft(op_params->mp, bufs,
                        op_params->ref_fft_op, ad->dev_id, queue_id,
                        num_to_process, burst_sz, &time_st);
+       else if (op_type == RTE_BBDEV_OP_MLDTS)
+               iter = offload_latency_test_mldts(op_params->mp, bufs,
+                       op_params->ref_mldts_op, ad->dev_id, queue_id,
+                       num_to_process, burst_sz, &time_st);
        else
                iter = offload_latency_test_enc(op_params->mp, bufs,
                                op_params->ref_enc_op, ad->dev_id, queue_id,
diff --git a/app/test-bbdev/test_bbdev_vector.c b/app/test-bbdev/test_bbdev_vector.c
index 0ef1481f2a..8f464db838 100644
--- a/app/test-bbdev/test_bbdev_vector.c
+++ b/app/test-bbdev/test_bbdev_vector.c
@@ -244,6 +244,20 @@ op_fft_flag_strtoul(char *token, uint32_t *op_flag_value)
        return 0;
  }
+/* convert MLD flag from string to unsigned long int*/
+static int
+op_mld_flag_strtoul(char *token, uint32_t *op_flag_value)
+{
+       if (!strcmp(token, "RTE_BBDEV_MLDTS_REP"))
+               *op_flag_value = RTE_BBDEV_MLDTS_REP;
+       else {
+               printf("The given value is not a MLD flag\n");
+               return -1;
+       }
+
+       return 0;
+}
+
  /* convert turbo encoder flag from string to unsigned long int*/
  static int
  op_encoder_flag_strtoul(char *token, uint32_t *op_flag_value)
@@ -326,6 +340,10 @@ parse_turbo_flags(char *tokens, uint32_t *op_flags,
                        if (op_fft_flag_strtoul(tok, &op_flag_value)
                                        == -1)
                                return -1;
+               } else if (op_type == RTE_BBDEV_OP_MLDTS) {
+                       if (op_mld_flag_strtoul(tok, &op_flag_value)
+                                       == -1)
+                               return -1;
                } else {
                        return -1;
                }
@@ -355,6 +373,8 @@ op_turbo_type_strtol(char *token, enum rte_bbdev_op_type *op_type)
                *op_type = RTE_BBDEV_OP_LDPC_DEC;
        else if (!strcmp(token, "RTE_BBDEV_OP_FFT"))
                *op_type = RTE_BBDEV_OP_FFT;
+       else if (!strcmp(token, "RTE_BBDEV_OP_MLDTS"))
+               *op_type = RTE_BBDEV_OP_MLDTS;
        else if (!strcmp(token, "RTE_BBDEV_OP_NONE"))
                *op_type = RTE_BBDEV_OP_NONE;
        else {
@@ -992,6 +1012,73 @@ parse_fft_params(const char *key_token, char *token,
        return 0;
  }
+/* parses MLD parameters and assigns to global variable */
+static int
+parse_mld_params(const char *key_token, char *token,
+               struct test_bbdev_vector *vector)
+{
+       int ret = 0, status = 0;
+       uint32_t op_flags = 0;
+       char *err = NULL;
+
+       struct rte_bbdev_op_mldts *mld = &vector->mldts;
+
+       if (starts_with(key_token, "qhy_input")) {
+               ret = parse_data_entry(key_token, token, vector,
+                               DATA_INPUT, "qhy_input");
+       } else if (starts_with(key_token, "r_input")) {
+               ret = parse_data_entry(key_token, token, vector,
+                               DATA_HARQ_INPUT, "r_input");
+       } else if (starts_with(key_token, "output")) {
+               ret = parse_data_entry(key_token, token, vector,
+                               DATA_HARD_OUTPUT, "output");
+       } else if (!strcmp(key_token, "layers")) {
+               mld->num_layers = (uint32_t) strtoul(token, &err, 0);
+               ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+       } else if (!strcmp(key_token, "layer1")) {
+               mld->q_m[0] = (uint32_t) strtoul(token, &err, 0);
+               ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+       } else if (!strcmp(key_token, "layer2")) {
+               mld->q_m[1] = (uint32_t) strtoul(token, &err, 0);
+               ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+       } else if (!strcmp(key_token, "layer3")) {
+               mld->q_m[2] = (uint32_t) strtoul(token, &err, 0);
+               ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+       } else if (!strcmp(key_token, "layer4")) {
+               mld->q_m[3] = (uint32_t) strtoul(token, &err, 0);
+               ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+       } else if (!strcmp(key_token, "crep")) {
+               mld->c_rep = (uint32_t) strtoul(token, &err, 0);
+               ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+       } else if (!strcmp(key_token, "rrep")) {
+               mld->r_rep = (uint32_t) strtoul(token, &err, 0);
+               ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+       } else if (!strcmp(key_token, "rbs")) {
+               mld->num_rbs = (uint32_t) strtoul(token, &err, 0);
+               ret = ((err == NULL) || (*err != '\0')) ? -1 : 0;
+       } else if (!strcmp(key_token, "op_flags")) {
+               vector->mask |= TEST_BBDEV_VF_OP_FLAGS;
+               ret = parse_turbo_flags(token, &op_flags, vector->op_type);
+               if (!ret)
+                       mld->op_flags = op_flags;
+       } else if (!strcmp(key_token, "expected_status")) {
+               vector->mask |= TEST_BBDEV_VF_EXPECTED_STATUS;
+               ret = parse_expected_status(token, &status, vector->op_type);
+               if (!ret)
+                       vector->expected_status = status;
+       } else {
+               printf("Not valid mld key: '%s'\n", key_token);
+               return -1;
+       }
+
+       if (ret != 0) {
+               printf("Failed with convert '%s\t%s'\n", key_token, token);
+               return -1;
+       }
+
+       return 0;
+}
+
  /* checks the type of key and assigns data */
  static int
  parse_entry(char *entry, struct test_bbdev_vector *vector)
@@ -1046,6 +1133,9 @@ parse_entry(char *entry, struct test_bbdev_vector *vector)
        } else if (vector->op_type == RTE_BBDEV_OP_FFT) {
                if (parse_fft_params(key_token, token, vector) == -1)
                        return -1;
+       } else if (vector->op_type == RTE_BBDEV_OP_MLDTS) {
+               if (parse_mld_params(key_token, token, vector) == -1)
+                       return -1;
        }
return 0;
@@ -1132,6 +1222,25 @@ check_fft_segments(struct test_bbdev_vector *vector)
        return 0;
  }
+static int
+check_mld_segments(struct test_bbdev_vector *vector)
+{
+       unsigned char i;
+
+       for (i = 0; i < vector->entries[DATA_INPUT].nb_segments; i++)
+               if (vector->entries[DATA_INPUT].segments[i].addr == NULL)
+                       return -1;
+
+       for (i = 0; i < vector->entries[DATA_HARQ_INPUT].nb_segments; i++)
+               if (vector->entries[DATA_HARQ_INPUT].segments[i].addr == NULL)
+                       return -1;
+
+       for (i = 0; i < vector->entries[DATA_HARD_OUTPUT].nb_segments; i++)
+               if (vector->entries[DATA_HARD_OUTPUT].segments[i].addr == NULL)
+                       return -1;
+       return 0;
+}
+
  static int
  check_decoder_llr_spec(struct test_bbdev_vector *vector)
  {
@@ -1359,6 +1468,26 @@ check_fft(struct test_bbdev_vector *vector)
        return 0;
  }
+/* checks mld parameters */
+static int
+check_mld(struct test_bbdev_vector *vector)
+{
+       const int mask = vector->mask;
+
+       if (check_mld_segments(vector) < 0)
+               return -1;
+
+       /* Check which params were set */
+       if (!(mask & TEST_BBDEV_VF_OP_FLAGS)) {
+               printf(
+                       "WARNING: op_flags was not specified in vector file and capabilities will not be validated\n");
+       }
+       if (!(mask & TEST_BBDEV_VF_EXPECTED_STATUS))
+               printf(
+                       "WARNING: expected_status was not specified in vector file and will be set to 0\n");
+       return 0;
+}
+
  /* checks encoder parameters */
  static int
  check_encoder(struct test_bbdev_vector *vector)
@@ -1520,6 +1649,9 @@ bbdev_check_vector(struct test_bbdev_vector *vector)
        } else if (vector->op_type == RTE_BBDEV_OP_FFT) {
                if (check_fft(vector) == -1)
                        return -1;
+       } else if (vector->op_type == RTE_BBDEV_OP_MLDTS) {
+               if (check_mld(vector) == -1)
+                       return -1;
        } else if (vector->op_type != RTE_BBDEV_OP_NONE) {
                printf("Vector was not filled\n");
                return -1;
diff --git a/app/test-bbdev/test_bbdev_vector.h b/app/test-bbdev/test_bbdev_vector.h
index 2ea271ffb7..14b8ef2764 100644
--- a/app/test-bbdev/test_bbdev_vector.h
+++ b/app/test-bbdev/test_bbdev_vector.h
@@ -65,6 +65,7 @@ struct test_bbdev_vector {
                struct rte_bbdev_op_ldpc_dec ldpc_dec;
                struct rte_bbdev_op_ldpc_enc ldpc_enc;
                struct rte_bbdev_op_fft fft;
+               struct rte_bbdev_op_mldts mldts;
        };
        /* Additional storage for op data entries */
        struct op_data_entries entries[DATA_NUM_TYPES];

Reply via email to