No MLD processing functionality is implemented yet; this change only
adds the plumbing so that the unified PMD can support the MLD
operation being added moving forward.
Signed-off-by: Nicolas Chautru <nicolas.chau...@intel.com>
---
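Note for reviewers: once the MLD queue topology below is populated, an
application can read the MLDTS queue count back through the standard
bbdev info API. A minimal sketch, not part of this patch (the helper
name is hypothetical; it assumes rte_bbdev_info_get() and the
per-op-type num_queues[] array in struct rte_bbdev_driver_info):

	#include <rte_bbdev.h>

	/* Hypothetical helper: return how many MLDTS queues the device
	 * advertises once this patch fills in q_mld in the topology. */
	static unsigned int
	mldts_queue_count(uint16_t dev_id)
	{
		struct rte_bbdev_info info;

		if (rte_bbdev_info_get(dev_id, &info) != 0)
			return 0;
		/* drv.num_queues[] is indexed by enum rte_bbdev_op_type. */
		return info.drv.num_queues[RTE_BBDEV_OP_MLDTS];
	}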
drivers/baseband/acc/acc_common.h  |  1 +
drivers/baseband/acc/rte_vrb_pmd.c | 39 ++++++++++++++++++++++++------
drivers/baseband/acc/vrb_pmd.h     | 12 +++++++++
3 files changed, 45 insertions(+), 7 deletions(-)
diff --git a/drivers/baseband/acc/acc_common.h b/drivers/baseband/acc/acc_common.h
index b5ee113faf..5de58dbe36 100644
--- a/drivers/baseband/acc/acc_common.h
+++ b/drivers/baseband/acc/acc_common.h
@@ -87,6 +87,7 @@
#define ACC_FCW_LE_BLEN 32
#define ACC_FCW_LD_BLEN 36
#define ACC_FCW_FFT_BLEN 28
+#define ACC_FCW_MLDTS_BLEN 32
#define ACC_5GUL_SIZE_0 16
#define ACC_5GUL_SIZE_1 40
#define ACC_5GUL_OFFSET_0 36
diff --git a/drivers/baseband/acc/rte_vrb_pmd.c b/drivers/baseband/acc/rte_vrb_pmd.c
index f460e9ea2a..e82ed55ca7 100644
--- a/drivers/baseband/acc/rte_vrb_pmd.c
+++ b/drivers/baseband/acc/rte_vrb_pmd.c
@@ -37,7 +37,7 @@ vrb1_queue_offset(bool pf_device, uint8_t vf_id, uint8_t qgrp_id, uint16_t aq_id
return ((qgrp_id << 7) + (aq_id << 3) + VRB1_VfQmgrIngressAq);
}
-enum {UL_4G = 0, UL_5G, DL_4G, DL_5G, FFT, NUM_ACC};
+enum {UL_4G = 0, UL_5G, DL_4G, DL_5G, FFT, MLD, NUM_ACC};
/* Return the accelerator enum for a Queue Group Index. */
static inline int
@@ -53,6 +53,7 @@ accFromQgid(int qg_idx, const struct rte_acc_conf *acc_conf)
NumQGroupsPerFn[DL_4G] = acc_conf->q_dl_4g.num_qgroups;
NumQGroupsPerFn[DL_5G] = acc_conf->q_dl_5g.num_qgroups;
NumQGroupsPerFn[FFT] = acc_conf->q_fft.num_qgroups;
+ NumQGroupsPerFn[MLD] = acc_conf->q_mld.num_qgroups;
for (acc = UL_4G; acc < NUM_ACC; acc++)
for (qgIdx = 0; qgIdx < NumQGroupsPerFn[acc]; qgIdx++)
accQg[qgIndex++] = acc;
@@ -83,6 +84,9 @@ qtopFromAcc(struct rte_acc_queue_topology **qtop, int acc_enum, struct rte_acc_c
case FFT:
p_qtop = &(acc_conf->q_fft);
break;
+ case MLD:
+ p_qtop = &(acc_conf->q_mld);
+ break;
default:
/* NOTREACHED. */
rte_bbdev_log(ERR, "Unexpected error evaluating %s using %d",
__func__, acc_enum);
@@ -139,6 +143,9 @@ initQTop(struct rte_acc_conf *acc_conf)
acc_conf->q_fft.num_aqs_per_groups = 0;
acc_conf->q_fft.num_qgroups = 0;
acc_conf->q_fft.first_qgroup_index = -1;
+ acc_conf->q_mld.num_aqs_per_groups = 0;
+ acc_conf->q_mld.num_qgroups = 0;
+ acc_conf->q_mld.first_qgroup_index = -1;
}
static inline void
@@ -250,7 +257,7 @@ fetch_acc_config(struct rte_bbdev *dev)
}
rte_bbdev_log_debug(
- "%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u %u AQ %u %u
%u %u %u Len %u %u %u %u %u\n",
+ "%s Config LLR SIGN IN/OUT %s %s QG %u %u %u %u %u %u AQ %u
%u %u %u %u %u Len %u %u %u %u %u %u\n",
(d->pf_device) ? "PF" : "VF",
(acc_conf->input_pos_llr_1_bit) ? "POS" : "NEG",
(acc_conf->output_pos_llr_1_bit) ? "POS" : "NEG",
@@ -259,16 +266,19 @@ fetch_acc_config(struct rte_bbdev *dev)
acc_conf->q_ul_5g.num_qgroups,
acc_conf->q_dl_5g.num_qgroups,
acc_conf->q_fft.num_qgroups,
+ acc_conf->q_mld.num_qgroups,
acc_conf->q_ul_4g.num_aqs_per_groups,
acc_conf->q_dl_4g.num_aqs_per_groups,
acc_conf->q_ul_5g.num_aqs_per_groups,
acc_conf->q_dl_5g.num_aqs_per_groups,
acc_conf->q_fft.num_aqs_per_groups,
+ acc_conf->q_mld.num_aqs_per_groups,
acc_conf->q_ul_4g.aq_depth_log2,
acc_conf->q_dl_4g.aq_depth_log2,
acc_conf->q_ul_5g.aq_depth_log2,
acc_conf->q_dl_5g.aq_depth_log2,
- acc_conf->q_fft.aq_depth_log2);
+ acc_conf->q_fft.aq_depth_log2,
+ acc_conf->q_mld.aq_depth_log2);
}
static inline void
@@ -332,7 +342,7 @@ vrb_check_ir(struct acc_device *acc_dev)
while (ring_data->valid) {
if ((ring_data->int_nb < ACC_PF_INT_DMA_DL_DESC_IRQ) || (
- ring_data->int_nb > ACC_PF_INT_DMA_DL5G_DESC_IRQ)) {
+ ring_data->int_nb > ACC_PF_INT_DMA_MLD_DESC_IRQ)) {
rte_bbdev_log(WARNING, "InfoRing: ITR:%d Info:0x%x",
ring_data->int_nb,
ring_data->detailed_info);
/* Initialize Info Ring entry and move forward. */
@@ -366,6 +376,7 @@ vrb_dev_interrupt_handler(void *cb_arg)
case ACC_PF_INT_DMA_FFT_DESC_IRQ:
case ACC_PF_INT_DMA_UL5G_DESC_IRQ:
case ACC_PF_INT_DMA_DL5G_DESC_IRQ:
+ case ACC_PF_INT_DMA_MLD_DESC_IRQ:
deq_intr_det.queue_id = get_queue_id_from_ring_info(
dev->data, *ring_data);
if (deq_intr_det.queue_id == UINT16_MAX) {
@@ -393,6 +404,7 @@ vrb_dev_interrupt_handler(void *cb_arg)
case ACC_VF_INT_DMA_FFT_DESC_IRQ:
case ACC_VF_INT_DMA_UL5G_DESC_IRQ:
case ACC_VF_INT_DMA_DL5G_DESC_IRQ:
+ case ACC_VF_INT_DMA_MLD_DESC_IRQ:
/* VFs are not aware of their vf_id - it's set to 0. */
ring_data->vf_id = 0;
deq_intr_det.queue_id = get_queue_id_from_ring_info(
@@ -741,7 +753,7 @@ vrb_find_free_queue_idx(struct rte_bbdev *dev,
const struct rte_bbdev_queue_conf *conf)
{
struct acc_device *d = dev->data->dev_private;
- int op_2_acc[6] = {0, UL_4G, DL_4G, UL_5G, DL_5G, FFT};
+ int op_2_acc[7] = {0, UL_4G, DL_4G, UL_5G, DL_5G, FFT, MLD};
int acc = op_2_acc[conf->op_type];
struct rte_acc_queue_topology *qtop = NULL;
uint16_t group_idx;
@@ -804,7 +816,8 @@ vrb_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
int fcw_len = (conf->op_type == RTE_BBDEV_OP_LDPC_ENC ?
ACC_FCW_LE_BLEN : (conf->op_type == RTE_BBDEV_OP_TURBO_DEC ?
ACC_FCW_TD_BLEN : (conf->op_type == RTE_BBDEV_OP_LDPC_DEC ?
- ACC_FCW_LD_BLEN : ACC_FCW_FFT_BLEN)));
+ ACC_FCW_LD_BLEN : (conf->op_type == RTE_BBDEV_OP_FFT ?
+ ACC_FCW_FFT_BLEN : ACC_FCW_MLDTS_BLEN))));
for (desc_idx = 0; desc_idx < d->sw_ring_max_depth; desc_idx++) {
desc = q->ring_addr + desc_idx;
@@ -916,6 +929,8 @@ vrb_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
q->aq_depth = (1 << d->acc_conf.q_dl_5g.aq_depth_log2);
else if (conf->op_type == RTE_BBDEV_OP_FFT)
q->aq_depth = (1 << d->acc_conf.q_fft.aq_depth_log2);
+ else if (conf->op_type == RTE_BBDEV_OP_MLDTS)
+ q->aq_depth = (1 << d->acc_conf.q_mld.aq_depth_log2);
q->mmio_reg_enqueue = RTE_PTR_ADD(d->mmio_base,
d->queue_offset(d->pf_device, q->vf_id, q->qgrp_id,
q->aq_id));
@@ -972,6 +987,13 @@ vrb_print_op(struct rte_bbdev_dec_op *op, enum rte_bbdev_op_type op_type,
op_dl->ldpc_enc.n_filler, op_dl->ldpc_enc.cb_params.e,
op_dl->ldpc_enc.op_flags, op_dl->ldpc_enc.rv_index
);
+ } else if (op_type == RTE_BBDEV_OP_MLDTS) {
+ struct rte_bbdev_mldts_op *op_mldts = (struct rte_bbdev_mldts_op *) op;
+ rte_bbdev_log(INFO, " Op MLD %d RBs %d NL %d Rp %d %d %x\n",
+ index,
+ op_mldts->mldts.num_rbs, op_mldts->mldts.num_layers,
+ op_mldts->mldts.r_rep,
+ op_mldts->mldts.c_rep, op_mldts->mldts.op_flags);
}
}
@@ -1151,13 +1173,16 @@ vrb_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
d->acc_conf.q_dl_5g.num_qgroups;
dev_info->num_queues[RTE_BBDEV_OP_FFT] = d->acc_conf.q_fft.num_aqs_per_groups *
d->acc_conf.q_fft.num_qgroups;
+ dev_info->num_queues[RTE_BBDEV_OP_MLDTS] = d->acc_conf.q_mld.num_aqs_per_groups *
+ d->acc_conf.q_mld.num_qgroups;
dev_info->queue_priority[RTE_BBDEV_OP_TURBO_DEC] = d->acc_conf.q_ul_4g.num_qgroups;
dev_info->queue_priority[RTE_BBDEV_OP_TURBO_ENC] = d->acc_conf.q_dl_4g.num_qgroups;
dev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = d->acc_conf.q_ul_5g.num_qgroups;
dev_info->queue_priority[RTE_BBDEV_OP_LDPC_ENC] = d->acc_conf.q_dl_5g.num_qgroups;
dev_info->queue_priority[RTE_BBDEV_OP_FFT] = d->acc_conf.q_fft.num_qgroups;
+ dev_info->queue_priority[RTE_BBDEV_OP_MLDTS] = d->acc_conf.q_mld.num_qgroups;
dev_info->max_num_queues = 0;
- for (i = RTE_BBDEV_OP_NONE; i <= RTE_BBDEV_OP_FFT; i++)
+ for (i = RTE_BBDEV_OP_NONE; i <= RTE_BBDEV_OP_MLDTS; i++)
dev_info->max_num_queues += dev_info->num_queues[i];
dev_info->queue_size_lim = ACC_MAX_QUEUE_DEPTH;
dev_info->hardware_accelerated = true;
diff --git a/drivers/baseband/acc/vrb_pmd.h b/drivers/baseband/acc/vrb_pmd.h
index 01028273e7..1cabc0b7f4 100644
--- a/drivers/baseband/acc/vrb_pmd.h
+++ b/drivers/baseband/acc/vrb_pmd.h
@@ -101,6 +101,8 @@ struct acc_registry_addr {
unsigned int dma_ring_ul4g_lo;
unsigned int dma_ring_fft_hi;
unsigned int dma_ring_fft_lo;
+ unsigned int dma_ring_mld_hi;
+ unsigned int dma_ring_mld_lo;
unsigned int ring_size;
unsigned int info_ring_hi;
unsigned int info_ring_lo;
@@ -116,6 +118,8 @@ struct acc_registry_addr {
unsigned int tail_ptrs_ul4g_lo;
unsigned int tail_ptrs_fft_hi;
unsigned int tail_ptrs_fft_lo;
+ unsigned int tail_ptrs_mld_hi;
+ unsigned int tail_ptrs_mld_lo;
unsigned int depth_log0_offset;
unsigned int depth_log1_offset;
unsigned int qman_group_func;
@@ -140,6 +144,8 @@ static const struct acc_registry_addr vrb1_pf_reg_addr = {
.dma_ring_ul4g_lo = VRB1_PfDmaFec4GulDescBaseLoRegVf,
.dma_ring_fft_hi = VRB1_PfDmaFftDescBaseHiRegVf,
.dma_ring_fft_lo = VRB1_PfDmaFftDescBaseLoRegVf,
+ .dma_ring_mld_hi = 0,
+ .dma_ring_mld_lo = 0,
.ring_size = VRB1_PfQmgrRingSizeVf,
.info_ring_hi = VRB1_PfHiInfoRingBaseHiRegPf,
.info_ring_lo = VRB1_PfHiInfoRingBaseLoRegPf,
@@ -155,6 +161,8 @@ static const struct acc_registry_addr vrb1_pf_reg_addr = {
.tail_ptrs_ul4g_lo = VRB1_PfDmaFec4GulRespPtrLoRegVf,
.tail_ptrs_fft_hi = VRB1_PfDmaFftRespPtrHiRegVf,
.tail_ptrs_fft_lo = VRB1_PfDmaFftRespPtrLoRegVf,
+ .tail_ptrs_mld_hi = 0,
+ .tail_ptrs_mld_lo = 0,
.depth_log0_offset = VRB1_PfQmgrGrpDepthLog20Vf,
.depth_log1_offset = VRB1_PfQmgrGrpDepthLog21Vf,
.qman_group_func = VRB1_PfQmgrGrpFunction0,
@@ -179,6 +187,8 @@ static const struct acc_registry_addr vrb1_vf_reg_addr = {
.dma_ring_ul4g_lo = VRB1_VfDmaFec4GulDescBaseLoRegVf,
.dma_ring_fft_hi = VRB1_VfDmaFftDescBaseHiRegVf,
.dma_ring_fft_lo = VRB1_VfDmaFftDescBaseLoRegVf,
+ .dma_ring_mld_hi = 0,
+ .dma_ring_mld_lo = 0,
.ring_size = VRB1_VfQmgrRingSizeVf,
.info_ring_hi = VRB1_VfHiInfoRingBaseHiVf,
.info_ring_lo = VRB1_VfHiInfoRingBaseLoVf,
@@ -194,6 +204,8 @@ static const struct acc_registry_addr vrb1_vf_reg_addr = {
.tail_ptrs_ul4g_lo = VRB1_VfDmaFec4GulRespPtrLoRegVf,
.tail_ptrs_fft_hi = VRB1_VfDmaFftRespPtrHiRegVf,
.tail_ptrs_fft_lo = VRB1_VfDmaFftRespPtrLoRegVf,
+ .tail_ptrs_mld_hi = 0,
+ .tail_ptrs_mld_lo = 0,
.depth_log0_offset = VRB1_VfQmgrGrpDepthLog20Vf,
.depth_log1_offset = VRB1_VfQmgrGrpDepthLog21Vf,
.qman_group_func = VRB1_VfQmgrGrpFunction0Vf,