From: Gregory Etelson <getel...@nvidia.com>

A new mlx5dr_context member replaces mlx5dr_cmd_query_caps in the definer
conversion data. The capabilities structure is a member of mlx5dr_context
and is reached through it.
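For illustration only (not part of the diff below), the converters switch
from the cached capabilities pointer to reaching the capabilities through
the context, typically via a local shortcut at the top of each converter:

	/* Before: capabilities pointer cached directly in the conversion data. */
	if (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_TEID_ENABLED)) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	/* After: the conversion data carries the context; the capabilities
	 * are dereferenced once per converter through cd->ctx->caps.
	 */
	struct mlx5dr_cmd_query_caps *caps = cd->ctx->caps;

	if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_TEID_ENABLED)) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}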
Signed-off-by: Gregory Etelson <getel...@nvidia.com>
Signed-off-by: Rongwei Liu <rongw...@nvidia.com>
Reviewed-by: Alex Vesker <va...@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viachesl...@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c | 42 ++++++++++++++-------------
 1 file changed, 22 insertions(+), 20 deletions(-)

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 6b98eb8c96..0f1cab7e07 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -100,7 +100,7 @@ struct mlx5dr_definer_sel_ctrl {
 };
 
 struct mlx5dr_definer_conv_data {
-	struct mlx5dr_cmd_query_caps *caps;
+	struct mlx5dr_context *ctx;
 	struct mlx5dr_definer_fc *fc;
 	uint8_t relaxed;
 	uint8_t tunnel;
@@ -815,6 +815,7 @@ mlx5dr_definer_conv_item_gtp(struct mlx5dr_definer_conv_data *cd,
 			     struct rte_flow_item *item,
 			     int item_idx)
 {
+	struct mlx5dr_cmd_query_caps *caps = cd->ctx->caps;
 	const struct rte_flow_item_gtp *m = item->mask;
 	struct mlx5dr_definer_fc *fc;
 
@@ -836,7 +837,7 @@
 	}
 
 	if (m->teid) {
-		if (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_TEID_ENABLED)) {
+		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_TEID_ENABLED)) {
 			rte_errno = ENOTSUP;
 			return rte_errno;
 		}
@@ -844,11 +845,11 @@
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_gtp_teid_set;
 		fc->bit_mask = __mlx5_mask(header_gtp, teid);
-		fc->byte_off = cd->caps->format_select_gtpu_dw_1 * DW_SIZE;
+		fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE;
 	}
 
 	if (m->v_pt_rsv_flags) {
-		if (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
+		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
 			rte_errno = ENOTSUP;
 			return rte_errno;
 		}
@@ -857,12 +858,12 @@
 		fc->tag_set = &mlx5dr_definer_gtp_ext_flag_set;
 		fc->bit_mask = __mlx5_mask(header_gtp, ext_hdr_flag);
 		fc->bit_off = __mlx5_dw_bit_off(header_gtp, ext_hdr_flag);
-		fc->byte_off = cd->caps->format_select_gtpu_dw_0 * DW_SIZE;
+		fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
 	}
 
 	if (m->msg_type) {
-		if (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
+		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
 			rte_errno = ENOTSUP;
 			return rte_errno;
 		}
@@ -871,7 +872,7 @@
 		fc->item_idx = item_idx;
 		fc->tag_set = &mlx5dr_definer_gtp_msg_type_set;
 		fc->bit_mask = __mlx5_mask(header_gtp, msg_type);
 		fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);
-		fc->byte_off = cd->caps->format_select_gtpu_dw_0 * DW_SIZE;
+		fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
 	}
 
 	return 0;
@@ -882,12 +883,13 @@ mlx5dr_definer_conv_item_gtp_psc(struct mlx5dr_definer_conv_data *cd,
 				 struct rte_flow_item *item,
 				 int item_idx)
 {
+	struct mlx5dr_cmd_query_caps *caps = cd->ctx->caps;
 	const struct rte_flow_item_gtp_psc *m = item->mask;
 	struct mlx5dr_definer_fc *fc;
 
 	/* Overwrite GTP extension flag to be 1 */
 	if (!cd->relaxed) {
-		if (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
+		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
 			rte_errno = ENOTSUP;
 			return rte_errno;
 		}
@@ -896,12 +898,12 @@
 		fc->tag_set = &mlx5dr_definer_ones_set;
 		fc->bit_mask = __mlx5_mask(header_gtp, ext_hdr_flag);
 		fc->bit_off = __mlx5_dw_bit_off(header_gtp, ext_hdr_flag);
-		fc->byte_off = cd->caps->format_select_gtpu_dw_0 * DW_SIZE;
+		fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
 	}
 
 	/* Overwrite next extension header type */
 	if (!cd->relaxed) {
-		if (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_2_ENABLED)) {
+		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_2_ENABLED)) {
 			rte_errno = ENOTSUP;
 			return rte_errno;
 		}
@@ -911,14 +913,14 @@
 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
 		fc->bit_mask = __mlx5_mask(header_opt_gtp, next_ext_hdr_type);
 		fc->bit_off = __mlx5_dw_bit_off(header_opt_gtp, next_ext_hdr_type);
-		fc->byte_off = cd->caps->format_select_gtpu_dw_2 * DW_SIZE;
+		fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE;
 	}
 
 	if (!m)
 		return 0;
 
 	if (m->hdr.type) {
-		if (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_FIRST_EXT_DW_0_ENABLED)) {
+		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_FIRST_EXT_DW_0_ENABLED)) {
 			rte_errno = ENOTSUP;
 			return rte_errno;
 		}
@@ -927,11 +929,11 @@
 		fc->tag_set = &mlx5dr_definer_gtp_ext_hdr_pdu_set;
 		fc->bit_mask = __mlx5_mask(header_gtp_psc, pdu_type);
 		fc->bit_off = __mlx5_dw_bit_off(header_gtp_psc, pdu_type);
-		fc->byte_off = cd->caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
+		fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
 	}
 
 	if (m->hdr.qfi) {
-		if (!(cd->caps->flex_protocols & MLX5_HCA_FLEX_GTPU_FIRST_EXT_DW_0_ENABLED)) {
+		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_FIRST_EXT_DW_0_ENABLED)) {
 			rte_errno = ENOTSUP;
 			return rte_errno;
 		}
@@ -940,7 +942,7 @@
 		fc->tag_set = &mlx5dr_definer_gtp_ext_hdr_qfi_set;
 		fc->bit_mask = __mlx5_mask(header_gtp_psc, qfi);
 		fc->bit_off = __mlx5_dw_bit_off(header_gtp_psc, qfi);
-		fc->byte_off = cd->caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
+		fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
 	}
 
 	return 0;
@@ -951,18 +953,19 @@ mlx5dr_definer_conv_item_port(struct mlx5dr_definer_conv_data *cd,
 			      struct rte_flow_item *item,
 			      int item_idx)
 {
+	struct mlx5dr_cmd_query_caps *caps = cd->ctx->caps;
 	const struct rte_flow_item_ethdev *m = item->mask;
 	struct mlx5dr_definer_fc *fc;
 	uint8_t bit_offset = 0;
 
 	if (m->port_id) {
-		if (!cd->caps->wire_regc_mask) {
+		if (!caps->wire_regc_mask) {
 			DR_LOG(ERR, "Port ID item not supported, missing wire REGC mask");
 			rte_errno = ENOTSUP;
 			return rte_errno;
 		}
 
-		while (!(cd->caps->wire_regc_mask & (1 << bit_offset)))
+		while (!(caps->wire_regc_mask & (1 << bit_offset)))
 			bit_offset++;
 
 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VPORT_REG_C_0];
@@ -971,7 +974,7 @@
 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
 		DR_CALC_SET_HDR(fc, registers, register_c_0);
 		fc->bit_off = bit_offset;
-		fc->bit_mask = cd->caps->wire_regc_mask >> bit_offset;
+		fc->bit_mask = caps->wire_regc_mask >> bit_offset;
 	} else {
 		DR_LOG(ERR, "Pord ID item mask must specify ID mask");
 		rte_errno = EINVAL;
@@ -1479,8 +1482,7 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 	int ret;
 
 	cd.fc = fc;
-	cd.hl = hl;
-	cd.caps = ctx->caps;
+	cd.ctx = ctx;
 	cd.relaxed = mt->flags & MLX5DR_MATCH_TEMPLATE_FLAG_RELAXED_MATCH;
 
 	/* Collect all RTE fields to the field array and set header layout */
-- 
2.27.0