Firmware exports the parse graph header length capability via hca_attr, and the current value is 6. The user must specify the header length field via field_size. The field size implicitly defines the mask as 2^field_size - 1.
1. If field_size is bigger than 6, PMD needs to add an extra offset internally, let HW parse only the 6 LSBs as length. length |--------|--------|--------|--------| The actual header length offset 8 doesn't work well with new firmware, only the bits 8-13 are read and parsed as a length field. Need to change the offset to 10 (8 + 2) internally. Field mask can't be bigger than 0x3F (2^6-1). 2. If field_size is smaller than 6, PMD needs to subtract an offset to fit 6 bits exactly. length |--------|----|------------|--------| The actual header length offset 8 doesn't work well with new firmware because firmware will read two more bits from the next field. Need to change the offset to 6 (8 - 2) internally. Signed-off-by: Rongwei Liu <rongw...@nvidia.com> Acked-by: Viacheslav Ovsiienko <viachesl...@nvidia.com> --- drivers/net/mlx5/mlx5.c | 3 +++ drivers/net/mlx5/mlx5_flow_flex.c | 22 +++++++++++++++++++++- 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index a75fa1b7f0..f9aea13187 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -1057,6 +1057,7 @@ mlx5_alloc_srh_flex_parser(struct rte_eth_dev *dev) uint32_t ids[MLX5_GRAPH_NODE_SAMPLE_NUM]; struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_common_dev_config *config = &priv->sh->cdev->config; + struct mlx5_hca_flex_attr *attr = &priv->sh->cdev->config.hca_attr.flex; void *fp = NULL, *ibv_ctx = priv->sh->cdev->ctx; int ret; @@ -1079,6 +1080,8 @@ mlx5_alloc_srh_flex_parser(struct rte_eth_dev *dev) node.header_length_field_shift = 0x3; /* Header length is the 2nd byte. */ node.header_length_field_offset = 0x8; + if (attr->header_length_mask_width < 8) + node.header_length_field_offset += 8 - attr->header_length_mask_width; node.header_length_field_mask = 0xF; /* One byte next header protocol. 
*/ node.next_header_field_size = 0x8; diff --git a/drivers/net/mlx5/mlx5_flow_flex.c b/drivers/net/mlx5/mlx5_flow_flex.c index 4f66b7dd1a..4ae03a23f1 100644 --- a/drivers/net/mlx5/mlx5_flow_flex.c +++ b/drivers/net/mlx5/mlx5_flow_flex.c @@ -484,6 +484,14 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr, return rte_flow_error_set (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "unsupported header length field mode (OFFSET)"); + if (!field->field_size) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "field size is a must for offset mode"); + if (field->field_size + field->offset_base < attr->header_length_mask_width) + return rte_flow_error_set + (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, + "field size plus offset_base is too small"); node->header_length_mode = MLX5_GRAPH_NODE_LEN_FIELD; if (field->offset_mask == 0 || !rte_is_power_of_2(field->offset_mask + 1)) @@ -539,9 +547,21 @@ mlx5_flex_translate_length(struct mlx5_hca_flex_attr *attr, return rte_flow_error_set (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "header length field shift exceeds limit"); - node->header_length_field_shift = field->offset_shift; + node->header_length_field_shift = field->offset_shift; node->header_length_field_offset = field->offset_base; } + if (field->field_mode == FIELD_MODE_OFFSET) { + if (field->field_size > attr->header_length_mask_width) { + node->header_length_field_offset += + field->field_size - attr->header_length_mask_width; + } else if (field->field_size < attr->header_length_mask_width) { + node->header_length_field_offset -= + attr->header_length_mask_width - field->field_size; + node->header_length_field_mask = + RTE_MIN(node->header_length_field_mask, + (1u << field->field_size) - 1); + } + } return 0; } -- 2.27.0