Packet type matching provides a quick way of identifying the L2/L3/L4 protocols of a given packet. This helps to optimize flow rule matching by eliminating the need to stack all the packet headers in the matching criteria.
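As an illustration only (not part of this patch), a minimal sketch of how an application could then match, for example, outer IPv4/TCP traffic with a single PTYPE item instead of stacking ETH/IPV4/TCP items; the packet-type values chosen below are just an example:

	#include <rte_flow.h>
	#include <rte_mbuf_ptype.h>

	/* Example spec/mask: match only the outer L3/L4 part of the packet type. */
	struct rte_flow_item_ptype ptype_spec = {
		.packet_type = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	};
	struct rte_flow_item_ptype ptype_mask = {
		.packet_type = RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK,
	};
	struct rte_flow_item pattern[] = {
		{
			.type = RTE_FLOW_ITEM_TYPE_PTYPE,
			.spec = &ptype_spec,
			.mask = &ptype_mask,
		},
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};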
Signed-off-by: Alexander Kozyrev <akozy...@nvidia.com>
---
 drivers/net/mlx5/hws/mlx5dr_definer.c | 170 ++++++++++++++++++++++++++
 drivers/net/mlx5/hws/mlx5dr_definer.h |   8 ++
 drivers/net/mlx5/mlx5_flow.h          |   3 +
 drivers/net/mlx5/mlx5_flow_hw.c       |   1 +
 4 files changed, 182 insertions(+)

diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.c b/drivers/net/mlx5/hws/mlx5dr_definer.c
index 88f22e7f70..e3f4a3c0a8 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.c
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.c
@@ -16,11 +16,14 @@
 #define STE_NO_VLAN 0x0
 #define STE_SVLAN 0x1
 #define STE_CVLAN 0x2
+#define STE_NO_L3 0x0
 #define STE_IPV4 0x1
 #define STE_IPV6 0x2
+#define STE_NO_L4 0x0
 #define STE_TCP 0x1
 #define STE_UDP 0x2
 #define STE_ICMP 0x3
+#define STE_ESP 0x3
 
 #define MLX5DR_DEFINER_QUOTA_BLOCK 0
 #define MLX5DR_DEFINER_QUOTA_PASS 2
@@ -276,6 +279,88 @@ mlx5dr_definer_conntrack_tag(struct mlx5dr_definer_fc *fc,
 	DR_SET(tag, reg_value, fc->byte_off, fc->bit_off, fc->bit_mask);
 }
 
+static void
+mlx5dr_definer_ptype_l2_set(struct mlx5dr_definer_fc *fc,
+			    const void *item_spec,
+			    uint8_t *tag)
+{
+	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L2_I);
+	const struct rte_flow_item_ptype *v = item_spec;
+	uint32_t packet_type = v->packet_type &
+		(inner ? RTE_PTYPE_INNER_L2_MASK : RTE_PTYPE_L2_MASK);
+	uint8_t l2_type = STE_NO_VLAN;
+
+	if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER : RTE_PTYPE_L2_ETHER))
+		l2_type = STE_NO_VLAN;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER_VLAN))
+		l2_type = STE_CVLAN;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_QINQ : RTE_PTYPE_L2_ETHER_QINQ))
+		l2_type = STE_SVLAN;
+
+	DR_SET(tag, l2_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l3_set(struct mlx5dr_definer_fc *fc,
+			    const void *item_spec,
+			    uint8_t *tag)
+{
+	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L3_I);
+	const struct rte_flow_item_ptype *v = item_spec;
+	uint32_t packet_type = v->packet_type &
+		(inner ? RTE_PTYPE_INNER_L3_MASK : RTE_PTYPE_L3_MASK);
+	uint8_t l3_type = STE_NO_L3;
+
+	if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4))
+		l3_type = STE_IPV4;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6))
+		l3_type = STE_IPV6;
+
+	DR_SET(tag, l3_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l4_set(struct mlx5dr_definer_fc *fc,
+			    const void *item_spec,
+			    uint8_t *tag)
+{
+	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_I);
+	const struct rte_flow_item_ptype *v = item_spec;
+	uint32_t packet_type = v->packet_type &
+		(inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
+	uint8_t l4_type = STE_NO_L4;
+
+	if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
+		l4_type = STE_TCP;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
+		l4_type = STE_UDP;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ESP : RTE_PTYPE_L4_ESP))
+		l4_type = STE_ESP;
+
+	DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
+static void
+mlx5dr_definer_ptype_l4_ext_set(struct mlx5dr_definer_fc *fc,
+				const void *item_spec,
+				uint8_t *tag)
+{
+	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_EXT_I);
+	const struct rte_flow_item_ptype *v = item_spec;
+	uint32_t packet_type = v->packet_type &
+		(inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
+	uint8_t l4_type = STE_NO_L4;
+
+	if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
+		l4_type = STE_TCP;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
+		l4_type = STE_UDP;
+	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ICMP : RTE_PTYPE_L4_ICMP))
+		l4_type = STE_ICMP;
+
+	DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask);
+}
+
 static void
 mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
 			     const void *item_spec,
@@ -1692,6 +1777,87 @@ mlx5dr_definer_conv_item_gre_key(struct mlx5dr_definer_conv_data *cd,
 	return 0;
 }
 
+static int
+mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd,
+			       struct rte_flow_item *item,
+			       int item_idx)
+{
+	const struct rte_flow_item_ptype *m = item->mask;
+	struct mlx5dr_definer_fc *fc;
+
+	if (!m)
+		return 0;
+
+	if (!(m->packet_type &
+	      (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
+	       RTE_PTYPE_INNER_L2_MASK | RTE_PTYPE_INNER_L3_MASK | RTE_PTYPE_INNER_L4_MASK))) {
+		rte_errno = ENOTSUP;
+		return rte_errno;
+	}
+
+	if (m->packet_type & RTE_PTYPE_L2_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, false)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l2_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, false);
+	}
+
+	if (m->packet_type & RTE_PTYPE_INNER_L2_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, true)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l2_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, true);
+	}
+
+	if (m->packet_type & RTE_PTYPE_L3_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, false)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l3_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l3_type, false);
+	}
+
+	if (m->packet_type & RTE_PTYPE_INNER_L3_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, true)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l3_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l3_type, true);
+	}
+
+	if (m->packet_type & RTE_PTYPE_L4_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, false)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
+
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4_EXT, false)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l4_ext_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l4_type, false);
+	}
+
+	if (m->packet_type & RTE_PTYPE_INNER_L4_MASK) {
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, true)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l4_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l4_type_bwc, true);
+
+		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4_EXT, true)];
+		fc->item_idx = item_idx;
+		fc->tag_set = &mlx5dr_definer_ptype_l4_ext_set;
+		fc->tag_mask_set = &mlx5dr_definer_ones_set;
+		DR_CALC_SET(fc, eth_l2, l4_type, true);
+	}
+
+	return 0;
+}
+
 static int
 mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd,
 				   struct rte_flow_item *item,
@@ -2308,6 +2474,10 @@ mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
 			ret = mlx5dr_definer_conv_item_ib_l4(&cd, items, i);
 			item_flags |= MLX5_FLOW_ITEM_IB_BTH;
 			break;
+		case RTE_FLOW_ITEM_TYPE_PTYPE:
+			ret = mlx5dr_definer_conv_item_ptype(&cd, items, i);
+			item_flags |= MLX5_FLOW_ITEM_PTYPE;
+			break;
 		default:
 			DR_LOG(ERR, "Unsupported item type %d", items->type);
 			rte_errno = ENOTSUP;
diff --git a/drivers/net/mlx5/hws/mlx5dr_definer.h b/drivers/net/mlx5/hws/mlx5dr_definer.h
index 6b645f4cf0..6b02161e02 100644
--- a/drivers/net/mlx5/hws/mlx5dr_definer.h
+++ b/drivers/net/mlx5/hws/mlx5dr_definer.h
@@ -136,6 +136,14 @@ enum mlx5dr_definer_fname {
 	MLX5DR_DEFINER_FNAME_OKS2_MPLS4_I,
 	MLX5DR_DEFINER_FNAME_IB_L4_OPCODE,
 	MLX5DR_DEFINER_FNAME_IB_L4_QPN,
+	MLX5DR_DEFINER_FNAME_PTYPE_L2_O,
+	MLX5DR_DEFINER_FNAME_PTYPE_L2_I,
+	MLX5DR_DEFINER_FNAME_PTYPE_L3_O,
+	MLX5DR_DEFINER_FNAME_PTYPE_L3_I,
+	MLX5DR_DEFINER_FNAME_PTYPE_L4_O,
+	MLX5DR_DEFINER_FNAME_PTYPE_L4_I,
+	MLX5DR_DEFINER_FNAME_PTYPE_L4_EXT_O,
+	MLX5DR_DEFINER_FNAME_PTYPE_L4_EXT_I,
 	MLX5DR_DEFINER_FNAME_MAX,
 };
 
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index 6beac3902c..c670bf72bc 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -233,6 +233,9 @@ enum mlx5_feature_name {
 /* IB BTH ITEM. */
 #define MLX5_FLOW_ITEM_IB_BTH (1ull << 51)
 
+/* PTYPE ITEM */
+#define MLX5_FLOW_ITEM_PTYPE (1ull << 52)
+
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index b7853d3379..587d55148e 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -5392,6 +5392,7 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
 		case RTE_FLOW_ITEM_TYPE_ESP:
 		case RTE_FLOW_ITEM_TYPE_FLEX:
 		case RTE_FLOW_ITEM_TYPE_IB_BTH:
+		case RTE_FLOW_ITEM_TYPE_PTYPE:
 			break;
 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
 			/*
-- 
2.18.2