Hi,

> -----Original Message-----
> From: Dekel Peled <dek...@mellanox.com>
> Sent: Thursday, February 20, 2020 1:33 PM
> To: Matan Azrad <ma...@mellanox.com>; Slava Ovsiienko
> <viachesl...@mellanox.com>; Raslan Darawsheh <rasl...@mellanox.com>
> Cc: dev@dpdk.org; sta...@dpdk.org
> Subject: [PATCH v2] net/mlx5: fix match on Ethertype and CVLAN tag
>
> HW supports match on one Ethertype: the Ethertype following the last
> VLAN tag of the packet (see PRM).
> A previous patch added specific handling for packets with a VLAN tag,
> applied after setting the match on Ethertype.
>
> This patch moves the handling of packets with a VLAN tag so it is done
> before, and instead of, setting the match on Ethertype.
>
> The previous patch also added, as part of the specific handling for
> packets with a VLAN tag, the setting of the cvlan_tag mask bit in the
> translation of L3 items.
> In case of an L3 tunnel there is no inner L2 header, so setting this
> mask bit is wrong and causes match failures.
>
> This patch adds a check to make sure an L2 header exists before setting
> the cvlan_tag mask bit for L3 items.
>
> Fixes: 00f75a40576b ("net/mlx5: fix VLAN match for DV mode")
> Cc: sta...@dpdk.org
>
> Signed-off-by: Dekel Peled <dek...@mellanox.com>
Tested-by: Raslan Darawsheh <rasl...@mellanox.com>

> ---
>  drivers/net/mlx5/mlx5_flow_dv.c | 42 +++++++++++++++++++++++++++++++++++++++++++++---------
>  1 file changed, 33 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
> index 467d1ce..6f15a91 100644
> --- a/drivers/net/mlx5/mlx5_flow_dv.c
> +++ b/drivers/net/mlx5/mlx5_flow_dv.c
> @@ -5213,19 +5213,27 @@ struct field_modify_info modify_tcp[] = {
>  	/* The value must be in the range of the mask. */
>  	for (i = 0; i < sizeof(eth_m->dst); ++i)
>  		l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
> -	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
> -		 rte_be_to_cpu_16(eth_m->type));
> -	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
> -	*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
>  	if (eth_v->type) {
>  		/* When ethertype is present set mask for tagged VLAN. */
>  		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
>  		/* Set value for tagged VLAN if ethertype is 802.1Q. */
>  		if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
> -		    eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ))
> +		    eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
>  			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
>  				 1);
> +			/* Return here to avoid setting match on ethertype. */
> +			return;
> +		}
>  	}
> +	/*
> +	 * HW supports match on one Ethertype, the Ethertype following the last
> +	 * VLAN tag of the packet (see PRM).
> +	 * Set match on ethertype only if ETH header is not followed by VLAN.
> +	 */
> +	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
> +		 rte_be_to_cpu_16(eth_m->type));
> +	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
> +	*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
>  }
>
>  /**
> @@ -5299,6 +5307,8 @@ struct field_modify_info modify_tcp[] = {
>   *   Flow matcher value.
>   * @param[in] item
>   *   Flow pattern to translate.
> + * @param[in] item_flags
> + *   Bit-fields that holds the items detected until now.
>   * @param[in] inner
>   *   Item is inner pattern.
>   * @param[in] group
> @@ -5307,6 +5317,7 @@ struct field_modify_info modify_tcp[] = {
>  static void
>  flow_dv_translate_item_ipv4(void *matcher, void *key,
>  			    const struct rte_flow_item *item,
> +			    const uint64_t item_flags,
>  			    int inner, uint32_t group)
>  {
>  	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
> @@ -5366,7 +5377,12 @@ struct field_modify_info modify_tcp[] = {
>  		 ipv4_m->hdr.next_proto_id);
>  	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
>  		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
> -	MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
> +	/*
> +	 * On outer header (which must contains L2), or inner header with L2,
> +	 * set cvlan_tag mask bit to mark this packet as untagged.
> +	 */
> +	if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
> +		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
>  }
>
>  /**
> @@ -5378,6 +5394,8 @@ struct field_modify_info modify_tcp[] = {
>   *   Flow matcher value.
>   * @param[in] item
>   *   Flow pattern to translate.
> + * @param[in] item_flags
> + *   Bit-fields that holds the items detected until now.
>   * @param[in] inner
>   *   Item is inner pattern.
>   * @param[in] group
> @@ -5386,6 +5404,7 @@ struct field_modify_info modify_tcp[] = {
>  static void
>  flow_dv_translate_item_ipv6(void *matcher, void *key,
>  			    const struct rte_flow_item *item,
> +			    const uint64_t item_flags,
>  			    int inner, uint32_t group)
>  {
>  	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
> @@ -5471,7 +5490,12 @@ struct field_modify_info modify_tcp[] = {
>  		 ipv6_m->hdr.proto);
>  	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
>  		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
> -	MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
> +	/*
> +	 * On outer header (which must contains L2), or inner header with L2,
> +	 * set cvlan_tag mask bit to mark this packet as untagged.
> +	 */
> +	if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
> +		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
>  }
>
>  /**
> @@ -7574,7 +7598,7 @@ struct field_modify_info modify_tcp[] = {
>  			mlx5_flow_tunnel_ip_check(items, next_protocol,
>  						  &item_flags, &tunnel);
>  			flow_dv_translate_item_ipv4(match_mask, match_value,
> -						    items, tunnel,
> +						    items, item_flags, tunnel,
>  						    dev_flow->group);
>  			matcher.priority = MLX5_PRIORITY_MAP_L3;
>  			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
> @@ -7597,7 +7621,7 @@ struct field_modify_info modify_tcp[] = {
>  			mlx5_flow_tunnel_ip_check(items, next_protocol,
>  						  &item_flags, &tunnel);
>  			flow_dv_translate_item_ipv6(match_mask, match_value,
> -						    items, tunnel,
> +						    items, item_flags, tunnel,
>  						    dev_flow->group);
>  			matcher.priority = MLX5_PRIORITY_MAP_L3;
>  			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
> --
> 1.8.3.1

Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh
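For anyone who wants to exercise the fixed code path from an application, below is a minimal rte_flow sketch (not part of the patch) that creates the kind of rule affected by this change: an ETH item whose Ethertype is 802.1Q, followed by VLAN and IPv4 items. It assumes the rte_flow_item_eth layout of this DPDK release (Ethertype field named "type"); the function name, port id, queue index and error handling are placeholders for illustration only.

#include <rte_byteorder.h>
#include <rte_ether.h>
#include <rte_flow.h>

/*
 * Example rule: match VLAN-tagged IPv4 packets and steer them to a
 * given Rx queue. With this patch, translation of the ETH item below
 * sets the cvlan_tag bit and returns without programming an Ethertype
 * match, as described in the commit message above.
 */
static struct rte_flow *
create_vlan_ipv4_rule(uint16_t port_id, uint16_t queue_id,
		      struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.type = RTE_BE16(RTE_ETHER_TYPE_VLAN),
	};
	struct rte_flow_item_eth eth_mask = {
		.type = RTE_BE16(0xffff),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* A NULL return means the PMD rejected the rule; see error->message. */
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}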