- Currently, no device supports a partial mask on the protocol field in
  the IP header, so such masks are now rejected at validation time.
- As there can be multiple IP items in one pattern (e.g. for tunneled
  traffic), the next_protocol variable in flow validation has to be
  reset for the inner layer. Otherwise, the inner TCP/UDP item is
  validated against the protocol number of the outer IP header (see the
  sketch below).
- Remove the redundant protocol check for MPLS, which is already done in
  mlx5_flow_validate_item_mpls().
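
A minimal sketch of the second point, assuming a VXLAN-tunneled pattern
(the helper name, port id, and queue index are hypothetical, not part of
this patch). Without the reset, next_protocol keeps IPPROTO_UDP from the
outer IPv4 item and the inner TCP item is wrongly rejected:

  #include <netinet/in.h>
  #include <rte_flow.h>

  static int
  validate_tunneled_tcp(uint16_t port_id, struct rte_flow_error *error)
  {
          const struct rte_flow_attr attr = { .ingress = 1 };
          /* Outer IPv4 fully masks the protocol field (UDP carries VXLAN). */
          const struct rte_flow_item_ipv4 outer = {
                  .hdr.next_proto_id = IPPROTO_UDP,
          };
          const struct rte_flow_item_ipv4 outer_mask = {
                  .hdr.next_proto_id = 0xff,
          };
          const struct rte_flow_item pattern[] = {
                  { .type = RTE_FLOW_ITEM_TYPE_ETH },
                  { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                    .spec = &outer, .mask = &outer_mask },
                  { .type = RTE_FLOW_ITEM_TYPE_UDP },
                  { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
                  { .type = RTE_FLOW_ITEM_TYPE_ETH },
                  /* The inner IPv4 item carries no protocol mask, so the
                   * inner TCP item must be checked against a next_protocol
                   * reset to 0xff, not against the outer IPPROTO_UDP.
                   */
                  { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                  { .type = RTE_FLOW_ITEM_TYPE_TCP },
                  { .type = RTE_FLOW_ITEM_TYPE_END },
          };
          const struct rte_flow_action_queue queue = { .index = 0 };
          const struct rte_flow_action actions[] = {
                  { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                  { .type = RTE_FLOW_ACTION_TYPE_END },
          };

          return rte_flow_validate(port_id, &attr, pattern, actions, error);
  }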

Fixes: 3d69434113d1 ("net/mlx5: add Direct Verbs validation function")
Fixes: 23c1d42c7138 ("net/mlx5: split flow validation to dedicated function")
Cc: or...@mellanox.com

Signed-off-by: Yongseok Koh <ys...@mellanox.com>
---
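Notes (below the cut line, not for the commit log): a hedged sketch of
the new partial-mask rejection; the helper name and the 0x0f mask value
are made up for illustration. With a protocol mask that is neither 0 nor
0xff, rte_flow_validate() should now fail with -EINVAL and the error
type RTE_FLOW_ERROR_TYPE_ITEM_MASK set by mlx5_flow_validate_item_ipv4():

  #include <netinet/in.h>
  #include <rte_flow.h>

  static int
  try_partial_proto_mask(uint16_t port_id, struct rte_flow_error *error)
  {
          const struct rte_flow_attr attr = { .ingress = 1 };
          const struct rte_flow_item_ipv4 spec = {
                  .hdr.next_proto_id = IPPROTO_TCP,
          };
          const struct rte_flow_item_ipv4 bad_mask = {
                  .hdr.next_proto_id = 0x0f, /* neither 0 nor 0xff: partial */
          };
          const struct rte_flow_item pattern[] = {
                  { .type = RTE_FLOW_ITEM_TYPE_ETH },
                  { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                    .spec = &spec, .mask = &bad_mask },
                  { .type = RTE_FLOW_ITEM_TYPE_END },
          };
          const struct rte_flow_action_queue queue = { .index = 0 };
          const struct rte_flow_action actions[] = {
                  { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                  { .type = RTE_FLOW_ACTION_TYPE_END },
          };

          /* Expected: -EINVAL, "partial mask is not supported for protocol". */
          return rte_flow_validate(port_id, &attr, pattern, actions, error);
  }
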
 drivers/net/mlx5/mlx5_flow.c       |  6 ++++++
 drivers/net/mlx5/mlx5_flow_dv.c    | 18 ++++++++++++++++--
 drivers/net/mlx5/mlx5_flow_verbs.c | 25 ++++++++++++++++---------
 3 files changed, 38 insertions(+), 11 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 3c2ac4b377..8039664bc2 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1178,6 +1178,12 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
                                          "L3 cannot follow an L4 layer.");
        if (!mask)
                mask = &rte_flow_item_ipv4_mask;
+       else if (mask->hdr.next_proto_id != 0 &&
+                mask->hdr.next_proto_id != 0xff)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
+                                         "partial mask is not supported"
+                                         " for protocol");
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_ipv4),
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 7909615360..a02bf47737 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -814,10 +814,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
-                            items->mask)->hdr.next_proto_id)
+                            items->mask)->hdr.next_proto_id) {
                                next_protocol =
                                        ((const struct rte_flow_item_ipv4 *)
                                         (items->spec))->hdr.next_proto_id;
+                               next_protocol &=
+                                       ((const struct rte_flow_item_ipv4 *)
+                                        (items->mask))->hdr.next_proto_id;
+                       } else {
+                               /* Reset for inner layer. */
+                               next_protocol = 0xff;
+                       }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ret = mlx5_flow_validate_item_ipv6(items, item_flags,
@@ -828,10 +835,17 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
-                            items->mask)->hdr.proto)
+                            items->mask)->hdr.proto) {
                                next_protocol =
                                        ((const struct rte_flow_item_ipv6 *)
                                         items->spec)->hdr.proto;
+                               next_protocol &=
+                                       ((const struct rte_flow_item_ipv6 *)
+                                        items->mask)->hdr.proto;
+                       } else {
+                               /* Reset for inner layer. */
+                               next_protocol = 0xff;
+                       }
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        ret = mlx5_flow_validate_item_tcp
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c
index 699cc88c8c..d6d95db563 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1058,10 +1058,17 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
-                            items->mask)->hdr.next_proto_id)
+                            items->mask)->hdr.next_proto_id) {
                                next_protocol =
                                        ((const struct rte_flow_item_ipv4 *)
                                         (items->spec))->hdr.next_proto_id;
+                               next_protocol &=
+                                       ((const struct rte_flow_item_ipv4 *)
+                                        (items->mask))->hdr.next_proto_id;
+                       } else {
+                               /* Reset for inner layer. */
+                               next_protocol = 0xff;
+                       }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ret = mlx5_flow_validate_item_ipv6(items, item_flags,
@@ -1072,10 +1079,17 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
-                            items->mask)->hdr.proto)
+                            items->mask)->hdr.proto) {
                                next_protocol =
                                        ((const struct rte_flow_item_ipv6 *)
                                         items->spec)->hdr.proto;
+                               next_protocol &=
+                                       ((const struct rte_flow_item_ipv6 *)
+                                        items->mask)->hdr.proto;
+                       } else {
+                               /* Reset for inner layer. */
+                               next_protocol = 0xff;
+                       }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        ret = mlx5_flow_validate_item_udp(items, item_flags,
@@ -1125,13 +1139,6 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                                                           error);
                        if (ret < 0)
                                return ret;
-                       if (next_protocol != 0xff &&
-                           next_protocol != IPPROTO_MPLS)
-                               return rte_flow_error_set
-                                       (error, EINVAL,
-                                        RTE_FLOW_ERROR_TYPE_ITEM, items,
-                                        "protocol filtering not compatible"
-                                        " with MPLS layer");
                        item_flags |= MLX5_FLOW_LAYER_MPLS;
                        break;
                default:
-- 
2.11.0
