HWS item validation scheme is based on existing DV items validation.

Signed-off-by: Gregory Etelson <getel...@nvidia.com>
Acked-by: Dariusz Sosnowski <dsosnow...@nvidia.com>
---
Depends-on: series-32045 ("validate actions in HWS table")
---
 drivers/net/mlx5/linux/mlx5_flow_os.c   |  21 +-
 drivers/net/mlx5/linux/mlx5_flow_os.h   |   9 +-
 drivers/net/mlx5/mlx5_flow.c            | 271 ++++++-----
 drivers/net/mlx5/mlx5_flow.h            | 115 +++--
 drivers/net/mlx5/mlx5_flow_dv.c         | 369 +++++++-------
 drivers/net/mlx5/mlx5_flow_hw.c         | 610 ++++++++++++++++--------
 drivers/net/mlx5/mlx5_flow_verbs.c      |  21 +-
 drivers/net/mlx5/windows/mlx5_flow_os.c |   5 +-
 drivers/net/mlx5/windows/mlx5_flow_os.h |   9 +-
 9 files changed, 895 insertions(+), 535 deletions(-)

diff --git a/drivers/net/mlx5/linux/mlx5_flow_os.c b/drivers/net/mlx5/linux/mlx5_flow_os.c
index 2767b11708..af8c02c38b 100644
--- a/drivers/net/mlx5/linux/mlx5_flow_os.c
+++ b/drivers/net/mlx5/linux/mlx5_flow_os.c
@@ -14,10 +14,11 @@ static struct mlx5_flow_workspace *gc_head;
 static rte_spinlock_t mlx5_flow_workspace_lock = RTE_SPINLOCK_INITIALIZER;
 
 int
-mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
-                           uint64_t item_flags,
-                           uint8_t target_protocol,
-                           struct rte_flow_error *error)
+mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
+                              const struct rte_flow_item *item,
+                              uint64_t item_flags,
+                              uint8_t target_protocol,
+                              struct rte_flow_error *error)
 {
        const struct rte_flow_item_esp *mask = item->mask;
        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
@@ -27,10 +28,12 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item 
*item,
                                      MLX5_FLOW_LAYER_OUTER_L4;
        int ret;
 
-       if (!(item_flags & l3m))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "L3 is mandatory to filter on L4");
+       if (!mlx5_hws_active(dev)) {
+               if (!(item_flags & l3m))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "L3 is mandatory to filter on L4");
+       }
        if (item_flags & l4m)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -43,7 +46,7 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item 
*item,
        if (!mask)
                mask = &rte_flow_item_esp_mask;
        ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
+               (dev, item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_esp_mask,
                 sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
                 error);
diff --git a/drivers/net/mlx5/linux/mlx5_flow_os.h b/drivers/net/mlx5/linux/mlx5_flow_os.h
index 3f7a94c9ee..35b5871ab9 100644
--- a/drivers/net/mlx5/linux/mlx5_flow_os.h
+++ b/drivers/net/mlx5/linux/mlx5_flow_os.h
@@ -521,10 +521,11 @@ mlx5_os_flow_dr_sync_domain(void *domain, uint32_t flags)
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
-                           uint64_t item_flags,
-                           uint8_t target_protocol,
-                           struct rte_flow_error *error);
+mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
+                              const struct rte_flow_item *item,
+                              uint64_t item_flags,
+                              uint8_t target_protocol,
+                              struct rte_flow_error *error);
 
 /**
  * Add per thread workspace to the global list for garbage collection.
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index c90b87c8ef..18c8890e4a 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -1453,7 +1453,8 @@ mlx5_get_matcher_priority(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_item_acceptable(const struct rte_flow_item *item,
+mlx5_flow_item_acceptable(const struct rte_eth_dev *dev,
+                         const struct rte_flow_item *item,
                          const uint8_t *mask,
                          const uint8_t *nic_mask,
                          unsigned int size,
@@ -1470,6 +1471,8 @@ mlx5_flow_item_acceptable(const struct rte_flow_item 
*item,
                                                  item,
                                                  "mask enables non supported"
                                                  " bits");
+       if (mlx5_hws_active(dev))
+               return 0;
        if (!item->spec && (item->mask || item->last))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -2454,10 +2457,11 @@ flow_validate_modify_field_level(const struct 
rte_flow_field_data *data,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
-                              uint64_t item_flags,
-                              uint8_t target_protocol,
-                              struct rte_flow_error *error)
+mlx5_flow_validate_item_icmp6(const struct rte_eth_dev *dev,
+                             const struct rte_flow_item *item,
+                             uint64_t item_flags,
+                             uint8_t target_protocol,
+                             struct rte_flow_error *error)
 {
        const struct rte_flow_item_icmp6 *mask = item->mask;
        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
@@ -2472,11 +2476,12 @@ mlx5_flow_validate_item_icmp6(const struct 
rte_flow_item *item,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "protocol filtering not compatible"
                                          " with ICMP6 layer");
-       if (!(item_flags & l3m))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "IPv6 is mandatory to filter on"
-                                         " ICMP6");
+       if (!mlx5_hws_active(dev)) {
+               if (!(item_flags & l3m))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "IPv6 is mandatory to filter on ICMP6");
+       }
        if (item_flags & l4m)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -2484,7 +2489,7 @@ mlx5_flow_validate_item_icmp6(const struct rte_flow_item 
*item,
        if (!mask)
                mask = &rte_flow_item_icmp6_mask;
        ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
+               (dev, item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_icmp6_mask,
                 sizeof(struct rte_flow_item_icmp6),
                 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -2509,7 +2514,8 @@ mlx5_flow_validate_item_icmp6(const struct rte_flow_item 
*item,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_icmp6_echo(const struct rte_flow_item *item,
+mlx5_flow_validate_item_icmp6_echo(const struct rte_eth_dev *dev,
+                                  const struct rte_flow_item *item,
                                   uint64_t item_flags,
                                   uint8_t target_protocol,
                                   struct rte_flow_error *error)
@@ -2533,11 +2539,12 @@ mlx5_flow_validate_item_icmp6_echo(const struct 
rte_flow_item *item,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "protocol filtering not compatible"
                                          " with ICMP6 layer");
-       if (!(item_flags & l3m))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "IPv6 is mandatory to filter on"
-                                         " ICMP6");
+       if (!mlx5_hws_active(dev)) {
+               if (!(item_flags & l3m))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "IPv6 is mandatory to filter on ICMP6");
+       }
        if (item_flags & l4m)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -2545,7 +2552,7 @@ mlx5_flow_validate_item_icmp6_echo(const struct 
rte_flow_item *item,
        if (!mask)
                mask = &nic_mask;
        ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
+               (dev, item, (const uint8_t *)mask,
                 (const uint8_t *)&nic_mask,
                 sizeof(struct rte_flow_item_icmp6_echo),
                 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -2568,7 +2575,8 @@ mlx5_flow_validate_item_icmp6_echo(const struct 
rte_flow_item *item,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
+mlx5_flow_validate_item_icmp(const struct rte_eth_dev *dev,
+                            const struct rte_flow_item *item,
                             uint64_t item_flags,
                             uint8_t target_protocol,
                             struct rte_flow_error *error)
@@ -2592,11 +2600,12 @@ mlx5_flow_validate_item_icmp(const struct rte_flow_item 
*item,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "protocol filtering not compatible"
                                          " with ICMP layer");
-       if (!(item_flags & l3m))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "IPv4 is mandatory to filter"
-                                         " on ICMP");
+       if (!mlx5_hws_active(dev)) {
+               if (!(item_flags & l3m))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "IPv4 is mandatory to filter on ICMP");
+       }
        if (item_flags & l4m)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -2604,7 +2613,7 @@ mlx5_flow_validate_item_icmp(const struct rte_flow_item 
*item,
        if (!mask)
                mask = &nic_mask;
        ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
+               (dev, item, (const uint8_t *)mask,
                 (const uint8_t *)&nic_mask,
                 sizeof(struct rte_flow_item_icmp),
                 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -2627,7 +2636,8 @@ mlx5_flow_validate_item_icmp(const struct rte_flow_item 
*item,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
+mlx5_flow_validate_item_eth(const struct rte_eth_dev *dev,
+                           const struct rte_flow_item *item,
                            uint64_t item_flags, bool ext_vlan_sup,
                            struct rte_flow_error *error)
 {
@@ -2664,7 +2674,7 @@ mlx5_flow_validate_item_eth(const struct rte_flow_item 
*item,
                                          "L2 layer should not follow GTP");
        if (!mask)
                mask = &rte_flow_item_eth_mask;
-       ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+       ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_eth),
                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -2718,7 +2728,7 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item 
*item,
                                          "VLAN cannot follow L3/L4 layer");
        if (!mask)
                mask = &rte_flow_item_vlan_mask;
-       ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+       ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_vlan),
                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -2782,7 +2792,8 @@ mlx5_flow_validate_item_vlan(const struct rte_flow_item 
*item,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
+mlx5_flow_validate_item_ipv4(const struct rte_eth_dev *dev,
+                            const struct rte_flow_item *item,
                             uint64_t item_flags,
                             uint64_t last_item,
                             uint16_t ether_type,
@@ -2854,7 +2865,7 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item 
*item,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
                                          "partial mask is not supported"
                                          " for protocol");
-       ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+       ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
                                        acc_mask ? (const uint8_t *)acc_mask
                                                 : (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_ipv4),
@@ -2885,7 +2896,8 @@ mlx5_flow_validate_item_ipv4(const struct rte_flow_item 
*item,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
+mlx5_flow_validate_item_ipv6(const struct rte_eth_dev *dev,
+                            const struct rte_flow_item *item,
                             uint64_t item_flags,
                             uint64_t last_item,
                             uint16_t ether_type,
@@ -2936,9 +2948,9 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item 
*item,
        if (next_proto == IPPROTO_HOPOPTS  ||
            next_proto == IPPROTO_ROUTING  ||
            next_proto == IPPROTO_FRAGMENT ||
-           next_proto == IPPROTO_ESP      ||
            next_proto == IPPROTO_AH       ||
-           next_proto == IPPROTO_DSTOPTS)
+           next_proto == IPPROTO_DSTOPTS  ||
+           (!mlx5_hws_active(dev) && next_proto == IPPROTO_ESP))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "IPv6 proto (next header) should "
@@ -2963,7 +2975,7 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item 
*item,
                                          "L3 cannot follow an NVGRE layer.");
        if (!mask)
                mask = &rte_flow_item_ipv6_mask;
-       ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+       ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
                                        acc_mask ? (const uint8_t *)acc_mask
                                                 : (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_ipv6),
@@ -2991,7 +3003,8 @@ mlx5_flow_validate_item_ipv6(const struct rte_flow_item 
*item,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
+mlx5_flow_validate_item_udp(const struct rte_eth_dev *dev,
+                           const struct rte_flow_item *item,
                            uint64_t item_flags,
                            uint8_t target_protocol,
                            struct rte_flow_error *error)
@@ -3004,15 +3017,17 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item 
*item,
                                      MLX5_FLOW_LAYER_OUTER_L4;
        int ret;
 
-       if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "protocol filtering not compatible"
-                                         " with UDP layer");
-       if (!(item_flags & l3m))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "L3 is mandatory to filter on L4");
+       if (!mlx5_hws_active(dev)) {
+               if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "protocol filtering not compatible with UDP layer");
+               if (!(item_flags & l3m))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item,
+                                                 "L3 is mandatory to filter on L4");
+       }
        if (item_flags & l4m)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -3020,7 +3035,7 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item 
*item,
        if (!mask)
                mask = &rte_flow_item_udp_mask;
        ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
+               (dev, item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_udp_mask,
                 sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
                 error);
@@ -3045,7 +3060,8 @@ mlx5_flow_validate_item_udp(const struct rte_flow_item 
*item,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
+mlx5_flow_validate_item_tcp(const struct rte_eth_dev *dev,
+                           const struct rte_flow_item *item,
                            uint64_t item_flags,
                            uint8_t target_protocol,
                            const struct rte_flow_item_tcp *flow_mask,
@@ -3060,15 +3076,16 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item 
*item,
        int ret;
 
        MLX5_ASSERT(flow_mask);
-       if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "protocol filtering not compatible"
-                                         " with TCP layer");
-       if (!(item_flags & l3m))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "L3 is mandatory to filter on L4");
+       if (!mlx5_hws_active(dev)) {
+               if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "protocol filtering not compatible with TCP layer");
+               if (!(item_flags & l3m))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "L3 is mandatory to filter on L4");
+       }
        if (item_flags & l4m)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
@@ -3076,7 +3093,7 @@ mlx5_flow_validate_item_tcp(const struct rte_flow_item 
*item,
        if (!mask)
                mask = &rte_flow_item_tcp_mask;
        ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
+               (dev, item, (const uint8_t *)mask,
                 (const uint8_t *)flow_mask,
                 sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
                 error);
@@ -3136,10 +3153,16 @@ mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
         * Verify only UDPv4 is present as defined in
         * https://tools.ietf.org/html/rfc7348
         */
-       if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "no outer UDP layer found");
+       if (!mlx5_hws_active(dev)) {
+               if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "no outer UDP layer found");
+               if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
+                       return rte_flow_error_set(error, ENOTSUP,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                                 "VXLAN tunnel must be fully defined");
+       }
        if (!mask)
                mask = &rte_flow_item_vxlan_mask;
 
@@ -3154,7 +3177,7 @@ mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
                        valid_mask = &nic_mask;
        }
        ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
+               (dev, item, (const uint8_t *)mask,
                 (const uint8_t *)valid_mask,
                 sizeof(struct rte_flow_item_vxlan),
                 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -3164,10 +3187,6 @@ mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
                memcpy(&id.vni[1], spec->hdr.vni, 3);
                memcpy(&id.vni[1], mask->hdr.vni, 3);
        }
-       if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "VXLAN tunnel must be fully defined");
        return 0;
 }
 
@@ -3224,19 +3243,22 @@ mlx5_flow_validate_item_vxlan_gpe(const struct 
rte_flow_item *item,
         * Verify only UDPv4 is present as defined in
         * https://tools.ietf.org/html/rfc7348
         */
-       if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "no outer UDP layer found");
+       if (!mlx5_hws_active(dev)) {
+               if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "no outer UDP layer found");
+       }
        if (!mask)
                mask = &rte_flow_item_vxlan_gpe_mask;
-       if (priv->sh->misc5_cap && priv->sh->tunnel_header_0_1) {
+       if (mlx5_hws_active(dev) ||
+           (priv->sh->misc5_cap && priv->sh->tunnel_header_0_1)) {
                nic_mask.rsvd0[0] = 0xff;
                nic_mask.rsvd0[1] = 0xff;
                nic_mask.rsvd1 = 0xff;
        }
        ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
+               (dev, item, (const uint8_t *)mask,
                 (const uint8_t *)&nic_mask,
                 sizeof(struct rte_flow_item_vxlan_gpe),
                 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -3269,7 +3291,8 @@ mlx5_flow_validate_item_vxlan_gpe(const struct 
rte_flow_item *item,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
+mlx5_flow_validate_item_gre_key(const struct rte_eth_dev *dev,
+                               const struct rte_flow_item *item,
                                uint64_t item_flags,
                                const struct rte_flow_item *gre_item,
                                struct rte_flow_error *error)
@@ -3305,7 +3328,7 @@ mlx5_flow_validate_item_gre_key(const struct 
rte_flow_item *item,
        if (!mask)
                mask = &gre_key_default_mask;
        ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
+               (dev, item, (const uint8_t *)mask,
                 (const uint8_t *)&gre_key_default_mask,
                 sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        return ret;
@@ -3405,7 +3428,7 @@ mlx5_flow_validate_item_gre_option(struct rte_eth_dev 
*dev,
                                                  "Checksum/Sequence not supported");
        }
        ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
+               (dev, item, (const uint8_t *)mask,
                 (const uint8_t *)&nic_mask,
                 sizeof(struct rte_flow_item_gre_opt),
                 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -3428,7 +3451,8 @@ mlx5_flow_validate_item_gre_option(struct rte_eth_dev 
*dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
+mlx5_flow_validate_item_gre(const struct rte_eth_dev *dev,
+                           const struct rte_flow_item *item,
                            uint64_t item_flags,
                            uint8_t target_protocol,
                            struct rte_flow_error *error)
@@ -3451,14 +3475,16 @@ mlx5_flow_validate_item_gre(const struct rte_flow_item 
*item,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple tunnel layers not"
                                          " supported");
-       if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "L3 Layer is missing");
+       if (!mlx5_hws_active(dev)) {
+               if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
+                       return rte_flow_error_set(error, ENOTSUP,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "L3 Layer is missing");
+       }
        if (!mask)
                mask = &rte_flow_item_gre_mask;
        ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
+               (dev, item, (const uint8_t *)mask,
                 (const uint8_t *)&nic_mask,
                 sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
                 error);
@@ -3534,7 +3560,7 @@ mlx5_flow_validate_item_geneve(const struct rte_flow_item 
*item,
        if (!mask)
                mask = &rte_flow_item_geneve_mask;
        ret = mlx5_flow_item_acceptable
-                                 (item, (const uint8_t *)mask,
+                                 (dev, item, (const uint8_t *)mask,
                                   (const uint8_t *)&nic_mask,
                                   sizeof(struct rte_flow_item_geneve),
                                   MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -3732,36 +3758,48 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev 
__rte_unused,
                             struct rte_flow_error *error)
 {
 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+       const struct rte_flow_item_mpls hws_nic_mask = {
+               .label_tc_s = {0xff, 0xff, 0xff},
+               .ttl = 0xff
+       };
+       const struct rte_flow_item_mpls *nic_mask = !mlx5_hws_active(dev) ?
+               &rte_flow_item_mpls_mask : &hws_nic_mask;
        const struct rte_flow_item_mpls *mask = item->mask;
        struct mlx5_priv *priv = dev->data->dev_private;
        int ret;
 
-       if (!priv->sh->dev_cap.mpls_en)
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "MPLS not supported or"
-                                         " disabled in firmware"
-                                         " configuration.");
-       /* MPLS over UDP, GRE is allowed */
-       if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L4_UDP |
-                           MLX5_FLOW_LAYER_GRE |
-                           MLX5_FLOW_LAYER_GRE_KEY)))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "protocol filtering not compatible"
-                                         " with MPLS layer");
-       /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
-       if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
-           !(item_flags & MLX5_FLOW_LAYER_GRE))
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "multiple tunnel layers not"
-                                         " supported");
+       if (!mlx5_hws_active(dev)) {
+               /* MPLS has HW support in HWS */
+               if (!priv->sh->dev_cap.mpls_en)
+                       return rte_flow_error_set(error, ENOTSUP,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "MPLS not supported or disabled in firmware configuration.");
+               /* MPLS over UDP, GRE is allowed */
+               if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L4_UDP |
+                                   MLX5_FLOW_LAYER_GRE |
+                                   MLX5_FLOW_LAYER_GRE_KEY)))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "protocol filtering not compatible with MPLS layer");
+               /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
+               if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
+                   !(item_flags & MLX5_FLOW_LAYER_GRE))
+                       return rte_flow_error_set(error, ENOTSUP,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                                 "multiple tunnel layers not supported");
+       } else {
+               /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
+               if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
+                   !(item_flags & MLX5_FLOW_LAYER_MPLS))
+                       return rte_flow_error_set(error, ENOTSUP,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                                 "multiple tunnel layers not supported");
+       }
        if (!mask)
-               mask = &rte_flow_item_mpls_mask;
+               mask = nic_mask;
        ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
-                (const uint8_t *)&rte_flow_item_mpls_mask,
+               (dev, item, (const uint8_t *)mask,
+                (const uint8_t *)nic_mask,
                 sizeof(struct rte_flow_item_mpls),
                 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
        if (ret < 0)
@@ -3791,7 +3829,8 @@ mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
+mlx5_flow_validate_item_nvgre(const struct rte_eth_dev *dev,
+                             const struct rte_flow_item *item,
                              uint64_t item_flags,
                              uint8_t target_protocol,
                              struct rte_flow_error *error)
@@ -3809,14 +3848,16 @@ mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple tunnel layers not"
                                          " supported");
-       if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
-               return rte_flow_error_set(error, ENOTSUP,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "L3 Layer is missing");
+       if (!mlx5_hws_active(dev)) {
+               if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
+                       return rte_flow_error_set(error, ENOTSUP,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "L3 Layer is missing");
+       }
        if (!mask)
                mask = &rte_flow_item_nvgre_mask;
        ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
+               (dev, item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_nvgre_mask,
                 sizeof(struct rte_flow_item_nvgre),
                 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -3846,7 +3887,8 @@ mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
+mlx5_flow_validate_item_ecpri(const struct rte_eth_dev *dev,
+                             const struct rte_flow_item *item,
                              uint64_t item_flags,
                              uint64_t last_item,
                              uint16_t ether_type,
@@ -3909,7 +3951,7 @@ mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
                                          "message header mask must be after a type mask");
-       return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+       return mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
                                         acc_mask ? (const uint8_t *)acc_mask
                                                  : (const uint8_t *)&nic_mask,
                                         sizeof(struct rte_flow_item_ecpri),
@@ -9072,6 +9114,7 @@ mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
 {
        const struct mlx5_flow_driver_ops *fops;
        struct rte_flow_attr fattr = {0};
+       uint64_t item_flags = 0;
 
        if (flow_get_drv_type(dev, &fattr) != MLX5_FLOW_TYPE_HW) {
                rte_flow_error_set(error, ENOTSUP,
@@ -9080,7 +9123,7 @@ mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
                return -ENOTSUP;
        }
        fops = flow_get_drv_ops(MLX5_FLOW_TYPE_HW);
-       return fops->pattern_validate(dev, attr, items, error);
+       return fops->pattern_validate(dev, attr, items, &item_flags, error);
 }
 
 /**
diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
index dd5b30a8a4..c42d02e478 100644
--- a/drivers/net/mlx5/mlx5_flow.h
+++ b/drivers/net/mlx5/mlx5_flow.h
@@ -270,6 +270,10 @@ enum mlx5_feature_name {
 #define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 38)
 #define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 39)
 
+#define MLX5_FLOW_ITEM_FLEX \
+       (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX | \
+       MLX5_FLOW_ITEM_FLEX_TUNNEL)
+
 /* ESP item */
 #define MLX5_FLOW_ITEM_ESP (UINT64_C(1) << 40)
 
@@ -2268,6 +2272,7 @@ typedef int (*mlx5_flow_pattern_validate_t)
                        (struct rte_eth_dev *dev,
                         const struct rte_flow_pattern_template_attr *attr,
                         const struct rte_flow_item items[],
+                        uint64_t *item_flags,
                         struct rte_flow_error *error);
 typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t)
                        (struct rte_eth_dev *dev,
@@ -2897,46 +2902,79 @@ int flow_validate_modify_field_level
                        (const struct rte_flow_field_data *data,
                         struct rte_flow_error *error);
 int
-flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
-                                uint64_t action_flags,
-                                const struct rte_flow_action *action,
-                                const struct rte_flow_attr *attr,
-                                struct rte_flow_error *error);
+mlx5_flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
+                                     uint64_t action_flags,
+                                     const struct rte_flow_action *action,
+                                     const struct rte_flow_attr *attr,
+                                     struct rte_flow_error *error);
 int
-flow_dv_validate_action_decap(struct rte_eth_dev *dev,
-                             uint64_t action_flags,
-                             const struct rte_flow_action *action,
-                             const uint64_t item_flags,
-                             const struct rte_flow_attr *attr,
-                             struct rte_flow_error *error);
+mlx5_flow_dv_validate_action_decap(struct rte_eth_dev *dev,
+                                  uint64_t action_flags,
+                                  const struct rte_flow_action *action,
+                                  const uint64_t item_flags,
+                                  const struct rte_flow_attr *attr,
+                                  struct rte_flow_error *error);
 int
-flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
-                              uint64_t action_flags,
-                              uint64_t item_flags,
-                              bool root,
-                              struct rte_flow_error *error);
+mlx5_flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
+                                   uint64_t action_flags,
+                                   uint64_t item_flags,
+                                   bool root,
+                                   struct rte_flow_error *error);
 int
-flow_dv_validate_action_raw_encap_decap
+mlx5_flow_dv_validate_action_raw_encap_decap
        (struct rte_eth_dev *dev,
         const struct rte_flow_action_raw_decap *decap,
         const struct rte_flow_action_raw_encap *encap,
         const struct rte_flow_attr *attr, uint64_t *action_flags,
         int *actions_n, const struct rte_flow_action *action,
         uint64_t item_flags, struct rte_flow_error *error);
-int mlx5_flow_item_acceptable(const struct rte_flow_item *item,
+int mlx5_flow_item_acceptable(const struct rte_eth_dev *dev,
+                             const struct rte_flow_item *item,
                              const uint8_t *mask,
                              const uint8_t *nic_mask,
                              unsigned int size,
                              bool range_accepted,
                              struct rte_flow_error *error);
-int mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_eth(const struct rte_eth_dev *dev,
+                               const struct rte_flow_item *item,
                                uint64_t item_flags, bool ext_vlan_sup,
                                struct rte_flow_error *error);
-int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
+int
+mlx5_flow_dv_validate_item_vlan(const struct rte_flow_item *item,
+                               uint64_t item_flags,
+                               struct rte_eth_dev *dev,
+                               struct rte_flow_error *error);
+int
+mlx5_flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
+                               const struct rte_flow_item *item,
+                               uint64_t item_flags,
+                               uint64_t last_item,
+                               uint16_t ether_type,
+                               const struct rte_flow_item_ipv4 *acc_mask,
+                               struct rte_flow_error *error);
+int
+mlx5_flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
+                              const struct rte_flow_item *item,
+                              uint64_t item_flags,
+                              struct rte_flow_error *error);
+int
+mlx5_flow_dv_validate_item_gtp_psc(const struct rte_eth_dev *dev,
+                                  const struct rte_flow_item *item,
+                                  uint64_t last_item,
+                                  const struct rte_flow_item *gtp_item,
+                                  bool root, struct rte_flow_error *error);
+int
+mlx5_flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
+                                 const struct rte_flow_item *item,
+                                 uint64_t *item_flags,
+                                 struct rte_flow_error *error);
+int mlx5_flow_validate_item_gre(const struct rte_eth_dev *dev,
+                               const struct rte_flow_item *item,
                                uint64_t item_flags,
                                uint8_t target_protocol,
                                struct rte_flow_error *error);
-int mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_gre_key(const struct rte_eth_dev *dev,
+                                   const struct rte_flow_item *item,
                                    uint64_t item_flags,
                                    const struct rte_flow_item *gre_item,
                                    struct rte_flow_error *error);
@@ -2946,14 +2984,16 @@ int mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
                                       const struct rte_flow_attr *attr,
                                       const struct rte_flow_item *gre_item,
                                       struct rte_flow_error *error);
-int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_ipv4(const struct rte_eth_dev *dev,
+                                const struct rte_flow_item *item,
                                 uint64_t item_flags,
                                 uint64_t last_item,
                                 uint16_t ether_type,
                                 const struct rte_flow_item_ipv4 *acc_mask,
                                 bool range_accepted,
                                 struct rte_flow_error *error);
-int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_ipv6(const struct rte_eth_dev *dev,
+                                const struct rte_flow_item *item,
                                 uint64_t item_flags,
                                 uint64_t last_item,
                                 uint16_t ether_type,
@@ -2964,12 +3004,14 @@ int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
                                 uint64_t item_flags,
                                 uint64_t prev_layer,
                                 struct rte_flow_error *error);
-int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_tcp(const struct rte_eth_dev *dev,
+                               const struct rte_flow_item *item,
                                uint64_t item_flags,
                                uint8_t target_protocol,
                                const struct rte_flow_item_tcp *flow_mask,
                                struct rte_flow_error *error);
-int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_udp(const struct rte_eth_dev *dev,
+                               const struct rte_flow_item *item,
                                uint64_t item_flags,
                                uint8_t target_protocol,
                                struct rte_flow_error *error);
@@ -2987,19 +3029,23 @@ int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
                                      uint64_t item_flags,
                                      struct rte_eth_dev *dev,
                                      struct rte_flow_error *error);
-int mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_icmp(const struct rte_eth_dev *dev,
+                                const struct rte_flow_item *item,
                                 uint64_t item_flags,
                                 uint8_t target_protocol,
                                 struct rte_flow_error *error);
-int mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
-                                  uint64_t item_flags,
-                                  uint8_t target_protocol,
-                                  struct rte_flow_error *error);
-int mlx5_flow_validate_item_icmp6_echo(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_icmp6(const struct rte_eth_dev *dev,
+                                 const struct rte_flow_item *item,
+                                 uint64_t item_flags,
+                                 uint8_t target_protocol,
+                                 struct rte_flow_error *error);
+int mlx5_flow_validate_item_icmp6_echo(const struct rte_eth_dev *dev,
+                                      const struct rte_flow_item *item,
                                       uint64_t item_flags,
                                       uint8_t target_protocol,
                                       struct rte_flow_error *error);
-int mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_nvgre(const struct rte_eth_dev *dev,
+                                 const struct rte_flow_item *item,
                                  uint64_t item_flags,
                                  uint8_t target_protocol,
                                  struct rte_flow_error *error);
@@ -3012,7 +3058,8 @@ int mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
                                   const struct rte_flow_item *geneve_item,
                                   struct rte_eth_dev *dev,
                                   struct rte_flow_error *error);
-int mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
+int mlx5_flow_validate_item_ecpri(const struct rte_eth_dev *dev,
+                                 const struct rte_flow_item *item,
                                  uint64_t item_flags,
                                  uint64_t last_item,
                                  uint16_t ether_type,
@@ -3378,6 +3425,8 @@ mlx5_hw_decap_encap_destroy(struct rte_eth_dev *dev,
                            struct mlx5_indirect_list *reformat);
 
 extern const struct rte_flow_action_raw_decap empty_decap;
+extern const struct rte_flow_item_ipv6 nic_ipv6_mask;
+extern const struct rte_flow_item_tcp nic_tcp_mask;
 
 #endif
 #endif /* RTE_PMD_MLX5_FLOW_H_ */
diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 06f5427abf..bd4f4f8fa9 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -2457,7 +2457,7 @@ flow_dv_validate_item_mark(struct rte_eth_dev *dev,
                                        RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
                                        "mask cannot be zero");
 
-       ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+       ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_mark),
                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -2544,7 +2544,7 @@ flow_dv_validate_item_meta(struct rte_eth_dev *dev __rte_unused,
                                        RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
                                        "mask cannot be zero");
 
-       ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+       ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_meta),
                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -2600,7 +2600,7 @@ flow_dv_validate_item_tag(struct rte_eth_dev *dev,
                                        RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
                                        "mask cannot be zero");
 
-       ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+       ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_tag),
                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -2678,7 +2678,7 @@ flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
                                           "no support for partial mask on"
                                           " \"id\" field");
        ret = mlx5_flow_item_acceptable
-                               (item, (const uint8_t *)mask,
+                               (dev, item, (const uint8_t *)mask,
                                 (const uint8_t *)&rte_flow_item_port_id_mask,
                                 sizeof(struct rte_flow_item_port_id),
                                 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -2758,7 +2758,7 @@ flow_dv_validate_item_represented_port(struct rte_eth_dev *dev,
                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
                                           "no support for partial mask on \"id\" field");
        ret = mlx5_flow_item_acceptable
-                               (item, (const uint8_t *)mask,
+                               (dev, item, (const uint8_t *)mask,
                                 (const uint8_t *)&rte_flow_item_ethdev_mask,
                                 sizeof(struct rte_flow_item_ethdev),
                                 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -2800,11 +2800,11 @@ flow_dv_validate_item_represented_port(struct rte_eth_dev *dev,
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-flow_dv_validate_item_vlan(const struct rte_flow_item *item,
-                          uint64_t item_flags,
-                          struct rte_eth_dev *dev,
-                          struct rte_flow_error *error)
+int
+mlx5_flow_dv_validate_item_vlan(const struct rte_flow_item *item,
+                               uint64_t item_flags,
+                               struct rte_eth_dev *dev,
+                               struct rte_flow_error *error)
 {
        const struct rte_flow_item_vlan *mask = item->mask;
        const struct rte_flow_item_vlan nic_mask = {
@@ -2831,7 +2831,7 @@ flow_dv_validate_item_vlan(const struct rte_flow_item *item,
                                          "VLAN cannot follow L3/L4 layer");
        if (!mask)
                mask = &rte_flow_item_vlan_mask;
-       ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+       ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_vlan),
                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -2886,11 +2886,11 @@ flow_dv_validate_item_vlan(const struct rte_flow_item *item,
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
-                         const struct rte_flow_item *item,
-                         uint64_t item_flags,
-                         struct rte_flow_error *error)
+int
+mlx5_flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
+                              const struct rte_flow_item *item,
+                              uint64_t item_flags,
+                              struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_item_gtp *spec = item->spec;
@@ -2910,10 +2910,12 @@ flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple tunnel layers not"
                                          " supported");
-       if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
-                                         "no outer UDP layer found");
+       if (!mlx5_hws_active(dev)) {
+               if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item, "no outer UDP layer found");
+       }
        if (!mask)
                mask = &rte_flow_item_gtp_mask;
        if (spec && spec->hdr.gtp_hdr_info & ~MLX5_GTP_FLAGS_MASK)
@@ -2921,7 +2923,7 @@ flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Match is supported for GTP"
                                          " flags only");
-       return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+       return mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
                                         (const uint8_t *)&nic_mask,
                                         sizeof(struct rte_flow_item_gtp),
                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -2944,12 +2946,13 @@ flow_dv_validate_item_gtp(struct rte_eth_dev *dev,
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
-                             uint64_t last_item,
-                             const struct rte_flow_item *gtp_item,
-                             bool root,
-                             struct rte_flow_error *error)
+int
+mlx5_flow_dv_validate_item_gtp_psc(const struct rte_eth_dev *dev,
+                                  const struct rte_flow_item *item,
+                                  uint64_t last_item,
+                                  const struct rte_flow_item *gtp_item,
+                                  bool root,
+                                  struct rte_flow_error *error)
 {
        const struct rte_flow_item_gtp *gtp_spec;
        const struct rte_flow_item_gtp *gtp_mask;
@@ -2972,41 +2975,55 @@ flow_dv_validate_item_gtp_psc(const struct rte_flow_item *item,
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
                         "GTP E flag must be 1 to match GTP PSC");
-       /* Check the flow is not created in group zero. */
-       if (root)
-               return rte_flow_error_set
-                       (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-                        "GTP PSC is not supported for group 0");
-       /* GTP spec is here and E flag is requested to match zero. */
-       if (!item->spec)
-               return 0;
+       if (!mlx5_hws_active(dev)) {
+               /* Check the flow is not created in group zero. */
+               if (root)
+                       return rte_flow_error_set
+                               (error, ENOTSUP,
+                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+                                "GTP PSC is not supported for group 0");
+               /* GTP spec is here and E flag is requested to match zero. */
+               if (!item->spec)
+                       return 0;
+       }
        mask = item->mask ? item->mask : &rte_flow_item_gtp_psc_mask;
-       return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+       return mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
                                         (const uint8_t *)&nic_mask,
                                         sizeof(struct rte_flow_item_gtp_psc),
                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
 }
 
-/**
+/*
  * Validate IPV4 item.
  * Use existing validation function mlx5_flow_validate_item_ipv4(), and
  * add specific validation of fragment_offset field,
  *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
  * @param[in] item
  *   Item specification.
  * @param[in] item_flags
  *   Bit-fields that holds the items detected until now.
+ * @param[in] last_item
+ *   Previous validated item in the pattern items.
+ * @param[in] ether_type
+ *   Type in the ethernet layer header (including dot1q).
+ * @param[in] acc_mask
+ *   Default acceptable mask (will be adjusted).
  * @param[out] error
  *   Pointer to error structure.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
-                          const struct rte_flow_item *item,
-                          uint64_t item_flags, uint64_t last_item,
-                          uint16_t ether_type, struct rte_flow_error *error)
+int
+mlx5_flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
+                               const struct rte_flow_item *item,
+                               uint64_t item_flags,
+                               uint64_t last_item,
+                               uint16_t ether_type,
+                               const struct rte_flow_item_ipv4 *acc_mask,
+                               struct rte_flow_error *error)
 {
        int ret;
        struct mlx5_priv *priv = dev->data->dev_private;
@@ -3016,16 +3033,7 @@ flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
        const struct rte_flow_item_ipv4 *mask = item->mask;
        rte_be16_t fragment_offset_spec = 0;
        rte_be16_t fragment_offset_last = 0;
-       struct rte_flow_item_ipv4 nic_ipv4_mask = {
-               .hdr = {
-                       .src_addr = RTE_BE32(0xffffffff),
-                       .dst_addr = RTE_BE32(0xffffffff),
-                       .type_of_service = 0xff,
-                       .fragment_offset = RTE_BE16(0xffff),
-                       .next_proto_id = 0xff,
-                       .time_to_live = 0xff,
-               },
-       };
+       struct rte_flow_item_ipv4 actual_ipv4_mask = *acc_mask;
 
        if (mask && (mask->hdr.version_ihl & RTE_IPV4_HDR_IHL_MASK)) {
                int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
@@ -3036,10 +3044,10 @@ flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  item,
                                                  "IPV4 ihl offload not supported");
-               nic_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
+               actual_ipv4_mask.hdr.version_ihl = mask->hdr.version_ihl;
        }
-       ret = mlx5_flow_validate_item_ipv4(item, item_flags, last_item,
-                                          ether_type, &nic_ipv4_mask,
+       ret = mlx5_flow_validate_item_ipv4(dev, item, item_flags, last_item,
+                                          ether_type, &actual_ipv4_mask,
                                           MLX5_ITEM_RANGE_ACCEPTED, error);
        if (ret < 0)
                return ret;
@@ -3129,7 +3137,8 @@ flow_dv_validate_item_ipv4(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
+flow_dv_validate_item_ipv6_frag_ext(const struct rte_eth_dev *dev,
+                                   const struct rte_flow_item *item,
                                    uint64_t item_flags,
                                    struct rte_flow_error *error)
 {
@@ -3188,7 +3197,7 @@ flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "specified value not supported");
        ret = mlx5_flow_item_acceptable
-                               (item, (const uint8_t *)mask,
+                               (dev, item, (const uint8_t *)mask,
                                 (const uint8_t *)&nic_mask,
                                 sizeof(struct rte_flow_item_ipv6_frag_ext),
                                 MLX5_ITEM_RANGE_ACCEPTED, error);
@@ -3244,31 +3253,33 @@ flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-static int
-flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
-                            const struct rte_flow_item *item,
-                            uint64_t *item_flags,
-                            struct rte_flow_error *error)
+int
+mlx5_flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
+                                 const struct rte_flow_item *item,
+                                 uint64_t *item_flags,
+                                 struct rte_flow_error *error)
 {
        const struct rte_flow_item_conntrack *spec = item->spec;
        const struct rte_flow_item_conntrack *mask = item->mask;
-       RTE_SET_USED(dev);
        uint32_t flags;
 
        if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                          "Only one CT is supported");
-       if (!mask)
-               mask = &rte_flow_item_conntrack_mask;
-       flags = spec->flags & mask->flags;
-       if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
-           ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
-            (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
-            (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
-               return rte_flow_error_set(error, EINVAL,
-                                         RTE_FLOW_ERROR_TYPE_ITEM, NULL,
-                                         "Conflict status bits");
+       if (!mlx5_hws_active(dev)) {
+               if (!mask)
+                       mask = &rte_flow_item_conntrack_mask;
+               flags = spec->flags & mask->flags;
+               if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
+                   ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
+                    (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
+                    (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
+                       return rte_flow_error_set(error, EINVAL,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 NULL,
+                                                 "Conflict status bits");
+       }
        /* State change also needs to be considered. */
        *item_flags |= MLX5_FLOW_LAYER_ASO_CT;
        return 0;
@@ -3899,11 +3910,11 @@ flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
-                                uint64_t action_flags,
-                                const struct rte_flow_action *action,
-                                const struct rte_flow_attr *attr,
-                                struct rte_flow_error *error)
+mlx5_flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
+                                     uint64_t action_flags,
+                                     const struct rte_flow_action *action,
+                                     const struct rte_flow_attr *attr,
+                                     struct rte_flow_error *error)
 {
        const struct mlx5_priv *priv = dev->data->dev_private;
 
@@ -3944,12 +3955,12 @@ flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-flow_dv_validate_action_decap(struct rte_eth_dev *dev,
-                             uint64_t action_flags,
-                             const struct rte_flow_action *action,
-                             const uint64_t item_flags,
-                             const struct rte_flow_attr *attr,
-                             struct rte_flow_error *error)
+mlx5_flow_dv_validate_action_decap(struct rte_eth_dev *dev,
+                                  uint64_t action_flags,
+                                  const struct rte_flow_action *action,
+                                  const uint64_t item_flags,
+                                  const struct rte_flow_attr *attr,
+                                  struct rte_flow_error *error)
 {
        const struct mlx5_priv *priv = dev->data->dev_private;
 
@@ -4017,7 +4028,7 @@ const struct rte_flow_action_raw_decap empty_decap = {.data = NULL, .size = 0,};
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-flow_dv_validate_action_raw_encap_decap
+mlx5_flow_dv_validate_action_raw_encap_decap
        (struct rte_eth_dev *dev,
         const struct rte_flow_action_raw_decap *decap,
         const struct rte_flow_action_raw_encap *encap,
@@ -4058,8 +4069,10 @@ flow_dv_validate_action_raw_encap_decap
                                "encap combination");
        }
        if (decap) {
-               ret = flow_dv_validate_action_decap(dev, *action_flags, action,
-                                                   item_flags, attr, error);
+               ret = mlx5_flow_dv_validate_action_decap(dev, *action_flags,
+                                                        action,
+                                                        item_flags, attr,
+                                                        error);
                if (ret < 0)
                        return ret;
                *action_flags |= MLX5_FLOW_ACTION_DECAP;
@@ -4106,11 +4119,11 @@ flow_dv_validate_action_raw_encap_decap
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
-                              uint64_t action_flags,
-                              uint64_t item_flags,
-                              bool root,
-                              struct rte_flow_error *error)
+mlx5_flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
+                                   uint64_t action_flags,
+                                   uint64_t item_flags,
+                                   bool root,
+                                   struct rte_flow_error *error)
 {
        RTE_SET_USED(dev);
 
@@ -4195,7 +4208,7 @@ flow_dv_validate_item_meter_color(struct rte_eth_dev *dev,
                                        RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
                                        "mask cannot be zero");
 
-       ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+       ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
                                (const uint8_t *)&nic_mask,
                                sizeof(struct rte_flow_item_meter_color),
                                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -4264,7 +4277,7 @@ flow_dv_validate_item_aggr_affinity(struct rte_eth_dev *dev,
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, NULL,
                                          "mask cannot be zero");
-       ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+       ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
                                (const uint8_t *)&nic_mask,
                                sizeof(struct rte_flow_item_aggr_affinity),
                                MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -6437,7 +6450,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags,
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
-                       ret = flow_dv_validate_action_raw_encap_decap
+                       ret = mlx5_flow_dv_validate_action_raw_encap_decap
                                (dev, NULL, act->conf, attr, sub_action_flags,
                                 &actions_n, action, item_flags, error);
                        if (ret < 0)
@@ -6446,10 +6459,10 @@ flow_dv_validate_action_sample(uint64_t *action_flags,
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
-                       ret = flow_dv_validate_action_l2_encap(dev,
-                                                              *sub_action_flags,
-                                                              act, attr,
-                                                              error);
+                       ret = mlx5_flow_dv_validate_action_l2_encap(dev,
+                                                                   *sub_action_flags,
+                                                                   act, attr,
+                                                                   error);
                        if (ret < 0)
                                return ret;
                        *sub_action_flags |= MLX5_FLOW_ACTION_ENCAP;
@@ -7594,7 +7607,7 @@ mlx5_flow_validate_item_ib_bth(struct rte_eth_dev *dev,
                                          "IB BTH item is not supported");
        if (!mask)
                mask = &rte_flow_item_ib_bth_mask;
-       ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+       ret = mlx5_flow_item_acceptable(dev, item, (const uint8_t *)mask,
                                        (const uint8_t *)valid_mask,
                                        sizeof(struct rte_flow_item_ib_bth),
                                        MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
@@ -7603,6 +7616,40 @@ mlx5_flow_validate_item_ib_bth(struct rte_eth_dev *dev,
        return 0;
 }
 
+const struct rte_flow_item_ipv4 nic_ipv4_mask = {
+       .hdr = {
+               .src_addr = RTE_BE32(0xffffffff),
+               .dst_addr = RTE_BE32(0xffffffff),
+               .type_of_service = 0xff,
+               .fragment_offset = RTE_BE16(0xffff),
+               .next_proto_id = 0xff,
+               .time_to_live = 0xff,
+       },
+};
+
+const struct rte_flow_item_ipv6 nic_ipv6_mask = {
+       .hdr = {
+               .src_addr =
+               "\xff\xff\xff\xff\xff\xff\xff\xff"
+               "\xff\xff\xff\xff\xff\xff\xff\xff",
+               .dst_addr =
+               "\xff\xff\xff\xff\xff\xff\xff\xff"
+               "\xff\xff\xff\xff\xff\xff\xff\xff",
+               .vtc_flow = RTE_BE32(0xffffffff),
+               .proto = 0xff,
+               .hop_limits = 0xff,
+       },
+       .has_frag_ext = 1,
+};
+
+const struct rte_flow_item_tcp nic_tcp_mask = {
+       .hdr = {
+               .tcp_flags = 0xFF,
+               .src_port = RTE_BE16(UINT16_MAX),
+               .dst_port = RTE_BE16(UINT16_MAX),
+       }
+};
+
 /**
  * Internal validation function. For validating both actions and items.
  *
@@ -7648,27 +7695,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
        const struct rte_flow_action_rss *rss = NULL;
        const struct rte_flow_action_rss *sample_rss = NULL;
        const struct rte_flow_action_count *sample_count = NULL;
-       const struct rte_flow_item_tcp nic_tcp_mask = {
-               .hdr = {
-                       .tcp_flags = 0xFF,
-                       .src_port = RTE_BE16(UINT16_MAX),
-                       .dst_port = RTE_BE16(UINT16_MAX),
-               }
-       };
-       const struct rte_flow_item_ipv6 nic_ipv6_mask = {
-               .hdr = {
-                       .src_addr =
-                       "\xff\xff\xff\xff\xff\xff\xff\xff"
-                       "\xff\xff\xff\xff\xff\xff\xff\xff",
-                       .dst_addr =
-                       "\xff\xff\xff\xff\xff\xff\xff\xff"
-                       "\xff\xff\xff\xff\xff\xff\xff\xff",
-                       .vtc_flow = RTE_BE32(0xffffffff),
-                       .proto = 0xff,
-                       .hop_limits = 0xff,
-               },
-               .has_frag_ext = 1,
-       };
        const struct rte_flow_item_ecpri nic_ecpri_mask = {
                .hdr = {
                        .common = {
@@ -7753,9 +7779,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_ESP:
-                       ret = mlx5_flow_os_validate_item_esp(items, item_flags,
-                                                         next_protocol,
-                                                         error);
+                       ret = mlx5_flow_os_validate_item_esp(dev, items,
+                                                            item_flags,
+                                                            next_protocol,
+                                                            error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_ITEM_ESP;
@@ -7778,7 +7805,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        port_id_item = items;
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
-                       ret = mlx5_flow_validate_item_eth(items, item_flags,
+                       ret = mlx5_flow_validate_item_eth(dev, items, item_flags,
                                                          true, error);
                        if (ret < 0)
                                return ret;
@@ -7797,8 +7824,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
-                       ret = flow_dv_validate_item_vlan(items, item_flags,
-                                                        dev, error);
+                       ret = mlx5_flow_dv_validate_item_vlan(items, item_flags,
+                                                             dev, error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
@@ -7829,9 +7856,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                item_flags |= l3_tunnel_flag;
                                tunnel = 1;
                        }
-                       ret = flow_dv_validate_item_ipv4(dev, items, item_flags,
-                                                        last_item, ether_type,
-                                                        error);
+                       ret = mlx5_flow_dv_validate_item_ipv4(dev, items,
+                                                             item_flags,
+                                                             last_item,
+                                                             ether_type,
+                                                             &nic_ipv4_mask,
+                                                             error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
@@ -7850,7 +7880,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                item_flags |= l3_tunnel_flag;
                                tunnel = 1;
                        }
-                       ret = mlx5_flow_validate_item_ipv6(items, item_flags,
+                       ret = mlx5_flow_validate_item_ipv6(dev, items,
+                                                          item_flags,
                                                           last_item,
                                                           ether_type,
                                                           &nic_ipv6_mask,
@@ -7863,7 +7894,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                item_flags |= l3_tunnel_flag;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
-                       ret = flow_dv_validate_item_ipv6_frag_ext(items,
+                       ret = flow_dv_validate_item_ipv6_frag_ext(dev, items,
                                                                  item_flags,
                                                                  error);
                        if (ret < 0)
@@ -7876,7 +7907,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        ret = mlx5_flow_validate_item_tcp
-                                               (items, item_flags,
+                                               (dev, items, item_flags,
                                                 next_protocol,
                                                 &nic_tcp_mask,
                                                 error);
@@ -7886,7 +7917,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                             MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
-                       ret = mlx5_flow_validate_item_udp(items, item_flags,
+                       ret = mlx5_flow_validate_item_udp(dev, items, item_flags,
                                                          next_protocol,
                                                          error);
                        const struct rte_flow_item_udp *spec = items->spec;
@@ -7903,7 +7934,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                             MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
-                       ret = mlx5_flow_validate_item_gre(items, item_flags,
+                       ret = mlx5_flow_validate_item_gre(dev, items, item_flags,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
@@ -7918,7 +7949,8 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        last_item = MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
-                       ret = mlx5_flow_validate_item_nvgre(items, item_flags,
+                       ret = mlx5_flow_validate_item_nvgre(dev, items,
+                                                           item_flags,
                                                            next_protocol,
                                                            error);
                        if (ret < 0)
@@ -7927,7 +7959,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE_KEY:
                        ret = mlx5_flow_validate_item_gre_key
-                               (items, item_flags, gre_item, error);
+                               (dev, items, item_flags, gre_item, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_GRE_KEY;
@@ -7991,7 +8023,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        last_item = MLX5_FLOW_ITEM_METADATA;
                        break;
                case RTE_FLOW_ITEM_TYPE_ICMP:
-                       ret = mlx5_flow_validate_item_icmp(items, item_flags,
+                       ret = mlx5_flow_validate_item_icmp(dev, items, item_flags,
                                                           next_protocol,
                                                           error);
                        if (ret < 0)
@@ -7999,7 +8031,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        last_item = MLX5_FLOW_LAYER_ICMP;
                        break;
                case RTE_FLOW_ITEM_TYPE_ICMP6:
-                       ret = mlx5_flow_validate_item_icmp6(items, item_flags,
+                       ret = mlx5_flow_validate_item_icmp6(dev, items, item_flags,
                                                            next_protocol,
                                                            error);
                        if (ret < 0)
@@ -8009,7 +8041,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        break;
                case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
                case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
-                       ret = mlx5_flow_validate_item_icmp6_echo(items,
+                       ret = mlx5_flow_validate_item_icmp6_echo(dev, items,
                                                                 item_flags,
                                                                 next_protocol,
                                                                 error);
@@ -8038,24 +8070,27 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        tag_bitmap |= 1 << mlx5_tag->id;
                        break;
                case RTE_FLOW_ITEM_TYPE_GTP:
-                       ret = flow_dv_validate_item_gtp(dev, items, item_flags,
-                                                       error);
+                       ret = mlx5_flow_dv_validate_item_gtp(dev, items,
+                                                            item_flags,
+                                                            error);
                        if (ret < 0)
                                return ret;
                        gtp_item = items;
                        last_item = MLX5_FLOW_LAYER_GTP;
                        break;
                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
-                       ret = flow_dv_validate_item_gtp_psc(items, last_item,
-                                                           gtp_item, is_root,
-                                                           error);
+                       ret = mlx5_flow_dv_validate_item_gtp_psc(dev, items,
+                                                                last_item,
+                                                                gtp_item,
+                                                                is_root, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_GTP_PSC;
                        break;
                case RTE_FLOW_ITEM_TYPE_ECPRI:
                        /* Capacity will be checked in the translate stage. */
-                       ret = mlx5_flow_validate_item_ecpri(items, item_flags,
+                       ret = mlx5_flow_validate_item_ecpri(dev, items,
+                                                           item_flags,
                                                            last_item,
                                                            ether_type,
                                                            &nic_ecpri_mask,
@@ -8074,8 +8109,9 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                                return ret;
                        break;
                case RTE_FLOW_ITEM_TYPE_CONNTRACK:
-                       ret = flow_dv_validate_item_aso_ct(dev, items,
-                                                          &item_flags, error);
+                       ret = mlx5_flow_dv_validate_item_aso_ct(dev, items,
+                                                               &item_flags,
+                                                               error);
                        if (ret < 0)
                                return ret;
                        break;
@@ -8356,10 +8392,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
-                       ret = flow_dv_validate_action_l2_encap(dev,
-                                                              action_flags,
-                                                              actions, attr,
-                                                              error);
+                       ret = mlx5_flow_dv_validate_action_l2_encap(dev,
+                                                                   action_flags,
+                                                                   actions,
+                                                                   attr,
+                                                                   error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_ENCAP;
@@ -8367,9 +8404,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
-                       ret = flow_dv_validate_action_decap(dev, action_flags,
-                                                           actions, item_flags,
-                                                           attr, error);
+                       ret = mlx5_flow_dv_validate_action_decap(dev,
+                                                                action_flags,
+                                                                actions,
+                                                                item_flags,
+                                                                attr, error);
                        if (ret < 0)
                                return ret;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
@@ -8378,7 +8417,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
-                       ret = flow_dv_validate_action_raw_encap_decap
+                       ret = mlx5_flow_dv_validate_action_raw_encap_decap
                                (dev, NULL, actions->conf, attr, &action_flags,
                                 &actions_n, actions, item_flags, error);
                        if (ret < 0)
@@ -8394,11 +8433,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        } else {
                                encap = actions->conf;
                        }
-                       ret = flow_dv_validate_action_raw_encap_decap
-                                          (dev,
-                                           decap ? decap : &empty_decap, encap,
-                                           attr, &action_flags, &actions_n,
-                                           actions, item_flags, error);
+                       ret = mlx5_flow_dv_validate_action_raw_encap_decap
+                               (dev,
+                                decap ? decap : &empty_decap, encap,
+                                attr, &action_flags, &actions_n,
+                                actions, item_flags, error);
                        if (ret < 0)
                                return ret;
                        if ((action_flags & MLX5_FLOW_ACTION_SAMPLE) &&
@@ -8707,9 +8746,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                        rw_act_num += ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_CONNTRACK:
-                       ret = flow_dv_validate_action_aso_ct(dev, action_flags,
-                                                            item_flags,
-                                                            is_root, error);
+                       ret = mlx5_flow_dv_validate_action_aso_ct(dev,
+                                                                 action_flags,
+                                                                 item_flags,
+                                                                 is_root,
+                                                                 error);
                        if (ret < 0)
                                return ret;
                        if (action_flags & MLX5_FLOW_ACTION_SAMPLE)
diff --git a/drivers/net/mlx5/mlx5_flow_hw.c b/drivers/net/mlx5/mlx5_flow_hw.c
index 63194935a3..e8562660dd 100644
--- a/drivers/net/mlx5/mlx5_flow_hw.c
+++ b/drivers/net/mlx5/mlx5_flow_hw.c
@@ -11,6 +11,7 @@
 #include "mlx5.h"
 #include "mlx5_defs.h"
 #include "mlx5_flow.h"
+#include "mlx5_flow_os.h"
 #include "mlx5_rx.h"
 
 #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
@@ -484,79 +485,6 @@ flow_hw_hashfields_set(struct mlx5_flow_rss_desc *rss_desc,
        *hash_fields = fields;
 }
 
-/**
- * Generate the matching pattern item flags.
- *
- * @param[in] items
- *   Pointer to the list of items.
- *
- * @return
- *   Matching item flags. RSS hash field function
- *   silently ignores the flags which are unsupported.
- */
-static uint64_t
-flow_hw_matching_item_flags_get(const struct rte_flow_item items[])
-{
-       uint64_t item_flags = 0;
-       uint64_t last_item = 0;
-
-       for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
-               int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
-               int item_type = items->type;
-
-               switch (item_type) {
-               case RTE_FLOW_ITEM_TYPE_IPV4:
-                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
-                                            MLX5_FLOW_LAYER_OUTER_L3_IPV4;
-                       break;
-               case RTE_FLOW_ITEM_TYPE_IPV6:
-                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
-                                            MLX5_FLOW_LAYER_OUTER_L3_IPV6;
-                       break;
-               case RTE_FLOW_ITEM_TYPE_TCP:
-                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
-                                            MLX5_FLOW_LAYER_OUTER_L4_TCP;
-                       break;
-               case RTE_FLOW_ITEM_TYPE_UDP:
-                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
-                                            MLX5_FLOW_LAYER_OUTER_L4_UDP;
-                       break;
-               case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
-                       last_item = tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
-                                            MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
-                       break;
-               case RTE_FLOW_ITEM_TYPE_GRE:
-                       last_item = MLX5_FLOW_LAYER_GRE;
-                       break;
-               case RTE_FLOW_ITEM_TYPE_NVGRE:
-                       last_item = MLX5_FLOW_LAYER_GRE;
-                       break;
-               case RTE_FLOW_ITEM_TYPE_VXLAN:
-                       last_item = MLX5_FLOW_LAYER_VXLAN;
-                       break;
-               case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
-                       last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
-                       break;
-               case RTE_FLOW_ITEM_TYPE_GENEVE:
-                       last_item = MLX5_FLOW_LAYER_GENEVE;
-                       break;
-               case RTE_FLOW_ITEM_TYPE_MPLS:
-                       last_item = MLX5_FLOW_LAYER_MPLS;
-                       break;
-               case RTE_FLOW_ITEM_TYPE_GTP:
-                       last_item = MLX5_FLOW_LAYER_GTP;
-                       break;
-               case RTE_FLOW_ITEM_TYPE_COMPARE:
-                       last_item = MLX5_FLOW_ITEM_COMPARE;
-                       break;
-               default:
-                       break;
-               }
-               item_flags |= last_item;
-       }
-       return item_flags;
-}
-
 /**
  * Register destination table DR jump action.
  *
@@ -6547,8 +6475,8 @@ mlx5_hw_validate_action_l2_encap(struct rte_eth_dev *dev,
                .transfer = template_attr->transfer
        };
 
-       return flow_dv_validate_action_l2_encap(dev, action_flags, action,
-                                               &attr, error);
+       return mlx5_flow_dv_validate_action_l2_encap(dev, action_flags, action,
+                                                    &attr, error);
 }
 
 static int
@@ -6579,8 +6507,8 @@ mlx5_hw_validate_action_l2_decap(struct rte_eth_dev *dev,
                action->type == RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
                MLX5_FLOW_LAYER_VXLAN : 0;
 
-       return flow_dv_validate_action_decap(dev, action_flags, action,
-                                            item_flags, &attr, error);
+       return mlx5_flow_dv_validate_action_decap(dev, action_flags, action,
+                                                 item_flags, &attr, error);
 }
 
 static int
@@ -6594,9 +6522,9 @@ mlx5_hw_validate_action_conntrack(struct rte_eth_dev *dev,
        RTE_SET_USED(template_action);
        RTE_SET_USED(template_mask);
        RTE_SET_USED(template_attr);
-       return flow_dv_validate_action_aso_ct(dev, action_flags,
-                                             MLX5_FLOW_LAYER_OUTER_L4_TCP,
-                                             false, error);
+       return mlx5_flow_dv_validate_action_aso_ct(dev, action_flags,
+                                                  MLX5_FLOW_LAYER_OUTER_L4_TCP,
+                                                  false, error);
 }
 
 static int
@@ -6666,11 +6594,12 @@ flow_hw_validate_action_raw_reformat(struct rte_eth_dev *dev,
                if (ret)
                        return ret;
        }
-       return flow_dv_validate_action_raw_encap_decap(dev, raw_decap,
-                                                      raw_encap, &attr,
-                                                      action_flags, &actions_n,
-                                                      template_action,
-                                                      item_flags, error);
+       return mlx5_flow_dv_validate_action_raw_encap_decap(dev, raw_decap,
+                                                           raw_encap, &attr,
+                                                           action_flags,
+                                                           &actions_n,
+                                                           template_action,
+                                                           item_flags, error);
 }
 
 
@@ -7702,18 +7631,6 @@ flow_hw_actions_template_destroy(struct rte_eth_dev *dev,
        return 0;
 }
 
-static uint32_t
-flow_hw_count_items(const struct rte_flow_item *items)
-{
-       const struct rte_flow_item *curr_item;
-       uint32_t nb_items;
-
-       nb_items = 0;
-       for (curr_item = items; curr_item->type != RTE_FLOW_ITEM_TYPE_END; ++curr_item)
-               ++nb_items;
-       return ++nb_items;
-}
-
 static struct rte_flow_item *
 flow_hw_prepend_item(const struct rte_flow_item *items,
                     const uint32_t nb_items,
@@ -7844,17 +7761,99 @@ mlx5_hw_validate_item_nsh(struct rte_eth_dev *dev,
        return mlx5_flow_validate_item_nsh(dev, item, error);
 }
 
+static bool
+mlx5_hw_flow_tunnel_ip_check(uint64_t last_item, uint64_t *item_flags)
+{
+       bool tunnel;
+
+       if (last_item == MLX5_FLOW_LAYER_OUTER_L3_IPV4) {
+               tunnel = true;
+               *item_flags |= MLX5_FLOW_LAYER_IPIP;
+       } else if (last_item == MLX5_FLOW_LAYER_OUTER_L3_IPV6) {
+               tunnel = true;
+               *item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
+       } else {
+               tunnel = false;
+       }
+       return tunnel;
+}
+
+const struct rte_flow_item_ipv4 hws_nic_ipv4_mask = {
+       .hdr = {
+               .version = 0xf,
+               .ihl = 0xf,
+               .type_of_service = 0xff,
+               .total_length = RTE_BE16(0xffff),
+               .packet_id = RTE_BE16(0xffff),
+               .fragment_offset = RTE_BE16(0xffff),
+               .time_to_live = 0xff,
+               .next_proto_id = 0xff,
+               .src_addr = RTE_BE32(0xffffffff),
+               .dst_addr = RTE_BE32(0xffffffff),
+       },
+};
+
+const struct rte_flow_item_ipv6 hws_nic_ipv6_mask = {
+       .hdr = {
+               .vtc_flow = RTE_BE32(0xffffffff),
+               .payload_len = RTE_BE16(0xffff),
+               .proto = 0xff,
+               .hop_limits = 0xff,
+               .src_addr =
+               "\xff\xff\xff\xff\xff\xff\xff\xff"
+               "\xff\xff\xff\xff\xff\xff\xff\xff",
+               .dst_addr =
+               "\xff\xff\xff\xff\xff\xff\xff\xff"
+               "\xff\xff\xff\xff\xff\xff\xff\xff",
+       },
+       .has_frag_ext = 1,
+};
+
+static int
+flow_hw_validate_item_ptype(const struct rte_flow_item *item,
+                           struct rte_flow_error *error)
+{
+       const struct rte_flow_item_ptype *ptype = item->mask;
+
+       /* HWS does not allow empty PTYPE mask */
+       if (!ptype)
+               return rte_flow_error_set(error, EINVAL,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL, "empty ptype mask");
+       if (!(ptype->packet_type &
+             (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK |
+              RTE_PTYPE_INNER_L2_MASK | RTE_PTYPE_INNER_L3_MASK |
+              RTE_PTYPE_INNER_L4_MASK)))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                         NULL, "ptype mask not supported");
+       return 0;
+}
+
+struct mlx5_hw_pattern_validation_ctx {
+       const struct rte_flow_item *geneve_item;
+       const struct rte_flow_item *flex_item;
+};
+
 static int
 flow_hw_pattern_validate(struct rte_eth_dev *dev,
                         const struct rte_flow_pattern_template_attr *attr,
                         const struct rte_flow_item items[],
+                        uint64_t *item_flags,
                         struct rte_flow_error *error)
 {
        struct mlx5_priv *priv = dev->data->dev_private;
-       int i, tag_idx;
-       bool items_end = false;
+       const struct rte_flow_item *item;
+       const struct rte_flow_item *gtp_item = NULL;
+       const struct rte_flow_item *gre_item = NULL;
+       const struct rte_flow_attr flow_attr = {
+               .ingress = attr->ingress,
+               .egress = attr->egress,
+               .transfer = attr->transfer
+       };
+       int ret, tag_idx;
        uint32_t tag_bitmap = 0;
-       int ret;
+       uint64_t last_item = 0;
 
        if (!mlx5_hw_ctx_validate(dev, error))
                return -rte_errno;
@@ -7892,14 +7891,20 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
                                                  "transfer attribute cannot be used when"
                                                  " E-Switch is disabled");
        }
-       for (i = 0; !items_end; i++) {
-               int type = items[i].type;
+       for (item = items; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+               bool tunnel = *item_flags & MLX5_FLOW_LAYER_TUNNEL;
 
-               switch (type) {
+               switch ((int)item->type) {
+               case RTE_FLOW_ITEM_TYPE_PTYPE:
+                       ret = flow_hw_validate_item_ptype(item, error);
+                       if (ret)
+                               return ret;
+                       last_item = MLX5_FLOW_ITEM_PTYPE;
+                       break;
                case RTE_FLOW_ITEM_TYPE_TAG:
                {
                        const struct rte_flow_item_tag *tag =
-                               (const struct rte_flow_item_tag *)items[i].spec;
+                               (const struct rte_flow_item_tag *)item->spec;
 
                        if (tag == NULL)
                                return rte_flow_error_set(error, EINVAL,
@@ -7924,12 +7929,13 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
                                                          NULL,
                                                          "Duplicated tag index");
                        tag_bitmap |= 1 << tag_idx;
+                       last_item = MLX5_FLOW_ITEM_TAG;
                        break;
                }
                case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
                {
                        const struct rte_flow_item_tag *tag =
-                               (const struct rte_flow_item_tag *)items[i].spec;
+                               (const struct rte_flow_item_tag *)item->spec;
                        uint16_t regcs = (uint8_t)priv->sh->cdev->config.hca_attr.set_reg_c;
 
                        if (!((1 << (tag->index - REG_C_0)) & regcs))
@@ -7956,9 +7962,11 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
                                                  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
                                                  "represented port item cannot be used"
                                                  " when egress attribute is set");
+                       last_item = MLX5_FLOW_ITEM_REPRESENTED_PORT;
                        break;
                case RTE_FLOW_ITEM_TYPE_META:
                        /* ingress + group 0 is not supported */
+                       *item_flags |= MLX5_FLOW_ITEM_METADATA;
                        break;
                case RTE_FLOW_ITEM_TYPE_METER_COLOR:
                {
@@ -7970,6 +7978,12 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
                                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                                          NULL,
                                                          "Unsupported meter color register");
+                       if (*item_flags &
+                           (MLX5_FLOW_ITEM_QUOTA | MLX5_FLOW_LAYER_ASO_CT))
+                               return rte_flow_error_set
+                                       (error, EINVAL,
+                                        RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
+                       last_item = MLX5_FLOW_ITEM_METER_COLOR;
                        break;
                }
                case RTE_FLOW_ITEM_TYPE_AGGR_AFFINITY:
@@ -7984,56 +7998,250 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
                                                          "Aggregated affinity item not supported"
                                                          " with egress or transfer"
                                                          " attribute");
+                       last_item = MLX5_FLOW_ITEM_AGGR_AFFINITY;
                        break;
                }
-               case RTE_FLOW_ITEM_TYPE_COMPARE:
-               {
-                       ret = flow_hw_validate_item_compare(&items[i], error);
-                       if (ret)
-                               return ret;
+               case RTE_FLOW_ITEM_TYPE_GENEVE:
+                       last_item = MLX5_FLOW_LAYER_GENEVE;
                        break;
-               }
                case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
                {
-                       int ret;
-
-                       ret = mlx5_flow_geneve_tlv_option_validate(priv,
-                                                                  &items[i],
+                       last_item = MLX5_FLOW_LAYER_GENEVE_OPT;
+                       ret = mlx5_flow_geneve_tlv_option_validate(priv, item,
                                                                   error);
                        if (ret < 0)
                                return ret;
                        break;
                }
-               case RTE_FLOW_ITEM_TYPE_VOID:
+               case RTE_FLOW_ITEM_TYPE_COMPARE:
+               {
+                       last_item = MLX5_FLOW_ITEM_COMPARE;
+                       ret = flow_hw_validate_item_compare(item, error);
+                       if (ret)
+                               return ret;
+                       break;
+               }
                case RTE_FLOW_ITEM_TYPE_ETH:
+                       ret = mlx5_flow_validate_item_eth(dev, item,
+                                                         *item_flags,
+                                                         true, error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+                                   MLX5_FLOW_LAYER_OUTER_L2;
+                       break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
+                       ret = mlx5_flow_dv_validate_item_vlan(item, *item_flags,
+                                                             dev, error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+                                   MLX5_FLOW_LAYER_OUTER_VLAN;
+                       break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
+                       tunnel |= mlx5_hw_flow_tunnel_ip_check(last_item,
+                                                              item_flags);
+                       ret = mlx5_flow_dv_validate_item_ipv4(dev, item,
+                                                             *item_flags,
+                                                             last_item, 0,
+                                                             &hws_nic_ipv4_mask,
+                                                             error);
+                       if (ret)
+                               return ret;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+                                   MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+                       break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
+                       tunnel |= mlx5_hw_flow_tunnel_ip_check(last_item,
+                                                              item_flags);
+                       ret = mlx5_flow_validate_item_ipv6(dev, item,
+                                                          *item_flags,
+                                                          last_item, 0,
+                                                          &hws_nic_ipv6_mask,
+                                                          error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+                                   MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+                       break;
                case RTE_FLOW_ITEM_TYPE_UDP:
+                       ret = mlx5_flow_validate_item_udp(dev, item,
+                                                         *item_flags,
+                                                         0xff, error);
+                       if (ret)
+                               return ret;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+                                   MLX5_FLOW_LAYER_OUTER_L4_UDP;
+                       break;
                case RTE_FLOW_ITEM_TYPE_TCP:
+                       ret = mlx5_flow_validate_item_tcp
+                               (dev, item, *item_flags,
+                                0xff, &nic_tcp_mask, error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+                                   MLX5_FLOW_LAYER_OUTER_L4_TCP;
+                       break;
                case RTE_FLOW_ITEM_TYPE_GTP:
+                       gtp_item = item;
+                       ret = mlx5_flow_dv_validate_item_gtp(dev, gtp_item,
+                                                            *item_flags, error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = MLX5_FLOW_LAYER_GTP;
+                       break;
                case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+                       ret = mlx5_flow_dv_validate_item_gtp_psc(dev, item,
+                                                                last_item,
+                                                                gtp_item,
+                                                                false, error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = MLX5_FLOW_LAYER_GTP_PSC;
+                       break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
+                       ret = mlx5_flow_validate_item_vxlan(dev, 0, item,
+                                                           *item_flags,
+                                                           false, error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = MLX5_FLOW_LAYER_VXLAN;
+                       break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+                       ret = mlx5_flow_validate_item_vxlan_gpe(item,
+                                                               *item_flags,
+                                                               dev, error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+                       break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
-               case RTE_FLOW_ITEM_TYPE_GENEVE:
+                       ret = mlx5_flow_validate_item_mpls(dev, item,
+                                                          *item_flags,
+                                                          last_item, error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = MLX5_FLOW_LAYER_MPLS;
+                       break;
                case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
+               case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+                       last_item = MLX5_FLOW_ITEM_SQ;
+                       break;
                case RTE_FLOW_ITEM_TYPE_GRE:
+                       ret = mlx5_flow_validate_item_gre(dev, item,
+                                                         *item_flags,
+                                                         0xff, error);
+                       if (ret < 0)
+                               return ret;
+                       gre_item = item;
+                       last_item = MLX5_FLOW_LAYER_GRE;
+                       break;
                case RTE_FLOW_ITEM_TYPE_GRE_KEY:
+                       if (!(*item_flags & MLX5_FLOW_LAYER_GRE))
+                               return rte_flow_error_set
+                                       (error, EINVAL,
+                                        RTE_FLOW_ERROR_TYPE_ITEM, item, "GRE item is missing");
+                       ret = mlx5_flow_validate_item_gre_key
+                               (dev, item, *item_flags, gre_item, error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = MLX5_FLOW_LAYER_GRE_KEY;
+                       break;
                case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
+                       if (!(*item_flags & MLX5_FLOW_LAYER_GRE))
+                               return rte_flow_error_set
+                                       (error, EINVAL,
+                                        RTE_FLOW_ERROR_TYPE_ITEM, item, "GRE item is missing");
+                       ret = mlx5_flow_validate_item_gre_option(dev, item,
+                                                                *item_flags,
+                                                                &flow_attr,
+                                                                gre_item,
+                                                                error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = MLX5_FLOW_LAYER_GRE;
+                       break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
+                       ret = mlx5_flow_validate_item_nvgre(dev, item,
+                                                           *item_flags, 0xff,
+                                                           error);
+                       if (ret)
+                               return ret;
+                       last_item = MLX5_FLOW_LAYER_NVGRE;
+                       break;
                case RTE_FLOW_ITEM_TYPE_ICMP:
+                       ret = mlx5_flow_validate_item_icmp(dev, item,
+                                                          *item_flags, 0xff,
+                                                          error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = MLX5_FLOW_LAYER_ICMP;
+                       break;
                case RTE_FLOW_ITEM_TYPE_ICMP6:
+                       ret = mlx5_flow_validate_item_icmp6(dev, item,
+                                                           *item_flags, 0xff,
+                                                           error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = MLX5_FLOW_LAYER_ICMP6;
+                       break;
                case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
-               case RTE_FLOW_ITEM_TYPE_QUOTA:
                case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
+                       ret = mlx5_flow_validate_item_icmp6_echo(dev, item,
+                                                                *item_flags,
+                                                                0xff, error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = MLX5_FLOW_LAYER_ICMP6;
+                       break;
                case RTE_FLOW_ITEM_TYPE_CONNTRACK:
-               case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
+                       if (*item_flags &
+                           (MLX5_FLOW_ITEM_QUOTA | MLX5_FLOW_LAYER_ASO_CT))
+                               return rte_flow_error_set
+                                       (error, EINVAL,
+                                        RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
+                       ret = mlx5_flow_dv_validate_item_aso_ct(dev, item,
+                                                               item_flags,
+                                                               error);
+                       if (ret < 0)
+                               return ret;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_QUOTA:
+                       if (*item_flags &
+                           (MLX5_FLOW_ITEM_METER_COLOR |
+                            MLX5_FLOW_LAYER_ASO_CT))
+                               return rte_flow_error_set
+                                       (error, EINVAL,
+                                        RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Only one ASO item is supported");
+                       last_item = MLX5_FLOW_ITEM_QUOTA;
+                       break;
                case RTE_FLOW_ITEM_TYPE_ESP:
+                       ret = mlx5_flow_os_validate_item_esp(dev, item,
+                                                            *item_flags, 0xff,
+                                                            error);
+                       if (ret < 0)
+                               return ret;
+                       last_item = MLX5_FLOW_ITEM_ESP;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
+                       last_item = tunnel ?
+                                   MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
+                                   MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
+                       break;
                case RTE_FLOW_ITEM_TYPE_FLEX:
-               case RTE_FLOW_ITEM_TYPE_IB_BTH:
-               case RTE_FLOW_ITEM_TYPE_PTYPE:
+                       /* match mlx5dr_definer_conv_items_to_hl() */
+                       last_item = tunnel ?
+                                   MLX5_FLOW_ITEM_INNER_FLEX :
+                                   MLX5_FLOW_ITEM_OUTER_FLEX;
+                       break;
                case RTE_FLOW_ITEM_TYPE_RANDOM:
+                       last_item = MLX5_FLOW_ITEM_RANDOM;
+                       break;
+               case RTE_FLOW_ITEM_TYPE_NSH:
+                       last_item = MLX5_FLOW_ITEM_NSH;
+                       ret = mlx5_hw_validate_item_nsh(dev, item, error);
+                       if (ret < 0)
+                               return ret;
                        break;
                case RTE_FLOW_ITEM_TYPE_INTEGRITY:
                        /*
@@ -8043,13 +8251,9 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
                         * template and item spec in flow rule.
                         */
                        break;
-               case RTE_FLOW_ITEM_TYPE_NSH:
-                       ret = mlx5_hw_validate_item_nsh(dev, &items[i], error);
-                       if (ret < 0)
-                               return ret;
-                       break;
+               case RTE_FLOW_ITEM_TYPE_IB_BTH:
+               case RTE_FLOW_ITEM_TYPE_VOID:
                case RTE_FLOW_ITEM_TYPE_END:
-                       items_end = true;
                        break;
                default:
                        return rte_flow_error_set(error, EINVAL,
@@ -8057,19 +8261,9 @@ flow_hw_pattern_validate(struct rte_eth_dev *dev,
                                                  NULL,
                                                  "Unsupported item type");
                }
+               *item_flags |= last_item;
        }
-       return 0;
-}
-
-static bool
-flow_hw_pattern_has_sq_match(const struct rte_flow_item *items)
-{
-       unsigned int i;
-
-       for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i)
-               if (items[i].type == (enum rte_flow_item_type)MLX5_RTE_FLOW_ITEM_TYPE_SQ)
-                       return true;
-       return false;
+       return 1 + RTE_PTR_DIFF(item, items) / sizeof(item[0]);
 }
 
 /*
@@ -8083,9 +8277,10 @@ flow_hw_pattern_has_sq_match(const struct rte_flow_item *items)
  */
 static int
 pattern_template_validate(struct rte_eth_dev *dev,
-                         struct rte_flow_pattern_template *pt[], uint32_t pt_num)
+                         struct rte_flow_pattern_template *pt[],
+                         uint32_t pt_num,
+                         struct rte_flow_error *error)
 {
-       struct rte_flow_error error;
        struct mlx5_flow_template_table_cfg tbl_cfg = {
                .attr = {
                        .nb_flows = 64,
@@ -8104,25 +8299,45 @@ pattern_template_validate(struct rte_eth_dev *dev,
        struct rte_flow_template_table *tmpl_tbl;
        int ret;
 
-       if (pt[0]->attr.ingress)
-               action_template = priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX];
-       else if (pt[0]->attr.egress)
-               action_template = priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX];
-       else if (pt[0]->attr.transfer)
-               action_template = priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB];
-       else
-               return -EINVAL;
+       if (pt[0]->attr.ingress) {
+               action_template =
+                       priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_RX];
+       } else if (pt[0]->attr.egress) {
+               action_template =
+                       priv->action_template_drop[MLX5DR_TABLE_TYPE_NIC_TX];
+       } else if (pt[0]->attr.transfer) {
+               action_template =
+                       priv->action_template_drop[MLX5DR_TABLE_TYPE_FDB];
+       } else {
+               ret = EINVAL;
+               goto end;
+       }
+
        if (pt[0]->item_flags & MLX5_FLOW_ITEM_COMPARE)
                tbl_cfg.attr.nb_flows = 1;
        tmpl_tbl = flow_hw_table_create(dev, &tbl_cfg, pt, pt_num,
-                                       &action_template, 1, NULL);
+                                       &action_template, 1, error);
        if (tmpl_tbl) {
                ret = 0;
-               flow_hw_table_destroy(dev, tmpl_tbl, &error);
+               flow_hw_table_destroy(dev, tmpl_tbl, error);
        } else {
-               ret = rte_errno == E2BIG ? -E2BIG : 0;
+               switch (rte_errno) {
+               case E2BIG:
+                       ret = E2BIG;
+                       break;
+               case ENOTSUP:
+                       ret = EINVAL;
+                       break;
+               default:
+                       ret = 0;
+                       break;
+               }
        }
-       return ret;
+end:
+       if (ret)
+               rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                  NULL, "failed to validate pattern template");
+       return -ret;
 }
 
 /**
@@ -8150,7 +8365,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
        struct rte_flow_pattern_template *it;
        struct rte_flow_item *copied_items = NULL;
        const struct rte_flow_item *tmpl_items;
-       uint32_t orig_item_nb;
+       uint64_t orig_item_nb, item_flags = 0;
        struct rte_flow_item port = {
                .type = RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT,
                .mask = &rte_flow_item_ethdev_mask,
@@ -8170,10 +8385,13 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
                .last = NULL
        };
        unsigned int i = 0;
+       int rc;
 
-       if (flow_hw_pattern_validate(dev, attr, items, error))
+       /* Validate application items only */
+       rc = flow_hw_pattern_validate(dev, attr, items, &item_flags, error);
+       if (rc < 0)
                return NULL;
-       orig_item_nb = flow_hw_count_items(items);
+       orig_item_nb = rc;
        if (priv->sh->config.dv_esw_en &&
            priv->sh->config.repr_matching &&
            attr->ingress && !attr->egress && !attr->transfer) {
@@ -8184,7 +8402,7 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
        } else if (priv->sh->config.dv_esw_en &&
                   priv->sh->config.repr_matching &&
                   !attr->ingress && attr->egress && !attr->transfer) {
-               if (flow_hw_pattern_has_sq_match(items)) {
+               if (item_flags & MLX5_FLOW_ITEM_SQ) {
                        DRV_LOG(DEBUG, "Port %u omitting implicit REG_C_0 match for egress "
                                       "pattern template", dev->data->port_id);
                        tmpl_items = items;
@@ -8200,28 +8418,23 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
 setup_pattern_template:
        it = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*it), 0, rte_socket_id());
        if (!it) {
-               if (copied_items)
-                       mlx5_free(copied_items);
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "cannot allocate item template");
-               return NULL;
+               goto error;
        }
        it->attr = *attr;
+       it->item_flags = item_flags;
        it->orig_item_nb = orig_item_nb;
        it->mt = mlx5dr_match_template_create(tmpl_items, attr->relaxed_matching);
        if (!it->mt) {
-               if (copied_items)
-                       mlx5_free(copied_items);
-               mlx5_free(it);
                rte_flow_error_set(error, rte_errno,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL,
                                   "cannot create match template");
-               return NULL;
+               goto error;
        }
-       it->item_flags = flow_hw_matching_item_flags_get(tmpl_items);
        if (copied_items) {
                if (attr->ingress)
                        it->implicit_port = true;
@@ -8235,58 +8448,63 @@ flow_hw_pattern_template_create(struct rte_eth_dev *dev,
                if (((it->item_flags & MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT) &&
                     (it->item_flags & MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT)) 
||
                    (mlx5_alloc_srh_flex_parser(dev))) {
-                       claim_zero(mlx5dr_match_template_destroy(it->mt));
-                       mlx5_free(it);
                        rte_flow_error_set(error, rte_errno,
                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 
NULL,
                                           "cannot create IPv6 routing 
extension support");
-                       return NULL;
+                       goto error;
                }
        }
-       for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; ++i) {
-               switch (items[i].type) {
-               case RTE_FLOW_ITEM_TYPE_FLEX: {
-                       const struct rte_flow_item_flex *spec =
-                               (const struct rte_flow_item_flex 
*)items[i].spec;
-                       struct rte_flow_item_flex_handle *handle = spec->handle;
+       if (it->item_flags & MLX5_FLOW_ITEM_FLEX) {
+               for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
+                       const struct rte_flow_item_flex *spec = items[i].spec;
+                       struct rte_flow_item_flex_handle *handle;
 
-                       if (flow_hw_flex_item_acquire(dev, handle, 
&it->flex_item)) {
-                               rte_flow_error_set(error, rte_errno,
-                                                  
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-                                                  "Failed to acquire flex 
item");
+                       if (items[i].type != RTE_FLOW_ITEM_TYPE_FLEX)
+                               continue;
+                       handle = spec->handle;
+                       if (flow_hw_flex_item_acquire(dev, handle,
+                                                     &it->flex_item)) {
+                               rte_flow_error_set(error, EINVAL,
+                                                  
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                                  NULL, "cannot create hw FLEX 
item");
                                goto error;
                        }
-                       break;
                }
-               case RTE_FLOW_ITEM_TYPE_GENEVE_OPT: {
-                       const struct rte_flow_item_geneve_opt *spec = 
items[i].spec;
+       }
+       if (it->item_flags & MLX5_FLOW_LAYER_GENEVE_OPT) {
+               for (i = 0; items[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
+                       const struct rte_flow_item_geneve_opt *spec =
+                               items[i].spec;
 
+                       if (items[i].type != RTE_FLOW_ITEM_TYPE_GENEVE_OPT)
+                               continue;
                        if (mlx5_geneve_tlv_option_register(priv, spec,
                                                            
&it->geneve_opt_mng)) {
-                               rte_flow_error_set(error, rte_errno,
-                                                  
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
-                                                  "Failed to register GENEVE 
TLV option");
+                               rte_flow_error_set(error, EINVAL,
+                                                  
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+                                                  NULL, "cannot register 
GENEVE TLV option");
                                goto error;
                        }
-                       break;
-               }
-               default:
-                       break;
                }
        }
        rte_atomic_fetch_add_explicit(&it->refcnt, 1, rte_memory_order_relaxed);
-       rte_errno = pattern_template_validate(dev, &it, 1);
-       if (rte_errno)
+       rc = pattern_template_validate(dev, &it, 1, error);
+       if (rc)
                goto error;
        LIST_INSERT_HEAD(&priv->flow_hw_itt, it, next);
        return it;
 error:
-       flow_hw_flex_item_release(dev, &it->flex_item);
-       mlx5_geneve_tlv_options_unregister(priv, &it->geneve_opt_mng);
-       claim_zero(mlx5dr_match_template_destroy(it->mt));
-       mlx5_free(it);
-       rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, 
NULL,
-                          "Failed to create pattern template");
+       if (it) {
+               if (it->flex_item)
+                       flow_hw_flex_item_release(dev, &it->flex_item);
+               if (it->geneve_opt_mng.nb_options)
+                       mlx5_geneve_tlv_options_unregister(priv, 
&it->geneve_opt_mng);
+               if (it->mt)
+                       claim_zero(mlx5dr_match_template_destroy(it->mt));
+               mlx5_free(it);
+       }
+       if (copied_items)
+               mlx5_free(copied_items);
        return NULL;
 }
 
diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c 
b/drivers/net/mlx5/mlx5_flow_verbs.c
index 9879f14213..7b1ef322bd 100644
--- a/drivers/net/mlx5/mlx5_flow_verbs.c
+++ b/drivers/net/mlx5/mlx5_flow_verbs.c
@@ -1332,9 +1332,10 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                switch (items->type) {
 #ifdef HAVE_IBV_FLOW_SPEC_ESP
                case RTE_FLOW_ITEM_TYPE_ESP:
-                       ret = mlx5_flow_os_validate_item_esp(items, item_flags,
-                                                         next_protocol,
-                                                         error);
+                       ret = mlx5_flow_os_validate_item_esp(dev, items,
+                                                            item_flags,
+                                                            next_protocol,
+                                                            error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_ITEM_ESP;
@@ -1343,7 +1344,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
-                       ret = mlx5_flow_validate_item_eth(items, item_flags,
+                       ret = mlx5_flow_validate_item_eth(dev, items, 
item_flags,
                                                          false, error);
                        if (ret < 0)
                                return ret;
@@ -1387,7 +1388,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ret = mlx5_flow_validate_item_ipv4
-                                               (items, item_flags,
+                                               (dev, items, item_flags,
                                                 last_item, ether_type, NULL,
                                                 MLX5_ITEM_RANGE_NOT_ACCEPTED,
                                                 error);
@@ -1410,7 +1411,8 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
-                       ret = mlx5_flow_validate_item_ipv6(items, item_flags,
+                       ret = mlx5_flow_validate_item_ipv6(dev, items,
+                                                          item_flags,
                                                           last_item,
                                                           ether_type, NULL,
                                                           error);
@@ -1433,7 +1435,8 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
-                       ret = mlx5_flow_validate_item_udp(items, item_flags,
+                       ret = mlx5_flow_validate_item_udp(dev, items,
+                                                         item_flags,
                                                          next_protocol,
                                                          error);
                        const struct rte_flow_item_udp *spec = items->spec;
@@ -1452,7 +1455,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        ret = mlx5_flow_validate_item_tcp
-                                               (items, item_flags,
+                                               (dev, items, item_flags,
                                                 next_protocol,
                                                 &rte_flow_item_tcp_mask,
                                                 error);
@@ -1478,7 +1481,7 @@ flow_verbs_validate(struct rte_eth_dev *dev,
                        last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
-                       ret = mlx5_flow_validate_item_gre(items, item_flags,
+                       ret = mlx5_flow_validate_item_gre(dev, items, 
item_flags,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
diff --git a/drivers/net/mlx5/windows/mlx5_flow_os.c 
b/drivers/net/mlx5/windows/mlx5_flow_os.c
index f907b21ecc..bf93da9f1e 100644
--- a/drivers/net/mlx5/windows/mlx5_flow_os.c
+++ b/drivers/net/mlx5/windows/mlx5_flow_os.c
@@ -424,7 +424,8 @@ mlx5_flow_os_workspace_gc_add(struct mlx5_flow_workspace 
*ws)
 }
 
 int
-mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
+mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
+                           const struct rte_flow_item *item,
                            uint64_t item_flags,
                            uint8_t target_protocol,
                            struct rte_flow_error *error)
@@ -459,7 +460,7 @@ mlx5_flow_os_validate_item_esp(const struct rte_flow_item 
*item,
                                          "matching on spi field in esp is not"
                                          " supported on Windows");
        ret = mlx5_flow_item_acceptable
-               (item, (const uint8_t *)mask,
+               (dev, item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_esp_mask,
                 sizeof(struct rte_flow_item_esp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
                 error);
diff --git a/drivers/net/mlx5/windows/mlx5_flow_os.h 
b/drivers/net/mlx5/windows/mlx5_flow_os.h
index 856d8ba948..36edc3d532 100644
--- a/drivers/net/mlx5/windows/mlx5_flow_os.h
+++ b/drivers/net/mlx5/windows/mlx5_flow_os.h
@@ -468,10 +468,11 @@ int mlx5_flow_os_destroy_flow(void *drv_flow_ptr);
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 int
-mlx5_flow_os_validate_item_esp(const struct rte_flow_item *item,
-                           uint64_t item_flags,
-                           uint8_t target_protocol,
-                           struct rte_flow_error *error);
+mlx5_flow_os_validate_item_esp(const struct rte_eth_dev *dev,
+                              const struct rte_flow_item *item,
+                              uint64_t item_flags,
+                              uint8_t target_protocol,
+                              struct rte_flow_error *error);
 
 /**
  * Add per thread workspace to the global list for garbage collection.
-- 
2.43.0

Reply via email to