Hi,

> -----Original Message-----
> From: Dekel Peled <dek...@mellanox.com>
> Sent: Tuesday, May 5, 2020 3:58 PM
> To: Matan Azrad <ma...@mellanox.com>; Slava Ovsiienko
> <viachesl...@mellanox.com>; Raslan Darawsheh <rasl...@mellanox.com>
> Cc: dev@dpdk.org; sta...@dpdk.org
> Subject: [PATCH] net/mlx5: fix match on empty VLAN item in DV mode
> 
> In the existing implementation, using a wildcard VLAN item is not allowed.
> A VLAN item in a flow pattern must include a VLAN ID (vid) value.
> This requirement contradicts the intent of the documentation update [1].
> 
> This patch updates the VLAN item validation and translation to allow
> a wildcard VLAN item without a VLAN ID value.
> The user guide and release notes are updated accordingly.
> 
> [1]
> http://patches.dpdk.org/patch/69207/
> 
> Fixes: 00f75a40576b ("net/mlx5: fix VLAN match for DV mode")
> Cc: sta...@dpdk.org
> 
> Signed-off-by: Dekel Peled <dek...@mellanox.com>
> Acked-by: Viacheslav Ovsiienko <viachesl...@mellanox.com>
> ---
>  doc/guides/nics/mlx5.rst               | 18 +++++++
>  doc/guides/rel_notes/release_20_05.rst |  1 +
>  drivers/net/mlx5/mlx5_flow_dv.c        | 98 ++++++++++++++++++++++++++++++----
>  3 files changed, 107 insertions(+), 10 deletions(-)
> 
> diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
> index 0643592..fa93a75 100644
> --- a/doc/guides/nics/mlx5.rst
> +++ b/doc/guides/nics/mlx5.rst
> @@ -127,6 +127,24 @@ Limitations
> 
>    Will match any ipv4 packet (VLAN included).
> 
> +- When using DV flow engine (``dv_flow_en`` = 1), flow pattern without VLAN item
> +  will match untagged packets only.
> +  The flow rule::
> +
> +        flow create 0 ingress pattern eth / ipv4 / end ...
> +
> +  Will match untagged packets only.
> +  The flow rule::
> +
> +        flow create 0 ingress pattern eth / vlan / ipv4 / end ...
> +
> +  Will match tagged packets only, with any VLAN ID value.
> +  The flow rule::
> +
> +        flow create 0 ingress pattern eth / vlan vid is 3 / ipv4 / end ...
> +
> +  Will only match tagged packets with VLAN ID 3.
> +
>  - VLAN pop offload command:
> 
>    - Flow rules having a VLAN pop offload command as one of their actions and
> diff --git a/doc/guides/rel_notes/release_20_05.rst b/doc/guides/rel_notes/release_20_05.rst
> index b90e117..60fff51 100644
> --- a/doc/guides/rel_notes/release_20_05.rst
> +++ b/doc/guides/rel_notes/release_20_05.rst
> @@ -144,6 +144,7 @@ New Features
>    * Removed flow rules caching for memory saving and compliance with ethdev API.
>    * Optimized the memory consumption of flow.
>    * Added support for flow aging based on hardware counter.
> +  * Added support for flow pattern with wild-card VLAN item (without vid value).
> 
>  * **Updated the AESNI MB crypto PMD.**
> 
> diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
> index aa5c353..5a0bb9d 100644
> --- a/drivers/net/mlx5/mlx5_flow_dv.c
> +++ b/drivers/net/mlx5/mlx5_flow_dv.c
> @@ -1640,6 +1640,79 @@ struct field_modify_info modify_tcp[] = {
>  }
> 
>  /**
> + * Validate VLAN item.
> + *
> + * @param[in] item
> + *   Item specification.
> + * @param[in] item_flags
> + *   Bit-fields that holds the items detected until now.
> + * @param[in] dev
> + *   Ethernet device flow is being created on.
> + * @param[out] error
> + *   Pointer to error structure.
> + *
> + * @return
> + *   0 on success, a negative errno value otherwise and rte_errno is set.
> + */
> +static int
> +flow_dv_validate_item_vlan(const struct rte_flow_item *item,
> +                        uint64_t item_flags,
> +                        struct rte_eth_dev *dev,
> +                        struct rte_flow_error *error)
> +{
> +     const struct rte_flow_item_vlan *mask = item->mask;
> +     const struct rte_flow_item_vlan nic_mask = {
> +             .tci = RTE_BE16(UINT16_MAX),
> +             .inner_type = RTE_BE16(UINT16_MAX),
> +     };
> +     const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
> +     int ret;
> +     const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
> +                                     MLX5_FLOW_LAYER_INNER_L4) :
> +                                    (MLX5_FLOW_LAYER_OUTER_L3 |
> +                                     MLX5_FLOW_LAYER_OUTER_L4);
> +     const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
> +                                     MLX5_FLOW_LAYER_OUTER_VLAN;
> +
> +     if (item_flags & vlanm)
> +             return rte_flow_error_set(error, EINVAL,
> +                                       RTE_FLOW_ERROR_TYPE_ITEM, item,
> +                                       "multiple VLAN layers not supported");
> +     else if ((item_flags & l34m) != 0)
> +             return rte_flow_error_set(error, EINVAL,
> +                                       RTE_FLOW_ERROR_TYPE_ITEM, item,
> +                                       "VLAN cannot follow L3/L4 layer");
> +     if (!mask)
> +             mask = &rte_flow_item_vlan_mask;
> +     ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
> +                                     (const uint8_t *)&nic_mask,
> +                                     sizeof(struct rte_flow_item_vlan),
> +                                     error);
> +     if (ret)
> +             return ret;
> +     if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
> +             struct mlx5_priv *priv = dev->data->dev_private;
> +
> +             if (priv->vmwa_context) {
> +                     /*
> +                      * Non-NULL context means we have a virtual machine
> +                      * and SR-IOV enabled, we have to create VLAN interface
> +                      * to make hypervisor to setup E-Switch vport
> +                      * context correctly. We avoid creating the multiple
> +                      * VLAN interfaces, so we cannot support VLAN tag mask.
> +                      */
> +                     return rte_flow_error_set(error, EINVAL,
> +                                               RTE_FLOW_ERROR_TYPE_ITEM,
> +                                               item,
> +                                               "VLAN tag mask is not"
> +                                               " supported in virtual"
> +                                               " environment");
> +             }
> +     }
> +     return 0;
> +}
> +
> +/**
>   * Validate GTP item.
>   *
>   * @param[in] dev
> @@ -4818,8 +4891,8 @@ struct field_modify_info modify_tcp[] = {
>                       }
>                       break;
>               case RTE_FLOW_ITEM_TYPE_VLAN:
> -                     ret = mlx5_flow_validate_item_vlan(items, item_flags,
> -                                                        dev, error);
> +                     ret = flow_dv_validate_item_vlan(items, item_flags,
> +                                                      dev, error);
>                       if (ret < 0)
>                               return ret;
>                       last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
> @@ -5754,10 +5827,6 @@ struct field_modify_info modify_tcp[] = {
>       uint16_t tci_m;
>       uint16_t tci_v;
> 
> -     if (!vlan_v)
> -             return;
> -     if (!vlan_m)
> -             vlan_m = &rte_flow_item_vlan_mask;
>       if (inner) {
>               headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
>                                        inner_headers);
> @@ -5770,13 +5839,22 @@ struct field_modify_info modify_tcp[] = {
>                * This is workaround, masks are not supported,
>                * and pre-validated.
>                */
> -             dev_flow->handle->vf_vlan.tag =
> -                     rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
> +             if (vlan_v)
> +                     dev_flow->handle->vf_vlan.tag =
> +                                     rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
>       }
> -     tci_m = rte_be_to_cpu_16(vlan_m->tci);
> -     tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
> +     /*
> +      * When VLAN item exists in flow, mark packet as tagged,
> +      * even if TCI is not specified.
> +      */
>       MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
>       MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
> +     if (!vlan_v)
> +             return;
> +     if (!vlan_m)
> +             vlan_m = &rte_flow_item_vlan_mask;
> +     tci_m = rte_be_to_cpu_16(vlan_m->tci);
> +     tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
>       MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
>       MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
>       MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
> --
> 1.8.3.1
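
For reference, a minimal sketch (not part of the patch) of how the new wildcard
VLAN match can be expressed through the rte_flow C API, equivalent to the
documented testpmd rule "flow create 0 ingress pattern eth / vlan / ipv4 / end";
the function name, port ID and queue index below are illustrative assumptions:

    #include <stdint.h>
    #include <rte_flow.h>

    /* Illustrative helper: steer any VLAN-tagged IPv4 packet to queue 0. */
    static struct rte_flow *
    create_any_vlan_ipv4_rule(uint16_t port_id, struct rte_flow_error *error)
    {
            struct rte_flow_attr attr = { .ingress = 1 };
            /* VLAN item with no spec/mask: matches any tagged packet. */
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_VLAN },
                    { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_queue queue = { .index = 0 };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };

            if (rte_flow_validate(port_id, &attr, pattern, actions, error) != 0)
                    return NULL;
            return rte_flow_create(port_id, &attr, pattern, actions, error);
    }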


Patch applied to next-net-mlx,

Kindest regards,
Raslan Darawsheh
