On Mon, 19-07-22, 19:33, Dekel Peled wrote:
> Thanks, PSB.
> 
> > -----Original Message-----
> > From: Jack Min
> > Sent: Monday, July 22, 2019 12:31 PM
> > To: Dekel Peled <dek...@mellanox.com>
> > Cc: Yongseok Koh <ys...@mellanox.com>; Slava Ovsiienko
> > <viachesl...@mellanox.com>; Shahaf Shuler <shah...@mellanox.com>; Ori
> > Kam <or...@mellanox.com>; dev@dpdk.org
> > Subject: Re: [PATCH] net/mlx5: fix NVGRE matching
> > 
> > On Thu, 19-07-18, 22:42, Dekel Peled wrote:
> > > NVGRE has a GRE header with c_rsvd0_ver value 0x2000 and protocol
> > > value 0x6558.
> > > These should be matched when item_nvgre is provided.
> > >
> > > This patch adds a validation function for the NVGRE item, to validate
> > > that the input values, if they exist, are as required.
> > > It also updates the translate function of the NVGRE item, to add the
> > > required values if they were not specified.
> > >
> > > Original work by Xiaoyu Min <jack...@mellanox.com>
> > >
> > > Fixes: fc2c498ccb94 ("net/mlx5: add Direct Verbs translate items")
> > > Signed-off-by: Dekel Peled <dek...@mellanox.com>
> > > ---
> > >  drivers/net/mlx5/mlx5_flow.c    | 69 +++++++++++++++++++++++++++++++++++++++++
> > >  drivers/net/mlx5/mlx5_flow.h    | 10 ++++--
> > >  drivers/net/mlx5/mlx5_flow_dv.c | 25 +++++++++++++--
> > >  drivers/net/mlx5/mlx5_rxtx.h    |  2 +-
> > >  4 files changed, 101 insertions(+), 5 deletions(-)
> > >
> > > diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> > > index e082cbb..6aca4d6 100644
> > > --- a/drivers/net/mlx5/mlx5_flow.c
> > > +++ b/drivers/net/mlx5/mlx5_flow.c
> > > @@ -298,6 +298,10 @@ struct mlx5_flow_tunnel_info {
> > >           .tunnel = MLX5_FLOW_LAYER_MPLS,
> > >           .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
> > >   },
> > > + {
> > > +         .tunnel = MLX5_FLOW_LAYER_NVGRE,
> > > +         .ptype = RTE_PTYPE_TUNNEL_NVGRE,
> > > + },
> > >  };
> > >
> > >  /**
> > > @@ -1323,6 +1327,11 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
> > >           return rte_flow_error_set(error, EINVAL,
> > >                                     RTE_FLOW_ERROR_TYPE_ITEM, item,
> > >                                     "L3 cannot follow an L4 layer.");
> > > + else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
> > > +           !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
> > > +         return rte_flow_error_set(error, EINVAL,
> > > +                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
> > > +                                   "L3 cannot follow an NVGRE layer.");
> > >   if (!mask)
> > >           mask = &rte_flow_item_ipv4_mask;
> > >   else if (mask->hdr.next_proto_id != 0 &&
> > > @@ -1409,6 +1418,11 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
> > >           return rte_flow_error_set(error, EINVAL,
> > >                                     RTE_FLOW_ERROR_TYPE_ITEM, item,
> > >                                     "L3 cannot follow an L4 layer.");
> > > + else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
> > > +           !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
> > > +         return rte_flow_error_set(error, EINVAL,
> > > +                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
> > > +                                   "L3 cannot follow an NVGRE layer.");
> > >   if (!mask)
> > >           mask = &rte_flow_item_ipv6_mask;
> > >   ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
> > > @@ -1887,6 +1901,61 @@ uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
> > >                             " update.");
> > >  }
> > >
> > > +/**
> > > + * Validate NVGRE item.
> > > + *
> > > + * @param[in] item
> > > + *   Item specification.
> > > + * @param[in] item_flags
> > > + *   Bit flags to mark detected items.
> > > + * @param[in] target_protocol
> > > + *   The next protocol in the previous item.
> > > + * @param[out] error
> > > + *   Pointer to error structure.
> > > + *
> > > + * @return
> > > + *   0 on success, a negative errno value otherwise and rte_errno is set.
> > > + */
> > > +int
> > > +mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
> > > +                       uint64_t item_flags,
> > > +                       uint8_t target_protocol,
> > > +                       struct rte_flow_error *error)
> > > +{
> > > + const struct rte_flow_item_nvgre *mask = item->mask;
> > > + const struct rte_flow_item_nvgre *spec = item->spec;
> > > + int ret;
> > > +
> > > + if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
> > > +         return rte_flow_error_set(error, EINVAL,
> > > +                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
> > > +                                   "protocol filtering not compatible"
> > > +                                   " with this GRE layer");
> > > + if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
> > > +         return rte_flow_error_set(error, ENOTSUP,
> > > +                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
> > > +                                   "multiple tunnel layers not"
> > > +                                   " supported");
> > > + if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
> > > +         return rte_flow_error_set(error, ENOTSUP,
> > > +                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
> > > +                                   "L3 Layer is missing");
> > > + if (spec && (spec->protocol != RTE_BE16(RTE_ETHER_TYPE_TEB) ||
> > > +              spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000)))
> > > +         return rte_flow_error_set(error, EINVAL,
> > > +                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
> > > +                                   "wrong values for NVGRE");
> > It is not necessary to check the spec, because the following
> > mlx5_flow_item_acceptable() only accepts matching on the .tni field,
> > so there is no point in allowing the user to match on .protocol and
> > .c_k_s_rsvd0_ver.
> > What do you think?
> 
> I think it is safer to verify that the application doesn't insert invalid values.
> It covers any future PMD or application change.
If the mask is not enabled for .protocol and .c_k_s_rsvd0_ver, whatever
the spec value is, it doesn't matter, right?
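
(For illustration only, a minimal sketch that is not part of the patch,
assuming the standard rte_flow pattern API and its default
rte_flow_item_nvgre_mask, which covers only the TNI; the values in the
spec's .protocol and .c_k_s_rsvd0_ver fields below are never used for
matching:)

	#include <rte_byteorder.h>
	#include <rte_flow.h>

	/* Only .tni is covered by the default NVGRE mask, so only it is matched. */
	static const struct rte_flow_item_nvgre nvgre_spec = {
		.c_k_s_rsvd0_ver = RTE_BE16(0x0000), /* ignored: mask bits are zero */
		.protocol = RTE_BE16(0x1234),        /* ignored: mask bits are zero */
		.tni = { 0x00, 0x12, 0x34 },         /* matched: covered by the mask */
	};
	static const struct rte_flow_item nvgre_item = {
		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
		.spec = &nvgre_spec,
		.mask = &rte_flow_item_nvgre_mask,   /* default mask: TNI only */
		.last = NULL,
	};

An application would place such an item in the pattern array passed to
rte_flow_create().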

> 
> > 
> > > + if (!mask)
> > > +         mask = &rte_flow_item_nvgre_mask;
> > > + ret = mlx5_flow_item_acceptable
> > > +         (item, (const uint8_t *)mask,
> > > +          (const uint8_t *)&rte_flow_item_nvgre_mask,
> > > +          sizeof(struct rte_flow_item_nvgre), error);
> > > + if (ret < 0)
> > > +         return ret;
> > > + return 0;
> > > +}
> > > +
> > >  static int
> > >  flow_null_validate(struct rte_eth_dev *dev __rte_unused,
> > >              const struct rte_flow_attr *attr __rte_unused,
> > > diff --git a/drivers/net/mlx5/mlx5_flow.h b/drivers/net/mlx5/mlx5_flow.h
> > > index 3f96bec..24da74b 100644
> > > --- a/drivers/net/mlx5/mlx5_flow.h
> > > +++ b/drivers/net/mlx5/mlx5_flow.h
> > > @@ -48,6 +48,7 @@
> > >  #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
> > >  #define MLX5_FLOW_LAYER_GRE (1u << 14)
> > >  #define MLX5_FLOW_LAYER_MPLS (1u << 15)
> > > +/* List of tunnel Layer bits continued below. */
> > >
> > >  /* General pattern items bits. */
> > >  #define MLX5_FLOW_ITEM_METADATA (1u << 16)
> > > @@ -58,8 +59,10 @@
> > >  #define MLX5_FLOW_LAYER_ICMP6 (1u << 19)
> > >  #define MLX5_FLOW_LAYER_GRE_KEY (1u << 20)
> > >
> > > +/* Pattern tunnel Layer bits (continued). */
> > >  #define MLX5_FLOW_LAYER_IPIP (1u << 21)
> > >  #define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 22)
> > > +#define MLX5_FLOW_LAYER_NVGRE (1u << 23)
> > >
> > >  /* Outer Masks. */
> > >  #define MLX5_FLOW_LAYER_OUTER_L3 \
> > > @@ -79,7 +82,7 @@
> > >  /* Tunnel Masks. */
> > >  #define MLX5_FLOW_LAYER_TUNNEL \
> > >   (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
> > > -  MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS | \
> > > +  MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
> > >    MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP)
> > >
> > >  /* Inner Masks. */
> > > @@ -518,5 +521,8 @@ int mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
> > >                              uint64_t item_flags,
> > >                              uint8_t target_protocol,
> > >                              struct rte_flow_error *error);
> > > -
> > > +int mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
> > > +                           uint64_t item_flags,
> > > +                           uint8_t target_protocol,
> > > +                           struct rte_flow_error *error);
> > >  #endif /* RTE_PMD_MLX5_FLOW_H_ */
> > > diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
> > > index 7240d3b..ab758d4 100644
> > > --- a/drivers/net/mlx5/mlx5_flow_dv.c
> > > +++ b/drivers/net/mlx5/mlx5_flow_dv.c
> > > @@ -2966,7 +2966,6 @@ struct field_modify_info modify_tcp[] = {
> > >                                          MLX5_FLOW_LAYER_OUTER_L4_UDP;
> > >                   break;
> > >           case RTE_FLOW_ITEM_TYPE_GRE:
> > > -         case RTE_FLOW_ITEM_TYPE_NVGRE:
> > >                   ret = mlx5_flow_validate_item_gre(items, item_flags,
> > >                                                     next_protocol, error);
> > >                   if (ret < 0)
> > > @@ -2974,6 +2973,14 @@ struct field_modify_info modify_tcp[] = {
> > >                   gre_item = items;
> > >                   last_item = MLX5_FLOW_LAYER_GRE;
> > >                   break;
> > > +         case RTE_FLOW_ITEM_TYPE_NVGRE:
> > > +                 ret = mlx5_flow_validate_item_nvgre(items, item_flags,
> > > +                                                     next_protocol,
> > > +                                                     error);
> > > +                 if (ret < 0)
> > > +                         return ret;
> > > +                 last_item = MLX5_FLOW_LAYER_NVGRE;
> > > +                 break;
> > >           case RTE_FLOW_ITEM_TYPE_GRE_KEY:
> > >                   ret = mlx5_flow_validate_item_gre_key
> > >                           (items, item_flags, gre_item, error);
> > > @@ -3919,7 +3926,21 @@ struct field_modify_info modify_tcp[] = {
> > >   int size;
> > >   int i;
> > >
> > > - flow_dv_translate_item_gre(matcher, key, item, inner);
> > > + /* For NVGRE, GRE header fields must be set with defined values. */
> > > + const struct rte_flow_item_gre gre_spec = {
> > > +         .c_rsvd0_ver = RTE_BE16(0x2000),
> > > +         .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
> > > + };
> > > + const struct rte_flow_item_gre gre_mask = {
> > > +         .c_rsvd0_ver = RTE_BE16(UINT16_MAX),
> > Well, it should be `RTE_BE16(0xB000)`, which, I think, is more explicit,
> > because our NIC only supports matching on the C, K, and S bits, not the
> > other bits in c_rsvd0_ver. Our PMD just ignores the other bits.
> 
> The spec is specifically 0x2000, with a full 0xffff mask, to make sure all
> bits are covered.
I see. You want to make sure that all bits are covered, but what I want to
point out is that our PMD only matches the C, K, and S bits and ignores the
others. So even though the spec/mask is 0x2000/0xffff, it will still match
on 0x2001, for example.
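
(As a side note, a rough sketch of the c_rsvd0_ver bit layout under
discussion, per RFC 2784/2890/7637; the GRE_CKS_MASK name below is only a
hypothetical helper for illustration, not something in the patch:)

	#include <rte_byteorder.h>

	/*
	 * GRE c_rsvd0_ver layout (network bit order):
	 *   C (checksum present) = 0x8000
	 *   K (key present)      = 0x2000
	 *   S (sequence present) = 0x1000
	 *   version              = 0x0007 (low 3 bits)
	 *
	 * NVGRE requires C=0, K=1, S=0 and version 0, i.e. c_rsvd0_ver == 0x2000.
	 * A 0xB000 mask covers exactly the C|K|S bits; if the hardware matches
	 * only those bits, a packet with c_rsvd0_ver 0x2001 (a version bit set)
	 * still matches a 0x2000/0xffff rule.
	 */
	#define GRE_CKS_MASK RTE_BE16(0x8000 | 0x2000 | 0x1000) /* == RTE_BE16(0xB000) */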

> 
> > 
> > > +         .protocol = RTE_BE16(UINT16_MAX),
> > > + };
> > > + const struct rte_flow_item gre_item = {
> > > +         .spec = &gre_spec,
> > > +         .mask = &gre_mask,
> > > +         .last = NULL,
> > > + };
> > > + flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
> > >   if (!nvgre_v)
> > >           return;
> > >   if (!nvgre_m)
> > > diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
> > > index dfa79e2..d732757 100644
> > > --- a/drivers/net/mlx5/mlx5_rxtx.h
> > > +++ b/drivers/net/mlx5/mlx5_rxtx.h
> > > @@ -40,7 +40,7 @@
> > >  #include "mlx5_glue.h"
> > >
> > >  /* Support tunnel matching. */
> > > -#define MLX5_FLOW_TUNNEL 5
> > > +#define MLX5_FLOW_TUNNEL 6
> > >
> > >  struct mlx5_rxq_stats {
> > >  #ifdef MLX5_PMD_SOFT_COUNTERS
> > > --
> > > 1.8.3.1
> > >
