> -----Original Message-----
> From: Nélio Laranjeiro <nelio.laranje...@6wind.com>
> Sent: Friday, April 13, 2018 9:37 PM
> To: Xueming(Steven) Li <xuemi...@mellanox.com>
> Cc: Shahaf Shuler <shah...@mellanox.com>; dev@dpdk.org
> Subject: Re: [PATCH v3 11/14] net/mlx5: support MPLS-in-GRE and MPLS-in-UDP
> 
> Some nits,
> 
> On Fri, Apr 13, 2018 at 07:20:20PM +0800, Xueming Li wrote:
> > This patch supports the new tunnel types MPLS-in-GRE and MPLS-in-UDP.
> > Flow pattern examples:
> >   ipv4 proto is 47 / gre proto is 0x8847 / mpls
> >   ipv4 / udp dst is 6635 / mpls / end
> >
> > Signed-off-by: Xueming Li <xuemi...@mellanox.com>
> > ---
> >  drivers/net/mlx5/Makefile    |   5 ++
> >  drivers/net/mlx5/mlx5.c      |  15 +++++
> >  drivers/net/mlx5/mlx5.h      |   1 +
> >  drivers/net/mlx5/mlx5_flow.c | 148 ++++++++++++++++++++++++++++++++++++++++++-
> >  4 files changed, 166 insertions(+), 3 deletions(-)
> >
> > diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile
> > index f9a6c460b..33553483e 100644
> > --- a/drivers/net/mlx5/Makefile
> > +++ b/drivers/net/mlx5/Makefile
> > @@ -131,6 +131,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh
> >             enum MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS \
> >             $(AUTOCONF_OUTPUT)
> >     $Q sh -- '$<' '$@' \
> > +           HAVE_IBV_DEVICE_MPLS_SUPPORT \
> > +           infiniband/verbs.h \
> > +           enum IBV_FLOW_SPEC_MPLS \
> > +           $(AUTOCONF_OUTPUT)
> > +   $Q sh -- '$<' '$@' \
> >             HAVE_IBV_WQ_FLAG_RX_END_PADDING \
> >             infiniband/verbs.h \
> >             enum IBV_WQ_FLAG_RX_END_PADDING \
> > diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
> > index 38118e524..89b683d6e 100644
> > --- a/drivers/net/mlx5/mlx5.c
> > +++ b/drivers/net/mlx5/mlx5.c
> > @@ -614,6 +614,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
> >     unsigned int cqe_comp;
> >     unsigned int tunnel_en = 0;
> >     unsigned int verb_priorities = 0;
> > +   unsigned int mpls_en = 0;
> >     int idx;
> >     int i;
> >     struct mlx5dv_context attrs_out = {0};
> > @@ -720,12 +721,25 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
> >                           MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) &&
> >                          (attrs_out.tunnel_offloads_caps &
> >                           MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE));
> > +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
> > +           mpls_en = ((attrs_out.tunnel_offloads_caps &
> > +                       MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_MPLS_GRE) &&
> > +                      (attrs_out.tunnel_offloads_caps &
> > +                       MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_MPLS_UDP) &&
> > +                      (attrs_out.tunnel_offloads_caps &
> > +                     MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CTRL_DW_MPLS));
> > +#endif
> >     }
> >     DRV_LOG(DEBUG, "tunnel offloading is %ssupported",
> >             tunnel_en ? "" : "not ");
> > +   DRV_LOG(DEBUG, "MPLS over GRE/UDP offloading is %ssupported",
> > +           mpls_en ? "" : "not ");
> >  #else
> >     DRV_LOG(WARNING,
> >             "tunnel offloading disabled due to old OFED/rdma-core version");
> > +   DRV_LOG(WARNING,
> > +           "MPLS over GRE/UDP offloading disabled due to old"
> > +           " OFED/rdma-core version or firmware configuration");
> >  #endif
> >     if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) {
> >             err = errno;
> > @@ -749,6 +763,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
> >                     .cqe_comp = cqe_comp,
> >                     .mps = mps,
> >                     .tunnel_en = tunnel_en,
> > +                   .mpls_en = mpls_en,
> >                     .tx_vec_en = 1,
> >                     .rx_vec_en = 1,
> >                     .mpw_hdr_dseg = 0,
> > diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
> > index 6e4613fe0..efbcb2156 100644
> > --- a/drivers/net/mlx5/mlx5.h
> > +++ b/drivers/net/mlx5/mlx5.h
> > @@ -81,6 +81,7 @@ struct mlx5_dev_config {
> >     unsigned int vf:1; /* This is a VF. */
> >     unsigned int mps:2; /* Multi-packet send supported mode. */
> >     unsigned int tunnel_en:1;
> > +   unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */
> >     /* Whether tunnel stateless offloads are supported. */
> >     unsigned int flow_counter_en:1; /* Whether flow counter is supported. */
> >     unsigned int cqe_comp:1; /* CQE compression is enabled. */
> > diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
> > index 0fccd39b3..98edf1882 100644
> > --- a/drivers/net/mlx5/mlx5_flow.c
> > +++ b/drivers/net/mlx5/mlx5_flow.c
> > @@ -100,6 +100,11 @@ mlx5_flow_create_gre(const struct rte_flow_item *item,
> >                    const void *default_mask,
> >                    struct mlx5_flow_data *data);
> >
> > +static int
> > +mlx5_flow_create_mpls(const struct rte_flow_item *item,
> > +                 const void *default_mask,
> > +                 struct mlx5_flow_data *data);
> > +
> >  struct mlx5_flow_parse;
> >
> >  static void
> > @@ -247,12 +252,14 @@ struct rte_flow {
> >  #define IS_TUNNEL(type) ( \
> >     (type) == RTE_FLOW_ITEM_TYPE_VXLAN || \
> >     (type) == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || \
> > +   (type) == RTE_FLOW_ITEM_TYPE_MPLS || \
> >     (type) == RTE_FLOW_ITEM_TYPE_GRE)
> >
> >  const uint32_t flow_ptype[] = {
> >     [RTE_FLOW_ITEM_TYPE_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
> >     [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = RTE_PTYPE_TUNNEL_VXLAN_GPE,
> >     [RTE_FLOW_ITEM_TYPE_GRE] = RTE_PTYPE_TUNNEL_GRE,
> > +   [RTE_FLOW_ITEM_TYPE_MPLS] = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
> >  };
> >
> >  #define PTYPE_IDX(t) ((RTE_PTYPE_TUNNEL_MASK & (t)) >> 12)
> > @@ -263,6 +270,10 @@ const uint32_t ptype_ext[] = {
> >     [PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)] = RTE_PTYPE_TUNNEL_VXLAN_GPE |
> >                                               RTE_PTYPE_L4_UDP,
> >     [PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)] = RTE_PTYPE_TUNNEL_GRE,
> > +   [PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)] =
> > +           RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
> > +   [PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)] =
> > +           RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP,
> >  };
> >
> >  /** Structure to generate a simple graph of layers supported by the NIC. */
> > @@ -399,7 +410,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
> >     },
> >     [RTE_FLOW_ITEM_TYPE_UDP] = {
> >             .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN,
> > -                          RTE_FLOW_ITEM_TYPE_VXLAN_GPE),
> > +                          RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
> > +                          RTE_FLOW_ITEM_TYPE_MPLS),
> >             .actions = valid_actions,
> >             .mask = &(const struct rte_flow_item_udp){
> >                     .hdr = {
> > @@ -428,7 +440,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
> >     [RTE_FLOW_ITEM_TYPE_GRE] = {
> >             .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
> >                            RTE_FLOW_ITEM_TYPE_IPV4,
> > -                          RTE_FLOW_ITEM_TYPE_IPV6),
> > +                          RTE_FLOW_ITEM_TYPE_IPV6,
> > +                          RTE_FLOW_ITEM_TYPE_MPLS),
> >             .actions = valid_actions,
> >             .mask = &(const struct rte_flow_item_gre){
> >                     .protocol = -1,
> > @@ -436,7 +449,11 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
> >             .default_mask = &rte_flow_item_gre_mask,
> >             .mask_sz = sizeof(struct rte_flow_item_gre),
> >             .convert = mlx5_flow_create_gre,
> > +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
> > +           .dst_sz = sizeof(struct ibv_flow_spec_gre),
> > +#else
> >             .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
> > +#endif
> >     },
> >     [RTE_FLOW_ITEM_TYPE_VXLAN] = {
> >             .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
> > @@ -464,6 +481,21 @@ static const struct mlx5_flow_items mlx5_flow_items[] = {
> >             .convert = mlx5_flow_create_vxlan_gpe,
> >             .dst_sz = sizeof(struct ibv_flow_spec_tunnel),
> >     },
> > +   [RTE_FLOW_ITEM_TYPE_MPLS] = {
> > +           .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH,
> > +                          RTE_FLOW_ITEM_TYPE_IPV4,
> > +                          RTE_FLOW_ITEM_TYPE_IPV6),
> > +           .actions = valid_actions,
> > +           .mask = &(const struct rte_flow_item_mpls){
> > +                   .label_tc_s = "\xff\xff\xf0",
> > +           },
> > +           .default_mask = &rte_flow_item_mpls_mask,
> > +           .mask_sz = sizeof(struct rte_flow_item_mpls),
> > +           .convert = mlx5_flow_create_mpls,
> > +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
> > +           .dst_sz = sizeof(struct ibv_flow_spec_mpls),
> > +#endif
> > +   },
> 
> Why is the whole item not under the #ifdef?

If the macro covered the whole item, creating an MPLS flow would dereference a
NULL pointer. The macro check inside mlx5_flow_create_mpls() prevents this
invalid data from being used.
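
To illustrate (a condensed sketch of the function added further down in this
patch, not extra code):

    static int
    mlx5_flow_create_mpls(const struct rte_flow_item *item,
                          const void *default_mask,
                          struct mlx5_flow_data *data)
    {
    #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
            /* The item stays in mlx5_flow_items[], so its .convert callback
             * is never a NULL pointer; creation fails cleanly here instead. */
            return rte_flow_error_set(data->error, EINVAL,
                                      RTE_FLOW_ERROR_TYPE_ITEM, item,
                                      "MPLS not supported by driver");
    #else
            /* Translate the item into struct ibv_flow_spec_mpls (see the
             * full hunk below) and copy it into the parser. */
            return 0;
    #endif
    }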

> 
> >  };
> >
> >  /** Structure to pass to the conversion function. */
> > @@ -912,7 +944,9 @@ mlx5_flow_convert_items_validate(struct rte_eth_dev *dev,
> >             if (ret)
> >                     goto exit_item_not_supported;
> >             if (IS_TUNNEL(items->type)) {
> > -                   if (parser->tunnel) {
> > +                   if (parser->tunnel &&
> > +                      !(parser->tunnel == RTE_PTYPE_TUNNEL_GRE &&
> > +                        items->type == RTE_FLOW_ITEM_TYPE_MPLS)) {
> >                             rte_flow_error_set(error, ENOTSUP,
> >                                                RTE_FLOW_ERROR_TYPE_ITEM,
> >                                                items,
> > @@ -920,6 +954,16 @@ mlx5_flow_convert_items_validate(struct rte_eth_dev *dev,
> >                                                " tunnel encapsulations.");
> >                             return -rte_errno;
> >                     }
> > +                   if (items->type == RTE_FLOW_ITEM_TYPE_MPLS &&
> > +                       !priv->config.mpls_en) {
> > +                           rte_flow_error_set(error, ENOTSUP,
> > +                                              RTE_FLOW_ERROR_TYPE_ITEM,
> > +                                              items,
> > +                                              "MPLS not supported or"
> > +                                              " disabled in firmware"
> > +                                              " configuration.");
> > +                           return -rte_errno;
> > +                   }
> >                     if (!priv->config.tunnel_en &&
> >                         parser->rss_conf.level) {
> >                             rte_flow_error_set(error, ENOTSUP,
> > @@ -1880,6 +1924,80 @@ mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item,
> >  }
> >
> >  /**
> > + * Convert MPLS item to Verbs specification.
> > + * Tunnel types currently supported are MPLS-in-GRE and MPLS-in-UDP.
> > + *
> > + * @param item[in]
> > + *   Item specification.
> > + * @param default_mask[in]
> > + *   Default bit-masks to use when item->mask is not provided.
> > + * @param data[in, out]
> > + *   User structure.
> > + *
> > + * @return
> > + *   0 on success, a negative errno value otherwise and rte_errno is set.
> > + */
> > +static int
> > +mlx5_flow_create_mpls(const struct rte_flow_item *item __rte_unused,
> > +                 const void *default_mask __rte_unused,
> > +                 struct mlx5_flow_data *data __rte_unused)
> > +{
> > +#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
> > +   return rte_flow_error_set(data->error, EINVAL,
> 
> ENOTSUP is more accurate and keeps consistency with the other errors.
> 
> > +                             RTE_FLOW_ERROR_TYPE_ITEM,
> > +                             item,
> > +                             "MPLS not supported by driver");
> > +#else
> > +   unsigned int i;
> > +   const struct rte_flow_item_mpls *spec = item->spec;
> > +   const struct rte_flow_item_mpls *mask = item->mask;
> > +   struct mlx5_flow_parse *parser = data->parser;
> > +   unsigned int size = sizeof(struct ibv_flow_spec_mpls);
> > +   struct ibv_flow_spec_mpls mpls = {
> > +           .type = IBV_FLOW_SPEC_MPLS,
> > +           .size = size,
> > +   };
> > +   union tag {
> > +           uint32_t tag;
> > +           uint8_t label[4];
> > +   } id;
> > +
> > +   id.tag = 0;
> > +   parser->inner = IBV_FLOW_SPEC_INNER;
> > +   if (parser->layer == HASH_RXQ_UDPV4 ||
> > +       parser->layer == HASH_RXQ_UDPV6) {
> > +           parser->tunnel =
> > +                   ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)];
> > +           parser->out_layer = parser->layer;
> > +   } else {
> > +           parser->tunnel =
> > +                   ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)];
> > +   }
> > +   parser->layer = HASH_RXQ_TUNNEL;
> > +   if (spec) {
> > +           if (!mask)
> > +                   mask = default_mask;
> > +           memcpy(&id.label[1], spec->label_tc_s, 3);
> > +           id.label[0] = spec->ttl;
> > +           mpls.val.tag = id.tag;
> > +           memcpy(&id.label[1], mask->label_tc_s, 3);
> > +           id.label[0] = mask->ttl;
> > +           mpls.mask.tag = id.tag;
> > +           /* Remove unwanted bits from values. */
> > +           mpls.val.tag &= mpls.mask.tag;
> > +   }
> > +   mlx5_flow_create_copy(parser, &mpls, size);
> > +   for (i = 0; i != hash_rxq_init_n; ++i) {
> > +           if (!parser->queue[i].ibv_attr)
> > +                   continue;
> > +           parser->queue[i].ibv_attr->flags |=
> > +                   IBV_FLOW_ATTR_FLAGS_ORDERED_SPEC_LIST;
> > +   }
> > +   return 0;
> > +#endif
> > +}
> > +
> > +/**
> >   * Convert GRE item to Verbs specification.
> >   *
> >   * @param item[in]
> > @@ -1898,16 +2016,40 @@ mlx5_flow_create_gre(const struct rte_flow_item *item __rte_unused,
> >                  struct mlx5_flow_data *data)
> >  {
> >     struct mlx5_flow_parse *parser = data->parser;
> > +#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
> >     unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
> >     struct ibv_flow_spec_tunnel tunnel = {
> >             .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL,
> >             .size = size,
> >     };
> > +#else
> > +   const struct rte_flow_item_gre *spec = item->spec;
> > +   const struct rte_flow_item_gre *mask = item->mask;
> > +   unsigned int size = sizeof(struct ibv_flow_spec_gre);
> > +   struct ibv_flow_spec_gre tunnel = {
> > +           .type = parser->inner | IBV_FLOW_SPEC_GRE,
> > +           .size = size,
> > +   };
> > +#endif
> >
> >     parser->inner = IBV_FLOW_SPEC_INNER;
> >     parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)];
> >     parser->out_layer = parser->layer;
> >     parser->layer = HASH_RXQ_TUNNEL;
> > +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
> > +   if (spec) {
> > +           if (!mask)
> > +                   mask = default_mask;
> > +           tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
> > +           tunnel.val.protocol = spec->protocol;
> > +           tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
> > +           tunnel.mask.protocol = mask->protocol;
> > +           /* Remove unwanted bits from values. */
> > +           tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
> > +           tunnel.val.protocol &= tunnel.mask.protocol;
> > +           tunnel.val.key &= tunnel.mask.key;
> > +   }
> > +#endif
> >     mlx5_flow_create_copy(parser, &tunnel, size);
> >     return 0;
> >  }
> > --
> > 2.13.3
> >
> 
> Thanks,
> 
> --
> Nélio Laranjeiro
> 6WIND
