Signed-off-by: Nelio Laranjeiro <nelio.laranje...@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 191 ++++++++++++++++++++++++++++++++++-
 1 file changed, 186 insertions(+), 5 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 47c55b426..636aaabe8 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -54,6 +54,7 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 /* Pattern tunnel Layer bits. */
 #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
 #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
+#define MLX5_FLOW_LAYER_GRE (1u << 14)
 
 /* Outer Masks. */
 #define MLX5_FLOW_LAYER_OUTER_L3 \
@@ -66,7 +67,8 @@ extern const struct eth_dev_ops mlx5_dev_ops_isolate;
 
 /* Tunnel masks. */
 #define MLX5_FLOW_LAYER_TUNNEL \
-       (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE)
+       (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
+        MLX5_FLOW_LAYER_GRE)
 
 /* Inner Masks. */
 #define MLX5_FLOW_LAYER_INNER_L3 \
@@ -215,6 +217,9 @@ static const struct rte_flow_expand_node 
mlx5_support_expansion[] = {
        },
 };
 
+/* Tunnel protocol values. */
+#define MLX5_PROTOCOL_GRE 47
+
 /** Handles information leading to a drop fate. */
 struct mlx5_flow_verbs {
        LIST_ENTRY(mlx5_flow_verbs) next;
@@ -1005,12 +1010,23 @@ mlx5_flow_item_ipv6(const struct rte_flow_item *item, 
struct rte_flow *flow,
                                                  item,
                                                  "L3 cannot follow an L4"
                                                  " layer");
+               /*
+                * IPv6 is not recognised by the NIC inside a GRE tunnel.
+                * Reject such rules here as they would otherwise be
+                * accepted but never matched.  Tested with OFED 4.3-3.0.2.1.
+                */
+               if (tunnel && layers & MLX5_FLOW_LAYER_GRE)
+                       return rte_flow_error_set(error, ENOTSUP,
+                                                 RTE_FLOW_ERROR_TYPE_ITEM,
+                                                 item,
+                                                 "IPv6 inside a GRE tunnel is"
+                                                 " not recognised.");
                if (!mask)
                        mask = &rte_flow_item_ipv6_mask;
-               ret = mlx5_flow_item_validate(item, (const uint8_t *)mask,
-                                             (const uint8_t *)&nic_mask,
-                                             sizeof(struct rte_flow_item_ipv6),
-                                             error);
+               ret = mlx5_flow_item_validate
+                       (item, (const uint8_t *)mask,
+                        (const uint8_t *)&nic_mask,
+                        sizeof(struct rte_flow_item_ipv6), error);
                if (ret < 0)
                        return ret;
        }
@@ -1411,6 +1427,168 @@ mlx5_flow_item_vxlan_gpe(struct rte_eth_dev *dev,
        return size;
 }
 
+/**
+ * Update the protocol in Verbs IPv4 spec.
+ *
+ * @param[in, out] attr
+ *   Pointer to Verbs attributes structure.
+ * @param[in] protocol
+ *   Protocol value to set if none is present in the specification.
+ */
+static void
+mlx5_flow_item_gre_ipv4_protocol_update(struct ibv_flow_attr *attr,
+                                       uint8_t protocol)
+{
+       unsigned int i;
+       const enum ibv_flow_spec_type search = IBV_FLOW_SPEC_IPV4_EXT;
+       struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+               ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+       if (!attr)
+               return;
+       for (i = 0; i != attr->num_of_specs; ++i) {
+               if (hdr->type == search) {
+                       struct ibv_flow_spec_ipv4_ext *ip =
+                               (struct ibv_flow_spec_ipv4_ext *)hdr;
+
+                       if (!ip->val.proto) {
+                               ip->val.proto = protocol;
+                               ip->mask.proto = 0xff;
+                       }
+                       break;
+               }
+               hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+       }
+}
+
+/**
+ * Update the protocol in Verbs IPv6 spec.
+ *
+ * @param[in, out] attr
+ *   Pointer to Verbs attributes structure.
+ * @param[in] protocol
+ *   Protocol value to set if none is present in the specification.
+ */
+static void
+mlx5_flow_item_gre_ipv6_protocol_update(struct ibv_flow_attr *attr,
+                                       uint8_t protocol)
+{
+       unsigned int i;
+       const enum ibv_flow_spec_type search = IBV_FLOW_SPEC_IPV6;
+       struct ibv_spec_header *hdr = (struct ibv_spec_header *)
+               ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
+
+       if (!attr)
+               return;
+       for (i = 0; i != attr->num_of_specs; ++i) {
+               if (hdr->type == search) {
+                       struct ibv_flow_spec_ipv6 *ip =
+                               (struct ibv_flow_spec_ipv6 *)hdr;
+
+                       if (!ip->val.next_hdr) {
+                               ip->val.next_hdr = protocol;
+                               ip->mask.next_hdr = 0xff;
+                       }
+                       break;
+               }
+               hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
+       }
+}
+
+/**
+ * Validate GRE layer and possibly create the Verbs specification.
+ *
+ * GRE requires an outer L3 layer and no other tunnel layer present.
+ *
+ * @param[in] item
+ *   Item specification.
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ * @param[in] flow_size
+ *   Size in bytes of the available space to store the flow information.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   Size in bytes necessary for the conversion, a negative errno value
+ *   otherwise and rte_errno is set.
+ */
+static int
+mlx5_flow_item_gre(const struct rte_flow_item *item,
+                  struct rte_flow *flow, const size_t flow_size,
+                  struct rte_flow_error *error)
+{
+       struct mlx5_flow_verbs *verbs = flow->cur_verbs;
+       const struct rte_flow_item_gre *spec = item->spec;
+       const struct rte_flow_item_gre *mask = item->mask;
+       const uint32_t layers = mlx5_flow_layers(flow);
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+       unsigned int size = sizeof(struct ibv_flow_spec_gre);
+       struct ibv_flow_spec_gre tunnel = {
+               .type = IBV_FLOW_SPEC_GRE,
+               .size = size,
+       };
+#else
+       unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
+       struct ibv_flow_spec_tunnel tunnel = {
+               .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
+               .size = size,
+       };
+#endif
+       int ret;
+
+       if (layers & MLX5_FLOW_LAYER_TUNNEL)
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         item,
+                                         "a tunnel is already present");
+       if (!(layers & MLX5_FLOW_LAYER_OUTER_L3))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         item,
+                                         "L3 Layer is missing");
+       if (!mask)
+               mask = &rte_flow_item_gre_mask;
+       ret = mlx5_flow_item_validate
+               (item, (const uint8_t *)mask,
+                (const uint8_t *)&rte_flow_item_gre_mask,
+                sizeof(struct rte_flow_item_gre), error);
+       if (ret < 0)
+               return ret;
+#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
+       if (spec) {
+               tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
+               tunnel.val.protocol = spec->protocol;
+               tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
+               tunnel.mask.protocol = mask->protocol;
+               /* Remove unwanted bits from values. */
+               tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
+               tunnel.val.protocol &= tunnel.mask.protocol;
+               tunnel.val.key &= tunnel.mask.key;
+       }
+#else
+       if (spec && (spec->protocol & mask->protocol))
+               return rte_flow_error_set(error, ENOTSUP,
+                                         RTE_FLOW_ERROR_TYPE_ITEM,
+                                         item,
+                                         "without MPLS support the"
+                                         " specification cannot be used for"
+                                         " filtering");
+#endif /* !HAVE_IBV_DEVICE_MPLS_SUPPORT */
+       if (size <= flow_size) {
+               if (layers & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
+                       mlx5_flow_item_gre_ipv4_protocol_update
+                               (verbs->attr, MLX5_PROTOCOL_GRE);
+               else
+                       mlx5_flow_item_gre_ipv6_protocol_update
+                               (verbs->attr, MLX5_PROTOCOL_GRE);
+               mlx5_flow_spec_verbs_add(flow, &tunnel, size);
+       }
+       mlx5_flow_layers_update(flow, MLX5_FLOW_LAYER_GRE);
+       flow->ptype = RTE_PTYPE_TUNNEL_GRE;
+       return size;
+}
+
 /**
  * Validate items provided by the user.
  *
@@ -1469,6 +1647,9 @@ mlx5_flow_items(struct rte_eth_dev *dev,
                        ret = mlx5_flow_item_vxlan_gpe(dev, items, flow,
                                                       remain, error);
                        break;
+               case RTE_FLOW_ITEM_TYPE_GRE:
+                       ret = mlx5_flow_item_gre(items, flow, remain, error);
+                       break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
-- 
2.18.0

Reply via email to