Signed-off-by: Nelio Laranjeiro <nelio.laranje...@6wind.com>
---
 drivers/net/mlx5/mlx5_flow.c | 131 +++++++++++++++++++++++++----------
 drivers/net/mlx5/mlx5_rxtx.h |   1 -
 2 files changed, 96 insertions(+), 36 deletions(-)
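
For context, here is a minimal sketch (not part of the patch) of how an
application could request inner (level 2) RSS on VXLAN traffic once this
change is in place. The helper name, queue list, RSS types and pattern are
illustrative assumptions, not taken from this patch:

    /* Spread the inner (encapsulated) headers of VXLAN flows over two queues. */
    #include <rte_errno.h>
    #include <rte_ethdev.h>
    #include <rte_flow.h>

    static const uint16_t queues[] = { 0, 1 };

    static int
    create_inner_rss_flow(uint16_t port_id, struct rte_flow_error *error)
    {
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                    { .type = RTE_FLOW_ITEM_TYPE_UDP },
                    { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_rss rss = {
                    .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
                    .level = 2, /* hash on the inner headers */
                    .types = ETH_RSS_IP,
                    .key_len = 0, /* use the PMD default RSS key */
                    .key = NULL,
                    .queue_num = 2,
                    .queue = queues,
            };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };

            /* rte_flow_create() returns NULL and sets rte_errno on failure. */
            return rte_flow_create(port_id, &attr, pattern, actions, error) ?
                   0 : -rte_errno;
    }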

diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index 7dda88641..eedf0c461 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -219,6 +219,8 @@ struct rte_flow {
        struct mlx5_flow_verbs *cur_verbs;
        /**< Current Verbs flow structure being filled. */
        struct rte_flow_action_rss rss;/**< RSS context. */
+       uint32_t ptype;
+       /**< Tunnel packet type to store in the Rx queue. */
        uint8_t key[40]; /**< RSS hash key. */
        uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
 };
@@ -1320,13 +1322,15 @@ mlx5_flow_action_queue(struct rte_eth_dev *dev,
  *   Pointer to flow structure.
  * @param types
  *   RSS types for this flow (see ETH_RSS_*).
+ * @param level
+ *   RSS level.
  *
  * @return
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
 mlx5_flow_action_rss_verbs_attr(struct rte_eth_dev *dev, struct rte_flow *flow,
-                               uint32_t types)
+                               uint32_t types, uint32_t level)
 {
        const uint32_t layers = mlx5_flow_layers(flow);
        uint64_t hash_fields;
@@ -1374,6 +1378,8 @@ mlx5_flow_action_rss_verbs_attr(struct rte_eth_dev *dev, struct rte_flow *flow,
                hash_fields = 0;
                priority = 2;
        }
+       if (hash_fields && level == 2)
+               hash_fields |= IBV_RX_HASH_INNER;
        flow->cur_verbs->hash_fields = hash_fields;
        flow->cur_verbs->attr->priority =
                mlx5_flow_priority(dev, flow->attributes.priority, priority);
@@ -1416,7 +1422,7 @@ mlx5_flow_action_rss(struct rte_eth_dev *dev,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->func,
                                          "RSS hash function not supported");
-       if (rss->level > 1)
+       if (rss->level > 2)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->level,
@@ -1456,6 +1462,7 @@ mlx5_flow_action_rss(struct rte_eth_dev *dev,
        flow->rss.queue_num = rss->queue_num;
        memcpy(flow->key, rss->key, rss_hash_default_key_len);
        flow->rss.types = rss->types;
+       flow->rss.level = rss->level;
        flow->fate |= MLX5_FLOW_FATE_RSS;
        return 0;
 }
@@ -1814,7 +1821,8 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
                        flow->cur_verbs->attr->priority =
                                flow->attributes.priority;
                        ret = mlx5_flow_action_rss_verbs_attr(dev, flow,
-                                                             flow->rss.types);
+                                                             flow->rss.types,
+                                                             flow->rss.level);
                        if (ret < 0)
                                goto error;
                        LIST_INSERT_HEAD(&flow->verbs, flow->cur_verbs, next);
@@ -1828,27 +1836,6 @@ mlx5_flow_merge(struct rte_eth_dev *dev, struct rte_flow *flow,
        return ret;
 }
 
-/**
- * Mark the Rx queues mark flag if the flow has a mark or flag modifier.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param flow
- *   Pointer to flow structure.
- */
-static void
-mlx5_flow_rxq_mark(struct rte_eth_dev *dev, struct rte_flow *flow)
-{
-       struct priv *priv = dev->data->dev_private;
-       const uint32_t mask = MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK;
-       uint32_t i;
-
-       if (!(flow->modifier & mask))
-               return;
-       for (i = 0; i != flow->rss.queue_num; ++i)
-               (*priv->rxqs)[(*flow->queue)[i]]->mark = 1;
-}
-
 /**
  * Validate a flow supported by the NIC.
  *
@@ -1978,6 +1965,88 @@ mlx5_flow_fate_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
        return -rte_errno;
 }
 
+/**
+ * Set the Tunnel packet type and the Mark in the Rx queue.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param idx
+ *   Rx queue index.
+ */
+static void
+mlx5_flow_rxq(struct rte_eth_dev *dev, uint16_t idx)
+{
+       struct priv *priv = dev->data->dev_private;
+       struct rte_flow *flow;
+       const uint32_t mark_m = MLX5_FLOW_MOD_FLAG | MLX5_FLOW_MOD_MARK;
+       uint32_t ptype = 0;
+       uint32_t mark = 0;
+
+       TAILQ_FOREACH(flow, &priv->flows, next) {
+               unsigned int i;
+
+               for (i = 0; i != flow->rss.queue_num; ++i) {
+                       if ((*flow->queue)[i] == idx) {
+                               mark |= !!(flow->modifier & mark_m);
+                               if (ptype == 0)
+                                       ptype = flow->ptype;
+                               else if (ptype != flow->ptype)
+                                       ptype = (uint32_t)-1;
+                               break;
+                       }
+               }
+       }
+       if (ptype == (uint32_t)-1)
+               ptype = 0;
+       (*priv->rxqs)[idx]->tunnel = ptype;
+       (*priv->rxqs)[idx]->mark = mark;
+}
+
+/**
+ * Set the tunnel packet type and Mark in the Rx queues. If several tunnel
+ * packet types are possible on a queue, the tunnel information for that
+ * queue is simply cleared; the Mark flag is not affected by this case.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+static void
+mlx5_flow_rxqs(struct rte_eth_dev *dev)
+{
+       struct priv *priv = dev->data->dev_private;
+       unsigned int idx;
+       unsigned int n;
+
+       for (idx = 0, n = 0; n != priv->rxqs_n; ++idx) {
+               if (!(*priv->rxqs)[idx])
+                       continue;
+               mlx5_flow_rxq(dev, idx);
+               n++;
+       }
+}
+
+/**
+ * Clear tunnel ptypes and Mark in Rx queues.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+static void
+mlx5_flow_rxqs_clear(struct rte_eth_dev *dev)
+{
+       struct priv *priv = dev->data->dev_private;
+       unsigned int idx;
+       unsigned int n;
+
+       for (idx = 0, n = 0; n != priv->rxqs_n; ++idx) {
+               if (!(*priv->rxqs)[idx])
+                       continue;
+               (*priv->rxqs)[idx]->tunnel = 0;
+               (*priv->rxqs)[idx]->mark = 0;
+               n++;
+       }
+}
+
 /**
  * Create a flow.
  *
@@ -2039,8 +2108,8 @@ mlx5_flow_list_create(struct rte_eth_dev *dev,
                        return NULL;
                }
        }
-       mlx5_flow_rxq_mark(dev, flow);
        TAILQ_INSERT_TAIL(list, flow, next);
+       mlx5_flow_rxqs(dev);
        return flow;
 }
 
@@ -2144,19 +2213,11 @@ mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
 void
 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
 {
-       struct priv *priv = dev->data->dev_private;
        struct rte_flow *flow;
-       unsigned int i;
-       unsigned int idx;
 
        TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
                mlx5_flow_fate_remove(dev, flow);
-       for (idx = 0, i = 0; idx != priv->rxqs_n; ++i) {
-               if (!(*priv->rxqs)[idx])
-                       continue;
-               (*priv->rxqs)[idx]->mark = 0;
-               ++idx;
-       }
+       mlx5_flow_rxqs_clear(dev);
 }
 
 /**
@@ -2181,8 +2242,8 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
                ret = mlx5_flow_fate_apply(dev, flow, &error);
                if (ret < 0)
                        goto error;
-               mlx5_flow_rxq_mark(dev, flow);
        }
+       mlx5_flow_rxqs(dev);
        return 0;
 error:
        ret = rte_errno; /* Save rte_errno before cleanup. */
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index bb67c32a6..57504ceb2 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -136,7 +136,6 @@ struct mlx5_rxq_ctrl {
        struct priv *priv; /* Back pointer to private data. */
        struct mlx5_rxq_data rxq; /* Data path structure. */
        unsigned int socket; /* CPU socket ID for allocations. */
-       uint32_t tunnel_types[16]; /* Tunnel type counter. */
        unsigned int irq:1; /* Whether IRQ is enabled. */
        uint16_t idx; /* Queue index. */
 };
-- 
2.18.0
