Ethdev Tx offloads API has changed since:

commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")

This commit adds support for the new Tx offloads API: the "tso" device
argument is removed, the supported offloads are reported through a new
mlx5_priv_get_tx_port_offloads() helper, and the requested offloads are
validated at both device and Tx queue configuration time.

Signed-off-by: Shahaf Shuler <shah...@mellanox.com>
---
 doc/guides/nics/mlx5.rst         | 12 ++----
 drivers/net/mlx5/mlx5.c          | 49 +++++++++++------------
 drivers/net/mlx5/mlx5.h          |  2 +-
 drivers/net/mlx5/mlx5_ethdev.c   | 27 ++++++-------
 drivers/net/mlx5/mlx5_rxtx.h     |  3 +-
 drivers/net/mlx5/mlx5_rxtx_vec.c | 22 ++++++++---
 drivers/net/mlx5/mlx5_txq.c      | 74 ++++++++++++++++++++++++++++++++---
 7 files changed, 129 insertions(+), 60 deletions(-)
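
Note for reviewers (not part of the commit): a minimal sketch of how an
application would request Tx offloads under the new API. port_setup(),
the queue index and ring size are hypothetical; it assumes the
transitional ETH_TXQ_FLAGS_IGNORE flag to opt out of the legacy
txq_flags path:

  #include <rte_ethdev.h>

  static int
  port_setup(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
  {
          struct rte_eth_dev_info dev_info;
          struct rte_eth_conf conf = { 0 };
          struct rte_eth_txconf txq_conf;
          uint64_t wanted = DEV_TX_OFFLOAD_IPV4_CKSUM |
                            DEV_TX_OFFLOAD_TCP_CKSUM |
                            DEV_TX_OFFLOAD_TCP_TSO;
          int ret;

          /* Request only what the PMD reports as supported;
           * dev_configure() rejects anything else with ENOTSUP. */
          rte_eth_dev_info_get(port_id, &dev_info);
          conf.txmode.offloads = wanted & dev_info.tx_offload_capa;
          ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
          if (ret < 0)
                  return ret;
          /* Per-queue offloads must match the port-level ones, as
           * enforced by priv_is_tx_queue_offloads_allowed() below. */
          txq_conf = dev_info.default_txconf;
          txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
          txq_conf.offloads = conf.txmode.offloads;
          return rte_eth_tx_queue_setup(port_id, 0, 512,
                                        rte_eth_dev_socket_id(port_id),
                                        &txq_conf);
  }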

diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst
index f9558da89..1942eda47 100644
--- a/doc/guides/nics/mlx5.rst
+++ b/doc/guides/nics/mlx5.rst
@@ -253,8 +253,10 @@ Run-time configuration
   Enhanced MPS supports hybrid mode - mixing inlined packets and pointers
   in the same descriptor.
 
-  This option cannot be used in conjunction with ``tso`` below. When ``tso``
-  is set, ``txq_mpw_en`` is disabled.
+  This option cannot be used with certain offloads such as
+  ``DEV_TX_OFFLOAD_TCP_TSO, DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO,
+  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, DEV_TX_OFFLOAD_VLAN_INSERT``.
+  When those offloads are enabled, the MPW send function is disabled.
 
   It is currently only supported on the ConnectX-4 Lx and ConnectX-5
   families of adapters. Enabled by default.
@@ -275,12 +277,6 @@ Run-time configuration
 
   Effective only when Enhanced MPS is supported. The default value is 256.
 
-- ``tso`` parameter [int]
-
-  A nonzero value enables hardware TSO.
-  When hardware TSO is enabled, packets marked with TCP segmentation
-  offload will be divided into segments by the hardware. Disabled by default.
-
 - ``tx_vec_en`` parameter [int]
 
   A nonzero value enables Tx vector on ConnectX-5 only NIC if the number of
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index be21c72e8..03839271c 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -85,9 +85,6 @@
 /* Device parameter to limit the size of inlining packet. */
 #define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len"
 
-/* Device parameter to enable hardware TSO offload. */
-#define MLX5_TSO "tso"
-
 /* Device parameter to enable hardware Tx vector. */
 #define MLX5_TX_VEC_EN "tx_vec_en"
 
@@ -411,8 +408,6 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                args->mpw_hdr_dseg = !!tmp;
        } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) {
                args->inline_max_packet_sz = tmp;
-       } else if (strcmp(MLX5_TSO, key) == 0) {
-               args->tso = !!tmp;
        } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) {
                args->tx_vec_en = !!tmp;
        } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) {
@@ -445,7 +440,6 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs)
                MLX5_TXQ_MPW_EN,
                MLX5_TXQ_MPW_HDR_DSEG_EN,
                MLX5_TXQ_MAX_INLINE_LEN,
-               MLX5_TSO,
                MLX5_TX_VEC_EN,
                MLX5_RX_VEC_EN,
                NULL,
@@ -483,11 +477,22 @@ static struct rte_pci_driver mlx5_driver;
  * @param[in/out] priv
  *   Pointer to private structure.
  */
-static void
+void
 mlx5_args_update(struct priv *priv)
 {
        struct mlx5_args *args_def = &priv->args_default;
        struct mlx5_args *args = &priv->args;
+       uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
+       uint64_t tx_offloads = priv->dev ?
+                              priv->dev->data->dev_conf.txmode.offloads :
+                              0;
+       int tso = !!(tx_offloads & supp_tx_offloads & DEV_TX_OFFLOAD_TCP_TSO);
+       int vlan_insert = !!(tx_offloads & supp_tx_offloads &
+                            DEV_TX_OFFLOAD_VLAN_INSERT);
+       int tunnel = !!(tx_offloads & supp_tx_offloads &
+                       (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+                        DEV_TX_OFFLOAD_GRE_TNL_TSO |
+                        DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM));
 
        if (args_def->cqe_comp != MLX5_ARG_UNSET) {
                if (!priv->cqe_comp && args_def->cqe_comp) {
@@ -498,30 +503,28 @@ mlx5_args_update(struct priv *priv)
        } else {
                args->cqe_comp = priv->cqe_comp;
        }
-       if (args_def->tso != MLX5_ARG_UNSET) {
-               if (!priv->tso && args_def->tso) {
-                       WARN("TSO is not supported");
-                       args_def->tso = 0;
-               }
-               args->tso = args_def->tso;
-       } else {
-               args->tso = 0;
-       }
        if (args_def->mps != MLX5_ARG_UNSET) {
                if (!priv->mps && args_def->mps) {
                        WARN("multi-packet send not supported");
                        args_def->mps = MLX5_MPW_DISABLED;
-               }
-               if (args->tso && args_def->mps) {
+               } else if (tso && args_def->mps) {
                        WARN("multi-packet send not supported in conjunction "
                              "with TSO. MPS disabled");
                        args->mps = MLX5_MPW_DISABLED;
+               } else if (vlan_insert && args_def->mps) {
+                       WARN("multi-packet send not supported in conjunction "
+                             "with vlan insertion. MPS disabled");
+                       args->mps = MLX5_MPW_DISABLED;
+               } else if (tunnel && args_def->mps) {
+                       WARN("multi-packet send not supported in conjunction "
+                             "with tunnel offloads. MPS disabled");
+                       args->mps = MLX5_MPW_DISABLED;
                } else {
                        args->mps = args_def->mps ? priv->mps :
                                                    MLX5_MPW_DISABLED;
                }
        } else {
-               if (args->tso)
+               if (tso || vlan_insert || tunnel)
                        args->mps = MLX5_MPW_DISABLED;
                else
                        args->mps = priv->mps;
@@ -725,7 +728,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                        .mps = MLX5_ARG_UNSET,
                        .mpw_hdr_dseg = MLX5_ARG_UNSET,
                        .inline_max_packet_sz = MLX5_ARG_UNSET,
-                       .tso = MLX5_ARG_UNSET,
                        .tx_vec_en = MLX5_ARG_UNSET,
                        .rx_vec_en = MLX5_ARG_UNSET,
                };
@@ -882,10 +884,9 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 
                priv_get_num_vfs(priv, &num_vfs);
                priv->sriov = (num_vfs || sriov);
-               priv->tso = ((priv->tso) &&
-                           (device_attr_ex.tso_caps.max_tso > 0) &&
-                           (device_attr_ex.tso_caps.supported_qpts &
-                           (1 << IBV_QPT_RAW_PACKET)));
+               priv->tso = ((device_attr_ex.tso_caps.max_tso > 0) &&
+                            (device_attr_ex.tso_caps.supported_qpts &
+                            (1 << IBV_QPT_RAW_PACKET)));
                if (priv->tso)
                        priv->max_tso_payload_sz =
                                device_attr_ex.tso_caps.max_tso;
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 5e943000e..cba6d3ceb 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -97,7 +97,6 @@ struct mlx5_args {
        int mps;
        int mpw_hdr_dseg;
        int inline_max_packet_sz;
-       int tso;
        int tx_vec_en;
        int rx_vec_en;
 };
@@ -187,6 +186,7 @@ priv_unlock(struct priv *priv)
 /* mlx5.c */
 
 int mlx5_getenv_int(const char *);
+void mlx5_args_update(struct priv *);
 
 /* mlx5_ethdev.c */
 
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 5c59bc45e..decc6edfa 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -578,7 +578,15 @@ dev_configure(struct rte_eth_dev *dev)
        unsigned int reta_idx_n;
        const uint8_t use_app_rss_key =
                !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len;
-
+       uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv);
+       uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
+
+       if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
+               ERROR("Some Tx offloads are not supported "
+                     "requested 0x%lx supported 0x%lx\n",
+                     tx_offloads, supp_tx_offloads);
+               return ENOTSUP;
+       }
        if (use_app_rss_key &&
            (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
             rss_hash_default_key_len)) {
@@ -610,6 +618,8 @@ dev_configure(struct rte_eth_dev *dev)
                ERROR("cannot handle this many RX queues (%u)", rxqs_n);
                return EINVAL;
        }
+       /* Update args according to selected offloads. */
+       mlx5_args_update(priv);
        if (rxqs_n == priv->rxqs_n)
                return 0;
        INFO("%p: RX queues number update: %u -> %u",
@@ -700,20 +710,7 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
                 0) |
                (priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) |
                DEV_RX_OFFLOAD_TIMESTAMP;
-
-       if (!priv->args.mps)
-               info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
-       if (priv->hw_csum)
-               info->tx_offload_capa |=
-                       (DEV_TX_OFFLOAD_IPV4_CKSUM |
-                        DEV_TX_OFFLOAD_UDP_CKSUM |
-                        DEV_TX_OFFLOAD_TCP_CKSUM);
-       if (priv->args.tso)
-               info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
-       if (priv->tunnel_en)
-               info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-                                         DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
-                                         DEV_TX_OFFLOAD_GRE_TNL_TSO);
+       info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv);
        if (priv_get_ifname(priv, &ifname) == 0)
                info->if_index = if_nametoindex(ifname);
        info->reta_size = priv->reta_idx_n ?
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index d34f3cc04..1e0a9875f 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -200,7 +200,7 @@ struct mlx5_txq_data {
        uint16_t inline_max_packet_sz; /* Max packet size for inlining. */
        uint16_t mr_cache_idx; /* Index of last hit entry. */
        uint32_t qp_num_8s; /* QP number shifted by 8. */
-       uint32_t flags; /* Flags for Tx Queue. */
+       uint64_t offloads; /* Offloads for Tx Queue. */
        volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
        volatile void *wqes; /* Work queue (use volatile to write into). */
        volatile uint32_t *qp_db; /* Work queue doorbell. */
@@ -292,6 +292,7 @@ int mlx5_priv_txq_release(struct priv *, uint16_t);
 int mlx5_priv_txq_releasable(struct priv *, uint16_t);
 int mlx5_priv_txq_verify(struct priv *);
 void txq_alloc_elts(struct mlx5_txq_ctrl *);
+uint64_t mlx5_priv_get_tx_port_offloads(struct priv *);
 
 /* mlx5_rxtx.c */
 
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec.c b/drivers/net/mlx5/mlx5_rxtx_vec.c
index 2556f5ebf..4e09a959d 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec.c
+++ b/drivers/net/mlx5/mlx5_rxtx_vec.c
@@ -202,15 +202,18 @@ mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                uint16_t ret;
 
                /* Transmit multi-seg packets in the head of pkts list. */
-               if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) &&
+               if ((txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) &&
                    NB_SEGS(pkts[nb_tx]) > 1)
                        nb_tx += txq_scatter_v(txq,
                                               &pkts[nb_tx],
                                               pkts_n - nb_tx);
                n = RTE_MIN((uint16_t)(pkts_n - nb_tx), MLX5_VPMD_TX_MAX_BURST);
-               if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS))
+               if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
                        n = txq_check_multiseg(&pkts[nb_tx], n);
-               if (!(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
+               if (txq->offloads &
+                   (DEV_TX_OFFLOAD_VLAN_INSERT |
+                    DEV_TX_OFFLOAD_UDP_CKSUM |
+                    DEV_TX_OFFLOAD_TCP_CKSUM))
                        n = txq_calc_offload(txq, &pkts[nb_tx], n, &cs_flags);
                ret = txq_burst_v(txq, &pkts[nb_tx], n, cs_flags);
                nb_tx += ret;
@@ -308,8 +311,12 @@ priv_check_raw_vec_tx_support(struct priv *priv)
        for (i = 0; i < priv->txqs_n; ++i) {
                struct mlx5_txq_data *txq = (*priv->txqs)[i];
 
-               if (!(txq->flags & ETH_TXQ_FLAGS_NOMULTSEGS) ||
-                   !(txq->flags & ETH_TXQ_FLAGS_NOOFFLOADS))
+               if (txq->offloads &
+                   (DEV_TX_OFFLOAD_MULTI_SEGS |
+                    DEV_TX_OFFLOAD_VLAN_INSERT |
+                    DEV_TX_OFFLOAD_UDP_CKSUM |
+                    DEV_TX_OFFLOAD_TCP_CKSUM |
+                    DEV_TX_OFFLOAD_IPV4_CKSUM))
                        break;
        }
        if (i != priv->txqs_n)
@@ -329,10 +336,13 @@ priv_check_raw_vec_tx_support(struct priv *priv)
 int __attribute__((cold))
 priv_check_vec_tx_support(struct priv *priv)
 {
+       uint64_t offloads = priv->dev->data->dev_conf.txmode.offloads;
+       int tso = !!(offloads & DEV_TX_OFFLOAD_TCP_TSO);
+
        if (!priv->args.tx_vec_en ||
            priv->txqs_n > MLX5_VPMD_MIN_TXQS ||
            priv->args.mps != MLX5_MPW_ENHANCED ||
-           priv->args.tso)
+           tso)
                return -ENOTSUP;
        return 1;
 }
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 28fc90e2e..4d9c7d697 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -116,6 +116,62 @@ txq_free_elts(struct mlx5_txq_ctrl *txq_ctrl)
 }
 
 /**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   Supported Tx offloads.
+ */
+uint64_t
+mlx5_priv_get_tx_port_offloads(struct priv *priv)
+{
+       uint64_t offloads = (DEV_TX_OFFLOAD_MULTI_SEGS |
+                            DEV_TX_OFFLOAD_VLAN_INSERT);
+
+       if (priv->hw_csum)
+               offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+                            DEV_TX_OFFLOAD_UDP_CKSUM |
+                            DEV_TX_OFFLOAD_TCP_CKSUM);
+       if (priv->tso)
+               offloads |= DEV_TX_OFFLOAD_TCP_TSO;
+       if (priv->tunnel_en) {
+               if (priv->hw_csum)
+                       offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+               if (priv->tso)
+                       offloads |= (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+                                    DEV_TX_OFFLOAD_GRE_TNL_TSO);
+       }
+       return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param offloads
+ *   Per-queue offloads configuration.
+ *
+ * @return
+ *   1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+       uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
+       uint64_t port_supp_offloads = mlx5_priv_get_tx_port_offloads(priv);
+
+       /* There are no Tx offloads which are per queue. */
+       if ((offloads & port_supp_offloads) != offloads)
+               return 0;
+       if ((port_offloads ^ offloads) & port_supp_offloads)
+               return 0;
+       return 1;
+}
+
+/**
  * DPDK callback to configure a TX queue.
  *
  * @param dev
@@ -146,6 +202,15 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                return -E_RTE_SECONDARY;
 
        priv_lock(priv);
+       if (!priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
+               ret = ENOTSUP;
+               ERROR("%p: Tx queue offloads 0x%lx don't match port "
+                     "offloads 0x%lx or supported offloads 0x%lx",
+                     (void *)dev, conf->offloads,
+                     dev->data->dev_conf.txmode.offloads,
+                     mlx5_priv_get_tx_port_offloads(priv));
+               goto out;
+       }
        if (desc <= MLX5_TX_COMP_THRESH) {
                WARN("%p: number of descriptors requested for TX queue %u"
                     " must be higher than MLX5_TX_COMP_THRESH, using"
@@ -570,6 +635,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
                ((MLX5_MAX_TSO_HEADER + (RTE_CACHE_LINE_SIZE - 1)) /
                 RTE_CACHE_LINE_SIZE);
        struct mlx5_txq_ctrl *tmpl;
+       int tso = !!(conf->offloads & DEV_TX_OFFLOAD_TCP_TSO);
 
        tmpl = rte_calloc_socket("TXQ", 1,
                                 sizeof(*tmpl) +
@@ -578,7 +644,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
        if (!tmpl)
                return NULL;
        assert(desc > MLX5_TX_COMP_THRESH);
-       tmpl->txq.flags = conf->txq_flags;
+       tmpl->txq.offloads = conf->offloads;
        tmpl->priv = priv;
        tmpl->socket = socket;
        tmpl->txq.elts_n = log2above(desc);
@@ -597,8 +663,6 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
                        ((priv->args.txq_inline + (RTE_CACHE_LINE_SIZE - 1)) /
                         RTE_CACHE_LINE_SIZE);
                tmpl->txq.inline_en = 1;
-               /* TSO and MPS can't be enabled concurrently. */
-               assert(!priv->args.tso || !priv->args.mps);
                if (priv->args.mps == MLX5_MPW_ENHANCED) {
                        tmpl->txq.inline_max_packet_sz =
                                priv->args.inline_max_packet_sz;
@@ -610,7 +674,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
                                          priv->args.inline_max_packet_sz) +
                                  (RTE_CACHE_LINE_SIZE - 1)) /
                                 RTE_CACHE_LINE_SIZE) * RTE_CACHE_LINE_SIZE;
-               } else if (priv->args.tso) {
+               } else if (tso) {
                        int inline_diff = tmpl->txq.max_inline - max_tso_inline;
 
                        /*
@@ -646,7 +710,7 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc,
                        tmpl->txq.max_inline = max_inline / RTE_CACHE_LINE_SIZE;
                }
        }
-       if (priv->args.tso) {
+       if (tso) {
                tmpl->max_tso_header = max_tso_inline * RTE_CACHE_LINE_SIZE;
                tmpl->txq.max_inline = RTE_MAX(tmpl->txq.max_inline,
                                               max_tso_inline);
-- 
2.12.0