Ethdev Tx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Tx queue offloads API")
This commit supports the new Tx offloads API.

Signed-off-by: Shahaf Shuler <shah...@mellanox.com>
---
 drivers/net/mlx4/mlx4.c        |  9 +++++
 drivers/net/mlx4/mlx4_ethdev.c |  7 +---
 drivers/net/mlx4/mlx4_rxtx.h   |  1 +
 drivers/net/mlx4/mlx4_txq.c    | 66 +++++++++++++++++++++++++++++++++++--
 4 files changed, 75 insertions(+), 8 deletions(-)

diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index f9e4f9d73..38c545b1b 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -99,8 +99,17 @@ mlx4_dev_configure(struct rte_eth_dev *dev)
 {
 	struct priv *priv = dev->data->dev_private;
 	struct rte_flow_error error;
+	uint64_t supp_tx_offloads = mlx4_priv_get_tx_port_offloads(priv);
+	uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
 	int ret;
 
+	if ((tx_offloads & supp_tx_offloads) != tx_offloads) {
+		rte_errno = ENOTSUP;
+		ERROR("Some Tx offloads are not supported "
+		      "requested 0x%lx supported 0x%lx\n",
+		      tx_offloads, supp_tx_offloads);
+		return -rte_errno;
+	}
 	/* Prepare internal flow rules. */
 	ret = mlx4_flow_sync(priv, &error);
 	if (ret) {
diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c
index 2f69e7d4f..63e00b1da 100644
--- a/drivers/net/mlx4/mlx4_ethdev.c
+++ b/drivers/net/mlx4/mlx4_ethdev.c
@@ -767,17 +767,12 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->max_tx_queues = max;
 	info->max_mac_addrs = RTE_DIM(priv->mac);
 	info->rx_offload_capa = 0;
-	info->tx_offload_capa = 0;
+	info->tx_offload_capa = mlx4_priv_get_tx_port_offloads(priv);
 	if (priv->hw_csum) {
-		info->tx_offload_capa |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
-					  DEV_TX_OFFLOAD_UDP_CKSUM |
-					  DEV_TX_OFFLOAD_TCP_CKSUM);
 		info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
 					  DEV_RX_OFFLOAD_UDP_CKSUM |
 					  DEV_RX_OFFLOAD_TCP_CKSUM);
 	}
-	if (priv->hw_csum_l2tun)
-		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 	if (mlx4_get_ifname(priv, &ifname) == 0)
 		info->if_index = if_nametoindex(ifname);
 	info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE;
diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h
index 463df2b0b..528600a18 100644
--- a/drivers/net/mlx4/mlx4_rxtx.h
+++ b/drivers/net/mlx4/mlx4_rxtx.h
@@ -181,6 +181,7 @@ int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx,
 			uint16_t desc, unsigned int socket,
 			const struct rte_eth_txconf *conf);
 void mlx4_tx_queue_release(void *dpdk_txq);
+uint64_t mlx4_priv_get_tx_port_offloads(struct priv *priv);
 
 /**
  * Get memory region (MR) <-> memory pool (MP) association from txq->mp2mr[].
diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c
index 7882a4d0b..91befb16b 100644
--- a/drivers/net/mlx4/mlx4_txq.c
+++ b/drivers/net/mlx4/mlx4_txq.c
@@ -184,6 +184,56 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv)
 }
 
 /**
+ * Returns the per-port supported offloads.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   Supported Tx offloads.
+ */
+uint64_t
+mlx4_priv_get_tx_port_offloads(struct priv *priv)
+{
+	uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
+
+	if (priv->hw_csum) {
+		offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM |
+			     DEV_TX_OFFLOAD_UDP_CKSUM |
+			     DEV_TX_OFFLOAD_TCP_CKSUM);
+	}
+	if (priv->hw_csum_l2tun)
+		offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+	return offloads;
+}
+
+/**
+ * Checks if the per-queue offload configuration is valid.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param offloads
+ *   Per-queue offloads configuration.
+ *
+ * @return
+ *   1 if the configuration is valid, 0 otherwise.
+ */
+static int
+priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads)
+{
+	uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads;
+	uint64_t port_supp_offloads = mlx4_priv_get_tx_port_offloads(priv);
+
+	/* There are no Tx offloads which are per queue. */
+	if ((offloads & port_supp_offloads) != offloads)
+		return 0;
+	if ((port_offloads ^ offloads) & port_supp_offloads)
+		return 0;
+	return 1;
+}
+
+/**
  * DPDK callback to configure a Tx queue.
  *
  * @param dev
@@ -234,6 +284,15 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 	(void)conf; /* Thresholds configuration (ignored). */
 	DEBUG("%p: configuring queue %u for %u descriptors",
 	      (void *)dev, idx, desc);
+	if (!priv_is_tx_queue_offloads_allowed(priv, conf->offloads)) {
+		rte_errno = ENOTSUP;
+		ERROR("%p: Tx queue offloads 0x%lx don't match port "
+		      "offloads 0x%lx or supported offloads 0x%lx",
+		      (void *)dev, conf->offloads,
+		      dev->data->dev_conf.txmode.offloads,
+		      mlx4_priv_get_tx_port_offloads(priv));
+		return -rte_errno;
+	}
 	if (idx >= dev->data->nb_tx_queues) {
 		rte_errno = EOVERFLOW;
 		ERROR("%p: queue index out of range (%u >= %u)",
@@ -278,8 +337,11 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
 			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
 		.elts_comp_cd_init =
 			RTE_MIN(MLX4_PMD_TX_PER_COMP_REQ, desc / 4),
-		.csum = priv->hw_csum,
-		.csum_l2tun = priv->hw_csum_l2tun,
+		.csum = !!(conf->offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
+					     DEV_TX_OFFLOAD_UDP_CKSUM |
+					     DEV_TX_OFFLOAD_TCP_CKSUM)),
+		.csum_l2tun = !!(conf->offloads &
+				 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM),
 		/* Enable Tx loopback for VF devices. */
 		.lb = !!priv->vf,
 		.bounce_buf = bounce_buf,
-- 
2.12.0