A per-port offloading feature should be enabled or disabled at the same time in both rte_eth_dev_configure() and rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup(). This patch checks that each per-port offloading flag carries the same setting in rte_eth_dev_configure() as in rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup(). Doing this check once in the common rte_ethdev layer avoids duplicating the same check in every underlying PMD.
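For context, a minimal sketch of the application-side contract this patch enforces, assuming the 18.02-era offload API. The port id, queue size and mb_pool argument are placeholders, and DEV_RX_OFFLOAD_SCATTER merely stands in for any offload a PMD reports as per-port only (present in rx_offload_capa but not in rx_queue_offload_capa):

#include <string.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>

static int
setup_port(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf port_conf;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_dev_info dev_info;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);

	memset(&port_conf, 0, sizeof(port_conf));
	/* Opt in to the offload flags rather than the old bitfields. */
	port_conf.rxmode.ignore_offload_bitfield = 1;
	/* Enable a (here assumed) pure per-port Rx offload at configure time. */
	port_conf.rxmode.offloads = DEV_RX_OFFLOAD_SCATTER;

	ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	if (ret < 0)
		return ret;

	/* The per-queue offloads must repeat every pure per-port flag. */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = port_conf.rxmode.offloads;

	return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
				      &rxq_conf, mb_pool);
}

With this patch applied, dropping the flag from rxq_conf.offloads while keeping it in port_conf.rxmode.offloads makes rte_eth_rx_queue_setup() return -ENOTSUP from the ethdev layer itself, instead of leaving the mismatch for the PMD to catch.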
Signed-off-by: Wei Dai <wei....@intel.com>
---
 lib/librte_ether/rte_ethdev.c | 72 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 72 insertions(+)

diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 78bed1a..7945890 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1404,6 +1404,44 @@ rte_eth_dev_is_removed(uint16_t port_id)
 	return ret;
 }
 
+/**
+ * Check if the Rx/Tx queue offloading settings are valid
+ * @param queue_offloads
+ *   offloads input to rte_eth_rx_queue_setup() or rte_eth_tx_queue_setup()
+ * @param port_offloads
+ *   Rx or Tx offloads input to rte_eth_dev_configure()
+ * @param queue_offload_capa
+ *   rx_queue_offload_capa or tx_queue_offload_capa in struct rte_eth_dev_info
+ *   got from rte_eth_dev_info_get()
+ * @param all_offload_capa
+ *   rx_offload_capa or tx_offload_capa in struct rte_eth_dev_info
+ *   got from rte_eth_dev_info_get()
+ *
+ * @return
+ *   Nonzero when the per-queue offloading settings are valid
+ */
+static int
+rte_eth_check_queue_offloads(uint64_t queue_offloads,
+			     uint64_t port_offloads,
+			     uint64_t queue_offload_capa,
+			     uint64_t all_offload_capa)
+{
+	uint64_t pure_port_capa = all_offload_capa ^ queue_offload_capa;
+
+	return !((port_offloads ^ queue_offloads) & pure_port_capa);
+}
+
+static int
+rte_eth_check_rx_queue_offloads(uint64_t rx_queue_offloads,
+				const struct rte_eth_rxmode *rxmode,
+				const struct rte_eth_dev_info *dev_info)
+{
+	return rte_eth_check_queue_offloads(rx_queue_offloads,
+					    rxmode->offloads,
+					    dev_info->rx_queue_offload_capa,
+					    dev_info->rx_offload_capa);
+}
+
 int
 rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		       uint16_t nb_rx_desc, unsigned int socket_id,
@@ -1446,6 +1484,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 			(int) sizeof(struct rte_pktmbuf_pool_private));
 		return -ENOSPC;
 	}
+
 	mbp_buf_size = rte_pktmbuf_data_room_size(mp);
 
 	if ((mbp_buf_size - RTE_PKTMBUF_HEADROOM) < dev_info.min_rx_bufsize) {
@@ -1495,6 +1534,17 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 						    &local_conf.offloads);
 	}
 
+	if (!rte_eth_check_rx_queue_offloads(local_conf.offloads,
+		&dev->data->dev_conf.rxmode, &dev_info)) {
+		RTE_PMD_DEBUG_TRACE("%p : Rx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64 "\n",
+			(void *)dev, local_conf.offloads,
+			dev->data->dev_conf.rxmode.offloads,
+			dev_info.rx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
 					      socket_id, &local_conf, mp);
 	if (!ret) {
@@ -1555,6 +1605,17 @@ rte_eth_convert_txq_offloads(const uint64_t tx_offloads, uint32_t *txq_flags)
 	*txq_flags = flags;
 }
 
+static int
+rte_eth_check_tx_queue_offloads(uint64_t tx_queue_offloads,
+				const struct rte_eth_txmode *txmode,
+				const struct rte_eth_dev_info *dev_info)
+{
+	return rte_eth_check_queue_offloads(tx_queue_offloads,
+					    txmode->offloads,
+					    dev_info->tx_queue_offload_capa,
+					    dev_info->tx_offload_capa);
+}
+
 int
 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 		       uint16_t nb_tx_desc, unsigned int socket_id,
@@ -1622,6 +1683,17 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 						  &local_conf.offloads);
 	}
 
+	if (!rte_eth_check_tx_queue_offloads(local_conf.offloads,
+		&dev->data->dev_conf.txmode, &dev_info)) {
+		RTE_PMD_DEBUG_TRACE("%p : Tx queue offloads 0x%" PRIx64
+			" don't match port offloads 0x%" PRIx64
+			" or supported offloads 0x%" PRIx64 "\n",
+			(void *)dev, local_conf.offloads,
+			dev->data->dev_conf.txmode.offloads,
+			dev_info.tx_offload_capa);
+		return -ENOTSUP;
+	}
+
 	return eth_err(port_id, (*dev->dev_ops->tx_queue_setup)(dev,
 		       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
-- 
2.7.5
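To see why the check works, here is a standalone rendering of the same logic with made-up capability masks (not taken from any real PMD). An offload present in the full capability mask but absent from the per-queue mask can only be toggled port-wide, and the XOR of the port and queue settings isolates exactly the bits on which they disagree:

#include <assert.h>
#include <stdint.h>

/* Same logic as rte_eth_check_queue_offloads() above. */
static int
check_queue_offloads(uint64_t queue_offloads, uint64_t port_offloads,
		     uint64_t queue_offload_capa, uint64_t all_offload_capa)
{
	/* Bits supported per port but not per queue: port-wide only. */
	uint64_t pure_port_capa = all_offload_capa ^ queue_offload_capa;

	/* Valid iff port and queue agree on every port-wide-only bit. */
	return !((port_offloads ^ queue_offloads) & pure_port_capa);
}

int
main(void)
{
	const uint64_t queue_capa = 0x1; /* bit 0: per-queue capable */
	const uint64_t all_capa = 0x3;   /* bit 1: per-port only */

	/* Queue repeats the enabled port-only offload: accepted. */
	assert(check_queue_offloads(0x3, 0x3, queue_capa, all_capa));
	/* Queue omits a port-only offload it cannot change: rejected. */
	assert(!check_queue_offloads(0x1, 0x3, queue_capa, all_capa));
	/* Disagreement on the per-queue-capable bit alone is fine. */
	assert(check_queue_offloads(0x2, 0x3, queue_capa, all_capa));
	return 0;
}

Note that pure_port_capa comes out right only because the per-queue capabilities are a subset of the full capabilities; on that assumption the XOR is equivalent to all_offload_capa & ~queue_offload_capa.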