[dpdk-dev] [PATCH 2/2] net/ixgbe: move RSS to flow API
Rte_flow actually defined to include RSS, but till now, RSS is out of rte_flow. This patch is to move ixgbe existing RSS to rte_flow. Signed-off-by: Wei Zhao --- drivers/net/ixgbe/ixgbe_ethdev.c | 12 +++ drivers/net/ixgbe/ixgbe_ethdev.h | 10 +++ drivers/net/ixgbe/ixgbe_flow.c | 165 +++ drivers/net/ixgbe/ixgbe_rxtx.c | 65 +++ 4 files changed, 252 insertions(+) diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index ff19a56..2596f19 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -8339,6 +8339,17 @@ ixgbe_l2_tn_filter_restore(struct rte_eth_dev *dev) } } +/* restore rss filter */ +static inline void +ixgbe_rss_filter_restore(struct rte_eth_dev *dev) +{ + struct ixgbe_filter_info *filter_info = + IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + ixgbe_config_rss_filter(dev, + &filter_info->rss_info, TRUE); +} + static int ixgbe_filter_restore(struct rte_eth_dev *dev) { @@ -8347,6 +8358,7 @@ ixgbe_filter_restore(struct rte_eth_dev *dev) ixgbe_syn_filter_restore(dev); ixgbe_fdir_filter_restore(dev); ixgbe_l2_tn_filter_restore(dev); + ixgbe_rss_filter_restore(dev); return 0; } diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h index 51ddcfd..4af79b4 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.h +++ b/drivers/net/ixgbe/ixgbe_ethdev.h @@ -224,6 +224,12 @@ struct ixgbe_hw_fdir_info { bool mask_added; /* If already got mask from consistent filter */ }; +struct ixgbe_rte_flow_rss_conf { + struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */ + uint16_t num; /**< Number of entries in queue[]. */ + uint16_t queue[IXGBE_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */ +}; + /* structure for interrupt relative data */ struct ixgbe_interrupt { uint32_t flags; @@ -340,6 +346,8 @@ struct ixgbe_filter_info { struct ixgbe_5tuple_filter_list fivetuple_list; /* store the SYN filter info */ uint32_t syn_info; + /* store the rss filter info */ + struct ixgbe_rte_flow_rss_conf rss_info; }; struct ixgbe_l2_tn_key { @@ -719,6 +727,8 @@ void ixgbe_tm_conf_init(struct rte_eth_dev *dev); void ixgbe_tm_conf_uninit(struct rte_eth_dev *dev); int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev, uint16_t queue_idx, uint16_t tx_rate); +int ixgbe_config_rss_filter(struct rte_eth_dev *dev, + struct ixgbe_rte_flow_rss_conf *conf, bool add); static inline int ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info, diff --git a/drivers/net/ixgbe/ixgbe_flow.c b/drivers/net/ixgbe/ixgbe_flow.c index 19c2d47..8f964cf 100644 --- a/drivers/net/ixgbe/ixgbe_flow.c +++ b/drivers/net/ixgbe/ixgbe_flow.c @@ -103,6 +103,11 @@ struct ixgbe_eth_l2_tunnel_conf_ele { TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries; struct rte_eth_l2_tunnel_conf filter_info; }; +/* rss filter list structure */ +struct ixgbe_rss_conf_ele { + TAILQ_ENTRY(ixgbe_rss_conf_ele) entries; + struct ixgbe_rte_flow_rss_conf filter_info; +}; /* ixgbe_flow memory list structure */ struct ixgbe_flow_mem { TAILQ_ENTRY(ixgbe_flow_mem) entries; @@ -114,6 +119,7 @@ TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele); TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele); TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele); TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele); +TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele); TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem); static struct ixgbe_ntuple_filter_list filter_ntuple_list; @@ -121,6 +127,7 @@ static struct ixgbe_ethertype_filter_list 
filter_ethertype_list; static struct ixgbe_syn_filter_list filter_syn_list; static struct ixgbe_fdir_rule_filter_list filter_fdir_list; static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list; +static struct ixgbe_rss_filter_list filter_rss_list; static struct ixgbe_flow_mem_list ixgbe_flow_list; /** @@ -2700,6 +2707,109 @@ ixgbe_parse_fdir_filter(struct rte_eth_dev *dev, return ret; } +static int +ixgbe_parse_rss_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_action actions[], + struct ixgbe_rte_flow_rss_conf *rss_conf, + struct rte_flow_error *error) +{ + const struct rte_flow_action *act; + const struct rte_flow_action_rss *rss; + uint16_t n; + + /** +* rss only supports forwarding, +* check if the first not void action is RSS. +*/ + act = next_no_void_action(actions, NULL); + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) { + memset(rss_conf, 0, sizeof(struct
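For context, a minimal application-side sketch of what this patch enables: requesting RSS through the generic flow API rather than the legacy RSS configuration path. This is not part of the patch; it assumes the 17.11-era struct rte_flow_action_rss layout (rss_conf pointer, num, flexible queue[] array) that the new ixgbe_rte_flow_rss_conf mirrors, and whether the pattern may be empty or minimal remains PMD-specific.

#include <stdlib.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Spread ingress traffic over queues 0..3 with an RSS flow rule.
 * Error handling and freeing of "rss" are omitted for brevity. */
static struct rte_flow *
create_rss_flow(uint16_t port_id, struct rte_eth_rss_conf *rss_conf,
		struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss *rss;
	uint16_t i, nb_queues = 4;

	rss = malloc(sizeof(*rss) + nb_queues * sizeof(uint16_t));
	if (rss == NULL)
		return NULL;
	rss->rss_conf = rss_conf;	/* hash types and key */
	rss->num = nb_queues;
	for (i = 0; i < nb_queues; i++)
		rss->queue[i] = i;

	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}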
[dpdk-dev] [PATCH 1/2] net/e1000: move RSS to flow API
Rte_flow actually defined to include RSS, but till now, RSS is out of rte_flow. This patch is to move igb existing RSS to rte_flow. Signed-off-by: Wei Zhao --- drivers/net/e1000/e1000_ethdev.h | 20 + drivers/net/e1000/igb_ethdev.c | 16 drivers/net/e1000/igb_flow.c | 160 +++ drivers/net/e1000/igb_rxtx.c | 61 +++ 4 files changed, 257 insertions(+) diff --git a/drivers/net/e1000/e1000_ethdev.h b/drivers/net/e1000/e1000_ethdev.h index 5668910..0731766 100644 --- a/drivers/net/e1000/e1000_ethdev.h +++ b/drivers/net/e1000/e1000_ethdev.h @@ -257,6 +257,12 @@ struct igb_ethertype_filter { uint32_t etqf; }; +struct igb_rte_flow_rss_conf { + struct rte_eth_rss_conf rss_conf; /**< RSS parameters. */ + uint16_t num; /**< Number of entries in queue[]. */ + uint16_t queue[IGB_MAX_RX_QUEUE_NUM]; /**< Queues indices to use. */ +}; + /* * Structure to store filters'info. */ @@ -274,6 +280,8 @@ struct e1000_filter_info { struct e1000_2tuple_filter_list twotuple_list; /* store the SYN filter info */ uint32_t syn_info; + /* store the rss filter info */ + struct igb_rte_flow_rss_conf rss_info; }; /* @@ -342,6 +350,12 @@ struct igb_flex_filter_ele { struct rte_eth_flex_filter filter_info; }; +/* rss filter list structure */ +struct igb_rss_conf_ele { + TAILQ_ENTRY(igb_rss_conf_ele) entries; + struct igb_rte_flow_rss_conf filter_info; +}; + /* igb_flow memory list structure */ struct igb_flow_mem { TAILQ_ENTRY(igb_flow_mem) entries; @@ -357,6 +371,8 @@ TAILQ_HEAD(igb_syn_filter_list, igb_eth_syn_filter_ele); struct igb_syn_filter_list igb_filter_syn_list; TAILQ_HEAD(igb_flex_filter_list, igb_flex_filter_ele); struct igb_flex_filter_list igb_filter_flex_list; +TAILQ_HEAD(igb_rss_filter_list, igb_rss_conf_ele); +struct igb_rss_filter_list igb_filter_rss_list; TAILQ_HEAD(igb_flow_mem_list, igb_flow_mem); struct igb_flow_mem_list igb_flow_list; @@ -500,4 +516,8 @@ int eth_igb_syn_filter_set(struct rte_eth_dev *dev, int eth_igb_add_del_flex_filter(struct rte_eth_dev *dev, struct rte_eth_flex_filter *filter, bool add); +int igb_config_rss_filter(struct rte_eth_dev *dev, + struct igb_rte_flow_rss_conf *conf, + bool add); + #endif /* _E1000_ETHDEV_H_ */ diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c index fdc139f..2faa089 100644 --- a/drivers/net/e1000/igb_ethdev.c +++ b/drivers/net/e1000/igb_ethdev.c @@ -948,6 +948,7 @@ eth_igb_dev_init(struct rte_eth_dev *eth_dev) TAILQ_INIT(&igb_filter_ethertype_list); TAILQ_INIT(&igb_filter_syn_list); TAILQ_INIT(&igb_filter_flex_list); + TAILQ_INIT(&igb_filter_rss_list); TAILQ_INIT(&igb_flow_list); return 0; @@ -1007,6 +1008,10 @@ eth_igb_dev_uninit(struct rte_eth_dev *eth_dev) memset(filter_info->ethertype_filters, 0, E1000_MAX_ETQF_FILTERS * sizeof(struct igb_ethertype_filter)); + /* clear the rss filter info */ + memset(&filter_info->rss_info, 0, + sizeof(struct igb_rte_flow_rss_conf)); + /* remove all ntuple filters of the device */ igb_ntuple_filter_uninit(eth_dev); @@ -5628,6 +5633,16 @@ igb_flex_filter_restore(struct rte_eth_dev *dev) } } +/* restore rss filter */ +static inline void +igb_rss_filter_restore(struct rte_eth_dev *dev) +{ + struct e1000_filter_info *filter_info = + E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private); + + igb_config_rss_filter(dev, &filter_info->rss_info, TRUE); +} + /* restore all types filter */ static int igb_filter_restore(struct rte_eth_dev *dev) @@ -5636,6 +5651,7 @@ igb_filter_restore(struct rte_eth_dev *dev) igb_ethertype_filter_restore(dev); igb_syn_filter_restore(dev); igb_flex_filter_restore(dev); + 
igb_rss_filter_restore(dev); return 0; } diff --git a/drivers/net/e1000/igb_flow.c b/drivers/net/e1000/igb_flow.c index 22bad26..840f814 100644 --- a/drivers/net/e1000/igb_flow.c +++ b/drivers/net/e1000/igb_flow.c @@ -1295,6 +1295,101 @@ igb_parse_flex_filter(struct rte_eth_dev *dev, return 0; } +static int +igb_parse_rss_filter(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr, + const struct rte_flow_action actions[], + struct igb_rte_flow_rss_conf *rss_conf, + struct rte_flow_error *error) +{ + const struct rte_flow_action *act; + const struct rte_flow_action_rss *rss; + uint16_t n, index; + + /** +* rss only supports forwarding, +* check if the first not void action is RSS. +*/ + index = 0; + NEXT_ITEM_OF_ACTION(act, actions, index); + if (act->type != RTE_FLOW_ACTION_TYP
[dpdk-dev] [PATCH 0/5] net/mlx5: cleanups
This series applies on top of patch 31485. Nelio Laranjeiro (5): net/mlx5: remove get priv internal function net/mlx5: fix secondary process verification net/mlx5: remove 32-bit support net/mlx5: move variable declaration net/mlx5: remove redundant inline variable drivers/net/mlx5/mlx5.c | 6 ++ drivers/net/mlx5/mlx5_ethdev.c | 46 +++-- drivers/net/mlx5/mlx5_mac.c | 6 -- drivers/net/mlx5/mlx5_rss.c | 1 - drivers/net/mlx5/mlx5_rxmode.c | 8 --- drivers/net/mlx5/mlx5_rxq.c | 10 ++--- drivers/net/mlx5/mlx5_rxtx.c| 31 +-- drivers/net/mlx5/mlx5_rxtx.h| 1 - drivers/net/mlx5/mlx5_stats.c | 8 +++ drivers/net/mlx5/mlx5_trigger.c | 6 -- drivers/net/mlx5/mlx5_txq.c | 9 +--- 11 files changed, 26 insertions(+), 106 deletions(-) -- 2.11.0
[dpdk-dev] [PATCH 2/5] net/mlx5: fix secondary process verification
Since the secondary process has its own devops, function which cannot be called by the secondary don't need anymore to verify which process is calling it. Fixes: 87ec44ce1651 ("net/mlx5: add operations for secondary process") Cc: xuemi...@mellanox.com Signed-off-by: Nelio Laranjeiro Acked-by: Yongseok Koh --- drivers/net/mlx5/mlx5.c | 4 +--- drivers/net/mlx5/mlx5_ethdev.c | 25 - drivers/net/mlx5/mlx5_mac.c | 6 -- drivers/net/mlx5/mlx5_rss.c | 1 - drivers/net/mlx5/mlx5_rxmode.c | 8 drivers/net/mlx5/mlx5_rxq.c | 6 -- drivers/net/mlx5/mlx5_trigger.c | 6 -- drivers/net/mlx5/mlx5_txq.c | 6 -- 8 files changed, 1 insertion(+), 61 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 9d0f5f069..cd66fe162 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -158,7 +158,6 @@ mlx5_alloc_verbs_buf(size_t size, void *data) size_t alignment = sysconf(_SC_PAGESIZE); assert(data != NULL); - assert(!mlx5_is_secondary()); ret = rte_malloc_socket(__func__, size, alignment, priv->dev->device->numa_node); DEBUG("Extern alloc size: %lu, align: %lu: %p", size, alignment, ret); @@ -177,7 +176,6 @@ static void mlx5_free_verbs_buf(void *ptr, void *data __rte_unused) { assert(data != NULL); - assert(!mlx5_is_secondary()); DEBUG("Extern free request: %p", ptr); rte_free(ptr); } @@ -687,7 +685,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) mlx5_dev[idx].ports |= test; - if (mlx5_is_secondary()) { + if (rte_eal_process_type() == RTE_PROC_SECONDARY) { /* from rte_ethdev.c */ char name[RTE_ETH_NAME_MAX_LEN]; diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 75be352cf..ca9ad0fef 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -119,18 +119,6 @@ struct ethtool_link_settings { #endif /** - * Check if running as a secondary process. - * - * @return - * Nonzero if running as a secondary process. - */ -inline int -mlx5_is_secondary(void) -{ - return rte_eal_process_type() == RTE_PROC_SECONDARY; -} - -/** * Get interface name from private structure. 
* * @param[in] priv @@ -634,9 +622,6 @@ mlx5_dev_configure(struct rte_eth_dev *dev) struct priv *priv = dev->data->dev_private; int ret; - if (mlx5_is_secondary()) - return -E_RTE_SECONDARY; - priv_lock(priv); ret = dev_configure(dev); assert(ret >= 0); @@ -937,9 +922,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) uint16_t kern_mtu; int ret = 0; - if (mlx5_is_secondary()) - return -E_RTE_SECONDARY; - priv_lock(priv); ret = priv_get_mtu(priv, &kern_mtu); if (ret) @@ -987,9 +969,6 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) }; int ret; - if (mlx5_is_secondary()) - return -E_RTE_SECONDARY; - ifr.ifr_data = (void *)ðpause; priv_lock(priv); if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { @@ -1038,9 +1017,6 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) }; int ret; - if (mlx5_is_secondary()) - return -E_RTE_SECONDARY; - ifr.ifr_data = (void *)ðpause; ethpause.autoneg = fc_conf->autoneg; if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || @@ -1302,7 +1278,6 @@ priv_dev_interrupt_handler_install(struct priv *priv, struct rte_eth_dev *dev) { int rc, flags; - assert(!mlx5_is_secondary()); assert(priv->ctx->async_fd > 0); flags = fcntl(priv->ctx->async_fd, F_GETFL); rc = fcntl(priv->ctx->async_fd, F_SETFL, flags | O_NONBLOCK); diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index d17b991e4..9fb5ba5e7 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -93,8 +93,6 @@ priv_get_mac(struct priv *priv, uint8_t (*mac)[ETHER_ADDR_LEN]) void mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index) { - if (mlx5_is_secondary()) - return; assert(index < MLX5_MAX_MAC_ADDRESSES); memset(&dev->data->mac_addrs[index], 0, sizeof(struct ether_addr)); if (!dev->data->promiscuous && !dev->data->all_multicast) @@ -124,8 +122,6 @@ mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac, int ret = 0; (void)vmdq; - if (mlx5_is_secondary()) - return 0; assert(index < MLX5_MAX_MAC_ADDRESSES); /* First, make sure this address isn't already configured. */ for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) { @@ -154,8 +150,6 @@ mlx5_mac_
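To illustrate the reasoning in the commit message, here is a hypothetical sketch (not taken from mlx5) of the pattern that makes the per-function checks redundant: the driver selects a reduced dev_ops table at probe time when running as a secondary process, so control-path callbacks that require device ownership are simply never reachable from a secondary.

#include <rte_eal.h>
#include <rte_ethdev.h>

/* Hypothetical names: primary_ops holds the full callback set,
 * secondary_ops only the callbacks safe without device ownership. */
static void
example_select_dev_ops(struct rte_eth_dev *dev,
		       const struct eth_dev_ops *primary_ops,
		       const struct eth_dev_ops *secondary_ops)
{
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		dev->dev_ops = secondary_ops;
	else
		dev->dev_ops = primary_ops;
}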
[dpdk-dev] [PATCH 3/5] net/mlx5: remove 32-bit support
naddr variable was introduced in commit 9a7fa9f76d9e ("net/mlx5: use vector types to speed up processing") to avoid compilation errors on 32bits compilation, as x86_32 is no more supported by rdma-core nor by MLNX_OFED, this variable becomes useless and can be safely removed. Signed-off-by: Nelio Laranjeiro Acked-by: Yongseok Koh --- drivers/net/mlx5/mlx5_rxtx.c | 20 +--- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 32bfa307c..932470602 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -370,7 +370,6 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) unsigned int ds = 0; unsigned int sg = 0; /* counter of additional segs attached. */ uintptr_t addr; - uint64_t naddr; uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2; uint16_t tso_header_sz = 0; uint16_t ehdr; @@ -594,12 +593,12 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) ds = 3; use_dseg: /* Add the remaining packet as a simple ds. */ - naddr = rte_cpu_to_be_64(addr); + addr = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t){ rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), - naddr, - naddr >> 32, + addr, + addr >> 32, }; ++ds; if (!segs_n) @@ -633,12 +632,12 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) total_length += length; #endif /* Store segment information. */ - naddr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)); + addr = rte_cpu_to_be_64(rte_pktmbuf_mtod(buf, uintptr_t)); *dseg = (rte_v128u32_t){ rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), - naddr, - naddr >> 32, + addr, + addr >> 32, }; (*txq->elts)[++elts_head & elts_m] = buf; ++sg; @@ -1339,7 +1338,6 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) do { struct rte_mbuf *buf = *(pkts++); uintptr_t addr; - uint64_t naddr; unsigned int n; unsigned int do_inline = 0; /* Whether inline is possible. */ uint32_t length; @@ -1521,12 +1519,12 @@ mlx5_tx_burst_empw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) for (n = 0; n * RTE_CACHE_LINE_SIZE < length; n++) rte_prefetch2((void *)(addr + n * RTE_CACHE_LINE_SIZE)); - naddr = rte_cpu_to_be_64(addr); + addr = rte_cpu_to_be_64(addr); *dseg = (rte_v128u32_t) { rte_cpu_to_be_32(length), mlx5_tx_mb2mr(txq, buf), - naddr, - naddr >> 32, + addr, + addr >> 32, }; mpw.data.raw = (volatile void *)(dseg + 1); mpw.total_len += (inl_pad + sizeof(*dseg)); -- 2.11.0
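A one-line illustration of why the temporary was needed before (a sketch, not driver code): on a 32-bit build uintptr_t is only 32 bits wide, so storing the byte-swapped 64-bit value back into addr would have truncated it; with x86_32 gone, addr can hold the rte_cpu_to_be_64() result directly, as the diff above does.

#include <stdint.h>
#include <rte_byteorder.h>

/* Lossless only when uintptr_t is 64-bit; on a 32-bit target the
 * assignment back into addr would truncate, which is what the removed
 * naddr temporary used to avoid. */
static inline uint64_t
addr_to_be64(uintptr_t addr)
{
	addr = rte_cpu_to_be_64(addr);
	return addr;
}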
[dpdk-dev] [PATCH 1/5] net/mlx5: remove get priv internal function
mlx5_get_priv() is barely use across the driver. To avoid mixing access, this function is definitely removed. Signed-off-by: Nelio Laranjeiro Acked-by: Yongseok Koh --- drivers/net/mlx5/mlx5.c| 2 +- drivers/net/mlx5/mlx5_ethdev.c | 21 +++-- drivers/net/mlx5/mlx5_rxq.c| 4 ++-- drivers/net/mlx5/mlx5_stats.c | 8 4 files changed, 10 insertions(+), 25 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index a5eb3fdc5..9d0f5f069 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -193,7 +193,7 @@ mlx5_free_verbs_buf(void *ptr, void *data __rte_unused) static void mlx5_dev_close(struct rte_eth_dev *dev) { - struct priv *priv = mlx5_get_priv(dev); + struct priv *priv = dev->data->dev_private; unsigned int i; int ret; diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index a3cef6891..75be352cf 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -119,21 +119,6 @@ struct ethtool_link_settings { #endif /** - * Return private structure associated with an Ethernet device. - * - * @param dev - * Pointer to Ethernet device structure. - * - * @return - * Pointer to private structure. - */ -struct priv * -mlx5_get_priv(struct rte_eth_dev *dev) -{ - return dev->data->dev_private; -} - -/** * Check if running as a secondary process. * * @return @@ -670,7 +655,7 @@ mlx5_dev_configure(struct rte_eth_dev *dev) void mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) { - struct priv *priv = mlx5_get_priv(dev); + struct priv *priv = dev->data->dev_private; unsigned int max; char ifname[IF_NAMESIZE]; @@ -761,7 +746,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) static int mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) { - struct priv *priv = mlx5_get_priv(dev); + struct priv *priv = dev->data->dev_private; struct ethtool_cmd edata = { .cmd = ETHTOOL_GSET /* Deprecated since Linux v4.5. 
*/ }; @@ -827,7 +812,7 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete) static int mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete) { - struct priv *priv = mlx5_get_priv(dev); + struct priv *priv = dev->data->dev_private; struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS }; struct ifreq ifr; struct rte_eth_link dev_link; diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 85399eff5..a6eec2fa6 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -460,7 +460,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - struct priv *priv = mlx5_get_priv(dev); + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data; struct mlx5_rxq_ctrl *rxq_ctrl; int ret = 0; @@ -504,7 +504,7 @@ mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - struct priv *priv = mlx5_get_priv(dev); + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data; struct mlx5_rxq_ctrl *rxq_ctrl; struct mlx5_rxq_ibv *rxq_ibv = NULL; diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c index 5e225d374..e689b0748 100644 --- a/drivers/net/mlx5/mlx5_stats.c +++ b/drivers/net/mlx5/mlx5_stats.c @@ -321,7 +321,7 @@ priv_xstats_reset(struct priv *priv) int mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { - struct priv *priv = mlx5_get_priv(dev); + struct priv *priv = dev->data->dev_private; struct rte_eth_stats tmp = {0}; unsigned int i; unsigned int idx; @@ -427,7 +427,7 @@ int mlx5_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *stats, unsigned int n) { - struct priv *priv = mlx5_get_priv(dev); + struct priv *priv = dev->data->dev_private; int ret = xstats_n; if (n >= xstats_n && stats) { @@ -457,7 +457,7 @@ mlx5_xstats_get(struct rte_eth_dev *dev, void mlx5_xstats_reset(struct rte_eth_dev *dev) { - struct priv *priv = mlx5_get_priv(dev); + struct priv *priv = dev->data->dev_private; struct mlx5_xstats_ctrl *xstats_ctrl = &priv->xstats_ctrl; int stats_n; @@ -489,7 +489,7 @@ int mlx5_xstats_get_names(struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, unsigned int n) { - struct priv *priv = mlx5_get_priv(dev); + struct priv *priv = dev->data->dev_private; unsigned int i; if (n >= xstats_n && xstats_names) { -- 2.11.0
[dpdk-dev] [PATCH 4/5] net/mlx5: move variable declaration
Most of the variable in mlx5_tx_burst() are defined too soon. This commit moves them their uses C block of code. Signed-off-by: Nelio Laranjeiro Acked-by: Yongseok Koh --- drivers/net/mlx5/mlx5_rxtx.c | 10 +- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 932470602..d735e646c 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -344,15 +344,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) unsigned int j = 0; unsigned int k = 0; uint16_t max_elts; - unsigned int max_inline = txq->max_inline; - const unsigned int inline_en = !!max_inline && txq->inline_en; uint16_t max_wqe; unsigned int comp; - volatile struct mlx5_wqe_v *wqe = NULL; volatile struct mlx5_wqe_ctrl *last_wqe = NULL; unsigned int segs_n = 0; - struct rte_mbuf *buf = NULL; - uint8_t *raw; if (unlikely(!pkts_n)) return 0; @@ -365,6 +360,11 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (unlikely(!max_wqe)) return 0; do { + unsigned int max_inline = txq->max_inline; + const unsigned int inline_en = !!max_inline && txq->inline_en; + struct rte_mbuf *buf = NULL; + uint8_t *raw; + volatile struct mlx5_wqe_v *wqe = NULL; volatile rte_v128u32_t *dseg = NULL; uint32_t length; unsigned int ds = 0; -- 2.11.0
[dpdk-dev] [PATCH 5/5] net/mlx5: remove redundant inline variable
A non max_inline 0 means an inline is requested, there is no need to duplicate this information. Signed-off-by: Nelio Laranjeiro Acked-by: Yongseok Koh --- drivers/net/mlx5/mlx5_rxtx.c | 5 ++--- drivers/net/mlx5/mlx5_rxtx.h | 1 - drivers/net/mlx5/mlx5_txq.c | 3 +-- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index d735e646c..28c0ad8ab 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -348,6 +348,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) unsigned int comp; volatile struct mlx5_wqe_ctrl *last_wqe = NULL; unsigned int segs_n = 0; + const unsigned int max_inline = txq->max_inline; if (unlikely(!pkts_n)) return 0; @@ -360,8 +361,6 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) if (unlikely(!max_wqe)) return 0; do { - unsigned int max_inline = txq->max_inline; - const unsigned int inline_en = !!max_inline && txq->inline_en; struct rte_mbuf *buf = NULL; uint8_t *raw; volatile struct mlx5_wqe_v *wqe = NULL; @@ -516,7 +515,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) } } /* Inline if enough room. */ - if (inline_en || tso) { + if (max_inline || tso) { uint32_t inl; uintptr_t end = (uintptr_t) (((uintptr_t)txq->wqes) + diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index 63eb12c66..b8c7925a3 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -191,7 +191,6 @@ struct mlx5_txq_data { uint16_t elts_n:4; /* (*elts)[] length (in log2). */ uint16_t cqe_n:4; /* Number of CQ elements (in log2). */ uint16_t wqe_n:4; /* Number of of WQ elements (in log2). */ - uint16_t inline_en:1; /* When set inline is enabled. */ uint16_t tso_en:1; /* When set hardware TSO is enabled. */ uint16_t tunnel_en:1; /* When set TX offload for tunneled packets are supported. */ diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 84d37be19..a786a6b63 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -353,7 +353,7 @@ mlx5_priv_txq_ibv_new(struct priv *priv, uint16_t idx) .pd = priv->pd, .comp_mask = IBV_QP_INIT_ATTR_PD, }; - if (txq_data->inline_en) + if (txq_data->max_inline) attr.init.cap.max_inline_data = txq_ctrl->max_inline_data; if (txq_data->tso_en) { attr.init.max_tso_header = txq_ctrl->max_tso_header; @@ -589,7 +589,6 @@ mlx5_priv_txq_new(struct priv *priv, uint16_t idx, uint16_t desc, tmpl->txq.max_inline = ((priv->txq_inline + (RTE_CACHE_LINE_SIZE - 1)) / RTE_CACHE_LINE_SIZE); - tmpl->txq.inline_en = 1; /* TSO and MPS can't be enabled concurrently. */ assert(!priv->tso || !priv->mps); if (priv->mps == MLX5_MPW_ENHANCED) { -- 2.11.0
[dpdk-dev] [PATCH v2] net/mlx5: remove parser/flow drop queue
This drop queue can be handled efficiently by using the drop flag in the context. Signed-off-by: Nelio Laranjeiro Acked-by: Yongseok Koh --- Changes in v2: * remove also the drop queue in rte_flow structure to avoid a segfault. --- drivers/net/mlx5/mlx5_flow.c | 178 ++- 1 file changed, 56 insertions(+), 122 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index f32dfdd3f..1eda83671 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -250,11 +250,8 @@ struct rte_flow { uint8_t rss_key[40]; /**< copy of the RSS key. */ struct ibv_counter_set *cs; /**< Holds the counters for the rule. */ struct mlx5_flow_counter_stats counter_stats;/**drop) { - parser->drop_q.offset = sizeof(struct ibv_flow_attr); - } else { - for (i = 0; i != hash_rxq_init_n; ++i) - parser->queue[i].offset = sizeof(struct ibv_flow_attr); - } + for (i = 0; i != hash_rxq_init_n; ++i) + parser->queue[i].offset = sizeof(struct ibv_flow_attr); for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { const struct mlx5_flow_items *token = NULL; unsigned int n; @@ -868,9 +853,7 @@ priv_flow_convert_items_validate(struct priv *priv, } parser->inner = IBV_FLOW_SPEC_INNER; } - if (parser->drop) { - parser->drop_q.offset += cur_item->dst_sz; - } else if (parser->queues_n == 1) { + if (parser->drop || parser->queues_n == 1) { parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz; } else { for (n = 0; n != hash_rxq_init_n; ++n) @@ -885,12 +868,8 @@ priv_flow_convert_items_validate(struct priv *priv, if (parser->count) { unsigned int size = sizeof(struct ibv_flow_spec_counter_action); - if (parser->drop) { - parser->drop_q.offset += size; - } else { - for (i = 0; i != hash_rxq_init_n; ++i) - parser->queue[i].offset += size; - } + for (i = 0; i != hash_rxq_init_n; ++i) + parser->queue[i].offset += size; } return 0; exit_item_not_supported: @@ -1102,15 +1081,7 @@ priv_flow_convert(struct priv *priv, * Second step. * Allocate the memory space to store verbs specifications. */ - if (parser->drop) { - parser->drop_q.ibv_attr = - priv_flow_convert_allocate(priv, attr->priority, - parser->drop_q.offset, - error); - if (!parser->drop_q.ibv_attr) - return ENOMEM; - parser->drop_q.offset = sizeof(struct ibv_flow_attr); - } else if (parser->queues_n == 1) { + if (parser->drop || parser->queues_n == 1) { unsigned int priority = attr->priority + hash_rxq_init[HASH_RXQ_ETH].flow_priority; @@ -1172,15 +1143,7 @@ priv_flow_convert(struct priv *priv, * Last step. Complete missing specification to reach the RSS * configuration. */ - if (parser->drop) { - /* -* Drop queue priority needs to be adjusted to -* their most specific layer priority. -*/ - parser->drop_q.ibv_attr->priority = - attr->priority + - hash_rxq_init[parser->layer].flow_priority; - } else if (parser->queues_n > 1) { + if (parser->queues_n > 1) { priv_flow_convert_finalise(priv, parser); } else { /* @@ -1195,10 +1158,6 @@ priv_flow_convert(struct priv *priv, exit_free: /* Only verification is expected, all resources should be released. 
*/ if (!parser->create) { - if (parser->drop) { - rte_free(parser->drop_q.ibv_attr); - parser->drop_q.ibv_attr = NULL; - } for (i = 0; i != hash_rxq_init_n; ++i) { if (parser->queue[i].ibv_attr) { rte_free(parser->queue[i].ibv_attr); @@ -1240,14 +1199,6 @@ mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src, unsigned int i; void *dst; - if (parser->drop) { - dst = (void *)((uintptr_t)parser->drop_q.ibv_attr + - parser->drop_q.offset); - memcpy(dst, src, size); - ++parser->drop_q.ibv_attr->num_of_specs; - parser->drop_q.offset += size; - return; - }
Re: [dpdk-dev] [PATCH 0/2] ethdev: add GENEVE to flow API
Hi Andrew, On Mon, Nov 20, 2017 at 08:21:59AM +, Andrew Rybchenko wrote: > enum rte_flow_item_type states that items matching protocol headers > must be stacked in the same order as the protocol layers to match. > As the result the patch changes ABI since Geneve is added just after > VXLAN (the closest protocol). > > In fact as far as I can see many items do not follow the requirement > already. May be the comment/requirement should be removed and GENEVE > should be added at the end of the list. If so, should be keep it just > after VXLAN in all other places or move after ESP as well? Perhaps documentation is unclear, this requirement only applies to applications when constructing patterns out of those items (e.g. to make sense, TCP is supposed to come after IPv4, not before). New item/action definitions must obviously be added at the end of both lists to avoid ABI breakage, there is no specific order to follow other than that. What may have confused you is most of them are apparently ordered by protocol layer, that's because those are here from day one; it's not the case anymore starting with E_TAG, which was added much later. Besides addressing the ABI breakage, I don't see any issue with adding GENEVE to rte_flow, I only have a few more comments on subsequent patches in the series. Otherwise good job, looks like you didn't miss anything. -- Adrien Mazarguil 6WIND
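To make the point above concrete, a hedged sketch of the two different orderings being discussed: the enum value is appended at the end of rte_flow_item_type to preserve the ABI, while an application still stacks items in protocol-layer order when it builds a pattern (GENEVE item name as proposed in this series).

#include <rte_flow.h>

/* Pattern stacked in protocol order: outer Ethernet / IPv4 / UDP, then
 * the GENEVE tunnel header. The position of RTE_FLOW_ITEM_TYPE_GENEVE
 * inside the enum itself is irrelevant here. */
static const struct rte_flow_item geneve_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
	{ .type = RTE_FLOW_ITEM_TYPE_GENEVE },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};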
Re: [dpdk-dev] [PATCH 1/2] ethdev: add GENEVE flow pattern item
On Mon, Nov 20, 2017 at 08:22:00AM +, Andrew Rybchenko wrote: > From: Roman Zhukov > > Add new pattern item RTE_FLOW_ITEM_TYPE_GENEVE in flow API. > This commit also adds default mask for these item. > > Signed-off-by: Roman Zhukov > Signed-off-by: Andrew Rybchenko OK, the main issue in this patch is you're inserting GENEVE in the middle of everything (enums, documentation, etc). Just append it to the enum to avoid ABI breakage as described in my previous message and use the same position everywhere else for consistency. It must appear after RTE_FLOW_ITEM_TYPE_ESP. More comments below. > --- > doc/guides/prog_guide/rte_flow.rst | 12 > lib/librte_ether/rte_flow.c| 1 + > lib/librte_ether/rte_flow.h| 31 +++ > 3 files changed, 44 insertions(+) > > diff --git a/doc/guides/prog_guide/rte_flow.rst > b/doc/guides/prog_guide/rte_flow.rst > index d158be5..2f96623 100644 > --- a/doc/guides/prog_guide/rte_flow.rst > +++ b/doc/guides/prog_guide/rte_flow.rst > @@ -863,6 +863,18 @@ Matches a VXLAN header (RFC 7348). > - ``rsvd1``: reserved, normally 0x00. > - Default ``mask`` matches VNI only. > > +Item: ``GENEVE`` > +^^^ > + > +Matches a GENEVE header. > + > +- ``ver_opt_len_o_c_rsvd0``: version (2b), length of the options fields (6b), > + OAM packet (1b), critical options present (1b), reserved 0 (6b). > +- ``protocol``: protocol type. > +- ``vni``: virtual network identifier. > +- ``rsvd1``: reserved, normally 0x00. > +- Default ``mask`` matches protocol type and VNI. > + I'm not sure about the default mask. It should be the least common denominator, not necessarily what the first implementation supports. How about making it like VXLAN, i.e. VNI only? Does that make sense at all? > Item: ``E_TAG`` > ^^^ > > diff --git a/lib/librte_ether/rte_flow.c b/lib/librte_ether/rte_flow.c > index 6659063..bf1b253 100644 > --- a/lib/librte_ether/rte_flow.c > +++ b/lib/librte_ether/rte_flow.c > @@ -77,6 +77,7 @@ static const struct rte_flow_desc_data rte_flow_desc_item[] > = { > MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), > MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), > MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), > + MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), You should add it at the end but some items are already missing from that list. Since I plan to send an overhaul for this function, you can leave this change out for the time being. > MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)), > MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)), > MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)), > diff --git a/lib/librte_ether/rte_flow.h b/lib/librte_ether/rte_flow.h > index 47c88ea..29d81d4 100644 > --- a/lib/librte_ether/rte_flow.h > +++ b/lib/librte_ether/rte_flow.h > @@ -272,6 +272,13 @@ enum rte_flow_item_type { > RTE_FLOW_ITEM_TYPE_VXLAN, > > /** > + * Matches a GENEVE header. > + * > + * See struct rte_flow_item_geneve. > + */ > + RTE_FLOW_ITEM_TYPE_GENEVE, > + > + /** >* Matches a E_TAG header. >* >* See struct rte_flow_item_e_tag. > @@ -651,6 +658,30 @@ static const struct rte_flow_item_vxlan > rte_flow_item_vxlan_mask = { > #endif > > /** > + * RTE_FLOW_ITEM_TYPE_GENEVE. > + * > + * Matches a GENEVE header. > + */ > +struct rte_flow_item_geneve { > + /** > + * Version (2b), length of the options fields (6b), OAM packet (1b), > + * critical options present (1b), reserved 0 (6b). > + */ > + rte_be16_t ver_opt_len_o_c_rsvd0; > + rte_be16_t protocol; /**< Protocol type. */ > + uint8_t vni[3]; /**< Virtual Network Identifier. 
*/ > + uint8_t rsvd1; /**< Reserved, normally 0x00. */ > +}; > + > +/** Default mask for RTE_FLOW_ITEM_TYPE_GENEVE. */ > +#ifndef __cplusplus > +static const struct rte_flow_item_geneve rte_flow_item_geneve_mask = { > + .protocol = RTE_BE16(0x), > + .vni = "\xff\xff\xff", > +}; > +#endif So how about removing .protocol from the default mask? > + > +/** > * RTE_FLOW_ITEM_TYPE_E_TAG. > * > * Matches a E-tag header. > -- > 2.7.4 > -- Adrien Mazarguil 6WIND
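Relating to the default-mask question above, a short sketch of how an application can match only the VNI regardless of what the default mask ends up being, by supplying an explicit mask (struct layout as proposed in this patch; the VNI value is arbitrary):

#include <rte_flow.h>

/* Match GENEVE VNI 42 only; protocol and the remaining fields are left
 * unmasked by passing an explicit item mask instead of the default. */
static const struct rte_flow_item_geneve geneve_spec = {
	.vni = "\x00\x00\x2a",
};
static const struct rte_flow_item_geneve geneve_mask = {
	.vni = "\xff\xff\xff",
};
static const struct rte_flow_item geneve_item = {
	.type = RTE_FLOW_ITEM_TYPE_GENEVE,
	.spec = &geneve_spec,
	.mask = &geneve_mask,
};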
Re: [dpdk-dev] [PATCH 2/2] app/testpmd: support GENEVE pattern item in flow rules
On Mon, Nov 20, 2017 at 08:22:01AM +, Andrew Rybchenko wrote: > From: Roman Zhukov > > Add the ability to match a VNI field of GENEVE protocol header. > > Signed-off-by: Roman Zhukov > Signed-off-by: Andrew Rybchenko Same comments as previously basically, keep the same order as rte_flow.h after fixing the ABI breakage. One minor comment below. > --- > app/test-pmd/cmdline_flow.c | 31 > + > app/test-pmd/config.c | 1 + > doc/guides/testpmd_app_ug/testpmd_funcs.rst | 5 + > 3 files changed, 37 insertions(+) > > diff --git a/app/test-pmd/cmdline_flow.c b/app/test-pmd/cmdline_flow.c > index df16d2a..cee99f3 100644 > --- a/app/test-pmd/cmdline_flow.c > +++ b/app/test-pmd/cmdline_flow.c > @@ -161,6 +161,9 @@ enum index { > ITEM_SCTP_CKSUM, > ITEM_VXLAN, > ITEM_VXLAN_VNI, > + ITEM_GENEVE, > + ITEM_GENEVE_VNI, > + ITEM_GENEVE_PROTO, > ITEM_E_TAG, > ITEM_E_TAG_GRP_ECID_B, > ITEM_NVGRE, > @@ -452,6 +455,7 @@ static const enum index next_item[] = { > ITEM_TCP, > ITEM_SCTP, > ITEM_VXLAN, > + ITEM_GENEVE, > ITEM_E_TAG, > ITEM_NVGRE, > ITEM_MPLS, > @@ -573,6 +577,13 @@ static const enum index item_vxlan[] = { > ZERO, > }; > > +static const enum index item_geneve[] = { > + ITEM_GENEVE_VNI, > + ITEM_GENEVE_PROTO, > + ITEM_NEXT, > + ZERO, > +}; > + > static const enum index item_e_tag[] = { > ITEM_E_TAG_GRP_ECID_B, > ITEM_NEXT, > @@ -1371,6 +1382,26 @@ static const struct token token_list[] = { > .next = NEXT(item_vxlan, NEXT_ENTRY(UNSIGNED), item_param), > .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_vxlan, vni)), > }, > + [ITEM_GENEVE] = { > + .name = "geneve", > + .help = "match GENEVE header", > + .priv = PRIV_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), > + .next = NEXT(item_geneve), > + .call = parse_vc, > + }, > + [ITEM_GENEVE_VNI] = { > + .name = "vni", > + .help = "Virtual Network Identifier", How about "virtual network identifier" (all lower caps)? > + .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param), > + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, vni)), > + }, > + [ITEM_GENEVE_PROTO] = { > + .name = "protocol", > + .help = "GENEVE protocol type", > + .next = NEXT(item_geneve, NEXT_ENTRY(UNSIGNED), item_param), > + .args = ARGS(ARGS_ENTRY_HTON(struct rte_flow_item_geneve, > + protocol)), > + }, > [ITEM_E_TAG] = { > .name = "e_tag", > .help = "match E-Tag header", > diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c > index cd2ac11..4eda37f 100644 > --- a/app/test-pmd/config.c > +++ b/app/test-pmd/config.c > @@ -965,6 +965,7 @@ static const struct { > MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)), > MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)), > MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)), > + MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)), > MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)), > MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)), > MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)), > diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst > b/doc/guides/testpmd_app_ug/testpmd_funcs.rst > index 9789139..8c2fd12 100644 > --- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst > +++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst > @@ -3083,6 +3083,11 @@ This section lists supported pattern items and their > attributes, if any. > >- ``vni {unsigned}``: VXLAN identifier. > > +- ``geneve``: match GENEVE header. > + > + - ``vni {unsigned}``: virtual network identifier. > + - ``protocol {unsigned}``: protocol type. > + > - ``e_tag``: match IEEE 802.1BR E-Tag header. 
> >- ``grp_ecid_b {unsigned}``: GRP and E-CID base. > -- > 2.7.4 > -- Adrien Mazarguil 6WIND
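Since the documentation hunk above only lists the item attributes, here is a hedged example of a complete testpmd flow command using the syntax this patch adds (port, queue index and VNI value are arbitrary):

testpmd> flow create 0 ingress pattern eth / ipv4 / udp / geneve vni is 42 / end actions queue index 1 / end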
[dpdk-dev] [PATCH v2 1/3] security: fix device operation type
Device operation pointers should be constant to avoid any modification while it is in use. Fixes: c261d1431bd8 ("security: introduce security API and framework") Cc: akhil.go...@nxp.com Cc: sta...@dpdk.org Signed-off-by: Nelio Laranjeiro --- lib/librte_security/rte_security.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/librte_security/rte_security.h b/lib/librte_security/rte_security.h index aa3a471a3..679c0a696 100644 --- a/lib/librte_security/rte_security.h +++ b/lib/librte_security/rte_security.h @@ -94,7 +94,7 @@ enum rte_security_ipsec_tunnel_type { struct rte_security_ctx { void *device; /**< Crypto/ethernet device attached */ - struct rte_security_ops *ops; + const struct rte_security_ops *ops; /**< Pointer to security ops for the device */ uint16_t sess_cnt; /**< Number of sessions attached to this context */ -- 2.11.0
[dpdk-dev] [PATCH v2 2/3] crypto: fix pedantic compilation errors
/root/dpdk/x86_64-native-linuxapp-gcc/include/rte_crypto.h:126:28: error: ISO C forbids zero-size array ‘sym’ [-Werror=pedantic] struct rte_crypto_sym_op sym[0]; ^~~ Zero-size array is an extension to the language it cannot be replaced by a empty size array i.e. [] because structure is inside an union. Fixes: d2a4223c4c6d ("cryptodev: do not store pointer to op specific params") Cc: pablo.de.lara.gua...@intel.com Cc: sta...@dpdk.org Signed-off-by: Nelio Laranjeiro --- Changes in v2: replace RTE_STD_C11 by __extension__ --- lib/librte_cryptodev/rte_crypto.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/librte_cryptodev/rte_crypto.h b/lib/librte_cryptodev/rte_crypto.h index 3d672fe7d..6f0b29732 100644 --- a/lib/librte_cryptodev/rte_crypto.h +++ b/lib/librte_cryptodev/rte_crypto.h @@ -121,7 +121,7 @@ struct rte_crypto_op { rte_iova_t phys_addr; /**< physical address of crypto operation */ - RTE_STD_C11 + __extension__ union { struct rte_crypto_sym_op sym[0]; /**< Symmetric operation parameters */ -- 2.11.0
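For readers unfamiliar with the constraint described in the commit message, a standalone illustration (not the DPDK header itself) of why the zero-size array has to stay and only the marker changes: a flexible array member (sym[]) is only legal as the last member of a struct, not directly inside a union, so the GNU zero-length-array extension is kept and wrapped in __extension__ to silence -pedantic unconditionally.

#include <stdint.h>

struct sym_op_example {
	uint32_t data;
};

struct op_example {
	uint64_t phys_addr;
	__extension__
	union {
		struct sym_op_example sym[0]; /* zero-length array: GNU extension */
		uint8_t raw[0];
	};
};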
[dpdk-dev] [PATCH v2 3/3] security: fix pedantic compilation
/root/dpdk/x86_64-native-linuxapp-gcc/include/rte_security.h:229:8: error: struct has no members [-Werror=pedantic] struct rte_security_macsec_xform { ^ /root/dpdk/x86_64-native-linuxapp-gcc/include/rte_security.h:453:3: error: struct has no members [-Werror=pedantic] struct { ^~ Fixes: c261d1431bd8 ("security: introduce security API and framework") Cc: akhil.go...@nxp.com Cc: sta...@dpdk.org Signed-off-by: Nelio Laranjeiro --- lib/librte_security/rte_security.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/librte_security/rte_security.h b/lib/librte_security/rte_security.h index 679c0a696..bcce7a189 100644 --- a/lib/librte_security/rte_security.h +++ b/lib/librte_security/rte_security.h @@ -228,6 +228,7 @@ struct rte_security_ipsec_xform { */ struct rte_security_macsec_xform { /** To be Filled */ + int dummy; }; /** @@ -471,6 +472,7 @@ struct rte_security_capability { /**< IPsec capability */ struct { /* To be Filled */ + int dummy; } macsec; /**< MACsec capability */ }; -- 2.11.0
Re: [dpdk-dev] [PATCH 0/2] ethdev: add GENEVE to flow API
Hi Adrien, On 11/23/2017 12:39 PM, Adrien Mazarguil wrote: Hi Andrew, On Mon, Nov 20, 2017 at 08:21:59AM +, Andrew Rybchenko wrote: enum rte_flow_item_type states that items matching protocol headers must be stacked in the same order as the protocol layers to match. As the result the patch changes ABI since Geneve is added just after VXLAN (the closest protocol). In fact as far as I can see many items do not follow the requirement already. May be the comment/requirement should be removed and GENEVE should be added at the end of the list. If so, should be keep it just after VXLAN in all other places or move after ESP as well? Perhaps documentation is unclear, this requirement only applies to applications when constructing patterns out of those items (e.g. to make sense, TCP is supposed to come after IPv4, not before). New item/action definitions must obviously be added at the end of both lists to avoid ABI breakage, there is no specific order to follow other than that. What may have confused you is most of them are apparently ordered by protocol layer, that's because those are here from day one; it's not the case anymore starting with E_TAG, which was added much later. Besides addressing the ABI breakage, I don't see any issue with adding GENEVE to rte_flow, I only have a few more comments on subsequent patches in the series. Otherwise good job, looks like you didn't miss anything. Many thinks for review and clarification. We'll process review notes and send v2. -- Andrew
[dpdk-dev] [PATCH 2/3] lib/librte_flow_classify: add run API for flow classification
This patch extends the flow classification library by adding run api. This function classifies the packets based on the flow rules stored in the classifier table. During lookup operation, the table entry is identified on lookup hit and based on meta-data stored at table entry, actions are performed on the current packet. The meta-information about the actions stored in the table entry is determined from the actions fields specified in flow rules. Signed-off-by: Jasvinder Singh --- lib/librte_flow_classify/rte_flow_classify.c | 56 ++ lib/librte_flow_classify/rte_flow_classify.h | 24 ++ .../rte_flow_classify_version.map | 1 + 3 files changed, 81 insertions(+) diff --git a/lib/librte_flow_classify/rte_flow_classify.c b/lib/librte_flow_classify/rte_flow_classify.c index ff1bc86..5433bfe 100644 --- a/lib/librte_flow_classify/rte_flow_classify.c +++ b/lib/librte_flow_classify/rte_flow_classify.c @@ -37,6 +37,9 @@ #include #include +#define RTE_PKT_METADATA_PTR(pkt, offset) \ + (&((uint32_t *)(pkt))[offset]) + int librte_flow_classify_logtype; static uint32_t unique_id = 1; @@ -674,6 +677,59 @@ action_apply(struct rte_flow_classifier *cls, } int +rte_flow_classifier_run(struct rte_flow_classifier *cls, + struct rte_mbuf **pkts, + const uint16_t nb_pkts, + uint32_t pkt_offset) +{ + struct rte_flow_action_mark *mark; + struct classify_action *action; + uint64_t pkts_mask = RTE_LEN2MASK(nb_pkts, uint64_t); + uint64_t action_mask; + uint32_t *ptr, i, j; + int ret = -EINVAL; + + if (!cls || !pkts || nb_pkts == 0) + return ret; + + for (i = 0; i < cls->num_tables; i++) { + if (cls->table_mask & (1LU << i)) { + struct rte_cls_table *table = &cls->tables[i]; + uint64_t lookup_hit_mask; + + ret = table->ops.f_lookup(table->h_table, + pkts, pkts_mask, &lookup_hit_mask, + (void **)cls->entries); + if (ret) + return ret; + + if (lookup_hit_mask) { + for (j = 0; j < nb_pkts; j++) { + uint64_t pkt_mask = 1LLU << j; + + if ((lookup_hit_mask & pkt_mask) == 0) + continue; + /* Meta-data */ + enum rte_flow_action_type act_type = + RTE_FLOW_ACTION_TYPE_MARK; + action = &cls->entries[j]->action; + action_mask = action->action_mask; + + if (action_mask & (1LLU << act_type)) { + mark = &action->act.mark; + ptr = RTE_PKT_METADATA_PTR( + pkts[j], pkt_offset); + *ptr = mark->id; + } + } + } + } + } + + return 0; +} + +int rte_flow_classifier_query(struct rte_flow_classifier *cls, struct rte_mbuf **pkts, const uint16_t nb_pkts, diff --git a/lib/librte_flow_classify/rte_flow_classify.h b/lib/librte_flow_classify/rte_flow_classify.h index b9b669f..b74bd11 100644 --- a/lib/librte_flow_classify/rte_flow_classify.h +++ b/lib/librte_flow_classify/rte_flow_classify.h @@ -273,6 +273,30 @@ rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls, struct rte_flow_classify_rule *rule); /** + * Flow classifier run. + * + * As a result of lookup operation, flow classifer idenfies the + * table entries that are hit and executes the actions on the packets. + * + * @param[in] cls + * Flow classifier handle + * @param[in] pkts + * Pointer to packets to process + * @param[in] nb_pkts + * Number of packets to process + * @param[in] pkt_offset + *Offset to store action metadata in the mbuf headroom + * + * @return + * 0 on success, error code otherwise. + */ +int +rte_flow_classifier_run(struct rte_flow_classifier *cls, + struct rte_mbuf **pkts, + const uint16_t nb_pkts, + uint32_t pkt_offset); + +/** * Query flow classifier for given rule. 
* * @param[in] cls diff --git a/lib/librte_flow_classify/rte_flow_classify_version.map b/lib/librte_flow_classify/rte_flow_classify_version.map index 49bc25c..b51cb1a 100644 --- a/lib/librte_flow_classify/rte_flow_classify_version.map +++ b/lib/librte_flow_classify/rte_flow_classify_version.map @@ -4,6 +4,7 @@ EXPERIMENTAL {
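A hedged usage sketch (application side, not part of this patch) showing how the run API and the stored MARK metadata fit together; the metadata location follows the RTE_PKT_METADATA_PTR() convention above, i.e. pkt_offset is an index in 32-bit words from the start of the mbuf that the application is assumed to have reserved for this purpose.

#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_flow_classify.h>

static void
classify_burst(struct rte_flow_classifier *cls, struct rte_mbuf **pkts,
	       uint16_t nb_rx, uint32_t pkt_offset)
{
	uint16_t i;

	if (rte_flow_classifier_run(cls, pkts, nb_rx, pkt_offset) != 0)
		return;
	for (i = 0; i < nb_rx; i++) {
		/* Valid only for packets that hit a rule with a MARK action. */
		uint32_t mark_id = ((uint32_t *)pkts[i])[pkt_offset];
		(void)mark_id;
	}
}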
[dpdk-dev] [PATCH 1/3] lib/librte_flow_classify: remove table id parameter from apis
This patch removes table id parameter from all the flow classify apis to reduce the complexity and and does some cleanup of the code. The validate api has been exposed as public api to allows user to validate the flow before adding it to the classifier. The sample app and unit tests have been updated to accomodate the apis changes. Signed-off-by: Jasvinder Singh --- examples/flow_classify/flow_classify.c | 27 +- lib/librte_flow_classify/rte_flow_classify.c | 320 +++-- lib/librte_flow_classify/rte_flow_classify.h | 74 +++-- lib/librte_flow_classify/rte_flow_classify_parse.c | 118 lib/librte_flow_classify/rte_flow_classify_parse.h | 16 +- .../rte_flow_classify_version.map | 1 + test/test/test_flow_classify.c | 86 +++--- test/test/test_flow_classify.h | 10 +- 8 files changed, 365 insertions(+), 287 deletions(-) diff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c index 766f1dd..37e6904 100644 --- a/examples/flow_classify/flow_classify.c +++ b/examples/flow_classify/flow_classify.c @@ -94,7 +94,6 @@ static const struct rte_eth_conf port_conf_default = { struct flow_classifier { struct rte_flow_classifier *cls; - uint32_t table_id[RTE_FLOW_CLASSIFY_TABLE_MAX]; }; struct flow_classifier_acl { @@ -195,7 +194,15 @@ static struct rte_flow_item end_item = { RTE_FLOW_ITEM_TYPE_END, /* sample actions: * "actions count / end" */ -static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT, 0}; +struct rte_flow_query_count count = { + .reset = 1, + .hits_set = 1, + .bytes_set = 1, + .hits = 0, + .bytes = 0, +}; +static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT, + &count}; static struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0}; static struct rte_flow_action actions[2]; @@ -274,7 +281,7 @@ lcore_main(struct flow_classifier *cls_app) int i = 0; ret = rte_flow_classify_table_entry_delete(cls_app->cls, - cls_app->table_id[0], rules[7]); + rules[7]); if (ret) printf("table_entry_delete failed [7] %d\n\n", ret); else @@ -317,7 +324,6 @@ lcore_main(struct flow_classifier *cls_app) if (rules[i]) { ret = rte_flow_classifier_query( cls_app->cls, - cls_app->table_id[0], bufs, nb_rx, rules[i], &classify_stats); if (ret) @@ -635,8 +641,8 @@ add_classify_rule(struct rte_eth_ntuple_filter *ntuple_filter, actions[1] = end_action; rule = rte_flow_classify_table_entry_add( - cls_app->cls, cls_app->table_id[0], &key_found, - &attr, pattern_ipv4_5tuple, actions, &error); + cls_app->cls, &attr, pattern_ipv4_5tuple, + actions, &key_found, &error); if (rule == NULL) { printf("table entry add failed ipv4_proto = %u\n", ipv4_proto); @@ -809,7 +815,6 @@ main(int argc, char *argv[]) cls_params.name = "flow_classifier"; cls_params.socket_id = socket_id; - cls_params.type = RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL; cls_app->cls = rte_flow_classifier_create(&cls_params); if (cls_app->cls == NULL) { @@ -824,11 +829,11 @@ main(int argc, char *argv[]) memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs)); /* initialise table create params */ - cls_table_params.ops = &rte_table_acl_ops, - cls_table_params.arg_create = &table_acl_params, + cls_table_params.ops = &rte_table_acl_ops; + cls_table_params.arg_create = &table_acl_params; + cls_table_params.type = RTE_FLOW_CLASSIFY_TABLE_ACL_IP4_5TUPLE; - ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params, - &cls_app->table_id[0]); + ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params); if (ret) { rte_flow_classifier_free(cls_app->cls); rte_free(cls_app); 
diff --git a/lib/librte_flow_classify/rte_flow_classify.c b/lib/librte_flow_classify/rte_flow_classify.c index e6f4486..ff1bc86 100644 --- a/lib/librte_flow_classify/rte_flow_classify.c +++ b/lib/librte_flow_classify/rte_flow_classify.c @@ -39,16 +39,20 @@ int librte_flow_classify_logtype; -static struct rte_eth_ntuple_filter ntuple_filter; static uint32_t unique_id = 1; +enum rte_flow_classify_table_type table_type + = RTE_FLOW_CLASSIFY_TABLE_TYPE_NONE; struct rte_flow_classify_table_entry { /* meta-dat
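For reference, a hedged sketch of the updated control-path sequence after this change, mirroring the sample application hunk above: the table id argument is gone and key_found moves after the actions. The separate validate call mentioned in the commit message is assumed here to take the same attr/pattern/actions arguments.

#include <rte_flow_classify.h>

/* attr, pattern and actions are built exactly as for rte_flow. */
static struct rte_flow_classify_rule *
add_rule(struct rte_flow_classifier *cls,
	 const struct rte_flow_attr *attr,
	 const struct rte_flow_item pattern[],
	 const struct rte_flow_action actions[],
	 struct rte_flow_error *error)
{
	int key_found;

	/* Optional pre-check with the newly public validate API (assumed signature). */
	if (rte_flow_classify_validate(cls, attr, pattern, actions, error))
		return NULL;
	return rte_flow_classify_table_entry_add(cls, attr, pattern, actions,
						 &key_found, error);
}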
[dpdk-dev] [PATCH 3/3] doc: update documentation for flow classify lib
Updates the documentation for flow classification library and sample application. Signed-off-by: Jasvinder Singh --- doc/guides/prog_guide/flow_classify_lib.rst | 79 + doc/guides/sample_app_ug/flow_classify.rst | 8 +-- 2 files changed, 49 insertions(+), 38 deletions(-) diff --git a/doc/guides/prog_guide/flow_classify_lib.rst b/doc/guides/prog_guide/flow_classify_lib.rst index 820dc72..519117c 100644 --- a/doc/guides/prog_guide/flow_classify_lib.rst +++ b/doc/guides/prog_guide/flow_classify_lib.rst @@ -101,30 +101,26 @@ The library has the following API's * Handle to flow classifier instance * @param params * Parameters for flow_classify table creation - * @param table_id - * Table ID. Valid only within the scope of table IDs of the current - * classifier. Only returned after a successful invocation. * @return * 0 on success, error code otherwise */ int rte_flow_classify_table_create(struct rte_flow_classifier *cls, - struct rte_flow_classify_table_params *params, - uint32_t *table_id); + struct rte_flow_classify_table_params *params); /** * Add a flow classify rule to the flow_classifier table. * * @param[in] cls * Flow classifier handle - * @param[in] table_id - * id of table * @param[in] attr * Flow rule attributes * @param[in] pattern * Pattern specification (list terminated by the END pattern item). * @param[in] actions * Associated actions (list terminated by the END pattern item). + * @param[out] key_found + * returns 1 if rule present already, 0 otherwise. * @param[out] error * Perform verbose error reporting if not NULL. Structure * initialised in case of error only. @@ -133,10 +129,10 @@ The library has the following API's */ struct rte_flow_classify_rule * rte_flow_classify_table_entry_add(struct rte_flow_classifier *cls, -uint32_t table_id, const struct rte_flow_attr *attr, const struct rte_flow_item pattern[], const struct rte_flow_action actions[], +int *key_found; struct rte_flow_error *error); /** @@ -144,8 +140,6 @@ The library has the following API's * * @param[in] cls * Flow classifier handle - * @param[in] table_id - * id of table * @param[in] rule * Flow classify rule * @return @@ -153,16 +147,37 @@ The library has the following API's */ int rte_flow_classify_table_entry_delete(struct rte_flow_classifier *cls, -uint32_t table_id, struct rte_flow_classify_rule *rule); /** + * Flow classifier run. + * + * As a result of lookup operation, flow classifer idenfies the + * table entries that are hit and executes the actions on the packets. + * + * @param[in] cls + * Flow classifier handle + * @param[in] pkts + * Pointer to packets to process + * @param[in] nb_pkts + * Number of packets to process + * @param[in] pkt_offset + *Offset to store action metadata in the mbuf headroom + * + * @return + * 0 on success, error code otherwise. + */ + int + rte_flow_classifier_run(struct rte_flow_classifier *cls, + struct rte_mbuf **pkts, + const uint16_t nb_pkts, + uint32_t pkt_offset); + +/** * Query flow classifier for given rule. * * @param[in] cls * Flow classifier handle - * @param[in] table_id - * id of table * @param[in] pkts * Pointer to packets to process * @param[in] nb_pkts @@ -177,7 +192,6 @@ The library has the following API's */ int rte_flow_classifier_query(struct rte_flow_classifier *cls, -uint32_t table_id, struct rte_mbuf **pkts, const uint16_t nb_pkts, struct rte_flow_classify_rule *rule, @@ -200,16 +214,13 @@ application before calling the API. 
/** CPU socket ID where memory for the flow classifier and its */ /** elements (tables) should be allocated */ int socket_id; - -/** Table type */ -enum rte_flow_classify_table_type type; }; The ``Classifier`` has the following internal structures: .. code-block:: c -struct rte_table { +struct rte_cls_table { /* Input parameters */ struct rte_table_ops ops; uint32_t entry_size; @@ -225,11 +236,16 @@ The ``Classifier`` has the following internal structures: /* Input parameters */ char name[RTE_FLOW_CLASSIFIER_MAX_NAME_SZ]; int socket_id; -enum rte_flow_classify_table_type type; -/* Internal tables */ -struct rte_table tables[RTE_FLOW_CLASSIFY_TABLE_MAX]; +/* Internal */ +/* ntuple_fliter */ +st
[dpdk-dev] [PATCH v3 2/2] examples/ipsec-secgw: add support for inline protocol
Adding support for inline protocol processing In ingress side, application will receive regular IP packets, without any IPsec related info. Application will do a selector check (SP-SA check) by making use of the metadata from the packet. In egress side, the plain packet would be submitted to the driver. The packet will have optional metadata, which could be used to identify the security session associated with the packet. Signed-off-by: Anoob Joseph --- v3: * Using (void *)userdata instead of 64 bit metadata in conf * Changes parallel to the change in API v2: * Using get_pkt_metadata API instead of get_session & get_cookie APIs examples/ipsec-secgw/esp.c | 6 +- examples/ipsec-secgw/ipsec-secgw.c | 40 +++- examples/ipsec-secgw/ipsec.c | 121 +++-- 3 files changed, 145 insertions(+), 22 deletions(-) diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c index c3efe52..561f873 100644 --- a/examples/ipsec-secgw/esp.c +++ b/examples/ipsec-secgw/esp.c @@ -178,7 +178,8 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa, RTE_ASSERT(sa != NULL); RTE_ASSERT(cop != NULL); - if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { + if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) || + (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) { if (m->ol_flags & PKT_RX_SEC_OFFLOAD) { if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED) cop->status = RTE_CRYPTO_OP_STATUS_ERROR; @@ -474,7 +475,8 @@ esp_outbound_post(struct rte_mbuf *m, RTE_ASSERT(m != NULL); RTE_ASSERT(sa != NULL); - if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) { + if ((sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL) || + (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)) { m->ol_flags |= PKT_TX_SEC_OFFLOAD; } else { RTE_ASSERT(cop != NULL); diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index c98454a..c79e1c2 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -265,6 +265,38 @@ prepare_one_packet(struct rte_mbuf *pkt, struct ipsec_traffic *t) RTE_LOG(ERR, IPSEC, "Unsupported packet type\n"); rte_pktmbuf_free(pkt); } + + /* Check if the packet has been processed inline. For inline protocol +* processed packets, metadata from the packet need to be obtained. +* This metadata will be the application registered "userdata" of the +* security session which processed the packet. +*/ + + if (pkt->ol_flags & PKT_RX_SEC_OFFLOAD) { + struct ipsec_sa *sa; + struct ipsec_mbuf_metadata *priv; + struct rte_security_ctx *ctx = (struct rte_security_ctx *) + rte_eth_dev_get_sec_ctx( + pkt->port); + + /* Get metadata from the packet. This will return application +* registered userdata of the security session which processed +* the packet. Here, the userdata registered is the SA pointer. +*/ + sa = (struct ipsec_sa *)rte_security_get_pkt_metadata(ctx, pkt); + + if (sa == NULL) { + /* userdata could not be retrieved */ + return; + } + + /* Save SA as priv member in mbuf. This will be used in the +* IPsec selector(SP-SA) check. 
+*/ + + priv = get_priv(pkt); + priv->sa = sa; + } } static inline void @@ -401,11 +433,17 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip, ip->pkts[j++] = m; continue; } - if (res & DISCARD || i < lim) { + if (res & DISCARD) { rte_pktmbuf_free(m); continue; } + /* Only check SPI match for processed IPSec packets */ + if (i < lim && ((m->ol_flags & PKT_RX_SEC_OFFLOAD) == 0)) { + rte_pktmbuf_free(m); + continue; + } + sa_idx = ip->res[i] & PROTECT_MASK; if (sa_idx == 0 || !inbound_sa_check(sa, m, sa_idx)) { rte_pktmbuf_free(m); diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c index 70ed227..3ad3692 100644 --- a/examples/ipsec-secgw/ipsec.c +++ b/examples/ipsec-secgw/ipsec.c @@ -46,6 +46,27 @@ #include "ipsec.h" #include "esp.h" +static inline void +set_ipsec_conf(struct ipsec_sa *sa, struct rte_security_ipsec_xform *ipsec) +{ + if (ipsec->mode == RTE_SECU
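For readers following the ingress path in the patch above, the per-packet lookup it performs can be condensed into the sketch below. This is an illustrative helper, not code from the patch; the helper name and error handling are invented, and struct ipsec_sa comes from the example application's own ipsec.h.

#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_security.h>
#include "ipsec.h"	/* ipsec-secgw's struct ipsec_sa */

/* Return the SA registered as "userdata" of the security session that
 * inline-processed this packet, or NULL if the packet was not processed
 * inline or the userdata cannot be retrieved. */
static struct ipsec_sa *
lookup_inline_sa(struct rte_mbuf *pkt)
{
	struct rte_security_ctx *ctx;

	if (!(pkt->ol_flags & PKT_RX_SEC_OFFLOAD))
		return NULL;	/* plain packet, no inline processing */
	ctx = (struct rte_security_ctx *)rte_eth_dev_get_sec_ctx(pkt->port);
	if (ctx == NULL)
		return NULL;
	/* userdata was registered as the SA pointer at session creation */
	return (struct ipsec_sa *)rte_security_get_pkt_metadata(ctx, pkt);
}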
[dpdk-dev] [PATCH v3 0/2] add inline protocol support
The series adds inline protocol support in ipsec-secgw application. First patch introduces changes in lib to enable applications to save a (void *) pointer as "userdata" in security session. For inline processed packets, application could call "rte_security_get_pkt_metadata" API to retrieve this application registered userdata from the packet. API will return the userdata associated with the security session which processed the packet. This is primarily required for inline protocol processed ingress packets. For such packets, the packet may not have enough information to identify the security parameters with which the packet was processed. Application can register what it needs to identify the required parameter. The userdata will be set while creating the security session. Second patch adds support for inline protocol in ipsec-secgw application. Anoob Joseph (2): lib/security: add support for get metadata examples/ipsec-secgw: add support for inline protocol examples/ipsec-secgw/esp.c| 6 +- examples/ipsec-secgw/ipsec-secgw.c| 40 +- examples/ipsec-secgw/ipsec.c | 121 +- lib/librte_security/rte_security.c| 13 lib/librte_security/rte_security.h| 19 + lib/librte_security/rte_security_driver.h | 16 6 files changed, 193 insertions(+), 22 deletions(-) -- 2.7.4
[dpdk-dev] [PATCH v3 1/2] lib/security: add support for get metadata
In case of inline protocol processed ingress traffic, the packet may not have enough information to determine the security parameters with which the packet was processed. In such cases, application could get metadata from the packet which could be used to identify the security parameters with which the packet was processed. Signed-off-by: Anoob Joseph --- v3: * Replaced 64 bit metadata in conf with (void *)userdata * The API(rte_security_get_pkt_metadata) would return void * instead of uint64_t v2: * Replaced get_session and get_cookie APIs with get_pkt_metadata API lib/librte_security/rte_security.c| 13 + lib/librte_security/rte_security.h| 19 +++ lib/librte_security/rte_security_driver.h | 16 3 files changed, 48 insertions(+) diff --git a/lib/librte_security/rte_security.c b/lib/librte_security/rte_security.c index 1227fca..a1d78b6 100644 --- a/lib/librte_security/rte_security.c +++ b/lib/librte_security/rte_security.c @@ -108,6 +108,19 @@ rte_security_set_pkt_metadata(struct rte_security_ctx *instance, sess, m, params); } +void * +rte_security_get_pkt_metadata(struct rte_security_ctx *instance, + struct rte_mbuf *pkt) +{ + void *md = NULL; + + RTE_FUNC_PTR_OR_ERR_RET(*instance->ops->get_pkt_metadata, NULL); + if (instance->ops->get_pkt_metadata(instance->device, pkt, &md)) + return NULL; + + return md; +} + const struct rte_security_capability * rte_security_capabilities_get(struct rte_security_ctx *instance) { diff --git a/lib/librte_security/rte_security.h b/lib/librte_security/rte_security.h index 653929b..35c306a 100644 --- a/lib/librte_security/rte_security.h +++ b/lib/librte_security/rte_security.h @@ -274,6 +274,8 @@ struct rte_security_session_conf { /**< Configuration parameters for security session */ struct rte_crypto_sym_xform *crypto_xform; /**< Security Session Crypto Transformations */ + void *userdata; + /**< Application specific metadata */ }; struct rte_security_session { @@ -346,6 +348,23 @@ rte_security_set_pkt_metadata(struct rte_security_ctx *instance, struct rte_mbuf *mb, void *params); /** + * Get metadata from the packet. This returns metadata associated with the + * security session which processed the packet. + * + * This is valid only for inline processed ingress packets. + * + * @param instance security instance + * @param pktpacket mbuf + * + * @return + * - On success, metadata + * - On failure, NULL + */ +void * +rte_security_get_pkt_metadata(struct rte_security_ctx *instance, + struct rte_mbuf *pkt); + +/** * Attach a session to a symmetric crypto operation * * @param sym_op crypto operation diff --git a/lib/librte_security/rte_security_driver.h b/lib/librte_security/rte_security_driver.h index 997fbe7..561ae83 100644 --- a/lib/librte_security/rte_security_driver.h +++ b/lib/librte_security/rte_security_driver.h @@ -122,6 +122,20 @@ typedef int (*security_set_pkt_metadata_t)(void *device, void *params); /** + * Get metadata from the packet. + * + * @param device Crypto/eth device pointer + * @param pkt Packet mbuf + * @param mt Pointer to receive metadata + * + * @return + * - Returns 0 if metadata is retrieved successfully. + * - Returns -ve value for errors. + */ +typedef int (*security_get_pkt_metadata_t)(void *device, + struct rte_mbuf *pkt, void **md); + +/** * Get security capabilities of the device. * * @param device crypto/eth device pointer @@ -145,6 +159,8 @@ struct rte_security_ops { /**< Clear a security sessions private data. */ security_set_pkt_metadata_t set_pkt_metadata; /**< Update mbuf metadata. 
*/ + security_get_pkt_metadata_t get_pkt_metadata; + /**< Get metadata from packet. */ security_capabilities_get_t capabilities_get; /**< Get security capabilities. */ }; -- 2.7.4
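To make the round trip of the new API concrete, here is a minimal sketch of how an application would pair the new conf->userdata field with rte_security_get_pkt_metadata(). The helper names and the app_sa cookie are illustrative; session parameters are elided.

#include <rte_mbuf.h>
#include <rte_mempool.h>
#include <rte_security.h>

/* At SA setup time: register an application cookie with the session. */
static struct rte_security_session *
create_session_with_userdata(struct rte_security_ctx *ctx,
			     struct rte_security_session_conf *conf,
			     struct rte_mempool *sess_mp, void *app_sa)
{
	conf->userdata = app_sa;	/* e.g. pointer to the application's SA */
	return rte_security_session_create(ctx, conf, sess_mp);
}

/* At Rx time: recover that cookie from an inline-processed packet. */
static void *
session_userdata_of(struct rte_security_ctx *ctx, struct rte_mbuf *pkt)
{
	if (!(pkt->ol_flags & PKT_RX_SEC_OFFLOAD))
		return NULL;
	return rte_security_get_pkt_metadata(ctx, pkt);
}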
[dpdk-dev] [PATCH 0/6] convert mlx PMDs to new ethdev offloads API
This series is to convert mlx4 and mlx5 PMDs to the new offloads API [1]. [1] http://dpdk.org/ml/archives/dev/2017-October/077329.html Shahaf Shuler (6): net/mlx5: store PMD args in private structure net/mlx5: convert to new Tx offloads API net/mlx5: convert to new Rx offloads API net/mlx5: fix VLAN configuration after port stop net/mlx4: convert to new Tx offloads API net/mlx4: convert to new Rx offloads API doc/guides/nics/mlx5.rst | 12 +-- drivers/net/mlx4/mlx4.c | 20 drivers/net/mlx4/mlx4_ethdev.c | 17 +--- drivers/net/mlx4/mlx4_flow.c | 5 +- drivers/net/mlx4/mlx4_rxq.c | 78 ++- drivers/net/mlx4/mlx4_rxtx.h | 3 + drivers/net/mlx4/mlx4_txq.c | 66 - drivers/net/mlx5/mlx5.c | 176 ++ drivers/net/mlx5/mlx5.h | 24 +++-- drivers/net/mlx5/mlx5_ethdev.c | 55 ++- drivers/net/mlx5/mlx5_rxq.c | 108 + drivers/net/mlx5/mlx5_rxtx.h | 6 +- drivers/net/mlx5/mlx5_rxtx_vec.c | 30 -- drivers/net/mlx5/mlx5_txq.c | 95 +++--- drivers/net/mlx5/mlx5_vlan.c | 8 +- 15 files changed, 517 insertions(+), 186 deletions(-) -- 2.12.0
[dpdk-dev] [PATCH 1/6] net/mlx5: store PMD args in private structure
The PMD has several specific parameters set by the application. The current implementation parse those args as part of the probe stage, however the args value as set by the application are not stored in any place. This patch stores the parameters set by the application in the PMD private structure. This in order to provide an infrastructure for dynamic Tx and Rx burst callback changes based on application offloads selection. Signed-off-by: Shahaf Shuler Acked-by: Nelio Laranjeiro --- drivers/net/mlx5/mlx5.c | 151 +++--- drivers/net/mlx5/mlx5.h | 24 -- drivers/net/mlx5/mlx5_ethdev.c | 10 +-- drivers/net/mlx5/mlx5_rxq.c | 6 +- drivers/net/mlx5/mlx5_rxtx_vec.c | 10 +-- drivers/net/mlx5/mlx5_txq.c | 27 +++--- 6 files changed, 126 insertions(+), 102 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 0548d17ad..be21c72e8 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -106,17 +106,6 @@ #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4) #endif -struct mlx5_args { - int cqe_comp; - int txq_inline; - int txqs_inline; - int mps; - int mpw_hdr_dseg; - int inline_max_packet_sz; - int tso; - int tx_vec_en; - int rx_vec_en; -}; /** * Retrieve integer value from environment variable. * @@ -489,35 +478,91 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs) static struct rte_pci_driver mlx5_driver; /** - * Assign parameters from args into priv, only non default - * values are considered. + * Update parameters from application configuration. * - * @param[out] priv + * @param[in/out] priv * Pointer to private structure. - * @param[in] args - * Pointer to args values. */ static void -mlx5_args_assign(struct priv *priv, struct mlx5_args *args) +mlx5_args_update(struct priv *priv) { - if (args->cqe_comp != MLX5_ARG_UNSET) - priv->cqe_comp = args->cqe_comp; - if (args->txq_inline != MLX5_ARG_UNSET) - priv->txq_inline = args->txq_inline; - if (args->txqs_inline != MLX5_ARG_UNSET) - priv->txqs_inline = args->txqs_inline; - if (args->mps != MLX5_ARG_UNSET) - priv->mps = args->mps ? priv->mps : 0; - if (args->mpw_hdr_dseg != MLX5_ARG_UNSET) - priv->mpw_hdr_dseg = args->mpw_hdr_dseg; - if (args->inline_max_packet_sz != MLX5_ARG_UNSET) - priv->inline_max_packet_sz = args->inline_max_packet_sz; - if (args->tso != MLX5_ARG_UNSET) - priv->tso = args->tso; - if (args->tx_vec_en != MLX5_ARG_UNSET) - priv->tx_vec_en = args->tx_vec_en; - if (args->rx_vec_en != MLX5_ARG_UNSET) - priv->rx_vec_en = args->rx_vec_en; + struct mlx5_args *args_def = &priv->args_default; + struct mlx5_args *args = &priv->args; + + if (args_def->cqe_comp != MLX5_ARG_UNSET) { + if (!priv->cqe_comp && args_def->cqe_comp) { + WARN("Rx CQE compression is not supported"); + args_def->cqe_comp = 0; + } + args->cqe_comp = args_def->cqe_comp; + } else { + args->cqe_comp = priv->cqe_comp; + } + if (args_def->tso != MLX5_ARG_UNSET) { + if (!priv->tso && args_def->tso) { + WARN("TSO is not supported"); + args_def->tso = 0; + } + args->tso = args_def->tso; + } else { + args->tso = 0; + } + if (args_def->mps != MLX5_ARG_UNSET) { + if (!priv->mps && args_def->mps) { + WARN("multi-packet send not supported"); + args_def->mps = MLX5_MPW_DISABLED; + } + if (args->tso && args_def->mps) { + WARN("multi-packet send not supported in conjunction " + "with TSO. MPS disabled"); + args->mps = MLX5_MPW_DISABLED; + } else { + args->mps = args_def->mps ? 
priv->mps : + MLX5_MPW_DISABLED; + } + } else { + if (args->tso) + args->mps = MLX5_MPW_DISABLED; + else + args->mps = priv->mps; + } + if (args_def->txq_inline != MLX5_ARG_UNSET) { + args->txq_inline = args_def->txq_inline; + } else { + if (args->mps == MLX5_MPW_ENHANCED) + args->txq_inline = MLX5_WQE_SIZE_MAX - + MLX5_WQE_SIZE; + else + args->txq_inline = 0; + } + if (args_def->txqs_inline != MLX5_ARG_UNSET) { + args->txqs_inline = args_def->txqs_inline; + } else { + if (args-
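The pattern introduced here is "keep the user's devargs untouched, derive the effective values at configure time". A minimal, hedged sketch of that pattern follows; the struct and function names are illustrative, not the actual mlx5 symbols, and only two parameters are shown.

#define ARG_UNSET (-1)

struct pmd_args {
	int cqe_comp;
	int tso;
};

struct pmd_priv {
	int hw_cqe_comp;		/* capability reported by the device */
	int hw_tso;
	struct pmd_args args_default;	/* exactly as parsed from devargs */
	struct pmd_args args;		/* effective values, re-derived on configure */
};

static void
pmd_args_update(struct pmd_priv *priv)
{
	struct pmd_args *def = &priv->args_default;
	struct pmd_args *eff = &priv->args;

	/* Honour an explicit user request only when the HW supports it;
	 * otherwise fall back to the device default. */
	eff->cqe_comp = (def->cqe_comp != ARG_UNSET) ?
			(def->cqe_comp && priv->hw_cqe_comp) : priv->hw_cqe_comp;
	eff->tso = (def->tso != ARG_UNSET) ? (def->tso && priv->hw_tso) : 0;
}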
[dpdk-dev] [PATCH 2/6] net/mlx5: convert to new Tx offloads API
Ethdev Tx offloads API has changed since: commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new Tx offloads API. Signed-off-by: Shahaf Shuler --- doc/guides/nics/mlx5.rst | 12 ++ drivers/net/mlx5/mlx5.c | 49 +++ drivers/net/mlx5/mlx5.h | 2 +- drivers/net/mlx5/mlx5_ethdev.c | 27 ++--- drivers/net/mlx5/mlx5_rxtx.h | 3 +- drivers/net/mlx5/mlx5_rxtx_vec.c | 22 --- drivers/net/mlx5/mlx5_txq.c | 74 --- 7 files changed, 129 insertions(+), 60 deletions(-) diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst index f9558da89..1942eda47 100644 --- a/doc/guides/nics/mlx5.rst +++ b/doc/guides/nics/mlx5.rst @@ -253,8 +253,10 @@ Run-time configuration Enhanced MPS supports hybrid mode - mixing inlined packets and pointers in the same descriptor. - This option cannot be used in conjunction with ``tso`` below. When ``tso`` - is set, ``txq_mpw_en`` is disabled. + This option cannot be used with certain offloads such as ``DEV_TX_OFFLOAD_TCP_TSO, + DEV_TX_OFFLOAD_VXLAN_TNL_TSO, DEV_TX_OFFLOAD_GRE_TNL_TSO, + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, DEV_TX_OFFLOAD_VLAN_INSERT``. + When those offloads enabled the mpw send function will be disabled. It is currently only supported on the ConnectX-4 Lx and ConnectX-5 families of adapters. Enabled by default. @@ -275,12 +277,6 @@ Run-time configuration Effective only when Enhanced MPS is supported. The default value is 256. -- ``tso`` parameter [int] - - A nonzero value enables hardware TSO. - When hardware TSO is enabled, packets marked with TCP segmentation - offload will be divided into segments by the hardware. Disabled by default. - - ``tx_vec_en`` parameter [int] A nonzero value enables Tx vector on ConnectX-5 only NIC if the number of diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index be21c72e8..03839271c 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -85,9 +85,6 @@ /* Device parameter to limit the size of inlining packet. */ #define MLX5_TXQ_MAX_INLINE_LEN "txq_max_inline_len" -/* Device parameter to enable hardware TSO offload. */ -#define MLX5_TSO "tso" - /* Device parameter to enable hardware Tx vector. */ #define MLX5_TX_VEC_EN "tx_vec_en" @@ -411,8 +408,6 @@ mlx5_args_check(const char *key, const char *val, void *opaque) args->mpw_hdr_dseg = !!tmp; } else if (strcmp(MLX5_TXQ_MAX_INLINE_LEN, key) == 0) { args->inline_max_packet_sz = tmp; - } else if (strcmp(MLX5_TSO, key) == 0) { - args->tso = !!tmp; } else if (strcmp(MLX5_TX_VEC_EN, key) == 0) { args->tx_vec_en = !!tmp; } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) { @@ -445,7 +440,6 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs) MLX5_TXQ_MPW_EN, MLX5_TXQ_MPW_HDR_DSEG_EN, MLX5_TXQ_MAX_INLINE_LEN, - MLX5_TSO, MLX5_TX_VEC_EN, MLX5_RX_VEC_EN, NULL, @@ -483,11 +477,22 @@ static struct rte_pci_driver mlx5_driver; * @param[in/out] priv * Pointer to private structure. */ -static void +void mlx5_args_update(struct priv *priv) { struct mlx5_args *args_def = &priv->args_default; struct mlx5_args *args = &priv->args; + uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv); + uint64_t tx_offloads = priv->dev ? 
+ priv->dev->data->dev_conf.txmode.offloads : + 0; + int tso = !!(tx_offloads & supp_tx_offloads & DEV_TX_OFFLOAD_TCP_TSO); + int vlan_insert = !!(tx_offloads & supp_tx_offloads & +DEV_TX_OFFLOAD_VLAN_INSERT); + int tunnel = !!(tx_offloads & supp_tx_offloads & + (DEV_TX_OFFLOAD_VXLAN_TNL_TSO | +DEV_TX_OFFLOAD_GRE_TNL_TSO | +DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)); if (args_def->cqe_comp != MLX5_ARG_UNSET) { if (!priv->cqe_comp && args_def->cqe_comp) { @@ -498,30 +503,28 @@ mlx5_args_update(struct priv *priv) } else { args->cqe_comp = priv->cqe_comp; } - if (args_def->tso != MLX5_ARG_UNSET) { - if (!priv->tso && args_def->tso) { - WARN("TSO is not supported"); - args_def->tso = 0; - } - args->tso = args_def->tso; - } else { - args->tso = 0; - } if (args_def->mps != MLX5_ARG_UNSET) { if (!priv->mps && args_def->mps) { WARN("multi-packet send not supported"); args_def->mps = MLX5_MPW_DISABLED; - } - if (args->tso && args_def->mps) { + } else if (tso && args_def->mp
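With the "tso" devarg removed, the equivalent request now has to come from the application through the Tx offload flags. A hedged sketch of that, assuming the transition-period ethdev API of this release; the function name and queue counts are illustrative.

#include <errno.h>
#include <string.h>
#include <rte_ethdev.h>

static int
configure_port_with_tso(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.ignore_offload_bitfield = 1;	/* use the offloads fields */
	rte_eth_dev_info_get(port_id, &dev_info);
	if (!(dev_info.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO))
		return -ENOTSUP;			/* device cannot segment */
	conf.txmode.offloads = DEV_TX_OFFLOAD_TCP_TSO;
	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}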
[dpdk-dev] [PATCH 3/6] net/mlx5: convert to new Rx offloads API
Ethdev Rx offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") This commit support the new Rx offloads API. Signed-off-by: Shahaf Shuler --- drivers/net/mlx5/mlx5_ethdev.c | 22 +--- drivers/net/mlx5/mlx5_rxq.c| 102 +++- drivers/net/mlx5/mlx5_rxtx.h | 3 ++ drivers/net/mlx5/mlx5_vlan.c | 3 +- 4 files changed, 108 insertions(+), 22 deletions(-) diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index decc6edfa..434130c28 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -580,6 +580,10 @@ dev_configure(struct rte_eth_dev *dev) !!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len; uint64_t supp_tx_offloads = mlx5_priv_get_tx_port_offloads(priv); uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; + uint64_t supp_rx_offloads = + (mlx5_get_rx_port_offloads() | +mlx5_priv_get_rx_queue_offloads(priv)); + uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; if ((tx_offloads & supp_tx_offloads) != tx_offloads) { ERROR("Some Tx offloads are not supported " @@ -587,6 +591,12 @@ dev_configure(struct rte_eth_dev *dev) tx_offloads, supp_tx_offloads); return ENOTSUP; } + if ((rx_offloads & supp_rx_offloads) != rx_offloads) { + ERROR("Some Rx offloads are not supported " + "requested 0x%lx supported 0x%lx\n", + rx_offloads, supp_rx_offloads); + return ENOTSUP; + } if (use_app_rss_key && (dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len != rss_hash_default_key_len)) { @@ -702,14 +712,10 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) info->max_rx_queues = max; info->max_tx_queues = max; info->max_mac_addrs = RTE_DIM(priv->mac); - info->rx_offload_capa = - (priv->hw_csum ? -(DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM) : -0) | - (priv->hw_vlan_strip ? DEV_RX_OFFLOAD_VLAN_STRIP : 0) | - DEV_RX_OFFLOAD_TIMESTAMP; + info->rx_queue_offload_capa = + mlx5_priv_get_rx_queue_offloads(priv); + info->rx_offload_capa = (mlx5_get_rx_port_offloads() | +info->rx_queue_offload_capa); info->tx_offload_capa = mlx5_priv_get_tx_port_offloads(priv); if (priv_get_ifname(priv, &ifname) == 0) info->if_index = if_nametoindex(ifname); diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 4298c1b4d..c714b0d8f 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -213,6 +213,75 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl) } /** + * Returns the per-queue supported offloads. + * + * @param priv + * Pointer to private structure. + * + * @return + * Supported Tx offloads. + */ +uint64_t +mlx5_priv_get_rx_queue_offloads(struct priv *priv) +{ + uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER | +DEV_RX_OFFLOAD_TIMESTAMP | +DEV_RX_OFFLOAD_JUMBO_FRAME); + + if (priv->hw_fcs_strip) + offloads |= DEV_RX_OFFLOAD_CRC_STRIP; + if (priv->hw_csum) + offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM | +DEV_RX_OFFLOAD_UDP_CKSUM | +DEV_RX_OFFLOAD_TCP_CKSUM); + if (priv->hw_vlan_strip) + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + return offloads; +} + + +/** + * Returns the per-port supported offloads. + * + * @return + * Supported Rx offloads. + */ +uint64_t +mlx5_get_rx_port_offloads(void) +{ + uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER; + + return offloads; +} + +/** + * Checks if the per-queue offload configuration is valid. + * + * @param priv + * Pointer to private structure. + * @param offloads + * Per-queue offloads configuration. + * + * @return + * 1 if the configuration is valid, 0 otherwise. 
+ */ +static int +priv_is_rx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) +{ + uint64_t port_offloads = priv->dev->data->dev_conf.rxmode.offloads; + uint64_t queue_supp_offloads = + mlx5_priv_get_rx_queue_offloads(priv); + uint64_t port_supp_offloads = mlx5_get_rx_port_offloads(); + + if ((offloads & (queue_supp_offloads | port_supp_offloads)) != +offloads) + return 0; + if (((port_offloads ^ offloads) & port_supp_offloads)) + return 0; + return 1; +} + +/** * * @param dev * Pointer to Ethernet device structure. @@ -259,6 +328,16 @@ mlx5_rx_queue_setup(stru
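The acceptance rule applied by priv_is_rx_queue_offloads_allowed() above can be restated as a standalone predicate. The sketch below only mirrors the logic visible in the diff; the helper name and parameter names are illustrative.

#include <stdint.h>

/* A queue-level offload request is valid when (a) every requested bit is
 * supported somewhere (per queue or per port) and (b) it does not disagree
 * with the port configuration for offloads that can only be set per port. */
static int
rxq_offloads_allowed(uint64_t requested, uint64_t port_conf_offloads,
		     uint64_t queue_supp, uint64_t port_supp)
{
	if ((requested & (queue_supp | port_supp)) != requested)
		return 0;
	if ((port_conf_offloads ^ requested) & port_supp)
		return 0;
	return 1;
}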
[dpdk-dev] [PATCH 4/6] net/mlx5: fix VLAN configuration after port stop
The ethdev layer has an API to configure VLAN settings on the fly, i.e. while the port is started. Calling this API when the port is stopped may cause a segmentation fault, as the related Verbs contexts have not been created yet. Fixes: 09cb5b581762 ("net/mlx5: separate DPDK from verbs Rx queue objects") Cc: nelio.laranje...@6wind.com Cc: sta...@dpdk.org Signed-off-by: Shahaf Shuler --- drivers/net/mlx5/mlx5_vlan.c | 5 + 1 file changed, 5 insertions(+) diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index dc82643fc..128c2b6b6 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -127,6 +127,11 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) DEBUG("set VLAN offloads 0x%x for port %d queue %d", vlan_offloads, rxq->port_id, idx); + if (!rxq_ctrl->ibv) { + /* Update related bits in RX queue. */ + rxq->vlan_strip = !!on; + return; + } mod = (struct ibv_wq_attr){ .attr_mask = IBV_WQ_ATTR_FLAGS, .flags_mask = IBV_WQ_FLAGS_CVLAN_STRIPPING, -- 2.12.0
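A short usage sketch of the case this fix covers: toggling per-queue VLAN stripping through ethdev while the port is configured but not yet started. The queue index and helper name are illustrative.

#include <rte_ethdev.h>

static int
enable_vlan_strip_before_start(uint16_t port_id, uint16_t rx_queue_id)
{
	/* With this fix, mlx5 only records the setting here and applies it
	 * once the Verbs Rx queue objects exist (i.e. at port start). */
	return rte_eth_dev_set_vlan_strip_on_queue(port_id, rx_queue_id, 1);
}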
[dpdk-dev] [PATCH 5/6] net/mlx4: convert to new Tx offloads API
Ethdev Tx offloads API has changed since: commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new Tx offloads API. Signed-off-by: Shahaf Shuler --- drivers/net/mlx4/mlx4.c| 9 + drivers/net/mlx4/mlx4_ethdev.c | 7 +--- drivers/net/mlx4/mlx4_rxtx.h | 1 + drivers/net/mlx4/mlx4_txq.c| 66 +++-- 4 files changed, 75 insertions(+), 8 deletions(-) diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c index f9e4f9d73..38c545b1b 100644 --- a/drivers/net/mlx4/mlx4.c +++ b/drivers/net/mlx4/mlx4.c @@ -99,8 +99,17 @@ mlx4_dev_configure(struct rte_eth_dev *dev) { struct priv *priv = dev->data->dev_private; struct rte_flow_error error; + uint64_t supp_tx_offloads = mlx4_priv_get_tx_port_offloads(priv); + uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; int ret; + if ((tx_offloads & supp_tx_offloads) != tx_offloads) { + rte_errno = ENOTSUP; + ERROR("Some Tx offloads are not supported " + "requested 0x%lx supported 0x%lx\n", + tx_offloads, supp_tx_offloads); + return -rte_errno; + } /* Prepare internal flow rules. */ ret = mlx4_flow_sync(priv, &error); if (ret) { diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c index 2f69e7d4f..63e00b1da 100644 --- a/drivers/net/mlx4/mlx4_ethdev.c +++ b/drivers/net/mlx4/mlx4_ethdev.c @@ -767,17 +767,12 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) info->max_tx_queues = max; info->max_mac_addrs = RTE_DIM(priv->mac); info->rx_offload_capa = 0; - info->tx_offload_capa = 0; + info->tx_offload_capa = mlx4_priv_get_tx_port_offloads(priv); if (priv->hw_csum) { - info->tx_offload_capa |= (DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM); info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM); } - if (priv->hw_csum_l2tun) - info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; if (mlx4_get_ifname(priv, &ifname) == 0) info->if_index = if_nametoindex(ifname); info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE; diff --git a/drivers/net/mlx4/mlx4_rxtx.h b/drivers/net/mlx4/mlx4_rxtx.h index 463df2b0b..528600a18 100644 --- a/drivers/net/mlx4/mlx4_rxtx.h +++ b/drivers/net/mlx4/mlx4_rxtx.h @@ -181,6 +181,7 @@ int mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, unsigned int socket, const struct rte_eth_txconf *conf); void mlx4_tx_queue_release(void *dpdk_txq); +uint64_t mlx4_priv_get_tx_port_offloads(struct priv *priv); /** * Get memory region (MR) <-> memory pool (MP) association from txq->mp2mr[]. diff --git a/drivers/net/mlx4/mlx4_txq.c b/drivers/net/mlx4/mlx4_txq.c index 7882a4d0b..91befb16b 100644 --- a/drivers/net/mlx4/mlx4_txq.c +++ b/drivers/net/mlx4/mlx4_txq.c @@ -184,6 +184,56 @@ mlx4_txq_fill_dv_obj_info(struct txq *txq, struct mlx4dv_obj *mlxdv) } /** + * Returns the per-port supported offloads. + * + * @param priv + * Pointer to private structure. + * + * @return + * Supported Tx offloads. + */ +uint64_t +mlx4_priv_get_tx_port_offloads(struct priv *priv) +{ + uint64_t offloads = DEV_TX_OFFLOAD_MULTI_SEGS; + + if (priv->hw_csum) { + offloads |= (DEV_TX_OFFLOAD_IPV4_CKSUM | +DEV_TX_OFFLOAD_UDP_CKSUM | +DEV_TX_OFFLOAD_TCP_CKSUM); + } + if (priv->hw_csum_l2tun) + offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + + return offloads; +} + +/** + * Checks if the per-queue offload configuration is valid. + * + * @param priv + * Pointer to private structure. + * @param offloads + * Per-queue offloads configuration. 
+ * + * @return + * 1 if the configuration is valid, 0 otherwise. + */ +static int +priv_is_tx_queue_offloads_allowed(struct priv *priv, uint64_t offloads) +{ + uint64_t port_offloads = priv->dev->data->dev_conf.txmode.offloads; + uint64_t port_supp_offloads = mlx4_priv_get_tx_port_offloads(priv); + + /* There are no Tx offloads which are per queue. */ + if ((offloads & port_supp_offloads) != offloads) + return 0; + if ((port_offloads ^ offloads) & port_supp_offloads) + return 0; + return 1; +} + +/** * DPDK callback to configure a Tx queue. * * @param dev @@ -234,6 +284,15 @@ mlx4_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, (void)conf; /* Thresholds configuration (ignored). */
[dpdk-dev] [PATCH 6/6] net/mlx4: convert to new Rx offloads API
Ethdev Rx offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") This commit support the new Rx offloads API. Signed-off-by: Shahaf Shuler --- drivers/net/mlx4/mlx4.c| 11 ++ drivers/net/mlx4/mlx4_ethdev.c | 10 ++--- drivers/net/mlx4/mlx4_flow.c | 5 ++- drivers/net/mlx4/mlx4_rxq.c| 78 ++--- drivers/net/mlx4/mlx4_rxtx.h | 2 + 5 files changed, 93 insertions(+), 13 deletions(-) diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c index 38c545b1b..3205b58ac 100644 --- a/drivers/net/mlx4/mlx4.c +++ b/drivers/net/mlx4/mlx4.c @@ -101,6 +101,10 @@ mlx4_dev_configure(struct rte_eth_dev *dev) struct rte_flow_error error; uint64_t supp_tx_offloads = mlx4_priv_get_tx_port_offloads(priv); uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads; + uint64_t supp_rx_offloads = + (mlx4_get_rx_port_offloads() | +mlx4_priv_get_rx_queue_offloads(priv)); + uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads; int ret; if ((tx_offloads & supp_tx_offloads) != tx_offloads) { @@ -110,6 +114,13 @@ mlx4_dev_configure(struct rte_eth_dev *dev) tx_offloads, supp_tx_offloads); return -rte_errno; } + if ((rx_offloads & supp_rx_offloads) != rx_offloads) { + rte_errno = ENOTSUP; + ERROR("Some Rx offloads are not supported " + "requested 0x%lx supported 0x%lx\n", + rx_offloads, supp_rx_offloads); + return -rte_errno; + } /* Prepare internal flow rules. */ ret = mlx4_flow_sync(priv, &error); if (ret) { diff --git a/drivers/net/mlx4/mlx4_ethdev.c b/drivers/net/mlx4/mlx4_ethdev.c index 63e00b1da..fef89e731 100644 --- a/drivers/net/mlx4/mlx4_ethdev.c +++ b/drivers/net/mlx4/mlx4_ethdev.c @@ -766,13 +766,11 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) info->max_rx_queues = max; info->max_tx_queues = max; info->max_mac_addrs = RTE_DIM(priv->mac); - info->rx_offload_capa = 0; info->tx_offload_capa = mlx4_priv_get_tx_port_offloads(priv); - if (priv->hw_csum) { - info->rx_offload_capa |= (DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM); - } + info->rx_queue_offload_capa = + mlx4_priv_get_rx_queue_offloads(priv); + info->rx_offload_capa = (mlx4_get_rx_port_offloads() | +info->rx_queue_offload_capa); if (mlx4_get_ifname(priv, &ifname) == 0) info->if_index = if_nametoindex(ifname); info->hash_key_size = MLX4_RSS_HASH_KEY_SIZE; diff --git a/drivers/net/mlx4/mlx4_flow.c b/drivers/net/mlx4/mlx4_flow.c index 8b87b2989..654e72df3 100644 --- a/drivers/net/mlx4/mlx4_flow.c +++ b/drivers/net/mlx4/mlx4_flow.c @@ -1224,7 +1224,7 @@ mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan) * - MAC flow rules are generated from @p dev->data->mac_addrs * (@p priv->mac array). * - An additional flow rule for Ethernet broadcasts is also generated. - * - All these are per-VLAN if @p dev->data->dev_conf.rxmode.hw_vlan_filter + * - All these are per-VLAN if @p DEV_RX_OFFLOAD_VLAN_FILTER * is enabled and VLAN filters are configured. * * @param priv @@ -1292,7 +1292,8 @@ mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error) }; struct ether_addr *rule_mac = ð_spec.dst; rte_be16_t *rule_vlan = - priv->dev->data->dev_conf.rxmode.hw_vlan_filter && + (priv->dev->data->dev_conf.rxmode.offloads & +DEV_RX_OFFLOAD_VLAN_FILTER) && !priv->dev->data->promiscuous ? 
&vlan_spec.tci : NULL; diff --git a/drivers/net/mlx4/mlx4_rxq.c b/drivers/net/mlx4/mlx4_rxq.c index 53313c56f..f8c1105dc 100644 --- a/drivers/net/mlx4/mlx4_rxq.c +++ b/drivers/net/mlx4/mlx4_rxq.c @@ -663,6 +663,66 @@ mlx4_rxq_detach(struct rxq *rxq) } /** + * Returns the per-queue supported offloads. + * + * @param priv + * Pointer to private structure. + * + * @return + * Supported Tx offloads. + */ +uint64_t +mlx4_priv_get_rx_queue_offloads(struct priv *priv) +{ + uint64_t offloads = DEV_RX_OFFLOAD_SCATTER; + + if (priv->hw_csum) + offloads |= DEV_RX_OFFLOAD_CHECKSUM; + return offloads; +} + +/** + * Returns the per-port supported offloads. + * + * @return + * Supported Rx offloads. + */ +uint64_t +mlx4_get_rx_port_offloads(void) +{ + uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER; + + return offloads; +} + +/** + * Checks if the per-queue offload configuration is valid. + * + * @param priv + * Pointer to p
[dpdk-dev] [PATCH 0/5] convert testpmd to new ethdev offloads API
This series is to convert testpmd application to the new offloads API [1]. [1] http://dpdk.org/ml/archives/dev/2017-October/077329.html Shahaf Shuler (5): app/testpmd: convert to new Ethdev offloads API app/testpmd: remove txqflags app/testpmd: add command line option for multiseg app/testpmd: add command line option for mbuf fast free app/testpmd: enforce offloads caps app/test-pmd/cmdline.c | 311 ++- app/test-pmd/config.c | 103 +--- app/test-pmd/parameters.c | 62 ++--- app/test-pmd/testpmd.c | 55 ++-- app/test-pmd/testpmd.h | 2 +- doc/guides/testpmd_app_ug/run_app.rst | 20 +- doc/guides/testpmd_app_ug/testpmd_funcs.rst | 11 - 7 files changed, 332 insertions(+), 232 deletions(-) -- 2.12.0
[dpdk-dev] [PATCH 1/5] app/testpmd: convert to new Ethdev offloads API
Ethdev Rx/Tx offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") Convert the application to use the new API. Signed-off-by: Shahaf Shuler --- app/test-pmd/cmdline.c| 148 + app/test-pmd/config.c | 97 +++ app/test-pmd/parameters.c | 35 +- app/test-pmd/testpmd.c| 24 +++ 4 files changed, 219 insertions(+), 85 deletions(-) diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index f71d96301..b0f2325c8 100644 --- a/app/test-pmd/cmdline.c +++ b/app/test-pmd/cmdline.c @@ -1577,6 +1577,7 @@ cmd_config_max_pkt_len_parsed(void *parsed_result, __attribute__((unused)) void *data) { struct cmd_config_max_pkt_len_result *res = parsed_result; + uint64_t rx_offloads = rx_mode.offloads; if (!all_ports_stopped()) { printf("Please stop all ports first\n"); @@ -1594,14 +1595,16 @@ cmd_config_max_pkt_len_parsed(void *parsed_result, rx_mode.max_rx_pkt_len = res->value; if (res->value > ETHER_MAX_LEN) - rx_mode.jumbo_frame = 1; + rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; else - rx_mode.jumbo_frame = 0; + rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; } else { printf("Unknown parameter\n"); return; } + rx_mode.offloads = rx_offloads; + init_port_config(); cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1); @@ -1703,6 +1706,7 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result, __attribute__((unused)) void *data) { struct cmd_config_rx_mode_flag *res = parsed_result; + uint64_t rx_offloads = rx_mode.offloads; if (!all_ports_stopped()) { printf("Please stop all ports first\n"); @@ -1711,48 +1715,48 @@ cmd_config_rx_mode_flag_parsed(void *parsed_result, if (!strcmp(res->name, "crc-strip")) { if (!strcmp(res->value, "on")) - rx_mode.hw_strip_crc = 1; + rx_offloads |= DEV_RX_OFFLOAD_CRC_STRIP; else if (!strcmp(res->value, "off")) - rx_mode.hw_strip_crc = 0; + rx_offloads &= ~DEV_RX_OFFLOAD_CRC_STRIP; else { printf("Unknown parameter\n"); return; } } else if (!strcmp(res->name, "scatter")) { - if (!strcmp(res->value, "on")) - rx_mode.enable_scatter = 1; - else if (!strcmp(res->value, "off")) - rx_mode.enable_scatter = 0; - else { + if (!strcmp(res->value, "on")) { + rx_offloads |= DEV_RX_OFFLOAD_SCATTER; + } else if (!strcmp(res->value, "off")) { + rx_offloads &= ~DEV_RX_OFFLOAD_SCATTER; + } else { printf("Unknown parameter\n"); return; } } else if (!strcmp(res->name, "rx-cksum")) { if (!strcmp(res->value, "on")) - rx_mode.hw_ip_checksum = 1; + rx_offloads |= DEV_RX_OFFLOAD_CHECKSUM; else if (!strcmp(res->value, "off")) - rx_mode.hw_ip_checksum = 0; + rx_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM; else { printf("Unknown parameter\n"); return; } } else if (!strcmp(res->name, "rx-timestamp")) { if (!strcmp(res->value, "on")) - rx_mode.hw_timestamp = 1; + rx_offloads |= DEV_RX_OFFLOAD_TIMESTAMP; else if (!strcmp(res->value, "off")) - rx_mode.hw_timestamp = 0; + rx_offloads &= ~DEV_RX_OFFLOAD_TIMESTAMP; else { printf("Unknown parameter\n"); return; } } else if (!strcmp(res->name, "hw-vlan")) { if (!strcmp(res->value, "on")) { - rx_mode.hw_vlan_filter = 1; - rx_mode.hw_vlan_strip = 1; + rx_offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_STRIP); } else if (!strcmp(res->value, "off")) { - rx_mode.hw_vlan_filter = 0; - rx_mode.hw_vlan_strip = 0; + rx_offloads &= ~(DEV_RX_OFFLOAD_VLAN_FILTER | + DEV_RX_OFFLOAD_VLAN_STRIP); } else {
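The conversion above is essentially a one-to-one mapping from the old rxmode bit-fields to DEV_RX_OFFLOAD_* flags. A condensed, hedged sketch of that mapping follows; the helper name is illustrative, and testpmd itself open-codes the equivalent logic per command.

#include <string.h>
#include <rte_ethdev.h>

static uint64_t
rx_flag_to_offloads(const char *name, int on, uint64_t offloads)
{
	uint64_t flag = 0;

	if (strcmp(name, "crc-strip") == 0)
		flag = DEV_RX_OFFLOAD_CRC_STRIP;
	else if (strcmp(name, "scatter") == 0)
		flag = DEV_RX_OFFLOAD_SCATTER;
	else if (strcmp(name, "rx-cksum") == 0)
		flag = DEV_RX_OFFLOAD_CHECKSUM;
	else if (strcmp(name, "rx-timestamp") == 0)
		flag = DEV_RX_OFFLOAD_TIMESTAMP;
	else if (strcmp(name, "hw-vlan") == 0)
		flag = DEV_RX_OFFLOAD_VLAN_FILTER | DEV_RX_OFFLOAD_VLAN_STRIP;
	return on ? (offloads | flag) : (offloads & ~flag);
}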
[dpdk-dev] [PATCH 2/5] app/testpmd: remove txqflags
Since testpmd is now using the new Ethdev offloads API there is no need for the txqflags configuration. Signed-off-by: Shahaf Shuler Acked-by: Nelio Laranjeiro --- app/test-pmd/cmdline.c | 69 app/test-pmd/config.c | 4 +- app/test-pmd/parameters.c | 14 + app/test-pmd/testpmd.c | 8 --- app/test-pmd/testpmd.h | 1 - doc/guides/testpmd_app_ug/run_app.rst | 12 - doc/guides/testpmd_app_ug/testpmd_funcs.rst | 11 7 files changed, 2 insertions(+), 117 deletions(-) diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index b0f2325c8..9e3f02ec5 100644 --- a/app/test-pmd/cmdline.c +++ b/app/test-pmd/cmdline.c @@ -3092,74 +3092,6 @@ cmdline_parse_inst_t cmd_set_txsplit = { }, }; -/* *** CONFIG TX QUEUE FLAGS *** */ - -struct cmd_config_txqflags_result { - cmdline_fixed_string_t port; - cmdline_fixed_string_t config; - cmdline_fixed_string_t all; - cmdline_fixed_string_t what; - int32_t hexvalue; -}; - -static void cmd_config_txqflags_parsed(void *parsed_result, - __attribute__((unused)) struct cmdline *cl, - __attribute__((unused)) void *data) -{ - struct cmd_config_txqflags_result *res = parsed_result; - - if (!all_ports_stopped()) { - printf("Please stop all ports first\n"); - return; - } - - if (strcmp(res->what, "txqflags")) { - printf("Unknown parameter\n"); - return; - } - - if (res->hexvalue >= 0) { - txq_flags = res->hexvalue; - } else { - printf("txqflags must be >= 0\n"); - return; - } - - init_port_config(); - - cmd_reconfig_device_queue(RTE_PORT_ALL, 1, 1); -} - -cmdline_parse_token_string_t cmd_config_txqflags_port = - TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, port, -"port"); -cmdline_parse_token_string_t cmd_config_txqflags_config = - TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, config, -"config"); -cmdline_parse_token_string_t cmd_config_txqflags_all = - TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, all, -"all"); -cmdline_parse_token_string_t cmd_config_txqflags_what = - TOKEN_STRING_INITIALIZER(struct cmd_config_txqflags_result, what, -"txqflags"); -cmdline_parse_token_num_t cmd_config_txqflags_value = - TOKEN_NUM_INITIALIZER(struct cmd_config_txqflags_result, - hexvalue, INT32); - -cmdline_parse_inst_t cmd_config_txqflags = { - .f = cmd_config_txqflags_parsed, - .data = NULL, - .help_str = "port config all txqflags ", - .tokens = { - (void *)&cmd_config_txqflags_port, - (void *)&cmd_config_txqflags_config, - (void *)&cmd_config_txqflags_all, - (void *)&cmd_config_txqflags_what, - (void *)&cmd_config_txqflags_value, - NULL, - }, -}; - /* *** ADD/REMOVE ALL VLAN IDENTIFIERS TO/FROM A PORT VLAN RX FILTER *** */ struct cmd_rx_vlan_filter_all_result { cmdline_fixed_string_t rx_vlan; @@ -15729,7 +15661,6 @@ cmdline_parse_ctx_t main_ctx[] = { (cmdline_parse_inst_t *)&cmd_config_rx_mode_flag, (cmdline_parse_inst_t *)&cmd_config_rss, (cmdline_parse_inst_t *)&cmd_config_rxtx_queue, - (cmdline_parse_inst_t *)&cmd_config_txqflags, (cmdline_parse_inst_t *)&cmd_config_rss_reta, (cmdline_parse_inst_t *)&cmd_showport_reta, (cmdline_parse_inst_t *)&cmd_config_burst, diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c index 9b6ffeca9..089e9f4cf 100644 --- a/app/test-pmd/config.c +++ b/app/test-pmd/config.c @@ -417,7 +417,6 @@ tx_queue_infos_display(portid_t port_id, uint16_t queue_id) printf("\nTX writeback threshold: %hhu", qinfo.conf.tx_thresh.wthresh); printf("\nTX RS threshold: %hu", qinfo.conf.tx_rs_thresh); printf("\nTX free threshold: %hu", qinfo.conf.tx_free_thresh); - printf("\nTX flags: %#x", qinfo.conf.txq_flags); printf("\nTX 
deferred start: %s", (qinfo.conf.tx_deferred_start != 0) ? "on" : "off"); printf("\nNumber of TXDs: %hu", qinfo.nb_desc); @@ -1702,8 +1701,7 @@ rxtx_config_display(void) printf(" TX threshold registers: pthresh=%d hthresh=%d wthresh=%d\n", tx_conf->tx_thresh.pthresh, tx_conf->tx_thresh.hthresh, tx_conf->tx_thresh.wthresh); - printf(" TX RS bit threshold=%d - TXQ flags=0x%"PRIx32"\n", - tx_conf->tx_rs_thresh, tx_conf->txq_flags); + printf(" TX RS bit threshold=%d\n", tx_conf->tx_rs_thresh); } void diff --git a/app/test-pmd/par
[dpdk-dev] [PATCH 3/5] app/testpmd: add command line option for multiseg
This patch enables multi segment send Tx offloads from the command line. Signed-off-by: Shahaf Shuler --- app/test-pmd/parameters.c | 8 app/test-pmd/testpmd.c| 4 app/test-pmd/testpmd.h| 1 + doc/guides/testpmd_app_ug/run_app.rst | 4 4 files changed, 17 insertions(+) diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index 1a145c387..8a68a39ea 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -214,6 +214,8 @@ usage(char* progname) "disable print of designated event or all of them.\n"); printf(" --flow-isolate-all: " "requests flow API isolated mode on all ports at initialization time.\n"); + printf(" --enable-multiseg: " + "enables multi segment send Tx offload on all ports.\n"); } #ifdef RTE_LIBRTE_CMDLINE @@ -566,6 +568,8 @@ launch_args_parse(int argc, char** argv) enum { TX, RX }; /* Default Rx offloads for all ports. */ uint64_t rx_offloads = rx_mode.offloads; + /* Default Tx offloads for all ports. */ + uint64_t tx_offloads = tx_mode.offloads; static struct option lgopts[] = { { "help", 0, 0, 0 }, @@ -642,6 +646,7 @@ launch_args_parse(int argc, char** argv) { "no-rmv-interrupt", 0, 0, 0 }, { "print-event",1, 0, 0 }, { "mask-event", 1, 0, 0 }, + { "enable-multiseg",0, 0, 0 }, { 0, 0, 0, 0 }, }; @@ -1115,6 +1120,8 @@ launch_args_parse(int argc, char** argv) rte_exit(EXIT_FAILURE, "invalid mask-event argument\n"); } + if (!strcmp(lgopts[opt_idx].name, "enable-multiseg")) + tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; break; case 'h': @@ -1131,4 +1138,5 @@ launch_args_parse(int argc, char** argv) /* Set offload configuration from command line parameters. */ rx_mode.offloads = rx_offloads; + tx_mode.offloads = tx_offloads; } diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index a97edcadd..2550677c3 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -339,6 +339,8 @@ struct rte_eth_rxmode rx_mode = { .ignore_offload_bitfield = 1, /**< Use rte_eth_rxq_conf offloads API */ }; +struct rte_eth_txmode tx_mode; + struct rte_fdir_conf fdir_conf = { .mode = RTE_FDIR_MODE_NONE, .pballoc = RTE_FDIR_PBALLOC_64K, @@ -599,6 +601,8 @@ init_config(void) RTE_ETH_FOREACH_DEV(pid) { port = &ports[pid]; + /* Apply default Tx configuration for all ports */ + port->dev_conf.txmode = tx_mode; rte_eth_dev_info_get(pid, &port->dev_info); if (numa_support) { diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h index 6af70e02a..8e572704e 100644 --- a/app/test-pmd/testpmd.h +++ b/app/test-pmd/testpmd.h @@ -415,6 +415,7 @@ extern portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; extern struct rte_port *ports; extern struct rte_eth_rxmode rx_mode; +extern struct rte_eth_txmode tx_mode; extern uint64_t rss_hf; extern queueid_t nb_rxq; diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst index cb94582b9..b21a1fe8a 100644 --- a/doc/guides/testpmd_app_ug/run_app.rst +++ b/doc/guides/testpmd_app_ug/run_app.rst @@ -497,3 +497,7 @@ The commandline options are: configured flow rules only (see flow command). Ports that do not support this mode are automatically discarded. + +* ``--enable-multiseg`` + +Enables multi segment send Tx offload on all ports. -- 2.12.0
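Usage note (the invocation below is illustrative; core, memory-channel and other arguments are placeholders): running testpmd as "./testpmd -l 0-3 -n 4 -- -i --enable-multiseg" sets DEV_TX_OFFLOAD_MULTI_SEGS in the default Tx mode applied to every port before configuration, which is exactly what the new option does.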
[dpdk-dev] [PATCH 4/5] app/testpmd: add command line option for mbuf fast free
This patch enables mbuf fast free Tx offloads from the command line. Signed-off-by: Shahaf Shuler --- app/test-pmd/parameters.c | 5 + doc/guides/testpmd_app_ug/run_app.rst | 4 2 files changed, 9 insertions(+) diff --git a/app/test-pmd/parameters.c b/app/test-pmd/parameters.c index 8a68a39ea..cc18cb767 100644 --- a/app/test-pmd/parameters.c +++ b/app/test-pmd/parameters.c @@ -216,6 +216,8 @@ usage(char* progname) "requests flow API isolated mode on all ports at initialization time.\n"); printf(" --enable-multiseg: " "enables multi segment send Tx offload on all ports.\n"); + printf(" --enable-fast-free: " + "enables mbuf fast free Tx offload on all ports.\n"); } #ifdef RTE_LIBRTE_CMDLINE @@ -647,6 +649,7 @@ launch_args_parse(int argc, char** argv) { "print-event",1, 0, 0 }, { "mask-event", 1, 0, 0 }, { "enable-multiseg",0, 0, 0 }, + { "enable-fast-free", 0, 0, 0 }, { 0, 0, 0, 0 }, }; @@ -1122,6 +1125,8 @@ launch_args_parse(int argc, char** argv) } if (!strcmp(lgopts[opt_idx].name, "enable-multiseg")) tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; + if (!strcmp(lgopts[opt_idx].name, "enable-fast-free")) + tx_offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE; break; case 'h': diff --git a/doc/guides/testpmd_app_ug/run_app.rst b/doc/guides/testpmd_app_ug/run_app.rst index b21a1fe8a..1311cce41 100644 --- a/doc/guides/testpmd_app_ug/run_app.rst +++ b/doc/guides/testpmd_app_ug/run_app.rst @@ -501,3 +501,7 @@ The commandline options are: * ``--enable-multiseg`` Enables multi segment send Tx offload on all ports. + +* ``--enable-fast-free`` + +Enables mbuf fast free Tx offload on all ports. -- 2.12.0
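Usage note (illustrative invocation): "./testpmd -l 0-3 -n 4 -- -i --enable-fast-free" requests DEV_TX_OFFLOAD_MBUF_FAST_FREE on all ports. Per its ethdev definition, this offload assumes that all mbufs freed on a Tx queue come from a single mempool and have a reference count of 1, so it should only be enabled for forwarding setups that satisfy those conditions.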
[dpdk-dev] [PATCH 5/5] app/testpmd: enforce offloads caps
In the current design it was possible for offload to be set even though the device is not supporting it. A warning message was printed instead. This is a wrong behaviour, as application should set only the offloads reported by the capabilities of the device. This patch adds verification for the offloads being set and make sure the offload configuration passed to the device always match its capabilities. Signed-off-by: Shahaf Shuler --- app/test-pmd/cmdline.c | 132 app/test-pmd/config.c | 22 app/test-pmd/testpmd.c | 21 ++- 3 files changed, 115 insertions(+), 60 deletions(-) diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index 9e3f02ec5..2f099a8e6 100644 --- a/app/test-pmd/cmdline.c +++ b/app/test-pmd/cmdline.c @@ -3639,6 +3639,7 @@ cmd_csum_parsed(void *parsed_result, int hw = 0; uint16_t mask = 0; uint64_t csum_offloads = 0; + struct rte_eth_dev_info dev_info; if (port_id_is_invalid(res->port_id, ENABLED_WARN)) { printf("invalid port %d\n", res->port_id); @@ -3649,26 +3650,58 @@ cmd_csum_parsed(void *parsed_result, return; } + rte_eth_dev_info_get(res->port_id, &dev_info); if (!strcmp(res->mode, "set")) { if (!strcmp(res->hwsw, "hw")) hw = 1; if (!strcmp(res->proto, "ip")) { - mask = TESTPMD_TX_OFFLOAD_IP_CKSUM; - csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM; + if (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_IPV4_CKSUM) { + mask = TESTPMD_TX_OFFLOAD_IP_CKSUM; + csum_offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM; + } else { + printf("IP checksum offload is not supported " + "by port %u\n", res->port_id); + } } else if (!strcmp(res->proto, "udp")) { - mask = TESTPMD_TX_OFFLOAD_UDP_CKSUM; - csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM; + if (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_UDP_CKSUM) { + mask = TESTPMD_TX_OFFLOAD_UDP_CKSUM; + csum_offloads |= DEV_TX_OFFLOAD_UDP_CKSUM; + } else { + printf("UDP checksum offload is not supported " + "by port %u\n", res->port_id); + } } else if (!strcmp(res->proto, "tcp")) { - mask = TESTPMD_TX_OFFLOAD_TCP_CKSUM; - csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; + if (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_TCP_CKSUM) { + mask = TESTPMD_TX_OFFLOAD_TCP_CKSUM; + csum_offloads |= DEV_TX_OFFLOAD_TCP_CKSUM; + } else { + printf("TCP checksum offload is not supported " + "by port %u\n", res->port_id); + } } else if (!strcmp(res->proto, "sctp")) { - mask = TESTPMD_TX_OFFLOAD_SCTP_CKSUM; - csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM; + if (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_SCTP_CKSUM) { + mask = TESTPMD_TX_OFFLOAD_SCTP_CKSUM; + csum_offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM; + } else { + printf("SCTP checksum offload is not supported " + "by port %u\n", res->port_id); + } } else if (!strcmp(res->proto, "outer-ip")) { - mask = TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM; - csum_offloads |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + if (dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) { + mask = TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM; + csum_offloads |= + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + } else { + printf("Outer IP checksum offload is not " + "supported by port %u\n", res->port_id); + } } if (hw) { @@ -3815,6 +3848,14 @@ cmd_tso_set_parsed(void *parsed_result, if (!strcmp(res->mode, "set")) ports[res->port_id].tso_s
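The pattern repeated throughout the diff above (check the capability bit before accepting a csum sub-command) can be summarized in a small hedged sketch; the helper name is illustrative and not part of the patch.

#include <stdio.h>
#include <rte_ethdev.h>

/* Accept an offload for a port only if dev_info reports it; return the
 * (possibly unchanged) accumulated offload mask. */
static uint64_t
add_tx_offload_if_supported(uint16_t port_id, uint64_t current, uint64_t flag,
			    const char *name)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if (!(dev_info.tx_offload_capa & flag)) {
		printf("%s offload is not supported by port %u\n", name, port_id);
		return current;
	}
	return current | flag;
}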
[dpdk-dev] [PATCH 00/39] convert examples to new ethdev offloads API
This series is to convert DPDK examples to the new offloads API [1]. [1] http://dpdk.org/ml/archives/dev/2017-October/077329.html Shahaf Shuler (39): examples/l2fwd: convert to new ethdev offloads API examples/l2fwd-crypto: convert to new ethdev offloads API examples/l2fwd-jobstats: convert to new ethdev offloads API examples/l2fwd-keepalive: convert to new ethdev offloads API examples/l3fwd: move to ethdev offloads API examples/l3fwd-acl: convert to new ethdev offloads API examples/l3fwd-power: convert to new ethdev offloads API examples/l3fwd-vf: convert to new ethdev offloads API examples/bond: convert to new ethdev offloads API examples/exception_path: convert to new ethdev offloads API examples/kni: convert to new ethdev offloads API examples/ip_fragmentation: convert to new offloads API examples/ip_pipeline: convert to new ethdev offloads API examples/ip_reassembly: convert to new ethdev offloads API examples/ipsec-secgw: convert to new ethdev offloads API examples/ipv4_multicast: convert to new ethdev offloads API examples/link_status_interrupt: convert to new offloads API examples/load_balancer: convert to new ethdev offloads API examples/multi_process: convert to new ethdev offloads API examples/netmap_compat: convert to new ethdev offloads API examples/performance-thread: convert to new offloads API examples/qos_meter: convert to new ethdev offloads API examples/qos_sched: convert to new ethdev offloads API examples/quota_watermark: convert to new ethdev offloads API examples/tep_termination: convert to new ethdev offloads API examples/vhost: convert to new ethdev offloads API examples/vmdq: convert to new ethdev offloads API examples/vmdq_dcb: convert to new ethdev offloads API examples/vm_power_manager: convert to new offloads API examples/distributor: convert to new ethdev offloads API examples/ethtool: convert to new ethdev offloads API examples/eventdev_pipeline: convert to new offloads API examples/flow_classify: convert to new ethdev offloads API examples/flow_filtering: convert to new ethdev offloads API examples/packet_ordering: convert to new ethdev offloads API examples/ptpclient: convert to new ethdev offloads API examples/rxtx_callbacks: convert to new ethdev offloads API examples/server_node_efd: convert to new ethdev offloads API examples/skeleton: convert to new ethdev offloads API examples/bond/main.c| 68 +--- examples/distributor/main.c | 8 ++- examples/ethtool/ethtool-app/main.c | 7 +- examples/eventdev_pipeline_sw_pmd/main.c| 10 ++- examples/exception_path/main.c | 38 --- examples/flow_classify/flow_classify.c | 12 +++- examples/flow_filtering/main.c | 27 examples/ip_fragmentation/main.c| 36 +-- examples/ip_pipeline/config_parse.c | 13 +--- examples/ip_pipeline/init.c | 30 +++-- examples/ip_reassembly/main.c | 36 +-- examples/ipsec-secgw/ipsec-secgw.c | 27 +++- examples/ipv4_multicast/main.c | 35 -- examples/kni/main.c | 67 --- examples/l2fwd-crypto/main.c| 38 --- examples/l2fwd-jobstats/main.c | 39 --- examples/l2fwd-keepalive/main.c | 39 --- examples/l2fwd/main.c | 38 --- examples/l3fwd-acl/main.c | 41 +--- examples/l3fwd-power/main.c | 42 +--- examples/l3fwd-vf/main.c| 38 --- examples/l3fwd/main.c | 41 +--- examples/link_status_interrupt/main.c | 38 --- examples/load_balancer/init.c | 37 +-- examples/multi_process/l2fwd_fork/main.c| 36 +-- examples/multi_process/symmetric_mp/main.c | 35 -- examples/netmap_compat/bridge/bridge.c | 7 +- examples/netmap_compat/lib/compat_netmap.c | 29 - examples/packet_ordering/main.c | 13 +++- 
examples/performance-thread/l3fwd-thread/main.c | 42 +--- examples/ptpclient/ptpclient.c | 7 +- examples/qos_meter/main.c | 64 -- examples/qos_sched/init.c | 31 +++-- examples/quota_watermark/qw/init.c | 38 --- examples/rxtx_callbacks/main.c | 12 +++- examples/server_node_efd/server/init.c | 10 ++- examples/skeleton/basicfwd.c| 12 +++- examples/tep_termination/vxlan_setup.c | 37 +-- examples/vhost/main.c | 42 examples/vm_power_manager/main.c| 12 +++- exampl
[dpdk-dev] [PATCH 02/39] examples/l2fwd-crypto: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/l2fwd-crypto/main.c | 38 ++ 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c index d4e1682c8..e88638af4 100644 --- a/examples/l2fwd-crypto/main.c +++ b/examples/l2fwd-crypto/main.c @@ -235,16 +235,13 @@ struct lcore_queue_conf { struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; -static const struct rte_eth_conf port_conf = { +static struct rte_eth_conf port_conf = { .rxmode = { .mq_mode = ETH_MQ_RX_NONE, .max_rx_pkt_len = ETHER_MAX_LEN, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, @@ -2356,6 +2353,9 @@ initialize_ports(struct l2fwd_crypto_options *options) for (last_portid = 0, portid = 0; portid < nb_ports; portid++) { int retval; + struct rte_eth_dev_info dev_info; + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; /* Skip ports that are not enabled */ if ((options->portmask & (1 << portid)) == 0) @@ -2364,6 +2364,23 @@ initialize_ports(struct l2fwd_crypto_options *options) /* init port */ printf("Initializing port %u... ", portid); fflush(stdout); + rte_eth_dev_info_get(portid, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } retval = rte_eth_dev_configure(portid, 1, 1, &port_conf); if (retval < 0) { printf("Cannot configure device: err=%d, port=%u\n", @@ -2381,9 +2398,11 @@ initialize_ports(struct l2fwd_crypto_options *options) /* init one RX queue */ fflush(stdout); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd, rte_eth_dev_socket_id(portid), -NULL, l2fwd_pktmbuf_pool); +&rxq_conf, l2fwd_pktmbuf_pool); if (retval < 0) { printf("rte_eth_rx_queue_setup:err=%d, port=%u\n", retval, portid); @@ -2392,9 +2411,12 @@ initialize_ports(struct l2fwd_crypto_options *options) /* init one TX queue on each port */ fflush(stdout); + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; + txq_conf.offloads = port_conf.txmode.offloads; retval = rte_eth_tx_queue_setup(portid, 0, nb_txd, rte_eth_dev_socket_id(portid), - NULL); + &txq_conf); if (retval < 0) { printf("rte_eth_tx_queue_setup:err=%d, port=%u\n", retval, portid); -- 2.12.0
[dpdk-dev] [PATCH 03/39] examples/l2fwd-jobstats: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/l2fwd-jobstats/main.c | 39 + 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c index 485370de6..f522cd905 100644 --- a/examples/l2fwd-jobstats/main.c +++ b/examples/l2fwd-jobstats/main.c @@ -116,14 +116,11 @@ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS]; -static const struct rte_eth_conf port_conf = { +static struct rte_eth_conf port_conf = { .rxmode = { .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, @@ -866,6 +863,10 @@ main(int argc, char **argv) /* Initialise each port */ for (portid = 0; portid < nb_ports; portid++) { + struct rte_eth_dev_info dev_info; + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; + /* skip ports that are not enabled */ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) { printf("Skipping disabled port %u\n", portid); @@ -875,6 +876,23 @@ main(int argc, char **argv) /* init port */ printf("Initializing port %u... ", portid); fflush(stdout); + rte_eth_dev_info_get(portid, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, 1, 1, &port_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n", @@ -891,19 +909,24 @@ main(int argc, char **argv) /* init one RX queue */ fflush(stdout); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, rte_eth_dev_socket_id(portid), -NULL, +&rxq_conf, l2fwd_pktmbuf_pool); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n", ret, portid); /* init one TX queue on each port */ + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; + txq_conf.offloads = port_conf.txmode.offloads; fflush(stdout); ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, rte_eth_dev_socket_id(portid), - NULL); + &txq_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n", -- 2.12.0
[dpdk-dev] [PATCH 05/39] examples/l3fwd: move to ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/l3fwd/main.c | 41 +++-- 1 file changed, 31 insertions(+), 10 deletions(-) diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c index 6229568f2..cb1606c64 100644 --- a/examples/l3fwd/main.c +++ b/examples/l3fwd/main.c @@ -149,11 +149,9 @@ static struct rte_eth_conf port_conf = { .mq_mode = ETH_MQ_RX_RSS, .max_rx_pkt_len = ETHER_MAX_LEN, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 1, /**< IP checksum offload enabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = (DEV_RX_OFFLOAD_CRC_STRIP | +DEV_RX_OFFLOAD_CHECKSUM), }, .rx_adv_conf = { .rss_conf = { @@ -612,7 +610,8 @@ parse_args(int argc, char **argv) }; printf("%s\n", str8); - port_conf.rxmode.jumbo_frame = 1; + port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; /* * if no max-pkt-len set, use the default @@ -908,6 +907,24 @@ main(int argc, char **argv) n_tx_queue = MAX_TX_QUEUE_PER_PORT; printf("Creating queues: nb_rxq=%d nb_txq=%u... ", nb_rx_queue, (unsigned)n_tx_queue ); + + rte_eth_dev_info_get(portid, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, nb_rx_queue, (uint16_t)n_tx_queue, &port_conf); if (ret < 0) @@ -955,10 +972,9 @@ main(int argc, char **argv) printf("txq=%u,%d,%d ", lcore_id, queueid, socketid); fflush(stdout); - rte_eth_dev_info_get(portid, &dev_info); txconf = &dev_info.default_txconf; - if (port_conf.rxmode.jumbo_frame) - txconf->txq_flags = 0; + txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE; + txconf->offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, socketid, txconf); if (ret < 0) @@ -984,6 +1000,8 @@ main(int argc, char **argv) fflush(stdout); /* init RX queues */ for(queue = 0; queue < qconf->n_rx_queue; ++queue) { + struct rte_eth_rxconf rxq_conf; + portid = qconf->rx_queue_list[queue].port_id; queueid = qconf->rx_queue_list[queue].queue_id; @@ -996,9 +1014,12 @@ main(int argc, char **argv) printf("rxq=%d,%d,%d ", portid, queueid, socketid); fflush(stdout); + rte_eth_dev_info_get(portid, &dev_info); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, socketid, - NULL, + &rxq_conf, pktmbuf_pool[socketid]); if (ret < 0)
[dpdk-dev] [PATCH 04/39] examples/l2fwd-keepalive: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/l2fwd-keepalive/main.c | 39 1 file changed, 31 insertions(+), 8 deletions(-) diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c index 358ca5ec7..798abf792 100644 --- a/examples/l2fwd-keepalive/main.c +++ b/examples/l2fwd-keepalive/main.c @@ -107,14 +107,11 @@ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS]; -static const struct rte_eth_conf port_conf = { +static struct rte_eth_conf port_conf = { .rxmode = { .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, @@ -658,6 +655,10 @@ main(int argc, char **argv) /* Initialise each port */ for (portid = 0; portid < nb_ports; portid++) { + struct rte_eth_dev_info dev_info; + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; + /* skip ports that are not enabled */ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) { printf("Skipping disabled port %u\n", portid); @@ -667,6 +668,23 @@ main(int argc, char **argv) /* init port */ printf("Initializing port %u... ", portid); fflush(stdout); + rte_eth_dev_info_get(portid, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, 1, 1, &port_conf); if (ret < 0) rte_exit(EXIT_FAILURE, @@ -684,9 +702,11 @@ main(int argc, char **argv) /* init one RX queue */ fflush(stdout); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, rte_eth_dev_socket_id(portid), -NULL, +&rxq_conf, l2fwd_pktmbuf_pool); if (ret < 0) rte_exit(EXIT_FAILURE, @@ -695,9 +715,12 @@ main(int argc, char **argv) /* init one TX queue on each port */ fflush(stdout); + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; + txq_conf.offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, rte_eth_dev_socket_id(portid), - NULL); + &txq_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n", -- 2.12.0
[dpdk-dev] [PATCH 01/39] examples/l2fwd: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/l2fwd/main.c | 38 ++ 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c index e89e2e1bf..a1e378be6 100644 --- a/examples/l2fwd/main.c +++ b/examples/l2fwd/main.c @@ -110,14 +110,11 @@ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS]; -static const struct rte_eth_conf port_conf = { +static struct rte_eth_conf port_conf = { .rxmode = { .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, @@ -649,6 +646,9 @@ main(int argc, char **argv) /* Initialise each port */ for (portid = 0; portid < nb_ports; portid++) { + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; + /* skip ports that are not enabled */ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) { printf("Skipping disabled port %u\n", portid); @@ -658,6 +658,23 @@ main(int argc, char **argv) /* init port */ printf("Initializing port %u... ", portid); fflush(stdout); + rte_eth_dev_info_get(portid, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, 1, 1, &port_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n", @@ -674,9 +691,11 @@ main(int argc, char **argv) /* init one RX queue */ fflush(stdout); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, rte_eth_dev_socket_id(portid), -NULL, +&rxq_conf, l2fwd_pktmbuf_pool); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n", @@ -684,9 +703,12 @@ main(int argc, char **argv) /* init one TX queue on each port */ fflush(stdout); + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; + txq_conf.offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, rte_eth_dev_socket_id(portid), - NULL); + &txq_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n", ret, portid); -- 2.12.0
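l2fwd is the minimal case and shows the mapping most clearly. For reference, the removed rte_eth_rxmode bit-fields correspond to the DEV_RX_OFFLOAD_* flags summarised below (a summary sketch, not patch content); bits that were 0 in these examples are simply no longer requested:

/*
 * Field-to-flag mapping used across this series:
 *
 *   hw_ip_checksum = 1  ->  .offloads |= DEV_RX_OFFLOAD_CHECKSUM
 *   hw_vlan_filter = 1  ->  .offloads |= DEV_RX_OFFLOAD_VLAN_FILTER
 *   jumbo_frame    = 1  ->  .offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME
 *   hw_strip_crc   = 1  ->  .offloads |= DEV_RX_OFFLOAD_CRC_STRIP
 *
 * ignore_offload_bitfield = 1 tells ethdev/PMDs to read .offloads and
 * disregard whatever is left in the legacy bit-fields.
 */
static struct rte_eth_conf port_conf = {
	.rxmode = {
		.split_hdr_size = 0,
		.ignore_offload_bitfield = 1,
		.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};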
[dpdk-dev] [PATCH 08/39] examples/l3fwd-vf: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/l3fwd-vf/main.c | 38 +- 1 file changed, 29 insertions(+), 9 deletions(-) diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c index 6ef89fc8c..060c102a9 100644 --- a/examples/l3fwd-vf/main.c +++ b/examples/l3fwd-vf/main.c @@ -190,11 +190,9 @@ static struct rte_eth_conf port_conf = { .mq_mode= ETH_MQ_RX_RSS, .max_rx_pkt_len = ETHER_MAX_LEN, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 1, /**< IP checksum offload enabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = (DEV_RX_OFFLOAD_CRC_STRIP | +DEV_RX_OFFLOAD_CHECKSUM), }, .rx_adv_conf = { .rss_conf = { @@ -1006,6 +1004,24 @@ main(int argc, char **argv) printf("Creating queues: nb_rxq=%d nb_txq=%u... ", nb_rx_queue, (unsigned)1 ); + + rte_eth_dev_info_get(portid, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, nb_rx_queue, n_tx_queue, &port_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%d\n", @@ -1032,10 +1048,9 @@ main(int argc, char **argv) printf("txq=%d,%d,%d ", portid, 0, socketid); fflush(stdout); - rte_eth_dev_info_get(portid, &dev_info); txconf = &dev_info.default_txconf; - if (port_conf.rxmode.jumbo_frame) - txconf->txq_flags = 0; + txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE; + txconf->offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, socketid, txconf); if (ret < 0) @@ -1046,6 +1061,8 @@ main(int argc, char **argv) } for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) { + struct rte_eth_rxconf rxq_conf; + if (rte_lcore_is_enabled(lcore_id) == 0) continue; qconf = &lcore_conf[lcore_id]; @@ -1066,8 +1083,11 @@ main(int argc, char **argv) printf("rxq=%d,%d,%d ", portid, queueid, socketid); fflush(stdout); + rte_eth_dev_info_get(portid, &dev_info); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, - socketid, NULL, + socketid, &rxq_conf, pktmbuf_pool[socketid]); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d," -- 2.12.0
[dpdk-dev] [PATCH 09/39] examples/bond: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/bond/main.c | 68 --- 1 file changed, 58 insertions(+), 10 deletions(-) diff --git a/examples/bond/main.c b/examples/bond/main.c index 8e3b1f340..306447e6b 100644 --- a/examples/bond/main.c +++ b/examples/bond/main.c @@ -151,11 +151,8 @@ static struct rte_eth_conf port_conf = { .mq_mode = ETH_MQ_RX_NONE, .max_rx_pkt_len = ETHER_MAX_LEN, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload enabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .rx_adv_conf = { .rss_conf = { @@ -174,10 +171,30 @@ slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool) int retval; uint16_t nb_rxd = RTE_RX_DESC_DEFAULT; uint16_t nb_txd = RTE_TX_DESC_DEFAULT; + struct rte_eth_dev_info dev_info; + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; if (portid >= rte_eth_dev_count()) rte_exit(EXIT_FAILURE, "Invalid port\n"); + rte_eth_dev_info_get(portid, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != +port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } retval = rte_eth_dev_configure(portid, 1, 1, &port_conf); if (retval != 0) rte_exit(EXIT_FAILURE, "port %u: configuration failed (res=%d)\n", @@ -189,16 +206,22 @@ slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool) "failed (res=%d)\n", portid, retval); /* RX setup */ + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; retval = rte_eth_rx_queue_setup(portid, 0, nb_rxd, - rte_eth_dev_socket_id(portid), NULL, + rte_eth_dev_socket_id(portid), + &rxq_conf, mbuf_pool); if (retval < 0) rte_exit(retval, " port %u: RX queue 0 setup failed (res=%d)", portid, retval); /* TX setup */ + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; + txq_conf.offloads = port_conf.txmode.offloads; retval = rte_eth_tx_queue_setup(portid, 0, nb_txd, - rte_eth_dev_socket_id(portid), NULL); + rte_eth_dev_socket_id(portid), &txq_conf); if (retval < 0) rte_exit(retval, "port %u: TX queue 0 setup failed (res=%d)", @@ -225,6 +248,9 @@ bond_port_init(struct rte_mempool *mbuf_pool) uint8_t i; uint16_t nb_rxd = RTE_RX_DESC_DEFAULT; uint16_t nb_txd = RTE_TX_DESC_DEFAULT; + struct rte_eth_dev_info dev_info; + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; retval = rte_eth_bond_create("bond0", BONDING_MODE_ALB, 0 /*SOCKET_ID_ANY*/); @@ -234,6 +260,23 @@ bond_port_init(struct rte_mempool *mbuf_pool) BOND_PORT = retval; + rte_eth_dev_info_get(BOND_PORT, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + 
port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + BOND_PORT, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_ca
[dpdk-dev] [PATCH 07/39] examples/l3fwd-power: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/l3fwd-power/main.c | 42 ++-- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c index 0a4ed145c..89c773d3e 100644 --- a/examples/l3fwd-power/main.c +++ b/examples/l3fwd-power/main.c @@ -215,11 +215,9 @@ static struct rte_eth_conf port_conf = { .mq_mode= ETH_MQ_RX_RSS, .max_rx_pkt_len = ETHER_MAX_LEN, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 1, /**< IP checksum offload enabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = (DEV_RX_OFFLOAD_CRC_STRIP | +DEV_RX_OFFLOAD_CHECKSUM), }, .rx_adv_conf = { .rss_conf = { @@ -1317,7 +1315,10 @@ parse_args(int argc, char **argv) 0, 0}; printf("jumbo frame is enabled \n"); - port_conf.rxmode.jumbo_frame = 1; + port_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + port_conf.txmode.offloads |= + DEV_TX_OFFLOAD_MULTI_SEGS; /** * if no max-pkt-len set, use the default value @@ -1718,6 +1719,23 @@ main(int argc, char **argv) /* If number of Rx queue is 0, no need to enable Rx interrupt */ if (nb_rx_queue == 0) port_conf.intr_conf.rxq = 0; + rte_eth_dev_info_get(portid, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, nb_rx_queue, (uint16_t)n_tx_queue, &port_conf); /* Revert to original value */ @@ -1776,10 +1794,9 @@ main(int argc, char **argv) printf("txq=%u,%d,%d ", lcore_id, queueid, socketid); fflush(stdout); - rte_eth_dev_info_get(portid, &dev_info); txconf = &dev_info.default_txconf; - if (port_conf.rxmode.jumbo_frame) - txconf->txq_flags = 0; + txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE; + txconf->offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, socketid, txconf); if (ret < 0) @@ -1819,6 +1836,8 @@ main(int argc, char **argv) fflush(stdout); /* init RX queues */ for(queue = 0; queue < qconf->n_rx_queue; ++queue) { + struct rte_eth_rxconf rxq_conf; + portid = qconf->rx_queue_list[queue].port_id; queueid = qconf->rx_queue_list[queue].queue_id; @@ -1831,8 +1850,11 @@ main(int argc, char **argv) printf("rxq=%d,%d,%d ", portid, queueid, socketid); fflush(stdout); + rte_eth_dev_info_get(portid, &dev_info); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
[dpdk-dev] [PATCH 06/39] examples/l3fwd-acl: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/l3fwd-acl/main.c | 41 - 1 file changed, 32 insertions(+), 9 deletions(-) diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c index e50b1a1a8..977364f17 100644 --- a/examples/l3fwd-acl/main.c +++ b/examples/l3fwd-acl/main.c @@ -156,11 +156,9 @@ static struct rte_eth_conf port_conf = { .mq_mode= ETH_MQ_RX_RSS, .max_rx_pkt_len = ETHER_MAX_LEN, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 1, /**< IP checksum offload enabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = (DEV_RX_OFFLOAD_CRC_STRIP | +DEV_RX_OFFLOAD_CHECKSUM), }, .rx_adv_conf = { .rss_conf = { @@ -1727,7 +1725,10 @@ parse_args(int argc, char **argv) }; printf("jumbo frame is enabled\n"); - port_conf.rxmode.jumbo_frame = 1; + port_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + port_conf.txmode.offloads |= + DEV_TX_OFFLOAD_MULTI_SEGS; /* * if no max-pkt-len set, then use the @@ -1948,6 +1949,23 @@ main(int argc, char **argv) n_tx_queue = MAX_TX_QUEUE_PER_PORT; printf("Creating queues: nb_rxq=%d nb_txq=%u... ", nb_rx_queue, (unsigned)n_tx_queue); + rte_eth_dev_info_get(portid, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, nb_rx_queue, (uint16_t)n_tx_queue, &port_conf); if (ret < 0) @@ -2004,8 +2022,8 @@ main(int argc, char **argv) rte_eth_dev_info_get(portid, &dev_info); txconf = &dev_info.default_txconf; - if (port_conf.rxmode.jumbo_frame) - txconf->txq_flags = 0; + txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE; + txconf->offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, socketid, txconf); if (ret < 0) @@ -2031,6 +2049,8 @@ main(int argc, char **argv) fflush(stdout); /* init RX queues */ for (queue = 0; queue < qconf->n_rx_queue; ++queue) { + struct rte_eth_rxconf rxq_conf; + portid = qconf->rx_queue_list[queue].port_id; queueid = qconf->rx_queue_list[queue].queue_id; @@ -2043,8 +2063,11 @@ main(int argc, char **argv) printf("rxq=%d,%d,%d ", portid, queueid, socketid); fflush(stdout); + rte_eth_dev_info_get(portid, &dev_info); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, - socketid, NULL, + socketid, &rxq_conf, pktmbuf_pool[socketid]);
[dpdk-dev] [PATCH v1 1/2] net/mlx5: load libmlx5 and libibverbs in run-time
MLX5 PMD loads libraries: libibverbs and libmlx5. MLX5 PMD is not linked to external libraries. Signed-off-by: Shachar Beiser --- config/common_base | 1 + drivers/net/mlx5/Makefile| 27 +- drivers/net/mlx5/lib/mlx5_dll.c | 758 +++ drivers/net/mlx5/lib/mlx5_dll.h | 97 + drivers/net/mlx5/mlx5.c | 17 +- drivers/net/mlx5/mlx5.h | 4 + drivers/net/mlx5/mlx5_flow.c | 4 + drivers/net/mlx5/mlx5_mac.c | 4 + drivers/net/mlx5/mlx5_mr.c | 4 + drivers/net/mlx5/mlx5_rss.c | 4 + drivers/net/mlx5/mlx5_rxmode.c | 4 + drivers/net/mlx5/mlx5_rxq.c | 4 + drivers/net/mlx5/mlx5_rxtx.c | 4 + drivers/net/mlx5/mlx5_rxtx.h | 6 +- drivers/net/mlx5/mlx5_rxtx_vec.c | 4 + drivers/net/mlx5/mlx5_txq.c | 4 + mk/rte.app.mk| 8 +- 17 files changed, 941 insertions(+), 13 deletions(-) create mode 100644 drivers/net/mlx5/lib/mlx5_dll.c create mode 100644 drivers/net/mlx5/lib/mlx5_dll.h diff --git a/config/common_base b/config/common_base index e74febe..3708de4 100644 --- a/config/common_base +++ b/config/common_base @@ -237,6 +237,7 @@ CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8 # Compile burst-oriented Mellanox ConnectX-4 & ConnectX-5 (MLX5) PMD # CONFIG_RTE_LIBRTE_MLX5_PMD=n +CONFIG_RTE_LIBRTE_MLX5_DLL=y CONFIG_RTE_LIBRTE_MLX5_DEBUG=n CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE=8 diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile index a3984eb..1dc0a05 100644 --- a/drivers/net/mlx5/Makefile +++ b/drivers/net/mlx5/Makefile @@ -53,7 +53,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c - +ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLL),y) +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/mlx5_dll.c +endif # Basic CFLAGS. CFLAGS += -O3 CFLAGS += -std=c11 -Wall -Wextra @@ -64,7 +66,11 @@ CFLAGS += -D_DEFAULT_SOURCE CFLAGS += -D_XOPEN_SOURCE=600 CFLAGS += $(WERROR_FLAGS) CFLAGS += -Wno-strict-prototypes +ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLL),y) +LDLIBS += -ldl +else LDLIBS += -libverbs -lmlx5 +endif LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs LDLIBS += -lrte_bus_pci @@ -91,7 +97,11 @@ endif ifdef CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE CFLAGS += -DMLX5_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE) endif - +ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLL),y) +CFLAGS += -DMLX5_PMD_DLL +else +CFLAGS += -UMLX5_PMD_DLL +endif include $(RTE_SDK)/mk/rte.lib.mk # Generate and clean-up mlx5_autoconf.h. 
@@ -105,26 +115,28 @@ endif mlx5_autoconf.h.new: FORCE +VERBS_H := infiniband/verbs.h +MLX5DV_H := infiniband/mlx5dv.h mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh $Q $(RM) -f -- '$@' $Q sh -- '$<' '$@' \ HAVE_IBV_DEVICE_VXLAN_SUPPORT \ - infiniband/verbs.h \ + $(VERBS_H) \ enum IBV_DEVICE_VXLAN_SUPPORT \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_WQ_FLAG_RX_END_PADDING \ - infiniband/verbs.h \ + $(VERBS_H) \ enum IBV_WQ_FLAG_RX_END_PADDING \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_MLX5_MOD_MPW \ - infiniband/mlx5dv.h \ + $(MLX5DV_H) \ enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_MLX5_MOD_CQE_128B_COMP \ - infiniband/mlx5dv.h \ + $(MLX5DV_H) \ enum MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ @@ -144,10 +156,9 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT \ - infiniband/verbs.h \ + $(VERBS_H) \ enum IBV_FLOW_SPEC_ACTION_COUNT \ $(AUTOCONF_OUTPUT) - # Create mlx5_autoconf.h or update it in case it differs from the new one. mlx5_autoconf.h: mlx5_autoconf.h.new diff --git a/drivers/net/mlx5/lib/mlx5_dll.c b/drivers/net/mlx5/lib/mlx5_dll.c new file mode 100644 index 000..2e679ea --- /dev/null +++ b/drivers/net/mlx5/lib/mlx5_dll.c @@ -0,0 +1,758 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyrigh
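The body of mlx5_dll.c is largely cut off above; in rough terms it resolves the libibverbs/libmlx5 entry points at run time instead of relying on link-time symbols, which is why -libverbs/-lmlx5 is replaced by -ldl in the Makefile. A minimal sketch of that mechanism, assuming illustrative names (verbs_symbols, load_verbs) rather than the actual table in the patch:

#include <dlfcn.h>
#include <stdio.h>

struct ibv_device;	/* opaque here; the real code includes infiniband/verbs.h */

/* Function-pointer table filled in at run time (illustrative subset). */
struct verbs_symbols {
	struct ibv_device **(*get_device_list)(int *num_devices);
};

static struct verbs_symbols verbs;

static int
load_verbs(void)
{
	void *handle = dlopen("libibverbs.so.1", RTLD_NOW | RTLD_GLOBAL);

	if (handle == NULL) {
		fprintf(stderr, "cannot load libibverbs: %s\n", dlerror());
		return -1;
	}
	verbs.get_device_list = dlsym(handle, "ibv_get_device_list");
	if (verbs.get_device_list == NULL) {
		fprintf(stderr, "cannot resolve ibv_get_device_list: %s\n",
			dlerror());
		dlclose(handle);
		return -1;
	}
	return 0;
}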
[dpdk-dev] [PATCH v1 2/2] net/mlx4: load libmlx4 and libibverbs in run-time
MLX4 PMD dynamically loads mlx4 and ibverbs in run-time and it is not linked to external libraries. Signed-off-by: Shachar Beiser --- config/common_base | 1 + drivers/net/mlx4/Makefile | 14 +- drivers/net/mlx4/lib/mlx4_dll.c | 731 drivers/net/mlx4/lib/mlx4_dll.h | 94 ++ drivers/net/mlx4/mlx4.c | 12 + drivers/net/mlx4/mlx4.h | 4 + drivers/net/mlx4/mlx4_ethdev.c | 4 + drivers/net/mlx4/mlx4_flow.c| 4 + drivers/net/mlx4/mlx4_mr.c | 4 + drivers/net/mlx4/mlx4_prm.h | 4 + drivers/net/mlx4/mlx4_rxq.c | 4 + drivers/net/mlx4/mlx4_rxtx.c| 4 + drivers/net/mlx4/mlx4_rxtx.h| 4 + drivers/net/mlx4/mlx4_txq.c | 4 + mk/rte.app.mk | 7 + 15 files changed, 893 insertions(+), 2 deletions(-) create mode 100644 drivers/net/mlx4/lib/mlx4_dll.c create mode 100644 drivers/net/mlx4/lib/mlx4_dll.h diff --git a/config/common_base b/config/common_base index 3708de4..8ef6be4 100644 --- a/config/common_base +++ b/config/common_base @@ -229,6 +229,7 @@ CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y # Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD # CONFIG_RTE_LIBRTE_MLX4_PMD=n +CONFIG_RTE_LIBRTE_MLX4_DLL=y CONFIG_RTE_LIBRTE_MLX4_DEBUG=n CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS=n CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8 diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile index f1f47c2..aba1d5f 100644 --- a/drivers/net/mlx4/Makefile +++ b/drivers/net/mlx4/Makefile @@ -44,7 +44,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxq.c SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxtx.c SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_txq.c SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_utils.c - +ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLL),y) +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/mlx4_dll.c +endif # Basic CFLAGS. CFLAGS += -O3 CFLAGS += -std=c11 -Wall -Wextra @@ -54,7 +56,11 @@ CFLAGS += -D_BSD_SOURCE CFLAGS += -D_DEFAULT_SOURCE CFLAGS += -D_XOPEN_SOURCE=600 CFLAGS += $(WERROR_FLAGS) +ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLL),y) +LDLIBS += -ldl +else LDLIBS += -libverbs -lmlx4 +endif LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs LDLIBS += -lrte_bus_pci @@ -81,7 +87,11 @@ endif ifdef CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE CFLAGS += -DMLX4_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE) endif - +ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLL),y) +CFLAGS += -DMLX4_PMD_DLL +else +CFLAGS += -UMLX4_PMD_DLL +endif ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS),y) CFLAGS += -DMLX4_PMD_DEBUG_BROKEN_VERBS endif diff --git a/drivers/net/mlx4/lib/mlx4_dll.c b/drivers/net/mlx4/lib/mlx4_dll.c new file mode 100644 index 000..6469869 --- /dev/null +++ b/drivers/net/mlx4/lib/mlx4_dll.c @@ -0,0 +1,731 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE 1 +#endif +#include +#include +#include +#include +#include "../mlx4_utils.h" +#include "mlx4_dll.h" + +#define VERBS_LIB_DIR "/usr/lib64/libibverbs" +#define MLX5_LIB_DIR "/usr/lib64/libmlx4" +#define DIR_LENGTH 25 +/** + * Load a libibverbs and libmlx4 symbols table. + * + * @return + * 0 on success. + */ +int mlx4_load_libs(void) +{ +void *dlhandle; +int ret; + + dlhandle = mlx4_lib_load("ibverbs.so"); + i
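Same mechanism as the mlx5 patch. The practical difference dynamic loading buys shows up at probe time, sketched below under the assumption that the PMD simply reports the failure and bows out; mlx4_load_libs() is declared by the patch, but the surrounding probe function here is only an illustration:

#include <errno.h>
#include <stdio.h>

int mlx4_load_libs(void);	/* from drivers/net/mlx4/lib/mlx4_dll.h */

static int
mlx4_probe_sketch(void)
{
	if (mlx4_load_libs() != 0) {
		/* Illustrative handling: without dlopen()-based loading, a
		 * missing libmlx4/libibverbs would stop the whole binary
		 * with an unresolved shared-object dependency instead of
		 * merely disabling this PMD. */
		fprintf(stderr, "mlx4: libibverbs/libmlx4 not available\n");
		return -ENODEV;
	}
	/* ... continue with the regular verbs-based probe ... */
	return 0;
}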
[dpdk-dev] [PATCH 10/39] examples/exception_path: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/exception_path/main.c | 38 + 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c index f8f5bbdf0..e5f69a4f6 100644 --- a/examples/exception_path/main.c +++ b/examples/exception_path/main.c @@ -107,13 +107,10 @@ */ /* Options for configuring ethernet port */ -static const struct rte_eth_conf port_conf = { +static struct rte_eth_conf port_conf = { .rxmode = { - .header_split = 0, /* Header Split disabled */ - .hw_ip_checksum = 0,/* IP checksum offload disabled */ - .hw_vlan_filter = 0,/* VLAN filtering disabled */ - .jumbo_frame = 0, /* Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /* CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, @@ -447,10 +444,30 @@ init_port(uint16_t port) int ret; uint16_t nb_rxd = NB_RXD; uint16_t nb_txd = NB_TXD; + struct rte_eth_dev_info dev_info; + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; /* Initialise device and RX/TX queues */ PRINT_INFO("Initialising port %u ...", port); fflush(stdout); + rte_eth_dev_info_get(port, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(port, 1, 1, &port_conf); if (ret < 0) FATAL_ERROR("Could not configure port%u (%d)", port, ret); @@ -460,17 +477,22 @@ init_port(uint16_t port) FATAL_ERROR("Could not adjust number of descriptors for port%u (%d)", port, ret); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(port, 0, nb_rxd, rte_eth_dev_socket_id(port), - NULL, + &rxq_conf, pktmbuf_pool); if (ret < 0) FATAL_ERROR("Could not setup up RX queue for port%u (%d)", port, ret); + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; + txq_conf.offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(port, 0, nb_txd, rte_eth_dev_socket_id(port), - NULL); + &txq_conf); if (ret < 0) FATAL_ERROR("Could not setup up TX queue for port%u (%d)", port, ret); -- 2.12.0
[dpdk-dev] [PATCH 11/39] examples/kni: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/kni/main.c | 67 +--- 1 file changed, 58 insertions(+), 9 deletions(-) diff --git a/examples/kni/main.c b/examples/kni/main.c index 3f1738544..ae5a8c61c 100644 --- a/examples/kni/main.c +++ b/examples/kni/main.c @@ -124,11 +124,8 @@ static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS]; /* Options for configuring ethernet port */ static struct rte_eth_conf port_conf = { .rxmode = { - .header_split = 0, /* Header Split disabled */ - .hw_ip_checksum = 0,/* IP checksum offload disabled */ - .hw_vlan_filter = 0,/* VLAN filtering disabled */ - .jumbo_frame = 0, /* Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /* CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, @@ -607,10 +604,30 @@ init_port(uint16_t port) int ret; uint16_t nb_rxd = NB_RXD; uint16_t nb_txd = NB_TXD; + struct rte_eth_dev_info dev_info; + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; /* Initialise device and RX/TX queues */ RTE_LOG(INFO, APP, "Initialising port %u ...\n", (unsigned)port); fflush(stdout); + rte_eth_dev_info_get(port, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != +port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(port, 1, 1, &port_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)\n", @@ -621,14 +638,19 @@ init_port(uint16_t port) rte_exit(EXIT_FAILURE, "Could not adjust number of descriptors " "for port%u (%d)\n", (unsigned)port, ret); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(port, 0, nb_rxd, - rte_eth_dev_socket_id(port), NULL, pktmbuf_pool); + rte_eth_dev_socket_id(port), &rxq_conf, pktmbuf_pool); if (ret < 0) rte_exit(EXIT_FAILURE, "Could not setup up RX queue for " "port%u (%d)\n", (unsigned)port, ret); + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; + txq_conf.offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(port, 0, nb_txd, - rte_eth_dev_socket_id(port), NULL); + rte_eth_dev_socket_id(port), &txq_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Could not setup up TX queue for " "port%u (%d)\n", (unsigned)port, ret); @@ -702,7 +724,10 @@ static int kni_change_mtu(uint16_t port_id, unsigned int new_mtu) { int ret; + uint16_t nb_rxd = NB_RXD; struct rte_eth_conf conf; + struct rte_eth_dev_info dev_info; + struct rte_eth_rxconf rxq_conf; if (port_id >= rte_eth_dev_count()) { RTE_LOG(ERR, APP, "Invalid port id %d\n", port_id); @@ -717,19 +742,43 @@ kni_change_mtu(uint16_t port_id, unsigned int new_mtu) memcpy(&conf, &port_conf, sizeof(conf)); /* Set new MTU */ if (new_mtu > ETHER_MAX_LEN) - conf.rxmode.jumbo_frame = 1; + 
conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; else - conf.rxmode.jumbo_frame = 0; + conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; /* mtu + length of header + length of FCS = max pkt length */ conf.rxmode.max_rx_pkt_len = new_mtu + KNI_ENET_HEADER_SIZE + KNI_ENET_FCS_SIZE; + rte_eth_dev_info_get(port_id, &dev_info); + if ((dev_info.rx_offload_capa & conf.rxmode.offloads) != +
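Beyond the usual port/queue changes, kni also converts its MTU-change callback: the jumbo offload flag is toggled from the requested MTU and max_rx_pkt_len recomputed before the port is reconfigured and its Rx queue set up again. The core of that logic, condensed (kni_mtu_to_conf() is an illustrative name, and the macros come from the example itself):

#include <rte_ethdev.h>
#include <rte_ether.h>

/* Condensed sketch of the converted kni_change_mtu() logic; capability
 * re-checking and Rx queue re-setup are omitted.  Assumes the example's
 * KNI_ENET_HEADER_SIZE and KNI_ENET_FCS_SIZE macros. */
static void
kni_mtu_to_conf(unsigned int new_mtu, struct rte_eth_conf *conf)
{
	if (new_mtu > ETHER_MAX_LEN)
		conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		conf->rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
	/* MTU + Ethernet header + FCS = maximum Rx packet length */
	conf->rxmode.max_rx_pkt_len =
		new_mtu + KNI_ENET_HEADER_SIZE + KNI_ENET_FCS_SIZE;
}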
[dpdk-dev] [PATCH 12/39] examples/ip_fragmentation: convert to new offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/ip_fragmentation/main.c | 36 --- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c index 5aefe0987..ab728989f 100644 --- a/examples/ip_fragmentation/main.c +++ b/examples/ip_fragmentation/main.c @@ -169,14 +169,15 @@ static struct rte_eth_conf port_conf = { .rxmode = { .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 1, /**< IP checksum offload enabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 1, /**< Jumbo Frame Support enabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = (DEV_RX_OFFLOAD_CHECKSUM | +DEV_RX_OFFLOAD_JUMBO_FRAME | +DEV_RX_OFFLOAD_CRC_STRIP), }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, + .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM | +DEV_TX_OFFLOAD_MULTI_SEGS), }, }; @@ -905,6 +906,8 @@ main(int argc, char **argv) /* initialize all ports */ for (portid = 0; portid < nb_ports; portid++) { + struct rte_eth_rxconf rxq_conf; + /* skip ports that are not enabled */ if ((enabled_port_mask & (1 << portid)) == 0) { printf("Skipping disabled port %d\n", portid); @@ -949,6 +952,22 @@ main(int argc, char **argv) n_tx_queue = nb_lcores; if (n_tx_queue > MAX_TX_QUEUE_PER_PORT) n_tx_queue = MAX_TX_QUEUE_PER_PORT; + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue, &port_conf); if (ret < 0) { @@ -967,8 +986,10 @@ main(int argc, char **argv) } /* init one RX queue */ + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, -socket, NULL, +socket, &rxq_conf, socket_direct_pool[socket]); if (ret < 0) { printf("\n"); @@ -992,7 +1013,8 @@ main(int argc, char **argv) fflush(stdout); txconf = &dev_info.default_txconf; - txconf->txq_flags = 0; + txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE; + txconf->offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, socket, txconf); if (ret < 0) { -- 2.12.0
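ip_fragmentation now asks for DEV_TX_OFFLOAD_IPV4_CKSUM at the port level. As a reminder of how the two halves fit together, the per-packet side still has to be requested on each mbuf; the sketch below is generic and is not code from this example:

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Generic sketch: mark an IPv4 packet so a port configured with
 * DEV_TX_OFFLOAD_IPV4_CKSUM computes the header checksum in hardware. */
static void
request_ipv4_cksum(struct rte_mbuf *m)
{
	struct ipv4_hdr *ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
			sizeof(struct ether_hdr));

	m->l2_len = sizeof(struct ether_hdr);
	m->l3_len = sizeof(struct ipv4_hdr);
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM;
	ip->hdr_checksum = 0;	/* hardware fills this in */
}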
[dpdk-dev] [PATCH 13/39] examples/ip_pipeline: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/ip_pipeline/config_parse.c | 13 +++-- examples/ip_pipeline/init.c | 30 ++ 2 files changed, 29 insertions(+), 14 deletions(-) diff --git a/examples/ip_pipeline/config_parse.c b/examples/ip_pipeline/config_parse.c index 3211c6ab7..835541229 100644 --- a/examples/ip_pipeline/config_parse.c +++ b/examples/ip_pipeline/config_parse.c @@ -97,14 +97,8 @@ static const struct app_link_params link_params_default = { .rxmode = { .mq_mode = ETH_MQ_RX_NONE, - .header_split = 0, /* Header split */ - .hw_ip_checksum = 0, /* IP checksum offload */ - .hw_vlan_filter = 0, /* VLAN filtering */ - .hw_vlan_strip = 0, /* VLAN strip */ - .hw_vlan_extend = 0, /* Extended VLAN */ - .jumbo_frame= 0, /* Jumbo frame support */ - .hw_strip_crc = 1, /* CRC strip by HW */ - .enable_scatter = 0, /* Scattered packets RX handler */ + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */ .split_hdr_size = 0, /* Header split buffer size */ @@ -158,8 +152,7 @@ static const struct app_pktq_hwq_out_params default_hwq_out_params = { }, .tx_rs_thresh = 0, .tx_free_thresh = 0, - .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | - ETH_TXQ_FLAGS_NOOFFLOADS, + .txq_flags = ETH_TXQ_FLAGS_IGNORE, .tx_deferred_start = 0, } }; diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c index e56e40482..79a2fbb63 100644 --- a/examples/ip_pipeline/init.c +++ b/examples/ip_pipeline/init.c @@ -876,10 +876,10 @@ app_init_link_frag_ras(struct app_params *app) uint32_t i; if (is_any_swq_frag_or_ras(app)) { - for (i = 0; i < app->n_pktq_hwq_out; i++) { - struct app_pktq_hwq_out_params *p_txq = &app->hwq_out_params[i]; - - p_txq->conf.txq_flags &= ~ETH_TXQ_FLAGS_NOMULTSEGS; + for (i = 0; i < app->n_links; i++) { + struct app_link_params *p_link = &app->link_params[i]; + p_link->conf.txmode.offloads |= + DEV_TX_OFFLOAD_MULTI_SEGS; } } } @@ -962,6 +962,7 @@ app_init_link(struct app_params *app) for (i = 0; i < app->n_links; i++) { struct app_link_params *p_link = &app->link_params[i]; + struct rte_eth_dev_info dev_info; uint32_t link_id, n_hwq_in, n_hwq_out, j; int status; @@ -978,6 +979,25 @@ app_init_link(struct app_params *app) n_hwq_out); /* LINK */ + rte_eth_dev_info_get(p_link->pmd_id, &dev_info); + if ((dev_info.rx_offload_capa & p_link->conf.rxmode.offloads) != + p_link->conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + p_link->pmd_id, p_link->conf.rxmode.offloads, + dev_info.rx_offload_capa); + p_link->conf.rxmode.offloads &= + dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & p_link->conf.txmode.offloads) != + p_link->conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + p_link->pmd_id, p_link->conf.txmode.offloads, + dev_info.tx_offload_capa); + p_link->conf.txmode.offloads &= + dev_info.tx_offload_capa; + } status = rte_eth_dev_configure( p_link->pmd_id, n_hwq_in, @@ -1019,6 +1039,7 @@ app_init_link(struct app_params *app) p_rxq->name, status); + p_rxq->conf.offloads = p_link->conf.rxmode.offloads; status = rte_eth_rx_queue_setup( p_link->pmd_id, rxq_queue_id, @@ -1060,6 +108
[dpdk-dev] [PATCH 15/39] examples/ipsec-secgw: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/ipsec-secgw/ipsec-secgw.c | 27 +-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c index c98454a90..6e538a1ab 100644 --- a/examples/ipsec-secgw/ipsec-secgw.c +++ b/examples/ipsec-secgw/ipsec-secgw.c @@ -217,6 +217,8 @@ static struct rte_eth_conf port_conf = { }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, + .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM | +DEV_TX_OFFLOAD_MULTI_SEGS), }, }; @@ -1394,6 +1396,22 @@ port_init(uint16_t portid) if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY) port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY; + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != +port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue, &port_conf); if (ret < 0) @@ -1420,7 +1438,8 @@ port_init(uint16_t portid) printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id); txconf = &dev_info.default_txconf; - txconf->txq_flags = 0; + txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE; + txconf->offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd, socket_id, txconf); @@ -1434,6 +1453,8 @@ port_init(uint16_t portid) /* init RX queues */ for (queue = 0; queue < qconf->nb_rx_queue; ++queue) { + struct rte_eth_rxconf rxq_conf; + if (portid != qconf->rx_queue_list[queue].port_id) continue; @@ -1442,8 +1463,10 @@ port_init(uint16_t portid) printf("Setup rxq=%d,%d,%d\n", portid, rx_queueid, socket_id); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, rx_queueid, - nb_rxd, socket_id, NULL, + nb_rxd, socket_id, &rxq_conf, socket_ctx[socket_id].mbuf_pool); if (ret < 0) rte_exit(EXIT_FAILURE, -- 2.12.0
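ipsec-secgw adds one more wrinkle: DEV_TX_OFFLOAD_SECURITY is opportunistic, requested only when the port advertises it, before the generic supported/requested check runs (only the Tx side is visible in the hunk above). Isolated for clarity, with the function wrapper being illustrative:

#include <rte_ethdev.h>

/* Enable inline IPsec Tx offload only on ports that support it. */
static void
enable_inline_ipsec_if_supported(uint16_t portid, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(portid, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
		conf->txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
}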
[dpdk-dev] [PATCH 14/39] examples/ip_reassembly: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/ip_reassembly/main.c | 36 +--- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c index 756f90efb..d899e4c37 100644 --- a/examples/ip_reassembly/main.c +++ b/examples/ip_reassembly/main.c @@ -193,11 +193,10 @@ static struct rte_eth_conf port_conf = { .mq_mode= ETH_MQ_RX_RSS, .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 1, /**< IP checksum offload enabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 1, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = (DEV_RX_OFFLOAD_CHECKSUM | +DEV_RX_OFFLOAD_JUMBO_FRAME | +DEV_RX_OFFLOAD_CRC_STRIP), }, .rx_adv_conf = { .rss_conf = { @@ -207,6 +206,8 @@ static struct rte_eth_conf port_conf = { }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, + .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM | +DEV_TX_OFFLOAD_MULTI_SEGS), }, }; @@ -1052,6 +1053,8 @@ main(int argc, char **argv) /* initialize all ports */ for (portid = 0; portid < nb_ports; portid++) { + struct rte_eth_rxconf rxq_conf; + /* skip ports that are not enabled */ if ((enabled_port_mask & (1 << portid)) == 0) { printf("\nSkipping disabled port %d\n", portid); @@ -1104,6 +1107,22 @@ main(int argc, char **argv) n_tx_queue = nb_lcores; if (n_tx_queue > MAX_TX_QUEUE_PER_PORT) n_tx_queue = MAX_TX_QUEUE_PER_PORT; + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue, &port_conf); if (ret < 0) { @@ -1114,8 +1133,10 @@ main(int argc, char **argv) } /* init one RX queue */ + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, -socket, NULL, +socket, &rxq_conf, rxq->pool); if (ret < 0) { printf("\n"); @@ -1140,7 +1161,8 @@ main(int argc, char **argv) fflush(stdout); txconf = &dev_info.default_txconf; - txconf->txq_flags = 0; + txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE; + txconf->offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, socket, txconf); -- 2.12.0
[dpdk-dev] [PATCH 16/39] examples/ipv4_multicast: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/ipv4_multicast/main.c | 35 --- 1 file changed, 28 insertions(+), 7 deletions(-) diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c index 1c5851654..1b8ad3e4e 100644 --- a/examples/ipv4_multicast/main.c +++ b/examples/ipv4_multicast/main.c @@ -138,14 +138,13 @@ static struct rte_eth_conf port_conf = { .rxmode = { .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 1, /**< Jumbo Frame Support enabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = (DEV_RX_OFFLOAD_JUMBO_FRAME | +DEV_RX_OFFLOAD_CRC_STRIP), }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, + .offloads = DEV_TX_OFFLOAD_MULTI_SEGS, }, }; @@ -714,6 +713,8 @@ main(int argc, char **argv) /* initialize all ports */ for (portid = 0; portid < nb_ports; portid++) { + struct rte_eth_rxconf rxq_conf; + /* skip ports that are not enabled */ if ((enabled_port_mask & (1 << portid)) == 0) { printf("Skipping disabled port %d\n", portid); @@ -748,6 +749,23 @@ main(int argc, char **argv) n_tx_queue = nb_lcores; if (n_tx_queue > MAX_TX_QUEUE_PER_PORT) n_tx_queue = MAX_TX_QUEUE_PER_PORT; + + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, 1, (uint16_t)n_tx_queue, &port_conf); if (ret < 0) @@ -769,9 +787,11 @@ main(int argc, char **argv) queueid = 0; printf("rxq=%hu ", queueid); fflush(stdout); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, rte_eth_dev_socket_id(portid), -NULL, +&rxq_conf, packet_pool); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, port=%d\n", @@ -787,7 +807,8 @@ main(int argc, char **argv) fflush(stdout); txconf = &dev_info.default_txconf; - txconf->txq_flags = 0; + txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE; + txconf->offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, rte_lcore_to_socket_id(lcore_id), txconf); if (ret < 0) -- 2.12.0
[dpdk-dev] [PATCH 17/39] examples/link_status_interrupt: convert to new offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/link_status_interrupt/main.c | 38 +++--- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c index bc47dcce3..223c41b3f 100644 --- a/examples/link_status_interrupt/main.c +++ b/examples/link_status_interrupt/main.c @@ -105,14 +105,11 @@ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE]; struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS]; -static const struct rte_eth_conf port_conf = { +static struct rte_eth_conf port_conf = { .rxmode = { .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, @@ -633,6 +630,9 @@ main(int argc, char **argv) /* Initialise each port */ for (portid = 0; portid < nb_ports; portid++) { + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; + /* skip ports that are not enabled */ if ((lsi_enabled_port_mask & (1 << portid)) == 0) { printf("Skipping disabled port %u\n", (unsigned) portid); @@ -641,6 +641,23 @@ main(int argc, char **argv) /* init port */ printf("Initializing port %u... ", (unsigned) portid); fflush(stdout); + rte_eth_dev_info_get(portid, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, 1, 1, &port_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n", @@ -666,9 +683,11 @@ main(int argc, char **argv) /* init one RX queue */ fflush(stdout); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, rte_eth_dev_socket_id(portid), -NULL, +&rxq_conf, lsi_pktmbuf_pool); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup: err=%d, port=%u\n", @@ -676,9 +695,12 @@ main(int argc, char **argv) /* init one TX queue logical core on each port */ fflush(stdout); + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; + txq_conf.offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, rte_eth_dev_socket_id(portid), - NULL); + &txq_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d,port=%u\n", ret, (unsigned) portid); -- 2.12.0
[dpdk-dev] [PATCH 19/39] examples/multi_process: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/multi_process/l2fwd_fork/main.c | 36 - examples/multi_process/symmetric_mp/main.c | 35 +++- 2 files changed, 57 insertions(+), 14 deletions(-) diff --git a/examples/multi_process/l2fwd_fork/main.c b/examples/multi_process/l2fwd_fork/main.c index deace2739..06c566d65 100644 --- a/examples/multi_process/l2fwd_fork/main.c +++ b/examples/multi_process/l2fwd_fork/main.c @@ -156,11 +156,8 @@ struct cpu_aff_arg{ static const struct rte_eth_conf port_conf = { .rxmode = { .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, @@ -1064,6 +1061,9 @@ main(int argc, char **argv) /* Initialise each port */ for (portid = 0; portid < nb_ports; portid++) { + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; + /* skip ports that are not enabled */ if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) { printf("Skipping disabled port %u\n", (unsigned) portid); @@ -1073,6 +1073,23 @@ main(int argc, char **argv) /* init port */ printf("Initializing port %u... ", (unsigned) portid); fflush(stdout); + rte_eth_dev_info_get(portid, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != +port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != +port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, 1, 1, &port_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n", @@ -1089,9 +1106,11 @@ main(int argc, char **argv) /* init one RX queue */ fflush(stdout); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, rte_eth_dev_socket_id(portid), -NULL, +&rxq_conf, l2fwd_pktmbuf_pool[portid]); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n", @@ -1099,9 +1118,12 @@ main(int argc, char **argv) /* init one TX queue on each port */ fflush(stdout); + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; + txq_conf.tx_offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, 0, nb_txd, rte_eth_dev_socket_id(portid), - NULL); + &txq_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n", ret, (unsigned) portid); diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c index 6fb285c74..e722173a5 100644 --- a/examples/multi_process/symmetric_mp/main.c +++ b/examples/multi_process/symmetric_mp/main.c @@ -207,11 +207,9 @@ smp_port_init(uint16_t port, struct rte_mempool 
*mbuf_pool, .rxmode = { .mq_mode
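The change common to every patch in this series is the capability check added just before rte_eth_dev_configure(): the offloads requested in the rte_eth_conf template are compared against what the port reports and silently trimmed to the supported subset. A minimal sketch of that pattern, pulled out into a hypothetical check_port_offloads() helper (the patches open-code it per application; the helper name and messages are not from the patches):

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Trim requested Rx/Tx offloads to what the port supports and warn
 * about anything that had to be dropped. */
static void
check_port_offloads(uint16_t port_id, struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	if ((dev_info.rx_offload_capa & conf->rxmode.offloads) !=
	    conf->rxmode.offloads) {
		printf("port %u: dropping unsupported Rx offloads 0x%" PRIx64 "\n",
		       port_id,
		       conf->rxmode.offloads & ~dev_info.rx_offload_capa);
		conf->rxmode.offloads &= dev_info.rx_offload_capa;
	}
	if ((dev_info.tx_offload_capa & conf->txmode.offloads) !=
	    conf->txmode.offloads) {
		printf("port %u: dropping unsupported Tx offloads 0x%" PRIx64 "\n",
		       port_id,
		       conf->txmode.offloads & ~dev_info.tx_offload_capa);
		conf->txmode.offloads &= dev_info.tx_offload_capa;
	}
}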
[dpdk-dev] [PATCH 18/39] examples/load_balancer: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/load_balancer/init.c | 37 ++--- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/examples/load_balancer/init.c b/examples/load_balancer/init.c index 3dab7f258..532a8b765 100644 --- a/examples/load_balancer/init.c +++ b/examples/load_balancer/init.c @@ -74,11 +74,9 @@ static struct rte_eth_conf port_conf = { .rxmode = { .mq_mode= ETH_MQ_RX_RSS, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 1, /**< IP checksum offload enabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = (DEV_RX_OFFLOAD_CHECKSUM | +DEV_RX_OFFLOAD_CRC_STRIP), }, .rx_adv_conf = { .rss_conf = { @@ -430,6 +428,9 @@ app_init_nics(void) struct rte_mempool *pool; uint16_t nic_rx_ring_size; uint16_t nic_tx_ring_size; + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; + struct rte_eth_dev_info dev_info; n_rx_queues = app_get_nic_rx_queues_per_port(port); n_tx_queues = app.nic_tx_port_mask[port]; @@ -440,6 +441,23 @@ app_init_nics(void) /* Init port */ printf("Initializing NIC port %u ...\n", port); + rte_eth_dev_info_get(port, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure( port, (uint8_t) n_rx_queues, @@ -461,6 +479,8 @@ app_init_nics(void) app.nic_rx_ring_size = nic_rx_ring_size; app.nic_tx_ring_size = nic_tx_ring_size; + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; /* Init RX queues */ for (queue = 0; queue < APP_MAX_RX_QUEUES_PER_NIC_PORT; queue ++) { if (app.nic_rx_queue_mask[port][queue] == 0) { @@ -478,7 +498,7 @@ app_init_nics(void) queue, (uint16_t) app.nic_rx_ring_size, socket, - NULL, + &rxq_conf, pool); if (ret < 0) { rte_panic("Cannot init RX queue %u for port %u (%d)\n", @@ -486,6 +506,9 @@ app_init_nics(void) } } + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; + txq_conf.offloads = port_conf.txmode.offloads; /* Init TX queues */ if (app.nic_tx_port_mask[port] == 1) { app_get_lcore_for_nic_tx(port, &lcore); @@ -497,7 +520,7 @@ app_init_nics(void) 0, (uint16_t) app.nic_tx_ring_size, socket, - NULL); + &txq_conf); if (ret < 0) { rte_panic("Cannot init TX queue 0 for port %d (%d)\n", port, -- 2.12.0
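Besides the port-level check, each patch replaces the NULL queue configuration previously passed to rte_eth_rx_queue_setup()/rte_eth_tx_queue_setup() with explicit structures derived from the PMD defaults, so the per-queue offloads follow the (possibly trimmed) port configuration. A sketch of that sequence for one queue pair; the descriptor count of 512 and the assumption of an existing mempool mb_pool are illustrative only:

#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
setup_one_queue_pair(uint16_t port_id, const struct rte_eth_conf *conf,
		     struct rte_mempool *mb_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Start from the PMD defaults, then inherit the port offloads. */
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = conf->rxmode.offloads;
	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id),
				     &rxq_conf, mb_pool);
	if (ret < 0)
		return ret;

	/* ETH_TXQ_FLAGS_IGNORE makes the PMD honor txq_conf.offloads
	 * instead of the deprecated txq_flags bit-field. */
	txq_conf = dev_info.default_txconf;
	txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txq_conf.offloads = conf->txmode.offloads;
	return rte_eth_tx_queue_setup(port_id, 0, 512,
				      rte_eth_dev_socket_id(port_id),
				      &txq_conf);
}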
[dpdk-dev] [PATCH 23/39] examples/qos_sched: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/qos_sched/init.c | 31 --- 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c index 038f0427d..803ce54d7 100644 --- a/examples/qos_sched/init.c +++ b/examples/qos_sched/init.c @@ -84,15 +84,12 @@ const char *cfg_profile = NULL; int mp_size = NB_MBUF; struct flow_conf qos_conf[MAX_DATA_STREAMS]; -static const struct rte_eth_conf port_conf = { +static struct rte_eth_conf port_conf = { .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .txmode = { .mq_mode = ETH_DCB_NONE, @@ -104,6 +101,7 @@ app_init_port(uint16_t portid, struct rte_mempool *mp) { int ret; struct rte_eth_link link; + struct rte_eth_dev_info dev_info; struct rte_eth_rxconf rx_conf; struct rte_eth_txconf tx_conf; uint16_t rx_size; @@ -125,12 +123,29 @@ app_init_port(uint16_t portid, struct rte_mempool *mp) tx_conf.tx_thresh.wthresh = tx_thresh.wthresh; tx_conf.tx_free_thresh = 0; tx_conf.tx_rs_thresh = 0; - tx_conf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS; tx_conf.tx_deferred_start = 0; + tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; /* init port */ RTE_LOG(INFO, APP, "Initializing port %"PRIu16"... ", portid); fflush(stdout); + rte_eth_dev_info_get(portid, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, 1, 1, &port_conf); if (ret < 0) rte_exit(EXIT_FAILURE, @@ -149,6 +164,7 @@ app_init_port(uint16_t portid, struct rte_mempool *mp) /* init one RX queue */ fflush(stdout); + rx_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size, rte_eth_dev_socket_id(portid), &rx_conf, mp); if (ret < 0) @@ -158,6 +174,7 @@ app_init_port(uint16_t portid, struct rte_mempool *mp) /* init one TX queue */ fflush(stdout); + tx_conf.offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, 0, (uint16_t)ring_conf.tx_size, rte_eth_dev_socket_id(portid), &tx_conf); if (ret < 0) -- 2.12.0
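A side effect visible in qos_sched (and also in quota_watermark and tep_termination later in the series) is that the port_conf template loses its const qualifier: unsupported offload bits are now masked out of the template at run time, so it must be writable. A hedged before/after sketch of the rxmode declaration under the 17.11 headers, where both forms still coexist:

#include <rte_ethdev.h>

/* Old style: per-feature bit-fields, template could stay const. */
static const struct rte_eth_conf port_conf_old = {
	.rxmode = {
		.hw_ip_checksum = 0,
		.hw_strip_crc = 1,
	},
};

/* New style: one offloads mask; non-const because the application may
 * apply port_conf_new.rxmode.offloads &= dev_info.rx_offload_capa. */
static struct rte_eth_conf port_conf_new = {
	.rxmode = {
		.ignore_offload_bitfield = 1,
		.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
	},
};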
[dpdk-dev] [PATCH 22/39] examples/qos_meter: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/qos_meter/main.c | 64 +- 1 file changed, 57 insertions(+), 7 deletions(-) diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c index 67b4a75b9..1fac4cf1e 100644 --- a/examples/qos_meter/main.c +++ b/examples/qos_meter/main.c @@ -85,11 +85,9 @@ static struct rte_eth_conf port_conf = { .mq_mode= ETH_MQ_RX_RSS, .max_rx_pkt_len = ETHER_MAX_LEN, .split_hdr_size = 0, - .header_split = 0, - .hw_ip_checksum = 1, - .hw_vlan_filter = 0, - .jumbo_frame= 0, - .hw_strip_crc = 1, + .ignore_offload_bitfield = 1, + .offloads = (DEV_RX_OFFLOAD_CHECKSUM | +DEV_RX_OFFLOAD_CRC_STRIP), }, .rx_adv_conf = { .rss_conf = { @@ -310,6 +308,10 @@ main(int argc, char **argv) uint32_t lcore_id; uint16_t nb_rxd = NIC_RX_QUEUE_DESC; uint16_t nb_txd = NIC_TX_QUEUE_DESC; + struct rte_eth_conf conf; + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; + struct rte_eth_dev_info dev_info; int ret; /* EAL init */ @@ -335,6 +337,26 @@ main(int argc, char **argv) rte_exit(EXIT_FAILURE, "Buffer pool creation error\n"); /* NIC init */ + rte_eth_dev_info_get(port_rx, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port_rx, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port_rx, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } + conf = port_conf; + conf.rxmode.offloads &= dev_info.rx_offload_capa; + conf.txmode.offloads &= dev_info.tx_offload_capa; ret = rte_eth_dev_configure(port_rx, 1, 1, &port_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_rx, ret); @@ -344,18 +366,41 @@ main(int argc, char **argv) rte_exit(EXIT_FAILURE, "Port %d adjust number of descriptors error (%d)\n", port_rx, ret); + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, nb_rxd, rte_eth_dev_socket_id(port_rx), - NULL, pool); + &rxq_conf, pool); if (ret < 0) rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_rx, ret); + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; + txq_conf.offloads = conf.txmode.offloads; ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, nb_txd, rte_eth_dev_socket_id(port_rx), - NULL); + &txq_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_rx, ret); + rte_eth_dev_info_get(port_tx, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port_tx, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port_tx, port_conf.txmode.offloads, + 
dev_info.tx_offload_capa); + } + conf = port_conf; + conf.rxmode.offloads &= dev_info.rx_offload_capa; + conf.txmode.offloads &= dev_info.tx_offload_capa; ret = rte_eth_dev_configure(port_tx, 1, 1, &port_conf); if (ret < 0) rt
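qos_meter drives two ports (port_rx and port_tx) from a single rte_eth_conf template, which is why the patch introduces a local working copy before configuring each one. One way to express that idea, sketched as a hypothetical configure_port() wrapper rather than the code above:

#include <rte_ethdev.h>

/* Configure one port from a shared template, trimming the offload
 * masks for this specific port only so the template stays intact. */
static int
configure_port(uint16_t port_id, const struct rte_eth_conf *templ)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = *templ;	/* per-port working copy */

	rte_eth_dev_info_get(port_id, &dev_info);
	conf.rxmode.offloads &= dev_info.rx_offload_capa;
	conf.txmode.offloads &= dev_info.tx_offload_capa;
	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}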
[dpdk-dev] [PATCH 20/39] examples/netmap_compat: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/netmap_compat/bridge/bridge.c | 7 ++ examples/netmap_compat/lib/compat_netmap.c | 29 +++-- 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/examples/netmap_compat/bridge/bridge.c b/examples/netmap_compat/bridge/bridge.c index 2f2b6baaa..33c38d3cc 100644 --- a/examples/netmap_compat/bridge/bridge.c +++ b/examples/netmap_compat/bridge/bridge.c @@ -55,11 +55,8 @@ struct rte_eth_conf eth_conf = { .rxmode = { .split_hdr_size = 0, - .header_split = 0, - .hw_ip_checksum = 0, - .hw_vlan_filter = 0, - .jumbo_frame= 0, - .hw_strip_crc = 1, + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, diff --git a/examples/netmap_compat/lib/compat_netmap.c b/examples/netmap_compat/lib/compat_netmap.c index 12b3fcbe0..80a4d90eb 100644 --- a/examples/netmap_compat/lib/compat_netmap.c +++ b/examples/netmap_compat/lib/compat_netmap.c @@ -690,6 +690,9 @@ rte_netmap_init_port(uint16_t portid, const struct rte_netmap_port_conf *conf) int32_t ret; uint16_t i; uint16_t rx_slots, tx_slots; + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; + struct rte_eth_dev_info dev_info; if (conf == NULL || portid >= RTE_DIM(ports) || @@ -710,6 +713,23 @@ rte_netmap_init_port(uint16_t portid, const struct rte_netmap_port_conf *conf) return -EINVAL; } + rte_eth_dev_info_get(portid, &dev_info); + if ((dev_info.rx_offload_capa & conf->eth_conf->rxmode.offloads) != + conf->eth_conf->rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, conf->eth_conf->rxmode.offloads, + dev_info.rx_offload_capa); + conf->eth_conf->rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & conf->eth_conf->txmode.offloads) != + conf->eth_conf->txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, conf->eth_conf->txmode.offloads, + dev_info.tx_offload_capa); + conf->eth_conf->txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, conf->nr_rx_rings, conf->nr_tx_rings, conf->eth_conf); @@ -727,9 +747,14 @@ rte_netmap_init_port(uint16_t portid, const struct rte_netmap_port_conf *conf) return ret; } + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = conf->eth_conf->rxmode.offloads; + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; + txq_conf.offloads = conf->eth_conf->txmode.offloads; for (i = 0; i < conf->nr_tx_rings; i++) { ret = rte_eth_tx_queue_setup(portid, i, tx_slots, - conf->socket_id, NULL); + conf->socket_id, &txq_conf); if (ret < 0) { RTE_LOG(ERR, USER1, @@ -739,7 +764,7 @@ rte_netmap_init_port(uint16_t portid, const struct rte_netmap_port_conf *conf) } ret = rte_eth_rx_queue_setup(portid, i, rx_slots, - conf->socket_id, NULL, conf->pool); + conf->socket_id, &rxq_conf, conf->pool); if (ret < 0) { RTE_LOG(ERR, USER1, -- 2.12.0
[dpdk-dev] [PATCH 21/39] examples/performance-thread: convert to new offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/performance-thread/l3fwd-thread/main.c | 42 +++- 1 file changed, 32 insertions(+), 10 deletions(-) diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c index fa65234f3..49ea80102 100644 --- a/examples/performance-thread/l3fwd-thread/main.c +++ b/examples/performance-thread/l3fwd-thread/main.c @@ -335,11 +335,9 @@ static struct rte_eth_conf port_conf = { .mq_mode = ETH_MQ_RX_RSS, .max_rx_pkt_len = ETHER_MAX_LEN, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 1, /**< IP checksum offload enabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = (DEV_RX_OFFLOAD_CHECKSUM | +DEV_RX_OFFLOAD_CRC_STRIP), }, .rx_adv_conf = { .rss_conf = { @@ -2999,7 +2997,10 @@ parse_args(int argc, char **argv) 0}; printf("jumbo frame is enabled - disabling simple TX path\n"); - port_conf.rxmode.jumbo_frame = 1; + port_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; + port_conf.txmode.offloads |= + DEV_TX_OFFLOAD_MULTI_SEGS; /* if no max-pkt-len set, use the default value ETHER_MAX_LEN */ if (0 == getopt_long(argc, argvopt, "", &lenopts, @@ -3567,6 +3568,23 @@ main(int argc, char **argv) n_tx_queue = MAX_TX_QUEUE_PER_PORT; printf("Creating queues: nb_rxq=%d nb_txq=%u... ", nb_rx_queue, (unsigned)n_tx_queue); + rte_eth_dev_info_get(portid, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + portid, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(portid, nb_rx_queue, (uint16_t)n_tx_queue, &port_conf); if (ret < 0) @@ -3612,10 +3630,9 @@ main(int argc, char **argv) printf("txq=%u,%d,%d ", lcore_id, queueid, socketid); fflush(stdout); - rte_eth_dev_info_get(portid, &dev_info); txconf = &dev_info.default_txconf; - if (port_conf.rxmode.jumbo_frame) - txconf->txq_flags = 0; + txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE; + txconf->offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd, socketid, txconf); if (ret < 0) @@ -3644,6 +3661,8 @@ main(int argc, char **argv) /* init RX queues */ for (queue = 0; queue < rx_thread[i].n_rx_queue; ++queue) { + struct rte_eth_rxconf rxq_conf; + portid = rx_thread[i].rx_queue_list[queue].port_id; queueid = rx_thread[i].rx_queue_list[queue].queue_id; @@ -3655,9 +3674,12 @@ main(int argc, char **argv) printf("rxq=%d,%d,%d ", portid, queueid, socketid); fflush(stdout); + rte_eth_dev_info_get(portid, &dev_info); + rxq_conf = dev_info.default_rxconf; +
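This patch also shows how the old rxmode.jumbo_frame knob maps onto the new API: jumbo support becomes an Rx offload flag, and because large frames generally require multi-segment mbufs on transmit, DEV_TX_OFFLOAD_MULTI_SEGS is requested alongside it. A sketch of that option handling; the helper and its max_pkt_len argument are illustrative, not from the patch:

#include <rte_ethdev.h>

/* Old API:  port_conf.rxmode.jumbo_frame = 1;
 * New API:  express the same intent as offload flags. */
static void
enable_jumbo(struct rte_eth_conf *port_conf, uint32_t max_pkt_len)
{
	port_conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	port_conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
	port_conf->rxmode.max_rx_pkt_len = max_pkt_len;
}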
[dpdk-dev] [PATCH 24/39] examples/quota_watermark: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/quota_watermark/qw/init.c | 38 ++--- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/examples/quota_watermark/qw/init.c b/examples/quota_watermark/qw/init.c index 37b03626d..c26ef3aad 100644 --- a/examples/quota_watermark/qw/init.c +++ b/examples/quota_watermark/qw/init.c @@ -50,14 +50,11 @@ #include "../include/conf.h" -static const struct rte_eth_conf port_conf = { +static struct rte_eth_conf port_conf = { .rxmode = { .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP csum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .txmode = { .mq_mode = ETH_DCB_NONE, @@ -78,9 +75,29 @@ void configure_eth_port(uint16_t port_id) int ret; uint16_t nb_rxd = RX_DESC_PER_QUEUE; uint16_t nb_txd = TX_DESC_PER_QUEUE; + struct rte_eth_rxconf rxq_conf; + struct rte_eth_txconf txq_conf; + struct rte_eth_dev_info dev_info; rte_eth_dev_stop(port_id); + rte_eth_dev_info_get(port_id, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port_id, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port_id, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } ret = rte_eth_dev_configure(port_id, 1, 1, &port_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Cannot configure port %u (error %d)\n", @@ -93,9 +110,11 @@ void configure_eth_port(uint16_t port_id) (unsigned int) port_id, ret); /* Initialize the port's RX queue */ + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd, rte_eth_dev_socket_id(port_id), - NULL, + &rxq_conf, mbuf_pool); if (ret < 0) rte_exit(EXIT_FAILURE, @@ -103,9 +122,12 @@ void configure_eth_port(uint16_t port_id) (unsigned int) port_id, ret); /* Initialize the port's TX queue */ + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; + txq_conf.offloads = port_conf.txmode.offloads; ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd, rte_eth_dev_socket_id(port_id), - NULL); + &txq_conf); if (ret < 0) rte_exit(EXIT_FAILURE, "Failed to setup TX queue on port %u (error %d)\n", -- 2.12.0
[dpdk-dev] [PATCH 27/39] examples/vmdq: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/vmdq/main.c | 11 +-- 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c index 84e9937d4..aaf156aad 100644 --- a/examples/vmdq/main.c +++ b/examples/vmdq/main.c @@ -94,10 +94,7 @@ static const struct rte_eth_conf vmdq_conf_default = { .rxmode = { .mq_mode= ETH_MQ_RX_VMDQ_ONLY, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ + .ignore_offload_bitfield = 1, }, .txmode = { @@ -188,6 +185,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) { struct rte_eth_dev_info dev_info; struct rte_eth_rxconf *rxconf; + struct rte_eth_txconf *txconf; struct rte_eth_conf port_conf; uint16_t rxRings, txRings; uint16_t rxRingSize = RTE_TEST_RX_DESC_DEFAULT; @@ -260,9 +258,10 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) return -1; } - rte_eth_dev_info_get(port, &dev_info); rxconf = &dev_info.default_rxconf; rxconf->rx_drop_en = 1; + txconf = &dev_info.default_txconf; + txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE; for (q = 0; q < rxRings; q++) { retval = rte_eth_rx_queue_setup(port, q, rxRingSize, rte_eth_dev_socket_id(port), @@ -277,7 +276,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) for (q = 0; q < txRings; q++) { retval = rte_eth_tx_queue_setup(port, q, txRingSize, rte_eth_dev_socket_id(port), - NULL); + txconf); if (retval < 0) { printf("initialise tx queue %d failed\n", q); return retval; -- 2.12.0
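For examples that never enabled any Rx offload (vmdq here, and vmdq_dcb, distributor and skeleton later in the series), the Rx side of the conversion is a single field, with the Tx side being the usual default_txconf plus ETH_TXQ_FLAGS_IGNORE shown earlier. A minimal sketch of such a template:

#include <rte_ethdev.h>

/* No Rx offloads requested: only tell the PMD to ignore the deprecated
 * rxmode bit-fields and trust rxmode.offloads (left at zero). */
static const struct rte_eth_conf port_conf = {
	.rxmode = {
		.ignore_offload_bitfield = 1,
	},
};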
[dpdk-dev] [PATCH 25/39] examples/tep_termination: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/tep_termination/vxlan_setup.c | 37 +++-- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c index 1ad4ca3cd..0a955dc06 100644 --- a/examples/tep_termination/vxlan_setup.c +++ b/examples/tep_termination/vxlan_setup.c @@ -95,17 +95,22 @@ uint8_t tep_filter_type[] = {RTE_TUNNEL_FILTER_IMAC_TENID, RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,}; /* Options for configuring ethernet port */ -static const struct rte_eth_conf port_conf = { +static struct rte_eth_conf port_conf = { .rxmode = { .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, + .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM | +DEV_TX_OFFLOAD_UDP_CKSUM | +DEV_TX_OFFLOAD_TCP_CKSUM | +DEV_TX_OFFLOAD_SCTP_CKSUM | +DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | +DEV_TX_OFFLOAD_TCP_TSO | +DEV_TX_OFFLOAD_MULTI_SEGS | +DEV_TX_OFFLOAD_VXLAN_TNL_TSO), }, }; @@ -154,7 +159,7 @@ vxlan_port_init(uint16_t port, struct rte_mempool *mbuf_pool) rxconf = &dev_info.default_rxconf; txconf = &dev_info.default_txconf; - txconf->txq_flags = 0; + txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE; if (port >= rte_eth_dev_count()) return -1; @@ -162,6 +167,22 @@ vxlan_port_init(uint16_t port, struct rte_mempool *mbuf_pool) rx_rings = nb_devices; /* Configure ethernet device. */ + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf); if (retval != 0) return retval; @@ -172,6 +193,7 @@ vxlan_port_init(uint16_t port, struct rte_mempool *mbuf_pool) return retval; /* Setup the queues. */ + rxconf->offloads = port_conf.rxmode.offloads; for (q = 0; q < rx_rings; q++) { retval = rte_eth_rx_queue_setup(port, q, rx_ring_size, rte_eth_dev_socket_id(port), @@ -180,6 +202,7 @@ vxlan_port_init(uint16_t port, struct rte_mempool *mbuf_pool) if (retval < 0) return retval; } + txconf->offloads = port_conf.txmode.offloads; for (q = 0; q < tx_rings; q++) { retval = rte_eth_tx_queue_setup(port, q, tx_ring_size, rte_eth_dev_socket_id(port), -- 2.12.0
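tep_termination is one of the few examples that relied on full Tx offload support (the old txconf->txq_flags = 0), so the patch has to spell out every Tx capability it depends on as port-level flags. A sketch of such a txmode initializer with a representative subset of the flags this patch requests; any real application would list exactly the offloads it uses:

#include <rte_ethdev.h>

static struct rte_eth_conf port_conf = {
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
		.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
			    DEV_TX_OFFLOAD_UDP_CKSUM |
			    DEV_TX_OFFLOAD_TCP_CKSUM |
			    DEV_TX_OFFLOAD_TCP_TSO |
			    DEV_TX_OFFLOAD_MULTI_SEGS |
			    DEV_TX_OFFLOAD_VXLAN_TNL_TSO,
	},
};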
[dpdk-dev] [PATCH 26/39] examples/vhost: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/vhost/main.c | 42 +++--- 1 file changed, 31 insertions(+), 11 deletions(-) diff --git a/examples/vhost/main.c b/examples/vhost/main.c index 89a61f0e5..aeb6f5e5d 100644 --- a/examples/vhost/main.c +++ b/examples/vhost/main.c @@ -145,21 +145,23 @@ static struct rte_eth_conf vmdq_conf_default = { .rxmode = { .mq_mode= ETH_MQ_RX_VMDQ_ONLY, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ + .ignore_offload_bitfield = 1, /* -* It is necessary for 1G NIC such as I350, +* VLAN strip is necessary for 1G NIC such as I350, * this fixes bug of ipv4 forwarding in guest can't * forward pakets from one virtio dev to another virtio dev. */ - .hw_vlan_strip = 1, /**< VLAN strip enabled. */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ - .hw_strip_crc = 1, /**< CRC stripped by hardware */ + .offloads = (DEV_RX_OFFLOAD_CRC_STRIP | +DEV_RX_OFFLOAD_VLAN_STRIP), }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, + .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM | +DEV_TX_OFFLOAD_TCP_CKSUM | +DEV_TX_OFFLOAD_VLAN_INSERT | +DEV_TX_OFFLOAD_MULTI_SEGS | +DEV_TX_OFFLOAD_TCP_TSO), }, .rx_adv_conf = { /* @@ -176,6 +178,7 @@ static struct rte_eth_conf vmdq_conf_default = { }, }; + static unsigned lcore_ids[RTE_MAX_LCORE]; static uint16_t ports[RTE_MAX_ETHPORTS]; static unsigned num_ports = 0; /**< The number of ports specified in command line */ @@ -288,9 +291,7 @@ port_init(uint16_t port) rxconf = &dev_info.default_rxconf; txconf = &dev_info.default_txconf; rxconf->rx_drop_en = 1; - - /* Enable vlan offload */ - txconf->txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL; + txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE; /*configure the number of supported virtio devices based on VMDQ limits */ num_devices = dev_info.max_vmdq_pools; @@ -332,6 +333,22 @@ port_init(uint16_t port) rx_rings = (uint16_t)dev_info.max_rx_queues; /* Configure ethernet device. */ + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != + port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } + if ((dev_info.tx_offload_capa & port_conf.txmode.offloads) != + port_conf.txmode.offloads) { + printf("Some Tx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port, port_conf.txmode.offloads, + dev_info.tx_offload_capa); + port_conf.txmode.offloads &= dev_info.tx_offload_capa; + } retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf); if (retval != 0) { RTE_LOG(ERR, VHOST_PORT, "Failed to configure port %u: %s.\n", @@ -353,6 +370,7 @@ port_init(uint16_t port) } /* Setup the queues. 
*/ + rxconf->offloads = port_conf.rxmode.offloads; for (q = 0; q < rx_rings; q ++) { retval = rte_eth_rx_queue_setup(port, q, rx_ring_size, rte_eth_dev_socket_id(port), @@ -365,6 +383,7 @@ port_init(uint16_t port) return retval; } } + txconf->offloads = port_conf.txmode.offloads; for (q = 0; q < tx_rings; q ++) { retval = rte_eth_tx_queue_setup(port, q, tx_ring_size, rte_eth_dev_socket_id(port), @@ -624,7 +643,8 @@ us_vhost_parse_args(int argc, char **argv) } else { mergeable = !!ret; if (ret) { - vmdq_conf_default.rxmode.jumbo_frame = 1; + vmdq_conf_default.rxmode.offloads |= +
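In the vhost example the old hw_vlan_strip = 1 workaround for 1G NICs becomes DEV_RX_OFFLOAD_VLAN_STRIP, and the old per-queue txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL (enabling VLAN insertion) becomes DEV_TX_OFFLOAD_VLAN_INSERT at port level. A condensed sketch of the equivalent flag usage, not the full vmdq_conf_default from the patch:

#include <rte_ethdev.h>

static struct rte_eth_conf vmdq_conf = {
	.rxmode = {
		.ignore_offload_bitfield = 1,
		/* VLAN strip kept for 1G NICs such as I350, CRC strip as before. */
		.offloads = DEV_RX_OFFLOAD_CRC_STRIP |
			    DEV_RX_OFFLOAD_VLAN_STRIP,
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
		/* Replaces: txq_flags &= ~ETH_TXQ_FLAGS_NOVLANOFFL. */
		.offloads = DEV_TX_OFFLOAD_VLAN_INSERT,
	},
};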
[dpdk-dev] [PATCH 29/39] examples/vm_power_manager: convert to new offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/vm_power_manager/main.c | 12 ++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/examples/vm_power_manager/main.c b/examples/vm_power_manager/main.c index 399fbdd43..53d587d83 100644 --- a/examples/vm_power_manager/main.c +++ b/examples/vm_power_manager/main.c @@ -74,7 +74,10 @@ static volatile bool force_quit; // static const struct rte_eth_conf port_conf_default = { - .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN } + .rxmode = { + .max_rx_pkt_len = ETHER_MAX_LEN, + .ignore_offload_bitfield = 1, + } }; static inline int @@ -84,6 +87,8 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) const uint16_t rx_rings = 1, tx_rings = 1; int retval; uint16_t q; + struct rte_eth_dev_info dev_info; + struct rte_eth_txconf txq_conf; if (port >= rte_eth_dev_count()) return -1; @@ -101,10 +106,13 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) return retval; } + rte_eth_dev_info_get(port, &dev_info); + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; /* Allocate and set up 1 TX queue per Ethernet port. */ for (q = 0; q < tx_rings; q++) { retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE, - rte_eth_dev_socket_id(port), NULL); + rte_eth_dev_socket_id(port), &txq_conf); if (retval < 0) return retval; } -- 2.12.0
[dpdk-dev] [PATCH 28/39] examples/vmdq_dcb: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/vmdq_dcb/main.c | 10 +- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c index 9dad2b8ec..aff04cb0d 100644 --- a/examples/vmdq_dcb/main.c +++ b/examples/vmdq_dcb/main.c @@ -100,10 +100,7 @@ static const struct rte_eth_conf vmdq_dcb_conf_default = { .rxmode = { .mq_mode= ETH_MQ_RX_VMDQ_DCB, .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled */ - .hw_vlan_filter = 0, /**< VLAN filtering disabled */ - .jumbo_frame= 0, /**< Jumbo Frame Support disabled */ + .ignore_offload_bitfield = 1, }, .txmode = { .mq_mode = ETH_MQ_TX_VMDQ_DCB, @@ -228,6 +225,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) uint16_t q; uint16_t queues_per_pool; uint32_t max_nb_pools; + struct rte_eth_txconf txq_conf; /* * The max pool number from dev_info will be used to validate the pool @@ -316,10 +314,12 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) } } + txq_conf = dev_info.default_txconf; + txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE; for (q = 0; q < num_queues; q++) { retval = rte_eth_tx_queue_setup(port, q, txRingSize, rte_eth_dev_socket_id(port), - NULL); + &txq_conf); if (retval < 0) { printf("initialize tx queue %d failed\n", q); return retval; -- 2.12.0
[dpdk-dev] [PATCH 30/39] examples/distributor: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/distributor/main.c | 8 +++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/examples/distributor/main.c b/examples/distributor/main.c index 61e6e6b9e..9a004476d 100644 --- a/examples/distributor/main.c +++ b/examples/distributor/main.c @@ -108,6 +108,7 @@ static const struct rte_eth_conf port_conf_default = { .rxmode = { .mq_mode = ETH_MQ_RX_RSS, .max_rx_pkt_len = ETHER_MAX_LEN, + .ignore_offload_bitfield = 1, }, .txmode = { .mq_mode = ETH_MQ_TX_NONE, @@ -140,6 +141,8 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) uint16_t q; uint16_t nb_rxd = RX_RING_SIZE; uint16_t nb_txd = TX_RING_SIZE; + struct rte_eth_dev_info dev_info; + struct rte_eth_txconf txconf; if (port >= rte_eth_dev_count()) return -1; @@ -160,10 +163,13 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) return retval; } + rte_eth_dev_info_get(port, &dev_info); + txconf = dev_info.default_txconf; + txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE; for (q = 0; q < txRings; q++) { retval = rte_eth_tx_queue_setup(port, q, nb_txd, rte_eth_dev_socket_id(port), - NULL); + &txconf); if (retval < 0) return retval; } -- 2.12.0
[dpdk-dev] [PATCH 32/39] examples/eventdev_pipeline: convert to new offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/eventdev_pipeline_sw_pmd/main.c | 10 -- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c index 5f431d87d..e2c746902 100644 --- a/examples/eventdev_pipeline_sw_pmd/main.c +++ b/examples/eventdev_pipeline_sw_pmd/main.c @@ -563,7 +563,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool) static const struct rte_eth_conf port_conf_default = { .rxmode = { .mq_mode = ETH_MQ_RX_RSS, - .max_rx_pkt_len = ETHER_MAX_LEN + .max_rx_pkt_len = ETHER_MAX_LEN, + .ignore_offload_bitfield = 1, }, .rx_adv_conf = { .rss_conf = { @@ -578,6 +579,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool) struct rte_eth_conf port_conf = port_conf_default; int retval; uint16_t q; + struct rte_eth_dev_info dev_info; + struct rte_eth_txconf txconf; if (port >= rte_eth_dev_count()) return -1; @@ -595,10 +598,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool) return retval; } + rte_eth_dev_info_get(port, &dev_info); + txconf = dev_info.default_txconf; + txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE; /* Allocate and set up 1 TX queue per Ethernet port. */ for (q = 0; q < tx_rings; q++) { retval = rte_eth_tx_queue_setup(port, q, tx_ring_size, - rte_eth_dev_socket_id(port), NULL); + rte_eth_dev_socket_id(port), &txconf); if (retval < 0) return retval; } -- 2.12.0
[dpdk-dev] [PATCH 31/39] examples/ethtool: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/ethtool/ethtool-app/main.c | 7 ++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/examples/ethtool/ethtool-app/main.c b/examples/ethtool/ethtool-app/main.c index bbab2f6e6..8a1a5f34e 100644 --- a/examples/ethtool/ethtool-app/main.c +++ b/examples/ethtool/ethtool-app/main.c @@ -124,9 +124,11 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports) char str_name[16]; uint16_t nb_rxd = PORT_RX_QUEUE_SIZE; uint16_t nb_txd = PORT_TX_QUEUE_SIZE; + struct rte_eth_txconf txconf; memset(&cfg_port, 0, sizeof(cfg_port)); cfg_port.txmode.mq_mode = ETH_MQ_TX_NONE; + cfg_port.rxmode.ignore_offload_bitfield = 1; for (idx_port = 0; idx_port < cnt_ports; idx_port++) { struct app_port *ptr_port = &app_cfg->ports[idx_port]; @@ -160,6 +162,7 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports) &nb_txd) < 0) rte_exit(EXIT_FAILURE, "rte_eth_dev_adjust_nb_rx_tx_desc failed"); + if (rte_eth_rx_queue_setup( idx_port, 0, nb_rxd, rte_eth_dev_socket_id(idx_port), NULL, @@ -167,9 +170,11 @@ static void setup_ports(struct app_config *app_cfg, int cnt_ports) rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup failed" ); + txconf = dev_info.default_txconf; + txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE; if (rte_eth_tx_queue_setup( idx_port, 0, nb_txd, - rte_eth_dev_socket_id(idx_port), NULL) < 0) + rte_eth_dev_socket_id(idx_port), &txconf) < 0) rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup failed" ); -- 2.12.0
[dpdk-dev] [PATCH 33/39] examples/flow_classify: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/flow_classify/flow_classify.c | 12 ++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c index 766f1dd0e..3abb004cf 100644 --- a/examples/flow_classify/flow_classify.c +++ b/examples/flow_classify/flow_classify.c @@ -89,7 +89,10 @@ static struct{ const char cb_port_delim[] = ":"; static const struct rte_eth_conf port_conf_default = { - .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN } + .rxmode = { + .max_rx_pkt_len = ETHER_MAX_LEN, + .ignore_offload_bitfield = 1, + } }; struct flow_classifier { @@ -216,6 +219,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool) const uint16_t rx_rings = 1, tx_rings = 1; int retval; uint16_t q; + struct rte_eth_dev_info dev_info; + struct rte_eth_txconf txconf; if (port >= rte_eth_dev_count()) return -1; @@ -233,10 +238,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool) return retval; } + rte_eth_dev_info_get(port, &dev_info); + txconf = dev_info.default_txconf; + txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE; /* Allocate and set up 1 TX queue per Ethernet port. */ for (q = 0; q < tx_rings; q++) { retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE, - rte_eth_dev_socket_id(port), NULL); + rte_eth_dev_socket_id(port), &txconf); if (retval < 0) return retval; } -- 2.12.0
[dpdk-dev] [PATCH 34/39] examples/flow_filtering: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/flow_filtering/main.c | 27 --- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c index 7d739b4ae..3fca813b4 100644 --- a/examples/flow_filtering/main.c +++ b/examples/flow_filtering/main.c @@ -138,20 +138,23 @@ init_port(void) struct rte_eth_conf port_conf = { .rxmode = { .split_hdr_size = 0, - /**< Header Split disabled */ - .header_split = 0, - /**< IP checksum offload disabled */ - .hw_ip_checksum = 0, - /**< VLAN filtering disabled */ - .hw_vlan_filter = 0, - /**< Jumbo Frame Support disabled */ - .jumbo_frame= 0, - /**< CRC stripped by hardware */ - .hw_strip_crc = 1, + .ignore_offload_bitfield = 1, + .offloads = DEV_RX_OFFLOAD_CRC_STRIP, }, }; + struct rte_eth_rxconf rxq_conf; + struct rte_eth_dev_info dev_info; printf(":: initializing port: %d\n", port_id); + rte_eth_dev_info_get(port_id, &dev_info); + if ((dev_info.rx_offload_capa & port_conf.rxmode.offloads) != +port_conf.rxmode.offloads) { + printf("Some Rx offloads are not supported " + "by port %d: requested 0x%lx supported 0x%lx\n", + port_id, port_conf.rxmode.offloads, + dev_info.rx_offload_capa); + port_conf.rxmode.offloads &= dev_info.rx_offload_capa; + } ret = rte_eth_dev_configure(port_id, nr_queues, nr_queues, &port_conf); if (ret < 0) { @@ -160,11 +163,13 @@ init_port(void) ret, port_id); } + rxq_conf = dev_info.default_rxconf; + rxq_conf.offloads = port_conf.rxmode.offloads; /* only set Rx queues: something we care only so far */ for (i = 0; i < nr_queues; i++) { ret = rte_eth_rx_queue_setup(port_id, i, 512, rte_eth_dev_socket_id(port_id), -NULL, +&rxq_conf, mbuf_pool); if (ret < 0) { rte_exit(EXIT_FAILURE, -- 2.12.0
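The flow_filtering diff shows the old-to-new field mapping most directly. For reference, the correspondence applied throughout this series (a summary written for this archive, not taken verbatim from any single patch) is roughly:

	.header_split   = 1   ->  DEV_RX_OFFLOAD_HEADER_SPLIT
	.hw_ip_checksum = 1   ->  DEV_RX_OFFLOAD_CHECKSUM
	.hw_vlan_filter = 1   ->  DEV_RX_OFFLOAD_VLAN_FILTER
	.hw_vlan_strip  = 1   ->  DEV_RX_OFFLOAD_VLAN_STRIP
	.jumbo_frame    = 1   ->  DEV_RX_OFFLOAD_JUMBO_FRAME
	.hw_strip_crc   = 1   ->  DEV_RX_OFFLOAD_CRC_STRIP

Fields that were set to 0 in the old initializers simply disappear, since an absent flag in rxmode.offloads means the offload is off.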
[dpdk-dev] [PATCH 36/39] examples/ptpclient: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/ptpclient/ptpclient.c | 7 +-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c index c53dad68b..13ffecb27 100644 --- a/examples/ptpclient/ptpclient.c +++ b/examples/ptpclient/ptpclient.c @@ -77,7 +77,10 @@ uint8_t ptp_enabled_port_nb; static uint8_t ptp_enabled_ports[RTE_MAX_ETHPORTS]; static const struct rte_eth_conf port_conf_default = { - .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN } + .rxmode = { + .max_rx_pkt_len = ETHER_MAX_LEN, + .ignore_offload_bitfield = 1, + } }; static const struct ether_addr ether_multicast = { @@ -241,7 +244,7 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) rte_eth_dev_info_get(q, &dev_info); txconf = &dev_info.default_txconf; - txconf->txq_flags = 0; + txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE; retval = rte_eth_tx_queue_setup(port, q, nb_txd, rte_eth_dev_socket_id(port), txconf); -- 2.12.0
[dpdk-dev] [PATCH 35/39] examples/packet_ordering: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/packet_ordering/main.c | 13 +++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c index 3add7be47..dd4cce895 100644 --- a/examples/packet_ordering/main.c +++ b/examples/packet_ordering/main.c @@ -64,7 +64,11 @@ volatile uint8_t quit_signal; static struct rte_mempool *mbuf_pool; -static struct rte_eth_conf port_conf_default; +static struct rte_eth_conf port_conf_default = { + .rxmode = { + .ignore_offload_bitfield = 1, + }, +}; struct worker_thread_args { struct rte_ring *ring_in; @@ -293,6 +297,8 @@ configure_eth_port(uint16_t port_id) uint16_t q; uint16_t nb_rxd = RX_DESC_PER_QUEUE; uint16_t nb_txd = TX_DESC_PER_QUEUE; + struct rte_eth_dev_info dev_info; + struct rte_eth_txconf txconf; if (port_id > nb_ports) return -1; @@ -313,9 +319,12 @@ configure_eth_port(uint16_t port_id) return ret; } + rte_eth_dev_info_get(port_id, &dev_info); + txconf = dev_info.default_txconf; + txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE; for (q = 0; q < txRings; q++) { ret = rte_eth_tx_queue_setup(port_id, q, nb_txd, - rte_eth_dev_socket_id(port_id), NULL); + rte_eth_dev_socket_id(port_id), &txconf); if (ret < 0) return ret; } -- 2.12.0
[dpdk-dev] [PATCH 38/39] examples/server_node_efd: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/server_node_efd/server/init.c | 10 -- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/examples/server_node_efd/server/init.c b/examples/server_node_efd/server/init.c index 0bcab8cc1..9bce96fa2 100644 --- a/examples/server_node_efd/server/init.c +++ b/examples/server_node_efd/server/init.c @@ -125,12 +125,15 @@ init_port(uint16_t port_num) /* for port configuration all features are off by default */ const struct rte_eth_conf port_conf = { .rxmode = { - .mq_mode = ETH_MQ_RX_RSS + .mq_mode = ETH_MQ_RX_RSS, + .ignore_offload_bitfield = 1, } }; const uint16_t rx_rings = 1, tx_rings = num_nodes; uint16_t rx_ring_size = RTE_MP_RX_DESC_DEFAULT; uint16_t tx_ring_size = RTE_MP_TX_DESC_DEFAULT; + struct rte_eth_dev_info dev_info; + struct rte_eth_txconf txconf; uint16_t q; int retval; @@ -159,10 +162,13 @@ init_port(uint16_t port_num) return retval; } + rte_eth_dev_info_get(port_num, &dev_info); + txconf = dev_info.default_txconf; + txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE; for (q = 0; q < tx_rings; q++) { retval = rte_eth_tx_queue_setup(port_num, q, tx_ring_size, rte_eth_dev_socket_id(port_num), - NULL); + &txconf); if (retval < 0) return retval; } -- 2.12.0
[dpdk-dev] [PATCH 37/39] examples/rxtx_callbacks: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/rxtx_callbacks/main.c | 12 ++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/examples/rxtx_callbacks/main.c b/examples/rxtx_callbacks/main.c index ca135d219..ca4518dcf 100644 --- a/examples/rxtx_callbacks/main.c +++ b/examples/rxtx_callbacks/main.c @@ -47,7 +47,10 @@ #define BURST_SIZE 32 static const struct rte_eth_conf port_conf_default = { - .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN, }, + .rxmode = { + .max_rx_pkt_len = ETHER_MAX_LEN, + .ignore_offload_bitfield = 1, + }, }; static unsigned nb_ports; @@ -105,6 +108,8 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) uint16_t nb_txd = TX_RING_SIZE; int retval; uint16_t q; + struct rte_eth_dev_info dev_info; + struct rte_eth_txconf txconf; if (port >= rte_eth_dev_count()) return -1; @@ -124,9 +129,12 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) return retval; } + rte_eth_dev_info_get(port, &dev_info); + txconf = dev_info.default_txconf; + txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE; for (q = 0; q < tx_rings; q++) { retval = rte_eth_tx_queue_setup(port, q, nb_txd, - rte_eth_dev_socket_id(port), NULL); + rte_eth_dev_socket_id(port), &txconf); if (retval < 0) return retval; } -- 2.12.0
[dpdk-dev] [PATCH 39/39] examples/skeleton: convert to new ethdev offloads API
Ethdev offloads API has changed since: commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API") commit cba7f53b717d ("ethdev: introduce Tx queue offloads API") This commit support the new API. Signed-off-by: Shahaf Shuler --- examples/skeleton/basicfwd.c | 12 ++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/examples/skeleton/basicfwd.c b/examples/skeleton/basicfwd.c index e623754cf..32263a639 100644 --- a/examples/skeleton/basicfwd.c +++ b/examples/skeleton/basicfwd.c @@ -47,7 +47,10 @@ #define BURST_SIZE 32 static const struct rte_eth_conf port_conf_default = { - .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN } + .rxmode = { + .max_rx_pkt_len = ETHER_MAX_LEN, + .ignore_offload_bitfield = 1, + } }; /* basicfwd.c: Basic DPDK skeleton forwarding example. */ @@ -65,6 +68,8 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) uint16_t nb_txd = TX_RING_SIZE; int retval; uint16_t q; + struct rte_eth_dev_info dev_info; + struct rte_eth_txconf txconf; if (port >= rte_eth_dev_count()) return -1; @@ -86,10 +91,13 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool) return retval; } + rte_eth_dev_info_get(port, &dev_info); + txconf = dev_info.default_txconf; + txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE; /* Allocate and set up 1 TX queue per Ethernet port. */ for (q = 0; q < tx_rings; q++) { retval = rte_eth_tx_queue_setup(port, q, nb_txd, - rte_eth_dev_socket_id(port), NULL); + rte_eth_dev_socket_id(port), &txconf); if (retval < 0) return retval; } -- 2.12.0
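Putting the pieces together, a minimal port_init() in the post-conversion style; this is an illustrative composite of the skeleton and rxtx_callbacks patches rather than code from either, with arbitrary ring sizes and a single queue pair:

#include <rte_ethdev.h>
#include <rte_mempool.h>

static const struct rte_eth_conf port_conf_default = {
	.rxmode = {
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.ignore_offload_bitfield = 1,
	},
};

static int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_conf port_conf = port_conf_default;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_txconf txq_conf;
	uint16_t nb_rxd = 128;
	uint16_t nb_txd = 512;
	int retval;

	if (port >= rte_eth_dev_count())
		return -1;

	rte_eth_dev_info_get(port, &dev_info);
	port_conf.rxmode.offloads &= dev_info.rx_offload_capa;
	port_conf.txmode.offloads &= dev_info.tx_offload_capa;

	retval = rte_eth_dev_configure(port, 1, 1, &port_conf);
	if (retval != 0)
		return retval;

	retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
	if (retval != 0)
		return retval;

	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = port_conf.rxmode.offloads;
	retval = rte_eth_rx_queue_setup(port, 0, nb_rxd,
			rte_eth_dev_socket_id(port), &rxq_conf, mbuf_pool);
	if (retval < 0)
		return retval;

	txq_conf = dev_info.default_txconf;
	txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	txq_conf.offloads = port_conf.txmode.offloads;
	retval = rte_eth_tx_queue_setup(port, 0, nb_txd,
			rte_eth_dev_socket_id(port), &txq_conf);
	if (retval < 0)
		return retval;

	return rte_eth_dev_start(port);
}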
Re: [dpdk-dev] [PATCH] net/mlx5: cleanup the allocation of the buffer used for the ethtool stats
Hi Thierry,

Friday, November 17, 2017 9:55 AM, Nelio Laranjeiro:
> > 2.11.0
> > Acked-by: Nelio Laranjeiro

I am having the following check-git-log errors:

Headline too long:
  net/mlx5: cleanup the allocation of the buffer used for the ethtool stats
Wrong tag:
  Fixes: a4193ae3bc4f ('net/mlx5: support extended statistics')
Wrong 'Fixes' reference:
  Fixes: a4193ae3bc4f ('net/mlx5: support extended statistics')

Is it a candidate for a Cc: sta...@dpdk.org backport?

net/mlx5: cleanup the allocation of the buffer used for the ethtool stats

Is the below suggestion [1] acceptable? If so I will merge it into next-net-mlx.

[1]
Author: Thierry Herbelot
Date: Fri Nov 17 14:51:34 2017 +0100

    net/mlx5: cleanup allocation of ethtool stats

    Simplify the computation for the needed size:
    - exact size for the structure header,
    - exact size for a number of 64-bit counters.

    Fixes: a4193ae3bc4f ("net/mlx5: support extended statistics")
    Cc: sta...@dpdk.org

    Signed-off-by: Thierry Herbelot
    Acked-by: Nelio Laranjeiro

> > Thanks,
> >
> > --
> > Nélio Laranjeiro
> > 6WIND
Re: [dpdk-dev] [PATCH] net/mlx5: cleanup the allocation of the buffer used for the ethtool stats
On 11/23/2017 02:00 PM, Shahaf Shuler wrote:
> Hi Thierry
>
> Friday, November 17, 2017 9:55 AM, Nelio Laranjeiro:
>>> 2.11.0
>>> Acked-by: Nelio Laranjeiro
>
> I am having the following check-git-log errors:
>
> Headline too long:
>   net/mlx5: cleanup the allocation of the buffer used for the ethtool stats
> Wrong tag:
>   Fixes: a4193ae3bc4f ('net/mlx5: support extended statistics')
> Wrong 'Fixes' reference:
>   Fixes: a4193ae3bc4f ('net/mlx5: support extended statistics')
>
> Is it a candidate for a Cc: sta...@dpdk.org backport?
>
> net/mlx5: cleanup the allocation of the buffer used for the ethtool stats
>
> Is the below suggestion [1] acceptable? If so I will merge it into next-net-mlx.
>
> [1]
> Author: Thierry Herbelot
> Date: Fri Nov 17 14:51:34 2017 +0100
>
>     net/mlx5: cleanup allocation of ethtool stats
>
>     Simplify the computation for the needed size:
>     - exact size for the structure header,
>     - exact size for a number of 64-bit counters.
>
>     Fixes: a4193ae3bc4f ("net/mlx5: support extended statistics")
>     Cc: sta...@dpdk.org
>
>     Signed-off-by: Thierry Herbelot
>     Acked-by: Nelio Laranjeiro

Hello Shahaf

Your suggestion is excellent: thanks for it.

Best regards

Thierry

> Thanks,
>
> --
> Nélio Laranjeiro
> 6WIND

--
Thierry Herbelot
6WIND
Software Engineer
Re: [dpdk-dev] [PATCH] net/mlx5: cleanup the allocation of the buffer used for the ethtool stats
Thursday, November 23, 2017 3:06 PM, Thierry Herbelot:
> On 11/23/2017 02:00 PM, Shahaf Shuler wrote:
>
> Hello Shahaf
>
> Your suggestion is excellent: thanks for it.
>
> Best regards
>
> Thierry

Applied to next-net-mlx, thanks.

> >> Thanks,
> >>
> >> --
> >> Nélio Laranjeiro
> >> 6WIND
>
> --
> Thierry Herbelot
> 6WIND
> Software Engineer
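The suggestion quoted as [1] above reduces the allocation to the ethtool_stats header plus exactly one 64-bit slot per counter. A hedged sketch of that size computation, written independently of the mlx5 sources (the helper name is invented):

#include <stdint.h>
#include <stdlib.h>
#include <linux/ethtool.h>

/* Exact size: struct ethtool_stats header followed by n_stats 64-bit
 * counters in its flexible array member. The caller still fills in
 * cmd/n_stats before issuing the SIOCETHTOOL ioctl. */
static struct ethtool_stats *
alloc_ethtool_stats(unsigned int n_stats)
{
	return calloc(1, sizeof(struct ethtool_stats) +
			 n_stats * sizeof(uint64_t));
}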
[dpdk-dev] [PATCH v2 1/2] net/mlx5: load libmlx5 and libibverbs in run-time
MLX5 PMD loads libraries: libibverbs and libmlx5. MLX5 PMD is not linked to external libraries. Signed-off-by: Shachar Beiser --- History: V1: http://dpdk.org/patch/31555 V2: * Fixing checkpatch coding style issues --- config/common_base | 1 + drivers/net/mlx5/Makefile| 27 +- drivers/net/mlx5/lib/mlx5_dll.c | 758 +++ drivers/net/mlx5/lib/mlx5_dll.h | 97 + drivers/net/mlx5/mlx5.c | 17 +- drivers/net/mlx5/mlx5.h | 4 + drivers/net/mlx5/mlx5_flow.c | 4 + drivers/net/mlx5/mlx5_mac.c | 4 + drivers/net/mlx5/mlx5_mr.c | 4 + drivers/net/mlx5/mlx5_rss.c | 4 + drivers/net/mlx5/mlx5_rxmode.c | 4 + drivers/net/mlx5/mlx5_rxq.c | 4 + drivers/net/mlx5/mlx5_rxtx.c | 4 + drivers/net/mlx5/mlx5_rxtx.h | 6 +- drivers/net/mlx5/mlx5_rxtx_vec.c | 4 + drivers/net/mlx5/mlx5_txq.c | 4 + mk/rte.app.mk| 8 +- 17 files changed, 941 insertions(+), 13 deletions(-) create mode 100644 drivers/net/mlx5/lib/mlx5_dll.c create mode 100644 drivers/net/mlx5/lib/mlx5_dll.h diff --git a/config/common_base b/config/common_base index e74febe..3708de4 100644 --- a/config/common_base +++ b/config/common_base @@ -237,6 +237,7 @@ CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8 # Compile burst-oriented Mellanox ConnectX-4 & ConnectX-5 (MLX5) PMD # CONFIG_RTE_LIBRTE_MLX5_PMD=n +CONFIG_RTE_LIBRTE_MLX5_DLL=y CONFIG_RTE_LIBRTE_MLX5_DEBUG=n CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE=8 diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile index a3984eb..1dc0a05 100644 --- a/drivers/net/mlx5/Makefile +++ b/drivers/net/mlx5/Makefile @@ -53,7 +53,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c - +ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLL),y) +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/mlx5_dll.c +endif # Basic CFLAGS. CFLAGS += -O3 CFLAGS += -std=c11 -Wall -Wextra @@ -64,7 +66,11 @@ CFLAGS += -D_DEFAULT_SOURCE CFLAGS += -D_XOPEN_SOURCE=600 CFLAGS += $(WERROR_FLAGS) CFLAGS += -Wno-strict-prototypes +ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLL),y) +LDLIBS += -ldl +else LDLIBS += -libverbs -lmlx5 +endif LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs LDLIBS += -lrte_bus_pci @@ -91,7 +97,11 @@ endif ifdef CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE CFLAGS += -DMLX5_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE) endif - +ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLL),y) +CFLAGS += -DMLX5_PMD_DLL +else +CFLAGS += -UMLX5_PMD_DLL +endif include $(RTE_SDK)/mk/rte.lib.mk # Generate and clean-up mlx5_autoconf.h. 
@@ -105,26 +115,28 @@ endif mlx5_autoconf.h.new: FORCE +VERBS_H := infiniband/verbs.h +MLX5DV_H := infiniband/mlx5dv.h mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh $Q $(RM) -f -- '$@' $Q sh -- '$<' '$@' \ HAVE_IBV_DEVICE_VXLAN_SUPPORT \ - infiniband/verbs.h \ + $(VERBS_H) \ enum IBV_DEVICE_VXLAN_SUPPORT \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_WQ_FLAG_RX_END_PADDING \ - infiniband/verbs.h \ + $(VERBS_H) \ enum IBV_WQ_FLAG_RX_END_PADDING \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_MLX5_MOD_MPW \ - infiniband/mlx5dv.h \ + $(MLX5DV_H) \ enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_MLX5_MOD_CQE_128B_COMP \ - infiniband/mlx5dv.h \ + $(MLX5DV_H) \ enum MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ @@ -144,10 +156,9 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT \ - infiniband/verbs.h \ + $(VERBS_H) \ enum IBV_FLOW_SPEC_ACTION_COUNT \ $(AUTOCONF_OUTPUT) - # Create mlx5_autoconf.h or update it in case it differs from the new one. mlx5_autoconf.h: mlx5_autoconf.h.new diff --git a/drivers/net/mlx5/lib/mlx5_dll.c b/drivers/net/mlx5/lib/mlx5_dll.c new file mode 100644 index 000..2228ad1 --- /dev/null +++ b/drivers/net/mlx5/lib/mlx5_dll.c @@ -0,0 +1,758 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions
[dpdk-dev] [PATCH v2 2/2] net/mlx4: load libmlx4 and libibverbs in run-time
MLX4 PMD dynamically loads mlx4 and ibverbs in run-time and it is not linked to external libraries. Signed-off-by: Shachar Beiser --- History: V1: http://dpdk.org/dev/patchwork/patch/31555/ V2: Fixing checkpatch code styling warnings --- config/common_base | 1 + drivers/net/mlx4/Makefile | 14 +- drivers/net/mlx4/lib/mlx4_dll.c | 731 drivers/net/mlx4/lib/mlx4_dll.h | 94 ++ drivers/net/mlx4/mlx4.c | 12 + drivers/net/mlx4/mlx4.h | 4 + drivers/net/mlx4/mlx4_ethdev.c | 4 + drivers/net/mlx4/mlx4_flow.c| 4 + drivers/net/mlx4/mlx4_mr.c | 4 + drivers/net/mlx4/mlx4_prm.h | 4 + drivers/net/mlx4/mlx4_rxq.c | 4 + drivers/net/mlx4/mlx4_rxtx.c| 4 + drivers/net/mlx4/mlx4_rxtx.h| 4 + drivers/net/mlx4/mlx4_txq.c | 4 + drivers/net/mlx5/lib/mlx5_dll.c | 4 +- mk/rte.app.mk | 7 + 16 files changed, 895 insertions(+), 4 deletions(-) create mode 100644 drivers/net/mlx4/lib/mlx4_dll.c create mode 100644 drivers/net/mlx4/lib/mlx4_dll.h diff --git a/config/common_base b/config/common_base index 3708de4..8ef6be4 100644 --- a/config/common_base +++ b/config/common_base @@ -229,6 +229,7 @@ CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y # Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD # CONFIG_RTE_LIBRTE_MLX4_PMD=n +CONFIG_RTE_LIBRTE_MLX4_DLL=y CONFIG_RTE_LIBRTE_MLX4_DEBUG=n CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS=n CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8 diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile index f1f47c2..aba1d5f 100644 --- a/drivers/net/mlx4/Makefile +++ b/drivers/net/mlx4/Makefile @@ -44,7 +44,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxq.c SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxtx.c SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_txq.c SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_utils.c - +ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLL),y) +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/mlx4_dll.c +endif # Basic CFLAGS. CFLAGS += -O3 CFLAGS += -std=c11 -Wall -Wextra @@ -54,7 +56,11 @@ CFLAGS += -D_BSD_SOURCE CFLAGS += -D_DEFAULT_SOURCE CFLAGS += -D_XOPEN_SOURCE=600 CFLAGS += $(WERROR_FLAGS) +ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLL),y) +LDLIBS += -ldl +else LDLIBS += -libverbs -lmlx4 +endif LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs LDLIBS += -lrte_bus_pci @@ -81,7 +87,11 @@ endif ifdef CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE CFLAGS += -DMLX4_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE) endif - +ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLL),y) +CFLAGS += -DMLX4_PMD_DLL +else +CFLAGS += -UMLX4_PMD_DLL +endif ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS),y) CFLAGS += -DMLX4_PMD_DEBUG_BROKEN_VERBS endif diff --git a/drivers/net/mlx4/lib/mlx4_dll.c b/drivers/net/mlx4/lib/mlx4_dll.c new file mode 100644 index 000..0d8e7a9 --- /dev/null +++ b/drivers/net/mlx4/lib/mlx4_dll.c @@ -0,0 +1,731 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. 
nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE 1 +#endif +#include +#include +#include +#include +#include "../mlx4_utils.h" +#include "mlx4_dll.h" + +#define VERBS_LIB_DIR "/usr/lib64/libibverbs" +#define MLX5_LIB_DIR "/usr/lib64/libmlx4" +#define DIR_LENGTH 25 +/** + * Load a libibverbs and libmlx4 symbols t
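Reading a patch like this is easier with the general mechanism in mind: the sketch below shows the dlopen()/dlsym() pattern a MLX4_PMD_DLL-style loader relies on (and why the Makefile switches LDLIBS from -libverbs -lmlx4 to -ldl). The library name and the single symbol resolved here are illustrative only, not the actual contents of mlx4_dll.c, which resolves a whole table of verbs/mlx4 entry points.

```c
#include <dlfcn.h>
#include <stdio.h>

struct ibv_device;	/* opaque here; normally from infiniband/verbs.h */

/* Function pointer filled at run time instead of a link-time dependency. */
static struct ibv_device **(*dl_ibv_get_device_list)(int *num_devices);

static int
mlx4_dl_load(void)
{
	void *handle = dlopen("libibverbs.so.1", RTLD_NOW | RTLD_GLOBAL);

	if (handle == NULL) {
		fprintf(stderr, "cannot load libibverbs: %s\n", dlerror());
		return -1;
	}
	*(void **)&dl_ibv_get_device_list = dlsym(handle,
						  "ibv_get_device_list");
	if (dl_ibv_get_device_list == NULL) {
		fprintf(stderr, "missing symbol: %s\n", dlerror());
		dlclose(handle);
		return -1;
	}
	return 0;	/* handle stays open for the PMD's lifetime */
}
```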
Re: [dpdk-dev] [PATCH 2/3] power: switching to unbuffered access for /sys files
Hi Radoslaw, On 11/11/2017 6:55 PM, Radoslaw Biernacki wrote: This patch fixes the bug caused by improper use of buffered stdio file access for switching the CPU frequency and governor. When using buffered stdio, each fwrite() must use fflush() and the return code must be verified. Also fseek() is needed. Therefore it is better to use unbuffered mode or use plain open()/write() functions. This fix use second approach. This not only remove need for ffush() but also remove need for fseek() operations. This patch also reuse some code around power_set_governor_userspace() and power_set_governor_userspace() functions. Fixes: 445c6528b55f ("power: common interface for guest and host") CC: sta...@dpdk.org Signed-off-by: Radoslaw Biernacki --- lib/librte_power/rte_power_acpi_cpufreq.c | 211 +- 1 file changed, 91 insertions(+), 120 deletions(-) diff --git a/lib/librte_power/rte_power_acpi_cpufreq.c b/lib/librte_power/rte_power_acpi_cpufreq.c index 3d0872f..f811bd3 100644 --- a/lib/librte_power/rte_power_acpi_cpufreq.c +++ b/lib/librte_power/rte_power_acpi_cpufreq.c @@ -30,7 +30,7 @@ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ - +#include #include #include #include @@ -47,6 +47,12 @@ #include "rte_power_acpi_cpufreq.h" #include "rte_power_common.h" +#define min(_x, _y) ({ \ + typeof(_x) _min1 = (_x); \ + typeof(_y) _min2 = (_y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2; }) + #ifdef RTE_LIBRTE_POWER_DEBUG #define POWER_DEBUG_TRACE(fmt, args...) do { \ RTE_LOG(ERR, POWER, "%s: " fmt, __func__, ## args); \ @@ -88,7 +94,7 @@ struct rte_power_info { unsigned lcore_id; /**< Logical core id */ uint32_t freqs[RTE_MAX_LCORE_FREQS]; /**< Frequency array */ uint32_t nb_freqs; /**< number of available freqs */ - FILE *f; /**< FD of scaling_setspeed */ + int fd; /**< FD of scaling_setspeed */ char governor_ori[32]; /**< Original governor name */ uint32_t curr_idx; /**< Freq index in freqs array */ volatile uint32_t state; /**< Power in use state */ @@ -105,6 +111,9 @@ static struct rte_power_info lcore_power_info[RTE_MAX_LCORE]; static int set_freq_internal(struct rte_power_info *pi, uint32_t idx) { + char buf[BUFSIZ]; + int count, ret; + if (idx >= RTE_MAX_LCORE_FREQS || idx >= pi->nb_freqs) { RTE_LOG(ERR, POWER, "Invalid frequency index %u, which " "should be less than %u\n", idx, pi->nb_freqs); @@ -117,17 +126,14 @@ set_freq_internal(struct rte_power_info *pi, uint32_t idx) POWER_DEBUG_TRACE("Freqency[%u] %u to be set for lcore %u\n", idx, pi->freqs[idx], pi->lcore_id); - if (fseek(pi->f, 0, SEEK_SET) < 0) { - RTE_LOG(ERR, POWER, "Fail to set file position indicator to 0 " - "for setting frequency for lcore %u\n", pi->lcore_id); + count = snprintf(buf, sizeof(buf), "%u", pi->freqs[idx]); + assert((size_t)count < sizeof(buf)-1); + ret = write(pi->fd, buf, count); + if (ret != count) { + RTE_LOG(ERR, POWER, "Fail to write new frequency (%s) for " + "lcore %u\n", buf, pi->lcore_id); return -1; } - if (fprintf(pi->f, "%u", pi->freqs[idx]) < 0) { - RTE_LOG(ERR, POWER, "Fail to write new frequency for " - "lcore %u\n", pi->lcore_id); - return -1; - } - fflush(pi->f); pi->curr_idx = idx; return 1; @@ -139,90 +145,109 @@ set_freq_internal(struct rte_power_info *pi, uint32_t idx) * governor will be saved for rolling back. 
*/ static int -power_set_governor_userspace(struct rte_power_info *pi) +power_set_governor(unsigned int lcore_id, const char *new_gov, char *old_gov, + size_t old_gov_len) { - FILE *f; + int fd; + int count, len; int ret = -1; char buf[BUFSIZ]; char fullpath[PATH_MAX]; - char *s; - int val; snprintf(fullpath, sizeof(fullpath), POWER_SYSFILE_GOVERNOR, - pi->lcore_id); - f = fopen(fullpath, "rw+"); - if (!f) { +lcore_id); + fd = open(fullpath, O_RDWR); + if (fd < 0) { RTE_LOG(ERR, POWER, "Failed to open %s\n", fullpath); return ret; } - s = fgets(buf, sizeof(buf), f); - if (!s) { - RTE_LOG(ERR, POWER, "fgets returns nothing\n"); + count = read(fd, buf, sizeof(buf)); + if (count < 0) { + RTE_LOG(ERR, POWER, "Failed t
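As a minimal sketch of the approach this patch takes (the helper name and buffer size are illustrative): the frequency is written straight to the already-open scaling_setspeed descriptor with write(2), and a short write is treated as failure. This removes the fseek()/fflush() bookkeeping that buffered stdio requires.

```c
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int
set_speed_unbuffered(int fd, uint32_t freq_khz)
{
	char buf[32];
	int len;

	len = snprintf(buf, sizeof(buf), "%u", freq_khz);
	/* write(2) hits sysfs directly; there is no stream state to rewind. */
	if (write(fd, buf, len) != len)
		return -1;
	return 0;
}
```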
[dpdk-dev] [PATCH 2/2] examples/ipsec-secgw: add target queues in flow actions
Mellanox INNOVA NIC needs to have final target queue actions to perform inline crypto. Signed-off-by: Nelio Laranjeiro --- examples/ipsec-secgw/ipsec.c | 27 ++- examples/ipsec-secgw/ipsec.h | 2 +- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c index 17bd7620d..e967f88b3 100644 --- a/examples/ipsec-secgw/ipsec.c +++ b/examples/ipsec-secgw/ipsec.c @@ -142,6 +142,22 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa) rte_eth_dev_get_sec_ctx( sa->portid); const struct rte_security_capability *sec_cap; + uint8_t rss_key[40]; + struct rte_eth_rss_conf rss_conf = { + .rss_key = rss_key, + .rss_key_len = 40, + }; + struct rte_eth_dev *eth_dev; + union { + struct rte_flow_action_rss rss; + struct { + const struct rte_eth_rss_conf *rss_conf; + uint16_t num; + uint16_t queue[RTE_MAX_QUEUES_PER_PORT]; + } local; + } action_rss; + unsigned int i; + unsigned int j; sa->sec_session = rte_security_session_create(ctx, &sess_conf, ipsec_ctx->session_pool); @@ -201,7 +217,16 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa) sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY; sa->action[0].conf = sa->sec_session; - sa->action[1].type = RTE_FLOW_ACTION_TYPE_END; + sa->action[1].type = RTE_FLOW_ACTION_TYPE_RSS; + sa->action[1].conf = &action_rss; + eth_dev = ctx->device; + rte_eth_dev_rss_hash_conf_get(sa->portid, &rss_conf); + for (i = 0, j = 0; i < eth_dev->data->nb_rx_queues; ++i) + if (eth_dev->data->rx_queues[i]) + action_rss.local.queue[j++] = i; + action_rss.local.num = j; + action_rss.local.rss_conf = &rss_conf; + sa->action[2].type = RTE_FLOW_ACTION_TYPE_END; sa->attr.egress = (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS); diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h index 775b316ff..82ffc1c6d 100644 --- a/examples/ipsec-secgw/ipsec.h +++ b/examples/ipsec-secgw/ipsec.h @@ -133,7 +133,7 @@ struct ipsec_sa { uint32_t ol_flags; #define MAX_RTE_FLOW_PATTERN (4) -#define MAX_RTE_FLOW_ACTIONS (2) +#define MAX_RTE_FLOW_ACTIONS (4) struct rte_flow_item pattern[MAX_RTE_FLOW_PATTERN]; struct rte_flow_action action[MAX_RTE_FLOW_ACTIONS]; struct rte_flow_attr attr; -- 2.11.0
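Put differently, after this patch the action list programmed for an inline-crypto SA no longer ends right after the security action: an RSS action fans the matched ESP traffic out to the configured Rx queues before the end marker. A schematic helper (a sketch only; sec_session and action_rss stand for the objects built earlier in create_session()):

```c
#include <rte_flow.h>

static void
fill_sa_actions(struct rte_flow_action action[3], void *sec_session,
		const void *action_rss)
{
	action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
	action[0].conf = sec_session;
	action[1].type = RTE_FLOW_ACTION_TYPE_RSS;
	action[1].conf = action_rss;
	action[2].type = RTE_FLOW_ACTION_TYPE_END;
	action[2].conf = NULL;
}
```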
[dpdk-dev] [PATCH 1/2] examples/ipsec-secgw: fix missing ingress flow attribute
The generic flow API has both direction bits, ingress and egress, for rules which may apply in either direction. Fixes: ec17993a145a ("examples/ipsec-secgw: support security offload") Cc: akhil.go...@nxp.com Signed-off-by: Nelio Laranjeiro --- examples/ipsec-secgw/ipsec.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c index ec8bf95e1..17bd7620d 100644 --- a/examples/ipsec-secgw/ipsec.c +++ b/examples/ipsec-secgw/ipsec.c @@ -205,6 +205,8 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa) sa->attr.egress = (sa->direction == RTE_SECURITY_IPSEC_SA_DIR_EGRESS); + sa->attr.ingress = (sa->direction == + RTE_SECURITY_IPSEC_SA_DIR_INGRESS); sa->flow = rte_flow_create(sa->portid, &sa->attr, sa->pattern, sa->action, &err); if (sa->flow == NULL) { -- 2.11.0
[dpdk-dev] [PATCH v3 1/2] net/mlx5: load libmlx5 and libibverbs in run-time
MLX5 PMD loads libraries: libibverbs and libmlx5. MLX5 PMD is not linked to external libraries. Signed-off-by: Shachar Beiser --- v1: load external libraries in run-time v2 * fix checkpatch warnings v3: * fix checkpatch warnings --- config/common_base | 1 + drivers/net/mlx5/Makefile| 27 +- drivers/net/mlx5/lib/mlx5_dll.c | 756 +++ drivers/net/mlx5/lib/mlx5_dll.h | 94 + drivers/net/mlx5/mlx5.c | 17 +- drivers/net/mlx5/mlx5.h | 4 + drivers/net/mlx5/mlx5_flow.c | 4 + drivers/net/mlx5/mlx5_mac.c | 4 + drivers/net/mlx5/mlx5_mr.c | 4 + drivers/net/mlx5/mlx5_rss.c | 4 + drivers/net/mlx5/mlx5_rxmode.c | 4 + drivers/net/mlx5/mlx5_rxq.c | 4 + drivers/net/mlx5/mlx5_rxtx.c | 4 + drivers/net/mlx5/mlx5_rxtx.h | 6 +- drivers/net/mlx5/mlx5_rxtx_vec.c | 4 + drivers/net/mlx5/mlx5_txq.c | 4 + mk/rte.app.mk| 8 +- 17 files changed, 936 insertions(+), 13 deletions(-) create mode 100644 drivers/net/mlx5/lib/mlx5_dll.c create mode 100644 drivers/net/mlx5/lib/mlx5_dll.h diff --git a/config/common_base b/config/common_base index e74febe..3708de4 100644 --- a/config/common_base +++ b/config/common_base @@ -237,6 +237,7 @@ CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8 # Compile burst-oriented Mellanox ConnectX-4 & ConnectX-5 (MLX5) PMD # CONFIG_RTE_LIBRTE_MLX5_PMD=n +CONFIG_RTE_LIBRTE_MLX5_DLL=y CONFIG_RTE_LIBRTE_MLX5_DEBUG=n CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE=8 diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile index a3984eb..1dc0a05 100644 --- a/drivers/net/mlx5/Makefile +++ b/drivers/net/mlx5/Makefile @@ -53,7 +53,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c - +ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLL),y) +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/mlx5_dll.c +endif # Basic CFLAGS. CFLAGS += -O3 CFLAGS += -std=c11 -Wall -Wextra @@ -64,7 +66,11 @@ CFLAGS += -D_DEFAULT_SOURCE CFLAGS += -D_XOPEN_SOURCE=600 CFLAGS += $(WERROR_FLAGS) CFLAGS += -Wno-strict-prototypes +ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLL),y) +LDLIBS += -ldl +else LDLIBS += -libverbs -lmlx5 +endif LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs LDLIBS += -lrte_bus_pci @@ -91,7 +97,11 @@ endif ifdef CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE CFLAGS += -DMLX5_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE) endif - +ifeq ($(CONFIG_RTE_LIBRTE_MLX5_DLL),y) +CFLAGS += -DMLX5_PMD_DLL +else +CFLAGS += -UMLX5_PMD_DLL +endif include $(RTE_SDK)/mk/rte.lib.mk # Generate and clean-up mlx5_autoconf.h. 
@@ -105,26 +115,28 @@ endif mlx5_autoconf.h.new: FORCE +VERBS_H := infiniband/verbs.h +MLX5DV_H := infiniband/mlx5dv.h mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh $Q $(RM) -f -- '$@' $Q sh -- '$<' '$@' \ HAVE_IBV_DEVICE_VXLAN_SUPPORT \ - infiniband/verbs.h \ + $(VERBS_H) \ enum IBV_DEVICE_VXLAN_SUPPORT \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_WQ_FLAG_RX_END_PADDING \ - infiniband/verbs.h \ + $(VERBS_H) \ enum IBV_WQ_FLAG_RX_END_PADDING \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_MLX5_MOD_MPW \ - infiniband/mlx5dv.h \ + $(MLX5DV_H) \ enum MLX5DV_CONTEXT_FLAGS_MPW_ALLOWED \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_MLX5_MOD_CQE_128B_COMP \ - infiniband/mlx5dv.h \ + $(MLX5DV_H) \ enum MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ @@ -144,10 +156,9 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT \ - infiniband/verbs.h \ + $(VERBS_H) \ enum IBV_FLOW_SPEC_ACTION_COUNT \ $(AUTOCONF_OUTPUT) - # Create mlx5_autoconf.h or update it in case it differs from the new one. mlx5_autoconf.h: mlx5_autoconf.h.new diff --git a/drivers/net/mlx5/lib/mlx5_dll.c b/drivers/net/mlx5/lib/mlx5_dll.c new file mode 100644 index 000..2c4ac74 --- /dev/null +++ b/drivers/net/mlx5/lib/mlx5_dll.c @@ -0,0 +1,756 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * not
[dpdk-dev] [PATCH v3 2/2] net/mlx4: load libmlx4 and libibverbs in run-time
MLX4 PMD dynamically loads mlx4 and ibverbs in run-time and it is not linked to external libraries. Signed-off-by: Shachar Beiser --- v1: load external libraries in run-time v2: fix checkpatch warnings v3: fix checkpatch warnings --- config/common_base | 1 + drivers/net/mlx4/Makefile | 14 +- drivers/net/mlx4/lib/mlx4_dll.c | 729 drivers/net/mlx4/lib/mlx4_dll.h | 91 + drivers/net/mlx4/mlx4.c | 12 + drivers/net/mlx4/mlx4.h | 4 + drivers/net/mlx4/mlx4_ethdev.c | 4 + drivers/net/mlx4/mlx4_flow.c| 4 + drivers/net/mlx4/mlx4_mr.c | 4 + drivers/net/mlx4/mlx4_prm.h | 4 + drivers/net/mlx4/mlx4_rxq.c | 4 + drivers/net/mlx4/mlx4_rxtx.c| 4 + drivers/net/mlx4/mlx4_rxtx.h| 4 + drivers/net/mlx4/mlx4_txq.c | 4 + mk/rte.app.mk | 7 + 15 files changed, 888 insertions(+), 2 deletions(-) create mode 100644 drivers/net/mlx4/lib/mlx4_dll.c create mode 100644 drivers/net/mlx4/lib/mlx4_dll.h diff --git a/config/common_base b/config/common_base index 3708de4..8ef6be4 100644 --- a/config/common_base +++ b/config/common_base @@ -229,6 +229,7 @@ CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y # Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD # CONFIG_RTE_LIBRTE_MLX4_PMD=n +CONFIG_RTE_LIBRTE_MLX4_DLL=y CONFIG_RTE_LIBRTE_MLX4_DEBUG=n CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS=n CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE=8 diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile index f1f47c2..aba1d5f 100644 --- a/drivers/net/mlx4/Makefile +++ b/drivers/net/mlx4/Makefile @@ -44,7 +44,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxq.c SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_rxtx.c SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_txq.c SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4_utils.c - +ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLL),y) +SRCS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += lib/mlx4_dll.c +endif # Basic CFLAGS. CFLAGS += -O3 CFLAGS += -std=c11 -Wall -Wextra @@ -54,7 +56,11 @@ CFLAGS += -D_BSD_SOURCE CFLAGS += -D_DEFAULT_SOURCE CFLAGS += -D_XOPEN_SOURCE=600 CFLAGS += $(WERROR_FLAGS) +ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLL),y) +LDLIBS += -ldl +else LDLIBS += -libverbs -lmlx4 +endif LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs LDLIBS += -lrte_bus_pci @@ -81,7 +87,11 @@ endif ifdef CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE CFLAGS += -DMLX4_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX4_TX_MP_CACHE) endif - +ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DLL),y) +CFLAGS += -DMLX4_PMD_DLL +else +CFLAGS += -UMLX4_PMD_DLL +endif ifeq ($(CONFIG_RTE_LIBRTE_MLX4_DEBUG_BROKEN_VERBS),y) CFLAGS += -DMLX4_PMD_DEBUG_BROKEN_VERBS endif diff --git a/drivers/net/mlx4/lib/mlx4_dll.c b/drivers/net/mlx4/lib/mlx4_dll.c new file mode 100644 index 000..8c7cede --- /dev/null +++ b/drivers/net/mlx4/lib/mlx4_dll.c @@ -0,0 +1,729 @@ +/*- + * BSD LICENSE + * + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +#ifndef _GNU_SOURCE +#define _GNU_SOURCE 1 +#endif +#include +#include +#include +#include +#include "../mlx4_utils.h" +#include "mlx4_dll.h" + +#define VERBS_LIB_DIR "/usr/lib64/libibverbs" +#define MLX5_LIB_DIR "/usr/lib64/libmlx4" +#define DIR_LENGTH 25 +/** + * Load a libibverbs and libmlx4 symbols table. + * + * @return + * 0 on s
[dpdk-dev] rte_eth_bond: Problem with link failure and 8023AD
Hello, I've been testing my LAG implemented with the DPDK eth_bond pmd. As part of my fault tolerance testing, I want to ensure that if a link is flapping up and down continuously, impact to service is minimal. My findings are that in this case, the lag is rendered inoperable if a certain link is flapping. Details below. Setup: -4x10G X710 links in a 8023ad lag connected to a switch. -Under normal operations, lag is steady, traffic balanced, etc Problem: If I take down a link on the switch corresponding to the "aggregator" link in the dpdk lag, then bring it back up, every link in the lag goes from distributing to not distributing to back to distributing. This causes unnecessary loss of service. A single link failure, regardless of whether or not it's the aggregator link, should not change the state of the other links. Consider what would happen if there were a hardware fault on that link, or its signal were bad: it's possible for it to be stuck flapping up and down. This would lead to complete loss of service on the lag, despite there being three stable links remaining. Analysis: - The switch is showing that the system id is changing when the link flaps. It's going from 00:00:00:00:00:00 to the aggregator's mac. This is not good. Why is it happening? It's because by default we seem to be using the "AGG_BANDWIDTH" selection algorithm, which is broken: It's taking a slave index, and using that the index into the 8023ad ports array, which is based on the dpdk port number. It should translate it from the slave index into a dpdk_port number using the slaves[] array. - Aside from the above, if you look, the default is supposed to be AGG_STABLE, according to bond_mode_8023ad_conf_get_default. However, bond_mode_8023ad_conf_assign does not actually copy out the selection algorithm, so it just uses 0, which happens to be AGG_BANDWIDTH. - I fixed the above, but still faced two more issues: 1) The system ID changes when the aggregator changes, which can lead to the problem. 2) When the link fails, it is "deactivated" in the lag via bond_mode_8023ad_deactivate_slave. There is a block in there dedicated to the case where the aggregator is disabled. In that case, it explicitly unselects each slave sharing that aggregator. This causes them to fall back to the DETACHED state in the mux machine -- i.e. they are no longer aggregating at all, until the state machine runs through the LACP exchange with the partner again. Possible fix: 1) Change bond_mode_8023ad_conf_assign to actually copy out the selection algorithm. 2) Ensure that all members of a LAG have the same system id (i.e. choose the LAG's mac address) 3) Do not detach the other members when the aggregator's link state goes down. Note: 1) We should fix AGG_BANDWIDTH and AGG_COUNT separately. 2) I can't see any reason why the system id should be equal to the mac of the aggregator. It's intended to represent the system to which the lag belongs, not the aggregator itself. The aggregator is represented by the operational key. So, it should be fine to use the LAG's mac address, which is fixed at init, as the system id for all possible aggregators. 3) I think not detaching is the correct approach. There is nothing in my reading of 802.1Q or 802.1AX' LACP specification that implies we should do this. There is a blurb about changes in parameters which lead to the change in aggregator forcing the unselected transition, but I don't think that needs to apply here. I'm fairly certain they're talking about changing the operational key/etc. 
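As a rough illustration of fix (1), the missing piece boils down to copying the selection algorithm when the 802.3ad configuration is assigned; the structure and field names below are assumptions about the rte_eth_bond_8023ad.c internals, not verified code:

```c
/* Sketch: carry the aggregator selection algorithm into the per-bond
 * 802.3ad private state so it no longer defaults to 0 (AGG_BANDWIDTH).
 * Field names are assumed, not taken from the actual sources. */
static void
conf_assign_agg_selection(struct mode8023ad_private *mode4,
		const struct rte_eth_bond_8023ad_conf *conf)
{
	mode4->agg_selection = conf->agg_selection;
}
```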
How does everyone feel about this? Am I crazy in requiring this functionality? What about the proposed fix? Does it sound reasonable, or am I going to break the state machine somehow? Thanks, Kyle
[dpdk-dev] [PATCH v1 0/7] net/mlx5: IPsec offload support
Add support for IPsec using rte_security on the ConnectX-4 Lx INNOVA NIC. This support is still in progress on all sides (RDMA-Core and the Linux kernel); it is only enabled at compilation time when the expected symbols are present in Verbs. Even in that situation, static asserts are present to ensure the offload request can be understood by the NIC (those values may change once the patches in RDMA-Core are upstreamed). Minor changes expected in v2: - Rebased on top of new offload API. - Eventual modifications to match upstream RDMA-Core and Linux Kernel - RSS on SPI. - ESN support. - Documentation update to explain how to use the INNOVA NIC. Applies on top of: http://dpdk.org/dev/patchwork/patch/31592/ Aviad Yehezkel (2): net/mlx5: add IPsec Tx/Rx offload support net/mlx5: add security capability function Nelio Laranjeiro (4): net: define Mellanox ether type for embed metadata net/mlx5: handle the IPsec support from Verbs net/mlx5: simplify error handling in flow action parsing net/mlx5: add device parameter to enabled IPsec Shahaf Shuler (1): net/mlx5: support security flow action doc/guides/nics/mlx5.rst | 9 + drivers/net/mlx5/Makefile | 6 + drivers/net/mlx5/mlx5.c| 52 ++ drivers/net/mlx5/mlx5.h| 10 ++ drivers/net/mlx5/mlx5_ethdev.c | 4 + drivers/net/mlx5/mlx5_flow.c | 395 + drivers/net/mlx5/mlx5_ipsec.c | 318 + drivers/net/mlx5/mlx5_prm.h| 39 drivers/net/mlx5/mlx5_rxtx.c | 104 ++- drivers/net/mlx5/mlx5_rxtx.h | 4 +- drivers/net/mlx5/mlx5_txq.c| 1 + lib/librte_net/rte_ether.h | 2 + 12 files changed, 861 insertions(+), 83 deletions(-) create mode 100644 drivers/net/mlx5/mlx5_ipsec.c -- 2.11.0
[dpdk-dev] [PATCH v1 1/7] net: define Mellanox ether type for embed metadata
Mellanox has a registered Ethertype used to embed additional metadata in Ethernet frames [1]. This Ethertype is necessary for some NICs to transform packets using an offload. [1] http://standards-oui.ieee.org/ethertype/eth.txt Signed-off-by: Nelio Laranjeiro --- lib/librte_net/rte_ether.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/lib/librte_net/rte_ether.h b/lib/librte_net/rte_ether.h index 06d7b486c..f118b6e01 100644 --- a/lib/librte_net/rte_ether.h +++ b/lib/librte_net/rte_ether.h @@ -334,6 +334,8 @@ struct vxlan_hdr { #define ETHER_TYPE_SLOW 0x8809 /**< Slow protocols (LACP and Marker). */ #define ETHER_TYPE_TEB 0x6558 /**< Transparent Ethernet Bridging. */ #define ETHER_TYPE_LLDP 0x88CC /**< LLDP Protocol. */ +#define ETHER_TYPE_MLNX 0X8CE4 +/**< Mellanox additional metadata in Ethernet frames. */ #define ETHER_VXLAN_HLEN (sizeof(struct udp_hdr) + sizeof(struct vxlan_hdr)) /**< VXLAN tunnel header length. */ -- 2.11.0
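As a tiny usage sketch (the helper name is illustrative), the new constant lets code recognize frames that carry the Mellanox metadata header:

```c
#include <rte_byteorder.h>
#include <rte_ether.h>

static inline int
is_mlnx_metadata_frame(const struct ether_hdr *eth)
{
	return eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_MLNX);
}
```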
[dpdk-dev] [PATCH v1 2/7] net/mlx5: handle the IPsec support from Verbs
Enable HAVE_IBV_IPSEC_SUPPORT when the associated code is available in RDMA-Core. Signed-off-by: Nelio Laranjeiro --- drivers/net/mlx5/Makefile | 5 + 1 file changed, 5 insertions(+) diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile index a3984eb9f..b2dd86796 100644 --- a/drivers/net/mlx5/Makefile +++ b/drivers/net/mlx5/Makefile @@ -147,6 +147,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh infiniband/verbs.h \ enum IBV_FLOW_SPEC_ACTION_COUNT \ $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ + HAVE_IBV_IPSEC_SUPPORT \ + infiniband/verbs.h \ + enum IBV_FLOW_SPEC_ESP \ + $(AUTOCONF_OUTPUT) # Create mlx5_autoconf.h or update it in case it differs from the new one. -- 2.11.0
[dpdk-dev] [PATCH v1 3/7] net/mlx5: add IPsec Tx/Rx offload support
From: Aviad Yehezkel This feature is only supported by ConnectX-4 Lx INNOVA NIC. Having such support will automatically disable and enable crypto offload device arguments to make the PMD IPsec capable. Signed-off-by: Aviad Yehezkel Signed-off-by: Nelio Laranjeiro --- drivers/net/mlx5/mlx5.c| 8 drivers/net/mlx5/mlx5.h| 1 + drivers/net/mlx5/mlx5_ethdev.c | 4 ++ drivers/net/mlx5/mlx5_prm.h| 39 drivers/net/mlx5/mlx5_rxtx.c | 104 ++--- drivers/net/mlx5/mlx5_rxtx.h | 4 +- drivers/net/mlx5/mlx5_txq.c| 1 + 7 files changed, 154 insertions(+), 7 deletions(-) diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index cd66fe162..00480cef0 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -106,6 +106,14 @@ #define MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP (1 << 4) #endif +#ifdef HAVE_IBV_IPSEC_SUPPORT +#define MLX5_IPSEC_FLAGS \ + (MLX5DV_CONTEXT_XFRM_FLAGS_ESP_AES_GCM_TX | \ +MLX5DV_CONTEXT_XFRM_FLAGS_ESP_AES_GCM_RX | \ +MLX5DV_CONTEXT_XFRM_FLAGS_ESP_AES_GCM_REQ_METADATA | \ +MLX5DV_CONTEXT_XFRM_FLAGS_ESP_AES_GCM_SPI_RSS_ONLY) +#endif + struct mlx5_args { int cqe_comp; int txq_inline; diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index e6a69b823..c6a01d972 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -117,6 +117,7 @@ struct priv { unsigned int isolated:1; /* Whether isolated mode is enabled. */ unsigned int tx_vec_en:1; /* Whether Tx vector is enabled. */ unsigned int rx_vec_en:1; /* Whether Rx vector is enabled. */ + unsigned int ipsec_en:1; /* Whether IPsec is enabled. */ unsigned int counter_set_supported:1; /* Counter set is supported. */ /* Whether Tx offloads for tunneled packets are supported. */ unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */ diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index ca9ad0fef..f0c7fba43 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -680,6 +680,10 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) DEV_TX_OFFLOAD_TCP_CKSUM); if (priv->tso) info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO; + if (priv->ipsec_en) { + info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY; + info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY; + } if (priv->tunnel_en) info->tx_offload_capa |= (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h index 2de310bcb..bd6270671 100644 --- a/drivers/net/mlx5/mlx5_prm.h +++ b/drivers/net/mlx5/mlx5_prm.h @@ -342,4 +342,43 @@ mlx5_flow_mark_get(uint32_t val) #endif } +/* IPsec offloads elements. */ + +/* IPsec Rx code. */ +#define MLX5_IPSEC_RX_DECRYPTED 0x11 +#define MLX5_IPSEC_RX_AUTH_FAIL 0x12 + +/* IPsec Tx code. */ +#define MLX5_IPSEC_TX_OFFLOAD 0x8 + +/* Metadata length . */ +#define MLX5_METADATA_LEN 8 + +/* Packet IPsec Rx metadata. */ +struct mlx5_rx_pkt_ipsec_metadata { + uint8_t reserved; + rte_be32_t sa_handle; +} __rte_packed; + +/* Tx packet Metadata. */ +struct mlx5_tx_pkt_ipsec_metadata { + rte_be16_t mss_inv; /** MSS fixed point, used only in LSO. */ + rte_be16_t seq; /** LSBs of the first TCP seq, only in LSO. */ + uint8_t esp_next_proto; /* Next protocol of ESP. */ +} __rte_packed; + +/* Packet Metadata. 
*/ +struct mlx5_pkt_metadata { + uint8_t syndrome; + union { + uint8_t raw[5]; + struct mlx5_rx_pkt_ipsec_metadata rx; + struct mlx5_tx_pkt_ipsec_metadata tx; + } __rte_packed; + rte_be16_t ethertype; +} __rte_packed; + +static_assert(sizeof(struct mlx5_pkt_metadata) == MLX5_METADATA_LEN, + "wrong metadata size detected."); + #endif /* RTE_PMD_MLX5_PRM_H_ */ diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 28c0ad8ab..91ceb3c55 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -344,6 +344,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) unsigned int j = 0; unsigned int k = 0; uint16_t max_elts; + const unsigned int ipsec_en = txq->ipsec_en; uint16_t max_wqe; unsigned int comp; volatile struct mlx5_wqe_ctrl *last_wqe = NULL; @@ -417,14 +418,43 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) rte_pktmbuf_mtod(*(pkts + 1), volatile void *)); cs_flags = txq_ol_cksum_to_cs(txq, buf); raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE; - /* Replace the Ethernet type by the VLAN if
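A small consumption sketch for the Rx side (the helper name is an assumption; the constants and the struct layout come from the hunks above): the syndrome byte tells whether the hardware decrypted the ESP packet or flagged an authentication failure.

```c
/* 0: decrypted by HW, -1: authentication failed, 1: no IPsec syndrome. */
static inline int
rx_ipsec_status(const struct mlx5_pkt_metadata *md)
{
	if (md->syndrome == MLX5_IPSEC_RX_DECRYPTED)
		return 0;
	if (md->syndrome == MLX5_IPSEC_RX_AUTH_FAIL)
		return -1;
	return 1;
}
```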
[dpdk-dev] [PATCH v1 7/7] net/mlx5: add device parameter to enabled IPsec
This feature still relies on some symbols from Verbs and thus the support is only compile if the symbols are available. Only ConnectX-4 Lx INNOVA are security capable. Signed-off-by: Aviad Yehezkel Signed-off-by: Nelio Laranjeiro --- doc/guides/nics/mlx5.rst | 9 + drivers/net/mlx5/mlx5.c | 32 2 files changed, 41 insertions(+) diff --git a/doc/guides/nics/mlx5.rst b/doc/guides/nics/mlx5.rst index f9558da89..643c1dd5d 100644 --- a/doc/guides/nics/mlx5.rst +++ b/doc/guides/nics/mlx5.rst @@ -295,6 +295,15 @@ Run-time configuration Enabled by default. +- ``ipsec_en`` parameter [int] + + A nonzero value enables the IPsec feature on the port. + Enabling this feature enables, ``txq_inline`` with a size equal to + RTE_CACHE_LINE_SIZE and disables ``rx_vec_en``, ``tx_vec_en`` and + ``txq_mpw_en``. + + Enabled by default on ConnectX-4 Lx INOVA. + Prerequisites - diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index e74026caf..0a7e9ac34 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -95,6 +95,9 @@ /* Device parameter to enable hardware Rx vector. */ #define MLX5_RX_VEC_EN "rx_vec_en" +/* Device parameter to enable hardware IPsec offload. */ +#define MLX5_IPSEC_EN "ipsec_en" + /* Default PMD specific parameter value. */ #define MLX5_ARG_UNSET (-1) @@ -128,6 +131,7 @@ struct mlx5_args { int tso; int tx_vec_en; int rx_vec_en; + int ipsec_en; }; /** * Retrieve integer value from environment variable. @@ -438,6 +442,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque) args->tx_vec_en = !!tmp; } else if (strcmp(MLX5_RX_VEC_EN, key) == 0) { args->rx_vec_en = !!tmp; + } else if (strcmp(MLX5_IPSEC_EN, key) == 0) { + args->ipsec_en = !!tmp; } else { WARN("%s: unknown parameter", key); return -EINVAL; @@ -469,6 +475,7 @@ mlx5_args(struct mlx5_args *args, struct rte_devargs *devargs) MLX5_TSO, MLX5_TX_VEC_EN, MLX5_RX_VEC_EN, + MLX5_IPSEC_EN, NULL, }; struct rte_kvargs *kvlist; @@ -528,6 +535,8 @@ mlx5_args_assign(struct priv *priv, struct mlx5_args *args) priv->tx_vec_en = args->tx_vec_en; if (args->rx_vec_en != MLX5_ARG_UNSET) priv->rx_vec_en = args->rx_vec_en; + if (args->ipsec_en != MLX5_ARG_UNSET) + priv->ipsec_en = args->ipsec_en; } /** @@ -556,6 +565,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) unsigned int mps; unsigned int cqe_comp; unsigned int tunnel_en = 0; + unsigned int ipsec_en = 0; int idx; int i; struct mlx5dv_context attrs_out; @@ -645,6 +655,13 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) ibv_dev = list[i]; DEBUG("device opened"); +#ifdef HAVE_IBV_IPSEC_SUPPORT + attrs_out.comp_mask |= MLX5DV_CONTEXT_MASK_XFRM_FLAGS; + mlx5dv_query_device(attr_ctx, &attrs_out); + if ((attrs_out.xfrm_flags & MLX5_IPSEC_FLAGS) == MLX5_IPSEC_FLAGS) + ipsec_en = 1; +#endif + DEBUG("Tx/Rx IPsec offload is %ssupported", ipsec_en ? "" : "not "); /* * Multi-packet send is supported by ConnectX-4 Lx PF as well * as all ConnectX-5 devices. @@ -693,6 +710,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) .tso = MLX5_ARG_UNSET, .tx_vec_en = MLX5_ARG_UNSET, .rx_vec_en = MLX5_ARG_UNSET, + .ipsec_en = MLX5_ARG_UNSET, }; mlx5_dev[idx].ports |= test; @@ -787,6 +805,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) priv->mps = mps; /* Enable MPW by default if supported. */ priv->cqe_comp = cqe_comp; priv->tunnel_en = tunnel_en; + priv->ipsec_en = ipsec_en; /* Enable vector by default if supported. 
*/ priv->tx_vec_en = 1; priv->rx_vec_en = 1; @@ -797,6 +816,19 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) goto port_error; } mlx5_args_assign(priv, &args); + if (priv->ipsec_en) { +#ifndef HAVE_IBV_IPSEC_SUPPORT + priv->ipsec_en = 0; + WARN("IPsec Offload not supported."); +#else /* HAVE_IBV_IPSEC_SUPPORT */ + priv->txq_inline = RTE_CACHE_LINE_SIZE; + priv->txqs_inline = 0; + priv->mps = MLX5_MPW_DISABLED; + priv->tx_vec_en = 0; + priv->rx_vec_en = 0; +
[dpdk-dev] [PATCH v1 6/7] net/mlx5: support security flow action
From: Shahaf Shuler Signed-off-by: Shahaf Shuler Signed-off-by: Aviad Yehezkel Signed-off-by: Matan Barak Signed-off-by: Nelio Laranjeiro --- drivers/net/mlx5/mlx5.h | 7 + drivers/net/mlx5/mlx5_flow.c | 309 ++ drivers/net/mlx5/mlx5_ipsec.c | 10 +- 3 files changed, 289 insertions(+), 37 deletions(-) diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 2927b851b..cb25beb3c 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -154,6 +154,13 @@ struct priv { struct rte_security_ctx security; /* Security context. */ }; +/* Security session. */ +struct mlx5_security_session { + struct rte_security_ipsec_xform ipsec_xform; + struct rte_eth_dev *dev; + struct ibv_action_xfrm *ibv_action_xfrm; +}; + /** * Lock private structure to protect it from concurrent access in the * control path. diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index ff50470b5..704c47820 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -78,6 +78,20 @@ ibv_destroy_counter_set(struct ibv_counter_set *cs) } #endif +#ifndef HAVE_IBV_IPSEC_SUPPORT +/* Define dummy structure when IPsec is not available in Verbs. */ + +/* Dummy spec ESP defined when missing in Verbs. */ +struct ibv_flow_spec_esp { + int dummy; +}; + +/* Dummy transform action defined when missing in Verbs. */ +struct ibv_flow_spec_action_xfrm { + int dummy; +}; +#endif + /* Dev ops structure defined in mlx5.c */ extern const struct eth_dev_ops mlx5_dev_ops; extern const struct eth_dev_ops mlx5_dev_ops_isolate; @@ -129,6 +143,14 @@ mlx5_flow_create_flag_mark(struct mlx5_flow_parse *parser, uint32_t mark_id); static int mlx5_flow_create_count(struct priv *priv, struct mlx5_flow_parse *parser); +static int +mlx5_flow_create_esp(const struct rte_flow_item *item, +const void *default_mask, +void *data); + +static void +mlx5_flow_create_xfrm(struct mlx5_flow_parse *parser); + /* Hash RX queue types. */ enum hash_rxq_type { HASH_RXQ_TCPV4, @@ -244,6 +266,8 @@ struct rte_flow { TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */ uint32_t mark:1; /**< Set if the flow is marked. */ uint32_t drop:1; /**< Drop queue. */ + uint32_t security:1; /**< Security flow. */ + uint32_t ingress:1; /**< Ingress flow. */ uint16_t queues_n; /**< Number of entries in queue[]. */ uint16_t (*queues)[]; /**< Queues indexes to use. 
*/ struct rte_eth_rss_conf rss_conf; /**< RSS configuration */ @@ -305,6 +329,7 @@ static const enum rte_flow_action_type valid_actions[] = { #ifdef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT RTE_FLOW_ACTION_TYPE_COUNT, #endif + RTE_FLOW_ACTION_TYPE_SECURITY, RTE_FLOW_ACTION_TYPE_END, }; @@ -343,7 +368,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = { }, [RTE_FLOW_ITEM_TYPE_IPV4] = { .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_TCP), + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_ESP), .actions = valid_actions, .mask = &(const struct rte_flow_item_ipv4){ .hdr = { @@ -360,7 +386,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = { }, [RTE_FLOW_ITEM_TYPE_IPV6] = { .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP, - RTE_FLOW_ITEM_TYPE_TCP), + RTE_FLOW_ITEM_TYPE_TCP, + RTE_FLOW_ITEM_TYPE_ESP), .actions = valid_actions, .mask = &(const struct rte_flow_item_ipv6){ .hdr = { @@ -424,6 +451,17 @@ static const struct mlx5_flow_items mlx5_flow_items[] = { .convert = mlx5_flow_create_vxlan, .dst_sz = sizeof(struct ibv_flow_spec_tunnel), }, + [RTE_FLOW_ITEM_TYPE_ESP] = { + .actions = valid_actions, + .mask = &(const struct rte_flow_item_esp){ + .hdr = { + .spi = 0x, + } + }, + .mask_sz = sizeof(struct rte_flow_item_esp), + .convert = mlx5_flow_create_esp, + .dst_sz = sizeof(struct ibv_flow_spec_esp), + }, }; /** Structure to pass to the conversion function. */ @@ -434,6 +472,7 @@ struct mlx5_flow_parse { uint32_t drop:1; /**< Target is a drop queue. */ uint32_t mark:1; /**< Mark is present in the flow. */ uint32_t count:1; /**< Count is present in the flow. */ + uint32_t ingress:1; /** Flow is for ingress. */ uint32_t mark_id; /**< Mark identifier. */ uint16_t queues[RTE_MAX_QUEUES_PER_PORT]; /**< Queues indexes
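With the ESP item and the security action wired up as above, an application could express an SPI-specific inline-crypto rule roughly as follows (a sketch only; the SPI value, port handling and sec_session are placeholders, not part of the patch):

```c
#include <rte_byteorder.h>
#include <rte_flow.h>

static struct rte_flow *
create_esp_rule(uint16_t port_id, void *sec_session,
		struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_esp esp_spec = {
		.hdr = { .spi = rte_cpu_to_be_32(1000) }, /* placeholder SPI */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_ESP, .spec = &esp_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SECURITY, .conf = sec_session },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
```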
[dpdk-dev] [PATCH v1 4/7] net/mlx5: add security capability function
From: Aviad Yehezkel Signed-off-by: Aviad Yehezkel Signed-off-by: Nelio Laranjeiro --- drivers/net/mlx5/Makefile | 1 + drivers/net/mlx5/mlx5.c | 12 ++ drivers/net/mlx5/mlx5.h | 2 + drivers/net/mlx5/mlx5_ipsec.c | 322 ++ 4 files changed, 337 insertions(+) create mode 100644 drivers/net/mlx5/mlx5_ipsec.c diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile index b2dd86796..839d208b1 100644 --- a/drivers/net/mlx5/Makefile +++ b/drivers/net/mlx5/Makefile @@ -53,6 +53,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_flow.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_socket.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_ipsec.c # Basic CFLAGS. CFLAGS += -O3 diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 00480cef0..e74026caf 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -57,6 +57,7 @@ #include #include #include +#include #include "mlx5.h" #include "mlx5_utils.h" @@ -114,6 +115,9 @@ MLX5DV_CONTEXT_XFRM_FLAGS_ESP_AES_GCM_SPI_RSS_ONLY) #endif +/* Dev ops structure defined in mlx5_ipsec.c */ +extern const struct rte_security_ops mlx5_security_ops; + struct mlx5_args { int cqe_comp; int txq_inline; @@ -942,6 +946,14 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) mlx5dv_set_context_attr(ctx, MLX5DV_CTX_ATTR_BUF_ALLOCATORS, (void *)((uintptr_t)&alctr)); + if (priv->ipsec_en) { + priv->security = (struct rte_security_ctx){ + .device = (void *)eth_dev, + .ops = &mlx5_security_ops, + .sess_cnt = 0, + }; + eth_dev->security_ctx = &priv->security; + } /* Bring Ethernet device up. */ DEBUG("forcing Ethernet interface up"); priv_set_flags(priv, ~IFF_UP, IFF_UP); diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index c6a01d972..2927b851b 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -58,6 +58,7 @@ #include #include #include +#include #include "mlx5_utils.h" #include "mlx5_rxtx.h" @@ -150,6 +151,7 @@ struct priv { rte_spinlock_t lock; /* Lock for control functions. */ int primary_socket; /* Unix socket for primary process. */ struct rte_intr_handle intr_handle_socket; /* Interrupt handler. */ + struct rte_security_ctx security; /* Security context. */ }; /** diff --git a/drivers/net/mlx5/mlx5_ipsec.c b/drivers/net/mlx5/mlx5_ipsec.c new file mode 100644 index 0..52a3add7a --- /dev/null +++ b/drivers/net/mlx5/mlx5_ipsec.c @@ -0,0 +1,322 @@ +/*- + * BSD LICENSE + * + * Copyright 2017 Mellanox. + * Copyright 2017 6WIND S.A. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-Wpedantic" +#endif +#include +#include +#ifdef PEDANTIC +#pragma GCC diagnostic error "-Wpedantic" +#endif + +#include +#include +#include +#include + +#include "mlx5.h" +#include "mlx5_utils.h" +#include "mlx5_auto
[dpdk-dev] [PATCH v1 5/7] net/mlx5: simplify error handling in flow action parsing
Signed-off-by: Nelio Laranjeiro --- drivers/net/mlx5/mlx5_flow.c | 88 +--- 1 file changed, 41 insertions(+), 47 deletions(-) diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 1eda83671..ff50470b5 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -654,6 +654,9 @@ priv_flow_convert_actions(struct priv *priv, struct rte_flow_error *error, struct mlx5_flow_parse *parser) { + int ret = 0; + const char *msg = NULL; + const struct rte_flow_action *action = NULL; /* * Add default RSS configuration necessary for Verbs to create QP even * if no RSS is necessary. @@ -674,7 +677,7 @@ priv_flow_convert_actions(struct priv *priv, uint16_t found = 0; if (!queue || (queue->index > (priv->rxqs_n - 1))) - goto exit_action_not_supported; + goto error; for (n = 0; n < parser->queues_n; ++n) { if (parser->queues[n] == queue->index) { found = 1; @@ -682,11 +685,10 @@ priv_flow_convert_actions(struct priv *priv, } } if (parser->queues_n > 1 && !found) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "queue action not in RSS queues"); - return -rte_errno; + ret = ENOTSUP; + action = actions; + msg = "queue action not in RSS queues"; + goto error; } if (!found) { parser->queues_n = 1; @@ -699,11 +701,10 @@ priv_flow_convert_actions(struct priv *priv, uint16_t n; if (!rss || !rss->num) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "no valid queues"); - return -rte_errno; + ret = EINVAL; + action = actions; + msg = "no valid queues"; + goto error; } if (parser->queues_n == 1) { uint16_t found = 0; @@ -717,22 +718,18 @@ priv_flow_convert_actions(struct priv *priv, } } if (!found) { - rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "queue action not in RSS" - " queues"); - return -rte_errno; + ret = ENOTSUP; + action = actions; + msg = "queue action not in RSS queues"; + goto error; } } for (n = 0; n < rss->num; ++n) { if (rss->queue[n] >= priv->rxqs_n) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - "queue id > number of" - " queues"); - return -rte_errno; + ret = EINVAL; + action = actions; + msg = "queue id > number of queues"; + goto error; } } for (n = 0; n < rss->num; ++n) @@ -740,11 +737,10 @@ priv_flow_convert_actions(struct priv *priv, parser->queues_n = rss->num; if (priv_flow_convert_rss_conf(priv, parser, rss->rss_conf)) { - rte_flow_error_se
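The hunk above is cut off before the consolidated label itself; as a sketch of where all those goto statements presumably land (the exact body is an assumption, not visible in the diff), each failure path now sets ret/action/msg once and the error is reported in a single place:

```c
error:
	rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ACTION,
			   action, msg);
	return -rte_errno;
```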
[dpdk-dev] [PATCH 0/2] add raw flowtype mode for flow director filter
Add the possibility to load a file with a raw packet and set it as a template for flow director filter setup. Kirill Rybalchenko (2): app/testpmd: add raw flowtype mode for flow director filter doc: add description of raw mode in flow director in testpmd app/test-pmd/cmdline.c | 116 +--- app/test-pmd/config.c | 8 +- app/test-pmd/testpmd.h | 6 +- doc/guides/testpmd_app_ug/testpmd_funcs.rst | 24 -- 4 files changed, 131 insertions(+), 23 deletions(-) -- 2.5.5
[dpdk-dev] [PATCH 2/2] doc: add description of raw mode in flow director in testpmd
Add description of raw flow type mode for flow_director_filter command in testpmd. Modify description of flow type parameter for functions set_hash_global_config, set_hash_input_set and set_fdir_input_set Signed-off-by: Kirill Rybalchenko --- doc/guides/testpmd_app_ug/testpmd_funcs.rst | 24 +++- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/doc/guides/testpmd_app_ug/testpmd_funcs.rst b/doc/guides/testpmd_app_ug/testpmd_funcs.rst index 9789139..e198767 100644 --- a/doc/guides/testpmd_app_ug/testpmd_funcs.rst +++ b/doc/guides/testpmd_app_ug/testpmd_funcs.rst @@ -2523,12 +2523,22 @@ Perfect-tunnel filters, the match mode is set by the ``--pkt-filter-mode`` comma The hardware checks a match between the masked fields of the received packets and the programmed filters. The masked fields are for tunnel flow. +* Perfect-raw-flow-type match filters. + The hardware checks a match between the masked fields of the received packets and pre-loaded raw (template) packet. + The masked fields are specified by input sets. + The Flow Director filters can match the different fields for different type of packet: flow type, specific input set per flow type and the flexible payload. The Flow Director can also mask out parts of all of these fields so that filters are only applied to certain fields or parts of the fields. +Note that for raw flow type mode the source and destination fields in the +raw packet buffer need to be presented in a reversed order with respect +to the expected received packets. +For example: IP source and destination addresses or TCP/UDP/SCTP +source and destination ports + Different NICs may have different capabilities, command show port fdir (port_id) can be used to acquire the information. # Commands to add flow director filters of different flow types:: @@ -2575,6 +2585,10 @@ Different NICs may have different capabilities, command show port fdir (port_id) flexbytes (flexbytes_value) (drop|fwd) \ queue (queue_id) fd_id (fd_id_value) + flow_director_filter (port_id) mode raw (add|del|update) flow (flow_id) \ +(drop|fwd) queue (queue_id) fd_id (fd_id_value) \ +packet (packet file name) + For example, to add an ipv4-udp flow type filter:: testpmd> flow_director_filter 0 mode IP add flow ipv4-udp src 2.2.2.3 32 \ @@ -2689,7 +2703,7 @@ Set the global configurations of hash filters:: set_hash_global_config (port_id) (toeplitz|simple_xor|default) \ (ipv4|ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp|ipv4-other|ipv6|ipv6-frag| \ - ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload) \ + ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other|l2_payload|) \ (enable|disable) For example, to enable simple_xor for flow type of ipv6 on port 2:: @@ -2703,8 +2717,8 @@ Set the input set for hash:: set_hash_input_set (port_id) (ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp| \ ipv4-other|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other| \ - l2_payload) (ovlan|ivlan|src-ipv4|dst-ipv4|src-ipv6|dst-ipv6|ipv4-tos| \ - ipv4-proto|ipv6-tc|ipv6-next-header|udp-src-port|udp-dst-port| \ + l2_payload|) (ovlan|ivlan|src-ipv4|dst-ipv4|src-ipv6|dst-ipv6| \ + ipv4-tos|ipv4-proto|ipv6-tc|ipv6-next-header|udp-src-port|udp-dst-port| \ tcp-src-port|tcp-dst-port|sctp-src-port|sctp-dst-port|sctp-veri-tag| \ udp-key|gre-key|fld-1st|fld-2nd|fld-3rd|fld-4th|fld-5th|fld-6th|fld-7th| \ fld-8th|none) (select|add) @@ -2723,8 +2737,8 @@ Set the input set for flow director:: set_fdir_input_set (port_id) (ipv4-frag|ipv4-tcp|ipv4-udp|ipv4-sctp| \ ipv4-other|ipv6|ipv6-frag|ipv6-tcp|ipv6-udp|ipv6-sctp|ipv6-other| \ - l2_payload) 
(ivlan|ethertype|src-ipv4|dst-ipv4|src-ipv6|dst-ipv6|ipv4-tos| \ - ipv4-proto|ipv4-ttl|ipv6-tc|ipv6-next-header|ipv6-hop-limits| \ + l2_payload|) (ivlan|ethertype|src-ipv4|dst-ipv4|src-ipv6|dst-ipv6| \ + ipv4-tos|ipv4-proto|ipv4-ttl|ipv6-tc|ipv6-next-header|ipv6-hop-limits| \ tudp-src-port|udp-dst-port|cp-src-port|tcp-dst-port|sctp-src-port| \ sctp-dst-port|sctp-veri-tag|none) (select|add) -- 2.5.5
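For example, a raw mode rule that forwards packets matching a pre-loaded template to queue 2 could look as follows (the flow value and file name are illustrative only, following the command syntax documented above):

    testpmd> flow_director_filter 0 mode raw add flow ipv4-udp fwd \
             queue 2 fd_id 1 packet /tmp/ipv4_udp_template.pkt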
[dpdk-dev] [PATCH 1/2] app/testpmd: add raw flowtype mode for flow director filter
Add possibility to load file with raw packet and set it as a template for flow director filter setup. Signed-off-by: Kirill Rybalchenko --- app/test-pmd/cmdline.c | 116 - app/test-pmd/config.c | 8 ++-- app/test-pmd/testpmd.h | 6 +-- 3 files changed, 112 insertions(+), 18 deletions(-) diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c index f71d963..86178ae 100644 --- a/app/test-pmd/cmdline.c +++ b/app/test-pmd/cmdline.c @@ -979,6 +979,11 @@ static void cmd_help_long_parsed(void *parsed_result, " queue (queue_id) fd_id (fd_id_value)\n" "Add/Del a Tunnel flow director filter.\n\n" + "flow_director_filter (port_id) mode raw (add|del|update)" + " flow (flow_id) (drop|fwd) queue (queue_id)" + " fd_id (fd_id_value) packet (packet file name)\n" + "Add/Del a raw type flow director filter.\n\n" + "flush_flow_director (port_id)\n" "Flush all flow director entries of a device.\n\n" @@ -9769,6 +9774,8 @@ struct cmd_flow_director_result { cmdline_fixed_string_t tunnel_type; cmdline_fixed_string_t tunnel_id; uint32_t tunnel_id_value; + cmdline_fixed_string_t packet; + char filepath[]; }; static inline int @@ -9918,8 +9925,62 @@ cmd_flow_director_filter_parsed(void *parsed_result, return; } } else { - if (strcmp(res->mode_value, "IP")) { - printf("Please set mode to IP.\n"); + if (!strcmp(res->mode_value, "raw")) { +#ifdef RTE_LIBRTE_I40E_PMD + struct rte_pmd_i40e_flow_type_mapping + mapping[RTE_PMD_I40E_FLOW_TYPE_MAX]; + struct rte_pmd_i40e_pkt_template_conf conf; + uint16_t flow_type = str2flowtype(res->flow_type); + uint16_t i, port = res->port_id; + uint8_t add; + + memset(&conf, 0, sizeof(conf)); + + if (flow_type == RTE_ETH_FLOW_UNKNOWN) { + printf("Invalid flow type specified.\n"); + return; + } + ret = rte_pmd_i40e_flow_type_mapping_get(res->port_id, +mapping); + if (ret) + return; + if (mapping[flow_type].pctype == 0ULL) { + printf("Invalid flow type specified.\n"); + return; + } + for (i = 0; i < RTE_PMD_I40E_PCTYPE_MAX; i++) { + if (mapping[flow_type].pctype & (1ULL << i)) { + conf.input.pctype = i; + break; + } + } + + conf.input.packet = open_file(res->filepath, + &conf.input.length); + if (!conf.input.packet) + return; + if (!strcmp(res->drop, "drop")) + conf.action.behavior = + RTE_PMD_I40E_PKT_TEMPLATE_REJECT; + else + conf.action.behavior = + RTE_PMD_I40E_PKT_TEMPLATE_ACCEPT; + conf.action.report_status = + RTE_PMD_I40E_PKT_TEMPLATE_REPORT_ID; + conf.action.rx_queue = res->queue_id; + conf.soft_id = res->fd_id_value; + add = strcmp(res->ops, "del") ? 1 : 0; + ret = rte_pmd_i40e_flow_add_del_packet_template(port, + &conf, + add); + if (ret < 0) + printf("flow director config error: (%s)\n", + strerror(-ret)); + close_file(conf.input.packet); +#endif + return; + } else if (strcmp(res->mode_value, "IP")) { + printf("Please set mode to IP or raw.\n"); return; } entry.input.flow_type = str2flowtype(res->flow_type); @@ -10091,8 +10152,7 @@ cmdline_parse_token_string_t cmd_flow_director_flow = flow, "flow"); cmdline_parse_token_string_t cmd_flow_director_flow_type = TOKEN_STRING_INITIALIZ
Re: [dpdk-dev] [PATCH] net/bonding: fix bond 8023ad mode enable using wrong index
Hello, > -Original Message- > From: dev [mailto:dev-boun...@dpdk.org] On Behalf Of Lilijun (Jerry) > Sent: Monday, November 20, 2017 2:16 AM > To: dev@dpdk.org; tomaszx.kula...@intel.com > Cc: Zhang, Jerry; Wanghanlin > Subject: [dpdk-dev] [PATCH] net/bonding: fix bond 8023ad mode enable > using wrong index > ... > --- a/drivers/net/bonding/rte_eth_bond_8023ad.c > +++ b/drivers/net/bonding/rte_eth_bond_8023ad.c > @@ -1159,7 +1159,7 @@ > uint8_t i; > > for (i = 0; i < internals->active_slave_count; i++) > - bond_mode_8023ad_activate_slave(bond_dev, i); > + bond_mode_8023ad_activate_slave(bond_dev, internals- > >active_slaves[i]); > > return 0; > } > I don't think active_slaves is correct here. The slave is not yet active, so it may not be in that array yet. Should we instead use internals->slaves[i].port_id?
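A sketch of the alternative being suggested (one possible reading; whether the loop should walk all configured slaves or only the active ones is exactly the open question raised above):

```c
/* Activate by port id taken from the slave table rather than by loop
 * index; field names follow rte_eth_bond_private.h as far as recalled. */
for (i = 0; i < internals->active_slave_count; i++)
	bond_mode_8023ad_activate_slave(bond_dev,
			internals->slaves[i].port_id);
```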