vmxnet3 version 6 supports some new features, including but not limited to:
- Increased max MTU up to 9190
- Increased max number of queues, both for Rx and Tx
- Removed power-of-two limitations
- Extended interrupt structures, required for the additional number of queues
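For applications, the larger limits are picked up through the usual ethdev
queries rather than through driver-private knobs. A minimal sketch of that
flow (the helper name, port id and queue counts below are illustrative only,
not part of this patch):

#include <rte_common.h>
#include <rte_ethdev.h>

/* Illustrative helper: size queue counts and MTU from what the PMD reports.
 * With a version 6 vmxnet3 device the PMD may report up to 32 Rx/Tx queues
 * and an MTU up to 9190; older devices keep the previous 8-queue/9000-byte
 * limits.
 */
static int
example_setup_port(uint16_t port_id, const struct rte_eth_conf *conf)
{
	struct rte_eth_dev_info dev_info;
	uint16_t nb_rxq, nb_txq;
	int ret;

	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0)
		return ret;

	/* Request 16 queues, clamped to whatever the device supports. */
	nb_rxq = RTE_MIN((uint16_t)16, dev_info.max_rx_queues);
	nb_txq = RTE_MIN((uint16_t)16, dev_info.max_tx_queues);

	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, conf);
	if (ret != 0)
		return ret;

	/* Ask for the largest MTU the device advertises. */
	return rte_eth_dev_set_mtu(port_id, dev_info.max_mtu);
}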
Tested, using testpmd, with different hardware versions on ESXi 7.0 Update 2.

Signed-off-by: Pankaj Gupta <pagu...@vmware.com>
Reviewed-by: Jochen Behrens <jbehr...@vmware.com>
---
 drivers/net/vmxnet3/base/vmxnet3_defs.h |  73 ++++++--
 drivers/net/vmxnet3/vmxnet3_ethdev.c    | 223 +++++++++++++++++-------
 drivers/net/vmxnet3/vmxnet3_ethdev.h    |  10 +-
 drivers/net/vmxnet3/vmxnet3_rxtx.c      |   2 +-
 4 files changed, 229 insertions(+), 79 deletions(-)

diff --git a/drivers/net/vmxnet3/base/vmxnet3_defs.h b/drivers/net/vmxnet3/base/vmxnet3_defs.h
index 8d62b3e116..bd6695e69d 100644
--- a/drivers/net/vmxnet3/base/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/base/vmxnet3_defs.h
@@ -103,7 +103,11 @@ typedef enum {
 	VMXNET3_CMD_GET_CONF_INTR,
 	VMXNET3_CMD_GET_ADAPTIVE_RING_INFO,
 	VMXNET3_CMD_GET_TXDATA_DESC_SIZE,
-	VMXNET3_CMD_RESERVED5,
+	VMXNET3_CMD_RESERVED5,
+	VMXNET3_CMD_RESERVED6,
+	VMXNET3_CMD_RESERVED7,
+	VMXNET3_CMD_RESERVED8,
+	VMXNET3_CMD_GET_MAX_QUEUES_CONF,
 } Vmxnet3_Cmd;

 /* Adaptive Ring Info Flags */
@@ -571,6 +575,24 @@ enum vmxnet3_intr_type {
 /* addition 1 for events */
 #define VMXNET3_MAX_INTRS	25

+/* Version 6 and later will use below macros */
+#define VMXNET3_EXT_MAX_TX_QUEUES	32
+#define VMXNET3_EXT_MAX_RX_QUEUES	32
+
+/* Version-dependent MAX RX/TX queues macro */
+#define MAX_RX_QUEUES(hw) \
+	(VMXNET3_VERSION_GE_6((hw)) ? \
+	VMXNET3_EXT_MAX_RX_QUEUES : \
+	VMXNET3_MAX_RX_QUEUES)
+#define MAX_TX_QUEUES(hw) \
+	(VMXNET3_VERSION_GE_6((hw)) ? \
+	VMXNET3_EXT_MAX_TX_QUEUES : \
+	VMXNET3_MAX_TX_QUEUES)
+
+/* addition 1 for events */
+#define VMXNET3_EXT_MAX_INTRS	65
+#define VMXNET3_FIRST_SET_INTRS	64
+
 /* value of intrCtrl */
 #define VMXNET3_IC_DISABLE_ALL	0x1   /* bit 0 */

@@ -587,6 +609,21 @@ struct Vmxnet3_IntrConf {
 #include "vmware_pack_end.h"
 Vmxnet3_IntrConf;

+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_IntrConfExt {
+	uint8 autoMask;
+	uint8 numIntrs;	/* # of interrupts */
+	uint8 eventIntrIdx;
+	uint8 reserved;
+	__le32 intrCtrl;
+	__le32 reserved1;
+	uint8 modLevels[VMXNET3_EXT_MAX_INTRS]; /* moderation level for each intr */
+	uint8 reserved2[3];
+}
+#include "vmware_pack_end.h"
+Vmxnet3_IntrConfExt;
+
 /* one bit per VLAN ID, the size is in the units of uint32 */
 #define VMXNET3_VFT_SIZE (4096 / (sizeof(uint32) * 8))

@@ -692,6 +729,15 @@ struct Vmxnet3_DSDevRead {
 #include "vmware_pack_end.h"
 Vmxnet3_DSDevRead;

+typedef
+#include "vmware_pack_begin.h"
+struct Vmxnet3_DSDevReadExt {
+	/* read-only region for device, read by dev in response to a SET cmd */
+	struct Vmxnet3_IntrConfExt intrConfExt;
+}
+#include "vmware_pack_end.h"
+Vmxnet3_DSDevReadExt;
+
 typedef
 #include "vmware_pack_begin.h"
 struct Vmxnet3_TxQueueDesc {
@@ -778,18 +824,18 @@ Vmxnet3_CmdInfo;
 typedef
 #include "vmware_pack_begin.h"
 struct Vmxnet3_DriverShared {
-	__le32 magic;
-	__le32 pad; /* make devRead start at 64-bit boundaries */
-	Vmxnet3_DSDevRead devRead;
-	__le32 ecr;
-	__le32 reserved;
-
-	union {
-		__le32 reserved1[4];
-		Vmxnet3_CmdInfo cmdInfo; /* only valid in the context of executing the
-					  * relevant command
-					  */
-	} cu;
+	__le32 magic;
+	__le32 size; /* size of DriverShared */
+	Vmxnet3_DSDevRead devRead;
+	__le32 ecr;
+	__le32 reserved;
+
+	union {
+		__le32 reserved1[4];
+		/* only valid in the context of executing the relevant command */
+		Vmxnet3_CmdInfo cmdInfo;
+	} cu;
+	struct Vmxnet3_DSDevReadExt devReadExt;
 }
 #include "vmware_pack_end.h"
 Vmxnet3_DriverShared;
@@ -821,6 +867,7 @@ do {\
 	((vfTable[vid >> 5] & (1 << (vid & 31))) != 0)

 #define VMXNET3_MAX_MTU		9000
+#define VMXNET3_V6_MAX_MTU	9190
 #define VMXNET3_MIN_MTU		60

 #define VMXNET3_LINK_UP		(10000 << 16 | 1)    // 10 Gbps, up

diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c
index 30a0026ade..357fec09af 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c
@@ -222,24 +222,20 @@ vmxnet3_disable_intr(struct vmxnet3_hw *hw, unsigned int intr_idx)
 }

 /*
- * Enable all intrs used by the device
+ * Simple helper to get intrCtrl and eventIntrIdx based on config and hw version
  */
 static void
-vmxnet3_enable_all_intrs(struct vmxnet3_hw *hw)
+vmxnet3_get_intr_ctrl_ev(struct vmxnet3_hw *hw,
+			 uint8 **out_eventIntrIdx,
+			 uint32 **out_intrCtrl)
 {
-	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
-
-	PMD_INIT_FUNC_TRACE();
-	devRead->intrConf.intrCtrl &= rte_cpu_to_le_32(~VMXNET3_IC_DISABLE_ALL);
-
-	if (hw->intr.lsc_only) {
-		vmxnet3_enable_intr(hw, devRead->intrConf.eventIntrIdx);
+	if (VMXNET3_VERSION_GE_6(hw) && hw->queuesExtEnabled) {
+		*out_eventIntrIdx = &hw->shared->devReadExt.intrConfExt.eventIntrIdx;
+		*out_intrCtrl = &hw->shared->devReadExt.intrConfExt.intrCtrl;
 	} else {
-		int i;
-
-		for (i = 0; i < hw->intr.num_intrs; i++)
-			vmxnet3_enable_intr(hw, i);
+		*out_eventIntrIdx = &hw->shared->devRead.intrConf.eventIntrIdx;
+		*out_intrCtrl = &hw->shared->devRead.intrConf.intrCtrl;
 	}
 }

@@ -250,15 +246,42 @@ static void
 vmxnet3_disable_all_intrs(struct vmxnet3_hw *hw)
 {
 	int i;
+	uint8 *eventIntrIdx;
+	uint32 *intrCtrl;

 	PMD_INIT_FUNC_TRACE();
+	vmxnet3_get_intr_ctrl_ev(hw, &eventIntrIdx, &intrCtrl);
+
+	*intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);

-	hw->shared->devRead.intrConf.intrCtrl |=
-		rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
-	for (i = 0; i < hw->num_intrs; i++)
+	for (i = 0; i < hw->intr.num_intrs; i++)
 		vmxnet3_disable_intr(hw, i);
 }

+/*
+ * Enable all intrs used by the device
+ */
+static void
+vmxnet3_enable_all_intrs(struct vmxnet3_hw *hw)
+{
+	uint8 *eventIntrIdx;
+	uint32 *intrCtrl;
+
+	PMD_INIT_FUNC_TRACE();
+	vmxnet3_get_intr_ctrl_ev(hw, &eventIntrIdx, &intrCtrl);
+
+	*intrCtrl &= rte_cpu_to_le_32(~VMXNET3_IC_DISABLE_ALL);
+
+	if (hw->intr.lsc_only) {
+		vmxnet3_enable_intr(hw, *eventIntrIdx);
+	} else {
+		int i;
+
+		for (i = 0; i < hw->intr.num_intrs; i++)
+			vmxnet3_enable_intr(hw, i);
+	}
+}
+
 /*
  * Gets tx data ring descriptor size.
  */
@@ -333,7 +356,11 @@ eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)

 	/* Check h/w version compatibility with driver. */
 	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
-	if (ver & (1 << VMXNET3_REV_5)) {
+	if (ver & (1 << VMXNET3_REV_6)) {
+		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
+				       1 << VMXNET3_REV_6);
+		hw->version = VMXNET3_REV_6 + 1;
+	} else if (ver & (1 << VMXNET3_REV_5)) {
 		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
 				       1 << VMXNET3_REV_5);
 		hw->version = VMXNET3_REV_5 + 1;
@@ -508,15 +535,22 @@ vmxnet3_dev_configure(struct rte_eth_dev *dev)
 	if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
 		dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;

-	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
-	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
-		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
-		return -EINVAL;
+	if (!VMXNET3_VERSION_GE_6(hw)) {
+		if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
+			PMD_INIT_LOG(ERR,
+				     "ERROR: Number of rx queues not power of 2");
+			return -EINVAL;
+		}
 	}

-	if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
-		PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
-		return -EINVAL;
+	/* At this point, the number of queues requested has already
+	 * been validated against dev_infos max queues by EAL
+	 */
+	if (dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES ||
+	    dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES) {
+		hw->queuesExtEnabled = 1;
+	} else {
+		hw->queuesExtEnabled = 0;
 	}

 	size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
@@ -627,9 +661,9 @@ vmxnet3_configure_msix(struct rte_eth_dev *dev)
 		return -1;

 	intr_vector = dev->data->nb_rx_queues;
-	if (intr_vector > VMXNET3_MAX_RX_QUEUES) {
+	if (intr_vector > MAX_RX_QUEUES(hw)) {
 		PMD_INIT_LOG(ERR, "At most %d intr queues supported",
-			     VMXNET3_MAX_RX_QUEUES);
+			     MAX_RX_QUEUES(hw));
 		return -ENOTSUP;
 	}

@@ -777,6 +811,7 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	uint32_t mtu = dev->data->mtu;
 	Vmxnet3_DriverShared *shared = hw->shared;
 	Vmxnet3_DSDevRead *devRead = &shared->devRead;
+	struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
 	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
 	uint32_t i;
 	int ret;
@@ -853,13 +888,27 @@ vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
 	}

 	/* intr settings */
-	devRead->intrConf.autoMask = hw->intr.mask_mode == VMXNET3_IMM_AUTO;
-	devRead->intrConf.numIntrs = hw->intr.num_intrs;
-	for (i = 0; i < hw->intr.num_intrs; i++)
-		devRead->intrConf.modLevels[i] = hw->intr.mod_levels[i];
+	if (VMXNET3_VERSION_GE_6(hw) && hw->queuesExtEnabled) {
+		devReadExt->intrConfExt.autoMask = hw->intr.mask_mode ==
+						   VMXNET3_IMM_AUTO;
+		devReadExt->intrConfExt.numIntrs = hw->intr.num_intrs;
+		for (i = 0; i < hw->intr.num_intrs; i++)
+			devReadExt->intrConfExt.modLevels[i] =
+				hw->intr.mod_levels[i];
+
+		devReadExt->intrConfExt.eventIntrIdx = hw->intr.event_intr_idx;
+		devReadExt->intrConfExt.intrCtrl |=
+			rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
+	} else {
+		devRead->intrConf.autoMask = hw->intr.mask_mode ==
+					     VMXNET3_IMM_AUTO;
+		devRead->intrConf.numIntrs = hw->intr.num_intrs;
+		for (i = 0; i < hw->intr.num_intrs; i++)
+			devRead->intrConf.modLevels[i] = hw->intr.mod_levels[i];

-	devRead->intrConf.eventIntrIdx = hw->intr.event_intr_idx;
-	devRead->intrConf.intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
+		devRead->intrConf.eventIntrIdx = hw->intr.event_intr_idx;
+		devRead->intrConf.intrCtrl |= rte_cpu_to_le_32(VMXNET3_IC_DISABLE_ALL);
+	}

 	/* RxMode set to 0 of VMXNET3_RXM_xxx */
 	devRead->rxFilterConf.rxMode = 0;
@@ -937,18 +986,24 @@ vmxnet3_dev_start(struct rte_eth_dev *dev)
 			return -EINVAL;
 	}

-	/* Setup memory region for rx buffers */
-	ret = vmxnet3_dev_setup_memreg(dev);
-	if (ret == 0) {
-		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
-				       VMXNET3_CMD_REGISTER_MEMREGS);
-		ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
-		if (ret != 0)
-			PMD_INIT_LOG(DEBUG,
-				     "Failed in setup memory region cmd\n");
-		ret = 0;
+	/* Check memregs restrictions first */
+	if (dev->data->nb_rx_queues <= VMXNET3_MAX_RX_QUEUES &&
+	    dev->data->nb_tx_queues <= VMXNET3_MAX_TX_QUEUES) {
+		ret = vmxnet3_dev_setup_memreg(dev);
+		if (ret == 0) {
+			VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+					       VMXNET3_CMD_REGISTER_MEMREGS);
+			ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+			if (ret != 0)
+				PMD_INIT_LOG(DEBUG,
+					     "Failed in setup memory region cmd\n");
+			ret = 0;
+		} else {
+			PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
+		}
 	} else {
-		PMD_INIT_LOG(DEBUG, "Failed to setup memory region\n");
+		PMD_INIT_LOG(WARNING, "Memregs can't init (rx: %d, tx: %d)",
+			     dev->data->nb_rx_queues, dev->data->nb_tx_queues);
 	}

 	if (VMXNET3_VERSION_GE_4(hw) &&
@@ -1203,8 +1258,6 @@ vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)

 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

-	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
-
 	for (i = 0; i < hw->num_tx_queues; i++)
 		vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
 	for (i = 0; i < hw->num_rx_queues; i++)
@@ -1306,7 +1359,6 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)

 	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

-	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
 	for (i = 0; i < hw->num_tx_queues; i++) {
 		vmxnet3_tx_stats_get(hw, i, &txStats);

@@ -1323,7 +1375,6 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 		stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
 	}

-	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
 	for (i = 0; i < hw->num_rx_queues; i++) {
 		vmxnet3_rx_stats_get(hw, i, &rxStats);

@@ -1377,9 +1428,30 @@ vmxnet3_dev_info_get(struct rte_eth_dev *dev,
 		     struct rte_eth_dev_info *dev_info)
 {
 	struct vmxnet3_hw *hw = dev->data->dev_private;
+	int queues = 0;
+
+	if (VMXNET3_VERSION_GE_6(hw)) {
+		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+				       VMXNET3_CMD_GET_MAX_QUEUES_CONF);
+		queues = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
+
+		if (queues > 0) {
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
+			dev_info->max_rx_queues =
+				MIN(VMXNET3_EXT_MAX_RX_QUEUES, ((queues >> 8) & 0xff));
+			dev_info->max_tx_queues =
+				MIN(VMXNET3_EXT_MAX_TX_QUEUES, (queues & 0xff));
+		} else {
+			dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
+			dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
+		}
+	} else {
+		dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
+		dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
+	}

-	dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
-	dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
 	dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
 	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
 	dev_info->min_mtu = VMXNET3_MIN_MTU;
@@ -1430,24 +1502,50 @@ vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 }

 static int
-vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, __rte_unused uint16_t mtu)
+vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
 {
-	if (dev->data->dev_started) {
-		PMD_DRV_LOG(ERR, "Port %d must be stopped to configure MTU",
-			    dev->data->port_id);
-		return -EBUSY;
-	}
+	struct vmxnet3_hw *hw = dev->data->dev_private;
+	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)(hw->perm_addr));
+	vmxnet3_write_mac(hw, mac_addr->addr_bytes);
 	return 0;
 }

 static int
-vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
+vmxnet3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
 	struct vmxnet3_hw *hw = dev->data->dev_private;
+	uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + 4;
+
+	if (mtu < VMXNET3_MIN_MTU)
+		return -EINVAL;
+
+	if (VMXNET3_VERSION_GE_6(hw)) {
+		if (mtu > VMXNET3_V6_MAX_MTU)
+			return -EINVAL;
+	} else {
+		if (mtu > VMXNET3_MAX_MTU) {
+			PMD_DRV_LOG(ERR, "MTU %d too large in device version v%d",
+				    mtu, hw->version);
+			return -EINVAL;
+		}
+	}
+
+	dev->data->mtu = mtu;
+	/* update max frame size */
+	dev->data->dev_conf.rxmode.mtu = frame_size;
+
+	if (dev->data->dev_started == 0)
+		return 0;
+
+	/* changing mtu for vmxnet3 pmd does not require a restart
+	 * as it does not need to repopulate the rx rings to support
+	 * different mtu size. We stop and restart the device here
+	 * just to pass the mtu info to the backend.
+	 */
+	vmxnet3_dev_stop(dev);
+	vmxnet3_dev_start(dev);

-	rte_ether_addr_copy(mac_addr, (struct rte_ether_addr *)(hw->perm_addr));
-	vmxnet3_write_mac(hw, mac_addr->addr_bytes);
 	return 0;
 }

@@ -1668,11 +1766,14 @@ vmxnet3_interrupt_handler(void *param)
 {
 	struct rte_eth_dev *dev = param;
 	struct vmxnet3_hw *hw = dev->data->dev_private;
-	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
 	uint32_t events;
+	uint8 *eventIntrIdx;
+	uint32 *intrCtrl;

 	PMD_INIT_FUNC_TRACE();
-	vmxnet3_disable_intr(hw, devRead->intrConf.eventIntrIdx);
+
+	vmxnet3_get_intr_ctrl_ev(hw, &eventIntrIdx, &intrCtrl);
+	vmxnet3_disable_intr(hw, *eventIntrIdx);

 	events = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_ECR);
 	if (events == 0)
@@ -1681,7 +1782,7 @@ vmxnet3_interrupt_handler(void *param)
 	RTE_LOG(DEBUG, PMD, "Reading events: 0x%X", events);
 	vmxnet3_process_events(dev);
 done:
-	vmxnet3_enable_intr(hw, devRead->intrConf.eventIntrIdx);
+	vmxnet3_enable_intr(hw, *eventIntrIdx);
 }

 static int
diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h
index ceaeb66392..5a303717b1 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethdev.h
+++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h
@@ -70,7 +70,7 @@ struct vmxnet3_intr {
 	enum vmxnet3_intr_type type;	/* MSI-X, MSI, or INTx? */
 	uint8_t num_intrs;		/* # of intr vectors */
 	uint8_t event_intr_idx;		/* idx of the intr vector for event */
-	uint8_t mod_levels[VMXNET3_MAX_MSIX_VECT]; /* moderation level */
+	uint8_t mod_levels[VMXNET3_EXT_MAX_INTRS]; /* moderation level */
 	bool lsc_only;			/* no Rx queue interrupt */
 };

@@ -108,6 +108,7 @@ struct vmxnet3_hw {
 	uint64_t queueDescPA;
 	uint16_t queue_desc_len;
 	uint16_t mtu;
+	bool queuesExtEnabled;

 	VMXNET3_RSSConf *rss_conf;
 	uint64_t rss_confPA;
@@ -117,19 +118,20 @@ struct vmxnet3_hw {
 	Vmxnet3_MemRegs	*memRegs;
 	uint64_t memRegsPA;
 #define VMXNET3_VFT_TABLE_SIZE     (VMXNET3_VFT_SIZE * sizeof(uint32_t))
-	UPT1_TxStats	  saved_tx_stats[VMXNET3_MAX_TX_QUEUES];
-	UPT1_RxStats	  saved_rx_stats[VMXNET3_MAX_RX_QUEUES];
-
+	UPT1_TxStats	  saved_tx_stats[VMXNET3_EXT_MAX_TX_QUEUES];
+	UPT1_RxStats	  saved_rx_stats[VMXNET3_EXT_MAX_RX_QUEUES];
 	UPT1_TxStats	  snapshot_tx_stats[VMXNET3_MAX_TX_QUEUES];
 	UPT1_RxStats	  snapshot_rx_stats[VMXNET3_MAX_RX_QUEUES];
 };

+#define VMXNET3_REV_6		5		/* Vmxnet3 Rev. 6 */
 #define VMXNET3_REV_5		4		/* Vmxnet3 Rev. 5 */
 #define VMXNET3_REV_4		3		/* Vmxnet3 Rev. 4 */
 #define VMXNET3_REV_3		2		/* Vmxnet3 Rev. 3 */
 #define VMXNET3_REV_2		1		/* Vmxnet3 Rev. 2 */
 #define VMXNET3_REV_1		0		/* Vmxnet3 Rev. 1 */

+#define VMXNET3_VERSION_GE_6(hw) ((hw)->version >= VMXNET3_REV_6 + 1)
 #define VMXNET3_VERSION_GE_5(hw) ((hw)->version >= VMXNET3_REV_5 + 1)
 #define VMXNET3_VERSION_GE_4(hw) ((hw)->version >= VMXNET3_REV_4 + 1)
 #define VMXNET3_VERSION_GE_3(hw) ((hw)->version >= VMXNET3_REV_3 + 1)
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index e15b377d8c..c94e3762e6 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -1400,7 +1400,7 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
 	/* loading hashKeySize */
 	dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
 	/* loading indTableSize: Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/
-	dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
+	dev_rss_conf->indTableSize = (uint16_t)((MAX_RX_QUEUES(hw)) * 4);

 	if (port_rss_conf->rss_key == NULL) {
 		/* Default hash key */
--
2.17.1