On Thu, Jun 19, 2014 at 3:58 PM, Polehn, Mike A <mike.a.pol...@intel.com> wrote: > Large TX and RX queues are needed for high speed 10 GbE physical NICs. > Observed a 250% zero loss improvement over small NIC queue test for > a port-to-port flow test. > > Signed-off-by: Mike A. Polehn <mike.a.pol...@intel.com> I am fine with the > > diff --git a/lib/netdev-dpdk.c b/lib/netdev-dpdk.c > index fbdb6b3..d1bcc73 100644 > --- a/lib/netdev-dpdk.c > +++ b/lib/netdev-dpdk.c > @@ -70,6 +70,9 @@ static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(5, > 20); > > #define NON_PMD_THREAD_TX_QUEUE 0 > > +#define NIC_PORT_RX_Q_SIZE 2048 /* Size of Physical NIC RX Queue > (n*32<4096)*/
I am not sure what "(n*32<4096)" means. Can you elaborate a bit? > +#define NIC_PORT_TX_Q_SIZE 2048 /* Size of Physical NIC TX Queue > (n*32<4096)*/ > + > /* TODO: Needs per NIC value for these constants. */ > #define RX_PTHRESH 32 /* Default values of RX prefetch threshold reg. */ > #define RX_HTHRESH 32 /* Default values of RX host threshold reg. */ > @@ -369,7 +372,7 @@ dpdk_eth_dev_init(struct netdev_dpdk *dev) > OVS_REQUIRES(dpdk_mutex) > } > > for (i = 0; i < NR_QUEUE; i++) { > - diag = rte_eth_tx_queue_setup(dev->port_id, i, MAX_TX_QUEUE_LEN, > + diag = rte_eth_tx_queue_setup(dev->port_id, i, NIC_PORT_TX_Q_SIZE, > dev->socket_id, &tx_conf); > if (diag) { > VLOG_ERR("eth dev tx queue setup error %d",diag); > @@ -378,7 +381,7 @@ dpdk_eth_dev_init(struct netdev_dpdk *dev) > OVS_REQUIRES(dpdk_mutex) > } > > for (i = 0; i < NR_QUEUE; i++) { > - diag = rte_eth_rx_queue_setup(dev->port_id, i, MAX_RX_QUEUE_LEN, > + diag = rte_eth_rx_queue_setup(dev->port_id, i, NIC_PORT_RX_Q_SIZE, > dev->socket_id, > &rx_conf, dev->dpdk_mp->mp); > if (diag) { > > > _______________________________________________ > dev mailing list > dev@openvswitch.org > http://openvswitch.org/mailman/listinfo/dev _______________________________________________ dev mailing list dev@openvswitch.org http://openvswitch.org/mailman/listinfo/dev