In message (http://dpdk.org/ml/archives/dev/2017-November/081557.html) it was noted that under congestion LACPDUs are dropped under load.
This patch changes the drop logic to re-enqueue the LACPDU to the slave's control message queue. This will allow resend attempts to be made in subsequent tx_burst() calls on the bonded device. Signed-off-by: Declan Doherty <declan.doherty@intel.com> --- drivers/net/bonding/rte_eth_bond_pmd.c | 35 ++++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c index fe23289..d1d3663 100644 --- a/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -1258,12 +1258,10 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, uint8_t distributing_count; uint16_t num_tx_slave, num_tx_total = 0, num_tx_fail_total = 0; - uint16_t i, j, op_slave_idx; - const uint16_t buffs_size = nb_pkts + BOND_MODE_8023AX_SLAVE_TX_PKTS + 1; + uint16_t i, op_slave_idx; /* Allocate additional packets in case 8023AD mode. */ - struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][buffs_size]; - void *slow_pkts[BOND_MODE_8023AX_SLAVE_TX_PKTS] = { NULL }; + struct rte_mbuf *slave_bufs[RTE_MAX_ETHPORTS][nb_pkts]; /* Total amount of packets in slave_bufs */ uint16_t slave_nb_pkts[RTE_MAX_ETHPORTS] = { 0 }; @@ -1285,14 +1283,6 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, for (i = 0; i < num_of_slaves; i++) { struct port *port = &mode_8023ad_ports[slaves[i]]; - slave_slow_nb_pkts[i] = rte_ring_dequeue_burst(port->tx_ring, - slow_pkts, BOND_MODE_8023AX_SLAVE_TX_PKTS, - NULL); - slave_nb_pkts[i] = slave_slow_nb_pkts[i]; - - for (j = 0; j < slave_slow_nb_pkts[i]; j++) - slave_bufs[i][j] = slow_pkts[j]; - if (ACTOR_STATE(port, DISTRIBUTING)) distributing_offsets[distributing_count++] = i; } @@ -1334,6 +1324,27 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct rte_mbuf **bufs, } } + /* Check for LACP control packets and send if available */ + for (i = 0; i < num_of_slaves; i++) { + struct port *port = &mode_8023ad_ports[slaves[i]]; 
+ struct rte_mbuf *ctrl_pkt = NULL; + + int pkt_avail = rte_ring_dequeue(port->tx_ring, + (void **)&ctrl_pkt); + + if (unlikely(pkt_avail == 0)) { + num_tx_slave = rte_eth_tx_burst(slaves[i], + bd_tx_q->queue_id, &ctrl_pkt, 1); + + /* + * re-enqueue LAG control plane packets to buffering + * ring if transmission fails so the packet isn't lost. + */ + if (num_tx_slave != nb_pkts) + rte_ring_enqueue(port->tx_ring, ctrl_pkt); + } + } + return num_tx_total; } -- 2.9.4