How strange. I was just looking at this issue this weekend. I think
we can just move the control packet handling before the data packet
handling. That avoids the goto and is a little more sensible -- we should
try to prioritize control traffic (even if we can't guarantee there will
be space, it's better than nothing). Something like this:
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c
b/drivers/net/bonding/rte_eth_bond_pmd.c
index 44deaf119..89ad67266 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1298,9 +1298,6 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct
rte_mbuf **bufs,
uint16_t i;
- if (unlikely(nb_bufs == 0))
- return 0;
-
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
slave_count = internals->active_slave_count;
@@ -1310,6 +1307,30 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct
rte_mbuf **bufs,
memcpy(slave_port_ids, internals->active_slaves,
sizeof(slave_port_ids[0]) * slave_count);
+ /* Check for LACP control packets and send if available */
+ for (i = 0; i < slave_count; i++) {
+ struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
+ struct rte_mbuf *ctrl_pkt = NULL;
+
+ if (likely(rte_ring_empty(port->tx_ring)))
+ continue;
+
+ if (rte_ring_dequeue(port->tx_ring,
+ (void **)&ctrl_pkt) != -ENOENT) {
+ slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
+ bd_tx_q->queue_id, &ctrl_pkt, 1);
+ /*
+ * re-enqueue LAG control plane packets to buffering
+ * ring if transmission fails so the packet isn't lost.
+ */
+ if (slave_tx_count != 1)
+ rte_ring_enqueue(port->tx_ring, ctrl_pkt);
+ }
+ }
+
+ if (unlikely(nb_bufs == 0))
+ return 0;
+
dist_slave_count = 0;
for (i = 0; i < slave_count; i++) {
struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
@@ -1365,27 +1386,6 @@ bond_ethdev_tx_burst_8023ad(void *queue, struct
rte_mbuf **bufs,
}
}
- /* Check for LACP control packets and send if available */
- for (i = 0; i < slave_count; i++) {
- struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
- struct rte_mbuf *ctrl_pkt = NULL;
-
- if (likely(rte_ring_empty(port->tx_ring)))
- continue;
-
- if (rte_ring_dequeue(port->tx_ring,
- (void **)&ctrl_pkt) != -ENOENT) {
- slave_tx_count = rte_eth_tx_burst(slave_port_ids[i],
- bd_tx_q->queue_id, &ctrl_pkt, 1);
- /*
- * re-enqueue LAG control plane packets to buffering
- * ring if transmission fails so the packet isn't lost.
- */
- if (slave_tx_count != 1)
- rte_ring_enqueue(port->tx_ring, ctrl_pkt);
- }
- }
-
return total_tx_count;
}
On 2/11/19 3:01 AM, wangyunjian wrote:
From: Yunjian Wang <wangyunj...@huawei.com>
Currently, when called to send zero data packets, the function returns early
and never considers LACPDUs; the LACPDUs should still be checked and sent.
Fixes: 09150784a776 ("net/bonding: burst mode hash calculation")
Cc: sta...@dpdk.org
Reported-by: Hui Zhao <zhaoh...@huawei.com>
Signed-off-by: Yunjian Wang <wangyunj...@huawei.com>
---
drivers/net/bonding/rte_eth_bond_pmd.c | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c
b/drivers/net/bonding/rte_eth_bond_pmd.c
index 44deaf1..a77af19 100644
--- a/drivers/net/bonding/rte_eth_bond_pmd.c
+++ b/drivers/net/bonding/rte_eth_bond_pmd.c
@@ -1298,9 +1298,6 @@ struct bwg_slave {
uint16_t i;
- if (unlikely(nb_bufs == 0))
- return 0;
-
/* Copy slave list to protect against slave up/down changes during tx
* bursting */
slave_count = internals->active_slave_count;
@@ -1310,6 +1307,9 @@ struct bwg_slave {
memcpy(slave_port_ids, internals->active_slaves,
sizeof(slave_port_ids[0]) * slave_count);
+ if (unlikely(nb_bufs == 0))
+ goto lacp_send;
+
dist_slave_count = 0;
for (i = 0; i < slave_count; i++) {
struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];
@@ -1365,6 +1365,7 @@ struct bwg_slave {
}
}
+lacp_send:
/* Check for LACP control packets and send if available */
for (i = 0; i < slave_count; i++) {
struct port *port = &bond_mode_8023ad_ports[slave_port_ids[i]];