The branch stable/14 has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=021eb07a8b2806f875aed20de4320cd2d2a90ab7
commit 021eb07a8b2806f875aed20de4320cd2d2a90ab7
Author:     Mark Johnston <ma...@freebsd.org>
AuthorDate: 2025-05-28 13:31:44 +0000
Commit:     Mark Johnston <ma...@freebsd.org>
CommitDate: 2025-06-20 12:46:10 +0000

    igc: Fix some issues in igc_neweitr()

    The justification is the same as in commit fb876eef219e ("e1000: Fix
    some issues in em_newitr()").

    Reviewed by:    kbowling
    MFC after:      2 weeks
    Differential Revision:  https://reviews.freebsd.org/D50548

    (cherry picked from commit ef062029ceffacb6bde3a5639a2bd8c4d59ca1df)
---
 sys/dev/igc/if_igc.c | 40 ++++++++++++++++++++++------------------
 1 file changed, 22 insertions(+), 18 deletions(-)

diff --git a/sys/dev/igc/if_igc.c b/sys/dev/igc/if_igc.c
index f34b0507c1ea..a1ae35c7aa43 100644
--- a/sys/dev/igc/if_igc.c
+++ b/sys/dev/igc/if_igc.c
@@ -909,14 +909,16 @@ igc_neweitr(struct igc_softc *sc, struct igc_rx_queue *que,
     struct tx_ring *txr, struct rx_ring *rxr)
 {
 	struct igc_hw *hw = &sc->hw;
+	unsigned long bytes, bytes_per_packet, packets;
+	unsigned long rxbytes, rxpackets, txbytes, txpackets;
 	u32 neweitr;
-	u32 bytes;
-	u32 bytes_packets;
-	u32 packets;
 	u8 nextlatency;
 
+	rxbytes = atomic_load_long(&rxr->rx_bytes);
+	txbytes = atomic_load_long(&txr->tx_bytes);
+
 	/* Idle, do nothing */
-	if ((txr->tx_bytes == 0) && (rxr->rx_bytes == 0))
+	if (txbytes == 0 && rxbytes == 0)
 		return;
 
 	neweitr = 0;
@@ -936,18 +938,20 @@ igc_neweitr(struct igc_softc *sc, struct igc_rx_queue *que,
 		goto igc_set_next_eitr;
 	}
 
-	bytes = bytes_packets = 0;
+	bytes = bytes_per_packet = 0;
 	/* Get largest values from the associated tx and rx ring */
-	if (txr->tx_bytes && txr->tx_packets) {
-		bytes = txr->tx_bytes;
-		bytes_packets = txr->tx_bytes/txr->tx_packets;
-		packets = txr->tx_packets;
+	txpackets = atomic_load_long(&txr->tx_packets);
+	if (txpackets != 0) {
+		bytes = txbytes;
+		bytes_per_packet = txbytes / txpackets;
+		packets = txpackets;
 	}
-	if (rxr->rx_bytes && rxr->rx_packets) {
-		bytes = max(bytes, rxr->rx_bytes);
-		bytes_packets = max(bytes_packets,
-		    rxr->rx_bytes/rxr->rx_packets);
-		packets = max(packets, rxr->rx_packets);
+	rxpackets = atomic_load_long(&rxr->rx_packets);
+	if (rxpackets != 0) {
+		bytes = lmax(bytes, rxbytes);
+		bytes_per_packet =
+		    lmax(bytes_per_packet, rxbytes / rxpackets);
+		packets = lmax(packets, rxpackets);
 	}
 
 	/* Latency state machine */
@@ -957,7 +961,7 @@ igc_neweitr(struct igc_softc *sc, struct igc_rx_queue *que,
 		break;
 	case eitr_latency_lowest: /* 70k ints/s */
 		/* TSO and jumbo frames */
-		if (bytes_packets > 8000)
+		if (bytes_per_packet > 8000)
 			nextlatency = eitr_latency_bulk;
 		else if ((packets < 5) && (bytes > 512))
 			nextlatency = eitr_latency_low;
@@ -965,14 +969,14 @@ igc_neweitr(struct igc_softc *sc, struct igc_rx_queue *que,
 	case eitr_latency_low: /* 20k ints/s */
 		if (bytes > 10000) {
 			/* Handle TSO */
-			if (bytes_packets > 8000)
+			if (bytes_per_packet > 8000)
 				nextlatency = eitr_latency_bulk;
 			else if ((packets < 10) ||
-			    (bytes_packets > 1200))
+			    (bytes_per_packet > 1200))
 				nextlatency = eitr_latency_bulk;
 			else if (packets > 35)
 				nextlatency = eitr_latency_lowest;
-		} else if (bytes_packets > 2000) {
+		} else if (bytes_per_packet > 2000) {
 			nextlatency = eitr_latency_bulk;
 		} else if (packets < 3 && bytes < 512) {
 			nextlatency = eitr_latency_lowest;
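
[Editor's note] The pattern the patch adopts is to snapshot each ring counter once with
atomic_load_long() into an unsigned long local and compute the ratios only from those
snapshots, which avoids racy re-reads of counters updated by the interrupt path and
avoids truncating long-sized counters into u32 locals. The sketch below illustrates
that snapshot-then-compute pattern in standalone C; the struct, function names, and
the use of C11 <stdatomic.h> are illustrative stand-ins, not the igc driver's actual
definitions or the kernel's atomic_load_long() API.

/*
 * Illustrative sketch only: stand-in types and names, not driver code.
 * C11 atomics stand in for the kernel's atomic_load_long().
 */
#include <stdatomic.h>
#include <stdio.h>

struct ring_stats {
	_Atomic unsigned long bytes;	/* updated concurrently elsewhere */
	_Atomic unsigned long packets;
};

static unsigned long
lmax_sketch(unsigned long a, unsigned long b)
{
	return (a > b ? a : b);
}

/* Largest bytes-per-packet figure across a tx and an rx ring. */
static unsigned long
bytes_per_packet(struct ring_stats *tx, struct ring_stats *rx)
{
	/* Read each counter exactly once; use only the snapshots below. */
	unsigned long txb = atomic_load(&tx->bytes);
	unsigned long txp = atomic_load(&tx->packets);
	unsigned long rxb = atomic_load(&rx->bytes);
	unsigned long rxp = atomic_load(&rx->packets);
	unsigned long bpp = 0;

	if (txp != 0)
		bpp = txb / txp;
	if (rxp != 0)
		bpp = lmax_sketch(bpp, rxb / rxp);
	return (bpp);
}

int
main(void)
{
	struct ring_stats tx = { 120000, 20 }, rx = { 9000, 3 };

	printf("bytes/packet: %lu\n", bytes_per_packet(&tx, &rx));
	return (0);
}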