-----Original Message-----
From: Akihiko Odaki <akihiko.od...@daynix.com>
Sent: Sunday, 29 January 2023 10:16
To: Sriram Yagnaraman <sriram.yagnara...@est.tech>
Cc: qemu-devel@nongnu.org; Jason Wang <jasow...@redhat.com>; Dmitry
Fleytman <dmitry.fleyt...@gmail.com>; Michael S . Tsirkin
<m...@redhat.com>; Marcel Apfelbaum <marcel.apfelb...@gmail.com>
Subject: Re: [PATCH 3/9] igb: implement VFRE and VFTE registers
On 2023/01/28 22:46, Sriram Yagnaraman wrote:
Also add checks for RXDCTL/TXDCTL queue enable bits
Signed-off-by: Sriram Yagnaraman <sriram.yagnara...@est.tech>
---
hw/net/igb_core.c | 42 +++++++++++++++++++++++++++++++-----------
hw/net/igb_regs.h | 3 ++-
2 files changed, 33 insertions(+), 12 deletions(-)
diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c
index 9bd53cc25f..6bca5459b9 100644
--- a/hw/net/igb_core.c
+++ b/hw/net/igb_core.c
@@ -778,6 +778,19 @@ igb_txdesc_writeback(IGBCore *core, dma_addr_t base,
return igb_tx_wb_eic(core, txi->idx);
}
+static inline bool
+igb_tx_enabled(IGBCore *core, const E1000E_RingInfo *txi)
+{
+    bool vmdq = core->mac[MRQC] & 1;
+    uint16_t qn = txi->idx;
+    uint16_t vfn = (qn > IGB_MAX_VF_FUNCTIONS) ?
+                   (qn - IGB_MAX_VF_FUNCTIONS) : qn;
+
+    return (core->mac[TCTL] & E1000_TCTL_EN) &&
+        (vmdq ? (core->mac[VFTE] & BIT(vfn)) : true) &&
Instead, do: (!vmdq || core->mac[VFTE] & BIT(vfn)); a sketch of the helper with that change follows this hunk.
+        (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE);
+}
+
static void
igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
{
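For clarity, here is how the helper could read with that condition. This is only a sketch: it keeps the patch's queue-to-VF mapping and TXDCTL0 indexing unchanged and rewrites just the VFTE test.

static inline bool
igb_tx_enabled(IGBCore *core, const E1000E_RingInfo *txi)
{
    bool vmdq = core->mac[MRQC] & 1;
    uint16_t qn = txi->idx;
    uint16_t vfn = (qn > IGB_MAX_VF_FUNCTIONS) ?
                   (qn - IGB_MAX_VF_FUNCTIONS) : qn;

    /* The per-pool VFTE enable bit only matters in VMDq mode. */
    return (core->mac[TCTL] & E1000_TCTL_EN) &&
           (!vmdq || (core->mac[VFTE] & BIT(vfn))) &&
           (core->mac[TXDCTL0 + (qn * 16)] & E1000_TXDCTL_QUEUE_ENABLE);
}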
@@ -787,8 +800,7 @@ igb_start_xmit(IGBCore *core, const IGB_TxRing *txr)
const E1000E_RingInfo *txi = txr->i;
uint32_t eic = 0;
- /* TODO: check if the queue itself is enabled too. */
- if (!(core->mac[TCTL] & E1000_TCTL_EN)) {
+ if (!igb_tx_enabled(core, txi)) {
trace_e1000e_tx_disabled();
return;
}
@@ -1003,6 +1015,7 @@ static uint16_t igb_receive_assign(IGBCore *core, const struct eth_header *ehdr,
queues = BIT(def_pl >> E1000_VT_CTL_DEFAULT_POOL_SHIFT);
}
+ queues &= core->mac[VFRE];
igb_rss_parse_packet(core, core->rx_pkt, external_tx != NULL,
rss_info);
if (rss_info->queue & 1) {
queues <<= 8;
@@ -1486,7 +1499,7 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
static const int maximum_ethernet_hdr_len = (ETH_HLEN + 4);
uint16_t queues = 0;
- uint32_t n;
+ uint32_t n = 0;
uint8_t min_buf[ETH_ZLEN];
struct iovec min_iov;
struct eth_header *ehdr;
@@ -1566,26 +1579,22 @@ igb_receive_internal(IGBCore *core, const struct iovec *iov, int iovcnt,
}
igb_rx_ring_init(core, &rxr, i);
-
- trace_e1000e_rx_rss_dispatched_to_queue(rxr.i->idx);
-
if (!igb_has_rxbufs(core, rxr.i, total_size)) {
retval = 0;
}
This stops packets from being sent when a disabled queue has no space.
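One way to avoid that would be to skip queues that are not enabled before the buffer check, so only an enabled queue that is out of descriptors clears retval. A rough sketch, assuming the surrounding per-queue loop over i and RXDCTL0/E1000_RXDCTL_QUEUE_ENABLE names defined analogously to the TXDCTL ones used on the tx side:

        /* Only consider queues that are actually enabled; a disabled queue
         * without descriptors should not look like "no buffer space". */
        if (!(core->mac[RXDCTL0 + (i * 16)] & E1000_RXDCTL_QUEUE_ENABLE)) {
            continue;
        }

        igb_rx_ring_init(core, &rxr, i);

        if (!igb_has_rxbufs(core, rxr.i, total_size)) {
            retval = 0;
        }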