Module Name:    src
Committed By:   yamaguchi
Date:           Thu Mar 23 02:42:49 UTC 2023

Modified Files:
        src/sys/dev/pci: if_vioif.c

Log Message:
vioif(4): added a new data structure for network queues

and moved the parameters shared by vioif_txqueue and
vioif_rxqueue into the new structure
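
For reference, the new vioif_netqueue entries live in a single
sc_netqs[] array with the rx and tx queues of each pair interleaved;
the VIOIF_NETQ_* macros introduced by this commit encode that layout.
Below is a minimal stand-alone illustration of the index mapping (the
main() harness is written only for demonstration and is not part of
the commit):

	#include <stdio.h>

	#define VIOIF_NETQ_RX		0
	#define VIOIF_NETQ_TX		1
	#define VIOIF_NETQ_IDX		2
	#define VIOIF_NETQ_DIR(n)	((n) % VIOIF_NETQ_IDX)
	#define VIOIF_NETQ_PAIRIDX(n)	((n) / VIOIF_NETQ_IDX)
	#define VIOIF_NETQ_RXQID(n)	((n) * VIOIF_NETQ_IDX + VIOIF_NETQ_RX)
	#define VIOIF_NETQ_TXQID(n)	((n) * VIOIF_NETQ_IDX + VIOIF_NETQ_TX)

	int
	main(void)
	{
		int pair;

		/* queue pair n occupies slots 2*n (rx) and 2*n + 1 (tx) */
		for (pair = 0; pair < 4; pair++) {
			printf("pair %d: rxqid=%d txqid=%d dir(txqid)=%d\n",
			    pair, VIOIF_NETQ_RXQID(pair),
			    VIOIF_NETQ_TXQID(pair),
			    VIOIF_NETQ_DIR(VIOIF_NETQ_TXQID(pair)));
		}
		return 0;
	}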


To generate a diff of this commit:
cvs rdiff -u -r1.97 -r1.98 src/sys/dev/pci/if_vioif.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/dev/pci/if_vioif.c
diff -u src/sys/dev/pci/if_vioif.c:1.97 src/sys/dev/pci/if_vioif.c:1.98
--- src/sys/dev/pci/if_vioif.c:1.97	Thu Mar 23 02:33:34 2023
+++ src/sys/dev/pci/if_vioif.c	Thu Mar 23 02:42:49 2023
@@ -1,4 +1,4 @@
-/*	$NetBSD: if_vioif.c,v 1.97 2023/03/23 02:33:34 yamaguchi Exp $	*/
+/*	$NetBSD: if_vioif.c,v 1.98 2023/03/23 02:42:49 yamaguchi Exp $	*/
 
 /*
  * Copyright (c) 2020 The NetBSD Foundation, Inc.
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.97 2023/03/23 02:33:34 yamaguchi Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.98 2023/03/23 02:42:49 yamaguchi Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
@@ -205,12 +205,13 @@ struct virtio_net_ctrl_mq {
 
 /*
  * Locking notes:
- * + a field in vioif_txqueue is protected by txq_lock (a spin mutex), and
- *   a field in vioif_rxqueue is protected by rxq_lock (a spin mutex).
+ * + a field in vioif_netqueue is protected by netq_lock (a spin mutex)
 *      - more than one lock cannot be held at once
+ * + a field in vioif_tx_context and vioif_rx_context is also protected
+ *   by netq_lock.
  * + ctrlq_inuse is protected by ctrlq_wait_lock.
  *      - other fields in vioif_ctrlqueue are protected by ctrlq_inuse
- *      - txq_lock or rxq_lock cannot be held along with ctrlq_wait_lock
+ *      - netq_lock cannot be held along with ctrlq_wait_lock
  * + fields in vioif_softc except queues are protected by
  *   sc->sc_lock(an adaptive mutex)
  *      - the lock is held before acquisition of other locks
@@ -236,49 +237,44 @@ struct vioif_net_map {
 	bus_dmamap_t		 vnm_mbuf_map;
 };
 
-struct vioif_txqueue {
-	kmutex_t		*txq_lock;	/* lock for tx operations */
+#define VIOIF_NETQ_RX		0
+#define VIOIF_NETQ_TX		1
+#define VIOIF_NETQ_IDX		2
+#define VIOIF_NETQ_DIR(n)	((n) % VIOIF_NETQ_IDX)
+#define VIOIF_NETQ_PAIRIDX(n)	((n) / VIOIF_NETQ_IDX)
+#define VIOIF_NETQ_RXQID(n)	((n) * VIOIF_NETQ_IDX + VIOIF_NETQ_RX)
+#define VIOIF_NETQ_TXQID(n)	((n) * VIOIF_NETQ_IDX + VIOIF_NETQ_TX)
+
+struct vioif_netqueue {
+	kmutex_t		 netq_lock;
+	struct virtqueue	*netq_vq;
+	bool			 netq_stopping;
+	bool			 netq_running_handle;
+	void			*netq_maps_kva;
+	struct vioif_net_map	*netq_maps;
+
+	void			*netq_softint;
+	struct vioif_work	 netq_work;
+	bool			 netq_workqueue;
+
+	char			 netq_evgroup[32];
+	struct evcnt		 netq_mbuf_load_failed;
+	struct evcnt		 netq_enqueue_reserve_failed;
 
-	struct virtqueue	*txq_vq;
-	bool			txq_stopping;
-	bool			txq_link_active;
-	pcq_t			*txq_intrq;
-
-	void			*txq_maps_kva;
-	struct vioif_net_map	*txq_maps;
-
-	void			*txq_deferred_transmit;
-	void			*txq_handle_si;
-	struct vioif_work	 txq_work;
-	bool			 txq_workqueue;
-	bool			 txq_running_handle;
-
-	char			 txq_evgroup[16];
-	struct evcnt		 txq_defrag_failed;
-	struct evcnt		 txq_mbuf_load_failed;
-	struct evcnt		 txq_enqueue_reserve_failed;
+	void			*netq_ctx;
 };
 
-struct vioif_rxqueue {
-	kmutex_t		*rxq_lock;	/* lock for rx operations */
+struct vioif_tx_context {
+	bool			 txc_link_active;
+	pcq_t			*txc_intrq;
+	void			*txc_deferred_transmit;
 
-	struct virtqueue	*rxq_vq;
-	bool			rxq_stopping;
-
-	void			*rxq_maps_kva;
-	struct vioif_net_map	*rxq_maps;
-
-	void			*rxq_handle_si;
-	struct vioif_work	 rxq_work;
-	bool			 rxq_workqueue;
-	bool			 rxq_running_handle;
-
-	char			 rxq_evgroup[16];
-	struct evcnt		 rxq_mbuf_enobufs;
-	struct evcnt		 rxq_mbuf_load_failed;
-	struct evcnt		 rxq_enqueue_reserve_failed;
+	struct evcnt		 txc_defrag_failed;
 };
 
+struct vioif_rx_context {
+	struct evcnt		 rxc_mbuf_enobufs;
+};
 struct vioif_ctrlqueue {
 	struct virtqueue		*ctrlq_vq;
 	enum {
@@ -325,8 +321,7 @@ struct vioif_softc {
 	struct ethercom		sc_ethercom;
 	int			sc_link_state;
 
-	struct vioif_txqueue	*sc_txq;
-	struct vioif_rxqueue	*sc_rxq;
+	struct vioif_netqueue	*sc_netqs;
 
 	bool			sc_has_ctrl;
 	struct vioif_ctrlqueue	sc_ctrlq;
@@ -365,34 +360,34 @@ static int	vioif_finalize_teardown(devic
 static int	vioif_init(struct ifnet *);
 static void	vioif_stop(struct ifnet *, int);
 static void	vioif_start(struct ifnet *);
-static void	vioif_start_locked(struct ifnet *, struct vioif_txqueue *);
+static void	vioif_start_locked(struct ifnet *, struct vioif_netqueue *);
 static int	vioif_transmit(struct ifnet *, struct mbuf *);
-static void	vioif_transmit_locked(struct ifnet *, struct vioif_txqueue *);
+static void	vioif_transmit_locked(struct ifnet *, struct vioif_netqueue *);
 static int	vioif_ioctl(struct ifnet *, u_long, void *);
 static void	vioif_watchdog(struct ifnet *);
 static int	vioif_ifflags_cb(struct ethercom *);
 
+/* tx & rx */
+static void	vioif_net_sched_handle(struct vioif_softc *,
+		    struct vioif_netqueue *);
+
 /* rx */
 static void	vioif_populate_rx_mbufs_locked(struct vioif_softc *,
-		    struct vioif_rxqueue *);
+		    struct vioif_netqueue *);
 static void	vioif_rx_queue_clear(struct vioif_softc *, struct virtio_softc *,
-		    struct vioif_rxqueue *);
+		    struct vioif_netqueue *);
 static bool	vioif_rx_deq_locked(struct vioif_softc *, struct virtio_softc *,
-		    struct vioif_rxqueue *, u_int, size_t *);
+		    struct vioif_netqueue *, u_int, size_t *);
 static int	vioif_rx_intr(void *);
 static void	vioif_rx_handle(void *);
-static void	vioif_rx_sched_handle(struct vioif_softc *,
-		    struct vioif_rxqueue *);
 
 /* tx */
 static int	vioif_tx_intr(void *);
 static void	vioif_tx_handle(void *);
-static void	vioif_tx_sched_handle(struct vioif_softc *,
-		    struct vioif_txqueue *);
 static void	vioif_tx_queue_clear(struct vioif_softc *, struct virtio_softc *,
-		    struct vioif_txqueue *);
+		    struct vioif_netqueue *);
 static bool	vioif_tx_deq_locked(struct vioif_softc *, struct virtio_softc *,
-		    struct vioif_txqueue *, u_int);
+		    struct vioif_netqueue *, u_int);
 static void	vioif_deferred_transmit(void *);
 
 /* workqueue */
@@ -501,60 +496,199 @@ static void
 vioif_alloc_queues(struct vioif_softc *sc)
 {
 	int nvq_pairs = sc->sc_max_nvq_pairs;
-	int nvqs = nvq_pairs * 2;
-	int i;
+	size_t nvqs, netq_num;
 
 	KASSERT(nvq_pairs <= VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX);
 
-	sc->sc_rxq = kmem_zalloc(sizeof(sc->sc_rxq[0]) * nvq_pairs,
-	    KM_SLEEP);
-	sc->sc_txq = kmem_zalloc(sizeof(sc->sc_txq[0]) * nvq_pairs,
-	    KM_SLEEP);
-
+	nvqs = netq_num = sc->sc_max_nvq_pairs * 2;
 	if (sc->sc_has_ctrl)
 		nvqs++;
 
 	sc->sc_vqs = kmem_zalloc(sizeof(sc->sc_vqs[0]) * nvqs, KM_SLEEP);
-	nvqs = 0;
-	for (i = 0; i < nvq_pairs; i++) {
-		sc->sc_rxq[i].rxq_vq = &sc->sc_vqs[nvqs++];
-		sc->sc_txq[i].txq_vq = &sc->sc_vqs[nvqs++];
-	}
-
-	if (sc->sc_has_ctrl)
-		sc->sc_ctrlq.ctrlq_vq = &sc->sc_vqs[nvqs++];
+	sc->sc_netqs = kmem_zalloc(sizeof(sc->sc_netqs[0]) * netq_num,
+	    KM_SLEEP);
 }
 
 static void
 vioif_free_queues(struct vioif_softc *sc)
 {
-	int nvq_pairs = sc->sc_max_nvq_pairs;
-	int nvqs = nvq_pairs * 2;
+	size_t nvqs, netq_num;
 
+	nvqs = netq_num = sc->sc_max_nvq_pairs * 2;
 	if (sc->sc_ctrlq.ctrlq_vq)
 		nvqs++;
 
-	if (sc->sc_txq) {
-		kmem_free(sc->sc_txq, sizeof(sc->sc_txq[0]) * nvq_pairs);
-		sc->sc_txq = NULL;
+	kmem_free(sc->sc_netqs, sizeof(sc->sc_netqs[0]) * netq_num);
+	kmem_free(sc->sc_vqs, sizeof(sc->sc_vqs[0]) * nvqs);
+	sc->sc_netqs = NULL;
+	sc->sc_vqs = NULL;
+}
+
+static int
+vioif_netqueue_init(struct vioif_softc *sc, struct virtio_softc *vsc,
+    size_t qid, u_int softint_flags)
+{
+	static const struct {
+		const char	*dirname;
+		int		 segsize;
+		int		 nsegs;
+		int 		(*intrhand)(void *);
+		void		(*sihand)(void *);
+	} params[VIOIF_NETQ_IDX] = {
+		[VIOIF_NETQ_RX] = {
+			.dirname	= "rx",
+			.segsize	= MCLBYTES,
+			.nsegs		= 2,
+			.intrhand	= vioif_rx_intr,
+			.sihand		= vioif_rx_handle,
+		},
+		[VIOIF_NETQ_TX] = {
+			.dirname	= "tx",
+			.segsize	= ETHER_MAX_LEN - ETHER_HDR_LEN,
+			.nsegs		= 2,
+			.intrhand	= vioif_tx_intr,
+			.sihand		= vioif_tx_handle,
+		}
+	};
+
+	struct virtqueue *vq;
+	struct vioif_netqueue *netq;
+	struct vioif_tx_context *txc;
+	struct vioif_rx_context *rxc;
+	char qname[32];
+	int r, dir;
+
+	txc = NULL;
+	rxc = NULL;
+	netq = &sc->sc_netqs[qid];
+	vq = &sc->sc_vqs[qid];
+	dir = VIOIF_NETQ_DIR(qid);
+
+	netq->netq_vq = &sc->sc_vqs[qid];
+	netq->netq_stopping = false;
+	netq->netq_running_handle = false;
+
+	snprintf(qname, sizeof(qname), "%s%zu",
+	    params[dir].dirname, VIOIF_NETQ_PAIRIDX(qid));
+	snprintf(netq->netq_evgroup, sizeof(netq->netq_evgroup),
+	    "%s-%s", device_xname(sc->sc_dev), qname);
+
+	mutex_init(&netq->netq_lock, MUTEX_DEFAULT, IPL_NET);
+	r = virtio_alloc_vq(vsc, vq, qid,
+	    params[dir].segsize + sc->sc_hdr_size,
+	    params[dir].nsegs, qname);
+	if (r != 0)
+		goto err;
+	netq->netq_vq = vq;
+
+	netq->netq_vq->vq_intrhand = params[dir].intrhand;
+	netq->netq_vq->vq_intrhand_arg = netq;
+	netq->netq_softint = softint_establish(softint_flags,
+	    params[dir].sihand, netq);
+	if (netq->netq_softint == NULL) {
+		aprint_error_dev(sc->sc_dev,
+		    "couldn't establish %s softint\n",
+		    params[dir].dirname);
+		goto err;
 	}
+	vioif_work_set(&netq->netq_work, params[dir].sihand, netq);
 
-	if (sc->sc_rxq) {
-		kmem_free(sc->sc_rxq, sizeof(sc->sc_rxq[0]) * nvq_pairs);
-		sc->sc_rxq = NULL;
+	switch (dir) {
+	case VIOIF_NETQ_RX:
+		rxc = kmem_zalloc(sizeof(*rxc), KM_SLEEP);
+		netq->netq_ctx = rxc;
+		/* nothing to do */
+		break;
+	case VIOIF_NETQ_TX:
+		txc = kmem_zalloc(sizeof(*txc), KM_SLEEP);
+		netq->netq_ctx = (void *)txc;
+		txc->txc_deferred_transmit = softint_establish(softint_flags,
+		    vioif_deferred_transmit, netq);
+		if (txc->txc_deferred_transmit == NULL) {
+			aprint_error_dev(sc->sc_dev,
+			    "couldn't establish softint for "
+			    "tx deferred transmit\n");
+			goto err;
+		}
+		txc->txc_link_active = VIOIF_IS_LINK_ACTIVE(sc);
+		txc->txc_intrq = pcq_create(vq->vq_num, KM_SLEEP);
+		break;
 	}
 
-	if (sc->sc_vqs) {
-		kmem_free(sc->sc_vqs, sizeof(sc->sc_vqs[0]) * nvqs);
-		sc->sc_vqs = NULL;
+	return 0;
+
+err:
+	netq->netq_ctx = NULL;
+
+	if (rxc != NULL) {
+		kmem_free(rxc, sizeof(*rxc));
+	}
+
+	if (txc != NULL) {
+		if (txc->txc_deferred_transmit != NULL)
+			softint_disestablish(txc->txc_deferred_transmit);
+		if (txc->txc_intrq != NULL)
+			pcq_destroy(txc->txc_intrq);
+		kmem_free(txc, sizeof(*txc));
+	}
+
+	vioif_work_set(&netq->netq_work, NULL, NULL);
+	if (netq->netq_softint != NULL) {
+		softint_disestablish(netq->netq_softint);
+		netq->netq_softint = NULL;
+	}
+	netq->netq_vq->vq_intrhand = NULL;
+	netq->netq_vq->vq_intrhand_arg = NULL;
+
+	virtio_free_vq(vsc, vq);
+	mutex_destroy(&netq->netq_lock);
+	netq->netq_vq = NULL;
+
+	return -1;
+}
+
+static void
+vioif_netqueue_teardown(struct vioif_softc *sc, struct virtio_softc *vsc,
+    size_t qid)
+{
+	struct vioif_netqueue *netq;
+	struct vioif_rx_context *rxc;
+	struct vioif_tx_context *txc;
+	int dir;
+
+	netq = &sc->sc_netqs[qid];
+
+	if (netq->netq_vq == NULL)
+		return;
+
+	netq = &sc->sc_netqs[qid];
+	dir = VIOIF_NETQ_DIR(qid);
+	switch (dir) {
+	case VIOIF_NETQ_RX:
+		rxc = netq->netq_ctx;
+		netq->netq_ctx = NULL;
+		kmem_free(rxc, sizeof(*rxc));
+		break;
+	case VIOIF_NETQ_TX:
+		txc = netq->netq_ctx;
+		netq->netq_ctx = NULL;
+		softint_disestablish(txc->txc_deferred_transmit);
+		pcq_destroy(txc->txc_intrq);
+		kmem_free(txc, sizeof(*txc));
+		break;
 	}
+
+	softint_disestablish(netq->netq_softint);
+	virtio_free_vq(vsc, netq->netq_vq);
+	mutex_destroy(&netq->netq_lock);
+	netq->netq_vq = NULL;
 }
 
 /* allocate memory */
 /*
  * dma memory is used for:
- *   rxq_maps_kva:	 metadata array for received frames (READ)
- *   txq_maps_kva:	 metadata array for frames to be sent (WRITE)
+ *   netq_maps_kva:	 metadata array for received frames (READ) and
+ *			 sent frames (WRITE)
  *   ctrlq_cmd:		 command to be sent via ctrl vq (WRITE)
  *   ctrlq_status:	 return value for a command via ctrl vq (READ)
  *   ctrlq_rx:		 parameter for a VIRTIO_NET_CTRL_RX class command
@@ -570,62 +704,69 @@ static int
 vioif_alloc_mems(struct vioif_softc *sc)
 {
 	struct virtio_softc *vsc = sc->sc_virtio;
-	struct vioif_txqueue *txq;
-	struct vioif_rxqueue *rxq;
-	struct vioif_net_map *maps;
+	struct vioif_netqueue *netq;
 	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
-	int allocsize, allocsize2, r, rsegs, i, qid;
+	struct vioif_net_map *maps;
+	unsigned int vq_num;
+	int r, rsegs;
+	bus_size_t dmamemsize;
+	size_t qid, i, netq_num, kmemsize;
 	void *vaddr;
 	intptr_t p;
 
-	allocsize = 0;
-	for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
-		rxq = &sc->sc_rxq[qid];
-		txq = &sc->sc_txq[qid];
+	netq_num = sc->sc_max_nvq_pairs * 2;
+
+	/* allocate DMA memory */
+	dmamemsize = 0;
 
-		allocsize += sizeof(struct virtio_net_hdr) *
-			(rxq->rxq_vq->vq_num + txq->txq_vq->vq_num);
+	for (qid = 0; qid < netq_num; qid++) {
+		maps = sc->sc_netqs[qid].netq_maps;
+		vq_num = sc->sc_netqs[qid].netq_vq->vq_num;
+		dmamemsize += sizeof(*maps[0].vnm_hdr) * vq_num;
 	}
+
 	if (sc->sc_has_ctrl) {
-		allocsize += sizeof(struct virtio_net_ctrl_cmd);
-		allocsize += sizeof(struct virtio_net_ctrl_status);
-		allocsize += sizeof(struct virtio_net_ctrl_rx);
-		allocsize += sizeof(struct virtio_net_ctrl_mac_tbl)
+		dmamemsize += sizeof(struct virtio_net_ctrl_cmd);
+		dmamemsize += sizeof(struct virtio_net_ctrl_status);
+		dmamemsize += sizeof(struct virtio_net_ctrl_rx);
+		dmamemsize += sizeof(struct virtio_net_ctrl_mac_tbl)
 		    + ETHER_ADDR_LEN;
-		allocsize += sizeof(struct virtio_net_ctrl_mac_tbl)
+		dmamemsize += sizeof(struct virtio_net_ctrl_mac_tbl)
 		    + ETHER_ADDR_LEN * VIRTIO_NET_CTRL_MAC_MAXENTRIES;
-		allocsize += sizeof(struct virtio_net_ctrl_mac_addr);
-		allocsize += sizeof(struct virtio_net_ctrl_mq);
+		dmamemsize += sizeof(struct virtio_net_ctrl_mac_addr);
+		dmamemsize += sizeof(struct virtio_net_ctrl_mq);
 	}
-	r = bus_dmamem_alloc(virtio_dmat(vsc), allocsize, 0, 0,
+
+	r = bus_dmamem_alloc(virtio_dmat(vsc), dmamemsize, 0, 0,
 	    &sc->sc_hdr_segs[0], 1, &rsegs, BUS_DMA_NOWAIT);
 	if (r != 0) {
 		aprint_error_dev(sc->sc_dev,
-		    "DMA memory allocation failed, size %d, "
-		    "error code %d\n", allocsize, r);
+		    "DMA memory allocation failed, size %zu, "
+		    "error code %d\n", dmamemsize, r);
 		goto err_none;
 	}
-	r = bus_dmamem_map(virtio_dmat(vsc),
-	    &sc->sc_hdr_segs[0], 1, allocsize, &vaddr, BUS_DMA_NOWAIT);
+	r = bus_dmamem_map(virtio_dmat(vsc), &sc->sc_hdr_segs[0], 1,
+	    dmamemsize, &vaddr, BUS_DMA_NOWAIT);
 	if (r != 0) {
 		aprint_error_dev(sc->sc_dev,
 		    "DMA memory map failed, error code %d\n", r);
 		goto err_dmamem_alloc;
 	}
 
-	memset(vaddr, 0, allocsize);
+	/* assign DMA memory */
+	memset(vaddr, 0, dmamemsize);
 	sc->sc_dmamem = vaddr;
 	p = (intptr_t) vaddr;
 
-	for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
-		rxq = &sc->sc_rxq[qid];
-		txq = &sc->sc_txq[qid];
-
-		rxq->rxq_maps_kva = vioif_assign_mem(&p,
-		    sizeof(struct virtio_net_hdr) * rxq->rxq_vq->vq_num);
-		txq->txq_maps_kva = vioif_assign_mem(&p,
-		    sizeof(struct virtio_net_hdr) * txq->txq_vq->vq_num);
+	for (qid = 0; qid < netq_num; qid++) {
+		netq = &sc->sc_netqs[qid];
+		maps = netq->netq_maps;
+		vq_num = netq->netq_vq->vq_num;
+
+		netq->netq_maps_kva = vioif_assign_mem(&p,
+		    sizeof(*maps[0].vnm_hdr) * vq_num);
 	}
+
 	if (sc->sc_has_ctrl) {
 		ctrlq->ctrlq_cmd = vioif_assign_mem(&p,
 		    sizeof(*ctrlq->ctrlq_cmd));
@@ -644,72 +785,76 @@ vioif_alloc_mems(struct vioif_softc *sc)
 		ctrlq->ctrlq_mq = vioif_assign_mem(&p, sizeof(*ctrlq->ctrlq_mq));
 	}
 
-	allocsize2 = 0;
-	for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
-		int rxqsize, txqsize;
-
-		rxq = &sc->sc_rxq[qid];
-		txq = &sc->sc_txq[qid];
+	/* allocate kmem */
+	kmemsize = 0;
 
-		rxqsize = rxq->rxq_vq->vq_num;
-		txqsize = txq->txq_vq->vq_num;
+	for (qid = 0; qid < netq_num; qid++) {
+		netq = &sc->sc_netqs[qid];
+		vq_num = netq->netq_vq->vq_num;
 
-		allocsize2 += sizeof(rxq->rxq_maps[0]) * rxqsize;
-		allocsize2 += sizeof(txq->txq_maps[0]) * txqsize;
+		kmemsize += sizeof(netq->netq_maps[0]) * vq_num;
 	}
-	vaddr = kmem_zalloc(allocsize2, KM_SLEEP);
+
+	vaddr = kmem_zalloc(kmemsize, KM_SLEEP);
 	sc->sc_kmem = vaddr;
+
+	/* assign allocated kmem */
 	p = (intptr_t) vaddr;
 
-	for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
-		int rxqsize, txqsize;
-		rxq = &sc->sc_rxq[qid];
-		txq = &sc->sc_txq[qid];
-		rxqsize = rxq->rxq_vq->vq_num;
-		txqsize = txq->txq_vq->vq_num;
-
-		rxq->rxq_maps = vioif_assign_mem(&p,
-		    sizeof(rxq->rxq_maps[0]) * rxqsize);
-		txq->txq_maps = vioif_assign_mem(&p,
-		    sizeof(txq->txq_maps[0]) * txqsize);
-	}
+	for (qid = 0; qid < netq_num; qid++) {
+		netq = &sc->sc_netqs[qid];
+		vq_num = netq->netq_vq->vq_num;
+
+		netq->netq_maps = vioif_assign_mem(&p,
+		    sizeof(netq->netq_maps[0]) * vq_num);
+	}
+
+	/* prepare dmamaps */
+	for (qid = 0; qid < netq_num; qid++) {
+		static const struct {
+			const char	*msg_hdr;
+			const char	*msg_payload;
+			int		 dma_flag;
+			bus_size_t	 dma_size;
+			int		 dma_nsegs;
+		} dmaparams[VIOIF_NETQ_IDX] = {
+			[VIOIF_NETQ_RX] = {
+				.msg_hdr	= "rx header",
+				.msg_payload	= "rx payload",
+				.dma_flag	= BUS_DMA_READ,
+				.dma_size	= MCLBYTES - ETHER_ALIGN,
+				.dma_nsegs	= 1,
+			},
+			[VIOIF_NETQ_TX] = {
+				.msg_hdr	= "tx header",
+				.msg_payload	= "tx payload",
+				.dma_flag	= BUS_DMA_WRITE,
+				.dma_size	= ETHER_MAX_LEN,
+				.dma_nsegs	= VIRTIO_NET_TX_MAXNSEGS,
+			}
+		};
 
-	for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
 		struct virtio_net_hdr *hdrs;
-		unsigned int vq_num;
+		int dir;
 
-		rxq = &sc->sc_rxq[qid];
-		vq_num = rxq->rxq_vq->vq_num;
-		maps = rxq->rxq_maps;
-		hdrs = (struct virtio_net_hdr *)rxq->rxq_maps_kva;
-		for (i = 0; i < vq_num; i++) {
-			maps[i].vnm_hdr = &hdrs[i];
-			r = vioif_dmamap_create_load(sc, &maps[i].vnm_hdr_map,
-			    maps[i].vnm_hdr, sc->sc_hdr_size, 1, BUS_DMA_READ,
-			    "rx header");
-			if (r != 0)
-				goto err_reqs;
+		dir = VIOIF_NETQ_DIR(qid);
+		netq = &sc->sc_netqs[qid];
+		vq_num = netq->netq_vq->vq_num;
+		maps = netq->netq_maps;
+		hdrs = netq->netq_maps_kva;
 
-			r = vioif_dmamap_create(sc, &maps[i].vnm_mbuf_map,
-			    MCLBYTES - ETHER_ALIGN, 1, "rx payload");
-			if (r != 0)
-				goto err_reqs;
-		}
-
-		txq = &sc->sc_txq[qid];
-		vq_num = txq->txq_vq->vq_num;
-		maps = txq->txq_maps;
-		hdrs = (struct virtio_net_hdr *)txq->txq_maps_kva;
 		for (i = 0; i < vq_num; i++) {
 			maps[i].vnm_hdr = &hdrs[i];
+	
 			r = vioif_dmamap_create_load(sc, &maps[i].vnm_hdr_map,
-			    maps[i].vnm_hdr, sc->sc_hdr_size, 1, BUS_DMA_WRITE,
-			    "tx header");
+			    maps[i].vnm_hdr, sc->sc_hdr_size, 1,
+			    dmaparams[dir].dma_flag, dmaparams[dir].msg_hdr);
 			if (r != 0)
 				goto err_reqs;
 
 			r = vioif_dmamap_create(sc, &maps[i].vnm_mbuf_map,
-			    ETHER_MAX_LEN, VIRTIO_NET_TX_MAXNSEGS, "tx payload");
+			    dmaparams[dir].dma_size, dmaparams[dir].dma_nsegs,
+			    dmaparams[dir].msg_payload);
 			if (r != 0)
 				goto err_reqs;
 		}
@@ -779,30 +924,20 @@ err_reqs:
 	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_status_dmamap);
 	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_cmd_dmamap);
 	vioif_dmamap_destroy(sc, &ctrlq->ctrlq_mac_addr_dmamap);
-	for (qid = 0; qid < sc->sc_max_nvq_pairs; qid++) {
-		unsigned int vq_num;
-		rxq = &sc->sc_rxq[qid];
-		txq = &sc->sc_txq[qid];
+	for (qid = 0; qid < netq_num; qid++) {
+		vq_num = sc->sc_netqs[qid].netq_vq->vq_num;
+		maps = sc->sc_netqs[qid].netq_maps;
 
-		vq_num = txq->txq_vq->vq_num;
-		maps = txq->txq_maps;
-		for (i = 0; i < vq_num; i++) {
-			vioif_dmamap_destroy(sc, &maps[i].vnm_mbuf_map);
-			vioif_dmamap_destroy(sc, &maps[i].vnm_hdr_map);
-		}
-
-		vq_num = txq->txq_vq->vq_num;
-		maps = txq->txq_maps;
 		for (i = 0; i < vq_num; i++) {
 			vioif_dmamap_destroy(sc, &maps[i].vnm_mbuf_map);
 			vioif_dmamap_destroy(sc, &maps[i].vnm_hdr_map);
 		}
 	}
 	if (sc->sc_kmem) {
-		kmem_free(sc->sc_kmem, allocsize2);
+		kmem_free(sc->sc_kmem, kmemsize);
 		sc->sc_kmem = NULL;
 	}
-	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_dmamem, allocsize);
+	bus_dmamem_unmap(virtio_dmat(vsc), sc->sc_dmamem, dmamemsize);
 err_dmamem_alloc:
 	bus_dmamem_free(virtio_dmat(vsc), &sc->sc_hdr_segs[0], 1);
 err_none:
@@ -814,14 +949,14 @@ vioif_attach(device_t parent, device_t s
 {
 	struct vioif_softc *sc = device_private(self);
 	struct virtio_softc *vsc = device_private(parent);
+	struct vioif_netqueue *txq0;
 	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
-	struct vioif_txqueue *txq;
-	struct vioif_rxqueue *rxq;
 	uint64_t features, req_features;
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 	u_int softint_flags;
-	int r, i, nvqs = 0, req_flags;
+	int r, i, req_flags;
 	char xnamebuf[MAXCOMLEN];
+	size_t netq_num;
 
 	if (virtio_child(vsc) != NULL) {
 		aprint_normal(": child already attached for %s; "
@@ -933,70 +1068,22 @@ vioif_attach(device_t parent, device_t s
 #endif
 
 	/*
-	 * Allocating virtqueues
+	 * Initialize network queues
 	 */
-	for (i = 0; i < sc->sc_max_nvq_pairs; i++) {
-		rxq = &sc->sc_rxq[i];
-		txq = &sc->sc_txq[i];
-		char qname[32];
-
-		rxq->rxq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
-
-		rxq->rxq_handle_si = softint_establish(softint_flags,
-		    vioif_rx_handle, rxq);
-		if (rxq->rxq_handle_si == NULL) {
-			aprint_error_dev(self, "cannot establish rx softint\n");
-			goto err;
-		}
-
-		snprintf(qname, sizeof(qname), "rx%d", i);
-		r = virtio_alloc_vq(vsc, rxq->rxq_vq, nvqs,
-		    MCLBYTES + sc->sc_hdr_size, 2, qname);
+	netq_num = sc->sc_max_nvq_pairs * 2;
+	for (i = 0; i < netq_num; i++) {
+		r = vioif_netqueue_init(sc, vsc, i, softint_flags);
 		if (r != 0)
 			goto err;
-		nvqs++;
-		rxq->rxq_vq->vq_intrhand = vioif_rx_intr;
-		rxq->rxq_vq->vq_intrhand_arg = (void *)rxq;
-		rxq->rxq_stopping = false;
-		rxq->rxq_running_handle = false;
-		vioif_work_set(&rxq->rxq_work, vioif_rx_handle, rxq);
-
-		txq->txq_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
-
-		txq->txq_deferred_transmit = softint_establish(softint_flags,
-		    vioif_deferred_transmit, txq);
-		if (txq->txq_deferred_transmit == NULL) {
-			aprint_error_dev(self, "cannot establish tx softint\n");
-			goto err;
-		}
-		txq->txq_handle_si = softint_establish(softint_flags,
-		    vioif_tx_handle, txq);
-		if (txq->txq_handle_si == NULL) {
-			aprint_error_dev(self, "cannot establish tx softint\n");
-			goto err;
-		}
-
-		snprintf(qname, sizeof(qname), "tx%d", i);
-		r = virtio_alloc_vq(vsc, txq->txq_vq, nvqs,
-		    sc->sc_hdr_size + (ETHER_MAX_LEN - ETHER_HDR_LEN),
-		    VIRTIO_NET_TX_MAXNSEGS + 1, qname);
-		if (r != 0)
-			goto err;
-		nvqs++;
-		txq->txq_vq->vq_intrhand = vioif_tx_intr;
-		txq->txq_vq->vq_intrhand_arg = (void *)txq;
-		txq->txq_link_active = VIOIF_IS_LINK_ACTIVE(sc);
-		txq->txq_stopping = false;
-		txq->txq_running_handle = false;
-		txq->txq_intrq = pcq_create(txq->txq_vq->vq_num, KM_SLEEP);
-		vioif_work_set(&txq->txq_work, vioif_tx_handle, txq);
 	}
 
 	if (sc->sc_has_ctrl) {
+		int ctrlq_idx = sc->sc_max_nvq_pairs * 2;
 		/*
 		 * Allocating a virtqueue for control channel
 		 */
-		r = virtio_alloc_vq(vsc, ctrlq->ctrlq_vq, nvqs,
+		sc->sc_ctrlq.ctrlq_vq = &sc->sc_vqs[ctrlq_idx];
+		r = virtio_alloc_vq(vsc, ctrlq->ctrlq_vq, ctrlq_idx,
 		    NBPG, 1, "control");
 		if (r != 0) {
 			aprint_error_dev(self, "failed to allocate "
@@ -1007,7 +1094,6 @@ vioif_attach(device_t parent, device_t s
 			cv_destroy(&ctrlq->ctrlq_wait);
 			mutex_destroy(&ctrlq->ctrlq_wait_lock);
 		} else {
-			nvqs++;
 			ctrlq->ctrlq_vq->vq_intrhand = vioif_ctrl_intr;
 			ctrlq->ctrlq_vq->vq_intrhand_arg = (void *) ctrlq;
 		}
@@ -1047,8 +1133,8 @@ vioif_attach(device_t parent, device_t s
 	ifp->if_stop = vioif_stop;
 	ifp->if_capabilities = 0;
 	ifp->if_watchdog = vioif_watchdog;
-	txq = &sc->sc_txq[0];
-	IFQ_SET_MAXLEN(&ifp->if_snd, MAX(txq->txq_vq->vq_num, IFQ_MAXLEN));
+	txq0 = &sc->sc_netqs[VIOIF_NETQ_TXQID(0)];
+	IFQ_SET_MAXLEN(&ifp->if_snd, MAX(txq0->netq_vq->vq_num, IFQ_MAXLEN));
 	IFQ_SET_READY(&ifp->if_snd);
 
 	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
@@ -1061,49 +1147,18 @@ vioif_attach(device_t parent, device_t s
 	return;
 
 err:
-	for (i = 0; i < sc->sc_max_nvq_pairs; i++) {
-		rxq = &sc->sc_rxq[i];
-		txq = &sc->sc_txq[i];
-
-		if (rxq->rxq_lock) {
-			mutex_obj_free(rxq->rxq_lock);
-			rxq->rxq_lock = NULL;
-		}
-
-		if (rxq->rxq_handle_si) {
-			softint_disestablish(rxq->rxq_handle_si);
-			rxq->rxq_handle_si = NULL;
-		}
-
-		if (txq->txq_lock) {
-			mutex_obj_free(txq->txq_lock);
-			txq->txq_lock = NULL;
-		}
-
-		if (txq->txq_handle_si) {
-			softint_disestablish(txq->txq_handle_si);
-			txq->txq_handle_si = NULL;
-		}
-
-		if (txq->txq_deferred_transmit) {
-			softint_disestablish(txq->txq_deferred_transmit);
-			txq->txq_deferred_transmit = NULL;
-		}
-
-		if (txq->txq_intrq) {
-			pcq_destroy(txq->txq_intrq);
-			txq->txq_intrq = NULL;
-		}
+	netq_num = sc->sc_max_nvq_pairs * 2;
+	for (i = 0; i < netq_num; i++) {
+		vioif_netqueue_teardown(sc, vsc, i);
 	}
 
 	if (sc->sc_has_ctrl) {
 		cv_destroy(&ctrlq->ctrlq_wait);
 		mutex_destroy(&ctrlq->ctrlq_wait_lock);
+		virtio_free_vq(vsc, ctrlq->ctrlq_vq);
+		ctrlq->ctrlq_vq = NULL;
 	}
 
-	while (nvqs > 0)
-		virtio_free_vq(vsc, &sc->sc_vqs[--nvqs]);
-
 	vioif_free_queues(sc);
 	mutex_destroy(&sc->sc_lock);
 	virtio_child_attach_failed(vsc);
@@ -1129,16 +1184,13 @@ static void
 vioif_enable_interrupt_vqpairs(struct vioif_softc *sc)
 {
 	struct virtio_softc *vsc = sc->sc_virtio;
-	struct vioif_txqueue *txq;
-	struct vioif_rxqueue *rxq;
-	int i;
-
-	for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
-		txq = &sc->sc_txq[i];
-		rxq = &sc->sc_rxq[i];
+	struct vioif_netqueue *netq;
+	size_t i, netq_act_num;
 
-		virtio_start_vq_intr(vsc, txq->txq_vq);
-		virtio_start_vq_intr(vsc, rxq->rxq_vq);
+	netq_act_num = sc->sc_act_nvq_pairs * 2;
+	for (i = 0; i < netq_act_num; i++) {
+		netq = &sc->sc_netqs[i];
+		virtio_start_vq_intr(vsc, netq->netq_vq);
 	}
 }
 
@@ -1146,16 +1198,13 @@ static void
 vioif_disable_interrupt_vqpairs(struct vioif_softc *sc)
 {
 	struct virtio_softc *vsc = sc->sc_virtio;
-	struct vioif_txqueue *txq;
-	struct vioif_rxqueue *rxq;
-	int i;
-
-	for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
-		rxq = &sc->sc_rxq[i];
-		txq = &sc->sc_txq[i];
+	struct vioif_netqueue *netq;
+	size_t i, netq_act_num;
 
-		virtio_stop_vq_intr(vsc, rxq->rxq_vq);
-		virtio_stop_vq_intr(vsc, txq->txq_vq);
+	netq_act_num = sc->sc_act_nvq_pairs * 2;
+	for (i = 0; i < netq_act_num; i++) {
+		netq = &sc->sc_netqs[i];
+		virtio_stop_vq_intr(vsc, netq->netq_vq);
 	}
 }
 
@@ -1167,7 +1216,7 @@ vioif_init(struct ifnet *ifp)
 {
 	struct vioif_softc *sc = ifp->if_softc;
 	struct virtio_softc *vsc = sc->sc_virtio;
-	struct vioif_rxqueue *rxq;
+	struct vioif_netqueue *netq;
 	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
 	int r, i;
 
@@ -1182,12 +1231,11 @@ vioif_init(struct ifnet *ifp)
 	virtio_negotiate_features(vsc, virtio_features(vsc));
 
 	for (i = 0; i < sc->sc_req_nvq_pairs; i++) {
-		rxq = &sc->sc_rxq[i];
-
-		mutex_enter(rxq->rxq_lock);
-		vioif_populate_rx_mbufs_locked(sc, rxq);
-		mutex_exit(rxq->rxq_lock);
+		netq = &sc->sc_netqs[VIOIF_NETQ_RXQID(i)];
 
+		mutex_enter(&netq->netq_lock);
+		vioif_populate_rx_mbufs_locked(sc, netq);
+		mutex_exit(&netq->netq_lock);
 	}
 
 	virtio_reinit_end(vsc);
@@ -1201,11 +1249,12 @@ vioif_init(struct ifnet *ifp)
 	else
 		sc->sc_act_nvq_pairs = 1;
 
+	SET(ifp->if_flags, IFF_RUNNING);
+	CLR(ifp->if_flags, IFF_OACTIVE);
+
 	vioif_enable_interrupt_vqpairs(sc);
 
 	vioif_update_link_status(sc);
-	ifp->if_flags |= IFF_RUNNING;
-	ifp->if_flags &= ~IFF_OACTIVE;
 	r = vioif_rx_filter(sc);
 
 	return r;
@@ -1216,23 +1265,19 @@ vioif_stop(struct ifnet *ifp, int disabl
 {
 	struct vioif_softc *sc = ifp->if_softc;
 	struct virtio_softc *vsc = sc->sc_virtio;
-	struct vioif_txqueue *txq;
-	struct vioif_rxqueue *rxq;
+	struct vioif_netqueue *netq;
 	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
-	int i;
+	size_t i, netq_act_num;
 
+	netq_act_num = sc->sc_act_nvq_pairs * 2;
 
-	for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
-		txq = &sc->sc_txq[i];
-		rxq = &sc->sc_rxq[i];
+	CLR(ifp->if_flags, IFF_RUNNING);
+	for (i = 0; i < netq_act_num; i++) {
+		netq = &sc->sc_netqs[i];
 
-		mutex_enter(rxq->rxq_lock);
-		rxq->rxq_stopping = true;
-		mutex_exit(rxq->rxq_lock);
-
-		mutex_enter(txq->txq_lock);
-		txq->txq_stopping = true;
-		mutex_exit(txq->txq_lock);
+		mutex_enter(&netq->netq_lock);
+		netq->netq_stopping = true;
+		mutex_exit(&netq->netq_lock);
 	}
 
 	/* disable interrupts */
@@ -1250,59 +1295,55 @@ vioif_stop(struct ifnet *ifp, int disabl
 
 	vioif_intr_barrier();
 
-	for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
-		txq = &sc->sc_txq[i];
-		rxq = &sc->sc_rxq[i];
-
-		vioif_work_wait(sc->sc_txrx_workqueue, &rxq->rxq_work);
-		vioif_work_wait(sc->sc_txrx_workqueue, &txq->txq_work);
+	for (i = 0; i < netq_act_num; i++) {
+		netq = &sc->sc_netqs[i];
+		vioif_work_wait(sc->sc_txrx_workqueue, &netq->netq_work);
 	}
 
 	for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
-		vioif_rx_queue_clear(sc, vsc, &sc->sc_rxq[i]);
-		vioif_tx_queue_clear(sc, vsc, &sc->sc_txq[i]);
-	}
+		netq = &sc->sc_netqs[VIOIF_NETQ_RXQID(i)];
+		vioif_rx_queue_clear(sc, vsc, netq);
 
-	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
+		netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
+		vioif_tx_queue_clear(sc, vsc, netq);
+	}
 
 	/* all packet processing is stopped */
-	for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
-		txq = &sc->sc_txq[i];
-		rxq = &sc->sc_rxq[i];
+	for (i = 0; i < netq_act_num; i++) {
+		netq = &sc->sc_netqs[i];
 
-		mutex_enter(rxq->rxq_lock);
-		rxq->rxq_stopping = false;
-		KASSERT(!rxq->rxq_running_handle);
-		mutex_exit(rxq->rxq_lock);
-
-		mutex_enter(txq->txq_lock);
-		txq->txq_stopping = false;
-		KASSERT(!txq->txq_running_handle);
-		mutex_exit(txq->txq_lock);
+		mutex_enter(&netq->netq_lock);
+		netq->netq_stopping = false;
+		mutex_exit(&netq->netq_lock);
 	}
 }
 
 static void
-vioif_send_common_locked(struct ifnet *ifp, struct vioif_txqueue *txq,
+vioif_send_common_locked(struct ifnet *ifp, struct vioif_netqueue *netq,
     bool is_transmit)
 {
 	struct vioif_softc *sc = ifp->if_softc;
 	struct virtio_softc *vsc = sc->sc_virtio;
-	struct virtqueue *vq = txq->txq_vq;
+	struct virtqueue *vq = netq->netq_vq;
+	struct vioif_tx_context *txc;
 	struct vioif_net_map *map;
 	struct virtio_net_hdr *hdr;
 	struct mbuf *m;
 	int queued = 0;
 
-	KASSERT(mutex_owned(txq->txq_lock));
+	KASSERT(mutex_owned(&netq->netq_lock));
 
-	if ((ifp->if_flags & IFF_RUNNING) == 0)
+	if (netq->netq_stopping ||
+	    !ISSET(ifp->if_flags, IFF_RUNNING))
 		return;
 
-	if (!txq->txq_link_active || txq->txq_stopping)
+	txc = netq->netq_ctx;
+
+	if (!txc->txc_link_active)
 		return;
 
-	if ((ifp->if_flags & IFF_OACTIVE) != 0 && !is_transmit)
+	if (!is_transmit &&
+	    ISSET(ifp->if_flags, IFF_OACTIVE))
 		return;
 
 	for (;;) {
@@ -1314,7 +1355,7 @@ vioif_send_common_locked(struct ifnet *i
 			panic("enqueue_prep for tx buffers");
 
 		if (is_transmit)
-			m = pcq_get(txq->txq_intrq);
+			m = pcq_get(txc->txc_intrq);
 		else
 			IFQ_DEQUEUE(&ifp->if_snd, m);
 
@@ -1323,7 +1364,7 @@ vioif_send_common_locked(struct ifnet *i
 			break;
 		}
 
-		map = &txq->txq_maps[slot];
+		map = &netq->netq_maps[slot];
 		KASSERT(map->vnm_mbuf == NULL);
 
 		r = bus_dmamap_load_mbuf(virtio_dmat(vsc),
@@ -1334,7 +1375,7 @@ vioif_send_common_locked(struct ifnet *i
 
 			newm = m_defrag(m, M_NOWAIT);
 			if (newm == NULL) {
-				txq->txq_defrag_failed.ev_count++;
+				txc->txc_defrag_failed.ev_count++;
 				goto skip;
 			}
 
@@ -1343,7 +1384,7 @@ vioif_send_common_locked(struct ifnet *i
 			    map->vnm_mbuf_map, m,
 			    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
 			if (r != 0) {
-				txq->txq_mbuf_load_failed.ev_count++;
+				netq->netq_mbuf_load_failed.ev_count++;
 skip:
 				m_freem(m);
 				if_statinc(ifp, if_oerrors);
@@ -1356,7 +1397,7 @@ skip:
 		r = virtio_enqueue_reserve(vsc, vq, slot,
 		    map->vnm_mbuf_map->dm_nsegs + 1);
 		if (r != 0) {
-			txq->txq_enqueue_reserve_failed.ev_count++;
+			netq->netq_enqueue_reserve_failed.ev_count++;
 			bus_dmamap_unload(virtio_dmat(vsc),
 			     map->vnm_mbuf_map);
 			/* slot already freed by virtio_enqueue_reserve */
@@ -1387,13 +1428,13 @@ skip:
 }
 
 static void
-vioif_start_locked(struct ifnet *ifp, struct vioif_txqueue *txq)
+vioif_start_locked(struct ifnet *ifp, struct vioif_netqueue *netq)
 {
 
 	/*
 	 * ifp->if_obytes and ifp->if_omcasts are added in if_transmit()@if.c.
 	 */
-	vioif_send_common_locked(ifp, txq, false);
+	vioif_send_common_locked(ifp, netq, false);
 
 }
 
@@ -1401,15 +1442,15 @@ static void
 vioif_start(struct ifnet *ifp)
 {
 	struct vioif_softc *sc = ifp->if_softc;
-	struct vioif_txqueue *txq = &sc->sc_txq[0];
+	struct vioif_netqueue *txq0 = &sc->sc_netqs[VIOIF_NETQ_TXQID(0)];
 
 #ifdef VIOIF_MPSAFE
 	KASSERT(if_is_mpsafe(ifp));
 #endif
 
-	mutex_enter(txq->txq_lock);
-	vioif_start_locked(ifp, txq);
-	mutex_exit(txq->txq_lock);
+	mutex_enter(&txq0->netq_lock);
+	vioif_start_locked(ifp, txq0);
+	mutex_exit(&txq0->netq_lock);
 }
 
 static inline int
@@ -1418,27 +1459,29 @@ vioif_select_txqueue(struct ifnet *ifp, 
 	struct vioif_softc *sc = ifp->if_softc;
 	u_int cpuid = cpu_index(curcpu());
 
-	return cpuid % sc->sc_act_nvq_pairs;
+	return VIOIF_NETQ_TXQID(cpuid % sc->sc_act_nvq_pairs);
 }
 
 static void
-vioif_transmit_locked(struct ifnet *ifp, struct vioif_txqueue *txq)
+vioif_transmit_locked(struct ifnet *ifp, struct vioif_netqueue *netq)
 {
 
-	vioif_send_common_locked(ifp, txq, true);
+	vioif_send_common_locked(ifp, netq, true);
 }
 
 static int
 vioif_transmit(struct ifnet *ifp, struct mbuf *m)
 {
 	struct vioif_softc *sc = ifp->if_softc;
-	struct vioif_txqueue *txq;
+	struct vioif_netqueue *netq;
+	struct vioif_tx_context *txc;
 	int qid;
 
 	qid = vioif_select_txqueue(ifp, m);
-	txq = &sc->sc_txq[qid];
+	netq = &sc->sc_netqs[qid];
+	txc = netq->netq_ctx;
 
-	if (__predict_false(!pcq_put(txq->txq_intrq, m))) {
+	if (__predict_false(!pcq_put(txc->txc_intrq, m))) {
 		m_freem(m);
 		return ENOBUFS;
 	}
@@ -1449,9 +1492,9 @@ vioif_transmit(struct ifnet *ifp, struct
 		if_statinc_ref(nsr, if_omcasts);
 	IF_STAT_PUTREF(ifp);
 
-	if (mutex_tryenter(txq->txq_lock)) {
-		vioif_transmit_locked(ifp, txq);
-		mutex_exit(txq->txq_lock);
+	if (mutex_tryenter(&netq->netq_lock)) {
+		vioif_transmit_locked(ifp, netq);
+		mutex_exit(&netq->netq_lock);
 	}
 
 	return 0;
@@ -1460,14 +1503,14 @@ vioif_transmit(struct ifnet *ifp, struct
 static void
 vioif_deferred_transmit(void *arg)
 {
-	struct vioif_txqueue *txq = arg;
-	struct virtio_softc *vsc = txq->txq_vq->vq_owner;
+	struct vioif_netqueue *netq = arg;
+	struct virtio_softc *vsc = netq->netq_vq->vq_owner;
 	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 
-	mutex_enter(txq->txq_lock);
-	vioif_send_common_locked(ifp, txq, true);
-	mutex_exit(txq->txq_lock);
+	mutex_enter(&netq->netq_lock);
+	vioif_send_common_locked(ifp, netq, true);
+	mutex_exit(&netq->netq_lock);
 }
 
 static int
@@ -1495,37 +1538,54 @@ void
 vioif_watchdog(struct ifnet *ifp)
 {
 	struct vioif_softc *sc = ifp->if_softc;
-	struct vioif_txqueue *txq;
+	struct vioif_netqueue *netq;
 	int i;
 
 	if (ifp->if_flags & IFF_RUNNING) {
 		for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
-			txq = &sc->sc_txq[i];
+			netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
 
-			mutex_enter(txq->txq_lock);
-			if (!txq->txq_running_handle) {
-				txq->txq_running_handle = true;
-				vioif_tx_sched_handle(sc, txq);
+			mutex_enter(&netq->netq_lock);
+			if (!netq->netq_running_handle) {
+				netq->netq_running_handle = true;
+				vioif_net_sched_handle(sc, netq);
 			}
-			mutex_exit(txq->txq_lock);
+			mutex_exit(&netq->netq_lock);
 		}
 	}
 }
 
+static void
+vioif_net_sched_handle(struct vioif_softc *sc, struct vioif_netqueue *netq)
+{
+
+	KASSERT(mutex_owned(&netq->netq_lock));
+	KASSERT(!netq->netq_stopping);
+
+	if (netq->netq_workqueue) {
+		vioif_work_add(sc->sc_txrx_workqueue, &netq->netq_work);
+	} else {
+		softint_schedule(netq->netq_softint);
+	}
+}
+
 /*
  * Receive implementation
  */
 /* add mbufs for all the empty receive slots */
 static void
-vioif_populate_rx_mbufs_locked(struct vioif_softc *sc, struct vioif_rxqueue *rxq)
+vioif_populate_rx_mbufs_locked(struct vioif_softc *sc, struct vioif_netqueue *netq)
 {
-	struct virtqueue *vq = rxq->rxq_vq;
+	struct virtqueue *vq = netq->netq_vq;
 	struct virtio_softc *vsc = vq->vq_owner;
+	struct vioif_rx_context *rxc;
 	struct vioif_net_map *map;
 	struct mbuf *m;
 	int i, r, ndone = 0;
 
-	KASSERT(mutex_owned(rxq->rxq_lock));
+	KASSERT(mutex_owned(&netq->netq_lock));
+
+	rxc = netq->netq_ctx;
 
 	for (i = 0; i < vq->vq_num; i++) {
 		int slot;
@@ -1535,20 +1595,20 @@ vioif_populate_rx_mbufs_locked(struct vi
 		if (__predict_false(r != 0))
 			panic("enqueue_prep for rx buffers");
 
-		map = &rxq->rxq_maps[slot];
+		map = &netq->netq_maps[slot];
 		KASSERT(map->vnm_mbuf == NULL);
 
 		MGETHDR(m, M_DONTWAIT, MT_DATA);
 		if (m == NULL) {
 			virtio_enqueue_abort(vsc, vq, slot);
-			rxq->rxq_mbuf_enobufs.ev_count++;
+			rxc->rxc_mbuf_enobufs.ev_count++;
 			break;
 		}
 		MCLGET(m, M_DONTWAIT);
 		if ((m->m_flags & M_EXT) == 0) {
 			virtio_enqueue_abort(vsc, vq, slot);
 			m_freem(m);
-			rxq->rxq_mbuf_enobufs.ev_count++;
+			rxc->rxc_mbuf_enobufs.ev_count++;
 			break;
 		}
 
@@ -1561,14 +1621,14 @@ vioif_populate_rx_mbufs_locked(struct vi
 		if (r != 0) {
 			virtio_enqueue_abort(vsc, vq, slot);
 			m_freem(m);
-			rxq->rxq_mbuf_load_failed.ev_count++;
+			netq->netq_mbuf_load_failed.ev_count++;
 			break;
 		}
 
 		r = virtio_enqueue_reserve(vsc, vq, slot,
 		    map->vnm_mbuf_map->dm_nsegs + 1);
 		if (r != 0) {
-			rxq->rxq_enqueue_reserve_failed.ev_count++;
+			netq->netq_enqueue_reserve_failed.ev_count++;
 			bus_dmamap_unload(virtio_dmat(vsc), map->vnm_mbuf_map);
 			m_freem(m);
 			/* slot already freed by virtio_enqueue_reserve */
@@ -1591,23 +1651,23 @@ vioif_populate_rx_mbufs_locked(struct vi
 
 static void
 vioif_rx_queue_clear(struct vioif_softc *sc, struct virtio_softc *vsc,
-    struct vioif_rxqueue *rxq)
+    struct vioif_netqueue *netq)
 {
 	struct vioif_net_map *map;
 	unsigned int i, vq_num;
 	bool more;
 
-	mutex_enter(rxq->rxq_lock);
-	vq_num = rxq->rxq_vq->vq_num;
+	mutex_enter(&netq->netq_lock);
+	vq_num = netq->netq_vq->vq_num;
 
 	for (;;) {
-		more = vioif_rx_deq_locked(sc, vsc, rxq, vq_num, NULL);
+		more = vioif_rx_deq_locked(sc, vsc, netq, vq_num, NULL);
 		if (more == false)
 			break;
 	}
 
 	for (i = 0; i < vq_num; i++) {
-		map = &rxq->rxq_maps[i];
+		map = &netq->netq_maps[i];
 
 		if (map->vnm_mbuf == NULL)
 			continue;
@@ -1616,15 +1676,15 @@ vioif_rx_queue_clear(struct vioif_softc 
 		m_freem(map->vnm_mbuf);
 		map->vnm_mbuf = NULL;
 	}
-	mutex_exit(rxq->rxq_lock);
+	mutex_exit(&netq->netq_lock);
 }
 
 /* dequeue received packets */
 static bool
 vioif_rx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc,
-    struct vioif_rxqueue *rxq, u_int limit, size_t *ndeqp)
+    struct vioif_netqueue *netq, u_int limit, size_t *ndeqp)
 {
-	struct virtqueue *vq = rxq->rxq_vq;
+	struct virtqueue *vq = netq->netq_vq;
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 	struct vioif_net_map *map;
 	struct mbuf *m;
@@ -1632,7 +1692,7 @@ vioif_rx_deq_locked(struct vioif_softc *
 	bool more;
 	size_t ndeq;
 
-	KASSERT(mutex_owned(rxq->rxq_lock));
+	KASSERT(mutex_owned(&netq->netq_lock));
 
 	more = false;
 	ndeq = 0;
@@ -1649,7 +1709,7 @@ vioif_rx_deq_locked(struct vioif_softc *
 		if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
 			break;
 
-		map = &rxq->rxq_maps[slot];
+		map = &netq->netq_maps[slot];
 		KASSERT(map->vnm_mbuf != NULL);
 
 		bus_dmamap_sync(virtio_dmat(vsc), map->vnm_hdr_map,
@@ -1677,106 +1737,87 @@ done:
 /* rx interrupt; call _dequeue above and schedule a softint */
 
 static void
-vioif_rx_handle_locked(void *xrxq, u_int limit)
+vioif_rx_handle_locked(void *xnetq, u_int limit)
 {
-	struct vioif_rxqueue *rxq = xrxq;
-	struct virtqueue *vq = rxq->rxq_vq;
+	struct vioif_netqueue *netq = xnetq;
+	struct virtqueue *vq = netq->netq_vq;
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	bool more;
 	int enqueued;
 	size_t ndeq;
 
-	KASSERT(mutex_owned(rxq->rxq_lock));
-	KASSERT(!rxq->rxq_stopping);
+	KASSERT(mutex_owned(&netq->netq_lock));
+	KASSERT(!netq->netq_stopping);
 
-	more = vioif_rx_deq_locked(sc, vsc, rxq, limit, &ndeq);
+	more = vioif_rx_deq_locked(sc, vsc, netq, limit, &ndeq);
 	if (ndeq > 0)
-		vioif_populate_rx_mbufs_locked(sc, rxq);
+		vioif_populate_rx_mbufs_locked(sc, netq);
 
 	if (more) {
-		vioif_rx_sched_handle(sc, rxq);
+		vioif_net_sched_handle(sc, netq);
 		return;
 	}
 
-	enqueued = virtio_start_vq_intr(vsc, rxq->rxq_vq);
+	enqueued = virtio_start_vq_intr(vsc, netq->netq_vq);
 	if (enqueued != 0) {
-		virtio_stop_vq_intr(vsc, rxq->rxq_vq);
-		vioif_rx_sched_handle(sc, rxq);
+		virtio_stop_vq_intr(vsc, netq->netq_vq);
+		vioif_net_sched_handle(sc, netq);
 		return;
 	}
 
-	rxq->rxq_running_handle = false;
+	netq->netq_running_handle = false;
 }
 
 static int
 vioif_rx_intr(void *arg)
 {
-	struct vioif_rxqueue *rxq = arg;
-	struct virtqueue *vq = rxq->rxq_vq;
+	struct vioif_netqueue *netq = arg;
+	struct virtqueue *vq = netq->netq_vq;
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	u_int limit;
 
+	mutex_enter(&netq->netq_lock);
 
-	mutex_enter(rxq->rxq_lock);
-
-	/* rx handler is already running in softint/workqueue */
-	if (rxq->rxq_running_handle)
+	/* handler is already running in softint/workqueue */
+	if (netq->netq_running_handle)
 		goto done;
 
-	if (rxq->rxq_stopping)
-		goto done;
-
-	rxq->rxq_running_handle = true;
+	netq->netq_running_handle = true;
 
 	limit = sc->sc_rx_intr_process_limit;
 	virtio_stop_vq_intr(vsc, vq);
-	vioif_rx_handle_locked(rxq, limit);
+	vioif_rx_handle_locked(netq, limit);
 
 done:
-	mutex_exit(rxq->rxq_lock);
+	mutex_exit(&netq->netq_lock);
 	return 1;
 }
 
 static void
-vioif_rx_handle(void *xrxq)
+vioif_rx_handle(void *xnetq)
 {
-	struct vioif_rxqueue *rxq = xrxq;
-	struct virtqueue *vq = rxq->rxq_vq;
+	struct vioif_netqueue *netq = xnetq;
+	struct virtqueue *vq = netq->netq_vq;
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	u_int limit;
 
-	mutex_enter(rxq->rxq_lock);
+	mutex_enter(&netq->netq_lock);
 
-	KASSERT(rxq->rxq_running_handle);
+	KASSERT(netq->netq_running_handle);
 
-	if (rxq->rxq_stopping) {
-		rxq->rxq_running_handle = false;
+	if (netq->netq_stopping) {
+		netq->netq_running_handle = false;
 		goto done;
 	}
 
 	limit = sc->sc_rx_process_limit;
-	vioif_rx_handle_locked(rxq, limit);
+	vioif_rx_handle_locked(netq, limit);
 
 done:
-	mutex_exit(rxq->rxq_lock);
-}
-
-static void
-vioif_rx_sched_handle(struct vioif_softc *sc, struct vioif_rxqueue *rxq)
-{
-
-	KASSERT(mutex_owned(rxq->rxq_lock));
-
-	if (rxq->rxq_stopping)
-		return;
-
-	if (rxq->rxq_workqueue)
-		vioif_work_add(sc->sc_txrx_workqueue, &rxq->rxq_work);
-	else
-		softint_schedule(rxq->rxq_handle_si);
+	mutex_exit(&netq->netq_lock);
 }
 
 /*
@@ -1790,21 +1831,22 @@ vioif_rx_sched_handle(struct vioif_softc
  */
 
 static void
-vioif_tx_handle_locked(struct vioif_txqueue *txq, u_int limit)
+vioif_tx_handle_locked(struct vioif_netqueue *netq, u_int limit)
 {
-	struct virtqueue *vq = txq->txq_vq;
+	struct virtqueue *vq = netq->netq_vq;
+	struct vioif_tx_context *txc = netq->netq_ctx;
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 	bool more;
 	int enqueued;
 
-	KASSERT(mutex_owned(txq->txq_lock));
-	KASSERT(!txq->txq_stopping);
+	KASSERT(mutex_owned(&netq->netq_lock));
+	KASSERT(!netq->netq_stopping);
 
-	more = vioif_tx_deq_locked(sc, vsc, txq, limit);
+	more = vioif_tx_deq_locked(sc, vsc, netq, limit);
 	if (more) {
-		vioif_tx_sched_handle(sc, txq);
+		vioif_net_sched_handle(sc, netq);
 		return;
 	}
 
@@ -1813,111 +1855,94 @@ vioif_tx_handle_locked(struct vioif_txqu
 	    virtio_start_vq_intr(vsc, vq);
 	if (enqueued != 0) {
 		virtio_stop_vq_intr(vsc, vq);
-		vioif_tx_sched_handle(sc, txq);
+		vioif_net_sched_handle(sc, netq);
 		return;
 	}
 
-	txq->txq_running_handle = false;
+	netq->netq_running_handle = false;
 
 	/* for ALTQ */
-	if (txq == &sc->sc_txq[0]) {
+	if (netq == &sc->sc_netqs[VIOIF_NETQ_TXQID(0)]) {
 		if_schedule_deferred_start(ifp);
 		ifp->if_flags &= ~IFF_OACTIVE;
 	}
-	softint_schedule(txq->txq_deferred_transmit);
+	softint_schedule(txc->txc_deferred_transmit);
 }
 
-
 static int
 vioif_tx_intr(void *arg)
 {
-	struct vioif_txqueue *txq = arg;
-	struct virtqueue *vq = txq->txq_vq;
+	struct vioif_netqueue *netq = arg;
+	struct virtqueue *vq = netq->netq_vq;
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	u_int limit;
 
-	limit = sc->sc_tx_intr_process_limit;
-
-	mutex_enter(txq->txq_lock);
+	mutex_enter(&netq->netq_lock);
 
 	/* tx handler is already running in softint/workqueue */
-	if (txq->txq_running_handle)
+	if (netq->netq_running_handle)
 		goto done;
 
-	if (txq->txq_stopping)
+	if (netq->netq_stopping)
 		goto done;
 
-	txq->txq_running_handle = true;
+	netq->netq_running_handle = true;
 
 	virtio_stop_vq_intr(vsc, vq);
-	txq->txq_workqueue = sc->sc_txrx_workqueue_sysctl;
-	vioif_tx_handle_locked(txq, limit);
+	netq->netq_workqueue = sc->sc_txrx_workqueue_sysctl;
+	limit = sc->sc_tx_intr_process_limit;
+	vioif_tx_handle_locked(netq, limit);
 
 done:
-	mutex_exit(txq->txq_lock);
+	mutex_exit(&netq->netq_lock);
 	return 1;
 }
 
 static void
-vioif_tx_handle(void *xtxq)
+vioif_tx_handle(void *xnetq)
 {
-	struct vioif_txqueue *txq = xtxq;
-	struct virtqueue *vq = txq->txq_vq;
+	struct vioif_netqueue *netq = xnetq;
+	struct virtqueue *vq = netq->netq_vq;
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	u_int limit;
 
-	mutex_enter(txq->txq_lock);
+	mutex_enter(&netq->netq_lock);
 
-	KASSERT(txq->txq_running_handle);
+	KASSERT(netq->netq_running_handle);
 
-	if (txq->txq_stopping) {
-		txq->txq_running_handle = false;
+	if (netq->netq_stopping) {
+		netq->netq_running_handle = false;
 		goto done;
 	}
 
 	limit = sc->sc_tx_process_limit;
-	vioif_tx_handle_locked(txq, limit);
+	vioif_tx_handle_locked(netq, limit);
 
 done:
-	mutex_exit(txq->txq_lock);
-}
-
-static void
-vioif_tx_sched_handle(struct vioif_softc *sc, struct vioif_txqueue *txq)
-{
-
-	KASSERT(mutex_owned(txq->txq_lock));
-
-	if (txq->txq_stopping)
-		return;
-
-	if (txq->txq_workqueue)
-		vioif_work_add(sc->sc_txrx_workqueue, &txq->txq_work);
-	else
-		softint_schedule(txq->txq_handle_si);
+	mutex_exit(&netq->netq_lock);
 }
 
 static void
 vioif_tx_queue_clear(struct vioif_softc *sc, struct virtio_softc *vsc,
-    struct vioif_txqueue *txq)
+    struct vioif_netqueue *netq)
 {
 	struct vioif_net_map *map;
 	unsigned int i, vq_num;
 	bool more;
 
-	mutex_enter(txq->txq_lock);
+	mutex_enter(&netq->netq_lock);
 
-	vq_num = txq->txq_vq->vq_num;
+	vq_num = netq->netq_vq->vq_num;
 	for (;;) {
-		more = vioif_tx_deq_locked(sc, vsc, txq, vq_num);
+		more = vioif_tx_deq_locked(sc, vsc, netq, vq_num);
 		if (more == false)
 			break;
 	}
 
 	for (i = 0; i < vq_num; i++) {
-		map = &txq->txq_maps[i];
+		map = &netq->netq_maps[i];
 		if (map->vnm_mbuf == NULL)
 			continue;
 
@@ -1925,21 +1950,21 @@ vioif_tx_queue_clear(struct vioif_softc 
 		m_freem(map->vnm_mbuf);
 		map->vnm_mbuf = NULL;
 	}
-	mutex_exit(txq->txq_lock);
+	mutex_exit(&netq->netq_lock);
 }
 
 static bool
 vioif_tx_deq_locked(struct vioif_softc *sc, struct virtio_softc *vsc,
-    struct vioif_txqueue *txq, u_int limit)
+    struct vioif_netqueue *netq, u_int limit)
 {
-	struct virtqueue *vq = txq->txq_vq;
+	struct virtqueue *vq = netq->netq_vq;
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 	struct vioif_net_map *map;
 	struct mbuf *m;
 	int slot, len;
 	bool more = false;
 
-	KASSERT(mutex_owned(txq->txq_lock));
+	KASSERT(mutex_owned(&netq->netq_lock));
 
 	if (virtio_vq_is_enqueued(vsc, vq) == false)
 		return false;
@@ -1953,7 +1978,7 @@ vioif_tx_deq_locked(struct vioif_softc *
 		if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
 			break;
 
-		map = &txq->txq_maps[slot];
+		map = &netq->netq_maps[slot];
 		KASSERT(map->vnm_mbuf != NULL);
 
 		bus_dmamap_sync(virtio_dmat(vsc), map->vnm_hdr_map,
@@ -2444,7 +2469,8 @@ static void
 vioif_update_link_status(struct vioif_softc *sc)
 {
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
-	struct vioif_txqueue *txq;
+	struct vioif_netqueue *netq;
+	struct vioif_tx_context *txc;
 	bool active;
 	int link, i;
 
@@ -2459,11 +2485,12 @@ vioif_update_link_status(struct vioif_so
 
 	active = VIOIF_IS_LINK_ACTIVE(sc);
 	for (i = 0; i < sc->sc_act_nvq_pairs; i++) {
-		txq = &sc->sc_txq[i];
+		netq = &sc->sc_netqs[VIOIF_NETQ_TXQID(i)];
 
-		mutex_enter(txq->txq_lock);
-		txq->txq_link_active = active;
-		mutex_exit(txq->txq_lock);
+		mutex_enter(&netq->netq_lock);
+		txc = netq->netq_ctx;
+		txc->txc_link_active = active;
+		mutex_exit(&netq->netq_lock);
 	}
 
 	if_link_state_change(ifp, sc->sc_link_state);
@@ -2628,31 +2655,34 @@ out:
 static void
 vioif_setup_stats(struct vioif_softc *sc)
 {
-	struct vioif_rxqueue *rxq;
-	struct vioif_txqueue *txq;
-	int i;
-
-	for (i = 0; i < sc->sc_max_nvq_pairs; i++) {
-		rxq = &sc->sc_rxq[i];
-		txq = &sc->sc_txq[i];
-
-		snprintf(txq->txq_evgroup, sizeof(txq->txq_evgroup), "%s-TX%d",
-		    device_xname(sc->sc_dev), i);
-		evcnt_attach_dynamic(&txq->txq_defrag_failed, EVCNT_TYPE_MISC,
-		    NULL, txq->txq_evgroup, "tx m_defrag() failed");
-		evcnt_attach_dynamic(&txq->txq_mbuf_load_failed, EVCNT_TYPE_MISC,
-		    NULL, txq->txq_evgroup, "tx dmamap load failed");
-		evcnt_attach_dynamic(&txq->txq_enqueue_reserve_failed, EVCNT_TYPE_MISC,
-		    NULL, txq->txq_evgroup, "virtio_enqueue_reserve failed");
-
-		snprintf(rxq->rxq_evgroup, sizeof(rxq->rxq_evgroup), "%s-RX%d",
-		    device_xname(sc->sc_dev), i);
-		evcnt_attach_dynamic(&rxq->rxq_mbuf_enobufs, EVCNT_TYPE_MISC,
-		    NULL, rxq->rxq_evgroup, "no receive buffer");
-		evcnt_attach_dynamic(&rxq->rxq_mbuf_load_failed, EVCNT_TYPE_MISC,
-		    NULL, rxq->rxq_evgroup, "tx dmamap load failed");
-		evcnt_attach_dynamic(&rxq->rxq_enqueue_reserve_failed, EVCNT_TYPE_MISC,
-		    NULL, rxq->rxq_evgroup, "virtio_enqueue_reserve failed");
+	struct vioif_netqueue *netq;
+	struct vioif_tx_context *txc;
+	struct vioif_rx_context *rxc;
+	size_t i, netq_num;
+
+	netq_num = sc->sc_max_nvq_pairs * 2;
+	for (i = 0; i < netq_num; i++) {
+		netq = &sc->sc_netqs[i];
+		evcnt_attach_dynamic(&netq->netq_mbuf_load_failed, EVCNT_TYPE_MISC,
+		    NULL, netq->netq_evgroup, "failed to load mbuf to DMA");
+		evcnt_attach_dynamic(&netq->netq_enqueue_reserve_failed,
+		    EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
+		    "virtio_enqueue_reserve failed");
+
+		switch (VIOIF_NETQ_DIR(i)) {
+		case VIOIF_NETQ_RX:
+			rxc = netq->netq_ctx;
+			evcnt_attach_dynamic(&rxc->rxc_mbuf_enobufs,
+			    EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
+			    "no receive buffer");
+			break;
+		case VIOIF_NETQ_TX:
+			txc = netq->netq_ctx;
+			evcnt_attach_dynamic(&txc->txc_defrag_failed,
+			    EVCNT_TYPE_MISC, NULL, netq->netq_evgroup,
+			    "m_defrag() failed");
+			break;
+		}
 	}
 
 	evcnt_attach_dynamic(&sc->sc_ctrlq.ctrlq_cmd_load_failed, EVCNT_TYPE_MISC,
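
Since the tx/rx-specific fields now sit behind the opaque netq_ctx
pointer, callers recover the typed context according to the queue
direction. A minimal sketch of that access pattern follows;
vioif_netq_txctx() is a hypothetical helper written only for
illustration and is not part of this commit (the driver assigns and
reads netq_ctx directly in the tx and rx paths):

	/*
	 * Illustration only: recovering the direction-specific context
	 * stored in netq_ctx.  Hypothetical helper, not in the commit.
	 */
	static struct vioif_tx_context *
	vioif_netq_txctx(struct vioif_softc *sc, size_t qid)
	{
		struct vioif_netqueue *netq = &sc->sc_netqs[qid];

		KASSERT(VIOIF_NETQ_DIR(qid) == VIOIF_NETQ_TX);
		return netq->netq_ctx;
	}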
