Module Name:    src
Committed By:   martin
Date:           Sat May 13 10:56:10 UTC 2023
Modified Files:
	src/sys/dev/pci [netbsd-10]: if_vioif.c ld_virtio.c vio9p.c viomb.c
	    viornd.c vioscsi.c virtio.c virtio_pci.c virtioreg.h virtiovar.h
	src/sys/dev/virtio [netbsd-10]: viocon.c virtio_mmio.c

Log Message:
Pull up following revision(s) (requested by yamaguchi in ticket #139):

	sys/dev/pci/vioscsi.c: revision 1.31
	sys/dev/pci/vio9p.c: revision 1.10
	sys/dev/pci/vioscsi.c: revision 1.32
	sys/dev/pci/vio9p.c: revision 1.11
	sys/dev/pci/vioscsi.c: revision 1.33
	sys/dev/pci/ld_virtio.c: revision 1.31
	sys/dev/virtio/viocon.c: revision 1.6
	sys/dev/pci/vioscsi.c: revision 1.34
	sys/dev/pci/ld_virtio.c: revision 1.32
	sys/dev/virtio/viocon.c: revision 1.7
	sys/dev/virtio/viocon.c: revision 1.8
	sys/dev/pci/vioscsi.c: revision 1.36
	sys/dev/pci/virtioreg.h: revision 1.12
	sys/dev/pci/viornd.c: revision 1.19
	sys/dev/pci/virtio.c: revision 1.66
	sys/dev/pci/virtio.c: revision 1.67
	sys/dev/pci/virtio.c: revision 1.68
	sys/dev/pci/if_vioif.c: revision 1.103
	sys/dev/pci/virtio.c: revision 1.69
	sys/dev/pci/if_vioif.c: revision 1.104
	sys/dev/pci/virtio_pci.c: revision 1.40
	sys/dev/virtio/virtio_mmio.c: revision 1.8
	sys/dev/virtio/virtio_mmio.c: revision 1.9
	sys/dev/pci/viomb.c: revision 1.14
	sys/dev/pci/viomb.c: revision 1.15
	sys/dev/pci/viomb.c: revision 1.17
	sys/dev/pci/viornd.c: revision 1.20
	sys/dev/pci/viornd.c: revision 1.21
	sys/dev/pci/virtiovar.h: revision 1.25
	sys/dev/pci/virtiovar.h: revision 1.26
	sys/dev/pci/virtiovar.h: revision 1.27
	sys/dev/pci/virtiovar.h: revision 1.28
	sys/dev/pci/virtio.c: revision 1.70
	sys/dev/pci/virtio.c: revision 1.71
	sys/dev/pci/virtio.c: revision 1.72
	sys/dev/pci/virtio.c: revision 1.73
	sys/dev/pci/virtio.c: revision 1.74
	sys/dev/pci/virtio_pci.c: revision 1.39

Set virtqueues in virtio_child_attach_finish

The number of virtqueues may change for some VirtIO devices (e.g.
vioif(4)), and it is only fixed after feature negotiation, so the
virtqueue configuration is moved into that function.

viocon(4): do not allocate unused virtqueues

viocon(4) allocated 4 virtqueues but only used 2 of them (0 and 1).

Added functions to set the interrupt handler and index of a virtqueue

Added a check that memory was actually allocated before releasing the
resource

Set up virtqueues after registering them with virtio_softc

Restore the fetch of qsize.

Mark as MPSAFE.

virtio(4): Avoid name collision with global intrhand on sparc64.

Pacifies -Werror=shadow.  No functional change intended.

Use PRIuBUSSIZE to print bus_size_t variables.

virtio(4): Fix sizing of virtqueue allocation.

vq->vq_avail[0].ring is a zero-length array, and thus sizeof is zero;
likewise vq->vq_used[0].ring.  Use vq->vq_avail[0].ring[0] and
vq->vq_used[0].ring[0] to fix this and restore the previous allocation
sizing logic.

XXX We shouldn't use zero-length arrays here -- they are asking for
trouble like this, and C99 has a standard way to express what we're
actually trying to get at: flexible array members.

PR kern/57304

virtio(4): Use flexible array members, not zero-length arrays.

This enables the compiler to detect sizeof mistakes like PR kern/57304.

Use a descriptor chain for free slots instead of the vq_entry list

Descriptors can be chained among themselves, and descriptors added to
the avail ring or used ring are already chained, but unused descriptors
were instead tracked in a separate linked list of vq_entry structures.
The chain is now also used for unused descriptors, which makes
virtio(4) simpler.

Added flags to store the attach status of a virtio device

This prevents a panic on reboot after a virtio device has called
virtio_child_attach_failed().
Fix wrong variable names

This fixes build errors in virtio_mmio.c.


To generate a diff of this commit:
cvs rdiff -u -r1.82.4.2 -r1.82.4.3 src/sys/dev/pci/if_vioif.c
cvs rdiff -u -r1.30 -r1.30.4.1 src/sys/dev/pci/ld_virtio.c
cvs rdiff -u -r1.9 -r1.9.4.1 src/sys/dev/pci/vio9p.c
cvs rdiff -u -r1.13 -r1.13.4.1 src/sys/dev/pci/viomb.c
cvs rdiff -u -r1.18 -r1.18.4.1 src/sys/dev/pci/viornd.c
cvs rdiff -u -r1.30 -r1.30.2.1 src/sys/dev/pci/vioscsi.c
cvs rdiff -u -r1.63.2.3 -r1.63.2.4 src/sys/dev/pci/virtio.c
cvs rdiff -u -r1.38 -r1.38.4.1 src/sys/dev/pci/virtio_pci.c
cvs rdiff -u -r1.11 -r1.11.2.1 src/sys/dev/pci/virtioreg.h
cvs rdiff -u -r1.24 -r1.24.4.1 src/sys/dev/pci/virtiovar.h
cvs rdiff -u -r1.5 -r1.5.4.1 src/sys/dev/virtio/viocon.c
cvs rdiff -u -r1.7 -r1.7.4.1 src/sys/dev/virtio/virtio_mmio.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
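The log entries above summarize several interface changes; a few short
sketches of the main ones follow, drawn from the diffs further down.
First, the new attach flow: virtqueue registration moves into
virtio_child_attach_finish(), so a child driver initializes each
virtqueue with virtio_init_vq() or virtio_init_vq_vqdone() before
allocating it and hands the whole array over at the end.  A minimal
sketch modeled on the viornd(4) hunk below, with error handling trimmed:

	virtio_child_attach_start(vsc, self, IPL_NET,
	    0, VIRTIO_COMMON_FLAG_BITS);

	/* register queue index and completion callback first ... */
	virtio_init_vq_vqdone(vsc, &sc->sc_vq, 0, viornd_vq_done);

	/* ... then allocate the ring; the index is no longer passed here */
	error = virtio_alloc_vq(vsc, &sc->sc_vq, VIORND_BUFSIZE, 1,
	    "Entropy request");

	/*
	 * Queues, config-change callback and interrupt flags are handed
	 * over in one place, once feature negotiation has fixed their
	 * number.
	 */
	error = virtio_child_attach_finish(vsc, &sc->sc_vq, 1,
	    NULL, VIRTIO_F_INTR_MPSAFE);

Drivers whose queue count depends on negotiated features, such as
vioif(4), size the virtqueue array after virtio_features() and may
request per-queue interrupts with the new VIRTIO_F_INTR_PERVQ flag.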
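For the viocon(4) over-allocation, the fix makes the per-port queue
layout explicit: two queues per port, receive then transmit, so only
2 * maxports virtqueues are allocated instead of 2 * (maxports + 1).
From the viocon.c hunks below:

	#define VIOCON_PORT_RX	0
	#define VIOCON_PORT_TX	1
	#define VIOCON_PORT_NQS	2

	nvqs = VIOCON_PORT_NQS * maxports;
	rxidx = (portidx * VIOCON_PORT_NQS) + VIOCON_PORT_RX;
	txidx = (portidx * VIOCON_PORT_NQS) + VIOCON_PORT_TX;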
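bus_size_t is not the same width on every port, so the allocation
message in virtio_alloc_vq() now prints it with the PRIuBUSSIZE format
macro instead of a plain %d:

	aprint_verbose_dev(sc->sc_dev,
	    "allocated %" PRIuBUSSIZE " byte for virtqueue %d for %s, "
	    "size %d\n", allocsize, vq->vq_index, name, vq_num);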
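The allocation-sizing bug (PR kern/57304) comes down to sizeof of a
zero-length array being 0.  A sketch of the pitfall and its fix, using
the vring_avail definition from virtioreg.h and the sizing code from
virtio_alloc_vq():

	struct vring_avail {
		uint16_t flags;
		uint16_t idx;
		uint16_t ring[];	/* flexible array member; was ring[0] */
	} __packed;

	/*
	 * sizeof(vq->vq_avail[0].ring) was 0 for the zero-length array,
	 * so the ring entries silently dropped out of the size
	 * computation.  Sizing by one element restores the intended
	 * arithmetic, and with a flexible array member the compiler can
	 * flag a stray sizeof on the array itself.
	 */
	size_avail = sizeof(uint16_t) * hdrlen
	    + sizeof(vq->vq_avail[0].ring[0]) * vq_num;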
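The vq_entry free list is gone: unused descriptors are now chained
through their own next fields, terminated by a value that can never be
a valid index (the queue size is at most 2^15).  How the free chain is
rebuilt, following virtio_reset_vq() in the new virtio.c:

	#define VRING_DESC_CHAIN_END	32768

	/* link every descriptor to the next one and terminate the chain */
	vds = vq->vq_desc;
	for (i = 0; i < vq_size - 1; i++)
		vds[i].next = virtio_rw16(sc, i + 1);
	vds[i].next = virtio_rw16(sc, VRING_DESC_CHAIN_END);
	vq->vq_free_idx = 0;

vq_alloc_slot() then pops descriptors off this chain under the new
vq_freedesc_lock, and vq_free_slot() pushes a chain back by walking
VRING_DESC_F_NEXT, so no separate bookkeeping structure is needed.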
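Finally, the attach-status flags replace the old VIRTIO_CHILD_FAILED
pointer sentinel stored in sc_child; per the log entry above, this is
what prevents the panic on reboot after virtio_child_attach_failed().
The new state bits from virtiovar.h and a detach-path check, as in the
virtio_pci.c hunk below:

	uint32_t		sc_child_flags;
	#define VIRTIO_CHILD_ATTACH_FINISHED	__BIT(0)
	#define VIRTIO_CHILD_ATTACH_FAILED	__BIT(1)
	#define VIRTIO_CHILD_DETACHED		__BIT(2)

	/* e.g. in virtio_pci_detach(): the child must be gone by now */
	r = config_detach_children(self, flags);
	if (r != 0)
		return r;
	KASSERT(ISSET(sc->sc_child_flags, VIRTIO_CHILD_DETACHED));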
Modified files: Index: src/sys/dev/pci/if_vioif.c diff -u src/sys/dev/pci/if_vioif.c:1.82.4.2 src/sys/dev/pci/if_vioif.c:1.82.4.3 --- src/sys/dev/pci/if_vioif.c:1.82.4.2 Sat Apr 1 10:31:06 2023 +++ src/sys/dev/pci/if_vioif.c Sat May 13 10:56:10 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: if_vioif.c,v 1.82.4.2 2023/04/01 10:31:06 martin Exp $ */ +/* $NetBSD: if_vioif.c,v 1.82.4.3 2023/05/13 10:56:10 martin Exp $ */ /* * Copyright (c) 2020 The NetBSD Foundation, Inc. @@ -27,7 +27,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.82.4.2 2023/04/01 10:31:06 martin Exp $"); +__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.82.4.3 2023/05/13 10:56:10 martin Exp $"); #ifdef _KERNEL_OPT #include "opt_net_mpsafe.h" @@ -465,7 +465,7 @@ vioif_attach(device_t parent, device_t s u_int softint_flags; int r, i, req_flags; char xnamebuf[MAXCOMLEN]; - size_t netq_num; + size_t nvqs; if (virtio_child(vsc) != NULL) { aprint_normal(": child already attached for %s; " @@ -509,11 +509,11 @@ vioif_attach(device_t parent, device_t s #ifdef VIOIF_MULTIQ req_features |= VIRTIO_NET_F_MQ; #endif - virtio_child_attach_start(vsc, self, IPL_NET, NULL, - vioif_config_change, virtio_vq_intrhand, req_flags, - req_features, VIRTIO_NET_FLAG_BITS); + virtio_child_attach_start(vsc, self, IPL_NET, + req_features, VIRTIO_NET_FLAG_BITS); features = virtio_features(vsc); + if (features == 0) goto err; @@ -565,10 +565,12 @@ vioif_attach(device_t parent, device_t s /* Limit the number of queue pairs to use */ sc->sc_req_nvq_pairs = MIN(sc->sc_max_nvq_pairs, ncpu); + + if (sc->sc_max_nvq_pairs > 1) + req_flags |= VIRTIO_F_INTR_PERVQ; } vioif_alloc_queues(sc); - virtio_child_attach_set_vqs(vsc, sc->sc_vqs, sc->sc_req_nvq_pairs); #ifdef VIOIF_MPSAFE softint_flags = SOFTINT_NET | SOFTINT_MPSAFE; @@ -579,21 +581,25 @@ vioif_attach(device_t parent, device_t s /* * Initialize network queues */ - netq_num = sc->sc_max_nvq_pairs * 2; - for (i = 0; i < netq_num; i++) { + nvqs = sc->sc_max_nvq_pairs * 2; + for (i = 0; i < nvqs; i++) { r = vioif_netqueue_init(sc, vsc, i, softint_flags); if (r != 0) goto err; } if (sc->sc_has_ctrl) { - int ctrlq_idx = sc->sc_max_nvq_pairs * 2; + int ctrlq_idx = nvqs; + + nvqs++; /* * Allocating a virtqueue for control channel */ sc->sc_ctrlq.ctrlq_vq = &sc->sc_vqs[ctrlq_idx]; - r = virtio_alloc_vq(vsc, ctrlq->ctrlq_vq, ctrlq_idx, - NBPG, 1, "control"); + virtio_init_vq(vsc, ctrlq->ctrlq_vq, ctrlq_idx, + vioif_ctrl_intr, ctrlq); + + r = virtio_alloc_vq(vsc, ctrlq->ctrlq_vq, NBPG, 1, "control"); if (r != 0) { aprint_error_dev(self, "failed to allocate " "a virtqueue for control channel, error code %d\n", @@ -602,9 +608,6 @@ vioif_attach(device_t parent, device_t s sc->sc_has_ctrl = false; cv_destroy(&ctrlq->ctrlq_wait); mutex_destroy(&ctrlq->ctrlq_wait_lock); - } else { - ctrlq->ctrlq_vq->vq_intrhand = vioif_ctrl_intr; - ctrlq->ctrlq_vq->vq_intrhand_arg = (void *) ctrlq; } } @@ -618,7 +621,9 @@ vioif_attach(device_t parent, device_t s if (vioif_alloc_mems(sc) < 0) goto err; - if (virtio_child_attach_finish(vsc) != 0) + r = virtio_child_attach_finish(vsc, sc->sc_vqs, nvqs, + vioif_config_change, req_flags); + if (r != 0) goto err; if (vioif_setup_sysctl(sc) != 0) { @@ -656,8 +661,8 @@ vioif_attach(device_t parent, device_t s return; err: - netq_num = sc->sc_max_nvq_pairs * 2; - for (i = 0; i < netq_num; i++) { + nvqs = sc->sc_max_nvq_pairs * 2; + for (i = 0; i < nvqs; i++) { vioif_netqueue_teardown(sc, vsc, i); } @@ -1468,15 +1473,15 @@ vioif_netqueue_init(struct vioif_softc * "%s-%s", 
device_xname(sc->sc_dev), qname); mutex_init(&netq->netq_lock, MUTEX_DEFAULT, IPL_NET); - r = virtio_alloc_vq(vsc, vq, qid, + virtio_init_vq(vsc, vq, qid, params[dir].intrhand, netq); + + r = virtio_alloc_vq(vsc, vq, params[dir].segsize + sc->sc_hdr_size, params[dir].nsegs, qname); if (r != 0) goto err; netq->netq_vq = vq; - netq->netq_vq->vq_intrhand = params[dir].intrhand; - netq->netq_vq->vq_intrhand_arg = netq; netq->netq_softint = softint_establish(softint_flags, params[dir].sihand, netq); if (netq->netq_softint == NULL) { @@ -1532,8 +1537,6 @@ err: softint_disestablish(netq->netq_softint); netq->netq_softint = NULL; } - netq->netq_vq->vq_intrhand = NULL; - netq->netq_vq->vq_intrhand_arg = NULL; virtio_free_vq(vsc, vq); mutex_destroy(&netq->netq_lock); Index: src/sys/dev/pci/ld_virtio.c diff -u src/sys/dev/pci/ld_virtio.c:1.30 src/sys/dev/pci/ld_virtio.c:1.30.4.1 --- src/sys/dev/pci/ld_virtio.c:1.30 Wed Apr 13 10:42:12 2022 +++ src/sys/dev/pci/ld_virtio.c Sat May 13 10:56:10 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: ld_virtio.c,v 1.30 2022/04/13 10:42:12 uwe Exp $ */ +/* $NetBSD: ld_virtio.c,v 1.30.4.1 2023/05/13 10:56:10 martin Exp $ */ /* * Copyright (c) 2010 Minoura Makoto. @@ -26,7 +26,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.30 2022/04/13 10:42:12 uwe Exp $"); +__KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.30.4.1 2023/05/13 10:56:10 martin Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -275,8 +275,7 @@ ld_virtio_attach(device_t parent, device sc->sc_dev = self; sc->sc_virtio = vsc; - virtio_child_attach_start(vsc, self, IPL_BIO, &sc->sc_vq, - NULL, virtio_vq_intr, VIRTIO_F_INTR_MSIX, + virtio_child_attach_start(vsc, self, IPL_BIO, (VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX | VIRTIO_BLK_F_GEOMETRY | VIRTIO_BLK_F_RO | VIRTIO_BLK_F_BLK_SIZE | VIRTIO_BLK_F_FLUSH | VIRTIO_BLK_F_CONFIG_WCE), @@ -333,14 +332,17 @@ ld_virtio_attach(device_t parent, device /* 2 for the minimum size */ maxnsegs += VIRTIO_BLK_MIN_SEGMENTS; - if (virtio_alloc_vq(vsc, &sc->sc_vq, 0, maxxfersize, maxnsegs, + virtio_init_vq_vqdone(vsc, &sc->sc_vq, 0, + ld_virtio_vq_done); + + if (virtio_alloc_vq(vsc, &sc->sc_vq, maxxfersize, maxnsegs, "I/O request") != 0) { goto err; } qsize = sc->sc_vq.vq_num; - sc->sc_vq.vq_done = ld_virtio_vq_done; - if (virtio_child_attach_finish(vsc) != 0) + if (virtio_child_attach_finish(vsc, &sc->sc_vq, 1, + NULL, VIRTIO_F_INTR_MSIX) != 0) goto err; ld->sc_dv = self; Index: src/sys/dev/pci/vio9p.c diff -u src/sys/dev/pci/vio9p.c:1.9 src/sys/dev/pci/vio9p.c:1.9.4.1 --- src/sys/dev/pci/vio9p.c:1.9 Wed Apr 20 22:08:10 2022 +++ src/sys/dev/pci/vio9p.c Sat May 13 10:56:10 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: vio9p.c,v 1.9 2022/04/20 22:08:10 uwe Exp $ */ +/* $NetBSD: vio9p.c,v 1.9.4.1 2023/05/13 10:56:10 martin Exp $ */ /* * Copyright (c) 2019 Internet Initiative Japan, Inc. 
@@ -26,7 +26,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: vio9p.c,v 1.9 2022/04/20 22:08:10 uwe Exp $"); +__KERNEL_RCSID(0, "$NetBSD: vio9p.c,v 1.9.4.1 2023/05/13 10:56:10 martin Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -500,25 +500,19 @@ vio9p_attach(device_t parent, device_t s sc->sc_dev = self; sc->sc_virtio = vsc; - virtio_child_attach_start(vsc, self, IPL_VM, sc->sc_vq, - NULL, virtio_vq_intr, - VIRTIO_F_INTR_MPSAFE | VIRTIO_F_INTR_SOFTINT, - VIO9P_F_MOUNT_TAG, - VIO9P_FLAG_BITS); + virtio_child_attach_start(vsc, self, IPL_VM, + VIO9P_F_MOUNT_TAG, VIO9P_FLAG_BITS); features = virtio_features(vsc); if ((features & VIO9P_F_MOUNT_TAG) == 0) goto err_none; - error = virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, VIO9P_MAX_REQLEN, + virtio_init_vq_vqdone(vsc, &sc->sc_vq[0], 0, vio9p_request_done); + error = virtio_alloc_vq(vsc, &sc->sc_vq[0], VIO9P_MAX_REQLEN, VIO9P_N_SEGMENTS * 2, "vio9p"); if (error != 0) goto err_none; - sc->sc_vq[0].vq_done = vio9p_request_done; - - virtio_child_attach_set_vqs(vsc, sc->sc_vq, 1); - sc->sc_buf_tx = kmem_alloc(VIO9P_MAX_REQLEN, KM_SLEEP); sc->sc_buf_rx = kmem_alloc(VIO9P_MAX_REQLEN, KM_SLEEP); @@ -559,7 +553,9 @@ vio9p_attach(device_t parent, device_t s vio9p_read_config(sc); aprint_normal_dev(self, "tagged as %s\n", sc->sc_tag); - error = virtio_child_attach_finish(vsc); + error = virtio_child_attach_finish(vsc, sc->sc_vq, + __arraycount(sc->sc_vq), NULL, + VIRTIO_F_INTR_MPSAFE | VIRTIO_F_INTR_SOFTINT); if (error != 0) goto err_mutex; Index: src/sys/dev/pci/viomb.c diff -u src/sys/dev/pci/viomb.c:1.13 src/sys/dev/pci/viomb.c:1.13.4.1 --- src/sys/dev/pci/viomb.c:1.13 Wed Apr 13 10:42:12 2022 +++ src/sys/dev/pci/viomb.c Sat May 13 10:56:10 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: viomb.c,v 1.13 2022/04/13 10:42:12 uwe Exp $ */ +/* $NetBSD: viomb.c,v 1.13.4.1 2023/05/13 10:56:10 martin Exp $ */ /* * Copyright (c) 2010 Minoura Makoto. 
@@ -26,7 +26,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: viomb.c,v 1.13 2022/04/13 10:42:12 uwe Exp $"); +__KERNEL_RCSID(0, "$NetBSD: viomb.c,v 1.13.4.1 2023/05/13 10:56:10 martin Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -148,8 +148,7 @@ viomb_attach(device_t parent, device_t s sc->sc_dev = self; sc->sc_virtio = vsc; - virtio_child_attach_start(vsc, self, IPL_VM, sc->sc_vq, - viomb_config_change, virtio_vq_intr, 0, + virtio_child_attach_start(vsc, self, IPL_VM, VIRTIO_BALLOON_F_MUST_TELL_HOST, VIRTIO_BALLOON_FLAG_BITS); features = virtio_features(vsc); @@ -164,18 +163,20 @@ viomb_attach(device_t parent, device_t s mutex_init(&sc->sc_waitlock, MUTEX_DEFAULT, IPL_VM); /* spin */ cv_init(&sc->sc_wait, "balloon"); - if (virtio_alloc_vq(vsc, &sc->sc_vq[VQ_INFLATE], 0, + virtio_init_vq_vqdone(vsc, &sc->sc_vq[VQ_INFLATE], VQ_INFLATE, + inflateq_done); + virtio_init_vq_vqdone(vsc, &sc->sc_vq[VQ_DEFLATE], VQ_DEFLATE, + deflateq_done); + + if (virtio_alloc_vq(vsc, &sc->sc_vq[VQ_INFLATE], sizeof(uint32_t)*PGS_PER_REQ, 1, "inflate") != 0) goto err_mutex; - if (virtio_alloc_vq(vsc, &sc->sc_vq[VQ_DEFLATE], 1, + if (virtio_alloc_vq(vsc, &sc->sc_vq[VQ_DEFLATE], sizeof(uint32_t)*PGS_PER_REQ, 1, "deflate") != 0) goto err_vq0; - sc->sc_vq[VQ_INFLATE].vq_done = inflateq_done; - sc->sc_vq[VQ_DEFLATE].vq_done = deflateq_done; - if (bus_dmamap_create(virtio_dmat(vsc), sizeof(uint32_t)*PGS_PER_REQ, 1, sizeof(uint32_t)*PGS_PER_REQ, 0, BUS_DMA_NOWAIT, &sc->sc_req.bl_dmamap)) { @@ -190,7 +191,8 @@ viomb_attach(device_t parent, device_t s goto err_dmamap; } - if (virtio_child_attach_finish(vsc) != 0) + if (virtio_child_attach_finish(vsc, sc->sc_vq, __arraycount(sc->sc_vq), + viomb_config_change, VIRTIO_F_INTR_MPSAFE) != 0) goto err_out; if (kthread_create(PRI_IDLE, KTHREAD_MPSAFE, NULL, Index: src/sys/dev/pci/viornd.c diff -u src/sys/dev/pci/viornd.c:1.18 src/sys/dev/pci/viornd.c:1.18.4.1 --- src/sys/dev/pci/viornd.c:1.18 Thu Apr 14 19:47:14 2022 +++ src/sys/dev/pci/viornd.c Sat May 13 10:56:10 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: viornd.c,v 1.18 2022/04/14 19:47:14 riastradh Exp $ */ +/* $NetBSD: viornd.c,v 1.18.4.1 2023/05/13 10:56:10 martin Exp $ */ /* $OpenBSD: viornd.c,v 1.1 2014/01/21 21:14:58 sf Exp $ */ /* @@ -176,11 +176,12 @@ viornd_attach(device_t parent, device_t goto load_failed; } - virtio_child_attach_start(vsc, self, IPL_NET, &sc->sc_vq, - NULL, virtio_vq_intr, 0, + virtio_child_attach_start(vsc, self, IPL_NET, 0, VIRTIO_COMMON_FLAG_BITS); - error = virtio_alloc_vq(vsc, &sc->sc_vq, 0, VIORND_BUFSIZE, 1, + virtio_init_vq_vqdone(vsc, &sc->sc_vq, 0, viornd_vq_done); + + error = virtio_alloc_vq(vsc, &sc->sc_vq, VIORND_BUFSIZE, 1, "Entropy request"); if (error) { aprint_error_dev(sc->sc_dev, "can't alloc virtqueue: %d\n", @@ -189,7 +190,9 @@ viornd_attach(device_t parent, device_t } sc->sc_vq.vq_done = viornd_vq_done; - if (virtio_child_attach_finish(vsc) != 0) { + error = virtio_child_attach_finish(vsc, &sc->sc_vq, 1, + NULL, VIRTIO_F_INTR_MPSAFE); + if (error) { virtio_free_vq(vsc, &sc->sc_vq); goto vio_failed; } Index: src/sys/dev/pci/vioscsi.c diff -u src/sys/dev/pci/vioscsi.c:1.30 src/sys/dev/pci/vioscsi.c:1.30.2.1 --- src/sys/dev/pci/vioscsi.c:1.30 Tue Oct 11 22:03:37 2022 +++ src/sys/dev/pci/vioscsi.c Sat May 13 10:56:10 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: vioscsi.c,v 1.30 2022/10/11 22:03:37 andvar Exp $ */ +/* $NetBSD: vioscsi.c,v 1.30.2.1 2023/05/13 10:56:10 martin Exp $ */ /* $OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $ */ /* @@ -18,7 +18,7 @@ */ #include 
<sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.30 2022/10/11 22:03:37 andvar Exp $"); +__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.30.2.1 2023/05/13 10:56:10 martin Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -128,8 +128,7 @@ vioscsi_attach(device_t parent, device_t sc->sc_dev = self; - virtio_child_attach_start(vsc, self, ipl, sc->sc_vqs, - NULL, virtio_vq_intr, VIRTIO_F_INTR_MSIX, + virtio_child_attach_start(vsc, self, ipl, 0, VIRTIO_COMMON_FLAG_BITS); mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, ipl); @@ -149,7 +148,9 @@ vioscsi_attach(device_t parent, device_t sc->sc_seg_max = seg_max; for(i=0; i < __arraycount(sc->sc_vqs); i++) { - rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], i, MAXPHYS, + virtio_init_vq_vqdone(vsc, &sc->sc_vqs[i], i, + vioscsi_vq_done); + rv = virtio_alloc_vq(vsc, &sc->sc_vqs[i], MAXPHYS, VIRTIO_SCSI_MIN_SEGMENTS + howmany(MAXPHYS, NBPG), vioscsi_vq_names[i]); if (rv) { @@ -171,7 +172,9 @@ vioscsi_attach(device_t parent, device_t " max_lun %u\n", cmd_per_lun, qsize, seg_max, max_target, max_lun); - if (virtio_child_attach_finish(vsc) != 0) + if (virtio_child_attach_finish(vsc, sc->sc_vqs, + __arraycount(sc->sc_vqs), NULL, + VIRTIO_F_INTR_MSIX | VIRTIO_F_INTR_MPSAFE) != 0) goto err; /* @@ -184,6 +187,7 @@ vioscsi_attach(device_t parent, device_t adapt->adapt_max_periph = adapt->adapt_openings; adapt->adapt_request = vioscsi_scsipi_request; adapt->adapt_minphys = minphys; + adapt->adapt_flags = SCSIPI_ADAPT_MPSAFE; /* * Fill in the scsipi_channel. @@ -205,8 +209,7 @@ err: vioscsi_free_reqs(sc, vsc); for (i=0; i < __arraycount(sc->sc_vqs); i++) { - if (sc->sc_vqs[i].vq_num > 0) - virtio_free_vq(vsc, &sc->sc_vqs[i]); + virtio_free_vq(vsc, &sc->sc_vqs[i]); } virtio_child_attach_failed(vsc); Index: src/sys/dev/pci/virtio.c diff -u src/sys/dev/pci/virtio.c:1.63.2.3 src/sys/dev/pci/virtio.c:1.63.2.4 --- src/sys/dev/pci/virtio.c:1.63.2.3 Sun Apr 2 10:51:22 2023 +++ src/sys/dev/pci/virtio.c Sat May 13 10:56:10 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: virtio.c,v 1.63.2.3 2023/04/02 10:51:22 martin Exp $ */ +/* $NetBSD: virtio.c,v 1.63.2.4 2023/05/13 10:56:10 martin Exp $ */ /* * Copyright (c) 2020 The NetBSD Foundation, Inc. @@ -28,7 +28,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.63.2.3 2023/04/02 10:51:22 martin Exp $"); +__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.63.2.4 2023/05/13 10:56:10 martin Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -46,6 +46,13 @@ __KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1 #define MINSEG_INDIRECT 2 /* use indirect if nsegs >= this value */ +/* + * The maximum descriptor size is 2^15. Use that value as the end of + * descriptor chain terminator since it will never be a valid index + * in the descriptor table. 
+ */ +#define VRING_DESC_CHAIN_END 32768 + /* incomplete list */ static const char *virtio_device_name[] = { "unknown (0)", /* 0 */ @@ -61,8 +68,8 @@ static const char *virtio_device_name[] }; #define NDEVNAMES __arraycount(virtio_device_name) -static void virtio_init_vq(struct virtio_softc *, - struct virtqueue *, const bool); +static void virtio_reset_vq(struct virtio_softc *, + struct virtqueue *); void virtio_set_status(struct virtio_softc *sc, int status) @@ -108,7 +115,7 @@ virtio_reinit_start(struct virtio_softc device_xname(sc->sc_dev), vq->vq_index); } - virtio_init_vq(sc, vq, true); + virtio_reset_vq(sc, vq); sc->sc_ops->setup_queue(sc, vq->vq_index, vq->vq_dmamap->dm_segs[0].ds_addr); } @@ -434,6 +441,31 @@ virtio_soft_intr(void *arg) (*sc->sc_intrhand)(sc); } +/* set to vq->vq_intrhand in virtio_init_vq_vqdone() */ +static int +virtio_vq_done(void *xvq) +{ + struct virtqueue *vq = xvq; + + return vq->vq_done(vq); +} + +static int +virtio_vq_intr(struct virtio_softc *sc) +{ + struct virtqueue *vq; + int i, r = 0; + + for (i = 0; i < sc->sc_nvqs; i++) { + vq = &sc->sc_vqs[i]; + if (virtio_vq_is_enqueued(sc, vq) == 1) { + r |= (*vq->vq_intrhand)(vq->vq_intrhand_arg); + } + } + + return r; +} + /* * dmamap sync operations for a virtqueue. */ @@ -564,44 +596,6 @@ virtio_vq_is_enqueued(struct virtio_soft } /* - * Scan vq, bus_dmamap_sync for the vqs (not for the payload), - * and calls (*vq_done)() if some entries are consumed. - * - * Can be used as sc_intrhand. - */ -int -virtio_vq_intr(struct virtio_softc *sc) -{ - struct virtqueue *vq; - int i, r = 0; - - for (i = 0; i < sc->sc_nvqs; i++) { - vq = &sc->sc_vqs[i]; - if (virtio_vq_is_enqueued(sc, vq) == 1) { - if (vq->vq_done) - r |= (*vq->vq_done)(vq); - } - } - - return r; -} - -int -virtio_vq_intrhand(struct virtio_softc *sc) -{ - struct virtqueue *vq; - int i, r = 0; - - for (i = 0; i < sc->sc_nvqs; i++) { - vq = &sc->sc_vqs[i]; - r |= (*vq->vq_intrhand)(vq->vq_intrhand_arg); - } - - return r; -} - - -/* * Increase the event index in order to delay interrupts. */ int @@ -707,15 +701,26 @@ virtio_start_vq_intr(struct virtio_softc /* * Initialize vq structure. 
*/ +/* + * Reset virtqueue parameters + */ static void -virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq, - const bool reinit) +virtio_reset_vq(struct virtio_softc *sc, struct virtqueue *vq) { + struct vring_desc *vds; int i, j; int vq_size = vq->vq_num; memset(vq->vq_vaddr, 0, vq->vq_bytesize); + /* build the descriptor chain for free slot management */ + vds = vq->vq_desc; + for (i = 0; i < vq_size - 1; i++) { + vds[i].next = virtio_rw16(sc, i + 1); + } + vds[i].next = virtio_rw16(sc, VRING_DESC_CHAIN_END); + vq->vq_free_idx = 0; + /* build the indirect descriptor chain */ if (vq->vq_indirect != NULL) { struct vring_desc *vd; @@ -729,66 +734,74 @@ virtio_init_vq(struct virtio_softc *sc, } } - /* free slot management */ - SIMPLEQ_INIT(&vq->vq_freelist); - for (i = 0; i < vq_size; i++) { - SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, &vq->vq_entries[i], - qe_list); - vq->vq_entries[i].qe_index = i; - } - if (!reinit) - mutex_init(&vq->vq_freelist_lock, MUTEX_SPIN, sc->sc_ipl); - /* enqueue/dequeue status */ vq->vq_avail_idx = 0; vq->vq_used_idx = 0; vq->vq_queued = 0; - if (!reinit) { - mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl); - mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl); - } vq_sync_uring_all(sc, vq, BUS_DMASYNC_PREREAD); vq->vq_queued++; } +/* Initialize vq */ +void +virtio_init_vq_vqdone(struct virtio_softc *sc, struct virtqueue *vq, + int index, int (*vq_done)(struct virtqueue *)) +{ + + virtio_init_vq(sc, vq, index, virtio_vq_done, vq); + vq->vq_done = vq_done; +} + +void +virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq, int index, + int (*func)(void *), void *arg) +{ + + memset(vq, 0, sizeof(*vq)); + + vq->vq_owner = sc; + vq->vq_num = sc->sc_ops->read_queue_size(sc, index); + vq->vq_index = index; + vq->vq_intrhand = func; + vq->vq_intrhand_arg = arg; +} + /* * Allocate/free a vq. */ int -virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int index, +virtio_alloc_vq(struct virtio_softc *sc, struct virtqueue *vq, int maxsegsize, int maxnsegs, const char *name) { - int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0; + bus_size_t size_desc, size_avail, size_used, size_indirect; + bus_size_t allocsize = 0, size_desc_avail; int rsegs, r, hdrlen; + unsigned int vq_num; #define VIRTQUEUE_ALIGN(n) roundup(n, VIRTIO_PAGE_SIZE) - /* Make sure callers allocate vqs in order */ - KASSERT(sc->sc_nvqs == index); - - memset(vq, 0, sizeof(*vq)); + vq_num = vq->vq_num; - vq_size = sc->sc_ops->read_queue_size(sc, index); - if (vq_size == 0) { + if (vq_num == 0) { aprint_error_dev(sc->sc_dev, "virtqueue not exist, index %d for %s\n", - index, name); + vq->vq_index, name); goto err; } hdrlen = sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX ? 
3 : 2; - /* allocsize1: descriptor table + avail ring + pad */ - allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc) * vq_size - + sizeof(uint16_t) * (hdrlen + vq_size)); - /* allocsize2: used ring + pad */ - allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t) * hdrlen - + sizeof(struct vring_used_elem) * vq_size); - /* allocsize3: indirect table */ - if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT) - allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size; - else - allocsize3 = 0; - allocsize = allocsize1 + allocsize2 + allocsize3; + size_desc = sizeof(vq->vq_desc[0]) * vq_num; + size_avail = sizeof(uint16_t) * hdrlen + + sizeof(vq->vq_avail[0].ring[0]) * vq_num; + size_used = sizeof(uint16_t) *hdrlen + + sizeof(vq->vq_used[0].ring[0]) * vq_num; + size_indirect = (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT) ? + sizeof(struct vring_desc) * maxnsegs * vq_num : 0; + + size_desc_avail = VIRTQUEUE_ALIGN(size_desc + size_avail); + size_used = VIRTQUEUE_ALIGN(size_used); + + allocsize = size_desc_avail + size_used + size_indirect; /* alloc and map the memory */ r = bus_dmamem_alloc(sc->sc_dmat, allocsize, VIRTIO_PAGE_SIZE, 0, @@ -796,80 +809,81 @@ virtio_alloc_vq(struct virtio_softc *sc, if (r != 0) { aprint_error_dev(sc->sc_dev, "virtqueue %d for %s allocation failed, " - "error code %d\n", index, name, r); + "error code %d\n", vq->vq_index, name, r); goto err; } + r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], rsegs, allocsize, &vq->vq_vaddr, BUS_DMA_WAITOK); if (r != 0) { aprint_error_dev(sc->sc_dev, "virtqueue %d for %s map failed, " - "error code %d\n", index, name, r); + "error code %d\n", vq->vq_index, name, r); goto err; } + r = bus_dmamap_create(sc->sc_dmat, allocsize, 1, allocsize, 0, BUS_DMA_WAITOK, &vq->vq_dmamap); if (r != 0) { aprint_error_dev(sc->sc_dev, "virtqueue %d for %s dmamap creation failed, " - "error code %d\n", index, name, r); + "error code %d\n", vq->vq_index, name, r); goto err; } + r = bus_dmamap_load(sc->sc_dmat, vq->vq_dmamap, vq->vq_vaddr, allocsize, NULL, BUS_DMA_WAITOK); if (r != 0) { aprint_error_dev(sc->sc_dev, "virtqueue %d for %s dmamap load failed, " - "error code %d\n", index, name, r); + "error code %d\n", vq->vq_index, name, r); goto err; } - /* remember addresses and offsets for later use */ - vq->vq_owner = sc; - vq->vq_num = vq_size; - vq->vq_index = index; - vq->vq_desc = vq->vq_vaddr; - vq->vq_availoffset = sizeof(struct vring_desc) * vq_size; - vq->vq_avail = (void *)(((char *)vq->vq_desc) + vq->vq_availoffset); - vq->vq_used_event = (uint16_t *)((char *)vq->vq_avail + - offsetof(struct vring_avail, ring[vq->vq_num])); - vq->vq_usedoffset = allocsize1; - vq->vq_used = (void *)(((char *)vq->vq_desc) + vq->vq_usedoffset); - vq->vq_avail_event = (uint16_t *)((char *)vq->vq_used + - offsetof(struct vring_used, ring[vq->vq_num])); - - if (allocsize3 > 0) { - vq->vq_indirectoffset = allocsize1 + allocsize2; - vq->vq_indirect = (void *)(((char *)vq->vq_desc) - + vq->vq_indirectoffset); - } vq->vq_bytesize = allocsize; vq->vq_maxsegsize = maxsegsize; vq->vq_maxnsegs = maxnsegs; - /* free slot management */ - vq->vq_entries = kmem_zalloc(sizeof(struct vq_entry) * vq_size, +#define VIRTIO_PTR(base, offset) (void *)((intptr_t)(base) + (offset)) + /* initialize vring pointers */ + vq->vq_desc = VIRTIO_PTR(vq->vq_vaddr, 0); + vq->vq_availoffset = size_desc; + vq->vq_avail = VIRTIO_PTR(vq->vq_vaddr, vq->vq_availoffset); + vq->vq_used_event = VIRTIO_PTR(vq->vq_avail, + offsetof(struct vring_avail, ring[vq_num])); + vq->vq_usedoffset = size_desc_avail; + 
vq->vq_used = VIRTIO_PTR(vq->vq_vaddr, vq->vq_usedoffset); + vq->vq_avail_event = VIRTIO_PTR(vq->vq_used, + offsetof(struct vring_used, ring[vq_num])); + + if (size_indirect > 0) { + vq->vq_indirectoffset = size_desc_avail + size_used; + vq->vq_indirect = VIRTIO_PTR(vq->vq_vaddr, + vq->vq_indirectoffset); + } +#undef VIRTIO_PTR + + vq->vq_descx = kmem_zalloc(sizeof(vq->vq_descx[0]) * vq_num, KM_SLEEP); - virtio_init_vq(sc, vq, false); - /* set the vq address */ - sc->sc_ops->setup_queue(sc, index, - vq->vq_dmamap->dm_segs[0].ds_addr); + mutex_init(&vq->vq_freedesc_lock, MUTEX_SPIN, sc->sc_ipl); + mutex_init(&vq->vq_aring_lock, MUTEX_SPIN, sc->sc_ipl); + mutex_init(&vq->vq_uring_lock, MUTEX_SPIN, sc->sc_ipl); + + virtio_reset_vq(sc, vq); aprint_verbose_dev(sc->sc_dev, - "allocated %u byte for virtqueue %d for %s, size %d\n", - allocsize, index, name, vq_size); - if (allocsize3 > 0) + "allocated %" PRIuBUSSIZE " byte for virtqueue %d for %s, " + "size %d\n", allocsize, vq->vq_index, name, vq_num); + if (size_indirect > 0) aprint_verbose_dev(sc->sc_dev, - "using %d byte (%d entries) indirect descriptors\n", - allocsize3, maxnsegs * vq_size); - - sc->sc_nvqs++; + "using %" PRIuBUSSIZE " byte (%d entries) indirect " + "descriptors\n", size_indirect, maxnsegs * vq_num); return 0; err: - sc->sc_ops->setup_queue(sc, index, 0); + sc->sc_ops->setup_queue(sc, vq->vq_index, 0); if (vq->vq_dmamap) bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap); if (vq->vq_vaddr) @@ -884,12 +898,18 @@ err: int virtio_free_vq(struct virtio_softc *sc, struct virtqueue *vq) { - struct vq_entry *qe; - int i = 0; + uint16_t s; + size_t i; + + if (vq->vq_vaddr == NULL) + return 0; /* device must be already deactivated */ /* confirm the vq is empty */ - SIMPLEQ_FOREACH(qe, &vq->vq_freelist, qe_list) { + s = vq->vq_free_idx; + i = 0; + while (s != virtio_rw16(sc, VRING_DESC_CHAIN_END)) { + s = vq->vq_desc[s].next; i++; } if (i != vq->vq_num) { @@ -903,49 +923,80 @@ virtio_free_vq(struct virtio_softc *sc, vq_sync_aring_all(sc, vq, BUS_DMASYNC_POSTWRITE); - kmem_free(vq->vq_entries, sizeof(*vq->vq_entries) * vq->vq_num); + kmem_free(vq->vq_descx, sizeof(vq->vq_descx[0]) * vq->vq_num); bus_dmamap_unload(sc->sc_dmat, vq->vq_dmamap); bus_dmamap_destroy(sc->sc_dmat, vq->vq_dmamap); bus_dmamem_unmap(sc->sc_dmat, vq->vq_vaddr, vq->vq_bytesize); bus_dmamem_free(sc->sc_dmat, &vq->vq_segs[0], 1); - mutex_destroy(&vq->vq_freelist_lock); + mutex_destroy(&vq->vq_freedesc_lock); mutex_destroy(&vq->vq_uring_lock); mutex_destroy(&vq->vq_aring_lock); memset(vq, 0, sizeof(*vq)); - sc->sc_nvqs--; - return 0; } /* * Free descriptor management. 
*/ -static struct vq_entry * -vq_alloc_entry(struct virtqueue *vq) +static int +vq_alloc_slot_locked(struct virtio_softc *sc, struct virtqueue *vq, + size_t nslots) { - struct vq_entry *qe; + struct vring_desc *vd; + uint16_t rv, tail; + size_t i; + + KASSERT(mutex_owned(&vq->vq_freedesc_lock)); + + tail = virtio_rw16(sc, vq->vq_free_idx); + for (i = 0; i < nslots - 1; i++) { + if (tail == VRING_DESC_CHAIN_END) + return VRING_DESC_CHAIN_END; - mutex_enter(&vq->vq_freelist_lock); - if (SIMPLEQ_EMPTY(&vq->vq_freelist)) { - mutex_exit(&vq->vq_freelist_lock); - return NULL; + vd = &vq->vq_desc[tail]; + vd->flags = virtio_rw16(sc, VRING_DESC_F_NEXT); + tail = virtio_rw16(sc, vd->next); } - qe = SIMPLEQ_FIRST(&vq->vq_freelist); - SIMPLEQ_REMOVE_HEAD(&vq->vq_freelist, qe_list); - mutex_exit(&vq->vq_freelist_lock); - return qe; + if (tail == VRING_DESC_CHAIN_END) + return VRING_DESC_CHAIN_END; + + rv = virtio_rw16(sc, vq->vq_free_idx); + + vd = &vq->vq_desc[tail]; + vd->flags = virtio_rw16(sc, 0); + vq->vq_free_idx = vd->next; + + return rv; +} +static uint16_t +vq_alloc_slot(struct virtio_softc *sc, struct virtqueue *vq, size_t nslots) +{ + uint16_t rv; + + mutex_enter(&vq->vq_freedesc_lock); + rv = vq_alloc_slot_locked(sc, vq, nslots); + mutex_exit(&vq->vq_freedesc_lock); + + return rv; } static void -vq_free_entry(struct virtqueue *vq, struct vq_entry *qe) +vq_free_slot(struct virtio_softc *sc, struct virtqueue *vq, uint16_t slot) { - mutex_enter(&vq->vq_freelist_lock); - SIMPLEQ_INSERT_TAIL(&vq->vq_freelist, qe, qe_list); - mutex_exit(&vq->vq_freelist_lock); + struct vring_desc *vd; + uint16_t s; - return; + mutex_enter(&vq->vq_freedesc_lock); + vd = &vq->vq_desc[slot]; + while ((vd->flags & virtio_rw16(sc, VRING_DESC_F_NEXT)) != 0) { + s = virtio_rw16(sc, vd->next); + vd = &vq->vq_desc[s]; + } + vd->next = vq->vq_free_idx; + vq->vq_free_idx = virtio_rw16(sc, slot); + mutex_exit(&vq->vq_freedesc_lock); } /* @@ -986,16 +1037,15 @@ vq_free_entry(struct virtqueue *vq, stru int virtio_enqueue_prep(struct virtio_softc *sc, struct virtqueue *vq, int *slotp) { - struct vq_entry *qe1; + uint16_t slot; KASSERT(slotp != NULL); - qe1 = vq_alloc_entry(vq); - if (qe1 == NULL) + slot = vq_alloc_slot(sc, vq, 1); + if (slot == VRING_DESC_CHAIN_END) return EAGAIN; - /* next slot is not allocated yet */ - qe1->qe_next = -1; - *slotp = qe1->qe_index; + + *slotp = slot; return 0; } @@ -1007,69 +1057,61 @@ int virtio_enqueue_reserve(struct virtio_softc *sc, struct virtqueue *vq, int slot, int nsegs) { - int indirect; - struct vq_entry *qe1 = &vq->vq_entries[slot]; + struct vring_desc *vd; + struct vring_desc_extra *vdx; + int i; - KASSERT(qe1->qe_next == -1); KASSERT(1 <= nsegs && nsegs <= vq->vq_num); + vdx = &vq->vq_descx[slot]; + vd = &vq->vq_desc[slot]; + + KASSERT((vd->flags & virtio_rw16(sc, VRING_DESC_F_NEXT)) == 0); + if ((vq->vq_indirect != NULL) && (nsegs >= MINSEG_INDIRECT) && (nsegs <= vq->vq_maxnsegs)) - indirect = 1; + vdx->use_indirect = true; else - indirect = 0; - qe1->qe_indirect = indirect; + vdx->use_indirect = false; - if (indirect) { - struct vring_desc *vd; + if (vdx->use_indirect) { uint64_t addr; - int i; - vd = &vq->vq_desc[qe1->qe_index]; addr = vq->vq_dmamap->dm_segs[0].ds_addr + vq->vq_indirectoffset; addr += sizeof(struct vring_desc) - * vq->vq_maxnsegs * qe1->qe_index; + * vq->vq_maxnsegs * slot; + vd->addr = virtio_rw64(sc, addr); vd->len = virtio_rw32(sc, sizeof(struct vring_desc) * nsegs); vd->flags = virtio_rw16(sc, VRING_DESC_F_INDIRECT); - vd = vq->vq_indirect; - vd += 
vq->vq_maxnsegs * qe1->qe_index; - qe1->qe_desc_base = vd; + vd = &vq->vq_indirect[vq->vq_maxnsegs * slot]; + vdx->desc_base = vd; + vdx->desc_free_idx = 0; for (i = 0; i < nsegs - 1; i++) { vd[i].flags = virtio_rw16(sc, VRING_DESC_F_NEXT); } vd[i].flags = virtio_rw16(sc, 0); - qe1->qe_next = 0; - - return 0; } else { - struct vring_desc *vd; - struct vq_entry *qe; - int i, s; + uint16_t s; - vd = &vq->vq_desc[0]; - qe1->qe_desc_base = vd; - qe1->qe_next = qe1->qe_index; - s = slot; - for (i = 0; i < nsegs - 1; i++) { - qe = vq_alloc_entry(vq); - if (qe == NULL) { - vd[s].flags = virtio_rw16(sc, 0); - virtio_enqueue_abort(sc, vq, slot); - return EAGAIN; - } - vd[s].flags = virtio_rw16(sc, VRING_DESC_F_NEXT); - vd[s].next = virtio_rw16(sc, qe->qe_index); - s = qe->qe_index; + s = vq_alloc_slot(sc, vq, nsegs - 1); + if (s == VRING_DESC_CHAIN_END) { + vq_free_slot(sc, vq, slot); + return EAGAIN; } - vd[s].flags = virtio_rw16(sc, 0); - return 0; + vd->next = virtio_rw16(sc, s); + vd->flags = virtio_rw16(sc, VRING_DESC_F_NEXT); + + vdx->desc_base = &vq->vq_desc[0]; + vdx->desc_free_idx = slot; } + + return 0; } /* @@ -1079,22 +1121,35 @@ int virtio_enqueue(struct virtio_softc *sc, struct virtqueue *vq, int slot, bus_dmamap_t dmamap, bool write) { - struct vq_entry *qe1 = &vq->vq_entries[slot]; - struct vring_desc *vd = qe1->qe_desc_base; + struct vring_desc *vds; + struct vring_desc_extra *vdx; + uint16_t s; int i; - int s = qe1->qe_next; - KASSERT(s >= 0); KASSERT(dmamap->dm_nsegs > 0); + vdx = &vq->vq_descx[slot]; + vds = vdx->desc_base; + s = vdx->desc_free_idx; + + KASSERT(vds != NULL); + for (i = 0; i < dmamap->dm_nsegs; i++) { - vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[i].ds_addr); - vd[s].len = virtio_rw32(sc, dmamap->dm_segs[i].ds_len); + KASSERT(s != VRING_DESC_CHAIN_END); + + vds[s].addr = virtio_rw64(sc, dmamap->dm_segs[i].ds_addr); + vds[s].len = virtio_rw32(sc, dmamap->dm_segs[i].ds_len); if (!write) - vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE); - s = virtio_rw16(sc, vd[s].next); + vds[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE); + + if ((vds[s].flags & virtio_rw16(sc, VRING_DESC_F_NEXT)) == 0) { + s = VRING_DESC_CHAIN_END; + } else { + s = virtio_rw16(sc, vds[s].next); + } } - qe1->qe_next = s; + + vdx->desc_free_idx = s; return 0; } @@ -1104,20 +1159,32 @@ virtio_enqueue_p(struct virtio_softc *sc bus_dmamap_t dmamap, bus_addr_t start, bus_size_t len, bool write) { - struct vq_entry *qe1 = &vq->vq_entries[slot]; - struct vring_desc *vd = qe1->qe_desc_base; - int s = qe1->qe_next; + struct vring_desc_extra *vdx; + struct vring_desc *vds; + uint16_t s; + + vdx = &vq->vq_descx[slot]; + vds = vdx->desc_base; + s = vdx->desc_free_idx; - KASSERT(s >= 0); + KASSERT(s != VRING_DESC_CHAIN_END); + KASSERT(vds != NULL); KASSERT(dmamap->dm_nsegs == 1); /* XXX */ KASSERT(dmamap->dm_segs[0].ds_len > start); KASSERT(dmamap->dm_segs[0].ds_len >= start + len); - vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[0].ds_addr + start); - vd[s].len = virtio_rw32(sc, len); + vds[s].addr = virtio_rw64(sc, dmamap->dm_segs[0].ds_addr + start); + vds[s].len = virtio_rw32(sc, len); if (!write) - vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE); - qe1->qe_next = virtio_rw16(sc, vd[s].next); + vds[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE); + + if ((vds[s].flags & virtio_rw16(sc, VRING_DESC_F_NEXT)) == 0) { + s = VRING_DESC_CHAIN_END; + } else { + s = virtio_rw16(sc, vds[s].next); + } + + vdx->desc_free_idx = s; return 0; } @@ -1129,16 +1196,16 @@ int virtio_enqueue_commit(struct virtio_softc 
*sc, struct virtqueue *vq, int slot, bool notifynow) { - struct vq_entry *qe1; if (slot < 0) { mutex_enter(&vq->vq_aring_lock); goto notify; } + vq_sync_descs(sc, vq, BUS_DMASYNC_PREWRITE); - qe1 = &vq->vq_entries[slot]; - if (qe1->qe_indirect) + if (vq->vq_descx[slot].use_indirect) vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE); + mutex_enter(&vq->vq_aring_lock); vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = virtio_rw16(sc, slot); @@ -1190,23 +1257,14 @@ notify: int virtio_enqueue_abort(struct virtio_softc *sc, struct virtqueue *vq, int slot) { - struct vq_entry *qe = &vq->vq_entries[slot]; - struct vring_desc *vd; - int s; + struct vring_desc_extra *vdx; - if (qe->qe_next < 0) { - vq_free_entry(vq, qe); - return 0; - } + vq_free_slot(sc, vq, slot); + + vdx = &vq->vq_descx[slot]; + vdx->desc_free_idx = VRING_DESC_CHAIN_END; + vdx->desc_base = NULL; - s = slot; - vd = &vq->vq_desc[0]; - while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) { - s = virtio_rw16(sc, vd[s].next); - vq_free_entry(vq, qe); - qe = &vq->vq_entries[s]; - } - vq_free_entry(vq, qe); return 0; } @@ -1222,7 +1280,6 @@ virtio_dequeue(struct virtio_softc *sc, int *slotp, int *lenp) { uint16_t slot, usedidx; - struct vq_entry *qe; if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx)) return ENOENT; @@ -1231,9 +1288,8 @@ virtio_dequeue(struct virtio_softc *sc, mutex_exit(&vq->vq_uring_lock); usedidx %= vq->vq_num; slot = virtio_rw32(sc, vq->vq_used->ring[usedidx].id); - qe = &vq->vq_entries[slot]; - if (qe->qe_indirect) + if (vq->vq_descx[slot].use_indirect) vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_POSTWRITE); if (slotp) @@ -1251,16 +1307,13 @@ virtio_dequeue(struct virtio_softc *sc, int virtio_dequeue_commit(struct virtio_softc *sc, struct virtqueue *vq, int slot) { - struct vq_entry *qe = &vq->vq_entries[slot]; - struct vring_desc *vd = &vq->vq_desc[0]; - int s = slot; - - while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) { - s = virtio_rw16(sc, vd[s].next); - vq_free_entry(vq, qe); - qe = &vq->vq_entries[s]; - } - vq_free_entry(vq, qe); + struct vring_desc_extra *vdx; + + vq_free_slot(sc, vq, slot); + + vdx = &vq->vq_descx[slot]; + vdx->desc_base = NULL; + vdx->desc_free_idx = VRING_DESC_CHAIN_END; return 0; } @@ -1270,19 +1323,15 @@ virtio_dequeue_commit(struct virtio_soft */ void virtio_child_attach_start(struct virtio_softc *sc, device_t child, int ipl, - struct virtqueue *vqs, - virtio_callback config_change, - virtio_callback intr_hand, - int req_flags, int req_features, const char *feat_bits) + uint64_t req_features, const char *feat_bits) { char buf[1024]; + KASSERT(sc->sc_child == NULL); + KASSERT(!ISSET(sc->sc_child_flags, VIRTIO_CHILD_DETACHED)); + sc->sc_child = child; sc->sc_ipl = ipl; - sc->sc_vqs = vqs; - sc->sc_config_change = config_change; - sc->sc_intrhand = intr_hand; - sc->sc_flags = req_flags; virtio_negotiate_features(sc, req_features); snprintb(buf, sizeof(buf), feat_bits, sc->sc_active_features); @@ -1290,25 +1339,42 @@ virtio_child_attach_start(struct virtio_ aprint_naive("\n"); } -void -virtio_child_attach_set_vqs(struct virtio_softc *sc, - struct virtqueue *vqs, int nvq_pairs) +int +virtio_child_attach_finish(struct virtio_softc *sc, + struct virtqueue *vqs, size_t nvqs, + virtio_callback config_change, + int req_flags) { + size_t i; + int r; + +#ifdef DIAGNOSTIC + KASSERT(nvqs > 0); +#define VIRTIO_ASSERT_FLAGS (VIRTIO_F_INTR_SOFTINT | VIRTIO_F_INTR_PERVQ) + KASSERT((req_flags & VIRTIO_ASSERT_FLAGS) != VIRTIO_ASSERT_FLAGS); +#undef VIRTIO_ASSERT_FLAGS + + for 
(i = 0; i < nvqs; i++){ + KASSERT(vqs[i].vq_index == i); + KASSERT(vqs[i].vq_intrhand != NULL); + KASSERT(vqs[i].vq_done == NULL || + vqs[i].vq_intrhand == virtio_vq_done); + } +#endif - KASSERT(nvq_pairs == 1 || - (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) == 0); - if (nvq_pairs > 1) - sc->sc_child_mq = true; sc->sc_vqs = vqs; -} + sc->sc_nvqs = nvqs; + sc->sc_config_change = config_change; + sc->sc_intrhand = virtio_vq_intr; + sc->sc_flags = req_flags; -int -virtio_child_attach_finish(struct virtio_softc *sc) -{ - int r; + /* set the vq address */ + for (i = 0; i < nvqs; i++) { + sc->sc_ops->setup_queue(sc, vqs[i].vq_index, + vqs[i].vq_dmamap->dm_segs[0].ds_addr); + } - sc->sc_finished_called = true; r = sc->sc_ops->alloc_interrupts(sc); if (r != 0) { aprint_error_dev(sc->sc_dev, @@ -1338,6 +1404,7 @@ virtio_child_attach_finish(struct virtio } } + SET(sc->sc_child_flags, VIRTIO_CHILD_ATTACH_FINISHED); virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK); return 0; @@ -1356,7 +1423,11 @@ fail: void virtio_child_detach(struct virtio_softc *sc) { - sc->sc_child = NULL; + + /* already detached */ + if (ISSET(sc->sc_child_flags, VIRTIO_CHILD_DETACHED)) + return; + sc->sc_vqs = NULL; virtio_device_reset(sc); @@ -1367,6 +1438,8 @@ virtio_child_detach(struct virtio_softc softint_disestablish(sc->sc_soft_ih); sc->sc_soft_ih = NULL; } + + SET(sc->sc_child_flags, VIRTIO_CHILD_DETACHED); } void @@ -1376,7 +1449,7 @@ virtio_child_attach_failed(struct virtio virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_FAILED); - sc->sc_child = VIRTIO_CHILD_FAILED; + SET(sc->sc_child_flags, VIRTIO_CHILD_ATTACH_FAILED); } bus_dma_tag_t @@ -1412,19 +1485,19 @@ virtio_attach_failed(struct virtio_softc if (sc->sc_childdevid == 0) return 1; - if (sc->sc_child == NULL) { - aprint_error_dev(self, - "no matching child driver; not configured\n"); + if (ISSET(sc->sc_child_flags, VIRTIO_CHILD_ATTACH_FAILED)) { + aprint_error_dev(self, "virtio configuration failed\n"); return 1; } - if (sc->sc_child == VIRTIO_CHILD_FAILED) { - aprint_error_dev(self, "virtio configuration failed\n"); + if (sc->sc_child == NULL) { + aprint_error_dev(self, + "no matching child driver; not configured\n"); return 1; } /* sanity check */ - if (!sc->sc_finished_called) { + if (!ISSET(sc->sc_child_flags, VIRTIO_CHILD_ATTACH_FINISHED)) { aprint_error_dev(self, "virtio internal error, child driver " "signaled OK but didn't initialize interrupts\n"); return 1; Index: src/sys/dev/pci/virtio_pci.c diff -u src/sys/dev/pci/virtio_pci.c:1.38 src/sys/dev/pci/virtio_pci.c:1.38.4.1 --- src/sys/dev/pci/virtio_pci.c:1.38 Mon May 30 20:28:18 2022 +++ src/sys/dev/pci/virtio_pci.c Sat May 13 10:56:10 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: virtio_pci.c,v 1.38 2022/05/30 20:28:18 riastradh Exp $ */ +/* $NetBSD: virtio_pci.c,v 1.38.4.1 2023/05/13 10:56:10 martin Exp $ */ /* * Copyright (c) 2020 The NetBSD Foundation, Inc. 
@@ -28,7 +28,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.38 2022/05/30 20:28:18 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.38.4.1 2023/05/13 10:56:10 martin Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -73,6 +73,7 @@ static int virtio_pci_detach(device_t, i sizeof(pcireg_t)) struct virtio_pci_softc { struct virtio_softc sc_sc; + bool sc_intr_pervq; /* IO space */ bus_space_tag_t sc_iot; @@ -328,14 +329,12 @@ virtio_pci_detach(device_t self, int fla struct virtio_softc * const sc = &psc->sc_sc; int r; - if (sc->sc_child != NULL) { - r = config_detach(sc->sc_child, flags); - if (r) - return r; - } + r = config_detach_children(self, flags); + if (r != 0) + return r; /* Check that child detached properly */ - KASSERT(sc->sc_child == NULL); + KASSERT(ISSET(sc->sc_child_flags, VIRTIO_CHILD_DETACHED)); KASSERT(sc->sc_vqs == NULL); KASSERT(psc->sc_ihs_num == 0); @@ -629,7 +628,7 @@ virtio_pci_setup_queue_09(struct virtio_ if (psc->sc_ihs_num > 1) { int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX; - if (sc->sc_child_mq) + if (psc->sc_intr_pervq) vec += idx; bus_space_write_2(psc->sc_iot, psc->sc_ioh, VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec); @@ -751,7 +750,7 @@ virtio_pci_setup_queue_10(struct virtio_ if (psc->sc_ihs_num > 1) { int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX; - if (sc->sc_child_mq) + if (psc->sc_intr_pervq) vec += idx; bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec); @@ -849,7 +848,7 @@ virtio_pci_setup_interrupts_10(struct vi for (qid = 0; qid < sc->sc_nvqs; qid++) { vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX; - if (sc->sc_child_mq) + if (psc->sc_intr_pervq) vector += qid; bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid); bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, @@ -895,7 +894,7 @@ virtio_pci_setup_interrupts_09(struct vi offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR; vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX; - if (sc->sc_child_mq) + if (psc->sc_intr_pervq) vector += qid; bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector); @@ -941,7 +940,7 @@ virtio_pci_establish_msix_interrupts(str } idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX; - if (sc->sc_child_mq) { + if (psc->sc_intr_pervq) { for (qid = 0; qid < sc->sc_nvqs; qid++) { n = idx + qid; vq = &sc->sc_vqs[qid]; @@ -979,7 +978,7 @@ virtio_pci_establish_msix_interrupts(str intrstr = pci_intr_string(pc, psc->sc_ihp[idx], intrbuf, sizeof(intrbuf)); aprint_normal_dev(self, "config interrupting at %s\n", intrstr); idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX; - if (sc->sc_child_mq) { + if (psc->sc_intr_pervq) { kcpuset_t *affinity; int affinity_to, r; @@ -1019,7 +1018,7 @@ error: if (psc->sc_ihs[idx] != NULL) pci_intr_disestablish(psc->sc_pa.pa_pc, psc->sc_ihs[idx]); idx = VIRTIO_MSIX_QUEUE_VECTOR_INDEX; - if (sc->sc_child_mq) { + if (psc->sc_intr_pervq) { for (qid = 0; qid < sc->sc_nvqs; qid++) { n = idx + qid; if (psc->sc_ihs[n] == NULL) @@ -1085,13 +1084,10 @@ virtio_pci_alloc_interrupts(struct virti counts[PCI_INTR_TYPE_INTX] = 1; } else { /* Try MSI-X first and INTx second */ - if (sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) { + if (ISSET(sc->sc_flags, VIRTIO_F_INTR_PERVQ) && + sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX <= nmsix) { nmsix = sc->sc_nvqs + VIRTIO_MSIX_QUEUE_VECTOR_INDEX; } else { - sc->sc_child_mq = false; - } - - if (sc->sc_child_mq == false) { nmsix = 2; } @@ -1109,6 +1105,7 @@ retry: } if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_MSIX) { + psc->sc_intr_pervq = nmsix > 2 ? 
true : false; psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * nmsix, KM_SLEEP); @@ -1127,6 +1124,7 @@ retry: psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI; virtio_pci_adjust_config_region(psc); } else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) { + psc->sc_intr_pervq = false; psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1, KM_SLEEP); @@ -1149,6 +1147,8 @@ retry: } } + if (!psc->sc_intr_pervq) + CLR(sc->sc_flags, VIRTIO_F_INTR_PERVQ); return 0; } Index: src/sys/dev/pci/virtioreg.h diff -u src/sys/dev/pci/virtioreg.h:1.11 src/sys/dev/pci/virtioreg.h:1.11.2.1 --- src/sys/dev/pci/virtioreg.h:1.11 Sat Oct 15 20:00:35 2022 +++ src/sys/dev/pci/virtioreg.h Sat May 13 10:56:10 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: virtioreg.h,v 1.11 2022/10/15 20:00:35 riastradh Exp $ */ +/* $NetBSD: virtioreg.h,v 1.11.2.1 2023/05/13 10:56:10 martin Exp $ */ /* * Copyright (c) 2010 Minoura Makoto. @@ -153,7 +153,7 @@ struct vring_desc { struct vring_avail { uint16_t flags; uint16_t idx; - uint16_t ring[0]; + uint16_t ring[]; /* trailed by uint16_t used_event when VIRTIO_F_RING_EVENT_IDX */ } __packed; @@ -168,7 +168,7 @@ struct vring_used_elem { struct vring_used { uint16_t flags; uint16_t idx; - struct vring_used_elem ring[0]; + struct vring_used_elem ring[]; /* trailed by uint16_t avail_event when VIRTIO_F_RING_EVENT_IDX */ } __packed; Index: src/sys/dev/pci/virtiovar.h diff -u src/sys/dev/pci/virtiovar.h:1.24 src/sys/dev/pci/virtiovar.h:1.24.4.1 --- src/sys/dev/pci/virtiovar.h:1.24 Thu Mar 24 08:08:05 2022 +++ src/sys/dev/pci/virtiovar.h Sat May 13 10:56:10 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: virtiovar.h,v 1.24 2022/03/24 08:08:05 andvar Exp $ */ +/* $NetBSD: virtiovar.h,v 1.24.4.1 2023/05/13 10:56:10 martin Exp $ */ /* * Copyright (c) 2010 Minoura Makoto. 
@@ -70,16 +70,6 @@ #include <sys/bus.h> #include <dev/pci/virtioreg.h> - -struct vq_entry { - SIMPLEQ_ENTRY(vq_entry) qe_list; /* free list */ - uint16_t qe_index; /* index in vq_desc array */ - /* followings are used only when it is the `head' entry */ - int16_t qe_next; /* next enq slot */ - bool qe_indirect; /* 1 if using indirect */ - struct vring_desc *qe_desc_base; -}; - struct virtqueue { struct virtio_softc *vq_owner; unsigned int vq_num; /* queue size (# of entries) */ @@ -89,7 +79,7 @@ struct virtqueue { struct vring_desc *vq_desc; struct vring_avail *vq_avail; struct vring_used *vq_used; - void *vq_indirect; + struct vring_desc *vq_indirect; uint16_t *vq_used_event; /* trails avail */ uint16_t *vq_avail_event; /* trails used */ @@ -105,25 +95,29 @@ struct virtqueue { int vq_maxsegsize; int vq_maxnsegs; - /* free entry management */ - struct vq_entry *vq_entries; - SIMPLEQ_HEAD(, vq_entry) vq_freelist; - kmutex_t vq_freelist_lock; - /* enqueue/dequeue status */ uint16_t vq_avail_idx; uint16_t vq_used_idx; + uint16_t vq_free_idx; int vq_queued; kmutex_t vq_aring_lock; kmutex_t vq_uring_lock; + kmutex_t vq_freedesc_lock; /* interrupt handler */ - int (*vq_done)(struct virtqueue*); + int (*vq_done)(struct virtqueue*); /* for compatibility */ int (*vq_intrhand)(void *); void *vq_intrhand_arg; /* for 1.0 */ uint32_t vq_notify_off; + + struct vring_desc_extra { + bool use_indirect; /* true if using indirect */ + struct vring_desc + *desc_base; + uint16_t desc_free_idx; + } *vq_descx; }; struct virtio_attach_args { @@ -164,16 +158,19 @@ struct virtio_softc { uint64_t sc_active_features; bool sc_indirect; bool sc_version_1; - bool sc_finished_called; int sc_nvqs; /* set by child */ struct virtqueue *sc_vqs; /* set by child */ int sc_childdevid; device_t sc_child; /* set by child */ - bool sc_child_mq; + uint32_t sc_child_flags; +#define VIRTIO_CHILD_ATTACH_FINISHED __BIT(0) +#define VIRTIO_CHILD_ATTACH_FAILED __BIT(1) +#define VIRTIO_CHILD_DETACHED __BIT(2) + virtio_callback sc_config_change; /* set by child */ - virtio_callback sc_intrhand; /* set by child */ + virtio_callback sc_intrhand; }; #else struct virtio_softc; @@ -184,9 +181,7 @@ struct virtio_softc; #define VIRTIO_F_INTR_MPSAFE (1 << 0) #define VIRTIO_F_INTR_SOFTINT (1 << 1) #define VIRTIO_F_INTR_MSIX (1 << 2) - - -#define VIRTIO_CHILD_FAILED ((void *)1) +#define VIRTIO_F_INTR_PERVQ (1 << 3) /* public interface */ void virtio_negotiate_features(struct virtio_softc*, uint64_t); @@ -204,19 +199,21 @@ void virtio_write_device_config_8(struct void virtio_write_device_config_le_2(struct virtio_softc *, int, uint16_t); void virtio_write_device_config_le_4(struct virtio_softc *, int, uint32_t); -int virtio_alloc_vq(struct virtio_softc*, struct virtqueue*, int, int, int, +void virtio_init_vq(struct virtio_softc *, struct virtqueue *, int, + int (*)(void *), void *); +void virtio_init_vq_vqdone(struct virtio_softc *,struct virtqueue *, int, + int (*)(struct virtqueue *)); +int virtio_alloc_vq(struct virtio_softc*, struct virtqueue*, int, int, const char*); int virtio_free_vq(struct virtio_softc*, struct virtqueue*); void virtio_reset(struct virtio_softc *); int virtio_reinit_start(struct virtio_softc *); void virtio_reinit_end(struct virtio_softc *); void virtio_child_attach_start(struct virtio_softc *, device_t, int, - struct virtqueue *, - virtio_callback, virtio_callback, int, - int, const char *); -void virtio_child_attach_set_vqs(struct virtio_softc *, - struct virtqueue *, int); -int virtio_child_attach_finish(struct virtio_softc 
*); + uint64_t, const char *); +int virtio_child_attach_finish(struct virtio_softc *, + struct virtqueue *, size_t, + virtio_callback, int); void virtio_child_attach_failed(struct virtio_softc *); void virtio_child_detach(struct virtio_softc *); @@ -233,8 +230,6 @@ int virtio_dequeue(struct virtio_softc*, int virtio_dequeue_commit(struct virtio_softc*, struct virtqueue*, int); bool virtio_vq_is_enqueued(struct virtio_softc *, struct virtqueue *); -int virtio_vq_intr(struct virtio_softc *); -int virtio_vq_intrhand(struct virtio_softc *); int virtio_postpone_intr(struct virtio_softc *sc, struct virtqueue *vq, uint16_t nslots); int virtio_postpone_intr_smart(struct virtio_softc *sc, struct virtqueue *vq); Index: src/sys/dev/virtio/viocon.c diff -u src/sys/dev/virtio/viocon.c:1.5 src/sys/dev/virtio/viocon.c:1.5.4.1 --- src/sys/dev/virtio/viocon.c:1.5 Sat Aug 13 17:31:32 2022 +++ src/sys/dev/virtio/viocon.c Sat May 13 10:56:10 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: viocon.c,v 1.5 2022/08/13 17:31:32 riastradh Exp $ */ +/* $NetBSD: viocon.c,v 1.5.4.1 2023/05/13 10:56:10 martin Exp $ */ /* $OpenBSD: viocon.c,v 1.8 2021/11/05 11:38:29 mpi Exp $ */ /* @@ -18,7 +18,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: viocon.c,v 1.5 2022/08/13 17:31:32 riastradh Exp $"); +__KERNEL_RCSID(0, "$NetBSD: viocon.c,v 1.5.4.1 2023/05/13 10:56:10 martin Exp $"); #include <sys/param.h> #include <sys/types.h> @@ -123,6 +123,9 @@ struct viocon_softc { struct device *sc_dev; struct virtio_softc *sc_virtio; struct virtqueue *sc_vqs; +#define VIOCON_PORT_RX 0 +#define VIOCON_PORT_TX 1 +#define VIOCON_PORT_NQS 2 struct virtqueue *sc_c_vq_rx; struct virtqueue *sc_c_vq_tx; @@ -194,6 +197,7 @@ viocon_attach(struct device *parent, str struct viocon_softc *sc = device_private(self); struct virtio_softc *vsc = device_private(parent); int maxports = 1; + size_t nvqs; sc->sc_dev = self; if (virtio_child(vsc) != NULL) { @@ -203,16 +207,15 @@ viocon_attach(struct device *parent, str } sc->sc_virtio = vsc; sc->sc_max_ports = maxports; + nvqs = VIOCON_PORT_NQS * maxports; - sc->sc_vqs = kmem_zalloc(2 * (maxports + 1) * sizeof(sc->sc_vqs[0]), + sc->sc_vqs = kmem_zalloc(nvqs * sizeof(sc->sc_vqs[0]), KM_SLEEP); sc->sc_ports = kmem_zalloc(maxports * sizeof(sc->sc_ports[0]), KM_SLEEP); - virtio_child_attach_start(vsc, self, IPL_TTY, sc->sc_vqs, - /*config_change*/NULL, virtio_vq_intr, - /*req_flags*/0, /*req_features*/VIRTIO_CONSOLE_F_SIZE, - VIRTIO_CONSOLE_FLAG_BITS); + virtio_child_attach_start(vsc, self, IPL_TTY, + /*req_features*/VIRTIO_CONSOLE_F_SIZE, VIRTIO_CONSOLE_FLAG_BITS); DPRINTF("%s: softc: %p\n", __func__, sc); if (viocon_port_create(sc, 0) != 0) { @@ -221,12 +224,13 @@ viocon_attach(struct device *parent, str } viocon_rx_fill(sc->sc_ports[0]); - if (virtio_child_attach_finish(vsc) != 0) + if (virtio_child_attach_finish(vsc, sc->sc_vqs, nvqs, + /*config_change*/NULL, /*req_flags*/0) != 0) goto err; return; err: - kmem_free(sc->sc_vqs, 2 * (maxports + 1) * sizeof(sc->sc_vqs[0])); + kmem_free(sc->sc_vqs, nvqs * sizeof(sc->sc_vqs[0])); kmem_free(sc->sc_ports, maxports * sizeof(sc->sc_ports[0])); virtio_child_attach_failed(vsc); } @@ -248,31 +252,30 @@ viocon_port_create(struct viocon_softc * vp->vp_sc = sc; DPRINTF("%s: vp: %p\n", __func__, vp); - if (portidx == 0) - rxidx = 0; - else - rxidx = 2 * (portidx + 1); - txidx = rxidx + 1; + rxidx = (portidx * VIOCON_PORT_NQS) + VIOCON_PORT_RX; + txidx = (portidx * VIOCON_PORT_NQS) + VIOCON_PORT_TX; snprintf(name, sizeof(name), "p%drx", portidx); - if (virtio_alloc_vq(vsc, 
&sc->sc_vqs[rxidx], rxidx, BUFSIZE, 1, + virtio_init_vq_vqdone(vsc, &sc->sc_vqs[rxidx], rxidx, + viocon_rx_intr); + if (virtio_alloc_vq(vsc, &sc->sc_vqs[rxidx], BUFSIZE, 1, name) != 0) { printf("\nCan't alloc %s virtqueue\n", name); goto err; } vp->vp_rx = &sc->sc_vqs[rxidx]; - vp->vp_rx->vq_done = viocon_rx_intr; vp->vp_si = softint_establish(SOFTINT_SERIAL, viocon_rx_soft, vp); DPRINTF("%s: rx: %p\n", __func__, vp->vp_rx); snprintf(name, sizeof(name), "p%dtx", portidx); - if (virtio_alloc_vq(vsc, &sc->sc_vqs[txidx], txidx, BUFSIZE, 1, + virtio_init_vq_vqdone(vsc, &sc->sc_vqs[txidx], txidx, + viocon_tx_intr); + if (virtio_alloc_vq(vsc, &sc->sc_vqs[txidx], BUFSIZE, 1, name) != 0) { printf("\nCan't alloc %s virtqueue\n", name); goto err; } vp->vp_tx = &sc->sc_vqs[txidx]; - vp->vp_tx->vq_done = viocon_tx_intr; DPRINTF("%s: tx: %p\n", __func__, vp->vp_tx); allocsize = (vp->vp_rx->vq_num + vp->vp_tx->vq_num) * BUFSIZE; Index: src/sys/dev/virtio/virtio_mmio.c diff -u src/sys/dev/virtio/virtio_mmio.c:1.7 src/sys/dev/virtio/virtio_mmio.c:1.7.4.1 --- src/sys/dev/virtio/virtio_mmio.c:1.7 Fri Oct 22 02:57:23 2021 +++ src/sys/dev/virtio/virtio_mmio.c Sat May 13 10:56:10 2023 @@ -1,4 +1,4 @@ -/* $NetBSD: virtio_mmio.c,v 1.7 2021/10/22 02:57:23 yamaguchi Exp $ */ +/* $NetBSD: virtio_mmio.c,v 1.7.4.1 2023/05/13 10:56:10 martin Exp $ */ /* $OpenBSD: virtio_mmio.c,v 1.2 2017/02/24 17:12:31 patrick Exp $ */ /* @@ -29,7 +29,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: virtio_mmio.c,v 1.7 2021/10/22 02:57:23 yamaguchi Exp $"); +__KERNEL_RCSID(0, "$NetBSD: virtio_mmio.c,v 1.7.4.1 2023/05/13 10:56:10 martin Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -213,12 +213,11 @@ virtio_mmio_common_detach(struct virtio_ struct virtio_softc *vsc = &sc->sc_sc; int r; - if (vsc->sc_child != NULL && vsc->sc_child != VIRTIO_CHILD_FAILED) { - r = config_detach(vsc->sc_child, flags); - if (r) - return r; - } - KASSERT(vsc->sc_child == NULL || vsc->sc_child == VIRTIO_CHILD_FAILED); + r = config_detach_children(vsc->sc_dev, flags); + if (r != 0) + return r; + + KASSERT(ISSET(vsc->sc_child_flags, VIRTIO_CHILD_DETACHED)); KASSERT(vsc->sc_vqs == NULL); KASSERT(sc->sc_ih == NULL);