The branch stable/14 has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=20fb1e32a23175b5c555e32a36897783838a9f30

commit 20fb1e32a23175b5c555e32a36897783838a9f30
Author:     Konstantin Belousov <k...@freebsd.org>
AuthorDate: 2025-07-02 13:08:56 +0000
Commit:     Konstantin Belousov <k...@freebsd.org>
CommitDate: 2025-07-09 00:28:28 +0000

    md(4): move type-specific data under union
    
    (cherry picked from commit e91022168101867664e08b4de1f3f09ed4af231c)
---
 sys/dev/md/md.c | 263 +++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 155 insertions(+), 108 deletions(-)

diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c
index 27cf401234ac..e89c5e858c32 100644
--- a/sys/dev/md/md.c
+++ b/sys/dev/md/md.c
@@ -266,25 +266,40 @@ struct md_s {
        struct g_provider *pp;
        int (*start)(struct md_s *sc, struct bio *bp);
        struct devstat *devstat;
-       bool candelete;
-
-       /* MD_MALLOC related fields */
-       struct indir *indir;
-       uma_zone_t uma;
-
-       /* MD_PRELOAD related fields */
-       u_char *pl_ptr;
-       size_t pl_len;
-
-       /* MD_VNODE related fields */
-       struct vnode *vnode;
-       char file[PATH_MAX];
-       char label[PATH_MAX];
        struct ucred *cred;
-       vm_offset_t kva;
+       char label[PATH_MAX];
+       bool candelete;
 
-       /* MD_SWAP related fields */
-       vm_object_t object;
+       union {
+               /* MD_MALLOC related fields */
+               struct {
+                       struct indir *indir;
+                       uma_zone_t uma;
+               } s_malloc;
+
+               /* MD_PRELOAD related fields */
+               struct {
+                       u_char *pl_ptr;
+                       size_t pl_len;
+                       char name[PATH_MAX];
+               } s_preload;
+
+               /* MD_VNODE related fields */
+               struct {
+                       struct vnode *vnode;
+                       char file[PATH_MAX];
+                       vm_offset_t kva;
+               } s_vnode;
+
+               /* MD_SWAP related fields */
+               struct {
+                       vm_object_t object;
+               } s_swap;
+
+               /* MD_NULL */
+               struct {
+               } s_null;
+       };
 };
 
 static struct indir *
@@ -326,7 +341,7 @@ destroy_indir(struct md_s *sc, struct indir *ip)
                if (ip->shift)
                        destroy_indir(sc, (struct indir*)(ip->array[i]));
                else if (ip->array[i] > 255)
-                       uma_zfree(sc->uma, (void *)(ip->array[i]));
+                       uma_zfree(sc->s_malloc.uma, (void *)(ip->array[i]));
        }
        del_indir(ip);
 }
@@ -677,10 +692,10 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
        secno = bp->bio_offset / sc->sectorsize;
        error = 0;
        while (nsec--) {
-               osp = s_read(sc->indir, secno);
+               osp = s_read(sc->s_malloc.indir, secno);
                if (bp->bio_cmd == BIO_DELETE) {
                        if (osp != 0)
-                               error = s_write(sc->indir, secno, 0);
+                               error = s_write(sc->s_malloc.indir, secno, 0);
                } else if (bp->bio_cmd == BIO_READ) {
                        if (osp == 0) {
                                if (notmapped) {
@@ -745,10 +760,12 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
                        }
                        if (i == sc->sectorsize) {
                                if (osp != uc)
-                                       error = s_write(sc->indir, secno, uc);
+                                       error = s_write(sc->s_malloc.indir,
+                                           secno, uc);
                        } else {
                                if (osp <= 255) {
-                                       sp = (uintptr_t)uma_zalloc(sc->uma,
+                                       sp = (uintptr_t)uma_zalloc(
+                                           sc->s_malloc.uma,
                                            md_malloc_wait ? M_WAITOK :
                                            M_NOWAIT);
                                        if (sp == 0) {
@@ -769,7 +786,8 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
                                                bcopy(dst, (void *)sp,
                                                    sc->sectorsize);
                                        }
-                                       error = s_write(sc->indir, secno, sp);
+                                       error = s_write(sc->s_malloc.indir,
+                                           secno, sp);
                                } else {
                                        if (notmapped) {
                                                error = md_malloc_move_ma(&m,
@@ -792,7 +810,7 @@ mdstart_malloc(struct md_s *sc, struct bio *bp)
                        error = EOPNOTSUPP;
                }
                if (osp > 255)
-                       uma_zfree(sc->uma, (void*)osp);
+                       uma_zfree(sc->s_malloc.uma, (void*)osp);
                if (error != 0)
                        break;
                secno++;
@@ -850,7 +868,7 @@ mdstart_preload(struct md_s *sc, struct bio *bp)
 {
        uint8_t *p;
 
-       p = sc->pl_ptr + bp->bio_offset;
+       p = sc->s_preload.pl_ptr + bp->bio_offset;
        switch (bp->bio_cmd) {
        case BIO_READ:
                if ((bp->bio_flags & BIO_VLIST) != 0) {
@@ -890,7 +908,7 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
        bool mapped;
 
        td = curthread;
-       vp = sc->vnode;
+       vp = sc->s_vnode.vnode;
        piov = NULL;
        ma_offs = bp->bio_ma_offset;
        off = bp->bio_offset;
@@ -924,8 +942,8 @@ mdstart_vnode(struct md_s *sc, struct bio *bp)
        case BIO_DELETE:
                if (sc->candelete) {
                        error = vn_deallocate(vp, &off, &len, 0,
-                           sc->flags & MD_ASYNC ? 0 : IO_SYNC, sc->cred,
-                           NOCRED);
+                           sc->flags & MD_ASYNC ? 0 : IO_SYNC,
+                           sc->cred, NOCRED);
                        bp->bio_resid = len;
                        return (error);
                }
@@ -965,8 +983,10 @@ unmapped_step:
                KASSERT(iolen > 0, ("zero iolen"));
                KASSERT(npages <= atop(maxphys + PAGE_SIZE),
                    ("npages %d too large", npages));
-               pmap_qenter(sc->kva, &bp->bio_ma[atop(ma_offs)], npages);
-               aiov.iov_base = (void *)(sc->kva + (ma_offs & PAGE_MASK));
+               pmap_qenter(sc->s_vnode.kva, &bp->bio_ma[atop(ma_offs)],
+                   npages);
+               aiov.iov_base = (void *)(sc->s_vnode.kva + (ma_offs &
+                   PAGE_MASK));
                aiov.iov_len = iolen;
                auio.uio_iov = &aiov;
                auio.uio_iovcnt = 1;
@@ -1000,7 +1020,7 @@ unmapped_step:
                    POSIX_FADV_DONTNEED);
 
        if (mapped) {
-               pmap_qremove(sc->kva, npages);
+               pmap_qremove(sc->s_vnode.kva, npages);
                if (error == 0) {
                        len -= iolen;
                        bp->bio_resid -= iolen;
@@ -1054,20 +1074,21 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
        lastend = (bp->bio_offset + bp->bio_length - 1) % PAGE_SIZE + 1;
 
        rv = VM_PAGER_OK;
-       vm_object_pip_add(sc->object, 1);
+       vm_object_pip_add(sc->s_swap.object, 1);
        for (i = bp->bio_offset / PAGE_SIZE; i <= lastp; i++) {
                len = ((i == lastp) ? lastend : PAGE_SIZE) - offs;
-               m = vm_page_grab_unlocked(sc->object, i, VM_ALLOC_SYSTEM);
+               m = vm_page_grab_unlocked(sc->s_swap.object, i,
+                   VM_ALLOC_SYSTEM);
                if (bp->bio_cmd == BIO_READ) {
                        if (vm_page_all_valid(m))
                                rv = VM_PAGER_OK;
                        else
-                               rv = vm_pager_get_pages(sc->object, &m, 1,
-                                   NULL, NULL);
+                               rv = vm_pager_get_pages(sc->s_swap.object,
+                                   &m, 1, NULL, NULL);
                        if (rv == VM_PAGER_ERROR) {
-                               VM_OBJECT_WLOCK(sc->object);
+                               VM_OBJECT_WLOCK(sc->s_swap.object);
                                vm_page_free(m);
-                               VM_OBJECT_WUNLOCK(sc->object);
+                               VM_OBJECT_WUNLOCK(sc->s_swap.object);
                                break;
                        } else if (rv == VM_PAGER_FAIL) {
                                /*
@@ -1094,12 +1115,12 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
                        if (len == PAGE_SIZE || vm_page_all_valid(m))
                                rv = VM_PAGER_OK;
                        else
-                               rv = vm_pager_get_pages(sc->object, &m, 1,
-                                   NULL, NULL);
+                               rv = vm_pager_get_pages(sc->s_swap.object,
+                                   &m, 1, NULL, NULL);
                        if (rv == VM_PAGER_ERROR) {
-                               VM_OBJECT_WLOCK(sc->object);
+                               VM_OBJECT_WLOCK(sc->s_swap.object);
                                vm_page_free(m);
-                               VM_OBJECT_WUNLOCK(sc->object);
+                               VM_OBJECT_WUNLOCK(sc->s_swap.object);
                                break;
                        } else if (rv == VM_PAGER_FAIL)
                                pmap_zero_page(m);
@@ -1120,12 +1141,12 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
                        if (len == PAGE_SIZE || vm_page_all_valid(m))
                                rv = VM_PAGER_OK;
                        else
-                               rv = vm_pager_get_pages(sc->object, &m, 1,
-                                   NULL, NULL);
-                       VM_OBJECT_WLOCK(sc->object);
+                               rv = vm_pager_get_pages(sc->s_swap.object,
+                                   &m, 1, NULL, NULL);
+                       VM_OBJECT_WLOCK(sc->s_swap.object);
                        if (rv == VM_PAGER_ERROR) {
                                vm_page_free(m);
-                               VM_OBJECT_WUNLOCK(sc->object);
+                               VM_OBJECT_WUNLOCK(sc->s_swap.object);
                                break;
                        } else if (rv == VM_PAGER_FAIL) {
                                vm_page_free(m);
@@ -1141,7 +1162,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
                                        m = NULL;
                                }
                        }
-                       VM_OBJECT_WUNLOCK(sc->object);
+                       VM_OBJECT_WUNLOCK(sc->s_swap.object);
                }
                if (m != NULL) {
                        /*
@@ -1161,7 +1182,7 @@ mdstart_swap(struct md_s *sc, struct bio *bp)
                offs = 0;
                ma_offs += len;
        }
-       vm_object_pip_wakeup(sc->object);
+       vm_object_pip_wakeup(sc->s_swap.object);
        return (rv != VM_PAGER_ERROR ? 0 : ENOSPC);
 }
 
@@ -1356,18 +1377,18 @@ mdcreate_malloc(struct md_s *sc, struct md_req *mdr)
        if (mdr->md_fwheads != 0)
                sc->fwheads = mdr->md_fwheads;
        sc->flags = mdr->md_options & (MD_COMPRESS | MD_FORCE | MD_RESERVE);
-       sc->indir = dimension(sc->mediasize / sc->sectorsize);
-       sc->uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL, NULL, NULL,
-           0x1ff, 0);
+       sc->s_malloc.indir = dimension(sc->mediasize / sc->sectorsize);
+       sc->s_malloc.uma = uma_zcreate(sc->name, sc->sectorsize, NULL, NULL,
+           NULL, NULL, 0x1ff, 0);
        if (mdr->md_options & MD_RESERVE) {
                off_t nsectors;
 
                nsectors = sc->mediasize / sc->sectorsize;
                for (u = 0; u < nsectors; u++) {
-                       sp = (uintptr_t)uma_zalloc(sc->uma, (md_malloc_wait ?
-                           M_WAITOK : M_NOWAIT) | M_ZERO);
+                       sp = (uintptr_t)uma_zalloc(sc->s_malloc.uma,
+                           (md_malloc_wait ? M_WAITOK : M_NOWAIT) | M_ZERO);
                        if (sp != 0)
-                               error = s_write(sc->indir, u, sp);
+                               error = s_write(sc->s_malloc.indir, u, sp);
                        else
                                error = ENOMEM;
                        if (error != 0)
@@ -1395,7 +1416,7 @@ mdsetcred(struct md_s *sc, struct ucred *cred)
         * Horrible kludge to establish credentials for NFS  XXX.
         */
 
-       if (sc->vnode) {
+       if (sc->type == MD_VNODE && sc->s_vnode.vnode != NULL) {
                struct uio auio;
                struct iovec aiov;
 
@@ -1410,9 +1431,9 @@ mdsetcred(struct md_s *sc, struct ucred *cred)
                auio.uio_rw = UIO_READ;
                auio.uio_segflg = UIO_SYSSPACE;
                auio.uio_resid = aiov.iov_len;
-               vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
-               error = VOP_READ(sc->vnode, &auio, 0, sc->cred);
-               VOP_UNLOCK(sc->vnode);
+               vn_lock(sc->s_vnode.vnode, LK_EXCLUSIVE | LK_RETRY);
+               error = VOP_READ(sc->s_vnode.vnode, &auio, 0, sc->cred);
+               VOP_UNLOCK(sc->s_vnode.vnode);
                free(tmpbuf, M_TEMP);
        }
        return (error);
@@ -1429,11 +1450,12 @@ mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
 
        fname = mdr->md_file;
        if (mdr->md_file_seg == UIO_USERSPACE) {
-               error = copyinstr(fname, sc->file, sizeof(sc->file), NULL);
+               error = copyinstr(fname, sc->s_vnode.file,
+                   sizeof(sc->s_vnode.file), NULL);
                if (error != 0)
                        return (error);
        } else if (mdr->md_file_seg == UIO_SYSSPACE)
-               strlcpy(sc->file, fname, sizeof(sc->file));
+               strlcpy(sc->s_vnode.file, fname, sizeof(sc->s_vnode.file));
        else
                return (EDOOFUS);
 
@@ -1443,7 +1465,7 @@ mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
         */
        flags = FREAD | ((mdr->md_options & MD_READONLY) ? 0 : FWRITE) \
            | ((mdr->md_options & MD_VERIFY) ? O_VERIFY : 0);
-       NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->file);
+       NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, sc->s_vnode.file);
        error = vn_open(&nd, &flags, 0, NULL);
        if (error != 0)
                return (error);
@@ -1483,17 +1505,17 @@ mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td)
            MD_VERIFY | MD_MUSTDEALLOC);
        if (!(flags & FWRITE))
                sc->flags |= MD_READONLY;
-       sc->vnode = nd.ni_vp;
+       sc->s_vnode.vnode = nd.ni_vp;
 
        error = mdsetcred(sc, td->td_ucred);
        if (error != 0) {
-               sc->vnode = NULL;
+               sc->s_vnode.vnode = NULL;
                vn_lock(nd.ni_vp, LK_EXCLUSIVE | LK_RETRY);
                nd.ni_vp->v_vflag &= ~VV_MD;
                goto bad;
        }
 
-       sc->kva = kva_alloc(maxphys + PAGE_SIZE);
+       sc->s_vnode.kva = kva_alloc(maxphys + PAGE_SIZE);
        return (0);
 bad:
        VOP_UNLOCK(nd.ni_vp);
@@ -1537,23 +1559,30 @@ mddestroy(struct md_s *sc, struct thread *td)
                msleep(sc->procp, &sc->queue_mtx, PRIBIO, "mddestroy", hz / 10);
        mtx_unlock(&sc->queue_mtx);
        mtx_destroy(&sc->queue_mtx);
-       if (sc->vnode != NULL) {
-               vn_lock(sc->vnode, LK_EXCLUSIVE | LK_RETRY);
-               sc->vnode->v_vflag &= ~VV_MD;
-               VOP_UNLOCK(sc->vnode);
-               (void)vn_close(sc->vnode, sc->flags & MD_READONLY ?
+       switch (sc->type) {
+       case MD_VNODE:
+               vn_lock(sc->s_vnode.vnode, LK_EXCLUSIVE | LK_RETRY);
+               sc->s_vnode.vnode->v_vflag &= ~VV_MD;
+               VOP_UNLOCK(sc->s_vnode.vnode);
+               (void)vn_close(sc->s_vnode.vnode, sc->flags & MD_READONLY ?
                    FREAD : (FREAD|FWRITE), sc->cred, td);
+               kva_free(sc->s_vnode.kva, maxphys + PAGE_SIZE);
+               break;
+       case MD_SWAP:
+               vm_object_deallocate(sc->s_swap.object);
+               break;
+       case MD_MALLOC:
+               destroy_indir(sc, sc->s_malloc.indir);
+               uma_zdestroy(sc->s_malloc.uma);
+               break;
+       case MD_PRELOAD:
+       case MD_NULL:
+               break;
+       default:
+               __assert_unreachable();
        }
        if (sc->cred != NULL)
                crfree(sc->cred);
-       if (sc->object != NULL)
-               vm_object_deallocate(sc->object);
-       if (sc->indir)
-               destroy_indir(sc, sc->indir);
-       if (sc->uma)
-               uma_zdestroy(sc->uma);
-       if (sc->kva)
-               kva_free(sc->kva, maxphys + PAGE_SIZE);
 
        LIST_REMOVE(sc, list);
        free_unr(md_uh, sc->unit);
@@ -1578,13 +1607,14 @@ mdresize(struct md_s *sc, struct md_req *mdr)
                oldpages = OFF_TO_IDX(sc->mediasize);
                newpages = OFF_TO_IDX(mdr->md_mediasize);
                if (newpages < oldpages) {
-                       VM_OBJECT_WLOCK(sc->object);
-                       vm_object_page_remove(sc->object, newpages, 0, 0);
+                       VM_OBJECT_WLOCK(sc->s_swap.object);
+                       vm_object_page_remove(sc->s_swap.object, newpages,
+                           0, 0);
                        swap_release_by_cred(IDX_TO_OFF(oldpages -
                            newpages), sc->cred);
-                       sc->object->charge = IDX_TO_OFF(newpages);
-                       sc->object->size = newpages;
-                       VM_OBJECT_WUNLOCK(sc->object);
+                       sc->s_swap.object->charge = IDX_TO_OFF(newpages);
+                       sc->s_swap.object->size = newpages;
+                       VM_OBJECT_WUNLOCK(sc->s_swap.object);
                } else if (newpages > oldpages) {
                        res = swap_reserve_by_cred(IDX_TO_OFF(newpages -
                            oldpages), sc->cred);
@@ -1592,7 +1622,7 @@ mdresize(struct md_s *sc, struct md_req *mdr)
                                return (ENOMEM);
                        if ((mdr->md_options & MD_RESERVE) ||
                            (sc->flags & MD_RESERVE)) {
-                               error = swap_pager_reserve(sc->object,
+                               error = swap_pager_reserve(sc->s_swap.object,
                                    oldpages, newpages - oldpages);
                                if (error < 0) {
                                        swap_release_by_cred(
@@ -1601,10 +1631,10 @@ mdresize(struct md_s *sc, struct md_req *mdr)
                                        return (EDOM);
                                }
                        }
-                       VM_OBJECT_WLOCK(sc->object);
-                       sc->object->charge = IDX_TO_OFF(newpages);
-                       sc->object->size = newpages;
-                       VM_OBJECT_WUNLOCK(sc->object);
+                       VM_OBJECT_WLOCK(sc->s_swap.object);
+                       sc->s_swap.object->charge = IDX_TO_OFF(newpages);
+                       sc->s_swap.object->size = newpages;
+                       VM_OBJECT_WUNLOCK(sc->s_swap.object);
                }
                break;
        default:
@@ -1645,13 +1675,13 @@ mdcreate_swap(struct md_s *sc, struct md_req *mdr, struct thread *td)
                sc->fwsectors = mdr->md_fwsectors;
        if (mdr->md_fwheads != 0)
                sc->fwheads = mdr->md_fwheads;
-       sc->object = vm_pager_allocate(OBJT_SWAP, NULL, PAGE_SIZE * npage,
-           VM_PROT_DEFAULT, 0, td->td_ucred);
-       if (sc->object == NULL)
+       sc->s_swap.object = vm_pager_allocate(OBJT_SWAP, NULL,
+           PAGE_SIZE * npage, VM_PROT_DEFAULT, 0, td->td_ucred);
+       if (sc->s_swap.object == NULL)
                return (ENOMEM);
        sc->flags = mdr->md_options & (MD_FORCE | MD_RESERVE);
        if (mdr->md_options & MD_RESERVE) {
-               if (swap_pager_reserve(sc->object, 0, npage) < 0) {
+               if (swap_pager_reserve(sc->s_swap.object, 0, npage) < 0) {
                        error = EDOM;
                        goto finish;
                }
@@ -1659,8 +1689,8 @@ mdcreate_swap(struct md_s *sc, struct md_req *mdr, struct thread *td)
        error = mdsetcred(sc, td->td_ucred);
  finish:
        if (error != 0) {
-               vm_object_deallocate(sc->object);
-               sc->object = NULL;
+               vm_object_deallocate(sc->s_swap.object);
+               sc->s_swap.object = NULL;
        }
        return (error);
 }
@@ -1858,10 +1888,13 @@ kern_mdquery_locked(struct md_req *mdr)
                if (error != 0)
                        return (error);
        }
-       if (sc->type == MD_VNODE ||
-           (sc->type == MD_PRELOAD && mdr->md_file != NULL))
-               error = copyout(sc->file, mdr->md_file,
-                   strlen(sc->file) + 1);
+       if (sc->type == MD_VNODE) {
+               error = copyout(sc->s_vnode.file, mdr->md_file,
+                   strlen(sc->s_vnode.file) + 1);
+       } else if (sc->type == MD_PRELOAD && mdr->md_file != NULL) {
+               error = copyout(sc->s_preload.name, mdr->md_file,
+                   strlen(sc->s_preload.name) + 1);
+       }
        return (error);
 }
 
@@ -2013,11 +2046,12 @@ md_preloaded(u_char *image, size_t length, const char *name)
                return;
        sc->mediasize = length;
        sc->sectorsize = DEV_BSIZE;
-       sc->pl_ptr = image;
-       sc->pl_len = length;
+       sc->s_preload.pl_ptr = image;
+       sc->s_preload.pl_len = length;
        sc->start = mdstart_preload;
        if (name != NULL)
-               strlcpy(sc->file, name, sizeof(sc->file));
+               strlcpy(sc->s_preload.name, name,
+                   sizeof(sc->s_preload.name));
 #ifdef MD_ROOT
        if (sc->unit == 0) {
 #ifndef ROOTDEVNAME
@@ -2129,9 +2163,14 @@ g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
                        sbuf_printf(sb, " fs %ju", (uintmax_t) mp->fwsectors);
                        sbuf_printf(sb, " l %ju", (uintmax_t) mp->mediasize);
                        sbuf_printf(sb, " t %s", type);
-                       if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
-                           (mp->type == MD_PRELOAD && mp->file[0] != '\0'))
-                               sbuf_printf(sb, " file %s", mp->file);
+                       if (mp->type == MD_VNODE &&
+                           mp->s_vnode.vnode != NULL)
+                               sbuf_printf(sb, " file %s", mp->s_vnode.file);
+                       if (mp->type == MD_PRELOAD &&
+                           mp->s_preload.name[0] != '\0') {
+                               sbuf_printf(sb, " file %s",
+                                   mp->s_preload.name);
+                       }
                        sbuf_printf(sb, " label %s", mp->label);
                } else {
                        sbuf_printf(sb, "%s<unit>%d</unit>\n", indent,
@@ -2156,15 +2195,23 @@ g_md_dumpconf(struct sbuf *sb, const char *indent, struct g_geom *gp,
                            "read-only");
                        sbuf_printf(sb, "%s<type>%s</type>\n", indent,
                            type);
-                       if ((mp->type == MD_VNODE && mp->vnode != NULL) ||
-                           (mp->type == MD_PRELOAD && mp->file[0] != '\0')) {
+                       if (mp->type == MD_VNODE) {
+                               if (mp->s_vnode.vnode != NULL) {
+                                       sbuf_printf(sb, "%s<file>", indent);
+                                       g_conf_printf_escaped(sb, "%s",
+                                           mp->s_vnode.file);
+                                       sbuf_printf(sb, "</file>\n");
+                               }
+                               sbuf_printf(sb, "%s<cache>%s</cache>\n", indent,
+                                   (mp->flags & MD_CACHE) == 0 ? "off": "on");
+                       }
+                       if (mp->type == MD_PRELOAD &&
+                           mp->s_preload.name[0] != '\0') {
                                sbuf_printf(sb, "%s<file>", indent);
-                               g_conf_printf_escaped(sb, "%s", mp->file);
+                               g_conf_printf_escaped(sb, "%s",
+                                   mp->s_preload.name);
                                sbuf_printf(sb, "</file>\n");
                        }
-                       if (mp->type == MD_VNODE)
-                               sbuf_printf(sb, "%s<cache>%s</cache>\n", indent,
-                                   (mp->flags & MD_CACHE) == 0 ? "off": "on");
                        sbuf_printf(sb, "%s<label>", indent);
                        g_conf_printf_escaped(sb, "%s", mp->label);
                        sbuf_printf(sb, "</label>\n");
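
For readers skimming the patch, the following is a minimal userland sketch, not code from the commit; the names demo_type, demo_sc and demo_destroy are invented for illustration. It shows the shape of the change to struct md_s: backend-specific members grouped into per-type structs inside an anonymous union, with teardown dispatching on the type field the way mddestroy() now switches on sc->type instead of null-testing each backend pointer.

/*
 * Minimal userland sketch (C11 anonymous union) of the layout pattern
 * the commit applies to struct md_s.  All names here are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

enum demo_type { DEMO_MALLOC, DEMO_VNODE, DEMO_NULL };

struct demo_sc {
	enum demo_type type;
	char label[64];			/* shared by all backends */
	union {
		struct {
			void *indir;	/* MD_MALLOC-style private data */
		} s_malloc;
		struct {
			char file[64];	/* MD_VNODE-style private data */
		} s_vnode;
	};
};

static void
demo_destroy(struct demo_sc *sc)
{
	/* Dispatch on the backend type; only its union member is valid. */
	switch (sc->type) {
	case DEMO_MALLOC:
		free(sc->s_malloc.indir);
		break;
	case DEMO_VNODE:
		printf("closing %s\n", sc->s_vnode.file);
		break;
	case DEMO_NULL:
		break;
	}
	free(sc);
}

int
main(void)
{
	struct demo_sc *sc;

	sc = calloc(1, sizeof(*sc));
	if (sc == NULL)
		return (1);
	sc->type = DEMO_VNODE;
	snprintf(sc->s_vnode.file, sizeof(sc->s_vnode.file), "%s",
	    "/tmp/backing");
	demo_destroy(sc);
	return (0);
}

Grouping the per-backend fields this way lets the backends' private data overlap in the union and makes it clear which fields belong to which type, which is what the mddestroy() and kern_mdquery_locked() hunks above rely on when they branch on sc->type.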
