On 4/28/25 10:14, vrms wrote:
hi jed,

that sounds slick. thx for the pointer.

out of interest ...

1. how do you handle snapshots for VMs?
    I guess with a setup like yours you have the choice between btrfs/zfs snapshots and qemu/virsh snapshots on the images

Since ZFS snapshotting is so fast, I rely on it. Occasionally I will use qcow2 overlays, but I've never relied on qcow2 internal snapshots. I don't run btrfs, so I shouldn't give advice on that.
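A minimal sketch of that workflow, assuming a per-VM dataset named like the ones created by the function further down (cholla6-vms/VMs/l_46-test is a hypothetical name):

# take a snapshot before a risky guest change (shut the VM down first,
# or at least quiesce it; rolling back under a live guest loses
# whatever it wrote after the snapshot)
sudo zfs snapshot cholla6-vms/VMs/l_46-test@pre-upgrade

# list snapshots, and roll back if the change goes badly
sudo zfs list -t snapshot -r cholla6-vms/VMs/l_46-test
sudo zfs rollback cholla6-vms/VMs/l_46-test@pre-upgrade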


2. is there anything other than the hd images inside those subvolumes?

Libvirt stores the domain XML files under /etc/libvirt/qemu, and if you need to back up those settings you can copy them. I have not put that directory on ZFS, because that bends the OS into a hard-to-recover combination of file systems. Likewise, I avoid installing VMs into the default libvirt storage area, /var/lib/libvirt/images. I suggest creating some scripts that ease per-domain storage pool creation.
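If you want the backups scripted rather than copied out of the config directory, virsh dumpxml captures the same definitions (the domain name 46-test here is a placeholder):

# save the domain definition, then recreate it later or elsewhere
sudo virsh dumpxml 46-test > 46-test.xml
sudo virsh define 46-test.xml

Below are some example bash functions I use: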

function addpool() {
   [ -z "$1" ] && echo "What's the pool name?" && return
   local longname="$PWD/$1"
   sudo virsh pool-define-as --name "$1" --type dir \
       --target "$longname" --source-path "$longname"
   sudo virsh pool-start "$1"
   sudo virsh pool-autostart "$1"
}
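Run it from the directory that holds the per-VM subdirectories, e.g. (l_46-test is a placeholder):

cd /VMs
addpool l_46-test   # registers /VMs/l_46-test as a dir pool named l_46-test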

function NewVmOneDisk() {
    set -o pipefail
    # set -x
    local L="/VMs"              # mountpoint of the per-VM datasets
    local V="cholla6-vms${L}"   # ZFS dataset mounted at $L
    cd "$L" || return 1
    # find the highest existing serial (directories are named l_<serial>-<name>)
    local recent=$(ls -1d l_* | awk -F'[_-]' '{print $2}' | sort -n | tail -1)
    local new_ser=$(( recent + 1 ))
    #echo "New ser: $new_ser"
    read -r -p "[$new_ser] One word name: " name
    if [[ -z $name ]]; then
        echo "No name, bye."
        return 1
    fi
    local vmname="${new_ser}-${name}"
    local fqname="${V}/l_${vmname}"
    local clustersz="32K"
    # one comma-separated -o is safest; repeated -o flags have not
    # always accumulated across qemu-img versions
    local qi_opts=("create" "-f" "qcow2"
        "-o" "cluster_size=${clustersz},preallocation=metadata,lazy_refcounts=on,extended_l2=on"
    )
    sudo zfs create "$fqname"
    # match the ZFS recordsize to the qcow2 cluster size to avoid write amplification
    sudo zfs set recordsize=$clustersz "$fqname"
    sudo zfs set compression=zstd-5 "$fqname"
    sudo chown jreynolds: "l_$vmname"
    cd "l_$vmname" || {
        echo "Unable to enter directory l_$vmname"
        return 1
    }

    # expose the qcow2 image as a block device via the NBD kernel driver
    sudo modprobe nbd
    local nb="/dev/nbd2"    # assumes nbd2 is free on this host

    qemu-img "${qi_opts[@]}" "${vmname}-00-root.qcow2" 60G
    sudo qemu-nbd -c $nb "${vmname}-00-root.qcow2" || {
        echo "Unable to attach ${vmname}-00-root.qcow2 -> $nb, bye"
        set +x
        return 1
    }
    sudo partprobe $nb
    sudo parted -a optimal $nb -s \
        mklabel msdos -- \
        mkpart primary fat32   1M  2M \
        mkpart primary fat32   2M  256M \
        mkpart primary ext4  256M 1280M \
        mkpart primary ext4 1280M   -1s
    sync
    sudo partprobe $nb
    # change partition 2 to type ef (EFI System) and partition 4 to 8e (Linux LVM)
    sudo sfdisk --change-id $nb 2 ef
    sudo sfdisk --change-id $nb 4 8e
    sudo fdisk -l $nb
    sleep 5
    local vgname="ctvg_$(date +%H%M)"
    sudo mkfs.vfat            ${nb}p1
    sudo mkfs.vfat -n EFI     ${nb}p2
    sudo mkfs.ext4 -L BOOT    ${nb}p3
    sudo pvcreate             ${nb}p4
    sudo vgcreate --pvmetadatacopies 2 -y $vgname ${nb}p4
    sudo lvcreate -n  lv_swap -L  2G $vgname
    sudo lvcreate -n  lv_root -L 20G $vgname
    sudo lvcreate -n  lv_home -L  2G $vgname
    sudo mkswap    -L SWAP /dev/mapper/${vgname}-lv_swap
    sudo mkfs.ext4 -L ROOT /dev/mapper/${vgname}-lv_root
    sudo mkfs.ext4 -L HOME /dev/mapper/${vgname}-lv_home
    sync
    # in case a desktop automounter grabbed the new filesystems
    sudo umount /dev/mapper/${vgname}-lv_root 2>/dev/null
    sudo umount /dev/mapper/${vgname}-lv_home 2>/dev/null
    sudo vgchange -a n $vgname
    sudo qemu-nbd -d $nb

    cd ..
    addpool l_$vmname
    echo "...done"
    set +x
} # ~NewVmOneDisk
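Installing into one of those pools might then look like this; a sketch only, with placeholder names (46-test, the ISO path, and the osinfo id are all hypothetical):

sudo virt-install --name 46-test \
    --memory 4096 --vcpus 2 \
    --disk path=/VMs/l_46-test/46-test-00-root.qcow2,format=qcow2 \
    --cdrom /isos/install.iso \
    --osinfo detect=on,require=off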

--
Jed Reynolds -- Sr Software Developer and Sysadmin
Candela Technologies, Washington USA PST GMT-8
Please CC:supp...@candelatech.com on support topics.
