Putting this out here to ask for feedback on the approach. Best viewed with -w.
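To make the intended usage concrete, here is a minimal caller-side sketch of the new registration semantics (it only mirrors the vma.c hunk below and is illustrative, not part of the patch; devfn, write_zero, options, flags etc. are the existing locals in extract_content()):

    BlockBackend *blk = NULL;

    /* a target path of /dev/null means: do not open a block backend at all */
    if (strncmp(devfn, "/dev/null", 9) != 0) {
        if (errp || !(blk = blk_new_open(devfn, NULL, options, flags, &errp))) {
            g_error("can't open file %s - %s", devfn, error_get_pretty(errp));
        }
    }

    /* blk == NULL selects the new skip mode: the stream is still read (and
     * advanced), but nothing is written and no duplicate-cluster check is
     * done for this drive; verify keeps the flag unset */
    if (vma_reader_register_bs(vmar, i, blk, write_zero, blk == NULL, &errp) < 0) {
        g_error("%s", error_get_pretty(errp));
    }
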
Treat the target path "/dev/null" in a special way to allow skipping (writes) for restore.

An alternative would be to add a new possible option 'skip' to the map line and re-work the handling of 'path'. A valid line could then look like
    skip=1:0:drive-scsi0=
Another alternative would be to introduce a new type of line altogether, e.g.
    skip=drive-scsi0

Since in PVE, most archives are compressed and piped to vma for restore, it's not easily possible to skip reads (or is there a good way to do that?).

For the reader, a new skip flag for VmaRestoreState is added, and the target is allowed to be NULL if skip is specified when registering. If the skip flag is set, no writes are made and the duplicate-cluster check is skipped as well. Therefore, the flag is not set for verify.

I thought about allowing target=NULL directly without adding a flag and checking for that, but in the end, the flag seemed a bit more robust.

Signed-off-by: Fabian Ebner <f.eb...@proxmox.com>
---
 vma-reader.c | 64 +++++++++++++++++++++++++++++-----------------------
 vma.c        | 51 ++++++++++++++++++++++-------------------
 vma.h        |  2 +-
 3 files changed, 65 insertions(+), 52 deletions(-)

diff --git a/vma-reader.c b/vma-reader.c
index 2b1d1cdab3..3d53bfc27d 100644
--- a/vma-reader.c
+++ b/vma-reader.c
@@ -29,6 +29,7 @@ typedef struct VmaRestoreState {
     bool write_zeroes;
     unsigned long *bitmap;
     int bitmap_size;
+    bool skip;
 } VmaRestoreState;
 
 struct VmaReader {
@@ -423,13 +424,14 @@ VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id)
 }
 
 static void allocate_rstate(VmaReader *vmar, guint8 dev_id,
-                            BlockBackend *target, bool write_zeroes)
+                            BlockBackend *target, bool write_zeroes, bool skip)
 {
     assert(vmar);
     assert(dev_id);
 
     vmar->rstate[dev_id].target = target;
     vmar->rstate[dev_id].write_zeroes = write_zeroes;
+    vmar->rstate[dev_id].skip = skip;
 
     int64_t size = vmar->devinfo[dev_id].size;
 
@@ -444,28 +446,30 @@ static void allocate_rstate(VmaReader *vmar, guint8 dev_id,
 }
 
 int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id, BlockBackend *target,
-                           bool write_zeroes, Error **errp)
+                           bool write_zeroes, bool skip, Error **errp)
 {
     assert(vmar);
-    assert(target != NULL);
+    assert(target != NULL || skip);
     assert(dev_id);
-    assert(vmar->rstate[dev_id].target == NULL);
-
-    int64_t size = blk_getlength(target);
-    int64_t size_diff = size - vmar->devinfo[dev_id].size;
-
-    /* storage types can have different size restrictions, so it
-     * is not always possible to create an image with exact size.
-     * So we tolerate a size difference up to 4MB.
-     */
-    if ((size_diff < 0) || (size_diff > 4*1024*1024)) {
-        error_setg(errp, "vma_reader_register_bs for stream %s failed - "
-                   "unexpected size %zd != %zd", vmar->devinfo[dev_id].devname,
-                   size, vmar->devinfo[dev_id].size);
-        return -1;
+    assert(vmar->rstate[dev_id].target == NULL && !vmar->rstate[dev_id].skip);
+
+    if (target != NULL) {
+        int64_t size = blk_getlength(target);
+        int64_t size_diff = size - vmar->devinfo[dev_id].size;
+
+        /* storage types can have different size restrictions, so it
+         * is not always possible to create an image with exact size.
+         * So we tolerate a size difference up to 4MB.
+         */
+        if ((size_diff < 0) || (size_diff > 4*1024*1024)) {
+            error_setg(errp, "vma_reader_register_bs for stream %s failed - "
+                       "unexpected size %zd != %zd", vmar->devinfo[dev_id].devname,
+                       size, vmar->devinfo[dev_id].size);
+            return -1;
+        }
     }
 
-    allocate_rstate(vmar, dev_id, target, write_zeroes);
+    allocate_rstate(vmar, dev_id, target, write_zeroes, skip);
 
     return 0;
 }
@@ -558,19 +562,23 @@ static int restore_extent(VmaReader *vmar, unsigned char *buf,
     VmaRestoreState *rstate = &vmar->rstate[dev_id];
     BlockBackend *target = NULL;
 
+    bool skip = rstate->skip;
+
     if (dev_id != vmar->vmstate_stream) {
         target = rstate->target;
-        if (!verify && !target) {
+        if (!verify && !target && !skip) {
             error_setg(errp, "got wrong dev id %d", dev_id);
             return -1;
         }
 
-        if (vma_reader_get_bitmap(rstate, cluster_num)) {
-            error_setg(errp, "found duplicated cluster %zd for stream %s",
-                       cluster_num, vmar->devinfo[dev_id].devname);
-            return -1;
+        if (!skip) {
+            if (vma_reader_get_bitmap(rstate, cluster_num)) {
+                error_setg(errp, "found duplicated cluster %zd for stream %s",
+                           cluster_num, vmar->devinfo[dev_id].devname);
+                return -1;
+            }
+            vma_reader_set_bitmap(rstate, cluster_num, 1);
         }
-        vma_reader_set_bitmap(rstate, cluster_num, 1);
 
         max_sector = vmar->devinfo[dev_id].size/BDRV_SECTOR_SIZE;
     } else {
@@ -616,7 +624,7 @@ static int restore_extent(VmaReader *vmar, unsigned char *buf,
                     return -1;
                 }
 
-                if (!verify) {
+                if (!verify && !skip) {
                     int nb_sectors = end_sector - sector_num;
                     if (restore_write_data(vmar, dev_id, target, vmstate_fd,
                                            buf + start, sector_num, nb_sectors,
@@ -652,7 +660,7 @@ static int restore_extent(VmaReader *vmar, unsigned char *buf,
                         return -1;
                     }
 
-                    if (!verify) {
+                    if (!verify && !skip) {
                         int nb_sectors = end_sector - sector_num;
                         if (restore_write_data(vmar, dev_id, target, vmstate_fd,
                                                buf + start, sector_num,
@@ -677,7 +685,7 @@ static int restore_extent(VmaReader *vmar, unsigned char *buf,
                 vmar->partial_zero_cluster_data += zero_size;
             }
 
-            if (rstate->write_zeroes && !verify) {
+            if (rstate->write_zeroes && !verify && !skip) {
                 if (restore_write_data(vmar, dev_id, target, vmstate_fd,
                                        zero_vma_block, sector_num,
                                        nb_sectors, errp) < 0) {
@@ -848,7 +856,7 @@ int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp)
 
     for (dev_id = 1; dev_id < 255; dev_id++) {
         if (vma_reader_get_device_info(vmar, dev_id)) {
-            allocate_rstate(vmar, dev_id, NULL, false);
+            allocate_rstate(vmar, dev_id, NULL, false, false);
         }
     }
 
diff --git a/vma.c b/vma.c
index df542b7732..3875d82318 100644
--- a/vma.c
+++ b/vma.c
@@ -274,6 +274,7 @@ static int extract_content(int argc, char **argv)
 
             const char *path;
             bool write_zero;
+            bool skip;
             if (line[0] == '0' && line[1] == ':') {
                 path = line + 2;
                 write_zero = false;
@@ -382,36 +383,40 @@ static int extract_content(int argc, char **argv)
                 g_error("invalid cache option: %s\n", cache);
             }
 
-            if (errp || !(blk = blk_new_open(devfn, NULL, options, flags, &errp))) {
-                g_error("can't open file %s - %s", devfn,
-                        error_get_pretty(errp));
-            }
-
-            if (cache) {
-                blk_set_enable_write_cache(blk, !writethrough);
-            }
+            if (strncmp(devfn, "/dev/null", 9) == 0) {
+                blk = NULL;
+            } else {
+                if (errp || !(blk = blk_new_open(devfn, NULL, options, flags, &errp))) {
+                    g_error("can't open file %s - %s", devfn,
+                            error_get_pretty(errp));
+                }
 
-            if (throttling_group) {
-                blk_io_limits_enable(blk, throttling_group);
-            }
+                if (cache) {
+                    blk_set_enable_write_cache(blk, !writethrough);
+                }
 
-            if (throttling_bps) {
-                if (!throttling_group) {
-                    blk_io_limits_enable(blk, devfn);
+                if (throttling_group) {
+                    blk_io_limits_enable(blk, throttling_group);
                 }
 
-                ThrottleConfig cfg;
-                throttle_config_init(&cfg);
-                cfg.buckets[THROTTLE_BPS_WRITE].avg = throttling_bps;
-                Error *err = NULL;
-                if (!throttle_is_valid(&cfg, &err)) {
-                    error_report_err(err);
-                    g_error("failed to apply throttling");
+                if (throttling_bps) {
+                    if (!throttling_group) {
+                        blk_io_limits_enable(blk, devfn);
+                    }
+
+                    ThrottleConfig cfg;
+                    throttle_config_init(&cfg);
+                    cfg.buckets[THROTTLE_BPS_WRITE].avg = throttling_bps;
+                    Error *err = NULL;
+                    if (!throttle_is_valid(&cfg, &err)) {
+                        error_report_err(err);
+                        g_error("failed to apply throttling");
+                    }
+                    blk_set_io_limits(blk, &cfg);
                 }
-                blk_set_io_limits(blk, &cfg);
             }
 
-            if (vma_reader_register_bs(vmar, i, blk, write_zero, &errp) < 0) {
+            if (vma_reader_register_bs(vmar, i, blk, write_zero, blk == NULL, &errp) < 0) {
                 g_error("%s", error_get_pretty(errp));
             }
 
diff --git a/vma.h b/vma.h
index c895c97f6d..1b62859165 100644
--- a/vma.h
+++ b/vma.h
@@ -142,7 +142,7 @@ GList *vma_reader_get_config_data(VmaReader *vmar);
 VmaDeviceInfo *vma_reader_get_device_info(VmaReader *vmar, guint8 dev_id);
 int vma_reader_register_bs(VmaReader *vmar, guint8 dev_id,
                            BlockBackend *target, bool write_zeroes,
-                           Error **errp);
+                           bool skip, Error **errp);
 int vma_reader_restore(VmaReader *vmar, int vmstate_fd, bool verbose,
                        Error **errp);
 int vma_reader_verify(VmaReader *vmar, bool verbose, Error **errp);
-- 
2.30.2


_______________________________________________
pve-devel mailing list
pve-devel@lists.proxmox.com
https://lists.proxmox.com/cgi-bin/mailman/listinfo/pve-devel