On 3/7/25 12:15, Fabiano Rosas wrote:
> From: Steve Sistare <steven.sist...@oracle.com>
> 
> Unlike cpr-reboot mode, cpr-transfer mode cannot save volatile ram blocks
> in the migration stream file and recreate them later, because the physical
> memory for the blocks is pinned and registered for vfio.  Add a blocker
> for volatile ram blocks.
> 
> Also add a blocker for RAM_GUEST_MEMFD.  Preserving guest_memfd may be
> sufficient for CPR, but it has not been tested yet.
> 
> Signed-off-by: Steve Sistare <steven.sist...@oracle.com>
> Reviewed-by: Fabiano Rosas <faro...@suse.de>
> Reviewed-by: Peter Xu <pet...@redhat.com>
> Reviewed-by: David Hildenbrand <da...@redhat.com>
> Message-ID: <1740667681-257312-1-git-send-email-steven.sist...@oracle.com>
> Signed-off-by: Fabiano Rosas <faro...@suse.de>
> ---
>  include/exec/memory.h   |  3 ++
>  include/exec/ramblock.h |  1 +
>  migration/savevm.c      |  2 ++
>  system/physmem.c        | 66 +++++++++++++++++++++++++++++++++++++++++
>  4 files changed, 72 insertions(+)
This patch breaks booting an SNP guest as it triggers the following
assert:

qemu-system-x86_64: ../util/error.c:68: error_setv: Assertion `*errp == NULL' failed.

I tracked it to the error_setg() call in ram_block_add_cpr_blocker().
It looks like the error message cannot be set because rb->cpr_blocker is
not NULL by the time that call runs.
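
As I read the patch, the sequence for a guest_memfd block is roughly the
following (a condensed sketch of the two paths, not the literal code):

  /* ram_block_add(), guest_memfd path: the blocker is filled in here */
  error_setg(&new_block->cpr_blocker, "Memory region %s uses guest_memfd, ...");
  migrate_add_blocker_modes(&new_block->cpr_blocker, errp,
                            MIG_MODE_CPR_TRANSFER, -1);

  /*
   * Later, vmstate_register_ram() -> ram_block_add_cpr_blocker(): the
   * block is not CPR-compatible either, so error_setg() runs again on
   * the same, still-populated pointer, and error_setv()'s
   * assert(*errp == NULL) fires.
   */
  error_setg(&rb->cpr_blocker, "Memory region %s is not compatible with CPR...");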

Adding aux-ram-share=on to the -machine options gets me past the error,
and therefore the assertion, but isn't that an incompatible change to how
an SNP guest has to be started?
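
For reference, something along these lines now boots for me (a sketch from
my test setup; the SNP object parameters are mine, not from this patch):

  qemu-system-x86_64 \
      -machine q35,confidential-guest-support=sev0,aux-ram-share=on \
      -object sev-snp-guest,id=sev0,cbitpos=51,reduced-phys-bits=1 \
      ...

The same command line without aux-ram-share=on hits the assertion above.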

Thanks,
Tom

> 
> diff --git a/include/exec/memory.h b/include/exec/memory.h
> index 78c4e0aec8..d09af58c97 100644
> --- a/include/exec/memory.h
> +++ b/include/exec/memory.h
> @@ -3203,6 +3203,9 @@ bool ram_block_discard_is_disabled(void);
>   */
>  bool ram_block_discard_is_required(void);
>  
> +void ram_block_add_cpr_blocker(RAMBlock *rb, Error **errp);
> +void ram_block_del_cpr_blocker(RAMBlock *rb);
> +
>  #endif
>  
>  #endif
> diff --git a/include/exec/ramblock.h b/include/exec/ramblock.h
> index 0babd105c0..64484cd821 100644
> --- a/include/exec/ramblock.h
> +++ b/include/exec/ramblock.h
> @@ -39,6 +39,7 @@ struct RAMBlock {
>      /* RCU-enabled, writes protected by the ramlist lock */
>      QLIST_ENTRY(RAMBlock) next;
>      QLIST_HEAD(, RAMBlockNotifier) ramblock_notifiers;
> +    Error *cpr_blocker;
>      int fd;
>      uint64_t fd_offset;
>      int guest_memfd;
> diff --git a/migration/savevm.c b/migration/savevm.c
> index 5c4fdfd95e..ce158c3512 100644
> --- a/migration/savevm.c
> +++ b/migration/savevm.c
> @@ -3514,12 +3514,14 @@ void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev)
>      qemu_ram_set_idstr(mr->ram_block,
>                         memory_region_name(mr), dev);
>      qemu_ram_set_migratable(mr->ram_block);
> +    ram_block_add_cpr_blocker(mr->ram_block, &error_fatal);
>  }
>  
>  void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev)
>  {
>      qemu_ram_unset_idstr(mr->ram_block);
>      qemu_ram_unset_migratable(mr->ram_block);
> +    ram_block_del_cpr_blocker(mr->ram_block);
>  }
>  
>  void vmstate_register_ram_global(MemoryRegion *mr)
> diff --git a/system/physmem.c b/system/physmem.c
> index 8c1736f84e..445981a1b4 100644
> --- a/system/physmem.c
> +++ b/system/physmem.c
> @@ -70,7 +70,10 @@
>  
>  #include "qemu/pmem.h"
>  
> +#include "qapi/qapi-types-migration.h"
> +#include "migration/blocker.h"
>  #include "migration/cpr.h"
> +#include "migration/options.h"
>  #include "migration/vmstate.h"
>  
>  #include "qemu/range.h"
> @@ -1903,6 +1906,14 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
>              qemu_mutex_unlock_ramlist();
>              goto out_free;
>          }
> +
> +        error_setg(&new_block->cpr_blocker,
> +                   "Memory region %s uses guest_memfd, "
> +                   "which is not supported with CPR.",
> +                   memory_region_name(new_block->mr));
> +        migrate_add_blocker_modes(&new_block->cpr_blocker, errp,
> +                                  MIG_MODE_CPR_TRANSFER,
> +                                  -1);
>      }
>  
>      ram_size = (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS;
> @@ -4094,3 +4105,58 @@ bool ram_block_discard_is_required(void)
>      return qatomic_read(&ram_block_discard_required_cnt) ||
>             qatomic_read(&ram_block_coordinated_discard_required_cnt);
>  }
> +
> +/*
> + * Return true if ram is compatible with CPR.  Do not exclude rom,
> + * because the rom file could change in new QEMU.
> + */
> +static bool ram_is_cpr_compatible(RAMBlock *rb)
> +{
> +    MemoryRegion *mr = rb->mr;
> +
> +    if (!mr || !memory_region_is_ram(mr)) {
> +        return true;
> +    }
> +
> +    /* Ram device is remapped in new QEMU */
> +    if (memory_region_is_ram_device(mr)) {
> +        return true;
> +    }
> +
> +    /*
> +     * A file descriptor is passed to new QEMU and remapped, or its backing
> +     * file is reopened and mapped.  It must be shared to avoid COW.
> +     */
> +    if (rb->fd >= 0 && qemu_ram_is_shared(rb)) {
> +        return true;
> +    }
> +
> +    return false;
> +}
> +
> +/*
> + * Add a blocker for each volatile ram block.  This function should only be
> + * called after we know that the block is migratable.  Non-migratable blocks
> + * are either re-created in new QEMU, or are handled specially, or are covered
> + * by a device-level CPR blocker.
> + */
> +void ram_block_add_cpr_blocker(RAMBlock *rb, Error **errp)
> +{
> +    assert(qemu_ram_is_migratable(rb));
> +
> +    if (ram_is_cpr_compatible(rb)) {
> +        return;
> +    }
> +
> +    error_setg(&rb->cpr_blocker,
> +               "Memory region %s is not compatible with CPR. share=on is "
> +               "required for memory-backend objects, and aux-ram-share=on is "
> +               "required.", memory_region_name(rb->mr));
> +    migrate_add_blocker_modes(&rb->cpr_blocker, errp, MIG_MODE_CPR_TRANSFER,
> +                              -1);
> +}
> +
> +void ram_block_del_cpr_blocker(RAMBlock *rb)
> +{
> +    migrate_del_blocker(&rb->cpr_blocker);
> +}
