From: "Maciej S. Szmigiero" <maciej.szmigi...@oracle.com> load_finish SaveVMHandler allows migration code to poll whether a device-specific asynchronous device state loading operation had finished.
In order to avoid calling this handler needlessly the device is supposed
to notify the migration code of its possible readiness via a call to
qemu_loadvm_load_finish_ready_broadcast() while holding
qemu_loadvm_load_finish_ready_lock.

Signed-off-by: Maciej S. Szmigiero <maciej.szmigi...@oracle.com>
---
 include/migration/register.h | 21 +++++++++++++++
 migration/migration.c        |  6 +++++
 migration/migration.h        |  3 +++
 migration/savevm.c           | 52 ++++++++++++++++++++++++++++++++++++
 migration/savevm.h           |  4 +++
 5 files changed, 86 insertions(+)

diff --git a/include/migration/register.h b/include/migration/register.h
index 7d29b7e0b559..f15881fc87cd 100644
--- a/include/migration/register.h
+++ b/include/migration/register.h
@@ -272,6 +272,27 @@ typedef struct SaveVMHandlers {
     int (*load_state_buffer)(void *opaque, char *data, size_t data_size,
                              Error **errp);
 
+    /**
+     * @load_finish
+     *
+     * Poll whether all asynchronous device state loading has finished.
+     * Not called on the load failure path.
+     *
+     * Called while holding the qemu_loadvm_load_finish_ready_lock.
+     *
+     * If this method signals "not ready" then it might not be called
+     * again until qemu_loadvm_load_finish_ready_broadcast() is invoked
+     * while holding qemu_loadvm_load_finish_ready_lock.
+     *
+     * @opaque: data pointer passed to register_savevm_live()
+     * @is_finished: whether the loading has finished (output parameter)
+     * @errp: pointer to Error*, to store an error if it happens.
+     *
+     * Returns zero to indicate success and negative for error.
+     * It's not an error that the loading still hasn't finished.
+     */
+    int (*load_finish)(void *opaque, bool *is_finished, Error **errp);
+
     /**
      * @load_setup
      *
diff --git a/migration/migration.c b/migration/migration.c
index 8fe8be71a0e3..e4f82695a338 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -234,6 +234,9 @@ void migration_object_init(void)
     qemu_cond_init(&current_incoming->page_request_cond);
     current_incoming->page_requested = g_tree_new(page_request_addr_cmp);
 
+    g_mutex_init(&current_incoming->load_finish_ready_mutex);
+    g_cond_init(&current_incoming->load_finish_ready_cond);
+
     migration_object_check(current_migration, &error_fatal);
 
     blk_mig_init();
@@ -387,6 +390,9 @@ void migration_incoming_state_destroy(void)
         mis->postcopy_qemufile_dst = NULL;
     }
 
+    g_mutex_clear(&mis->load_finish_ready_mutex);
+    g_cond_clear(&mis->load_finish_ready_cond);
+
     yank_unregister_instance(MIGRATION_YANK_INSTANCE);
 }
 
diff --git a/migration/migration.h b/migration/migration.h
index a6114405917f..92014ef4cfcc 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -227,6 +227,9 @@ struct MigrationIncomingState {
      * is needed as this field is updated serially.
      */
     unsigned int switchover_ack_pending_num;
+
+    GCond load_finish_ready_cond;
+    GMutex load_finish_ready_mutex;
 };
 
 MigrationIncomingState *migration_incoming_get_current(void);
diff --git a/migration/savevm.c b/migration/savevm.c
index 2e4d63faca06..30521ad3f340 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -2994,6 +2994,37 @@ int qemu_loadvm_state(QEMUFile *f)
         return ret;
     }
 
+    qemu_loadvm_load_finish_ready_lock();
+    while (!ret) { /* Don't call load_finish() handlers on the load failure path */
+        bool all_ready = true;
+        SaveStateEntry *se = NULL;
+
+        QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+            bool this_ready;
+
+            if (!se->ops || !se->ops->load_finish) {
+                continue;
+            }
+
+            ret = se->ops->load_finish(se->opaque, &this_ready, &local_err);
+            if (ret) {
+                error_report_err(local_err);
+
+                qemu_loadvm_load_finish_ready_unlock();
+                return -EINVAL;
+            } else if (!this_ready) {
+                all_ready = false;
+            }
+        }
+
+        if (all_ready) {
+            break;
+        }
+
+        g_cond_wait(&mis->load_finish_ready_cond, &mis->load_finish_ready_mutex);
+    }
+    qemu_loadvm_load_finish_ready_unlock();
+
     if (ret == 0) {
         ret = qemu_file_get_error(f);
     }
@@ -3098,6 +3129,27 @@ int qemu_loadvm_load_state_buffer(const char *idstr, uint32_t instance_id,
     return 0;
 }
 
+void qemu_loadvm_load_finish_ready_lock(void)
+{
+    MigrationIncomingState *mis = migration_incoming_get_current();
+
+    g_mutex_lock(&mis->load_finish_ready_mutex);
+}
+
+void qemu_loadvm_load_finish_ready_unlock(void)
+{
+    MigrationIncomingState *mis = migration_incoming_get_current();
+
+    g_mutex_unlock(&mis->load_finish_ready_mutex);
+}
+
+void qemu_loadvm_load_finish_ready_broadcast(void)
+{
+    MigrationIncomingState *mis = migration_incoming_get_current();
+
+    g_cond_broadcast(&mis->load_finish_ready_cond);
+}
+
 bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
                    bool has_devices, strList *devices, Error **errp)
 {
diff --git a/migration/savevm.h b/migration/savevm.h
index c879ba8c970e..85e8b882bd37 100644
--- a/migration/savevm.h
+++ b/migration/savevm.h
@@ -73,4 +73,8 @@ int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
 int qemu_loadvm_load_state_buffer(const char *idstr, uint32_t instance_id,
                                   char *buf, size_t len, Error **errp);
 
+void qemu_loadvm_load_finish_ready_lock(void);
+void qemu_loadvm_load_finish_ready_unlock(void);
+void qemu_loadvm_load_finish_ready_broadcast(void);
+
 #endif
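
For reviewers, a rough, hypothetical sketch (not part of this patch) of how a
device side might wire this up: the device's asynchronous loading thread
publishes its result under the load-finish lock and broadcasts, while its
load_finish handler only reports the current state. The MyDevState / mydev_*
names below are invented purely for illustration; only the
qemu_loadvm_load_finish_ready_*() calls and the load_finish signature come
from this patch.

/*
 * Illustrative only; assumes the relevant migration and qapi/error.h
 * headers are visible to the device code.
 */
typedef struct MyDevState {
    bool load_done;   /* set by the loading thread when the async load completes */
    int load_ret;     /* result of the asynchronous load */
} MyDevState;

/* Runs in a device-owned thread performing the asynchronous state load */
static void *mydev_load_worker(void *opaque)
{
    MyDevState *s = opaque;
    int ret = 0;      /* ... perform the actual device state loading here ... */

    /*
     * Publish readiness under the load-finish lock, then wake up
     * qemu_loadvm_state() so it re-polls our load_finish handler.
     */
    qemu_loadvm_load_finish_ready_lock();
    s->load_ret = ret;
    s->load_done = true;
    qemu_loadvm_load_finish_ready_broadcast();
    qemu_loadvm_load_finish_ready_unlock();

    return NULL;
}

/* Called by migration code with qemu_loadvm_load_finish_ready_lock held */
static int mydev_load_finish(void *opaque, bool *is_finished, Error **errp)
{
    MyDevState *s = opaque;

    if (!s->load_done) {
        /* Not an error: the loading simply hasn't finished yet */
        *is_finished = false;
        return 0;
    }

    if (s->load_ret) {
        error_setg(errp, "mydev: asynchronous device state load failed");
        return s->load_ret;
    }

    *is_finished = true;
    return 0;
}

static SaveVMHandlers mydev_savevm_handlers = {
    /* ... other handlers ... */
    .load_finish = mydev_load_finish,
};

The broadcast is what lets qemu_loadvm_state() sleep in g_cond_wait() instead
of busy-polling every registered load_finish handler.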