Signed-off-by: Paolo Bonzini <pbonzini@redhat.com> --- cpus-common.c | 25 +++++++++++++++++++++++++ include/qom/cpu.h | 12 ++++++++++++ 2 files changed, 37 insertions(+)
diff --git a/cpus-common.c b/cpus-common.c index 59c8dc8..88cf5ec 100644 --- a/cpus-common.c +++ b/cpus-common.c @@ -144,6 +144,11 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data) queue_work_on_cpu(cpu, wi); } +typedef struct SafeWorkItem { + run_on_cpu_func func; + void *data; +} SafeWorkItem; + /* Wait for pending exclusive operations to complete. The exclusive lock must be held. */ static inline void exclusive_idle(void) @@ -208,6 +213,26 @@ void cpu_exec_end(CPUState *cpu) qemu_mutex_unlock(&qemu_cpu_list_mutex); } +static void async_safe_run_on_cpu_fn(CPUState *cpu, void *data) +{ + SafeWorkItem *w = data; + + start_exclusive(); + w->func(cpu, w->data); + end_exclusive(); + g_free(w); +} + +void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data) +{ + SafeWorkItem *w = g_new(SafeWorkItem, 1); + + w->func = func; + w->data = data; + + async_run_on_cpu(cpu, async_safe_run_on_cpu_fn, w); +} + void process_queued_cpu_work(CPUState *cpu) { struct qemu_work_item *wi; diff --git a/include/qom/cpu.h b/include/qom/cpu.h index 0e04e8f..54a875e 100644 --- a/include/qom/cpu.h +++ b/include/qom/cpu.h @@ -663,6 +663,18 @@ void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data); void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data); /** + * async_safe_run_on_cpu: + * @cpu: The vCPU to run on. + * @func: The function to be executed. + * @data: Data to pass to the function. + * + * Schedules the function @func for execution on the vCPU @cpu asynchronously, + * while all other vCPUs are sleeping. @func is called with the CPU list lock + * taken (and for system emulation the BQL); any other lock can be taken safely. + */ +void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data); + +/** * qemu_get_cpu: * @index: The CPUState@cpu_index value of the CPU to obtain. * -- 2.7.4