I tried to use async_run_on_cpu, but it then deadlocked at a very early
stage of the stress test. The problem is likely that flush_queued_work()
runs with the global mutex still locked.
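To illustrate the cycle I suspect, here is a standalone pthreads sketch
(not the actual QEMU code; thread A stands for the vCPU draining its work
queue with the global mutex held, thread B for the vCPU the work item
waits on):

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
static volatile bool b_halted;

/* Stand-in for a queued "safe" work item that needs vCPU B halted. */
static void queued_work(void)
{
    while (!b_halted) {
        usleep(1000);                  /* spins forever: B never halts */
    }
}

/* flush_queued_work() analogue: drains work with the mutex still held. */
static void *vcpu_a(void *arg)
{
    pthread_mutex_lock(&global_mutex);
    queued_work();
    pthread_mutex_unlock(&global_mutex);
    return NULL;
}

/* vCPU B can never halt: it is stuck waiting for the global mutex. */
static void *vcpu_b(void *arg)
{
    pthread_mutex_lock(&global_mutex); /* blocks: A holds the mutex */
    b_halted = true;                   /* never reached */
    pthread_mutex_unlock(&global_mutex);
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, vcpu_a, NULL);
    usleep(100 * 1000);                /* let A take the mutex first */
    pthread_create(&b, NULL, vcpu_b, NULL);
    pthread_join(a, NULL);             /* never returns */
    pthread_join(b, NULL);
    return 0;
}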
As Frederic suggested, we can use the newly introduced
flush_queued_safe_work for this.

Regards,
alvise

On Fri, Jul 10, 2015 at 11:36 AM, Paolo Bonzini <pbonz...@redhat.com> wrote:
>
>
> On 10/07/2015 10:23, Alvise Rigo wrote:
>> In order to perform "lazy" TLB invalidation requests, introduce a
>> queue of callbacks at every vCPU's disposal that will be fired just
>> before entering the next TB.
>>
>> Suggested-by: Jani Kokkonen <jani.kokko...@huawei.com>
>> Suggested-by: Claudio Fontana <claudio.font...@huawei.com>
>> Signed-off-by: Alvise Rigo <a.r...@virtualopensystems.com>
>
> Why is async_run_on_cpu not enough?
>
> Paolo
>
>> ---
>>  cpus.c            | 34 ++++++++++++++++++++++++++++++++++
>>  exec.c            |  1 +
>>  include/qom/cpu.h | 20 ++++++++++++++++++++
>>  3 files changed, 55 insertions(+)
>>
>> diff --git a/cpus.c b/cpus.c
>> index f4d938e..b9f0329 100644
>> --- a/cpus.c
>> +++ b/cpus.c
>> @@ -1421,6 +1421,7 @@ static int tcg_cpu_exec(CPUArchState *env)
>>              cpu->icount_extra = count;
>>          }
>>          qemu_mutex_unlock_iothread();
>> +        cpu_exit_callbacks_call_all(cpu);
>>          ret = cpu_exec(env);
>>          cpu->tcg_executing = 0;
>>
>> @@ -1469,6 +1470,39 @@ static void tcg_exec_all(CPUState *cpu)
>>      cpu->exit_request = 0;
>>  }
>>
>> +void cpu_exit_callback_add(CPUState *cpu, CPUExitCallback callback,
>> +                           void *opaque)
>> +{
>> +    CPUExitCB *cb;
>> +
>> +    cb = g_malloc(sizeof(*cb));
>> +    cb->callback = callback;
>> +    cb->opaque = opaque;
>> +
>> +    qemu_mutex_lock(&cpu->exit_cbs.mutex);
>> +    QTAILQ_INSERT_TAIL(&cpu->exit_cbs.exit_callbacks, cb, entry);
>> +    qemu_mutex_unlock(&cpu->exit_cbs.mutex);
>> +}
>> +
>> +void cpu_exit_callbacks_call_all(CPUState *cpu)
>> +{
>> +    CPUExitCB *cb, *next;
>> +
>> +    if (QTAILQ_EMPTY(&cpu->exit_cbs.exit_callbacks)) {
>> +        return;
>> +    }
>> +
>> +    QTAILQ_FOREACH_SAFE(cb, &cpu->exit_cbs.exit_callbacks, entry, next) {
>> +        cb->callback(cpu, cb->opaque);
>> +
>> +        /* one-shot callbacks, remove them after use */
>> +        qemu_mutex_lock(&cpu->exit_cbs.mutex);
>> +        QTAILQ_REMOVE(&cpu->exit_cbs.exit_callbacks, cb, entry);
>> +        g_free(cb);
>> +        qemu_mutex_unlock(&cpu->exit_cbs.mutex);
>> +    }
>> +}
>> +
>>  void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
>>  {
>>      /* XXX: implement xxx_cpu_list for targets that still miss it */
>> diff --git a/exec.c b/exec.c
>> index 51958ed..322f2c6 100644
>> --- a/exec.c
>> +++ b/exec.c
>> @@ -531,6 +531,7 @@ void cpu_exec_init(CPUArchState *env)
>>      cpu->numa_node = 0;
>>      QTAILQ_INIT(&cpu->breakpoints);
>>      QTAILQ_INIT(&cpu->watchpoints);
>> +    QTAILQ_INIT(&cpu->exit_cbs.exit_callbacks);
>>  #ifndef CONFIG_USER_ONLY
>>      cpu->as = &address_space_memory;
>>      cpu->thread_id = qemu_get_thread_id();
>> diff --git a/include/qom/cpu.h b/include/qom/cpu.h
>> index 8d121b3..0ec020b 100644
>> --- a/include/qom/cpu.h
>> +++ b/include/qom/cpu.h
>> @@ -201,6 +201,24 @@ typedef struct CPUWatchpoint {
>>      QTAILQ_ENTRY(CPUWatchpoint) entry;
>>  } CPUWatchpoint;
>>
>> +/* vCPU exit callbacks */
>> +typedef void (*CPUExitCallback)(CPUState *cpu, void *opaque);
>> +struct CPUExitCBs {
>> +    QemuMutex mutex;
>> +    QTAILQ_HEAD(exit_callbacks_head, CPUExitCB) exit_callbacks;
>> +};
>> +
>> +typedef struct CPUExitCB {
>> +    CPUExitCallback callback;
>> +    void *opaque;
>> +
>> +    QTAILQ_ENTRY(CPUExitCB) entry;
>> +} CPUExitCB;
>> +
>> +void cpu_exit_callback_add(CPUState *cpu, CPUExitCallback callback,
>> +                           void *opaque);
>> +void cpu_exit_callbacks_call_all(CPUState *cpu);
>> +
>>  /* Rendezvous support */
>>  #define TCG_RDV_POLLING_PERIOD 10
>>  typedef struct CpuExitRendezvous {
>> @@ -305,6 +323,8 @@ struct CPUState {
>>
>>      void *opaque;
>>
>> +    /* One-shot callbacks for stopping requests. */
>> +    struct CPUExitCBs exit_cbs;
>>      volatile int pending_rdv;
>>
>>      /* In order to avoid passing too many arguments to the MMIO helpers,
>>
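P.S.: to make the intended use of the new hooks concrete, a requester
could queue a lazy flush against another vCPU roughly like the sketch
below. request_lazy_tlb_flush and lazy_tlb_flush_cb are invented names
for illustration; only tlb_flush_page() and cpu_exit() are existing
helpers.

static void lazy_tlb_flush_cb(CPUState *cpu, void *opaque)
{
    target_ulong addr = (target_ulong)(uintptr_t)opaque;

    /* Runs in cpu's own thread, just before the next TB is entered. */
    tlb_flush_page(cpu, addr);
}

static void request_lazy_tlb_flush(CPUState *target_cpu, target_ulong addr)
{
    cpu_exit_callback_add(target_cpu, lazy_tlb_flush_cb,
                          (void *)(uintptr_t)addr);
    cpu_exit(target_cpu);   /* kick it out of its current TB */
}

The callback is then fired by the cpu_exit_callbacks_call_all() call
added to tcg_cpu_exec() above, i.e. in the target vCPU's own thread,
which is what makes the invalidation "lazy".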