Like cpu_in_exclusive_context, but also true if there is no other cpu against which we could race.

Use it in tb_flush as a direct replacement.  Use it in
cpu_loop_exit_atomic to ensure that there is no loop against
cpu_exec_step_atomic.

Signed-off-by: Richard Henderson <richard.hender...@linaro.org>
---
 accel/tcg/internal.h        | 5 +++++
 accel/tcg/cpu-exec-common.c | 3 +++
 accel/tcg/tb-maint.c        | 2 +-
 3 files changed, 9 insertions(+), 1 deletion(-)

diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
index cb13bade4f..f06bf58e7a 100644
--- a/accel/tcg/internal.h
+++ b/accel/tcg/internal.h
@@ -119,4 +119,9 @@ static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
 #endif
 }
 
+static inline bool cpu_in_serial_context(CPUState *cs)
+{
+    return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
+}
+
 #endif /* ACCEL_TCG_INTERNAL_H */
diff --git a/accel/tcg/cpu-exec-common.c b/accel/tcg/cpu-exec-common.c
index c7bc8c6efa..2fb4454c7a 100644
--- a/accel/tcg/cpu-exec-common.c
+++ b/accel/tcg/cpu-exec-common.c
@@ -21,6 +21,7 @@
 #include "sysemu/cpus.h"
 #include "sysemu/tcg.h"
 #include "exec/exec-all.h"
+#include "internal.h"
 
 bool tcg_allowed;
 
@@ -78,6 +79,8 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
 
 void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
 {
+    /* Prevent looping if already executing in a serial context. */
+    g_assert(!cpu_in_serial_context(cpu));
     cpu->exception_index = EXCP_ATOMIC;
     cpu_loop_exit_restore(cpu, pc);
 }
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index 0cdb35548c..a7c067628c 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -119,7 +119,7 @@ void tb_flush(CPUState *cpu)
     if (tcg_enabled()) {
         unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
 
-        if (cpu_in_exclusive_context(cpu)) {
+        if (cpu_in_serial_context(cpu)) {
             do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
         } else {
             async_safe_run_on_cpu(cpu, do_tb_flush,
-- 
2.34.1
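
For context, the new assertion guards the retry protocol between
cpu_loop_exit_atomic and cpu_exec_step_atomic.  Below is a minimal
sketch of the caller-side pattern, not part of this patch: the helper
name do_emulated_atomic_op is hypothetical, and only
cpu_in_serial_context and cpu_loop_exit_atomic come from the code
above.

/*
 * Sketch only.  When a helper cannot perform an operation
 * atomically, it raises EXCP_ATOMIC via cpu_loop_exit_atomic().
 * The outer execution loop catches that and re-runs the single
 * instruction under cpu_exec_step_atomic(), i.e. in an exclusive
 * and therefore serial context.  Raising EXCP_ATOMIC again from
 * that context would loop forever; the g_assert() added above
 * turns such a bug into a clean abort instead.
 */
static void do_emulated_atomic_op(CPUState *cpu, uintptr_t retaddr)
{
    if (!cpu_in_serial_context(cpu)) {
        /* Other CPUs may race: retry this insn in exclusive mode. */
        cpu_loop_exit_atomic(cpu, retaddr);
    }
    /*
     * No other CPU can race with us here, either because we hold
     * the exclusive section or because CF_PARALLEL is clear, so
     * the operation may be performed non-atomically.
     */
}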