From: KONRAD Frederic <fred.kon...@greensocs.com>

spinlock is only used in two cases:
  * cpu-exec.c: to protect TranslationBlock
  * mem_helper.c: for the lock helper in target-i386 (which seems broken).

It's a pthread_mutex_t in user-mode, so it is better to use QemuMutex
directly in this case.  This also allows the tb_lock mutex of TBContext
to be reused for multithreaded TCG.

Signed-off-by: KONRAD Frederic <fred.kon...@greensocs.com>
---
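Note for reviewers: below is a minimal sketch of the locking pattern this
patch switches to. It only illustrates QEMU's qemu/thread.h API;
example_lock, example_init() and guarded_work() are made-up names for the
sketch, not part of the patch.

  #include "qemu/thread.h"

  static QemuMutex example_lock;   /* plays the role of tb_lock */

  static void example_init(void)
  {
      /* Unlike spinlock_t, which could be statically initialised with
         SPIN_LOCK_UNLOCKED, a QemuMutex needs an explicit init call. */
      qemu_mutex_init(&example_lock);
  }

  static void guarded_work(void)
  {
      qemu_mutex_lock(&example_lock);
      /* ... touch shared TranslationBlock state here ... */
      qemu_mutex_unlock(&example_lock);
  }

In user-mode builds qemu_mutex_lock() is a thin wrapper around
pthread_mutex_lock(), so no behavioural change is expected from the
substitution itself.
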
 cpu-exec.c               | 15 +++++++++++----
 include/exec/exec-all.h  |  4 ++--
 linux-user/main.c        |  6 +++---
 target-i386/mem_helper.c | 16 +++++++++++++---
 tcg/i386/tcg-target.c    |  8 ++++++++
 5 files changed, 37 insertions(+), 12 deletions(-)

diff --git a/cpu-exec.c b/cpu-exec.c
index 97805cc..f3358a9 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -361,7 +361,9 @@ int cpu_exec(CPUState *cpu)
     SyncClocks sc;
 
     /* This must be volatile so it is not trashed by longjmp() */
+#if defined(CONFIG_USER_ONLY)
     volatile bool have_tb_lock = false;
+#endif
 
     if (async_safe_work_pending()) {
         cpu->exit_request = 1;
@@ -488,8 +490,10 @@ int cpu_exec(CPUState *cpu)
                     cpu->exception_index = EXCP_INTERRUPT;
                     cpu_loop_exit(cpu);
                 }
-                spin_lock(&tcg_ctx.tb_ctx.tb_lock);
+#if defined(CONFIG_USER_ONLY)
+                qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
                 have_tb_lock = true;
+#endif
                 tb = tb_find_fast(cpu);
                 /* Note: we do it here to avoid a gcc bug on Mac OS X when
                    doing it in tb_find_slow */
@@ -511,9 +515,10 @@ int cpu_exec(CPUState *cpu)
                     tb_add_jump((TranslationBlock *)(next_tb & ~TB_EXIT_MASK),
                                 next_tb & TB_EXIT_MASK, tb);
                 }
+#if defined(CONFIG_USER_ONLY)
                 have_tb_lock = false;
-                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
-
+                qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+#endif
                 /* cpu_interrupt might be called while translating the
                    TB, but before it is linked into a potentially
                    infinite loop and becomes env->current_tb. Avoid
@@ -580,10 +585,12 @@ int cpu_exec(CPUState *cpu)
             x86_cpu = X86_CPU(cpu);
             env = &x86_cpu->env;
 #endif
+#if defined(CONFIG_USER_ONLY)
             if (have_tb_lock) {
-                spin_unlock(&tcg_ctx.tb_ctx.tb_lock);
+                qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
                 have_tb_lock = false;
             }
+#endif
         }
     } /* for(;;) */
 
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index a6fce04..55a6ff2 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -176,7 +176,7 @@ struct TranslationBlock {
     struct TranslationBlock *jmp_first;
 };
 
-#include "exec/spinlock.h"
+#include "qemu/thread.h"
 
 typedef struct TBContext TBContext;
 
@@ -186,7 +186,7 @@ struct TBContext {
     TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
     int nb_tbs;
     /* any access to the tbs or the page table must use this lock */
-    spinlock_t tb_lock;
+    QemuMutex tb_lock;
 
     /* statistics */
     int tb_flush_count;
diff --git a/linux-user/main.c b/linux-user/main.c
index 05914b1..20e7199 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -107,7 +107,7 @@ static int pending_cpus;
 /* Make sure everything is in a consistent state for calling fork().  */
 void fork_start(void)
 {
-    pthread_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
+    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
     pthread_mutex_lock(&exclusive_lock);
     mmap_fork_start();
 }
@@ -129,11 +129,11 @@ void fork_end(int child)
         pthread_mutex_init(&cpu_list_mutex, NULL);
         pthread_cond_init(&exclusive_cond, NULL);
         pthread_cond_init(&exclusive_resume, NULL);
-        pthread_mutex_init(&tcg_ctx.tb_ctx.tb_lock, NULL);
+        qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
         gdbserver_fork(thread_cpu);
     } else {
         pthread_mutex_unlock(&exclusive_lock);
-        pthread_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
     }
 }
 
diff --git a/target-i386/mem_helper.c b/target-i386/mem_helper.c
index 1aec8a5..7106cc3 100644
--- a/target-i386/mem_helper.c
+++ b/target-i386/mem_helper.c
@@ -23,17 +23,27 @@
 
 /* broken thread support */
 
-static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
+#if defined(CONFIG_USER_ONLY)
+QemuMutex global_cpu_lock;
 
 void helper_lock(void)
 {
-    spin_lock(&global_cpu_lock);
+    qemu_mutex_lock(&global_cpu_lock);
 }
 
 void helper_unlock(void)
 {
-    spin_unlock(&global_cpu_lock);
+    qemu_mutex_unlock(&global_cpu_lock);
 }
+#else
+void helper_lock(void)
+{
+}
+
+void helper_unlock(void)
+{
+}
+#endif
 
 void helper_cmpxchg8b(CPUX86State *env, target_ulong a0)
 {
diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c
index ff4d9cf..0d7c99c 100644
--- a/tcg/i386/tcg-target.c
+++ b/tcg/i386/tcg-target.c
@@ -24,6 +24,10 @@
 
 #include "tcg-be-ldst.h"
 
+#if defined(CONFIG_USER_ONLY)
+extern QemuMutex global_cpu_lock;
+#endif
+
 #ifndef NDEBUG
 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
 #if TCG_TARGET_REG_BITS == 64
@@ -2342,6 +2346,10 @@ static void tcg_target_init(TCGContext *s)
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
 
     tcg_add_target_add_op_defs(x86_op_defs);
+
+#if defined(CONFIG_USER_ONLY)
+    qemu_mutex_init(&global_cpu_lock);
+#endif
 }
 
 typedef struct {
-- 
1.9.0