From: Sergey Fedorov <serge.f...@gmail.com> Signed-off-by: Sergey Fedorov <serge.f...@gmail.com> Signed-off-by: Sergey Fedorov <sergey.fedo...@linaro.org> Reviewed-by: Alex Bennée <alex.ben...@linaro.org> Message-Id: <20160715175852.30749-11-sergey.fedo...@linaro.org> Signed-off-by: Paolo Bonzini <pbonz...@redhat.com> --- cpu-exec.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/cpu-exec.c b/cpu-exec.c index 97e18f7..b1279d2 100644 --- a/cpu-exec.c +++ b/cpu-exec.c @@ -281,7 +281,8 @@ static TranslationBlock *tb_find_physical(CPUState *cpu, static TranslationBlock *tb_find_slow(CPUState *cpu, target_ulong pc, target_ulong cs_base, - uint32_t flags) + uint32_t flags, + bool *have_tb_lock) { TranslationBlock *tb; @@ -294,6 +295,7 @@ static TranslationBlock *tb_find_slow(CPUState *cpu, */ mmap_lock(); tb_lock(); + *have_tb_lock = true; /* There's a chance that our desired tb has been translated while * taking the locks so we check again inside the lock. */ @@ -304,7 +306,6 @@ static TranslationBlock *tb_find_slow(CPUState *cpu, tb = tb_gen_code(cpu, pc, cs_base, flags, 0); } - tb_unlock(); mmap_unlock(); } @@ -321,6 +322,7 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu, TranslationBlock *tb; target_ulong cs_base, pc; uint32_t flags; + bool have_tb_lock = false; /* we record a subset of the CPU state. It will always be the same before a given translated block @@ -329,8 +331,8 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu, tb = atomic_read(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)]); if (unlikely(!tb || atomic_read(&tb->pc) != pc || atomic_read(&tb->cs_base) != cs_base || - atomic_read(&tb->flags) != flags)) { - tb = tb_find_slow(cpu, pc, cs_base, flags); + atomic_read(&tb->flags) != flags)) { + tb = tb_find_slow(cpu, pc, cs_base, flags, &have_tb_lock); } #ifndef CONFIG_USER_ONLY /* We don't take care of direct jumps when address mapping changes in @@ -343,13 +345,18 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu, #endif /* See if we can patch the calling TB. 
*/ if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { - tb_lock(); + if (!have_tb_lock) { + tb_lock(); + have_tb_lock = true; + } /* Check if translation buffer has been flushed */ if (cpu->tb_flushed) { cpu->tb_flushed = false; } else if (!tb_is_invalid(tb)) { tb_add_jump(last_tb, tb_exit, tb); } + } + if (have_tb_lock) { tb_unlock(); } return tb; -- 2.7.4