> diff --git a/cpu-exec.c b/cpu-exec.c
> index dd0bd5007701..54c935039592 100644
> --- a/cpu-exec.c
> +++ b/cpu-exec.c
> @@ -295,7 +295,8 @@ static TranslationBlock *tb_find_slow(CPUState *cpu,
>
>          /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
>           * taken outside tb_lock. As system emulation is currently
> -         * single threaded the locks are NOPs.
> +         * single threaded the locks are NOPs. Both locks are to be
> +         * released at the end of tb_find_fast().
>           */
Even better: add a "bool *tb_locked" argument to tb_find_slow, and don't
move the mmap_lock release.  Then tb_find_fast knows directly whether
tb_lock is taken, and you don't need any of tb_lock_reset or
mmap_lock_reset.  (A rough sketch of this idea follows the quoted patch
below.)

Thanks,

Paolo

>          mmap_lock();
>          tb_lock();
> @@ -308,9 +309,6 @@ static TranslationBlock *tb_find_slow(CPUState *cpu,
>              /* if no translated code available, then translate it now */
>              tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
>          }
> -
> -        tb_unlock();
> -        mmap_unlock();
>      }
>
>      /* We add the TB in the virtual pc hash table for the fast lookup */
> @@ -354,10 +352,15 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu,
>  #endif
>      /* See if we can patch the calling TB. */
>      if (*last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
> -        tb_lock();
> +        if (!tb_lock_locked()) {
> +            tb_lock();
> +        }
>          tb_add_jump(*last_tb, tb_exit, tb);
> -        tb_unlock();
>      }
> +
> +    tb_lock_reset();
> +    mmap_lock_reset();
> +
>      return tb;
>  }
>
> --
> 1.9.1
>
>
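
For reference, a rough sketch of what that alternative could look like.
This only illustrates the suggestion above; it is not the posted patch nor
the code that was eventually merged.  The elided lookup code and helper
names such as tb_find_physical() and tb_jmp_cache_hash_func() are assumed
from the cpu-exec.c of that period, and "bool *tb_locked" is the parameter
named in the comment above.

/* Illustrative only: tb_find_slow() keeps tb_lock held when it had to
 * take it and reports that through *tb_locked; mmap_lock is still
 * released here, so its release does not move.
 */
static TranslationBlock *tb_find_slow(CPUState *cpu, target_ulong pc,
                                      target_ulong cs_base, uint32_t flags,
                                      bool *tb_locked)
{
    TranslationBlock *tb;

    tb = tb_find_physical(cpu, pc, cs_base, flags);
    if (!tb) {
        /* mmap_lock is needed by tb_gen_code, and mmap_lock must be
         * taken outside tb_lock.  tb_lock is left held for the caller.
         */
        mmap_lock();
        tb_lock();
        *tb_locked = true;

        tb = tb_find_physical(cpu, pc, cs_base, flags);
        if (!tb) {
            /* if no translated code available, then translate it now */
            tb = tb_gen_code(cpu, pc, cs_base, flags, 0);
        }

        mmap_unlock();
    }

    /* We add the TB in the virtual pc hash table for the fast lookup */
    cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}

static inline TranslationBlock *tb_find_fast(CPUState *cpu,
                                             TranslationBlock **last_tb,
                                             int tb_exit)
{
    target_ulong pc, cs_base;
    uint32_t flags;
    TranslationBlock *tb;
    bool tb_locked = false;

    /* pc/cs_base/flags setup and the tb_jmp_cache fast-path lookup are
     * elided; assume the fast path missed and we fall through to the
     * slow path.
     */
    tb = tb_find_slow(cpu, pc, cs_base, flags, &tb_locked);

    /* See if we can patch the calling TB. */
    if (*last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        if (!tb_locked) {
            tb_lock();
            tb_locked = true;
        }
        tb_add_jump(*last_tb, tb_exit, tb);
    }
    if (tb_locked) {
        tb_unlock();
    }
    return tb;
}

The point of this shape is that the lock state is passed explicitly between
the two functions, so tb_find_fast() ends with a plain tb_unlock() and
neither tb_lock_reset() nor mmap_lock_reset() is required.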