On 26/06/2015 16:47, fred.kon...@greensocs.com wrote:
> From: KONRAD Frederic <fred.kon...@greensocs.com>
>
> This removes tcg_halt_cond global variable.
> We need one QemuCond per virtual cpu for multithread TCG.
>
> Signed-off-by: KONRAD Frederic <fred.kon...@greensocs.com>
> ---
>  cpus.c | 18 +++++++-----------
>  1 file changed, 7 insertions(+), 11 deletions(-)
>
> diff --git a/cpus.c b/cpus.c
> index 2d62a35..79383df 100644
> --- a/cpus.c
> +++ b/cpus.c
> @@ -813,7 +813,6 @@ static unsigned iothread_requesting_mutex;
>  static QemuThread io_thread;
>
>  static QemuThread *tcg_cpu_thread;
> -static QemuCond *tcg_halt_cond;
>
>  /* cpu creation */
>  static QemuCond qemu_cpu_cond;
> @@ -919,15 +918,13 @@ static void qemu_wait_io_event_common(CPUState *cpu)
>      cpu->thread_kicked = false;
>  }
>
> -static void qemu_tcg_wait_io_event(void)
> +static void qemu_tcg_wait_io_event(CPUState *cpu)
>  {
> -    CPUState *cpu;
> -
>      while (all_cpu_threads_idle()) {
>         /* Start accounting real time to the virtual clock if the CPUs
>            are idle.  */
>          qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
> -        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
> +        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
>      }
>
>      while (iothread_requesting_mutex) {
> @@ -1047,7 +1044,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>
>      /* wait for initial kick-off after machine start */
>      while (first_cpu->stopped) {
> -        qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
> +        qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
>
>          /* process any pending work */
>          CPU_FOREACH(cpu) {
> @@ -1068,7 +1065,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
>                  qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
>              }
>          }
> -        qemu_tcg_wait_io_event();
> +        qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
Does this work (for non-multithreaded TCG) if tcg_thread_fn is waiting
on the "wrong" condition variable? For example, if all CPUs are idle and
the second CPU wakes up, qemu_tcg_wait_io_event won't be kicked out of
the wait.

I think you need to have a CPUThread struct like this:

    struct CPUThread {
        QemuThread thread;
        QemuCond halt_cond;
    };

and in CPUState have a CPUThread * field instead of the thread and
halt_cond fields. Then single-threaded TCG can point all CPUStates to
the same instance of the struct, while multi-threaded TCG can point
each CPUState to a different struct.

Paolo

>      }
>
>      return NULL;
> @@ -1235,12 +1232,12 @@ static void qemu_tcg_init_vcpu(CPUState *cpu)
>
>      tcg_cpu_address_space_init(cpu, cpu->as);
>
> +    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
> +    qemu_cond_init(cpu->halt_cond);
> +
>      /* share a single thread for all cpus with TCG */
>      if (!tcg_cpu_thread) {
>          cpu->thread = g_malloc0(sizeof(QemuThread));
> -        cpu->halt_cond = g_malloc0(sizeof(QemuCond));
> -        qemu_cond_init(cpu->halt_cond);
> -        tcg_halt_cond = cpu->halt_cond;
>          snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
>                   cpu->cpu_index);
>          qemu_thread_create(cpu->thread, thread_name, qemu_tcg_cpu_thread_fn,
> @@ -1254,7 +1251,6 @@ static void qemu_tcg_init_vcpu(CPUState *cpu)
>          tcg_cpu_thread = cpu->thread;
>      } else {
>          cpu->thread = tcg_cpu_thread;
> -        cpu->halt_cond = tcg_halt_cond;
>      }