From: Sergey Fedorov <serge.f...@gmail.com>

Make the core CPU work functions common between system and user-mode
emulation. User-mode does not have the BQL, so process_queued_cpu_work()
is protected by 'exclusive_lock' instead.
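
For illustration only (not part of this patch; the work function and
its callers below are hypothetical), the now-common API is used the
same way in both emulation modes:

    /* Hypothetical work item: runs on the target vCPU's own thread
     * once that vCPU drains its work queue. */
    static void say_hello(CPUState *cpu, void *data)
    {
        printf("hello from vCPU %d: %s\n", cpu->cpu_index,
               (const char *)data);
    }

    /* From another thread: queue the item and block until it has run
     * on "cpu" (it runs inline if "cpu" is the calling CPU): */
    run_on_cpu(cpu, say_hello, (void *)"sync");
    /* Or queue a heap-allocated item and return immediately; it is
     * freed after it runs: */
    async_run_on_cpu(cpu, say_hello, (void *)"async");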
Signed-off-by: Sergey Fedorov <serge.f...@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedo...@linaro.org>
Reviewed-by: Alex Bennée <alex.ben...@linaro.org>
Signed-off-by: Alex Bennée <alex.ben...@linaro.org>
---
 cpu-exec-common.c       | 85 ++++++++++++++++++++++++++++++++++++++++++++++++
 cpus.c                  | 86 +------------------------------------------------
 include/exec/exec-all.h | 17 ++++++++++
 linux-user/main.c       |  8 +++++
 4 files changed, 111 insertions(+), 85 deletions(-)

diff --git a/cpu-exec-common.c b/cpu-exec-common.c
index 0cb4ae6..a233f01 100644
--- a/cpu-exec-common.c
+++ b/cpu-exec-common.c
@@ -77,3 +77,88 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
     }
     siglongjmp(cpu->jmp_env, 1);
 }
+
+QemuCond qemu_work_cond;
+
+static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
+{
+    qemu_mutex_lock(&cpu->work_mutex);
+    if (cpu->queued_work_first == NULL) {
+        cpu->queued_work_first = wi;
+    } else {
+        cpu->queued_work_last->next = wi;
+    }
+    cpu->queued_work_last = wi;
+    wi->next = NULL;
+    wi->done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
+
+    qemu_cpu_kick(cpu);
+}
+
+void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+{
+    struct qemu_work_item wi;
+
+    if (qemu_cpu_is_self(cpu)) {
+        func(cpu, data);
+        return;
+    }
+
+    wi.func = func;
+    wi.data = data;
+    wi.free = false;
+
+    queue_work_on_cpu(cpu, &wi);
+    while (!atomic_mb_read(&wi.done)) {
+        CPUState *self_cpu = current_cpu;
+
+        qemu_cond_wait(&qemu_work_cond, qemu_get_cpu_work_mutex());
+        current_cpu = self_cpu;
+    }
+}
+
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+{
+    struct qemu_work_item *wi;
+
+    if (qemu_cpu_is_self(cpu)) {
+        func(cpu, data);
+        return;
+    }
+
+    wi = g_malloc0(sizeof(struct qemu_work_item));
+    wi->func = func;
+    wi->data = data;
+    wi->free = true;
+
+    queue_work_on_cpu(cpu, wi);
+}
+
+void process_queued_cpu_work(CPUState *cpu)
+{
+    struct qemu_work_item *wi;
+
+    if (cpu->queued_work_first == NULL) {
+        return;
+    }
+
+    qemu_mutex_lock(&cpu->work_mutex);
+    while (cpu->queued_work_first != NULL) {
+        wi = cpu->queued_work_first;
+        cpu->queued_work_first = wi->next;
+        if (!cpu->queued_work_first) {
+            cpu->queued_work_last = NULL;
+        }
+        qemu_mutex_unlock(&cpu->work_mutex);
+        wi->func(cpu, wi->data);
+        qemu_mutex_lock(&cpu->work_mutex);
+        if (wi->free) {
+            g_free(wi);
+        } else {
+            atomic_mb_set(&wi->done, true);
+        }
+    }
+    qemu_mutex_unlock(&cpu->work_mutex);
+    qemu_cond_broadcast(&qemu_work_cond);
+}
diff --git a/cpus.c b/cpus.c
index 51fd8c1..282d7e3 100644
--- a/cpus.c
+++ b/cpus.c
@@ -896,7 +896,6 @@ static QemuThread io_thread;
 static QemuCond qemu_cpu_cond;
 /* system init */
 static QemuCond qemu_pause_cond;
-static QemuCond qemu_work_cond;
 
 void qemu_init_cpu_loop(void)
 {
@@ -910,66 +909,11 @@ void qemu_init_cpu_loop(void)
     qemu_thread_get_self(&io_thread);
 }
 
-static QemuMutex *qemu_get_cpu_work_mutex(void)
+QemuMutex *qemu_get_cpu_work_mutex(void)
 {
     return &qemu_global_mutex;
 }
 
-static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
-{
-    qemu_mutex_lock(&cpu->work_mutex);
-    if (cpu->queued_work_first == NULL) {
-        cpu->queued_work_first = wi;
-    } else {
-        cpu->queued_work_last->next = wi;
-    }
-    cpu->queued_work_last = wi;
-    wi->next = NULL;
-    wi->done = false;
-    qemu_mutex_unlock(&cpu->work_mutex);
-
-    qemu_cpu_kick(cpu);
-}
-
-void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
-{
-    struct qemu_work_item wi;
-
-    if (qemu_cpu_is_self(cpu)) {
-        func(cpu, data);
-        return;
-    }
-
-    wi.func = func;
-    wi.data = data;
-    wi.free = false;
-
-    queue_work_on_cpu(cpu, &wi);
-    while (!atomic_mb_read(&wi.done)) {
-        CPUState *self_cpu = current_cpu;
-
-        qemu_cond_wait(&qemu_work_cond, qemu_get_cpu_work_mutex());
-        current_cpu = self_cpu;
-    }
-}
-
-void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
-{
-    struct qemu_work_item *wi;
-
-    if (qemu_cpu_is_self(cpu)) {
-        func(cpu, data);
-        return;
-    }
-
-    wi = g_malloc0(sizeof(struct qemu_work_item));
-    wi->func = func;
-    wi->data = data;
-    wi->free = true;
-
-    queue_work_on_cpu(cpu, wi);
-}
-
 static void qemu_kvm_destroy_vcpu(CPUState *cpu)
 {
     if (kvm_destroy_vcpu(cpu) < 0) {
@@ -982,34 +926,6 @@ static void qemu_tcg_destroy_vcpu(CPUState *cpu)
 {
 }
 
-static void process_queued_cpu_work(CPUState *cpu)
-{
-    struct qemu_work_item *wi;
-
-    if (cpu->queued_work_first == NULL) {
-        return;
-    }
-
-    qemu_mutex_lock(&cpu->work_mutex);
-    while (cpu->queued_work_first != NULL) {
-        wi = cpu->queued_work_first;
-        cpu->queued_work_first = wi->next;
-        if (!cpu->queued_work_first) {
-            cpu->queued_work_last = NULL;
-        }
-        qemu_mutex_unlock(&cpu->work_mutex);
-        wi->func(cpu, wi->data);
-        qemu_mutex_lock(&cpu->work_mutex);
-        if (wi->free) {
-            g_free(wi);
-        } else {
-            atomic_mb_set(&wi->done, true);
-        }
-    }
-    qemu_mutex_unlock(&cpu->work_mutex);
-    qemu_cond_broadcast(&qemu_work_cond);
-}
-
 static void qemu_wait_io_event_common(CPUState *cpu)
 {
     if (cpu->stop) {
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index bc0bcc5..e4dfd3c 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -409,4 +409,21 @@ extern int singlestep;
 extern CPUState *tcg_current_cpu;
 extern bool exit_request;
 
+/**
+ * qemu_work_cond - condition to wait for CPU work items completion
+ */
+extern QemuCond qemu_work_cond;
+
+/**
+ * qemu_get_cpu_work_mutex() - get the mutex which protects CPU work execution
+ *
+ * Return: A pointer to the mutex.
+ */
+QemuMutex *qemu_get_cpu_work_mutex(void);
+/**
+ * process_queued_cpu_work() - process all items on CPU work queue
+ * @cpu: The CPU which work queue to process.
+ */
+void process_queued_cpu_work(CPUState *cpu);
+
 #endif
diff --git a/linux-user/main.c b/linux-user/main.c
index f5ddf96..13ac77d 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -124,6 +124,7 @@ void qemu_init_cpu_loop(void)
     qemu_mutex_init(&exclusive_lock);
     qemu_cond_init(&exclusive_cond);
     qemu_cond_init(&exclusive_resume);
+    qemu_cond_init(&qemu_work_cond);
 }
 
 /* Make sure everything is in a consistent state for calling fork(). */
@@ -152,6 +153,7 @@ void fork_end(int child)
         qemu_mutex_init(&cpu_list_mutex);
         qemu_cond_init(&exclusive_cond);
         qemu_cond_init(&exclusive_resume);
+        qemu_cond_init(&qemu_work_cond);
         qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
         gdbserver_fork(thread_cpu);
     } else {
@@ -160,6 +162,11 @@ void fork_end(int child)
     }
 }
 
+QemuMutex *qemu_get_cpu_work_mutex(void)
+{
+    return &exclusive_lock;
+}
+
 /* Wait for pending exclusive operations to complete.  The exclusive lock
    must be held.  */
 static inline void exclusive_idle(void)
@@ -218,6 +225,7 @@ static inline void cpu_exec_end(CPUState *cpu)
         qemu_cond_signal(&exclusive_cond);
     }
     exclusive_idle();
+    process_queued_cpu_work(cpu);
     qemu_mutex_unlock(&exclusive_lock);
 }
 
-- 
2.7.4