Add a QEMUPollContext parameter to qemu_poll_ns() so that it can save
state in the context in the future.

Signed-off-by: Fam Zheng <f...@redhat.com>
---
 aio-posix.c          | 2 +-
 include/block/aio.h  | 2 ++
 include/qemu/timer.h | 7 ++++++-
 main-loop.c          | 7 +++++--
 qemu-timer.c         | 3 ++-
 5 files changed, 16 insertions(+), 5 deletions(-)
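Note (illustration only, not part of the patch): once every caller hands
qemu_poll_ns() a QEMUPollContext, the poll implementation is free to keep
per-caller state between calls without touching the call sites again. A
minimal standalone sketch of that pattern follows; names with an _example
suffix are hypothetical and not QEMU APIs, and the "cached fd" member is
only an assumed example of future state.

/* Standalone sketch, not QEMU code.  The context starts out unused,
 * mirroring the empty QEMUPollContext introduced here; a later change
 * could cache state in it (for instance a reusable fd) without
 * modifying any caller. */
#include <poll.h>

typedef struct {
    int cached_fd_example;    /* hypothetical future state */
} PollContextExample;

static int poll_ns_example(PollContextExample *ctx, struct pollfd *fds,
                           nfds_t nfds, long long timeout_ns)
{
    (void)ctx;                /* unused today, reserved for future state */
    int timeout_ms = timeout_ns < 0 ? -1 : (int)(timeout_ns / 1000000LL);
    return poll(fds, nfds, timeout_ms);
}

The aio_poll() and main-loop changes below are exactly this threading step
applied to QEMU's existing callers.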
diff --git a/aio-posix.c b/aio-posix.c
index d3ac06e..ca80ca4 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -228,7 +228,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     ctx->walking_handlers--;
 
     /* wait until next event */
-    ret = qemu_poll_ns((GPollFD *)ctx->pollfds->data,
+    ret = qemu_poll_ns(&ctx->poll_ctx, (GPollFD *)ctx->pollfds->data,
                        ctx->pollfds->len,
                        blocking ? aio_compute_timeout(ctx) : 0);
 
diff --git a/include/block/aio.h b/include/block/aio.h
index 6bf0e04..637c95e 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -90,6 +90,8 @@ struct AioContext {
 
     /* TimerLists for calling timers - one per clock type */
     QEMUTimerListGroup tlg;
+
+    QEMUPollContext poll_ctx;
 };
 
 /* Used internally to synchronize aio_poll against qemu_bh_schedule. */
diff --git a/include/qemu/timer.h b/include/qemu/timer.h
index 5f5210d..be2a4a3 100644
--- a/include/qemu/timer.h
+++ b/include/qemu/timer.h
@@ -645,8 +645,12 @@ void timer_put(QEMUFile *f, QEMUTimer *ts);
  */
 int qemu_timeout_ns_to_ms(int64_t ns);
 
+typedef struct {
+} QEMUPollContext;
+
 /**
  * qemu_poll_ns:
+ * @ctx: Poll context we are using
  * @fds: Array of file descriptors
  * @nfds: number of file descriptors
  * @timeout: timeout in nanoseconds
@@ -656,7 +660,8 @@ int qemu_timeout_ns_to_ms(int64_t ns);
  *
  * Returns: number of fds ready
  */
-int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout);
+int qemu_poll_ns(QEMUPollContext *ctx, GPollFD *fds,
+                 guint nfds, int64_t timeout);
 
 /**
  * qemu_soonest_timeout:
diff --git a/main-loop.c b/main-loop.c
index d2e64f1..ab2c0df 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -113,6 +113,7 @@ static int qemu_signal_init(void)
 #endif
 
 static AioContext *qemu_aio_context;
+static QEMUPollContext qemu_poll_context;
 
 AioContext *qemu_get_aio_context(void)
 {
@@ -234,7 +235,8 @@ static int os_host_main_loop_wait(int64_t timeout)
         spin_counter++;
     }
 
-    ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);
+    ret = qemu_poll_ns(&qemu_poll_context, (GPollFD *)gpollfds->data,
+                       gpollfds->len, timeout);
 
     if (timeout) {
         qemu_mutex_lock_iothread();
@@ -439,7 +441,8 @@ static int os_host_main_loop_wait(int64_t timeout)
     poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout);
 
     qemu_mutex_unlock_iothread();
-    g_poll_ret = qemu_poll_ns(poll_fds, n_poll_fds + w->num, poll_timeout_ns);
+    g_poll_ret = qemu_poll_ns(&qemu_poll_context,
+                              poll_fds, n_poll_fds + w->num, poll_timeout_ns);
 
     qemu_mutex_lock_iothread();
     if (g_poll_ret > 0) {
diff --git a/qemu-timer.c b/qemu-timer.c
index 00a5d35..fe78fdf 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -307,7 +307,8 @@ int qemu_timeout_ns_to_ms(int64_t ns)
 /* qemu implementation of g_poll which uses a nanosecond timeout but is
  * otherwise identical to g_poll
  */
-int qemu_poll_ns(GPollFD *fds, guint nfds, int64_t timeout)
+int qemu_poll_ns(QEMUPollContext *ctx, GPollFD *fds,
+                 guint nfds, int64_t timeout)
 {
 #ifdef CONFIG_PPOLL
     if (timeout < 0) {
-- 
1.9.3