This patch allocates 'struct qemu_laio_state' in aio attach and frees it in aio detach, so that a following patch can share a single instance of the structure among multiple linux-aio backends in the same AioContext.
Signed-off-by: Ming Lei <ming....@canonical.com> --- block/linux-aio.c | 80 +++++++++++++++++++++++++++++++++++------------------ 1 file changed, 53 insertions(+), 27 deletions(-) diff --git a/block/linux-aio.c b/block/linux-aio.c index cf8691e..5fa3c1e 100644 --- a/block/linux-aio.c +++ b/block/linux-aio.c @@ -52,6 +52,7 @@ typedef struct { QEMUBH *retry; } LaioQueue; +/* lifetime: between aio_attach and aio_detach */ struct qemu_laio_state { io_context_t ctx; EventNotifier e; @@ -66,6 +67,10 @@ struct qemu_laio_state { int event_max; }; +typedef struct { + struct qemu_laio_state *state; +} QemuLaioState; + static inline ssize_t io_event_ret(struct io_event *ev) { return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res); @@ -277,14 +282,16 @@ static int ioq_enqueue(struct qemu_laio_state *s, struct iocb *iocb) void laio_io_plug(BlockDriverState *bs, void *aio_ctx) { - struct qemu_laio_state *s = aio_ctx; + QemuLaioState *qs = aio_ctx; + struct qemu_laio_state *s = qs->state; s->io_q->plugged++; } int laio_io_unplug(BlockDriverState *bs, void *aio_ctx, bool unplug) { - struct qemu_laio_state *s = aio_ctx; + QemuLaioState *qs = aio_ctx; + struct qemu_laio_state *s = qs->state; int ret = 0; assert(s->io_q->plugged > 0 || !unplug); @@ -304,7 +311,8 @@ BlockAIOCB *laio_submit(BlockDriverState *bs, void *aio_ctx, int fd, int64_t sector_num, QEMUIOVector *qiov, int nb_sectors, BlockCompletionFunc *cb, void *opaque, int type) { - struct qemu_laio_state *s = aio_ctx; + QemuLaioState *qs = aio_ctx; + struct qemu_laio_state *s = qs->state; struct qemu_laiocb *laiocb; struct iocb *iocbs; off_t offset = sector_num * 512; @@ -380,27 +388,7 @@ static void laio_free_ioq(struct qemu_laio_state *s, LaioQueue *ioq) s->io_q = NULL; } -void laio_detach_aio_context(void *s_, AioContext *old_context) -{ - struct qemu_laio_state *s = s_; - - aio_set_event_notifier(old_context, &s->e, NULL); - qemu_bh_delete(s->completion_bh); - - laio_free_ioq(s, s->io_q); -} - -void 
laio_attach_aio_context(void *s_, AioContext *new_context) -{ - struct qemu_laio_state *s = s_; - - s->io_q = laio_alloc_ioq(new_context, s); - - s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s); - aio_set_event_notifier(new_context, &s->e, qemu_laio_completion_cb); -} - -void *laio_init(void) +static struct qemu_laio_state *laio_state_alloc(void) { struct qemu_laio_state *s; @@ -422,10 +410,8 @@ out_free_state: return NULL; } -void laio_cleanup(void *s_) +static void laio_state_free(struct qemu_laio_state *s) { - struct qemu_laio_state *s = s_; - event_notifier_cleanup(&s->e); if (io_destroy(s->ctx) != 0) { @@ -434,3 +420,43 @@ void laio_cleanup(void *s_) } g_free(s); } + +void laio_detach_aio_context(void *s_, AioContext *old_context) +{ + QemuLaioState *qs = s_; + struct qemu_laio_state *s = qs->state; + + aio_set_event_notifier(old_context, &s->e, NULL); + qemu_bh_delete(s->completion_bh); + + laio_free_ioq(s, s->io_q); + laio_state_free(s); + qs->state = NULL; +} + +void laio_attach_aio_context(void *s_, AioContext *new_context) +{ + QemuLaioState *qs = s_; + struct qemu_laio_state *s = laio_state_alloc(); + + s->io_q = laio_alloc_ioq(new_context, s); + + s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s); + aio_set_event_notifier(new_context, &s->e, qemu_laio_completion_cb); + + qs->state = s; +} + +void *laio_init(void) +{ + QemuLaioState *s = g_malloc0(sizeof(*s)); + + return s; +} + +void laio_cleanup(void *s_) +{ + QemuLaioState *s = s_; + + g_free(s); +} -- 1.7.9.5