This patch introduces laio_alloc_resources() for allocating resources for Linux AIO, so that the following patches can allocate I/O resources on demand.
Signed-off-by: Ming Lei <ming....@canonical.com> --- block/linux-aio.c | 55 ++++++++++++++++++++++++++++++++++------------------- 1 file changed, 35 insertions(+), 20 deletions(-) diff --git a/block/linux-aio.c b/block/linux-aio.c index 5be8036..e3e0532 100644 --- a/block/linux-aio.c +++ b/block/linux-aio.c @@ -73,6 +73,7 @@ struct qemu_laio_state { /* All BS in the list shared this 'qemu_laio_state' */ QLIST_HEAD(, LaioTrackedBs) tracked_bs; + AioContext *aio_context; }; typedef struct { @@ -380,20 +381,45 @@ out_free_aiocb: return NULL; } -static LaioQueue *laio_alloc_ioq(AioContext *ctx, struct qemu_laio_state *s) +static int laio_alloc_resources(AioContext *ctx, + struct qemu_laio_state *s) { - LaioQueue *ioq = g_malloc0(sizeof(*ioq)); + LaioQueue *ioq; + if (io_setup(MAX_EVENTS, &s->ctx) != 0) { + return -1; + } + + ioq = g_malloc0(sizeof(*s->io_q)); ioq_init(ioq); ioq->retry = aio_bh_new(ctx, ioq_submit_retry, s); - return ioq; + + s->events = g_malloc(sizeof(*s->events) * MAX_EVENTS); + s->io_q = ioq; + + s->completion_bh = aio_bh_new(ctx, qemu_laio_completion_bh, s); + aio_set_event_notifier(ctx, &s->e, qemu_laio_completion_cb); + + return 0; } -static void laio_free_ioq(struct qemu_laio_state *s, LaioQueue *ioq) +static void laio_free_resources(struct qemu_laio_state *s) { + LaioQueue *ioq = s->io_q; + + aio_set_event_notifier(s->aio_context, &s->e, NULL); + qemu_bh_delete(s->completion_bh); + + g_free(s->events); + qemu_bh_delete(ioq->retry); g_free(ioq); s->io_q = NULL; + + if (io_destroy(s->ctx) != 0) { + fprintf(stderr, "%s: destroy AIO context %p failed\n", + __func__, &s->ctx); + } } static struct qemu_laio_state *laio_state_alloc(AioContext *context) @@ -405,15 +431,10 @@ static struct qemu_laio_state *laio_state_alloc(AioContext *context) goto out_free_state; } - if (io_setup(MAX_EVENTS, &s->ctx) != 0) { + if (laio_alloc_resources(context, s) != 0) { goto out_close_efd; } - s->events = g_malloc(sizeof(*s->events) * MAX_EVENTS); - s->io_q = 
laio_alloc_ioq(context, s); - s->completion_bh = aio_bh_new(context, qemu_laio_completion_bh, s); - aio_set_event_notifier(context, &s->e, qemu_laio_completion_cb); - return s; out_close_efd: @@ -425,17 +446,8 @@ out_free_state: static void laio_state_free(struct qemu_laio_state *s, AioContext *context) { - aio_set_event_notifier(context, &s->e, NULL); - qemu_bh_delete(s->completion_bh); - - laio_free_ioq(s, s->io_q); + laio_free_resources(s); event_notifier_cleanup(&s->e); - g_free(s->events); - - if (io_destroy(s->ctx) != 0) { - fprintf(stderr, "%s: destroy AIO context %p failed\n", - __func__, &s->ctx); - } g_free(s); } @@ -473,6 +485,9 @@ void laio_attach_aio_context(void *s_, BlockDriverState *bs, if (aio_attach_aio_bs(new_context, bs)) { new_context->opaque = qs->state = laio_state_alloc(new_context); + + /* qemu_laio_state is per AioContext */ + qs->state->aio_context = new_context; } else { qs->state = new_context->opaque; } -- 1.7.9.5