Paolo Bonzini <pbonz...@redhat.com> writes:

> With this change async.c does not rely anymore on any service from
> main-loop.c, i.e. it is completely self-contained.
>
> Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
Other than the coding style bits that need to be fixed first in the
previous patch:

Reviewed-by: Anthony Liguori <aligu...@us.ibm.com>

Regards,

Anthony Liguori

> ---
>  async.c    | 30 ++++++++++++++++++++++++++----
>  qemu-aio.h | 18 ++++++++++++++++++
>  2 files changed, 44 insertions(+), 4 deletions(-)
>
> diff --git a/async.c b/async.c
> index ed2bd3f..31c6c76 100644
> --- a/async.c
> +++ b/async.c
> @@ -30,6 +30,7 @@
>  /* bottom halves (can be seen as timers which expire ASAP) */
>
>  struct QEMUBH {
> +    AioContext *ctx;
>      QEMUBHFunc *cb;
>      void *opaque;
>      QEMUBH *next;
> @@ -42,6 +43,7 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
>  {
>      QEMUBH *bh;
>      bh = g_malloc0(sizeof(QEMUBH));
> +    bh->ctx = ctx;
>      bh->cb = cb;
>      bh->opaque = opaque;
>      bh->next = ctx->first_bh;
> @@ -101,8 +103,7 @@ void qemu_bh_schedule(QEMUBH *bh)
>          return;
>      bh->scheduled = 1;
>      bh->idle = 0;
> -    /* stop the currently executing CPU to execute the BH ASAP */
> -    qemu_notify_event();
> +    aio_notify(bh->ctx);
>  }
>
>  void qemu_bh_cancel(QEMUBH *bh)
> @@ -177,11 +178,20 @@ aio_ctx_dispatch(GSource     *source,
>      return TRUE;
>  }
>
> +static void
> +aio_ctx_finalize(GSource     *source)
> +{
> +    AioContext *ctx = (AioContext *) source;
> +
> +    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
> +    event_notifier_cleanup(&ctx->notifier);
> +}
> +
>  static GSourceFuncs aio_source_funcs = {
>      aio_ctx_prepare,
>      aio_ctx_check,
>      aio_ctx_dispatch,
> -    NULL
> +    aio_ctx_finalize
>  };
>
>  GSource *aio_get_g_source(AioContext *ctx)
> @@ -190,9 +200,21 @@ GSource *aio_get_g_source(AioContext *ctx)
>      return &ctx->source;
>  }
>
> +void aio_notify(AioContext *ctx)
> +{
> +    event_notifier_set(&ctx->notifier);
> +}
> +
>  AioContext *aio_context_new(void)
>  {
> -    return (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
> +    AioContext *ctx;
> +    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
> +    event_notifier_init(&ctx->notifier, false);
> +    aio_set_event_notifier(ctx, &ctx->notifier,
> +                           (EventNotifierHandler *)
> +                           event_notifier_test_and_clear, NULL);
> +
> +    return ctx;
>  }
>
>  void aio_context_ref(AioContext *ctx)
> diff --git a/qemu-aio.h b/qemu-aio.h
> index aedf66c..2354617 100644
> --- a/qemu-aio.h
> +++ b/qemu-aio.h
> @@ -62,6 +62,9 @@ typedef struct AioContext {
>       * no callbacks are removed while we're walking and dispatching
>       * callbacks.
>       */
>      int walking_bh;
> +
> +    /* Used for aio_notify.  */
> +    EventNotifier notifier;
>  } AioContext;
>
>  /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
> @@ -102,6 +105,21 @@ void aio_context_unref(AioContext *ctx);
>  QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
>
>  /**
> + * aio_notify: Force processing of pending events.
> + *
> + * Similar to signaling a condition variable, aio_notify forces
> + * aio_wait to exit, so that the next call will re-examine pending events.
> + * The caller of aio_notify will usually call aio_wait again very soon,
> + * or go through another iteration of the GLib main loop.  Hence, aio_notify
> + * also has the side effect of recalculating the sets of file descriptors
> + * that the main loop waits for.
> + *
> + * Calling aio_notify is rarely necessary, because for example scheduling
> + * a bottom half calls it already.
> + */
> +void aio_notify(AioContext *ctx);
> +
> +/**
>   * aio_bh_poll: Poll bottom halves for an AioContext.
>   *
>   * These are internal functions used by the QEMU main loop.
> --
> 1.7.12
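
For readers following the series, here is a minimal caller-side sketch of
how the new path is meant to be used.  aio_context_new, aio_bh_new,
qemu_bh_schedule, aio_notify, qemu_bh_delete and aio_context_unref are the
real APIs touched or documented by the patch; example_bh_cb and
example_usage are only illustrative names, not part of the series:

#include "qemu-aio.h"

/* Illustrative bottom-half callback (any QEMUBHFunc will do). */
static void example_bh_cb(void *opaque)
{
    /* Runs when the owner of the context polls bottom halves
     * (aio_bh_poll) or iterates the attached GLib main loop. */
}

static void example_usage(void)
{
    AioContext *ctx = aio_context_new();
    QEMUBH *bh = aio_bh_new(ctx, example_bh_cb, NULL);

    /* With this patch, scheduling wakes the context through
     * aio_notify(bh->ctx), i.e. by setting ctx->notifier, instead
     * of going through qemu_notify_event() in main-loop.c. */
    qemu_bh_schedule(bh);

    /* An explicit wakeup is rarely needed but still possible,
     * e.g. after changing the set of handlers from another thread: */
    aio_notify(ctx);

    qemu_bh_delete(bh);
    aio_context_unref(ctx);
}

The interesting part is qemu_bh_schedule(): the wakeup now stays inside the
AioContext (its own EventNotifier), which is why async.c no longer needs
any service from main-loop.c.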