From: Paolo Bonzini <pbonz...@redhat.com>

This saves about 15% of the clock cycles spent on allocation.  Using the
slice allocator does not add a visible improvement; allocation is faster
than malloc, while freeing seems to be slower.
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
Signed-off-by: Kevin Wolf <kw...@redhat.com>
---
 async.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/async.c b/async.c
index 572f239..2be88cc 100644
--- a/async.c
+++ b/async.c
@@ -44,10 +44,12 @@ struct QEMUBH {
 QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
 {
     QEMUBH *bh;
-    bh = g_new0(QEMUBH, 1);
-    bh->ctx = ctx;
-    bh->cb = cb;
-    bh->opaque = opaque;
+    bh = g_new(QEMUBH, 1);
+    *bh = (QEMUBH){
+        .ctx = ctx,
+        .cb = cb,
+        .opaque = opaque,
+    };
     qemu_mutex_lock(&ctx->bh_lock);
     bh->next = ctx->first_bh;
     /* Make sure that the members are ready before putting bh into list */
-- 
1.8.3.1