qemu_coroutine_adjust_pool_size() is not needed anymore: the new TLS-based
coroutine pool algorithm sizes the pool adaptively.

Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 block/block-backend.c     |  4 ----
 include/block/coroutine.h | 10 ----------
 qemu-coroutine.c          |  4 ----
 3 files changed, 18 deletions(-)
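For context, the "TLS-based algorithm" keeps the coroutine free list in
thread-local storage and lets its size follow observed demand, so heavy
coroutine users no longer have to reserve pool space by hand. The snippet
below is only a minimal stand-alone sketch of that idea, not QEMU's
implementation; every name in it (pool_node, pool_get, pool_put, pool_max,
in_use) is hypothetical, and the real logic lives in qemu-coroutine.c.

/*
 * Hypothetical sketch: a per-thread object pool whose capacity adapts
 * to the peak number of objects in flight.  Illustrative only; this is
 * not QEMU code.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct pool_node {
    struct pool_node *next;
    /* a real coroutine would keep its stack and state here */
} pool_node;

/* Thread-local state: no locking is needed on the fast path. */
static __thread pool_node *pool_head;   /* cached, reusable objects      */
static __thread unsigned pool_size;     /* how many are cached right now */
static __thread unsigned in_use;        /* handed out, not yet returned  */
static __thread unsigned pool_max;      /* adapts to the peak of in_use  */

static pool_node *pool_get(void)
{
    pool_node *n;

    in_use++;
    if (in_use > pool_max) {
        pool_max = in_use;      /* remember the peak demand on this thread */
    }
    if (pool_head) {
        n = pool_head;          /* cheap reuse, no allocation */
        pool_head = n->next;
        pool_size--;
        return n;
    }
    return calloc(1, sizeof(*n));
}

static void pool_put(pool_node *n)
{
    in_use--;
    if (pool_size < pool_max) { /* cache up to the observed peak */
        n->next = pool_head;
        pool_head = n;
        pool_size++;
    } else {
        free(n);
    }
}

int main(void)
{
    pool_node *batch[8];

    /* A burst of 8 concurrent objects teaches the pool to cache 8. */
    for (int round = 0; round < 3; round++) {
        for (int i = 0; i < 8; i++) {
            batch[i] = pool_get();
        }
        for (int i = 0; i < 8; i++) {
            pool_put(batch[i]);
        }
    }
    printf("cached=%u cap=%u\n", pool_size, pool_max);  /* cached=8 cap=8 */
    return 0;
}

Because both the cache and the cap are per-thread, the hot path takes no
lock, and because the cap tracks observed demand, an up-front reservation
such as the qemu_coroutine_adjust_pool_size() call in blk_attach_dev()
becomes unnecessary, which is what the patch below removes.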
diff --git a/block/block-backend.c b/block/block-backend.c
index d0692b1..abf0cd1 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -260,9 +260,6 @@ int blk_attach_dev(BlockBackend *blk, void *dev)
     blk_ref(blk);
     blk->dev = dev;
     bdrv_iostatus_reset(blk->bs);
-
-    /* We're expecting I/O from the device so bump up coroutine pool size */
-    qemu_coroutine_adjust_pool_size(COROUTINE_POOL_RESERVATION);
     return 0;
 }
 
@@ -290,7 +287,6 @@ void blk_detach_dev(BlockBackend *blk, void *dev)
     blk->dev_ops = NULL;
     blk->dev_opaque = NULL;
     bdrv_set_guest_block_size(blk->bs, 512);
-    qemu_coroutine_adjust_pool_size(-COROUTINE_POOL_RESERVATION);
     blk_unref(blk);
 }
 
diff --git a/include/block/coroutine.h b/include/block/coroutine.h
index 793df0e..20c027a 100644
--- a/include/block/coroutine.h
+++ b/include/block/coroutine.h
@@ -216,14 +216,4 @@ void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
  */
 void coroutine_fn yield_until_fd_readable(int fd);
 
-/**
- * Add or subtract from the coroutine pool size
- *
- * The coroutine implementation keeps a pool of coroutines to be reused by
- * qemu_coroutine_create(). This makes coroutine creation cheap. Heavy
- * coroutine users should call this to reserve pool space. Call it again with
- * a negative number to release pool space.
- */
-void qemu_coroutine_adjust_pool_size(int n);
-
 #endif /* QEMU_COROUTINE_H */
diff --git a/qemu-coroutine.c b/qemu-coroutine.c
index aee1017..ca40f4f 100644
--- a/qemu-coroutine.c
+++ b/qemu-coroutine.c
@@ -144,7 +144,3 @@ void coroutine_fn qemu_coroutine_yield(void)
     self->caller = NULL;
     coroutine_swap(self, to);
 }
-
-void qemu_coroutine_adjust_pool_size(int n)
-{
-}
-- 
2.1.0