The three global properties allow_aio_context_change, disable_request_queuing and allow_write_beyond_eof are always set for the whole life of a BlockBackend. Make this clear by removing the possibility of clearing them, and by marking the corresponding functions GLOBAL_STATE_CODE().
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com> --- block/block-backend.c | 27 +++++++++++++++------------ block/commit.c | 4 ++-- block/export/export.c | 2 +- block/mirror.c | 4 ++-- block/parallels.c | 2 +- block/qcow.c | 2 +- block/qcow2.c | 2 +- block/qed.c | 2 +- block/stream.c | 4 ++-- block/vdi.c | 2 +- block/vhdx.c | 2 +- block/vmdk.c | 4 ++-- block/vpc.c | 2 +- include/sysemu/block-backend-io.h | 6 +++--- nbd/server.c | 3 +-- tests/unit/test-bdrv-drain.c | 4 ++-- tests/unit/test-block-iothread.c | 2 +- 17 files changed, 38 insertions(+), 36 deletions(-) diff --git a/block/block-backend.c b/block/block-backend.c index 9e0f48692a35..10419f8be91e 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -73,8 +73,13 @@ struct BlockBackend { uint64_t shared_perm; bool disable_perm; + /* + * Can only become true; should be written before any request is + * submitted to the BlockBackend. + */ bool allow_aio_context_change; bool allow_write_beyond_eof; + bool disable_request_queuing; /* Protected by BQL */ NotifierList remove_bs_notifiers, insert_bs_notifiers; @@ -83,7 +88,6 @@ struct BlockBackend { int quiesce_counter; /* atomic: written under BQL, read by other threads */ QemuMutex queued_requests_lock; /* protects queued_requests */ CoQueue queued_requests; - bool disable_request_queuing; /* atomic */ VMChangeStateEntry *vmsh; bool force_allow_inactivate; @@ -1221,22 +1225,22 @@ void blk_iostatus_set_err(BlockBackend *blk, int error) } } -void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow) +void blk_allow_write_beyond_eof(BlockBackend *blk) { - IO_CODE(); - blk->allow_write_beyond_eof = allow; + GLOBAL_STATE_CODE(); + blk->allow_write_beyond_eof = true; } -void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow) +void blk_allow_aio_context_change(BlockBackend *blk) { - IO_CODE(); - blk->allow_aio_context_change = allow; + GLOBAL_STATE_CODE(); + blk->allow_aio_context_change = true; } -void 
blk_set_disable_request_queuing(BlockBackend *blk, bool disable) +void blk_disable_request_queuing(BlockBackend *blk) { - IO_CODE(); - qatomic_set(&blk->disable_request_queuing, disable); + GLOBAL_STATE_CODE(); + blk->disable_request_queuing = true; } static int coroutine_fn GRAPH_RDLOCK @@ -1275,8 +1279,7 @@ static void coroutine_fn blk_wait_while_drained(BlockBackend *blk) { assert(blk->in_flight > 0); - if (qatomic_read(&blk->quiesce_counter) && - !qatomic_read(&blk->disable_request_queuing)) { + if (qatomic_read(&blk->quiesce_counter) && !blk->disable_request_queuing) { /* * Take lock before decrementing in flight counter so main loop thread * waits for us to enqueue ourselves before it can leave the drained diff --git a/block/commit.c b/block/commit.c index 2b20fd0fd4d2..88e1d7373d36 100644 --- a/block/commit.c +++ b/block/commit.c @@ -379,7 +379,7 @@ void commit_start(const char *job_id, BlockDriverState *bs, if (ret < 0) { goto fail; } - blk_set_disable_request_queuing(s->base, true); + blk_disable_request_queuing(s->base); s->base_bs = base; /* Required permissions are already taken with block_job_add_bdrv() */ @@ -388,7 +388,7 @@ void commit_start(const char *job_id, BlockDriverState *bs, if (ret < 0) { goto fail; } - blk_set_disable_request_queuing(s->top, true); + blk_disable_request_queuing(s->top); s->backing_file_str = g_strdup(backing_file_str); s->on_error = on_error; diff --git a/block/export/export.c b/block/export/export.c index e3fee6061169..0a1336c07fed 100644 --- a/block/export/export.c +++ b/block/export/export.c @@ -155,7 +155,7 @@ BlockExport *blk_exp_add(BlockExportOptions *export, Error **errp) blk = blk_new(ctx, perm, BLK_PERM_ALL); if (!fixed_iothread) { - blk_set_allow_aio_context_change(blk, true); + blk_allow_aio_context_change(blk); } ret = blk_insert_bs(blk, bs, errp); diff --git a/block/mirror.c b/block/mirror.c index 1c46ad51bf50..93eda37660a3 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -1787,8 +1787,8 @@ static BlockJob 
*mirror_start_job( * ensure that. */ blk_set_force_allow_inactivate(s->target); } - blk_set_allow_aio_context_change(s->target, true); - blk_set_disable_request_queuing(s->target, true); + blk_allow_aio_context_change(s->target); + blk_disable_request_queuing(s->target); s->replaces = g_strdup(replaces); s->on_source_error = on_source_error; diff --git a/block/parallels.c b/block/parallels.c index 013684801a61..97a5c629bbab 100644 --- a/block/parallels.c +++ b/block/parallels.c @@ -578,7 +578,7 @@ static int coroutine_fn parallels_co_create(BlockdevCreateOptions* opts, ret = -EPERM; goto out; } - blk_set_allow_write_beyond_eof(blk, true); + blk_allow_write_beyond_eof(blk); /* Create image format */ bat_entries = DIV_ROUND_UP(total_size, cl_size); diff --git a/block/qcow.c b/block/qcow.c index 490e4f819ed1..5089dd0c6bf3 100644 --- a/block/qcow.c +++ b/block/qcow.c @@ -842,7 +842,7 @@ static int coroutine_fn qcow_co_create(BlockdevCreateOptions *opts, ret = -EPERM; goto exit; } - blk_set_allow_write_beyond_eof(qcow_blk, true); + blk_allow_write_beyond_eof(qcow_blk); /* Create image format */ memset(&header, 0, sizeof(header)); diff --git a/block/qcow2.c b/block/qcow2.c index f8ea03a34515..761aa7e1555a 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -3643,7 +3643,7 @@ qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp) ret = -EPERM; goto out; } - blk_set_allow_write_beyond_eof(blk, true); + blk_allow_write_beyond_eof(blk); /* Write the header */ QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header)); diff --git a/block/qed.c b/block/qed.c index 0705a7b4e25f..7fec1cabc4f6 100644 --- a/block/qed.c +++ b/block/qed.c @@ -690,7 +690,7 @@ static int coroutine_fn bdrv_qed_co_create(BlockdevCreateOptions *opts, ret = -EPERM; goto out; } - blk_set_allow_write_beyond_eof(blk, true); + blk_allow_write_beyond_eof(blk); /* Prepare image format */ header = (QEDHeader) { diff --git a/block/stream.c b/block/stream.c index d92a4c99d359..935e109a4e05 100644 --- 
a/block/stream.c +++ b/block/stream.c @@ -336,8 +336,8 @@ void stream_start(const char *job_id, BlockDriverState *bs, * Disable request queuing in the BlockBackend to avoid deadlocks on drain: * The job reports that it's busy until it reaches a pause point. */ - blk_set_disable_request_queuing(s->blk, true); - blk_set_allow_aio_context_change(s->blk, true); + blk_disable_request_queuing(s->blk); + blk_allow_aio_context_change(s->blk); /* * Prevent concurrent jobs trying to modify the graph structure here, we diff --git a/block/vdi.c b/block/vdi.c index f2434d6153e1..1e4eb6a0bd0b 100644 --- a/block/vdi.c +++ b/block/vdi.c @@ -813,7 +813,7 @@ static int coroutine_fn vdi_co_do_create(BlockdevCreateOptions *create_options, goto exit; } - blk_set_allow_write_beyond_eof(blk, true); + blk_allow_write_beyond_eof(blk); /* We need enough blocks to store the given disk size, so always round up. */ diff --git a/block/vhdx.c b/block/vhdx.c index 81420722a188..7f59b6cb0403 100644 --- a/block/vhdx.c +++ b/block/vhdx.c @@ -2003,7 +2003,7 @@ static int coroutine_fn vhdx_co_create(BlockdevCreateOptions *opts, ret = -EPERM; goto delete_and_exit; } - blk_set_allow_write_beyond_eof(blk, true); + blk_allow_write_beyond_eof(blk); /* Create (A) */ diff --git a/block/vmdk.c b/block/vmdk.c index 3f8c731e32e8..08a009f527e1 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -2298,7 +2298,7 @@ vmdk_create_extent(const char *filename, int64_t filesize, bool flat, goto exit; } - blk_set_allow_write_beyond_eof(blk, true); + blk_allow_write_beyond_eof(blk); ret = vmdk_init_extent(blk, filesize, flat, compress, zeroed_grain, errp); exit: @@ -2796,7 +2796,7 @@ static BlockBackend * coroutine_fn vmdk_co_create_cb(int64_t size, int idx, if (!blk) { return NULL; } - blk_set_allow_write_beyond_eof(blk, true); + blk_allow_write_beyond_eof(blk); bdrv_unref(bs); if (size != -1) { diff --git a/block/vpc.c b/block/vpc.c index b89b0ff8e275..1dc9a86c6aa2 100644 --- a/block/vpc.c +++ b/block/vpc.c @@ -1016,7 
+1016,7 @@ static int coroutine_fn vpc_co_create(BlockdevCreateOptions *opts, ret = -EPERM; goto out; } - blk_set_allow_write_beyond_eof(blk, true); + blk_allow_write_beyond_eof(blk); /* Get geometry and check that it matches the image size*/ ret = calculate_rounded_image_size(vpc_opts, &cyls, &heads, &secs_per_cyl, diff --git a/include/sysemu/block-backend-io.h b/include/sysemu/block-backend-io.h index db29c164997d..1a55d25c133a 100644 --- a/include/sysemu/block-backend-io.h +++ b/include/sysemu/block-backend-io.h @@ -27,9 +27,9 @@ const char *blk_name(const BlockBackend *blk); BlockDriverState *blk_bs(BlockBackend *blk); -void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow); -void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow); -void blk_set_disable_request_queuing(BlockBackend *blk, bool disable); +void blk_allow_write_beyond_eof(BlockBackend *blk); +void blk_allow_aio_context_change(BlockBackend *blk); +void blk_disable_request_queuing(BlockBackend *blk); bool blk_iostatus_is_enabled(const BlockBackend *blk); char *blk_get_attached_dev_id(BlockBackend *blk); diff --git a/nbd/server.c b/nbd/server.c index cb41b56095ee..423dc2d2517e 100644 --- a/nbd/server.c +++ b/nbd/server.c @@ -1777,7 +1777,7 @@ static int nbd_export_create(BlockExport *blk_exp, BlockExportOptions *exp_args, * be properly quiesced when entering a drained section, as our coroutines * servicing pending requests might enter blk_pread(). 
*/ - blk_set_disable_request_queuing(blk, true); + blk_disable_request_queuing(blk); blk_add_aio_context_notifier(blk, blk_aio_attached, blk_aio_detach, exp); @@ -1853,7 +1853,6 @@ static void nbd_export_delete(BlockExport *blk_exp) } blk_remove_aio_context_notifier(exp->common.blk, blk_aio_attached, blk_aio_detach, exp); - blk_set_disable_request_queuing(exp->common.blk, false); for (i = 0; i < exp->nr_export_bitmaps; i++) { bdrv_dirty_bitmap_set_busy(exp->export_bitmaps[i], false); diff --git a/tests/unit/test-bdrv-drain.c b/tests/unit/test-bdrv-drain.c index d9d38070621a..9484e194d6f9 100644 --- a/tests/unit/test-bdrv-drain.c +++ b/tests/unit/test-bdrv-drain.c @@ -513,7 +513,7 @@ static void test_iothread_common(enum drain_type drain_type, int drain_thread) &error_abort); s = bs->opaque; blk_insert_bs(blk, bs, &error_abort); - blk_set_disable_request_queuing(blk, true); + blk_disable_request_queuing(blk); blk_set_aio_context(blk, ctx_a, &error_abort); aio_context_acquire(ctx_a); @@ -739,7 +739,7 @@ static void test_blockjob_common_drain_node(enum drain_type drain_type, &error_abort); blk_target = blk_new(qemu_get_aio_context(), BLK_PERM_ALL, BLK_PERM_ALL); blk_insert_bs(blk_target, target, &error_abort); - blk_set_allow_aio_context_change(blk_target, true); + blk_allow_aio_context_change(blk_target); aio_context_acquire(ctx); tjob = block_job_create("job0", &test_job_driver, NULL, src, diff --git a/tests/unit/test-block-iothread.c b/tests/unit/test-block-iothread.c index 3a5e1eb2c413..90b60ce32c68 100644 --- a/tests/unit/test-block-iothread.c +++ b/tests/unit/test-block-iothread.c @@ -795,7 +795,7 @@ static void test_propagate_mirror(void) /* ...unless we explicitly allow it */ aio_context_acquire(ctx); - blk_set_allow_aio_context_change(blk, true); + blk_allow_aio_context_change(blk); bdrv_try_change_aio_context(target, ctx, NULL, &error_abort); aio_context_release(ctx); -- 2.39.2