18.05.2021 13:07, Emanuele Giuseppe Esposito wrote:
With the tasks and calls locks protecting all State fields,
.method is the last BlockCopyState field left unprotected.
Make it atomic.
Signed-off-by: Emanuele Giuseppe Esposito <eespo...@redhat.com>
OK, in patch 06 some things run outside of coroutine context. Could we just
reuse the mutex here?
I believe we don't need any kind of protection for .method inside
block_copy_state_new(), as it's just the creation and initialization of a new
structure.
And the other things are called from coroutines, so there seems to be no
reason for additional atomic access logic?
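A minimal sketch of that mutex-reuse alternative, for illustration only: it
assumes every writer runs in coroutine context and that tasks_lock can be
taken around the update; block_copy_set_method() is an invented helper name,
not part of the patch:

    static void coroutine_fn block_copy_set_method(BlockCopyState *s,
                                                   BlockCopyMethod method)
    {
        /* Serialize with other coroutines via the existing CoMutex */
        qemu_co_mutex_lock(&s->tasks_lock);
        s->method = method;
        qemu_co_mutex_unlock(&s->tasks_lock);
    }

Readers such as block_copy_chunk_size() would then have to take the same
lock, which is the cost the patch avoids by using qatomic accesses instead.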
---
block/block-copy.c | 37 ++++++++++++++++++-------------------
1 file changed, 18 insertions(+), 19 deletions(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index 573e96fefb..ebccb7fbc6 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -108,7 +108,7 @@ typedef struct BlockCopyState {
/* State */
int64_t in_flight_bytes; /* protected by tasks_lock */
- BlockCopyMethod method;
+ BlockCopyMethod method; /* atomic */
CoMutex tasks_lock;
QLIST_HEAD(, BlockCopyTask) tasks; /* All tasks from all block-copy calls */
QemuMutex calls_lock;
@@ -184,7 +184,7 @@ static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t offset,
static inline int64_t block_copy_chunk_size(BlockCopyState *s)
{
- switch (s->method) {
+ switch (qatomic_read(&s->method)) {
case COPY_READ_WRITE_CLUSTER:
return s->cluster_size;
case COPY_READ_WRITE:
@@ -338,16 +338,17 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
* buffered copying (read and write respect max_transfer on their
* behalf).
*/
- s->method = COPY_READ_WRITE_CLUSTER;
+ qatomic_set(&s->method, COPY_READ_WRITE_CLUSTER);
} else if (write_flags & BDRV_REQ_WRITE_COMPRESSED) {
/* Compression supports only cluster-size writes and no copy-range. */
- s->method = COPY_READ_WRITE_CLUSTER;
+ qatomic_set(&s->method, COPY_READ_WRITE_CLUSTER);
} else {
/*
* We enable copy-range, but keep small copy_size, until first
* successful copy_range (look at block_copy_do_copy).
*/
- s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
+ qatomic_set(&s->method, use_copy_range ? COPY_RANGE_SMALL :
+ COPY_READ_WRITE);
}
ratelimit_init(&s->rate_limit);
@@ -432,26 +433,24 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
return ret;
}
- if (s->method >= COPY_RANGE_SMALL) {
+ if (qatomic_read(&s->method) >= COPY_RANGE_SMALL) {
ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
0, s->write_flags);
if (ret < 0) {
trace_block_copy_copy_range_fail(s, offset, ret);
- s->method = COPY_READ_WRITE;
+ qatomic_set(&s->method, COPY_READ_WRITE);
/* Fallback to read+write with allocated buffer */
} else {
- if (s->method == COPY_RANGE_SMALL) {
- /*
- * Successful copy-range. Now increase copy_size. copy_range
- * does not respect max_transfer (it's a TODO), so we factor
- * that in here.
- *
- * Note: we double-check s->method for the case when
- * parallel block-copy request unsets it during previous
- * bdrv_co_copy_range call.
- */
- s->method = COPY_RANGE_FULL;
- }
+ /*
+ * Successful copy-range. Now increase copy_size. copy_range
+ * does not respect max_transfer (it's a TODO), so we factor
+ * that in here.
+ *
+ * Note: we double-check s->method for the case when
+ * parallel block-copy request unsets it during previous
+ * bdrv_co_copy_range call.
+ */
+ qatomic_cmpxchg(&s->method, COPY_RANGE_SMALL, COPY_RANGE_FULL);
goto out;
}
}
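For reference, a standalone sketch of the compare-and-swap semantics that
qatomic_cmpxchg() above relies on, written with C11 stdatomic instead of
QEMU's qatomic_* macros (names and enum values reduced for illustration):
the upgrade to COPY_RANGE_FULL happens only if no parallel request has
meanwhile reset the method to COPY_READ_WRITE.

    #include <stdatomic.h>

    enum { COPY_READ_WRITE, COPY_RANGE_SMALL, COPY_RANGE_FULL };
    static _Atomic int method = COPY_RANGE_SMALL;

    /* Called after a successful copy_range: upgrade SMALL -> FULL, but
     * only if a concurrent failure path has not reset the method in the
     * meantime. The read-compare-write is one indivisible step. */
    static void on_copy_range_success(void)
    {
        int expected = COPY_RANGE_SMALL;
        atomic_compare_exchange_strong(&method, &expected, COPY_RANGE_FULL);
    }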
--
Best regards,
Vladimir