By adding acquire/release pairs, we ensure that the .ret and .error_is_read fields are written by block_copy_dirty_clusters() before .finished becomes true, so a reader that observes .finished == true also sees the final values of those fields.

The atomics here are necessary because the fields are also modified concurrently outside of coroutines.
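For reference, a minimal stand-alone sketch (not part of the patch; the struct and function names are invented for illustration) of the same publish/consume pattern, written with plain C11 atomics in place of QEMU's qatomic_store_release()/qatomic_load_acquire() helpers:

    /*
     * Stand-alone sketch, not QEMU code: release/acquire pairing that
     * publishes ret and error_is_read together with finished.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct call_state {
        int ret;                /* plain field, published by the release store */
        bool error_is_read;     /* plain field, published by the release store */
        atomic_bool finished;   /* plays the role of .finished (atomic) */
        atomic_bool cancelled;  /* plays the role of .cancelled (atomic) */
    };

    static void writer(struct call_state *cs)
    {
        /* These writes must be visible once finished reads as true... */
        cs->ret = -5;
        cs->error_is_read = true;
        /* ...so they are ordered before the release store of finished. */
        atomic_store_explicit(&cs->finished, true, memory_order_release);
    }

    static void reader(struct call_state *cs)
    {
        /* The acquire load pairs with the release store in writer(). */
        if (atomic_load_explicit(&cs->finished, memory_order_acquire)) {
            /* Safe: ret and error_is_read are guaranteed to be visible here. */
            printf("ret=%d error_is_read=%d\n", cs->ret, cs->error_is_read);
        }
        /* cancelled publishes no other data, so a relaxed access is enough. */
        (void)atomic_load_explicit(&cs->cancelled, memory_order_relaxed);
    }

    int main(void)
    {
        struct call_state cs = {0};
        writer(&cs);
        reader(&cs);
        return 0;
    }

The release store guarantees that any thread whose acquire load of finished returns true also observes the preceding writes to ret and error_is_read, which is what the accessors such as block_copy_call_status() below rely on; cancelled carries no dependent data, hence the plain qatomic_read()/qatomic_set() in the patch.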
Signed-off-by: Emanuele Giuseppe Esposito <eespo...@redhat.com>
---
 block/block-copy.c | 33 ++++++++++++++++++---------------
 1 file changed, 18 insertions(+), 15 deletions(-)

diff --git a/block/block-copy.c b/block/block-copy.c
index 56f62913e4..55b6ce6a57 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -56,11 +56,11 @@ typedef struct BlockCopyCallState {
     QLIST_ENTRY(BlockCopyCallState) list;
 
     /* State */
-    bool finished;
+    bool finished; /* atomic */
     QemuCoSleep sleep; /* TODO: protect API with a lock */
 
     /* OUT parameters */
-    bool cancelled;
+    bool cancelled; /* atomic */
     /* Fields protected by lock in BlockCopyState */
     bool error_is_read;
     int ret;
@@ -648,7 +648,8 @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
     assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
     assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
 
-    while (bytes && aio_task_pool_status(aio) == 0 && !call_state->cancelled) {
+    while (bytes && aio_task_pool_status(aio) == 0 &&
+           !qatomic_read(&call_state->cancelled)) {
         BlockCopyTask *task;
         int64_t status_bytes;
 
@@ -758,7 +759,7 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
     do {
         ret = block_copy_dirty_clusters(call_state);
 
-        if (ret == 0 && !call_state->cancelled) {
+        if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
             ret = block_copy_wait_one(call_state->s, call_state->offset,
                                       call_state->bytes);
         }
@@ -772,9 +773,9 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
          * 2. We have waited for some intersecting block-copy request
          *    It may have failed and produced new dirty bits.
          */
-    } while (ret > 0 && !call_state->cancelled);
+    } while (ret > 0 && !qatomic_read(&call_state->cancelled));
 
-    call_state->finished = true;
+    qatomic_store_release(&call_state->finished, true);
 
     if (call_state->cb) {
         call_state->cb(call_state->cb_opaque);
@@ -837,35 +838,37 @@ void block_copy_call_free(BlockCopyCallState *call_state)
         return;
     }
 
-    assert(call_state->finished);
+    assert(qatomic_load_acquire(&call_state->finished));
     g_free(call_state);
 }
 
 bool block_copy_call_finished(BlockCopyCallState *call_state)
 {
-    return call_state->finished;
+    return qatomic_load_acquire(&call_state->finished);
 }
 
 bool block_copy_call_succeeded(BlockCopyCallState *call_state)
 {
-    return call_state->finished && !call_state->cancelled &&
-        call_state->ret == 0;
+    return qatomic_load_acquire(&call_state->finished) &&
+           !qatomic_read(&call_state->cancelled) &&
+           call_state->ret == 0;
 }
 
 bool block_copy_call_failed(BlockCopyCallState *call_state)
 {
-    return call_state->finished && !call_state->cancelled &&
-        call_state->ret < 0;
+    return qatomic_load_acquire(&call_state->finished) &&
+           !qatomic_read(&call_state->cancelled) &&
+           call_state->ret < 0;
 }
 
 bool block_copy_call_cancelled(BlockCopyCallState *call_state)
 {
-    return call_state->cancelled;
+    return qatomic_read(&call_state->cancelled);
 }
 
 int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
 {
-    assert(call_state->finished);
+    assert(qatomic_load_acquire(&call_state->finished));
     if (error_is_read) {
         *error_is_read = call_state->error_is_read;
     }
@@ -874,7 +877,7 @@ int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
 
 void block_copy_call_cancel(BlockCopyCallState *call_state)
 {
-    call_state->cancelled = true;
+    qatomic_set(&call_state->cancelled, true);
     block_copy_kick(call_state);
 }
 
-- 
2.30.2