We are going to use the aio-task-pool API, so tasks will be handled in parallel. We therefore need a separately allocated task on each iteration. Introduce this logic now.
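For illustration only (not part of this patch), here is a minimal sketch of the allocation pattern this change introduces. It uses a hypothetical DemoTask stand-in for BlockCopyTask and a demo_task_create() helper: once tasks are handed to a task pool, each one must outlive the loop iteration that created it, so it has to live on the heap instead of being a single stack variable reused across iterations.

    #include <glib.h>

    /* Hypothetical stand-in for BlockCopyTask, reduced to the fields the
     * sketch needs. */
    typedef struct DemoTask {
        gint64 offset;
        gint64 bytes;
    } DemoTask;

    /* Heap-allocate one task per request, mirroring block_copy_task_create():
     * a stack-allocated task could not be handed to a parallel worker, as it
     * would be overwritten by the next loop iteration. */
    static DemoTask *demo_task_create(gint64 offset, gint64 bytes)
    {
        DemoTask *task = g_new(DemoTask, 1);

        task->offset = offset;
        task->bytes = bytes;

        return task;
    }

    int main(void)
    {
        GPtrArray *pending = g_ptr_array_new_with_free_func(g_free);
        gint64 offset;

        /* Each iteration gets its own allocation; a real caller would submit
         * these to an AIO task pool instead of just collecting them. */
        for (offset = 0; offset < 4 * 65536; offset += 65536) {
            g_ptr_array_add(pending, demo_task_create(offset, 65536));
        }

        g_print("allocated %u independent tasks\n", pending->len);
        g_ptr_array_free(pending, TRUE);
        return 0;
    }

In the patch below the same idea is applied to block_copy_dirty_clusters(): the task is created with g_new() and released via g_autofree for now, until the task pool takes ownership.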
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsement...@virtuozzo.com>
---
 block/block-copy.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/block/block-copy.c b/block/block-copy.c
index b00cbfc841..956b4b55e7 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -111,9 +111,11 @@ static bool coroutine_fn block_copy_wait_one(BlockCopyState *s, int64_t start,
 }
 
 /* Called only on full-dirty region */
-static void block_copy_task_begin(BlockCopyState *s, BlockCopyTask *task,
-                                  int64_t offset, int64_t bytes)
+static BlockCopyTask *block_copy_task_create(BlockCopyState *s,
+                                             int64_t offset, int64_t bytes)
 {
+    BlockCopyTask *task = g_new(BlockCopyTask, 1);
+
     assert(!block_copy_find_task(s, offset, bytes));
 
     bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
@@ -122,6 +124,8 @@ static void block_copy_task_begin(BlockCopyState *s, BlockCopyTask *task,
     task->bytes = bytes;
     qemu_co_queue_init(&task->wait_queue);
     QLIST_INSERT_HEAD(&s->tasks, task, list);
+
+    return task;
 }
 
 static void coroutine_fn block_copy_task_shrink(BlockCopyState *s,
@@ -454,7 +458,7 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
     assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
 
     while (bytes) {
-        BlockCopyTask task;
+        g_autofree BlockCopyTask *task = NULL;
         int64_t next_zero, cur_bytes, status_bytes;
 
         if (!bdrv_dirty_bitmap_get(s->copy_bitmap, offset)) {
@@ -475,12 +479,12 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
             assert(next_zero < offset + cur_bytes); /* no need to do MIN() */
             cur_bytes = next_zero - offset;
         }
-        block_copy_task_begin(s, &task, offset, cur_bytes);
+        task = block_copy_task_create(s, offset, cur_bytes);
 
         ret = block_copy_block_status(s, offset, cur_bytes, &status_bytes);
-        block_copy_task_shrink(s, &task, status_bytes);
+        block_copy_task_shrink(s, task, status_bytes);
         if (s->skip_unallocated && !(ret & BDRV_BLOCK_ALLOCATED)) {
-            block_copy_task_end(s, &task, 0);
+            block_copy_task_end(s, task, 0);
             s->progress_reset_callback(s->progress_opaque);
             trace_block_copy_skip_range(s, offset, status_bytes);
             offset += status_bytes;
@@ -496,7 +500,7 @@ static int coroutine_fn block_copy_dirty_clusters(BlockCopyState *s,
         ret = block_copy_do_copy(s, offset, cur_bytes, ret & BDRV_BLOCK_ZERO,
                                  error_is_read);
         co_put_to_shres(s->mem, cur_bytes);
-        block_copy_task_end(s, &task, ret);
+        block_copy_task_end(s, task, ret);
         if (ret < 0) {
             return ret;
         }
-- 
2.21.0