backup_cow_with_offload and backup_cow_with_bounce_buffer contain a lot of duplicated logic. Move it into backup_do_cow.
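With the duplication gone, the dirty-bitmap bookkeeping and the choice of read flags live only in the caller; the two helpers just issue the actual I/O and report failure with a negative errno. Roughly, the copy loop in backup_do_cow() then looks like this (a simplified sketch of the result, not the literal patched code -- see the diff below; the comments are my reading of why the bitmap is flipped before and after the copy):

    cur_bytes = MIN(dirty_end - start, job->len - start);
    /* optimistically mark the range as copied before doing the copy */
    bdrv_reset_dirty_bitmap(job->copy_bitmap, start, dirty_end - start);

    if (job->use_copy_range) {
        ret = backup_cow_with_offload(job, start, cur_bytes, read_flags);
        if (ret < 0) {
            job->use_copy_range = false;   /* fall back to the bounce buffer */
        }
    }
    if (!job->use_copy_range) {
        ret = backup_cow_with_bounce_buffer(job, start, cur_bytes,
                                            read_flags, error_is_read);
    }
    if (ret < 0) {
        /* copy failed: mark the range dirty again so it still needs copying */
        bdrv_set_dirty_bitmap(job->copy_bitmap, start, dirty_end - start);
        break;
    }
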
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsement...@virtuozzo.com>
Reviewed-by: Max Reitz <mre...@redhat.com>
---
 block/backup.c | 84 +++++++++++++++++++-------------------------------
 1 file changed, 31 insertions(+), 53 deletions(-)

diff --git a/block/backup.c b/block/backup.c
index 155e21d0a3..ae780e1260 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -100,85 +100,60 @@ static void cow_request_end(CowRequest *req)
 
 /* Copy range to target with a bounce buffer and return the bytes copied. If
  * error occurred, return a negative error number */
-static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
-                                                      int64_t start,
-                                                      int64_t end,
-                                                      bool is_write_notifier,
-                                                      bool *error_is_read)
+static int coroutine_fn backup_cow_with_bounce_buffer(
+        BackupBlockJob *job, int64_t offset, int64_t bytes,
+        BdrvRequestFlags read_flags, bool *error_is_read)
 {
-    int ret;
+    int ret = 0;
     BlockBackend *blk = job->common.blk;
-    int nbytes;
-    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
-    void *bounce_buffer;
-
-    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
+    void *bounce_buffer = blk_try_blockalign(blk, bytes);
 
-    nbytes = MIN(end - start, job->len - start);
-    bounce_buffer = blk_try_blockalign(blk, nbytes);
     if (!bounce_buffer) {
         return -ENOMEM;
     }
 
-    bdrv_reset_dirty_bitmap(job->copy_bitmap, start, end - start);
-
-    ret = blk_co_pread(blk, start, nbytes, bounce_buffer, read_flags);
+    ret = blk_co_pread(blk, offset, bytes, bounce_buffer, read_flags);
     if (ret < 0) {
-        trace_backup_do_cow_read_fail(job, start, ret);
+        trace_backup_do_cow_read_fail(job, offset, ret);
         if (error_is_read) {
             *error_is_read = true;
         }
-        goto fail;
+        goto out;
     }
 
-    ret = blk_co_pwrite(job->target, start, nbytes, bounce_buffer,
+    ret = blk_co_pwrite(job->target, offset, bytes, bounce_buffer,
                         job->write_flags);
     if (ret < 0) {
-        trace_backup_do_cow_write_fail(job, start, ret);
+        trace_backup_do_cow_write_fail(job, offset, ret);
         if (error_is_read) {
             *error_is_read = false;
         }
-        goto fail;
+        goto out;
     }
 
+out:
     qemu_vfree(bounce_buffer);
-    return nbytes;
 
-fail:
-    bdrv_set_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
-    qemu_vfree(bounce_buffer);
     return ret;
-
 }
 
 /* Copy range to target and return the bytes copied. If error occurred, return a
  * negative error number. */
 static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
-                                                int64_t start,
-                                                int64_t end,
-                                                bool is_write_notifier)
+                                                int64_t offset,
+                                                int64_t bytes,
+                                                BdrvRequestFlags read_flags)
 {
     int ret;
-    int nr_clusters;
     BlockBackend *blk = job->common.blk;
-    int nbytes = MIN(end - start, job->len - start);
-    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
-
-    assert(end - start < INT_MAX);
-    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
-    nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
-    bdrv_reset_dirty_bitmap(job->copy_bitmap, start,
-                            job->cluster_size * nr_clusters);
-    ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
+
+    ret = blk_co_copy_range(blk, offset, job->target, offset, bytes,
                             read_flags, job->write_flags);
     if (ret < 0) {
-        trace_backup_do_cow_copy_range_fail(job, start, ret);
-        bdrv_set_dirty_bitmap(job->copy_bitmap, start,
-                              job->cluster_size * nr_clusters);
-        return ret;
+        trace_backup_do_cow_copy_range_fail(job, offset, ret);
     }
 
-    return nbytes;
+    return ret;
 }
 
 /*
@@ -261,6 +236,8 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
     int ret = 0;
     int64_t start, end; /* bytes */
     int64_t skip_bytes;
+    BdrvRequestFlags read_flags =
+            is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
 
     qemu_co_rwlock_rdlock(&job->flush_rwlock);
 
@@ -274,6 +251,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
 
     while (start < end) {
         int64_t dirty_end;
+        int64_t cur_bytes;
 
         if (!bdrv_dirty_bitmap_get(job->copy_bitmap, start)) {
             trace_backup_do_cow_skip(job, start);
@@ -297,30 +275,30 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
         }
 
         trace_backup_do_cow_process(job, start);
+        cur_bytes = MIN(dirty_end - start, job->len - start);
+        bdrv_reset_dirty_bitmap(job->copy_bitmap, start, dirty_end - start);
 
         if (job->use_copy_range) {
-            ret = backup_cow_with_offload(job, start, dirty_end,
-                                          is_write_notifier);
+            ret = backup_cow_with_offload(job, start, cur_bytes, read_flags);
             if (ret < 0) {
                 job->use_copy_range = false;
             }
         }
         if (!job->use_copy_range) {
-            ret = backup_cow_with_bounce_buffer(job, start, dirty_end,
-                                                is_write_notifier,
-                                                error_is_read);
+            ret = backup_cow_with_bounce_buffer(job, start, cur_bytes,
+                                                read_flags, error_is_read);
         }
         if (ret < 0) {
+            bdrv_set_dirty_bitmap(job->copy_bitmap, start, dirty_end - start);
             break;
         }
 
         /* Publish progress, guest I/O counts as progress too.  Note that the
          * offset field is an opaque progress value, it is not a disk offset.
          */
-        start += ret;
-        job->bytes_read += ret;
-        job_progress_update(&job->common.job, ret);
-        ret = 0;
+        start += cur_bytes;
+        job->bytes_read += cur_bytes;
+        job_progress_update(&job->common.job, cur_bytes);
     }
 
     cow_request_end(&cow_request);
-- 
2.18.0