Re: [Qemu-block] [PATCH v5 02/11] block/backup: move to copy_bitmap with granularity

2019-01-14 Thread Max Reitz
On 29.12.18 13:20, Vladimir Sementsov-Ogievskiy wrote:
> We are going to share this bitmap between backup and the backup-top
> filter driver, so let's have it track something more meaningful: byte
> offsets at cluster_size granularity instead of cluster indices. This
> also simplifies some calculations.
> 
> Signed-off-by: Vladimir Sementsov-Ogievskiy 
> ---
>  block/backup.c | 48 +++-
>  1 file changed, 23 insertions(+), 25 deletions(-)

Reviewed-by: Max Reitz 





[Qemu-block] [PATCH v5 02/11] block/backup: move to copy_bitmap with granularity

2018-12-29 Thread Vladimir Sementsov-Ogievskiy
We are going to share this bitmap between backup and the backup-top
filter driver, so let's have it track something more meaningful: byte
offsets at cluster_size granularity instead of cluster indices. This
also simplifies some calculations.

Signed-off-by: Vladimir Sementsov-Ogievskiy 
---
 block/backup.c | 48 +++-
 1 file changed, 23 insertions(+), 25 deletions(-)
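
For readers not familiar with HBitmap granularity, the sketch below (not
part of the patch) illustrates the byte-addressed usage the copy bitmap
moves to: the bitmap covers the image length in bytes with granularity
ctz32(cluster_size), so callers pass byte offsets and byte counts instead
of cluster indices. It is a minimal sketch, assuming it is built inside
the QEMU source tree (which provides "qemu/hbitmap.h" and util/hbitmap.c);
the function name and constants are illustrative only.

/*
 * Minimal sketch (not from the patch): byte-addressed HBitmap usage,
 * analogous to what job->copy_bitmap becomes in this series.
 */
#include "qemu/osdep.h"
#include "qemu/host-utils.h"   /* ctz32() */
#include "qemu/hbitmap.h"

static void copy_bitmap_granularity_demo(void)
{
    uint64_t len = 8 * 1024 * 1024;     /* length of the backup source, in bytes */
    uint64_t cluster_size = 64 * 1024;  /* plays the role of job->cluster_size */

    /*
     * Old scheme: allocate DIV_ROUND_UP(len, cluster_size) bits with
     * granularity 0 and divide every offset by cluster_size at the call
     * sites.  New scheme: allocate over len bytes with granularity
     * ctz32(cluster_size) and address the bitmap in bytes.
     */
    HBitmap *copy_bitmap = hbitmap_alloc(len, ctz32(cluster_size));

    hbitmap_set(copy_bitmap, 0, len);            /* whole image still to copy */
    hbitmap_reset(copy_bitmap, 0, cluster_size); /* first cluster copied */

    /* hbitmap_get() now takes a byte offset, not a cluster index. */
    assert(!hbitmap_get(copy_bitmap, 0));
    assert(hbitmap_get(copy_bitmap, cluster_size));

    hbitmap_free(copy_bitmap);
}

This matches the pattern in the hunks below, which drop the
"/ job->cluster_size" arithmetic and pass byte offsets together with
"job->cluster_size * nr_clusters" style counts directly.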

diff --git a/block/backup.c b/block/backup.c
index fbe7ce19e1..0b3fddeb6c 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -114,7 +114,8 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
 int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
 int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
 
-hbitmap_reset(job->copy_bitmap, start / job->cluster_size, 1);
+assert(QEMU_IS_ALIGNED(start, job->cluster_size));
+hbitmap_reset(job->copy_bitmap, start, job->cluster_size);
 nbytes = MIN(job->cluster_size, job->len - start);
 if (!*bounce_buffer) {
 *bounce_buffer = blk_blockalign(blk, job->cluster_size);
@@ -150,7 +151,7 @@ static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
 
 return nbytes;
 fail:
-hbitmap_set(job->copy_bitmap, start / job->cluster_size, 1);
+hbitmap_set(job->copy_bitmap, start, job->cluster_size);
 return ret;
 
 }
@@ -170,16 +171,15 @@ static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
 int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;
 
 assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
+assert(QEMU_IS_ALIGNED(start, job->cluster_size));
 nbytes = MIN(job->copy_range_size, end - start);
 nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
-hbitmap_reset(job->copy_bitmap, start / job->cluster_size,
-  nr_clusters);
+hbitmap_reset(job->copy_bitmap, start, job->cluster_size * nr_clusters);
 ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
 read_flags, write_flags);
 if (ret < 0) {
 trace_backup_do_cow_copy_range_fail(job, start, ret);
-hbitmap_set(job->copy_bitmap, start / job->cluster_size,
-nr_clusters);
+hbitmap_set(job->copy_bitmap, start, job->cluster_size * nr_clusters);
 return ret;
 }
 
@@ -207,7 +207,7 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
 cow_request_begin(&cow_request, job, start, end);
 
 while (start < end) {
-if (!hbitmap_get(job->copy_bitmap, start / job->cluster_size)) {
+if (!hbitmap_get(job->copy_bitmap, start)) {
 trace_backup_do_cow_skip(job, start);
 start += job->cluster_size;
 continue; /* already copied */
@@ -303,6 +303,11 @@ static void backup_clean(Job *job)
 assert(s->target);
 blk_unref(s->target);
 s->target = NULL;
+
+if (s->copy_bitmap) {
+hbitmap_free(s->copy_bitmap);
+s->copy_bitmap = NULL;
+}
 }
 
 static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
@@ -315,7 +320,6 @@ static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
 void backup_do_checkpoint(BlockJob *job, Error **errp)
 {
 BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
-int64_t len;
 
 assert(block_job_driver(job) == &backup_job_driver);
 
@@ -325,8 +329,7 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
 return;
 }
 
-len = DIV_ROUND_UP(backup_job->len, backup_job->cluster_size);
-hbitmap_set(backup_job->copy_bitmap, 0, len);
+hbitmap_set(backup_job->copy_bitmap, 0, backup_job->len);
 }
 
 static void backup_drain(BlockJob *job)
@@ -381,16 +384,16 @@ static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
 {
 int ret;
 bool error_is_read;
-int64_t cluster;
+int64_t offset;
 HBitmapIter hbi;
 
 hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
-while ((cluster = hbitmap_iter_next(&hbi)) != -1) {
+while ((offset = hbitmap_iter_next(&hbi)) != -1) {
 do {
 if (yield_and_check(job)) {
 return 0;
 }
-ret = backup_do_cow(job, cluster * job->cluster_size,
+ret = backup_do_cow(job, offset,
 job->cluster_size, &error_is_read, false);
 if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
BLOCK_ERROR_ACTION_REPORT)
@@ -412,12 +415,9 @@ static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
 while (bdrv_dirty_bitmap_next_dirty_area(job->sync_bitmap,
  &offset, &bytes))
 {
-uint64_t cluster = offset / job->cluster_size;
-uint64_t last_cluster = (offset + bytes) / job->cluster_size;
+hbitmap_set(job->copy_bitmap, offset, bytes);
 
-hbitmap_set(job->copy_bitmap, cluster, last_cluster - cluster + 1);
-
-