On 15/05/2013 16:34, Stefan Hajnoczi wrote:
> +    wait_for_overlapping_requests(job, start, end);
> +    cow_request_begin(&cow_request, job, start, end);
> +
> +    for (; start < end; start++) {
> +        if (hbitmap_get(job->bitmap, start)) {
> +            DPRINTF("brdv_co_backup_cow skip C%" PRId64 "\n", start);
> +            continue; /* already copied */
> +        }
> +
> +        /* immediately set bitmap (avoid coroutine race) */
> +        hbitmap_set(job->bitmap, start, 1);
> +
HBitmap already has code for finding the next set bit, but you're not
using it.  If you reverse the direction of the bitmap, you can use an
HBitmapIter here:

> +    start = 0;
> +    end = DIV_ROUND_UP(bdrv_getlength(bs) / BDRV_SECTOR_SIZE,
> +                       BACKUP_BLOCKS_PER_CLUSTER);
> +
> +    job->bitmap = hbitmap_alloc(end, 0);
> +
> +    before_write = bdrv_add_before_write_cb(bs, backup_before_write);
> +
> +    DPRINTF("backup_run start %s %" PRId64 " %" PRId64 "\n",
> +            bdrv_get_device_name(bs), start, end);
> +
> +    for (; start < end; start++) {

... instead of iterating through each item manually.

Paolo

> +        if (block_job_is_cancelled(&job->common)) {
> +            break;
> +        }
> +
> +        /* we need to yield so that qemu_aio_flush() returns.
> +         * (without, VM does not reboot)
> +         */
> +        if (job->common.speed) {
> +            uint64_t delay_ns = ratelimit_calculate_delay(
> +                &job->limit, job->sectors_read);
> +            job->sectors_read = 0;
> +            block_job_sleep_ns(&job->common, rt_clock, delay_ns);
> +        } else {
> +            block_job_sleep_ns(&job->common, rt_clock, 0);
> +        }
> +
> +        if (block_job_is_cancelled(&job->common)) {
> +            break;
> +        }
> +
> +        DPRINTF("backup_run loop C%" PRId64 "\n", start);
> +
> +        ret = backup_do_cow(bs, start * BACKUP_BLOCKS_PER_CLUSTER, 1);
> +        if (ret < 0) {
> +            break;
> +        }
> +
> +        /* Publish progress */
> +        job->common.offset += BACKUP_CLUSTER_SIZE;
> +    }
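[For reference, a minimal sketch of the pattern Paolo is pointing at, not
the patch under review: it assumes the bitmap's meaning is inverted so that
a set bit means "cluster still to be copied".  Only the hbitmap_* calls are
the existing primitives from qemu/hbitmap.h; the function name and the loop
body are placeholders.]

    #include "qemu/hbitmap.h"

    static void iterate_pending_clusters(int64_t nb_clusters)
    {
        HBitmap *bitmap = hbitmap_alloc(nb_clusters, 0);
        HBitmapIter hbi;
        int64_t cluster;

        /* Start with every cluster marked as pending. */
        hbitmap_set(bitmap, 0, nb_clusters);

        /* The iterator yields the next set bit, skipping long runs of
         * clear bits without testing them one by one. */
        hbitmap_iter_init(&hbi, bitmap, 0);
        while ((cluster = hbitmap_iter_next(&hbi)) >= 0) {
            /* ... copy this cluster, then mark it as done. */
            hbitmap_reset(bitmap, cluster, 1);
        }

        hbitmap_free(bitmap);
    }

With the patch's current meaning (bits set for already-copied clusters) the
iterator cannot help, since it only walks set bits; hence the suggestion to
flip the direction of the bitmap.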