This fixes the race conditions on sub-cluster-sized writes while waiting for a faster solution.
Signed-off-by: Benoit Canet <ben...@irqsave.net> --- block/qcow2.c | 14 ++++++++++++++ block/qcow2.h | 1 + 2 files changed, 15 insertions(+) diff --git a/block/qcow2.c b/block/qcow2.c index 8eb63f1..11c115f 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -534,6 +534,7 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags) /* Initialise locks */ qemu_co_mutex_init(&s->lock); + qemu_co_mutex_init(&s->dedup_lock); /* Repair image if dirty */ if (!(flags & BDRV_O_CHECK) && !bs->read_only && @@ -841,6 +842,15 @@ static coroutine_fn int qcow2_co_writev(BlockDriverState *bs, s->cluster_cache_offset = -1; /* disable compressed cache */ + if (s->has_dedup) { + /* This mutex is used to serialize the write requests in the dedup case. + * The goal is to prevent concurrent dedup requests from racing on + * the same clusters and corrupting data. + * With qcow2_dedup_read_missing_and_concatenate, that would not work. + */ + qemu_co_mutex_lock(&s->dedup_lock); + } + qemu_co_mutex_lock(&s->lock); if (s->has_dedup) { @@ -1018,6 +1028,10 @@ fail: l2meta = next; } + if (s->has_dedup) { + qemu_co_mutex_unlock(&s->dedup_lock); + } + qemu_iovec_destroy(&hd_qiov); qemu_vfree(cluster_data); qemu_vfree(dedup_cluster_data); diff --git a/block/qcow2.h b/block/qcow2.h index 6f85e03..3c6e685 100644 --- a/block/qcow2.h +++ b/block/qcow2.h @@ -364,6 +364,7 @@ typedef struct BDRVQcowState { Coroutine *load_filter_co; /* used to load incarnations filters */ CoMutex lock; + CoMutex dedup_lock; uint32_t crypt_method; /* current crypt method, 0 if no key yet */ uint32_t crypt_method_header; -- 1.7.10.4