This fixes the race conditions on sub-cluster-sized writes while waiting for a faster solution.
Signed-off-by: Benoit Canet <ben...@irqsave.net>
---
 block/qcow2.c | 14 +++++++++++++-
 block/qcow2.h |  1 +
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/block/qcow2.c b/block/qcow2.c
index 6b8f85f..4f8cf68 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -523,6 +523,7 @@ static int qcow2_open(BlockDriverState *bs, int flags)
 
     /* Initialise locks */
     qemu_co_mutex_init(&s->lock);
+    qemu_co_mutex_init(&s->dedup_lock);
 
     /* Repair image if dirty */
     if (!(flags & BDRV_O_CHECK) && !bs->read_only &&
@@ -814,8 +815,15 @@ static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
     s->cluster_cache_offset = -1; /* disable compressed cache */
 
     qemu_co_mutex_lock(&s->lock);
-
     atomic_dedup_is_running = qcow2_dedup_is_running(bs);
+    qemu_co_mutex_unlock(&s->lock);
+
+    if (atomic_dedup_is_running) {
+        qemu_co_mutex_lock(&s->dedup_lock);
+    }
+
+    qemu_co_mutex_lock(&s->lock);
+
     if (atomic_dedup_is_running) {
         QTAILQ_INIT(&ds.undedupables);
         ds.phash.reuse = false;
@@ -982,6 +990,10 @@ fail:
         g_free(l2meta);
     }
 
+    if (atomic_dedup_is_running) {
+        qemu_co_mutex_unlock(&s->dedup_lock);
+    }
+
     qemu_iovec_destroy(&hd_qiov);
     qemu_vfree(cluster_data);
     qemu_vfree(dedup_cluster_data);
diff --git a/block/qcow2.h b/block/qcow2.h
index dc9f519..9f5d0f0 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -239,6 +239,7 @@ typedef struct BDRVQcowState {
     GTree *dedup_tree_by_sect;
 
     CoMutex lock;
+    CoMutex dedup_lock;
 
     uint32_t crypt_method; /* current crypt method, 0 if no key yet */
     uint32_t crypt_method_header;
-- 
1.7.10.4