Resizing the XBZRLE cache during migration can cause QEMU to crash, because the main thread and the migration thread modify the XBZRLE cache size concurrently without lock protection.
Signed-off-by: ChenLiang <chenlian...@huawei.com> Signed-off-by: Gonglei <arei.gong...@huawei.com> --- Changes against the previous version: *Remove function cache_max_num_items and cache_page_size *Protect migration_end and ram_save_setup by XBZRLE.lock arch_init.c | 61 ++++++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 files changed, 56 insertions(+), 5 deletions(-) diff --git a/arch_init.c b/arch_init.c index 80574a0..d295237 100644 --- a/arch_init.c +++ b/arch_init.c @@ -164,26 +164,69 @@ static struct { uint8_t *encoded_buf; /* buffer for storing page content */ uint8_t *current_buf; - /* Cache for XBZRLE */ + /* Cache for XBZRLE, Protected by lock. */ PageCache *cache; + QemuMutex lock; } XBZRLE = { .encoded_buf = NULL, .current_buf = NULL, .cache = NULL, + /* use the lock carefully */ + .lock = {PTHREAD_MUTEX_INITIALIZER}, }; /* buffer used for XBZRLE decoding */ static uint8_t *xbzrle_decoded_buf; +static void XBZRLE_cache_lock(void) +{ + qemu_mutex_lock(&XBZRLE.lock); +} + +static void XBZRLE_cache_unlock(void) +{ + qemu_mutex_unlock(&XBZRLE.lock); +} + int64_t xbzrle_cache_resize(int64_t new_size) { + PageCache *new_cache, *old_cache, *cache; + if (new_size < TARGET_PAGE_SIZE) { return -1; } - if (XBZRLE.cache != NULL) { - return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) * - TARGET_PAGE_SIZE; + XBZRLE_cache_lock(); + /* check XBZRLE.cache again later */ + cache = XBZRLE.cache; + XBZRLE_cache_unlock(); + + if (cache != NULL) { + if (pow2floor(new_size) == migrate_xbzrle_cache_size()) { + goto ret; + } + new_cache = cache_init(new_size / TARGET_PAGE_SIZE, + TARGET_PAGE_SIZE); + if (!new_cache) { + DPRINTF("Error creating cache\n"); + return -1; + } + + XBZRLE_cache_lock(); + /* the XBZRLE.cache may have be destroyed, check it again */ + if (XBZRLE.cache != NULL) { + old_cache = XBZRLE.cache; + XBZRLE.cache = new_cache; + new_cache = NULL; + } + XBZRLE_cache_unlock(); + + if (NULL == new_cache) { + cache_fini(old_cache); + } else { + 
cache_fini(new_cache); + } } +ret: return pow2floor(new_size); } @@ -522,6 +565,8 @@ static int ram_save_block(QEMUFile *f, bool last_stage) ret = ram_control_save_page(f, block->offset, offset, TARGET_PAGE_SIZE, &bytes_sent); + XBZRLE_cache_lock(); + if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { if (ret != RAM_SAVE_CONTROL_DELAYED) { if (bytes_sent > 0) { @@ -553,6 +598,8 @@ static int ram_save_block(QEMUFile *f, bool last_stage) acct_info.norm_pages++; } + XBZRLE_cache_unlock(); + /* if page is unmodified, continue to the next */ if (bytes_sent > 0) { last_sent_block = block; @@ -620,6 +667,7 @@ static void migration_end(void) migration_bitmap = NULL; } + XBZRLE_cache_lock(); if (XBZRLE.cache) { cache_fini(XBZRLE.cache); g_free(XBZRLE.cache); @@ -629,6 +677,7 @@ static void migration_end(void) XBZRLE.encoded_buf = NULL; XBZRLE.current_buf = NULL; } + XBZRLE_cache_unlock(); } static void ram_migration_cancel(void *opaque) @@ -659,13 +708,16 @@ static int ram_save_setup(QEMUFile *f, void *opaque) dirty_rate_high_cnt = 0; if (migrate_use_xbzrle()) { + XBZRLE_cache_lock(); XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() / TARGET_PAGE_SIZE, TARGET_PAGE_SIZE); if (!XBZRLE.cache) { + XBZRLE_cache_unlock(); DPRINTF("Error creating cache\n"); return -1; } + XBZRLE_cache_unlock(); /* We prefer not to abort if there is no memory */ XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE); @@ -681,7 +733,6 @@ static int ram_save_setup(QEMUFile *f, void *opaque) XBZRLE.encoded_buf = NULL; return -1; } - acct_clear(); } -- 1.6.0.2 Best regards, -Gonglei