A ram_list mutex is introduced to protect RAMBlock list traversal in the
migration thread from block addition/removal in the iothread.

Note: the combination of the iothread mutex and the migration thread mutex
works like a rw-lock: both mutexes must be held while modifying ram_list
members or the RAM block list, so holding either one is sufficient to read
them safely.
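
Below is a minimal standalone sketch of that locking rule, written with
plain pthreads rather than QemuMutex; the names (big_lock, ram_list_lock,
n_blocks) are illustrative only and not part of this patch. A writer takes
both locks before changing the list, so a reader holding either lock sees a
stable list, and locks are always taken in big_lock -> ram_list_lock order.

/*
 * Standalone model of the two-mutex scheme (not QEMU code).
 * big_lock models the iothread mutex, ram_list_lock models ram_list.mutex.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t ram_list_lock = PTHREAD_MUTEX_INITIALIZER;

static int n_blocks;           /* stand-in for the RAMBlock list */
static unsigned list_version;  /* stand-in for ram_list.version */

/* Writer path: called with big_lock already held (as qemu_ram_alloc runs
 * under the iothread mutex); also takes ram_list_lock around the update. */
static void add_block(void)
{
    pthread_mutex_lock(&ram_list_lock);
    n_blocks++;
    list_version++;
    pthread_mutex_unlock(&ram_list_lock);
}

/* Reader path: models the iterative stage of ram_save_live().  It takes
 * ram_list_lock, drops big_lock so the "iothread" can make progress, walks
 * the list, then restores the original state without ever acquiring
 * big_lock while still holding ram_list_lock. */
static void *migration_thread(void *arg)
{
    pthread_mutex_lock(&big_lock);

    pthread_mutex_lock(&ram_list_lock);
    pthread_mutex_unlock(&big_lock);

    printf("reader sees %d blocks, version %u\n", n_blocks, list_version);

    pthread_mutex_unlock(&ram_list_lock);
    pthread_mutex_lock(&big_lock);
    pthread_mutex_unlock(&big_lock);
    return NULL;
}

int main(void)
{
    pthread_t tid;

    /* "iothread": hold big_lock, then modify the list under both locks. */
    pthread_mutex_lock(&big_lock);
    add_block();
    pthread_mutex_unlock(&big_lock);

    pthread_create(&tid, NULL, migration_thread, NULL);
    pthread_join(tid, NULL);
    return 0;
}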

Signed-off-by: Umesh Deshpande <udesh...@redhat.com>
---
 arch_init.c   |   21 +++++++++++++++++++++
 cpu-all.h     |    3 +++
 exec.c        |   23 +++++++++++++++++++++++
 qemu-common.h |    2 ++
 4 files changed, 49 insertions(+), 0 deletions(-)

diff --git a/arch_init.c b/arch_init.c
index 484b39d..9d02270 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -109,6 +109,7 @@ static int is_dup_page(uint8_t *page, uint8_t ch)
 
 static RAMBlock *last_block;
 static ram_addr_t last_offset;
+static uint64_t last_version;
 
 static int ram_save_block(QEMUFile *f)
 {
@@ -170,6 +171,7 @@ static int ram_save_block(QEMUFile *f)
 
     last_block = block;
     last_offset = offset;
+    last_version = ram_list.version;
 
     return bytes_sent;
 }
@@ -270,6 +272,7 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         bytes_transferred = 0;
         last_block = NULL;
         last_offset = 0;
+        last_version = ram_list.version = 0;
         sort_ram_list();
 
         /* Make sure all dirty bits are set */
@@ -298,6 +301,17 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
     bytes_transferred_last = bytes_transferred;
     bwidth = qemu_get_clock_ns(rt_clock);
 
+    if (stage != 3) {
+        qemu_mutex_lock_migthread();
+        qemu_mutex_unlock_iothread();
+    }
+
+    if (ram_list.version != last_version) {
+        /* RAM block added or removed */
+        last_block = NULL;
+        last_offset = 0;
+    }
+
     while (!qemu_file_rate_limit(f)) {
         int bytes_sent;
 
@@ -308,6 +322,13 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         }
     }
 
+    if (stage != 3) {
+        qemu_mutex_unlock_migthread();
+        qemu_mutex_lock_iothread();
+        /* Lock ordering: the iothread mutex is always acquired outside a
+         * migthread mutex critical section to avoid deadlock. */
+    }
+
     bwidth = qemu_get_clock_ns(rt_clock) - bwidth;
     bwidth = (bytes_transferred - bytes_transferred_last) / bwidth;
 
diff --git a/cpu-all.h b/cpu-all.h
index 6b217a2..b85483f 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -21,6 +21,7 @@
 
 #include "qemu-common.h"
 #include "cpu-common.h"
+#include "qemu-thread.h"
 
 /* some important defines:
  *
@@ -932,7 +933,9 @@ typedef struct RAMBlock {
 } RAMBlock;
 
 typedef struct RAMList {
+    QemuMutex mutex;    /* Protects RAM block list */
     uint8_t *phys_dirty;
+    uint32_t version;   /* To detect ram block addition/removal */
     QLIST_HEAD(ram, RAMBlock) blocks;
     QLIST_HEAD(, RAMBlock) blocks_mru;
 } RAMList;
diff --git a/exec.c b/exec.c
index c5c247c..7627483 100644
--- a/exec.c
+++ b/exec.c
@@ -582,6 +582,7 @@ void cpu_exec_init_all(unsigned long tb_size)
     code_gen_alloc(tb_size);
     code_gen_ptr = code_gen_buffer;
     page_init();
+    qemu_mutex_init(&ram_list.mutex);
 #if !defined(CONFIG_USER_ONLY)
     io_mem_init();
 #endif
@@ -2802,6 +2803,16 @@ static long gethugepagesize(const char *path)
     return fs.f_bsize;
 }
 
+void qemu_mutex_lock_migthread(void)
+{
+    qemu_mutex_lock(&ram_list.mutex);
+}
+
+void qemu_mutex_unlock_migthread(void)
+{
+    qemu_mutex_unlock(&ram_list.mutex);
+}
+
 static void *file_ram_alloc(RAMBlock *block,
                             ram_addr_t memory,
                             const char *path)
@@ -2976,14 +2987,20 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
     }
     new_block->length = size;
 
+    qemu_mutex_lock_migthread();
+
     QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
     QLIST_INSERT_HEAD(&ram_list.blocks_mru, new_block, next_mru);
 
+    ram_list.version++;
+
     ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                        last_ram_offset() >> TARGET_PAGE_BITS);
     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
            0xff, size >> TARGET_PAGE_BITS);
 
+    qemu_mutex_unlock_migthread();
+
     if (kvm_enabled())
         kvm_setup_guest_memory(new_block->host, size);
 
@@ -3001,8 +3018,11 @@ void qemu_ram_free_from_ptr(ram_addr_t addr)
 
     QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (addr == block->offset) {
+            qemu_mutex_lock_migthread();
             QLIST_REMOVE(block, next);
             QLIST_REMOVE(block, next_mru);
+            ram_list.version++;
+            qemu_mutex_unlock_migthread();
             qemu_free(block);
             return;
         }
@@ -3015,8 +3035,11 @@ void qemu_ram_free(ram_addr_t addr)
 
     QLIST_FOREACH(block, &ram_list.blocks, next) {
         if (addr == block->offset) {
+            qemu_mutex_lock_migthread();
             QLIST_REMOVE(block, next);
             QLIST_REMOVE(block, next_mru);
+            ram_list.version++;
+            qemu_mutex_unlock_migthread();
             if (block->flags & RAM_PREALLOC_MASK) {
                 ;
             } else if (mem_path) {
diff --git a/qemu-common.h b/qemu-common.h
index abd7a75..7dabfe9 100644
--- a/qemu-common.h
+++ b/qemu-common.h
@@ -212,6 +212,8 @@ char *qemu_strndup(const char *str, size_t size);
 
 void qemu_mutex_lock_iothread(void);
 void qemu_mutex_unlock_iothread(void);
+void qemu_mutex_lock_migthread(void);
+void qemu_mutex_unlock_migthread(void);
 
 int qemu_open(const char *name, int flags, ...);
 ssize_t qemu_write_full(int fd, const void *buf, size_t count)
-- 
1.7.4.1

