This patch creates a separate migration bitmap, which is periodically
synchronized with the qemu dirty bitmap. Keeping a private copy of the dirty
bitmap for migration avoids concurrent access to the qemu bitmap from the
iothread and the migration thread.
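
For illustration only, below is a minimal standalone sketch (not the QEMU
code itself) of the two-bitmap idea: one bitmap is updated under a lock by
the producer ("iothread") side, while the consumer ("migration") side scans
its private copy and periodically pulls dirty bits across. All names in the
sketch are hypothetical.

/*
 * Illustrative sketch only (not QEMU code): two dirty bitmaps, one
 * updated by the iothread under a lock, one private to the migration
 * thread.  sync_bitmaps() transfers and re-arms dirty bits, so the
 * migration thread never reads the authoritative bitmap directly.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_PAGES 16

static uint8_t qemu_bitmap[NUM_PAGES];      /* written by the iothread      */
static uint8_t migration_bitmap[NUM_PAGES]; /* read by the migration thread */
static pthread_mutex_t bitmap_lock = PTHREAD_MUTEX_INITIALIZER;

/* iothread side: mark a page dirty in the authoritative bitmap */
static void mark_page_dirty(size_t page)
{
    pthread_mutex_lock(&bitmap_lock);
    qemu_bitmap[page] = 1;
    pthread_mutex_unlock(&bitmap_lock);
}

/* migration side: pull dirty bits into the private copy, then clear them */
static void sync_bitmaps(void)
{
    pthread_mutex_lock(&bitmap_lock);
    for (size_t i = 0; i < NUM_PAGES; i++) {
        if (qemu_bitmap[i]) {
            migration_bitmap[i] = 1;  /* remember the page for sending */
            qemu_bitmap[i] = 0;       /* re-arm dirty tracking         */
        }
    }
    pthread_mutex_unlock(&bitmap_lock);
}

int main(void)
{
    mark_page_dirty(3);
    mark_page_dirty(7);
    sync_bitmaps();

    for (size_t i = 0; i < NUM_PAGES; i++) {
        if (migration_bitmap[i]) {
            printf("page %zu must be sent\n", i);
        }
    }
    return 0;
}

The patch below applies the same split to ram_list: the migration code reads
and clears only ram_list.migration_bitmap, and sync_migration_bitmap() copies
dirty bits over from ram_list.phys_dirty.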

Signed-off-by: Umesh Deshpande <udesh...@redhat.com>
---
 arch_init.c |   26 +++++++++++++++++------
 cpu-all.h   |   37 ++++++++++++++++++++++++++++++++++
 exec.c      |   64 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 120 insertions(+), 7 deletions(-)

diff --git a/arch_init.c b/arch_init.c
index 484b39d..296b7d6 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -123,13 +123,13 @@ static int ram_save_block(QEMUFile *f)
     current_addr = block->offset + offset;
 
     do {
-        if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
+        if (migration_bitmap_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
             uint8_t *p;
             int cont = (block == last_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
 
-            cpu_physical_memory_reset_dirty(current_addr,
-                                            current_addr + TARGET_PAGE_SIZE,
-                                            MIGRATION_DIRTY_FLAG);
+            migration_bitmap_reset_dirty(current_addr,
+                                         current_addr + TARGET_PAGE_SIZE,
+                                         MIGRATION_DIRTY_FLAG);
 
             p = block->host + offset;
 
@@ -185,7 +185,7 @@ static ram_addr_t ram_save_remaining(void)
         ram_addr_t addr;
         for (addr = block->offset; addr < block->offset + block->length;
              addr += TARGET_PAGE_SIZE) {
-            if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
+            if (migration_bitmap_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
                 count++;
             }
         }
@@ -265,6 +265,8 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         return 0;
     }
 
+    sync_migration_bitmap(0, TARGET_PHYS_ADDR_MAX);
+
     if (stage == 1) {
         RAMBlock *block;
         bytes_transferred = 0;
@@ -276,9 +278,9 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         QLIST_FOREACH(block, &ram_list.blocks, next) {
             for (addr = block->offset; addr < block->offset + block->length;
                  addr += TARGET_PAGE_SIZE) {
-                if (!cpu_physical_memory_get_dirty(addr,
+                if (!migration_bitmap_get_dirty(addr,
                                                    MIGRATION_DIRTY_FLAG)) {
-                    cpu_physical_memory_set_dirty(addr);
+                    migration_bitmap_set_dirty(addr);
                 }
             }
         }
@@ -298,6 +300,11 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
     bytes_transferred_last = bytes_transferred;
     bwidth = qemu_get_clock_ns(rt_clock);
 
+    if (stage != 3) {
+        qemu_mutex_lock_ramlist();
+        qemu_mutex_unlock_iothread();
+    }
+
     while (!qemu_file_rate_limit(f)) {
         int bytes_sent;
 
@@ -308,6 +315,11 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         }
     }
 
+    if (stage != 3) {
+        qemu_mutex_lock_iothread();
+        qemu_mutex_unlock_ramlist();
+    }
+
     bwidth = qemu_get_clock_ns(rt_clock) - bwidth;
     bwidth = (bytes_transferred - bytes_transferred_last) / bwidth;
 
diff --git a/cpu-all.h b/cpu-all.h
index eab9803..e709277 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -935,6 +935,7 @@ typedef struct RAMBlock {
 typedef struct RAMList {
     QemuMutex mutex;
     uint8_t *phys_dirty;
+    uint8_t *migration_bitmap;
     QLIST_HEAD(ram, RAMBlock) blocks;
     QLIST_HEAD(, RAMBlock) blocks_mru;
 } RAMList;
@@ -1008,8 +1009,44 @@ static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
     }
 }
 
+
+
 void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                      int dirty_flags);
+
+static inline int migration_bitmap_get_dirty(ram_addr_t addr,
+                                             int dirty_flags)
+{
+    return ram_list.migration_bitmap[addr >> TARGET_PAGE_BITS] & dirty_flags;
+}
+
+static inline void migration_bitmap_set_dirty(ram_addr_t addr)
+{
+    ram_list.migration_bitmap[addr >> TARGET_PAGE_BITS] = 0xff;
+}
+
+static inline void migration_bitmap_mask_dirty_range(ram_addr_t start,
+                                                     int length,
+                                                     int dirty_flags)
+{
+    int i, mask, len;
+    uint8_t *p;
+
+    len = length >> TARGET_PAGE_BITS;
+    mask = ~dirty_flags;
+    p = ram_list.migration_bitmap + (start >> TARGET_PAGE_BITS);
+    for (i = 0; i < len; i++) {
+        p[i] &= mask;
+    }
+}
+
+
+void migration_bitmap_reset_dirty(ram_addr_t start,
+                                  ram_addr_t end,
+                                  int dirty_flags);
+
+void sync_migration_bitmap(ram_addr_t start, ram_addr_t end);
+
 void cpu_tlb_update_dirty(CPUState *env);
 
 int cpu_physical_memory_set_dirty_tracking(int enable);
diff --git a/exec.c b/exec.c
index 404d8ea..36a44c7 100644
--- a/exec.c
+++ b/exec.c
@@ -2111,6 +2111,10 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
         abort();
     }
 
+    if (kvm_enabled()) {
+        return;
+    }
+
     for(env = first_cpu; env != NULL; env = env->next_cpu) {
         int mmu_idx;
         for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
@@ -2119,8 +2123,61 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                       start1, length);
         }
     }
+
+}
+
+void migration_bitmap_reset_dirty(ram_addr_t start, ram_addr_t end,
+                                  int dirty_flags)
+{
+    unsigned long length, start1;
+
+    start &= TARGET_PAGE_MASK;
+    end = TARGET_PAGE_ALIGN(end);
+
+    length = end - start;
+    if (length == 0) {
+        return;
+    }
+
+    migration_bitmap_mask_dirty_range(start, length, dirty_flags);
+
+    /* we modify the TLB cache so that the dirty bit will be set again
+       when accessing the range */
+    start1 = (unsigned long)qemu_safe_ram_ptr(start);
+    /* Check that we don't span multiple blocks - this breaks the
+       address comparisons below.  */
+    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
+            != (end - 1) - start) {
+        abort();
+    }
+}
+
+void sync_migration_bitmap(ram_addr_t start, ram_addr_t end)
+{
+    unsigned long length, len, i;
+    ram_addr_t addr;
+    start &= TARGET_PAGE_MASK;
+    end = TARGET_PAGE_ALIGN(end);
+
+    length = end - start;
+    if (length == 0) {
+        return;
+    }
+
+    len = length >> TARGET_PAGE_BITS;
+    for (i = 0; i < len; i++) {
+        addr = start + (i << TARGET_PAGE_BITS);
+        if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
+            migration_bitmap_set_dirty(addr);
+            cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
+                                            MIGRATION_DIRTY_FLAG);
+        }
+    }
+
 }
 
+
+
 int cpu_physical_memory_set_dirty_tracking(int enable)
 {
     int ret = 0;
@@ -2997,6 +3054,13 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
     memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
            0xff, size >> TARGET_PAGE_BITS);
 
+    ram_list.migration_bitmap = qemu_realloc(ram_list.migration_bitmap,
+                                             last_ram_offset() >>
+                                             TARGET_PAGE_BITS);
+
+    memset(ram_list.migration_bitmap + (new_block->offset >> TARGET_PAGE_BITS),
+           0xff, size >> TARGET_PAGE_BITS);
+
     qemu_mutex_unlock_ramlist();
 
     if (kvm_enabled())
-- 
1.7.4.1
