This patch allows us to distinguish between two
length values for each block:
    max_length - length of memory block that was allocated
    used_length - length of block used by QEMU/guest

Currently, we set used_length = max_length unconditionally.
Follow-up patches allow used_length <= max_length.
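
For illustration only (not part of the patch), a minimal C sketch of the
resulting layout and the invariant the series relies on; ram_addr_t is
stood in by uint64_t here, and MiniRAMBlock/check_block are made-up names:

    #include <assert.h>
    #include <stdint.h>

    typedef uint64_t ram_addr_t;   /* stand-in for QEMU's ram_addr_t */

    typedef struct {
        ram_addr_t offset;         /* start of the block in ram_addr_t space */
        ram_addr_t used_length;    /* portion used by QEMU/the guest */
        ram_addr_t max_length;     /* size of the allocated host memory */
    } MiniRAMBlock;

    /* This patch keeps used_length == max_length; follow-up patches only
     * require used_length <= max_length. */
    static void check_block(const MiniRAMBlock *b)
    {
        assert(b->used_length <= b->max_length);
    }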

Signed-off-by: Michael S. Tsirkin <m...@redhat.com>
Reviewed-by: Paolo Bonzini <pbonz...@redhat.com>
---
 include/exec/cpu-all.h |  3 ++-
 arch_init.c            | 19 +++++++++---------
 exec.c                 | 52 +++++++++++++++++++++++++++-----------------------
 3 files changed, 40 insertions(+), 34 deletions(-)

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 62f5581..6f2130e 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -303,7 +303,8 @@ typedef struct RAMBlock {
     struct MemoryRegion *mr;
     uint8_t *host;
     ram_addr_t offset;
-    ram_addr_t length;
+    ram_addr_t used_length;
+    ram_addr_t max_length;
     uint32_t flags;
     char idstr[256];
     /* Reads can take either the iothread or the ramlist lock.
diff --git a/arch_init.c b/arch_init.c
index 7680d28..106f46e 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -522,7 +522,7 @@ static void migration_bitmap_sync(void)
     address_space_sync_dirty_bitmap(&address_space_memory);
 
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        migration_bitmap_sync_range(block->mr->ram_addr, block->length);
+        migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
     }
     trace_migration_bitmap_sync_end(migration_dirty_pages
                                     - num_dirty_pages_init);
@@ -668,7 +668,7 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
             offset >= last_offset) {
             break;
         }
-        if (offset >= block->length) {
+        if (offset >= block->used_length) {
             offset = 0;
             block = QTAILQ_NEXT(block, next);
             if (!block) {
@@ -727,7 +727,7 @@ uint64_t ram_bytes_total(void)
     uint64_t total = 0;
 
     QTAILQ_FOREACH(block, &ram_list.blocks, next)
-        total += block->length;
+        total += block->used_length;
 
     return total;
 }
@@ -831,7 +831,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         uint64_t block_pages;
 
-        block_pages = block->length >> TARGET_PAGE_BITS;
+        block_pages = block->used_length >> TARGET_PAGE_BITS;
         migration_dirty_pages += block_pages;
     }
 
@@ -844,7 +844,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         qemu_put_byte(f, strlen(block->idstr));
         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
-        qemu_put_be64(f, block->length);
+        qemu_put_be64(f, block->used_length);
     }
 
     qemu_mutex_unlock_ramlist();
@@ -1015,7 +1015,7 @@ static inline void *host_from_stream_offset(QEMUFile *f,
     uint8_t len;
 
     if (flags & RAM_SAVE_FLAG_CONTINUE) {
-        if (!block || block->length <= offset) {
+        if (!block || block->max_length <= offset) {
             error_report("Ack, bad migration stream!");
             return NULL;
         }
@@ -1028,7 +1028,8 @@ static inline void *host_from_stream_offset(QEMUFile *f,
     id[len] = 0;
 
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        if (!strncmp(id, block->idstr, sizeof(id)) && block->length > offset) {
+        if (!strncmp(id, block->idstr, sizeof(id)) &&
+            block->max_length > offset) {
             return memory_region_get_ram_ptr(block->mr) + offset;
         }
     }
@@ -1085,10 +1086,10 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
 
                 QTAILQ_FOREACH(block, &ram_list.blocks, next) {
                     if (!strncmp(id, block->idstr, sizeof(id))) {
-                        if (block->length != length) {
+                        if (block->used_length != length) {
                             error_report("Length mismatch: %s: 0x" RAM_ADDR_FMT
                                          " in != 0x" RAM_ADDR_FMT, id, length,
-                                         block->length);
+                                         block->used_length);
                             ret =  -EINVAL;
                         }
                         break;
diff --git a/exec.c b/exec.c
index a89aa6c..b69216a 100644
--- a/exec.c
+++ b/exec.c
@@ -812,11 +812,11 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
 
     /* The list is protected by the iothread lock here.  */
     block = ram_list.mru_block;
-    if (block && addr - block->offset < block->length) {
+    if (block && addr - block->offset < block->max_length) {
         goto found;
     }
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        if (addr - block->offset < block->length) {
+        if (addr - block->offset < block->max_length) {
             goto found;
         }
     }
@@ -1305,13 +1305,14 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
 
     /* This assumes the iothread lock is taken here too.  */
     qemu_mutex_lock_ramlist();
-    new_block->offset = find_ram_offset(new_block->length);
+    new_block->offset = find_ram_offset(new_block->max_length);
 
     if (!new_block->host) {
         if (xen_enabled()) {
-            xen_ram_alloc(new_block->offset, new_block->length, new_block->mr);
+            xen_ram_alloc(new_block->offset, new_block->max_length,
+                          new_block->mr);
         } else {
-            new_block->host = phys_mem_alloc(new_block->length,
+            new_block->host = phys_mem_alloc(new_block->max_length,
                                              &new_block->mr->align);
             if (!new_block->host) {
                 error_setg_errno(errp, errno,
@@ -1320,13 +1321,13 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
                 qemu_mutex_unlock_ramlist();
                 return -1;
             }
-            memory_try_enable_merging(new_block->host, new_block->length);
+            memory_try_enable_merging(new_block->host, new_block->max_length);
         }
     }
 
     /* Keep the list sorted from biggest to smallest block.  */
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        if (block->length < new_block->length) {
+        if (block->max_length < new_block->max_length) {
             break;
         }
     }
@@ -1350,14 +1351,15 @@ static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
                                    old_ram_size, new_ram_size);
        }
     }
-    cpu_physical_memory_set_dirty_range(new_block->offset, new_block->length);
+    cpu_physical_memory_set_dirty_range(new_block->offset,
+                                        new_block->used_length);
 
-    qemu_ram_setup_dump(new_block->host, new_block->length);
-    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_HUGEPAGE);
-    qemu_madvise(new_block->host, new_block->length, QEMU_MADV_DONTFORK);
+    qemu_ram_setup_dump(new_block->host, new_block->max_length);
+    qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
+    qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
 
     if (kvm_enabled()) {
-        kvm_setup_guest_memory(new_block->host, new_block->length);
+        kvm_setup_guest_memory(new_block->host, new_block->max_length);
     }
 
     return new_block->offset;
@@ -1391,7 +1393,8 @@ ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
     size = TARGET_PAGE_ALIGN(size);
     new_block = g_malloc0(sizeof(*new_block));
     new_block->mr = mr;
-    new_block->length = size;
+    new_block->used_length = size;
+    new_block->max_length = size;
     new_block->flags = share ? RAM_SHARED : 0;
     new_block->host = file_ram_alloc(new_block, size,
                                      mem_path, errp);
@@ -1420,7 +1423,8 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
     size = TARGET_PAGE_ALIGN(size);
     new_block = g_malloc0(sizeof(*new_block));
     new_block->mr = mr;
-    new_block->length = size;
+    new_block->used_length = size;
+    new_block->max_length = size;
     new_block->fd = -1;
     new_block->host = host;
     if (host) {
@@ -1475,11 +1479,11 @@ void qemu_ram_free(ram_addr_t addr)
                 xen_invalidate_map_cache_entry(block->host);
 #ifndef _WIN32
             } else if (block->fd >= 0) {
-                munmap(block->host, block->length);
+                munmap(block->host, block->max_length);
                 close(block->fd);
 #endif
             } else {
-                qemu_anon_ram_free(block->host, block->length);
+                qemu_anon_ram_free(block->host, block->max_length);
             }
             g_free(block);
             break;
@@ -1499,7 +1503,7 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
 
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         offset = addr - block->offset;
-        if (offset < block->length) {
+        if (offset < block->max_length) {
             vaddr = ramblock_ptr(block, offset);
             if (block->flags & RAM_PREALLOC) {
                 ;
@@ -1575,7 +1579,7 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
             return xen_map_cache(addr, 0, 0);
         } else if (block->host == NULL) {
             block->host =
-                xen_map_cache(block->offset, block->length, 1);
+                xen_map_cache(block->offset, block->max_length, 1);
         }
     }
     return ramblock_ptr(block, addr - block->offset);
@@ -1594,9 +1598,9 @@ static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
         RAMBlock *block;
 
         QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-            if (addr - block->offset < block->length) {
-                if (addr - block->offset + *size > block->length)
-                    *size = block->length - addr + block->offset;
+            if (addr - block->offset < block->max_length) {
+                if (addr - block->offset + *size > block->max_length)
+                    *size = block->max_length - addr + block->offset;
                 return ramblock_ptr(block, addr - block->offset);
             }
         }
@@ -1619,7 +1623,7 @@ MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
     }
 
     block = ram_list.mru_block;
-    if (block && block->host && host - block->host < block->length) {
+    if (block && block->host && host - block->host < block->max_length) {
         goto found;
     }
 
@@ -1628,7 +1632,7 @@ MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
         if (block->host == NULL) {
             continue;
         }
-        if (host - block->host < block->length) {
+        if (host - block->host < block->max_length) {
             goto found;
         }
     }
@@ -2873,7 +2877,7 @@ void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
     RAMBlock *block;
 
     QTAILQ_FOREACH(block, &ram_list.blocks, next) {
-        func(block->host, block->offset, block->length, opaque);
+        func(block->host, block->offset, block->used_length, opaque);
     }
 }
 #endif
-- 
MST

