Re: [PATCH v3 11/15] util/mmap-alloc: Implement resizeable mmaps

2020-02-28 Thread Peter Xu
On Thu, Feb 27, 2020 at 11:12:01AM +0100, David Hildenbrand wrote:
> Implement resizeable mmaps. For now, the actual resizing is not wired up.
> Introduce qemu_ram_mmap_resizeable() and qemu_ram_mmap_resize(). Make
> qemu_ram_mmap() a wrapper of qemu_ram_mmap_resizeable().
> 
> Cc: Richard Henderson 
> Cc: Igor Kotrasinski 
> Cc: Murilo Opsfelder Araujo 
> Cc: "Michael S. Tsirkin" 
> Cc: Greg Kurz 
> Cc: Eduardo Habkost 
> Cc: "Dr. David Alan Gilbert" 
> Cc: Igor Mammedov 
> Signed-off-by: David Hildenbrand 

Reviewed-by: Peter Xu 

-- 
Peter Xu

[PATCH v3 11/15] util/mmap-alloc: Implement resizeable mmaps

2020-02-27 Thread David Hildenbrand
Implement resizeable mmaps. For now, the actual resizing is not wired up.
Introduce qemu_ram_mmap_resizeable() and qemu_ram_mmap_resize(). Make
qemu_ram_mmap() a wrapper of qemu_ram_mmap_resizeable().

Cc: Richard Henderson 
Cc: Igor Kotrasinski 
Cc: Murilo Opsfelder Araujo 
Cc: "Michael S. Tsirkin" 
Cc: Greg Kurz 
Cc: Eduardo Habkost 
Cc: "Dr. David Alan Gilbert" 
Cc: Igor Mammedov 
Signed-off-by: David Hildenbrand 
---
 include/qemu/mmap-alloc.h | 21 +++++++++++++--------
 util/mmap-alloc.c         | 43 +++++++++++++++++++++++++++++++------------
 2 files changed, 44 insertions(+), 20 deletions(-)

diff --git a/include/qemu/mmap-alloc.h b/include/qemu/mmap-alloc.h
index e786266b92..ca8f7edf70 100644
--- a/include/qemu/mmap-alloc.h
+++ b/include/qemu/mmap-alloc.h
@@ -7,11 +7,13 @@ size_t qemu_fd_getpagesize(int fd);
 size_t qemu_mempath_getpagesize(const char *mem_path);
 
 /**
- * qemu_ram_mmap: mmap the specified file or device.
+ * qemu_ram_mmap_resizeable: reserve a memory region of @max_size to mmap the
+ *                           specified file or device and mmap @size of it.
  *
  * Parameters:
  *  @fd: the file or the device to mmap
  *  @size: the number of bytes to be mmaped
+ *  @max_size: the number of bytes to be reserved
  *  @align: if not zero, specify the alignment of the starting mapping address;
  *          otherwise, the alignment in use will be determined by QEMU.
  *  @shared: map has RAM_SHARED flag.
@@ -21,12 +23,15 @@ size_t qemu_mempath_getpagesize(const char *mem_path);
  *  On success, return a pointer to the mapped area.
  *  On failure, return MAP_FAILED.
  */
-void *qemu_ram_mmap(int fd,
-                    size_t size,
-                    size_t align,
-                    bool shared,
-                    bool is_pmem);
-
-void qemu_ram_munmap(int fd, void *ptr, size_t size);
+void *qemu_ram_mmap_resizeable(int fd, size_t size, size_t max_size,
+                               size_t align, bool shared, bool is_pmem);
+bool qemu_ram_mmap_resize(void *ptr, int fd, size_t old_size, size_t new_size,
+                          bool shared, bool is_pmem);
+static inline void *qemu_ram_mmap(int fd, size_t size, size_t align,
+                                  bool shared, bool is_pmem)
+{
+    return qemu_ram_mmap_resizeable(fd, size, size, align, shared, is_pmem);
+}
+void qemu_ram_munmap(int fd, void *ptr, size_t max_size);
 
 #endif
diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
index c6a6075215..8f9c533f32 100644
--- a/util/mmap-alloc.c
+++ b/util/mmap-alloc.c
@@ -173,11 +173,8 @@ static inline size_t mmap_guard_pagesize(int fd)
 #endif
 }
 
-void *qemu_ram_mmap(int fd,
-                    size_t size,
-                    size_t align,
-                    bool shared,
-                    bool is_pmem)
+void *qemu_ram_mmap_resizeable(int fd, size_t size, size_t max_size,
+                               size_t align, bool shared, bool is_pmem)
 {
     const size_t guard_pagesize = mmap_guard_pagesize(fd);
     const size_t pagesize = qemu_fd_getpagesize(fd);
@@ -185,12 +182,14 @@ void *qemu_ram_mmap(int fd,
     void *ptr, *guardptr;
 
     g_assert(QEMU_IS_ALIGNED(size, pagesize));
+    g_assert(QEMU_IS_ALIGNED(max_size, pagesize));
 
     /*
      * Note: this always allocates at least one extra page of virtual address
-     * space, even if size is already aligned.
+     * space, even if the size is already aligned. We will reserve an area of
+     * at least max_size, but only populate the requested part of it.
      */
-    total = size + align;
+    total = max_size + align;
 
     guardptr = mmap_reserve(NULL, total, fd);
     if (guardptr == MAP_FAILED) {
@@ -218,21 +217,41 @@ void *qemu_ram_mmap(int fd,
      * a guard page guarding against potential buffer overflows.
      */
     total -= offset;
-    if (total > size + guard_pagesize) {
-        munmap(ptr + size + guard_pagesize, total - size - guard_pagesize);
+    if (total > max_size + guard_pagesize) {
+        munmap(ptr + max_size + guard_pagesize,
+               total - max_size - guard_pagesize);
     }
 
     return ptr;
 }
 
-void qemu_ram_munmap(int fd, void *ptr, size_t size)
+bool qemu_ram_mmap_resize(void *ptr, int fd, size_t old_size, size_t new_size,
+                          bool shared, bool is_pmem)
 {
     const size_t pagesize = qemu_fd_getpagesize(fd);
 
-    g_assert(QEMU_IS_ALIGNED(size, pagesize));
+    g_assert(QEMU_IS_ALIGNED(old_size, pagesize));
+    g_assert(QEMU_IS_ALIGNED(new_size, pagesize));
+
+    if (old_size < new_size) {
+        /* populate the missing piece into the reserved area */
+        ptr = mmap_populate(ptr + old_size, new_size - old_size, fd, old_size,
+                            shared, is_pmem);
+    } else if (old_size > new_size) {
+        /* discard this piece, marking it reserved */
+        ptr = mmap_reserve(ptr + new_size, old_size - new_size, fd);
+    }
+    return ptr != MAP_FAILED;
+}
+
+void qemu_ram_munmap(int fd, void *ptr, size_t max_size)
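
For readers following along: mmap_reserve() and mmap_populate() (both helpers
predate this patch) implement the classic reserve-then-populate mmap pattern,
which is what makes the resize possible without moving the mapping. Below is a
minimal standalone sketch of that pattern, not taken from the patch; it uses
anonymous memory and a hard-coded page size for brevity, whereas the real
helpers map from an fd:

/* Reserve-then-populate, the pattern behind mmap_reserve()/mmap_populate().
 * Anonymous memory for brevity; the helpers above map from an fd instead. */
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    const size_t page = 4096;           /* illustrative page size */
    const size_t max_size = 16 * page;  /* address space we may ever need */
    size_t size = 4 * page;             /* what we actually back right now */

    /* Reservation: no access, no swap accounting (cf. mmap_reserve()). */
    char *ptr = mmap(NULL, max_size, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
    if (ptr == MAP_FAILED) {
        return 1;
    }

    /* Populate the first part in place (cf. mmap_populate()). */
    if (mmap(ptr, size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED) {
        return 1;
    }
    memset(ptr, 0x55, size);

    /* Grow: back more of the reservation; the start address never moves. */
    if (mmap(ptr + size, 4 * page, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0) == MAP_FAILED) {
        return 1;
    }
    size += 4 * page;

    /* Shrink: turn the tail back into a bare reservation. */
    if (mmap(ptr + 2 * page, size - 2 * page, PROT_NONE,
             MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
             -1, 0) == MAP_FAILED) {
        return 1;
    }

    return munmap(ptr, max_size);
}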
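
The trimming arithmetic in qemu_ram_mmap_resizeable() can be restated with
concrete numbers. In the sketch below, guardptr, align, and the sizes are
made-up values for illustration only; the code merely replays the
offset/total computation from the function above:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uintptr_t guardptr = 0x7f2a00001000; /* pretend mmap_reserve() result */
    const size_t align = 2u << 20;             /* 2 MiB alignment request */
    const size_t max_size = 6u << 20;          /* maximum (reserved) RAM size */
    const size_t guard_pagesize = 4096;        /* illustrative guard page size */

    size_t total = max_size + align;           /* reservation incl. slack */
    uintptr_t ptr = (guardptr + align - 1) & ~(uintptr_t)(align - 1);
    size_t offset = ptr - guardptr;            /* head slack, unmapped */

    total -= offset;
    size_t tail = 0;
    if (total > max_size + guard_pagesize) {   /* tail slack, also unmapped */
        tail = total - max_size - guard_pagesize;
    }

    printf("head slack %zu, usable %zu, guard %zu, tail slack %zu\n",
           offset, max_size, guard_pagesize, tail);
    return 0;
}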
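
Finally, a hypothetical call sequence against the new interface. The function
names and signatures come from the header change above, but
resizeable_ram_example() itself is illustrative, not part of the patch, and
builds only inside a QEMU tree:

#include "qemu/osdep.h"
#include "qemu/mmap-alloc.h"

/* Hypothetical example; page would be qemu_fd_getpagesize(fd). */
static void resizeable_ram_example(int fd, size_t page)
{
    const size_t max_size = 64 * page;  /* reserve for the largest size */
    size_t size = 16 * page;            /* populate the initial size */

    void *ptr = qemu_ram_mmap_resizeable(fd, size, max_size, 0, false, false);
    if (ptr == MAP_FAILED) {
        return;
    }

    /* Grow within the reservation; ptr stays valid and unchanged. */
    if (qemu_ram_mmap_resize(ptr, fd, size, 32 * page, false, false)) {
        size = 32 * page;
    }

    /* Tear down the entire reservation, not just the populated part. */
    qemu_ram_munmap(fd, ptr, max_size);
}

The point of the design is that growing and shrinking happen inside the
initial reservation, so a RAM block's address stays stable for its whole
lifetime.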