On 30.10.2015 08:56, Xiao Guangrong wrote:
Currently file_ram_alloc() is designed for hugetlbfs; however, the memory
of nvdimm can come from either a raw pmem device, e.g. /dev/pmem, or a file
located on a DAX-enabled filesystem.

So this patch lets it work on any kind of path.

Signed-off-by: Xiao Guangrong <guangrong.x...@linux.intel.com>
---
  exec.c | 56 +++++++++++++++++---------------------------------------
  1 file changed, 17 insertions(+), 39 deletions(-)

diff --git a/exec.c b/exec.c
index 8af2570..3ca7e50 100644
--- a/exec.c
+++ b/exec.c
@@ -1174,32 +1174,6 @@ void qemu_mutex_unlock_ramlist(void)
  }
#ifdef __linux__
-
-#include <sys/vfs.h>
-
-#define HUGETLBFS_MAGIC       0x958458f6
-
-static long gethugepagesize(const char *path, Error **errp)
-{
-    struct statfs fs;
-    int ret;
-
-    do {
-        ret = statfs(path, &fs);
-    } while (ret != 0 && errno == EINTR);
-
-    if (ret != 0) {
-        error_setg_errno(errp, errno, "failed to get page size of file %s",
-                         path);
-        return 0;
-    }
-
-    if (fs.f_type != HUGETLBFS_MAGIC)
-        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
-
-    return fs.f_bsize;
-}
-
  static void *file_ram_alloc(RAMBlock *block,
                              ram_addr_t memory,
                              const char *path,
@@ -1210,20 +1184,24 @@ static void *file_ram_alloc(RAMBlock *block,
      char *c;
      void *area;
      int fd;
-    uint64_t hpagesize;
-    Error *local_err = NULL;
+    uint64_t pagesize;
-    hpagesize = gethugepagesize(path, &local_err);
-    if (local_err) {
-        error_propagate(errp, local_err);
+    pagesize = qemu_file_get_page_size(path);
+    if (!pagesize) {
+        error_setg(errp, "can't get page size for %s", path);
          goto error;
      }
-    block->mr->align = hpagesize;
-    if (memory < hpagesize) {
+    if (pagesize == getpagesize()) {
+        fprintf(stderr, "Memory is not allocated from HugeTlbfs.\n");
+    }

It is strange to see this warning every time.

Shouldn't the differentiation be done explicitly on the command line? Maybe a separate option mem-tlb, or a separate flag tlbfs=on, or, for the new feature, a new option mem-file, or prefixes for paths (tlbfs://, file://)... Or some other way to keep the two cases split rather than mixed.
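
For illustration only (the hugetlb_requested flag below is made up for this sketch, it is not part of the patch), the check could then be tied to an explicit request instead of printing the warning unconditionally:

    /* Hypothetical sketch: hugetlb_requested would be set from a new,
     * explicit command-line option (e.g. a tlbfs=on flag) instead of
     * being guessed from the path.  Complain only when the user asked
     * for huge pages but the file is backed by normal pages. */
    if (hugetlb_requested && pagesize == getpagesize()) {
        error_setg(errp, "%s is not backed by huge pages", path);
        goto error;
    }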

+
+    block->mr->align = pagesize;
+
+    if (memory < pagesize) {
          error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
-                   "or larger than huge page size 0x%" PRIx64,
-                   memory, hpagesize);
+                   "or larger than page size 0x%" PRIx64,
+                   memory, pagesize);
          goto error;
      }
@@ -1247,14 +1225,14 @@ static void *file_ram_alloc(RAMBlock *block,
      fd = mkstemp(filename);
      if (fd < 0) {
          error_setg_errno(errp, errno,
-                         "unable to create backing store for hugepages");
+                         "unable to create backing store for path %s", path);
          g_free(filename);
          goto error;
      }
      unlink(filename);
      g_free(filename);
-    memory = ROUND_UP(memory, hpagesize);
+    memory = ROUND_UP(memory, pagesize);
    /*
       * ftruncate is not supported by hugetlbfs in older
@@ -1266,10 +1244,10 @@ static void *file_ram_alloc(RAMBlock *block,
          perror("ftruncate");
      }
-    area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
+    area = qemu_ram_mmap(fd, memory, pagesize, block->flags & RAM_SHARED);
      if (area == MAP_FAILED) {
          error_setg_errno(errp, errno,
-                         "unable to map backing store for hugepages");
+                         "unable to map backing store for path %s", path);
          close(fd);
          goto error;
      }
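
As a side note, qemu_file_get_page_size() itself is introduced by another patch in this series, so it is not visible in this hunk. A minimal sketch of what such a helper might look like, assuming it simply probes the containing filesystem with statfs() and falls back to the normal page size (the real implementation may differ):

    #include <sys/vfs.h>   /* statfs() */
    #include <unistd.h>    /* getpagesize() */
    #include <errno.h>
    #include <stdint.h>

    #define HUGETLBFS_MAGIC 0x958458f6

    /* Hypothetical sketch only: return the page size backing 'path',
     * or 0 on error. */
    static uint64_t file_get_page_size_sketch(const char *path)
    {
        struct statfs fs;
        int ret;

        do {
            ret = statfs(path, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret != 0) {
            return 0;
        }

        /* On hugetlbfs the reported block size is the huge page size;
         * on any other filesystem fall back to the system page size. */
        if (fs.f_type == HUGETLBFS_MAGIC) {
            return fs.f_bsize;
        }
        return getpagesize();
    }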


--
Best regards,
Vladimir
* now, @virtuozzo.com instead of @parallels.com. Sorry for this inconvenience.

