Signed-off-by: Juan Quintela <quint...@redhat.com>
---
 migration/dirtyrate.c | 11 +++++------
 softmmu/dirtylimit.c  | 11 +++--------
 2 files changed, 8 insertions(+), 14 deletions(-)
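
For reviewers, a minimal sketch of what qemu_target_pages_to_MiB() is
assumed to do (presumably the helper declared in exec/target_page.h, per
the include added below); the shift-based form here is an assumption for
illustration, not the authoritative definition:

/*
 * Assumed helper shape: convert a count of target pages into MiB.
 * Equivalent to (pages * TARGET_PAGE_SIZE) >> 20, written as a single
 * right shift so the intermediate product cannot overflow.
 */
size_t qemu_target_pages_to_MiB(size_t pages)
{
    int page_bits = qemu_target_page_bits();

    /* So far, the largest (non-huge) page size is 64k, i.e. 16 bits. */
    g_assert(page_bits < 20);

    return pages >> (20 - page_bits);
}

With such a helper, both call sites below can drop the open-coded ">> 20"
conversion and the page-size assertion that guarded it.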

diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index 180ba38c7a..38ea95af59 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -16,6 +16,7 @@
 #include "qapi/error.h"
 #include "cpu.h"
 #include "exec/ramblock.h"
+#include "exec/target_page.h"
 #include "exec/ram_addr.h"
 #include "qemu/rcu_queue.h"
 #include "qemu/main-loop.h"
@@ -74,13 +75,11 @@ static inline void record_dirtypages(DirtyPageRecord *dirty_pages,
 static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
                                       int64_t calc_time_ms)
 {
-    uint64_t memory_size_MB;
     uint64_t increased_dirty_pages =
         dirty_pages.end_pages - dirty_pages.start_pages;
+    uint64_t memory_size_MiB = qemu_target_pages_to_MiB(increased_dirty_pages);
 
-    memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20;
-
-    return memory_size_MB * 1000 / calc_time_ms;
+    return memory_size_MiB * 1000 / calc_time_ms;
 }
 
 void global_dirty_log_change(unsigned int flag, bool start)
@@ -291,8 +290,8 @@ static void update_dirtyrate_stat(struct RamblockDirtyInfo *info)
     DirtyStat.page_sampling.total_dirty_samples += info->sample_dirty_count;
     DirtyStat.page_sampling.total_sample_count += info->sample_pages_count;
     /* size of total pages in MB */
-    DirtyStat.page_sampling.total_block_mem_MB += (info->ramblock_pages *
-                                                   TARGET_PAGE_SIZE) >> 20;
+    DirtyStat.page_sampling.total_block_mem_MB +=
+        qemu_target_pages_to_MiB(info->ramblock_pages);
 }
 
 static void update_dirtyrate(uint64_t msec)
diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c
index 71bf6dc7a4..015a9038d1 100644
--- a/softmmu/dirtylimit.c
+++ b/softmmu/dirtylimit.c
@@ -235,20 +235,15 @@ bool dirtylimit_vcpu_index_valid(int cpu_index)
 static uint64_t dirtylimit_dirty_ring_full_time(uint64_t dirtyrate)
 {
     static uint64_t max_dirtyrate;
-    unsigned target_page_bits = qemu_target_page_bits();
-    uint64_t dirty_ring_size_MB;
+    uint64_t dirty_ring_size_MiB;
 
-    /* So far, the largest (non-huge) page size is 64k, i.e. 16 bits. */
-    assert(target_page_bits < 20);
-
-    /* Convert ring size (pages) to MiB (2**20). */
-    dirty_ring_size_MB = kvm_dirty_ring_size() >> (20 - target_page_bits);
+    dirty_ring_size_MiB = qemu_target_pages_to_MiB(kvm_dirty_ring_size());
 
     if (max_dirtyrate < dirtyrate) {
         max_dirtyrate = dirtyrate;
     }
 
-    return dirty_ring_size_MB * 1000000 / max_dirtyrate;
+    return dirty_ring_size_MiB * 1000000 / max_dirtyrate;
 }
 
 static inline bool dirtylimit_done(uint64_t quota,
-- 
2.40.1

