On 29/05/2023 15:11, Joao Martins wrote:

Nit: s/nr/number/ in the subject.

In preparation for including the number of dirty pages in the
vfio_get_dirty_bitmap() tracepoint, make
cpu_physical_memory_set_dirty_lebitmap() return the number of dirty
pages, similarly to cpu_physical_memory_sync_dirty_bitmap().

To avoid counting twice when GLOBAL_DIRTY_DIRTY_RATE is enabled, stash the
number of bits set per bitmap quad in a variable (@nbits) and reuse it
there.

Signed-off-by: Joao Martins <joao.m.mart...@oracle.com>
Reviewed-by: Peter Xu <pet...@redhat.com>
---
  include/exec/ram_addr.h | 28 ++++++++++++++++++++++------
  1 file changed, 22 insertions(+), 6 deletions(-)

diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 90a82692904f..9f2e3893f562 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -334,14 +334,23 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
  }

  #if !defined(_WIN32)
-static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
-                                                          ram_addr_t start,
-                                                          ram_addr_t pages)
+
+/*
+ * Contrary to cpu_physical_memory_sync_dirty_bitmap() this function returns
+ * the number of dirty pages in @bitmap passed as argument. On the other hand,
+ * cpu_physical_memory_sync_dirty_bitmap() returns newly dirtied pages that
+ * weren't set in the global migration bitmap.
+ */
+static inline
+uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
+                                                ram_addr_t start,
+                                                ram_addr_t pages)
  {
      unsigned long i, j;
-    unsigned long page_number, c;
+    unsigned long page_number, c, nbits;
      hwaddr addr;
      ram_addr_t ram_addr;
+    uint64_t num_dirty = 0;
      unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
      unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE;
      unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
@@ -369,6 +378,7 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                  if (bitmap[k]) {
                      unsigned long temp = leul_to_cpu(bitmap[k]);

+                    nbits = ctpopl(temp);
                      qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);

                      if (global_dirty_tracking) {
@@ -377,10 +387,12 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                  temp);
                          if (unlikely(
                              global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
-                            total_dirty_pages += ctpopl(temp);
+                            total_dirty_pages += nbits;
                          }
                      }

+                    num_dirty += nbits;
+
                      if (tcg_enabled()) {
                          qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
                                     temp);
@@ -409,9 +421,11 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
          for (i = 0; i < len; i++) {
              if (bitmap[i] != 0) {
                  c = leul_to_cpu(bitmap[i]);
+                nbits = ctpopl(c);
                 if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) {
-                    total_dirty_pages += ctpopl(c);
+                    total_dirty_pages += nbits;
                  }
+                num_dirty += nbits;
                  do {
                      j = ctzl(c);
                      c &= ~(1ul << j);
@@ -424,6 +438,8 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
              }
          }
      }
+
+    return num_dirty;
  }
  #endif /* not _WIN32 */

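The @nbits reuse is easier to see in isolation, so here is a minimal
standalone sketch of the counting scheme (illustration only, not QEMU
code): each little-endian bitmap word is popcounted once and that value
feeds both the rate accounting (standing in for total_dirty_pages under
GLOBAL_DIRTY_DIRTY_RATE) and the total the function now returns.
__builtin_popcountl() stands in for QEMU's ctpopl(), and
count_dirty_pages()/rate_pages are made-up names for this sketch only.

/*
 * Standalone sketch: popcount each little-endian word once (@nbits)
 * and reuse the result for both the rate accounting and the returned
 * total, mirroring the structure of the patch above.
 */
#include <inttypes.h>
#include <stdio.h>

static uint64_t count_dirty_pages(const unsigned long *bitmap,
                                  unsigned long words,
                                  uint64_t *rate_pages)
{
    uint64_t num_dirty = 0;

    for (unsigned long i = 0; i < words; i++) {
        if (bitmap[i]) {
            /* Count the set bits once per word and reuse the result. */
            unsigned long nbits = __builtin_popcountl(bitmap[i]);

            *rate_pages += nbits;   /* dirty-rate style accounting */
            num_dirty += nbits;     /* total handed back to the caller */
        }
    }
    return num_dirty;
}

int main(void)
{
    /* Pages 0, 2 and 71 dirty: bits 0 and 2 in word 0, bit 7 in word 1. */
    unsigned long bitmap[2] = { 0x5UL, 0x80UL };
    uint64_t rate = 0;

    printf("dirty pages: %" PRIu64 "\n",
           count_dirty_pages(bitmap, 2, &rate));
    return 0;
}

With the return value in place, the vfio_get_dirty_bitmap() tracepoint in
the follow-up patch only needs to log what this function already computed,
with no second pass over the bitmap.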
--
2.39.3

