We have users in the migration context that don't hold the BQL (when
finishing migration). To prepare for further changes, use a dedicated mutex
instead of atomic operations. Keep using qatomic_read ("READ_ONCE") for the
functions that only extract the current state (e.g., used by
virtio-balloon), where locking isn't necessary.

While at it, split up the counter into two variables to make the code
easier to understand.
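
For reference, a minimal usage sketch of the unchanged external API (a
hypothetical device realize/unrealize path, not part of this patch); a
-EBUSY return means a conflicting user is already registered:

    /* Hypothetical caller, for illustration only. */
    static int my_device_realize(void)
    {
        /* Declare that this device relies on discarding RAM blocks. */
        int ret = ram_block_discard_require(true);

        if (ret) {
            /* -EBUSY: another user already disabled RAM discarding. */
            return ret;
        }
        /* ... device setup ... */
        return 0;
    }

    static void my_device_unrealize(void)
    {
        /* ... device teardown ... */

        /* Drop the requirement again; this cannot fail. */
        ram_block_discard_require(false);
    }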

Suggested-by: Peter Xu <pet...@redhat.com>
Reviewed-by: Peter Xu <pet...@redhat.com>
Reviewed-by: Pankaj Gupta <pankaj.gu...@cloud.ionos.com>
Cc: Paolo Bonzini <pbonz...@redhat.com>
Cc: "Michael S. Tsirkin" <m...@redhat.com>
Cc: Alex Williamson <alex.william...@redhat.com>
Cc: Dr. David Alan Gilbert <dgilb...@redhat.com>
Cc: Igor Mammedov <imamm...@redhat.com>
Cc: Pankaj Gupta <pankaj.gupta.li...@gmail.com>
Cc: Peter Xu <pet...@redhat.com>
Cc: Auger Eric <eric.au...@redhat.com>
Cc: Wei Yang <richard.weiy...@linux.alibaba.com>
Cc: teawater <teawat...@linux.alibaba.com>
Cc: Marek Kedzierski <mkedz...@redhat.com>
Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 softmmu/physmem.c | 70 ++++++++++++++++++++++++++---------------------
 1 file changed, 39 insertions(+), 31 deletions(-)

diff --git a/softmmu/physmem.c b/softmmu/physmem.c
index 6301f4f0a5..2e253b8547 100644
--- a/softmmu/physmem.c
+++ b/softmmu/physmem.c
@@ -3660,56 +3660,64 @@ void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root)
     }
 }
 
-/*
- * If positive, discarding RAM is disabled. If negative, discarding RAM is
- * required to work and cannot be disabled.
- */
-static int ram_block_discard_disabled;
+static unsigned int ram_block_discard_requirers;
+static unsigned int ram_block_discard_disablers;
+static QemuMutex ram_block_discard_disable_mutex;
+
+static void ram_block_discard_disable_mutex_lock(void)
+{
+    static gsize initialized;
+
+    if (g_once_init_enter(&initialized)) {
+        qemu_mutex_init(&ram_block_discard_disable_mutex);
+        g_once_init_leave(&initialized, 1);
+    }
+    qemu_mutex_lock(&ram_block_discard_disable_mutex);
+}
+
+static void ram_block_discard_disable_mutex_unlock(void)
+{
+    qemu_mutex_unlock(&ram_block_discard_disable_mutex);
+}
 
 int ram_block_discard_disable(bool state)
 {
-    int old;
+    int ret = 0;
 
+    ram_block_discard_disable_mutex_lock();
     if (!state) {
-        qatomic_dec(&ram_block_discard_disabled);
-        return 0;
+        ram_block_discard_disablers--;
+    } else if (!ram_block_discard_requirers) {
+        ram_block_discard_disablers++;
+    } else {
+        ret = -EBUSY;
     }
-
-    do {
-        old = qatomic_read(&ram_block_discard_disabled);
-        if (old < 0) {
-            return -EBUSY;
-        }
-    } while (qatomic_cmpxchg(&ram_block_discard_disabled,
-                             old, old + 1) != old);
-    return 0;
+    ram_block_discard_disable_mutex_unlock();
+    return ret;
 }
 
 int ram_block_discard_require(bool state)
 {
-    int old;
+    int ret = 0;
 
+    ram_block_discard_disable_mutex_lock();
     if (!state) {
-        qatomic_inc(&ram_block_discard_disabled);
-        return 0;
+        ram_block_discard_requirers--;
+    } else if (!ram_block_discard_disablers) {
+        ram_block_discard_requirers++;
+    } else {
+        ret = -EBUSY;
     }
-
-    do {
-        old = qatomic_read(&ram_block_discard_disabled);
-        if (old > 0) {
-            return -EBUSY;
-        }
-    } while (qatomic_cmpxchg(&ram_block_discard_disabled,
-                             old, old - 1) != old);
-    return 0;
+    ram_block_discard_disable_mutex_unlock();
+    return ret;
 }
 
 bool ram_block_discard_is_disabled(void)
 {
-    return qatomic_read(&ram_block_discard_disabled) > 0;
+    return qatomic_read(&ram_block_discard_disablers);
 }
 
 bool ram_block_discard_is_required(void)
 {
-    return qatomic_read(&ram_block_discard_disabled) < 0;
+    return qatomic_read(&ram_block_discard_requirers);
 }
-- 
2.29.2

