In KVM mode, vm_clock may be read from AioContexts outside the BQL
(see the next patch).  This exposes timers_state, the foundation of
vm_clock, to race conditions, so protect it with a private lock.
Note that in TCG mode vm_clock is still read under the BQL, so the
icount code is left unchanged.

Lock rule: the private lock is innermost, i.e. BQL -> timers_state_lock.
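
A minimal sketch of the intended nesting (illustrative only, not part of
the diff below): a caller that already holds the BQL may call into code
that takes the private lock, but never the other way round.

    /* outer lock: the BQL */
    qemu_mutex_lock_iothread();
    /* cpu_get_ticks() takes and releases timers_state_lock internally,
     * so the order is always BQL -> timers_state_lock. */
    int64_t now = cpu_get_ticks();
    qemu_mutex_unlock_iothread();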

Signed-off-by: Liu Ping Fan <pingf...@linux.vnet.ibm.com>
---
 cpus.c | 26 +++++++++++++++++++++-----
 1 file changed, 21 insertions(+), 5 deletions(-)

diff --git a/cpus.c b/cpus.c
index 61e86a8..4af81e9 100644
--- a/cpus.c
+++ b/cpus.c
@@ -112,7 +112,9 @@ typedef struct TimersState {
     int64_t dummy;
 } TimersState;
 
-TimersState timers_state;
+static TimersState timers_state;
+/* Protects timers_state; lock order: BQL -> timers_state_lock (innermost). */
+static QemuMutex timers_state_lock;
 
 /* Return the virtual CPU time, based on the instruction counter.  */
 int64_t cpu_get_icount(void)
@@ -134,11 +136,14 @@ int64_t cpu_get_icount(void)
 /* return the host CPU cycle counter and handle stop/restart */
 int64_t cpu_get_ticks(void)
 {
+    int64_t ret;
+
     if (use_icount) {
         return cpu_get_icount();
     }
+    qemu_mutex_lock(&timers_state_lock);
     if (!timers_state.cpu_ticks_enabled) {
-        return timers_state.cpu_ticks_offset;
+        ret = timers_state.cpu_ticks_offset;
     } else {
         int64_t ticks;
         ticks = cpu_get_real_ticks();
@@ -148,41 +153,51 @@ int64_t cpu_get_ticks(void)
             timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
         }
         timers_state.cpu_ticks_prev = ticks;
-        return ticks + timers_state.cpu_ticks_offset;
+        ret = ticks + timers_state.cpu_ticks_offset;
     }
+    qemu_mutex_unlock(&timers_state_lock);
+    return ret;
 }
 
 /* return the host CPU monotonic timer and handle stop/restart */
 int64_t cpu_get_clock(void)
 {
     int64_t ti;
+
+    qemu_mutex_lock(&timers_state_lock);
     if (!timers_state.cpu_ticks_enabled) {
-        return timers_state.cpu_clock_offset;
+        ti = timers_state.cpu_clock_offset;
     } else {
         ti = get_clock();
-        return ti + timers_state.cpu_clock_offset;
+        ti += timers_state.cpu_clock_offset;
     }
+    qemu_mutex_unlock(&timers_state_lock);
+    return ti;
 }
 
 /* enable cpu_get_ticks() */
 void cpu_enable_ticks(void)
 {
+    qemu_mutex_lock(&timers_state_lock);
     if (!timers_state.cpu_ticks_enabled) {
         timers_state.cpu_ticks_offset -= cpu_get_real_ticks();
         timers_state.cpu_clock_offset -= get_clock();
         timers_state.cpu_ticks_enabled = 1;
     }
+    qemu_mutex_unlock(&timers_state_lock);
 }
 
 /* disable cpu_get_ticks() : the clock is stopped. You must not call
    cpu_get_ticks() after that.  */
 void cpu_disable_ticks(void)
 {
+    qemu_mutex_lock(&timers_state_lock);
     if (timers_state.cpu_ticks_enabled) {
         timers_state.cpu_ticks_offset = cpu_get_ticks();
         timers_state.cpu_clock_offset = cpu_get_clock();
         timers_state.cpu_ticks_enabled = 0;
     }
+    qemu_mutex_unlock(&timers_state_lock);
 }
 
 /* Correlation between real and virtual time is always going to be
@@ -353,6 +368,7 @@ static const VMStateDescription vmstate_timers = {
 
 void configure_icount(const char *option)
 {
+    qemu_mutex_init(&timers_state_lock);
     vmstate_register(NULL, 0, &vmstate_timers, &timers_state);
     if (!option) {
         return;
-- 
1.8.1.4

