Because cpu_get_ticks() has to update cpu_ticks_prev on every call, we cannot
use a seqlock.  On the other hand, that makes the conversion even easier. :)
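As a standalone sketch of why that is (illustrative only, not part of the
patch: it uses made-up names such as get_ticks_locked, ticks_lock and
host_ticks, and plain pthread spinlocks in place of qemu_spin_lock() and
cpu_get_host_ticks()): the read path has to store the value it returns back
into the "prev" field to keep the clock monotonic, so it cannot be a
retry-based lock-free reader and needs real mutual exclusion.

    #include <inttypes.h>
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_spinlock_t ticks_lock;   /* stands in for vm_clock_lock */
    static int64_t ticks_offset;            /* like cpu_ticks_offset */
    static int64_t ticks_prev;              /* like cpu_ticks_prev */
    static int ticks_enabled = 1;           /* like cpu_ticks_enabled */

    /* Stand-in for cpu_get_host_ticks(): any monotonic-ish counter. */
    static int64_t host_ticks(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * INT64_C(1000000000) + ts.tv_nsec;
    }

    /* Same shape as cpu_get_ticks_locked(); caller holds ticks_lock. */
    static int64_t get_ticks_locked(void)
    {
        int64_t ticks = ticks_offset;
        if (ticks_enabled) {
            ticks += host_ticks();
        }
        if (ticks_prev > ticks) {
            /* Non increasing ticks may happen if the host suspends. */
            ticks_offset += ticks_prev - ticks;
            ticks = ticks_prev;
        }
        ticks_prev = ticks;   /* the reader writes state: no seqlock reader */
        return ticks;
    }

    static int64_t get_ticks(void)
    {
        int64_t ticks;

        pthread_spin_lock(&ticks_lock);
        ticks = get_ticks_locked();
        pthread_spin_unlock(&ticks_lock);
        return ticks;
    }

    int main(void)
    {
        pthread_spin_init(&ticks_lock, PTHREAD_PROCESS_PRIVATE);
        printf("%" PRId64 "\n", get_ticks());
        printf("%" PRId64 "\n", get_ticks());
        pthread_spin_destroy(&ticks_lock);
        return 0;
    }

A spinlock is enough here because the critical section is a counter read and
a couple of arithmetic operations.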
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 cpus.c | 35 ++++++++++++++++++++---------------
 1 file changed, 20 insertions(+), 15 deletions(-)

diff --git a/cpus.c b/cpus.c
index 63ddd4fd21..7786439362 100644
--- a/cpus.c
+++ b/cpus.c
@@ -316,11 +316,26 @@ int64_t cpu_icount_to_ns(int64_t icount)
     return icount << atomic_read(&timers_state.icount_time_shift);
 }
 
+static int64_t cpu_get_ticks_locked(void)
+{
+    int64_t ticks = timers_state.cpu_ticks_offset;
+    if (timers_state.cpu_ticks_enabled) {
+        ticks += cpu_get_host_ticks();
+    }
+
+    if (timers_state.cpu_ticks_prev > ticks) {
+        /* Non increasing ticks may happen if the host uses software suspend. */
+        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
+        ticks = timers_state.cpu_ticks_prev;
+    }
+
+    timers_state.cpu_ticks_prev = ticks;
+    return ticks;
+}
+
 /* return the time elapsed in VM between vm_start and vm_stop. Unless
  * icount is active, cpu_get_ticks() uses units of the host CPU cycle
  * counter.
- *
- * Caller must hold the BQL
  */
 int64_t cpu_get_ticks(void)
 {
@@ -330,19 +345,9 @@ int64_t cpu_get_ticks(void)
         return cpu_get_icount();
     }
 
-    ticks = timers_state.cpu_ticks_offset;
-    if (timers_state.cpu_ticks_enabled) {
-        ticks += cpu_get_host_ticks();
-    }
-
-    if (timers_state.cpu_ticks_prev > ticks) {
-        /* Note: non increasing ticks may happen if the host uses
-           software suspend */
-        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
-        ticks = timers_state.cpu_ticks_prev;
-    }
-
-    timers_state.cpu_ticks_prev = ticks;
+    qemu_spin_lock(&timers_state.vm_clock_lock);
+    ticks = cpu_get_ticks_locked();
+    qemu_spin_unlock(&timers_state.vm_clock_lock);
     return ticks;
 }
 
-- 
2.17.1