Wrap the hv_vcpu_run() calls with cpu_exec_start/end() so that the main
loop can perform exclusive sections while all vCPUs are quiescent.
Signed-off-by: Philippe Mathieu-Daudé <[email protected]>
Reviewed-by: Richard Henderson <[email protected]>
Message-Id: <[email protected]>
---
 target/arm/hvf/hvf.c  | 2 ++
 target/i386/hvf/hvf.c | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index 0658a99a2d1..1e50632557e 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -1803,7 +1803,9 @@ int hvf_vcpu_exec(CPUState *cpu)
         flush_cpu_state(cpu);

         bql_unlock();
+        cpu_exec_start(cpu);
         r = hv_vcpu_run(cpu->accel->fd);
+        cpu_exec_end(cpu);
         bql_lock();
         switch (r) {
         case HV_SUCCESS:
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index 33f723a76a7..8cd1a800e0a 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -749,8 +749,10 @@ int hvf_vcpu_exec(CPUState *cpu)
             return EXCP_HLT;
         }

+        cpu_exec_start(cpu);
         hv_return_t r = hv_vcpu_run_until(cpu->accel->fd, HV_DEADLINE_FOREVER);
         assert_hvf_ok(r);
+        cpu_exec_end(cpu);

         /* handle VMEXIT */
         uint64_t exit_reason = rvmcs(cpu->accel->fd, VMCS_EXIT_REASON);
-- 
2.51.0
