From: Eduardo Habkost <ehabk...@redhat.com>

Move invocation of CPUClass.cpu_exec_*() to separate helpers, to make
it easier to refactor that code later.
Signed-off-by: Eduardo Habkost <ehabk...@redhat.com>
---
 accel/tcg/cpu-exec.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 58aea605d8..8d31145ad2 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -236,9 +236,22 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
 }
 #endif
 
+static void cpu_exec_enter(CPUState *cpu)
+{
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+
+    cc->cpu_exec_enter(cpu);
+}
+
+static void cpu_exec_exit(CPUState *cpu)
+{
+    CPUClass *cc = CPU_GET_CLASS(cpu);
+
+    cc->cpu_exec_exit(cpu);
+}
+
 void cpu_exec_step_atomic(CPUState *cpu)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
     TranslationBlock *tb;
     target_ulong cs_base, pc;
     uint32_t flags;
@@ -257,11 +270,11 @@ void cpu_exec_step_atomic(CPUState *cpu)
 
         /* Since we got here, we know that parallel_cpus must be true.  */
         parallel_cpus = false;
-        cc->cpu_exec_enter(cpu);
+        cpu_exec_enter(cpu);
         /* execute the generated code */
         trace_exec_tb(tb, pc);
         cpu_tb_exec(cpu, tb);
-        cc->cpu_exec_exit(cpu);
+        cpu_exec_exit(cpu);
     } else {
         /*
          * The mmap_lock is dropped by tb_gen_code if it runs out of
@@ -713,7 +726,7 @@ int cpu_exec(CPUState *cpu)
 
     rcu_read_lock();
 
-    cc->cpu_exec_enter(cpu);
+    cpu_exec_enter(cpu);
 
     /* Calculate difference between guest clock and host clock.
      * This delay includes the delay of the last cycle, so
@@ -775,7 +788,7 @@ int cpu_exec(CPUState *cpu)
         }
     }
 
-    cc->cpu_exec_exit(cpu);
+    cpu_exec_exit(cpu);
     rcu_read_unlock();
 
     return ret;
-- 
2.26.2
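
Illustrative note (not part of the patch): the commit message only says the
helpers make later refactoring easier, without showing what such a refactor
might look like. As one hedged sketch, assuming a hypothetical future where
some CPU classes leave the hook unset, the change would then be confined to
the helper alone, e.g.:

static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Hypothetical follow-up: tolerate classes that leave the hook NULL. */
    if (cc->cpu_exec_enter) {
        cc->cpu_exec_enter(cpu);
    }
}

All call sites in cpu_exec_step_atomic() and cpu_exec() would stay untouched,
which is the point of centralizing the invocation here.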