Better to make the interrupt handler an explicit implementation in each
accelerator than to depend on an implicit generic fallback: register the
common kick-the-vCPU handler in each AccelOpsClass, make the
handle_interrupt hook mandatory, and drop the fallback path from
cpu_interrupt().
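Every implementation registered below is the same kick-if-remote pattern.
As a sketch of the shape each accelerator now has to provide ("foo" is a
placeholder accelerator name, not a real backend):

    /* Kick the vCPU only when the request comes from another thread. */
    static void foo_handle_interrupt(CPUState *cpu, int old_mask, int new_mask)
    {
        if (!qemu_cpu_is_self(cpu)) {
            qemu_cpu_kick(cpu);
        }
    }

    static void foo_accel_ops_class_init(ObjectClass *oc, const void *data)
    {
        AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);

        /* Mandatory: cpus_register_accel() now asserts this is set. */
        ops->handle_interrupt = foo_handle_interrupt;
    }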
Suggested-by: Richard Henderson <richard.hender...@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <phi...@linaro.org>
---
 accel/dummy-cpus.h                |  1 +
 include/system/accel-ops.h        |  1 +
 accel/dummy-cpus.c                |  7 +++++++
 accel/hvf/hvf-accel-ops.c         |  8 ++++++++
 accel/kvm/kvm-accel-ops.c         |  8 ++++++++
 accel/qtest/qtest.c               |  1 +
 accel/xen/xen-all.c               |  1 +
 system/cpus.c                     | 15 ++-------------
 target/i386/nvmm/nvmm-accel-ops.c |  8 ++++++++
 target/i386/whpx/whpx-accel-ops.c |  8 ++++++++
 10 files changed, 45 insertions(+), 13 deletions(-)

diff --git a/accel/dummy-cpus.h b/accel/dummy-cpus.h
index c2f9fee164c..98a1a30f9ca 100644
--- a/accel/dummy-cpus.h
+++ b/accel/dummy-cpus.h
@@ -11,5 +11,6 @@
 
 void dummy_thread_precreate(CPUState *cpu);
 void *dummy_cpu_thread_routine(void *arg);
+void dummy_handle_interrupt(CPUState *cpu, int old_mask, int new_mask);
 
 #endif
diff --git a/include/system/accel-ops.h b/include/system/accel-ops.h
index 9d2577fe67f..14861eae60c 100644
--- a/include/system/accel-ops.h
+++ b/include/system/accel-ops.h
@@ -70,6 +70,7 @@ struct AccelOpsClass {
     void (*synchronize_state)(CPUState *cpu);
     void (*synchronize_pre_loadvm)(CPUState *cpu);
 
+    /* handle_interrupt is mandatory. */
     void (*handle_interrupt)(CPUState *cpu, int old_mask, int new_mask);
 
     void (*get_vcpu_stats)(CPUState *cpu, GString *buf);
diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c
index f637ab05e32..e9076851c58 100644
--- a/accel/dummy-cpus.c
+++ b/accel/dummy-cpus.c
@@ -71,3 +71,10 @@ void dummy_thread_precreate(CPUState *cpu)
     qemu_sem_init(&cpu->sem, 0);
 #endif
 }
+
+void dummy_handle_interrupt(CPUState *cpu, int old_mask, int new_mask)
+{
+    if (!qemu_cpu_is_self(cpu)) {
+        qemu_cpu_kick(cpu);
+    }
+}
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index d2b3217f145..a9ed93fe8eb 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -207,6 +207,13 @@ static void *hvf_cpu_thread_fn(void *arg)
     return NULL;
 }
 
+static void hvf_handle_interrupt(CPUState *cpu, int old_mask, int new_mask)
+{
+    if (!qemu_cpu_is_self(cpu)) {
+        qemu_cpu_kick(cpu);
+    }
+}
+
 struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc)
 {
     struct hvf_sw_breakpoint *bp;
@@ -358,6 +365,7 @@ static void hvf_accel_ops_class_init(ObjectClass *oc, const void *data)
     ops->kick_vcpu_thread = hvf_kick_vcpu_thread;
     ops->exec_vcpu_thread = hvf_vcpu_exec;
     ops->destroy_vcpu_thread = hvf_vcpu_destroy;
+    ops->handle_interrupt = hvf_handle_interrupt;
 
     ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
     ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c
index 21ff3af306f..749c4f244a1 100644
--- a/accel/kvm/kvm-accel-ops.c
+++ b/accel/kvm/kvm-accel-ops.c
@@ -68,6 +68,13 @@ static bool kvm_vcpu_thread_is_idle(CPUState *cpu)
     return !kvm_halt_in_kernel();
 }
 
+static void kvm_handle_interrupt(CPUState *cpu, int old_mask, int new_mask)
+{
+    if (!qemu_cpu_is_self(cpu)) {
+        qemu_cpu_kick(cpu);
+    }
+}
+
 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
 static int kvm_update_guest_debug_ops(CPUState *cpu)
 {
@@ -85,6 +92,7 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, const void *data)
     ops->synchronize_post_init = kvm_cpu_synchronize_post_init;
     ops->synchronize_state = kvm_cpu_synchronize_state;
     ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
+    ops->handle_interrupt = kvm_handle_interrupt;
 
 #ifdef TARGET_KVM_HAVE_GUEST_DEBUG
     ops->update_guest_debug = kvm_update_guest_debug_ops;
diff --git a/accel/qtest/qtest.c b/accel/qtest/qtest.c
index 9f30098d133..19eea8d8daa 100644
--- a/accel/qtest/qtest.c
+++ b/accel/qtest/qtest.c
@@ -68,6 +68,7 @@ static void qtest_accel_ops_class_init(ObjectClass *oc, const void *data)
     ops->cpu_thread_routine = dummy_cpu_thread_routine;
     ops->get_virtual_clock = qtest_get_virtual_clock;
     ops->set_virtual_clock = qtest_set_virtual_clock;
+    ops->handle_interrupt = dummy_handle_interrupt;
 };
 
 static const TypeInfo qtest_accel_ops_type = {
diff --git a/accel/xen/xen-all.c b/accel/xen/xen-all.c
index 5ff72d9532c..6a967a8c63d 100644
--- a/accel/xen/xen-all.c
+++ b/accel/xen/xen-all.c
@@ -153,6 +153,7 @@ static void xen_accel_ops_class_init(ObjectClass *oc, const void *data)
 
     ops->thread_precreate = dummy_thread_precreate;
     ops->cpu_thread_routine = dummy_cpu_thread_routine;
+    ops->handle_interrupt = dummy_handle_interrupt;
 }
 
 static const TypeInfo xen_accel_ops_type = {
diff --git a/system/cpus.c b/system/cpus.c
index 8c2647f5f19..e217e42ba03 100644
--- a/system/cpus.c
+++ b/system/cpus.c
@@ -246,13 +246,6 @@ int64_t cpus_get_elapsed_ticks(void)
     return cpu_get_ticks();
 }
 
-static void generic_handle_interrupt(CPUState *cpu, int old_mask, int new_mask)
-{
-    if (!qemu_cpu_is_self(cpu)) {
-        qemu_cpu_kick(cpu);
-    }
-}
-
 void cpu_interrupt(CPUState *cpu, int mask)
 {
     int old_mask = cpu->interrupt_request;
@@ -260,12 +253,7 @@ void cpu_interrupt(CPUState *cpu, int mask)
     g_assert(bql_locked());
 
     cpu->interrupt_request |= mask;
-
-    if (cpus_accel->handle_interrupt) {
-        cpus_accel->handle_interrupt(cpu, old_mask, cpu->interrupt_request);
-    } else {
-        generic_handle_interrupt(cpu, old_mask, cpu->interrupt_request);
-    }
+    cpus_accel->handle_interrupt(cpu, old_mask, cpu->interrupt_request);
 }
 
 /*
@@ -674,6 +662,7 @@ void cpus_register_accel(const AccelOpsClass *ops)
 {
     assert(ops != NULL);
     assert(ops->create_vcpu_thread || ops->cpu_thread_routine);
+    assert(ops->handle_interrupt);
 
     cpus_accel = ops;
 }
diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c
index bef6f61b776..62fc6438c60 100644
--- a/target/i386/nvmm/nvmm-accel-ops.c
+++ b/target/i386/nvmm/nvmm-accel-ops.c
@@ -61,6 +61,13 @@ static void *qemu_nvmm_cpu_thread_fn(void *arg)
     return NULL;
 }
 
+static void nvmm_handle_interrupt(CPUState *cpu, int old_mask, int new_mask)
+{
+    if (!qemu_cpu_is_self(cpu)) {
+        qemu_cpu_kick(cpu);
+    }
+}
+
 /*
  * Abort the call to run the virtual processor by another thread, and to
  * return the control to that thread.
@@ -77,6 +84,7 @@ static void nvmm_accel_ops_class_init(ObjectClass *oc, const void *data)
 
     ops->cpu_thread_routine = qemu_nvmm_cpu_thread_fn;
     ops->kick_vcpu_thread = nvmm_kick_vcpu_thread;
+    ops->handle_interrupt = nvmm_handle_interrupt;
 
     ops->synchronize_post_reset = nvmm_cpu_synchronize_post_reset;
     ops->synchronize_post_init = nvmm_cpu_synchronize_post_init;
diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c
index 8cbc6f4e2d8..e9969ef2cf3 100644
--- a/target/i386/whpx/whpx-accel-ops.c
+++ b/target/i386/whpx/whpx-accel-ops.c
@@ -61,6 +61,13 @@ static void *whpx_cpu_thread_fn(void *arg)
     return NULL;
 }
 
+static void whpx_handle_interrupt(CPUState *cpu, int old_mask, int new_mask)
+{
+    if (!qemu_cpu_is_self(cpu)) {
+        qemu_cpu_kick(cpu);
+    }
+}
+
 static void whpx_kick_vcpu_thread(CPUState *cpu)
 {
     if (!qemu_cpu_is_self(cpu)) {
@@ -80,6 +87,7 @@ static void whpx_accel_ops_class_init(ObjectClass *oc, const void *data)
     ops->cpu_thread_routine = whpx_cpu_thread_fn;
     ops->kick_vcpu_thread = whpx_kick_vcpu_thread;
     ops->cpu_thread_is_idle = whpx_vcpu_thread_is_idle;
+    ops->handle_interrupt = whpx_handle_interrupt;
 
     ops->synchronize_post_reset = whpx_cpu_synchronize_post_reset;
     ops->synchronize_post_init = whpx_cpu_synchronize_post_init;
-- 
2.49.0