From: "Emilio G. Cota" <c...@braap.org> Reviewed-by: Richard Henderson <richard.hender...@linaro.org> Reviewed-by: Alex Bennée <alex.ben...@linaro.org> Signed-off-by: Emilio G. Cota <c...@braap.org> [RF: Converted new code to cpu_interrupt_request] Signed-off-by: Robert Foley <robert.fo...@linaro.org> --- target/i386/hax-all.c | 34 +++++++++++++++++++--------------- 1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
index 6a4152730f..20a75a891f 100644
--- a/target/i386/hax-all.c
+++ b/target/i386/hax-all.c
@@ -298,7 +298,7 @@ int hax_vm_destroy(struct hax_vm *vm)
 
 static void hax_handle_interrupt(CPUState *cpu, int mask)
 {
-    cpu->interrupt_request |= mask;
+    cpu_interrupt_request_or(cpu, mask);
 
     if (!qemu_cpu_is_self(cpu)) {
         qemu_cpu_kick(cpu);
@@ -432,7 +432,7 @@ static int hax_vcpu_interrupt(CPUArchState *env)
      * Unlike KVM, HAX kernel check for the eflags, instead of qemu
      */
     if (ht->ready_for_interrupt_injection &&
-        (cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+        (cpu_interrupt_request(cpu) & CPU_INTERRUPT_HARD)) {
         int irq;
 
         irq = cpu_get_pic_interrupt(env);
@@ -446,7 +446,7 @@ static int hax_vcpu_interrupt(CPUArchState *env)
      * interrupt, request an interrupt window exit. This will
      * cause a return to userspace as soon as the guest is ready to
      * receive interrupts. */
-    if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) {
+    if ((cpu_interrupt_request(cpu) & CPU_INTERRUPT_HARD)) {
         ht->request_interrupt_window = 1;
     } else {
         ht->request_interrupt_window = 0;
@@ -485,7 +485,7 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
         return 0;
     }
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
+    if (cpu_interrupt_request(cpu) & CPU_INTERRUPT_POLL) {
         cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
         apic_poll_irq(x86_cpu->apic_state);
     }
@@ -508,20 +508,20 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
      * c) INIT signal;
      * d) SIPI signal.
      */
-    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+    if (((cpu_interrupt_request(cpu) & CPU_INTERRUPT_HARD) &&
          (env->eflags & IF_MASK)) ||
-        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+        (cpu_interrupt_request(cpu) & CPU_INTERRUPT_NMI)) {
         cpu_halted_set(cpu, 0);
     }
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
+    if (cpu_interrupt_request(cpu) & CPU_INTERRUPT_INIT) {
         DPRINTF("\nhax_vcpu_hax_exec: handling INIT for %d\n",
                 cpu->cpu_index);
         do_cpu_init(x86_cpu);
         hax_vcpu_sync_state(env, 1);
     }
 
-    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
+    if (cpu_interrupt_request(cpu) & CPU_INTERRUPT_SIPI) {
         DPRINTF("hax_vcpu_hax_exec: handling SIPI for %d\n",
                 cpu->cpu_index);
         hax_vcpu_sync_state(env, 0);
@@ -590,13 +590,17 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
             ret = -1;
             break;
         case HAX_EXIT_HLT:
-            if (!(cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
-                !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
-                /* hlt instruction with interrupt disabled is shutdown */
-                env->eflags |= IF_MASK;
-                cpu_halted_set(cpu, 1);
-                cpu->exception_index = EXCP_HLT;
-                ret = 1;
+        {
+            uint32_t interrupt_request = cpu_interrupt_request(cpu);
+
+            if (!(interrupt_request & CPU_INTERRUPT_HARD) &&
+                !(interrupt_request & CPU_INTERRUPT_NMI)) {
+                /* hlt instruction with interrupt disabled is shutdown */
+                env->eflags |= IF_MASK;
+                cpu_halted_set(cpu, 1);
+                cpu->exception_index = EXCP_HLT;
+                ret = 1;
+            }
             }
             break;
             /* these situations will continue to hax module */
-- 
2.17.1