Return to the main loop, where we'll be woken up again.
This avoids a tricky race with signals introduced in
commit 219c101fa7f ("Add HVF WFI handler").

Signed-off-by: Philippe Mathieu-Daudé <[email protected]>
---
 include/system/hvf_int.h  |  1 -
 accel/hvf/hvf-accel-ops.c |  5 +--
 target/arm/hvf/hvf.c      | 76 ++++-----------------------------------
 3 files changed, 9 insertions(+), 73 deletions(-)

diff --git a/include/system/hvf_int.h b/include/system/hvf_int.h
index d842d4b2b99..c8e407a1463 100644
--- a/include/system/hvf_int.h
+++ b/include/system/hvf_int.h
@@ -47,7 +47,6 @@ struct AccelCPUState {
 #ifdef __aarch64__
     hv_vcpu_exit_t *exit;
     bool vtimer_masked;
-    sigset_t unblock_ipi_mask;
     bool guest_debug_enabled;
 #endif
 };
diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c
index bbb0b385fe9..e9dd2d24745 100644
--- a/accel/hvf/hvf-accel-ops.c
+++ b/accel/hvf/hvf-accel-ops.c
@@ -128,14 +128,15 @@ static int hvf_init_vcpu(CPUState *cpu)
     cpu->accel = g_new0(AccelCPUState, 1);
 
     /* init cpu signals */
+    sigset_t unblock_ipi_mask;
     struct sigaction sigact;
 
     memset(&sigact, 0, sizeof(sigact));
     sigact.sa_handler = dummy_signal;
     sigaction(SIG_IPI, &sigact, NULL);
 
-    pthread_sigmask(SIG_BLOCK, NULL, &cpu->accel->unblock_ipi_mask);
-    sigdelset(&cpu->accel->unblock_ipi_mask, SIG_IPI);
+    pthread_sigmask(SIG_BLOCK, NULL, &unblock_ipi_mask);
+    sigdelset(&unblock_ipi_mask, SIG_IPI);
 
 #ifdef __aarch64__
     r = hv_vcpu_create(&cpu->accel->fd,
diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c
index 8c2eac6738f..d3d05b0ccba 100644
--- a/target/arm/hvf/hvf.c
+++ b/target/arm/hvf/hvf.c
@@ -302,7 +302,7 @@ void hvf_arm_init_debug(void)
 #define TMR_CTL_IMASK   (1 << 1)
 #define TMR_CTL_ISTATUS (1 << 2)
 
-static void hvf_wfi(CPUState *cpu);
+static int hvf_wfi(CPUState *cpu);
 
 static uint32_t chosen_ipa_bit_size;
 
@@ -1715,85 +1715,21 @@ static uint64_t hvf_vtimer_val_raw(void)
     return mach_absolute_time() - hvf_state->vtimer_offset;
 }
 
-static uint64_t hvf_vtimer_val(void)
+static int hvf_wfi(CPUState *cpu)
 {
-    if (!runstate_is_running()) {
-        /* VM is paused, the vtimer value is in vtimer.vtimer_val */
-        return vtimer.vtimer_val;
-    }
-
-    return hvf_vtimer_val_raw();
-}
-
-static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
-{
-    /*
-     * Use pselect to sleep so that other threads can IPI us while we're
-     * sleeping.
-     */
-    qatomic_set_mb(&cpu->thread_kicked, false);
-    bql_unlock();
-    pselect(0, 0, 0, 0, ts, &cpu->accel->unblock_ipi_mask);
-    bql_lock();
-}
-
-static void hvf_wfi(CPUState *cpu)
-{
-    ARMCPU *arm_cpu = ARM_CPU(cpu);
-    struct timespec ts;
-    hv_return_t r;
-    uint64_t ctl;
-    uint64_t cval;
-    int64_t ticks_to_sleep;
-    uint64_t seconds;
-    uint64_t nanos;
-    uint32_t cntfrq;
-
     if (cpu_has_work(cpu)) {
         /* Don't bother to go into our "low power state" if
          * we would just wake up immediately.
          */
-        return;
+        return 0;
     }
 
     if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
         /* Interrupt pending, no need to wait */
-        return;
+        return EXCP_INTERRUPT;
     }
 
-    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
-    assert_hvf_ok(r);
-
-    if (!(ctl & 1) || (ctl & 2)) {
-        /* Timer disabled or masked, just wait for an IPI. */
-        hvf_wait_for_ipi(cpu, NULL);
-        return;
-    }
-
-    r = hv_vcpu_get_sys_reg(cpu->accel->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
-    assert_hvf_ok(r);
-
-    ticks_to_sleep = cval - hvf_vtimer_val();
-    if (ticks_to_sleep < 0) {
-        return;
-    }
-
-    cntfrq = gt_cntfrq_period_ns(arm_cpu);
-    seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
-    ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
-    nanos = ticks_to_sleep * cntfrq;
-
-    /*
-     * Don't sleep for less than the time a context switch would take,
-     * so that we can satisfy fast timer requests on the same CPU.
-     * Measurements on M1 show the sweet spot to be ~2ms.
-     */
-    if (!seconds && nanos < (2 * SCALE_MS)) {
-        return;
-    }
-
-    ts = (struct timespec) { seconds, nanos };
-    hvf_wait_for_ipi(cpu, &ts);
+    return EXCP_HLT;
 }
 
 /* Must be called by the owning thread */
@@ -1982,7 +1918,7 @@ static int hvf_handle_exception(CPUState *cpu, hv_vcpu_exit_exception_t *excp)
     case EC_WFX_TRAP:
         advance_pc = true;
         if (!(syndrome & WFX_IS_WFE)) {
-            hvf_wfi(cpu);
+            ret = hvf_wfi(cpu);
        }
         break;
     case EC_AA64_HVC:
-- 
2.51.0
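
For readers less familiar with QEMU's vCPU threading, below is a minimal
standalone sketch of the pattern this patch moves to. It is illustration
only, not QEMU code: every name in it (vcpu_exec, wait_io_event, kick,
EXCP_NONE, EXCP_HLT, has_work) is invented for the example. The idea is
that the "WFI handler" reports a halt code to its outer loop, and that
outer loop is the single place that sleeps and gets woken, so no
per-handler signal masking is needed.

/*
 * Minimal sketch, NOT QEMU code; all names are hypothetical.
 * A vCPU-style thread returns EXCP_HLT from its exec step instead of
 * sleeping inline, then parks on a condition variable in its outer
 * loop until another thread wakes it.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

enum { EXCP_NONE, EXCP_HLT };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t halt_cond = PTHREAD_COND_INITIALIZER;
static bool has_work;

/* Stand-in for the WFI handler: report "halt", don't sleep here. */
static int vcpu_exec(void)
{
    pthread_mutex_lock(&lock);
    bool work = has_work;
    pthread_mutex_unlock(&lock);
    return work ? EXCP_NONE : EXCP_HLT;
}

/* Stand-in for the main loop: the one place that sleeps and is woken. */
static void wait_io_event(void)
{
    pthread_mutex_lock(&lock);
    while (!has_work) {
        pthread_cond_wait(&halt_cond, &lock);
    }
    has_work = false; /* consume the wakeup */
    pthread_mutex_unlock(&lock);
}

static void *vcpu_thread(void *arg)
{
    for (int i = 0; i < 3; i++) {
        if (vcpu_exec() == EXCP_HLT) {
            printf("vcpu: halted, back in main loop\n");
            wait_io_event();
            printf("vcpu: woken\n");
        }
    }
    return NULL;
}

/* Stand-in for a cpu kick: no async-signal/pselect dance required. */
static void kick(void)
{
    pthread_mutex_lock(&lock);
    has_work = true;
    pthread_cond_signal(&halt_cond);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, vcpu_thread, NULL);
    for (int i = 0; i < 3; i++) {
        usleep(100 * 1000);
        kick();
    }
    pthread_join(t, NULL);
    return 0;
}

Because the wait re-checks its predicate under the mutex, a kick that
arrives before the vCPU actually blocks is simply latched in has_work;
this is the property the removed pselect()-with-signal-mask code had to
reproduce by hand.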
