On Mon, Oct 13, 2025, Naman Jain wrote:
> +static int mshv_vtl_ioctl_return_to_lower_vtl(void)
> +{
> +        preempt_disable();
> +        for (;;) {
> +                u32 cancel;
> +                unsigned long irq_flags;
> +                struct hv_vp_assist_page *hvp;
> +                int ret;
> +
> +                local_irq_save(irq_flags);
> +                cancel = READ_ONCE(mshv_vtl_this_run()->cancel);
> +                if (cancel)
> +                        current_thread_info()->flags |= _TIF_SIGPENDING;

There's no need to force SIGPENDING; this code can return directly if cancel is
set[1]. And then you can wait to disable IRQs until after handling pending work,
and thus avoid having to immediately re-enable IRQs[2].

[1] https://lore.kernel.org/all/[email protected]
[2] https://lore.kernel.org/all/[email protected]
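
Roughly the shape suggested in [1] and [2], i.e. something like this (completely
untested sketch; the mshv_vtl_*() helpers come from the diff above, while the
-EINTR return for cancellation and any cancel-vs-IPI ordering details are
assumptions, see the linked messages):

        for (;;) {
                unsigned long irq_flags;
                int ret;

                /* Bail directly on cancellation instead of faking a pending signal. */
                if (READ_ONCE(mshv_vtl_this_run()->cancel))
                        return -EINTR;

                /* Handle pending work *before* disabling IRQs... */
                if (__xfer_to_guest_mode_work_pending()) {
                        preempt_enable();
                        ret = xfer_to_guest_mode_handle_work();
                        if (ret)
                                return ret;
                        preempt_disable();
                        continue;
                }

                /* ...so the happy path never has to immediately re-enable them. */
                local_irq_save(irq_flags);
                mshv_vtl_return(&mshv_vtl_this_run()->cpu_context);
                local_irq_restore(irq_flags);

                /* entry-reason handling as in the original diff */
                ...
        }
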
> +
> +                if (unlikely(cancel) || __xfer_to_guest_mode_work_pending()) {
> +                        local_irq_restore(irq_flags);
> +                        preempt_enable();
> +                        ret = xfer_to_guest_mode_handle_work();
> +                        if (ret)
> +                                return ret;
> +                        preempt_disable();
> +                        continue;
> +                }
> +
> +                mshv_vtl_return(&mshv_vtl_this_run()->cpu_context);
> +                local_irq_restore(irq_flags);
> +
> +                hvp = hv_vp_assist_page[smp_processor_id()];
> +                this_cpu_inc(num_vtl0_transitions);
> +                switch (hvp->vtl_entry_reason) {
> +                case MSHV_ENTRY_REASON_INTERRUPT:
> +                        if (!mshv_vsm_capabilities.intercept_page_available &&
> +                            likely(!mshv_vtl_process_intercept()))
> +                                goto done;
> +                        break;
> +
> +                case MSHV_ENTRY_REASON_INTERCEPT:
> +                        WARN_ON(!mshv_vsm_capabilities.intercept_page_available);
> +                        memcpy(mshv_vtl_this_run()->exit_message, hvp->intercept_message,
> +                               sizeof(hvp->intercept_message));
> +                        goto done;
> +
> +                default:
> +                        panic("unknown entry reason: %d", hvp->vtl_entry_reason);
> +                }
> +        }
> +
> +done:
> +        preempt_enable();
> +
> +        return 0;
> +}