Hi,

This diff lets vmm(4) run guests without holding the biglock.
If the guest exits because of an interrupt, the interrupt is
handled before the lock is re-grabbed.

This is needed because the TLB shootdown code runs while holding
the biglock: it sends an IPI telling the other CPUs to invalidate
their TLBs, then spins, still holding the biglock, until the other
CPUs have done the invalidation.

If vmm(4) tried to grab the biglock before handling that IPI, it
would block forever: the CPU doing the shootdown never releases the
lock, because it is still waiting for the IPI to be acknowledged.
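
To illustrate the ordering problem (this is not part of the diff, just a
userland model with made-up names): a pthread mutex stands in for the
biglock and an atomic flag for the IPI acknowledgement. With
handle_ipi_first set to 0 the program hangs the same way vmm(4) would.

/*
 * Userland model of the deadlock; not kernel code, all names are
 * invented for illustration.  "biglock" stands in for the kernel
 * lock, "ipi_pending" for the TLB shootdown IPI that the remote
 * CPU must acknowledge.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t biglock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int ipi_pending;

/* CPU 1: TLB shootdown initiator, runs under the biglock. */
static void *
shootdown_cpu(void *arg)
{
	pthread_mutex_lock(&biglock);
	atomic_store(&ipi_pending, 1);		/* "send" the IPI */
	while (atomic_load(&ipi_pending))	/* spin for the ack ... */
		;				/* ... still holding biglock */
	pthread_mutex_unlock(&biglock);
	return (NULL);
}

/* CPU 2: vcpu thread that just exited the guest because of the IPI. */
static void *
vcpu_cpu(void *arg)
{
	int handle_ipi_first = *(int *)arg;

	if (!handle_ipi_first)
		pthread_mutex_lock(&biglock);	/* deadlock: CPU 1 never unlocks */
	atomic_store(&ipi_pending, 0);		/* handle/ack the IPI */
	if (handle_ipi_first)
		pthread_mutex_lock(&biglock);	/* safe: CPU 1 can make progress */
	pthread_mutex_unlock(&biglock);
	return (NULL);
}

int
main(void)
{
	pthread_t t1, t2;
	int handle_ipi_first = 1;	/* set to 0 to reproduce the hang */

	pthread_create(&t1, NULL, shootdown_cpu, NULL);
	usleep(1000);			/* let CPU 1 grab the lock first */
	pthread_create(&t2, NULL, vcpu_cpu, &handle_ipi_first);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	printf("no deadlock\n");
	return (0);
}

Clearing the flag before taking the mutex mirrors what the diff does for
VMX_EXIT_EXTINT: handle the exit first, grab the kernel lock afterwards.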

Index: sys/arch/amd64/amd64/vmm.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/vmm.c,v
retrieving revision 1.7
diff -u -p -r1.7 vmm.c
--- sys/arch/amd64/amd64/vmm.c  24 Nov 2015 09:07:09 -0000      1.7
+++ sys/arch/amd64/amd64/vmm.c  26 Nov 2015 04:28:28 -0000
@@ -2520,17 +2520,17 @@ vcpu_run_vmx(struct vcpu *vcpu, uint8_t 
                invvpid(IA32_VMX_INVVPID_SINGLE_CTX_GLB, &vid);
 
                /* Start / resume the VM / VCPU */
-               /* XXX unlock the biglock here */
+               KERNEL_UNLOCK();
                ret = vmx_enter_guest(&vcpu->vc_control_pa,
                    &vcpu->vc_gueststate, resume);
-               /* XXX lock the biglock here */
 
-               /* If we exited successfully ... */
-               if (ret == 0) {
+               switch (ret) {
+               case 0: /* If we exited successfully ... */
                        resume = 1;
                        vcpu->vc_last_pcpu = ci;
                        if (vmread(VMCS_GUEST_IA32_RIP,
                            &vcpu->vc_gueststate.vg_rip)) {
+                               KERNEL_LOCK();
                                printf("vcpu_run_vmx: cannot read guest rip\n");
                                ret = EINVAL;
                                exit_handled = 0;
@@ -2538,6 +2538,7 @@ vcpu_run_vmx(struct vcpu *vcpu, uint8_t 
                        }
 
                        if (vmread(VMCS_EXIT_REASON, &exit_reason)) {
+                               KERNEL_LOCK();
                                printf("vcpu_run_vmx: cant read exit reason\n");
                                ret = EINVAL;
                                exit_handled = 0;
@@ -2547,9 +2548,24 @@ vcpu_run_vmx(struct vcpu *vcpu, uint8_t 
                        /*
                         * Handle the exit. This will alter "ret" to EAGAIN if
                         * the exit handler determines help from vmd is needed.
+                        *
+                        * When the guest exited because of an external
+                        * interrupt, handle it first before grabbing the
+                        * kernel lock: we might have gotten an IPI that has
+                        * to do some work that another CPU depends on.
+                        *
+                        * Example: CPU 1 grabs the kernel lock, sets some
+                        * flag to 1, issues an IPI to all other CPUs, and
+                        * waits for them to clear the flag. If this code
+                        * grabs the kernel lock here first, it will block
+                        * forever.
                         */
                        vcpu->vc_gueststate.vg_exit_reason = exit_reason;
+                       if (exit_reason != VMX_EXIT_EXTINT)
+                               KERNEL_LOCK();
                        exit_handled = vmx_handle_exit(vcpu, &ret);
+                       if (exit_reason == VMX_EXIT_EXTINT)
+                               KERNEL_LOCK();
 
                        /* Check if we should yield - don't hog the cpu */
                        spc = &ci->ci_schedstate;
@@ -2561,25 +2577,31 @@ vcpu_run_vmx(struct vcpu *vcpu, uint8_t 
                                }
                                yield();
                        }
-               } else if (ret == VMX_FAIL_LAUNCH_INVALID_VMCS) {
+                       break;
+               case VMX_FAIL_LAUNCH_INVALID_VMCS:
+                       KERNEL_LOCK();
                        printf("vmx_enter_guest: failed launch with invalid "
                            "vmcs\n");
                        ret = EINVAL;
                        exit_handled = 0;
-               } else if (ret == VMX_FAIL_LAUNCH_VALID_VMCS) {
+                       break;
+               case VMX_FAIL_LAUNCH_VALID_VMCS:
+                       KERNEL_LOCK();
                        exit_reason = vcpu->vc_gueststate.vg_exit_reason;
                        printf("vmx_enter_guest: failed launch with valid "
                            "vmcs, code=%lld (%s)\n", exit_reason,
                            vmx_instruction_error_decode(exit_reason));
                        ret = EINVAL;
                        exit_handled = 0;
-               } else {
+                       break;
+               default:
+                       KERNEL_LOCK();
                        printf("vmx_enter_guest: failed launch for unknown "
                            "reason\n");
                        ret = EINVAL;
                        exit_handled = 0;
+                       break;
                }
-
        }
        vcpu->vc_state = VCPU_STATE_STOPPED;
 
