>-----Original Message-----
>From: [EMAIL PROTECTED] [mailto:kvm-devel-
>[EMAIL PROTECTED] On Behalf Of Gregory Haskins
>Sent: Wednesday, May 09, 2007 6:19 PM
>To: kvm-devel@lists.sourceforge.net
>Subject: [kvm-devel] [PATCH 4/9] KVM: Adds ability to preempt an executing VCPU
>
>The VCPU executes synchronously w.r.t. userspace today, and therefore
>interrupt injection is pretty straightforward.  However, we will soon need
>to be able to inject interrupts asynchronously to the execution of the VCPU
>due to the introduction of SMP, paravirtualized drivers, and asynchronous
>hypercalls.  This patch adds support to the interrupt mechanism to force
>a VCPU to VMEXIT when a new interrupt is pending.
>
>Signed-off-by: Gregory Haskins <[EMAIL PROTECTED]>
>---
>
> drivers/kvm/kvm.h      |    2 ++
> drivers/kvm/kvm_main.c |   59 +++++++++++++++++++++++++++++++++++++++++++++++-
> drivers/kvm/svm.c      |   43 +++++++++++++++++++++++++++++++++++
> drivers/kvm/vmx.c      |   44 ++++++++++++++++++++++++++++++++++++
> 4 files changed, 147 insertions(+), 1 deletions(-)
>
>diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
>index 059f074..0f6cc32 100644
>--- a/drivers/kvm/kvm.h
>+++ b/drivers/kvm/kvm.h
>@@ -329,6 +329,8 @@ struct kvm_vcpu_irq {
>       struct kvm_irqdevice dev;
>       int                  pending;
>       int                  deferred;
>+      struct task_struct  *task;
>+      int                  guest_mode;
> };
>
> struct kvm_vcpu {
>diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
>index 199489b..a160638 100644
>--- a/drivers/kvm/kvm_main.c
>+++ b/drivers/kvm/kvm_main.c
>@@ -1868,6 +1868,9 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>               kvm_arch_ops->decache_regs(vcpu);
>       }
>
>+      vcpu->irq.task = current;
>+      smp_wmb();
>+
>       r = kvm_arch_ops->run(vcpu, kvm_run);
>
> out:
>@@ -2309,6 +2312,20 @@ out1:
> }
>
> /*
>+ * This function is invoked whenever we want to interrupt a vcpu that is
>+ * currently executing in guest-mode.  It is currently a no-op, because the
>+ * simple delivery of the IPI used to execute this function accomplishes our
>+ * goal of causing a VMEXIT.  We pass the vcpu (which contains
>+ * vcpu->irq.task, etc.) for future use.
>+ */
>+static void kvm_vcpu_guest_intr(void *info)
>+{
>+#ifdef NOT_YET
>+      struct kvm_vcpu *vcpu = (struct kvm_vcpu*)info;
>+#endif
>+}
>+
>+/*
>  * This function will be invoked whenever the vcpu->irq.dev raises its INTR
>  * line
>  */
>@@ -2318,10 +2335,50 @@ static void kvm_vcpu_intr(struct kvm_irqsink *this,
> {
>       struct kvm_vcpu *vcpu = (struct kvm_vcpu*)this->private;
>       unsigned long flags;
>+      int direct_ipi = -1;
>
>       spin_lock_irqsave(&vcpu->irq.lock, flags);
>-      __set_bit(pin, &vcpu->irq.pending);
>+
>+      if (!test_bit(pin, &vcpu->irq.pending)) {
>+              /*
>+               * Record the change..
>+               */
>+              __set_bit(pin, &vcpu->irq.pending);
>+
>+              /*
>+               * then wake up the vcpu (if necessary)
>+               */
>+              if (vcpu->irq.task && (vcpu->irq.task != current)) {
>+                      if (vcpu->irq.guest_mode) {
>+                              /*
>+                               * If we are in guest mode, we can optimize
>+                               * the IPI by executing a function directly
>+                               * on the owning processor.
>+                               */
>+                              direct_ipi = task_cpu(vcpu->irq.task);
>+                      BUG_ON(direct_ipi == smp_processor_id());
>+                      }
>+              }
>+      }
>+
>       spin_unlock_irqrestore(&vcpu->irq.lock, flags);
>+
>+      /*
>+       * We can safely send the IPI outside of the lock scope because
>+       * irq.pending has already been updated.  This code assumes that
>+       * userspace will not sleep on anything other than HLT instructions.
>+       * HLT is covered in a race-free way because irq.pending was updated
>+       * in the critical section, and handle_halt() checks if any
>+       * interrupts are pending before returning to userspace.
>+       *
>+       * If it turns out that userspace can sleep on conditions other than
>+       * HLT, this code will need to be enhanced to allow the irq.pending
>+       * flags to be exported to userspace.
>+       */
>+      if (direct_ipi != -1)
>+              smp_call_function_single(direct_ipi,
>+                                       kvm_vcpu_guest_intr,
>+                                       vcpu, 0, 0);
> }
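
For what it's worth, the race-free argument above hinges on the HLT exit path
re-checking irq.pending before it goes back to userspace.  Roughly something
like this (untested sketch, not the actual handle_halt() code; the helper name
is made up):

	static int vcpu_irq_pending(struct kvm_vcpu *vcpu)
	{
		unsigned long flags;
		int pending;

		spin_lock_irqsave(&vcpu->irq.lock, flags);
		pending = (vcpu->irq.pending != 0);
		spin_unlock_irqrestore(&vcpu->irq.lock, flags);

		return pending;
	}

	/* ... and in the HLT handler: */
	if (vcpu_irq_pending(vcpu))
		return 1;	/* re-enter the guest and inject */
	kvm_run->exit_reason = KVM_EXIT_HLT;
	return 0;

As long as that check happens after the pin bit is set under irq.lock, an IPI
sent outside the lock cannot be lost.
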
>
> static void kvm_vcpu_irqsink_init(struct kvm_vcpu *vcpu)
>diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
>index 4c03881..91546ae 100644
>--- a/drivers/kvm/svm.c
>+++ b/drivers/kvm/svm.c
>@@ -1542,11 +1542,40 @@ static int svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
>       u16 gs_selector;
>       u16 ldt_selector;
>       int r;
>+      unsigned long irq_flags;
>
> again:
>+      /*
>+       * We disable interrupts until the next VMEXIT to eliminate a race
>+       * condition in the delivery of virtual interrupts.  Note that this is
>+       * probably not as bad as it sounds, as interrupts will still invoke
>+       * a VMEXIT once we have transitioned to GUEST mode (and thus exited
>+       * this lock scope), even if they are disabled.
>+       *
>+       * FIXME: Do we need to do anything additional to mask IPI/NMIs?
>+       */
>+      local_irq_save(irq_flags);
>+
>       spin_lock(&vcpu->irq.lock);
>
>       /*
>+       * If there are any signals pending (virtual interrupt related or
>+       * otherwise), don't even bother trying to enter guest mode...
>+       */
>+      if (signal_pending(current)) {
>+              kvm_run->exit_reason = KVM_EXIT_INTR;
>+              spin_unlock(&vcpu->irq.lock);
>+              local_irq_restore(irq_flags);
>+              return -EINTR;
>+      }


A possible optimization would be to check whether we have an irq to inject.
If we do, ignore the signal and enter guest mode anyway: since an irq is
already pending, the signal would not result in another irq injection.
What do you think?
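
Something along these lines, with irq.lock already held at this point
(untested sketch; it simply treats a non-zero irq.pending as "we have
something to inject"):

	if (signal_pending(current) && !vcpu->irq.pending) {
		/* nothing to inject, so let the signal take us back to userspace */
		kvm_run->exit_reason = KVM_EXIT_INTR;
		spin_unlock(&vcpu->irq.lock);
		local_irq_restore(irq_flags);
		return -EINTR;
	}

	/*
	 * If an irq is already pending we enter guest mode anyway and
	 * inject it; the signal is picked up on the next exit.
	 */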

>+
>+      /*
>+       * There are optimizations we can make when signaling interrupts
>+       * if we know the VCPU is in GUEST mode, so mark that here
>+       */
>+      vcpu->irq.guest_mode = 1;
>+
>+      /*
>        * We must inject interrupts (if any) while the irq_lock
>        * is held
>        */
>@@ -1688,6 +1717,13 @@ again:
> #endif
>               : "cc", "memory" );
>
>+      /*
>+       * FIXME: We'd like to turn on interrupts ASAP, but is this so early
>+       * that we will mess up the state of the CPU before we fully
>+       * transition from guest to host?
>+       */

The guest_mode flag can be cleared here instead, while interrupts are still disabled, thus eliminating the cli/sti below.
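
i.e. something like this right after the asm block (untested sketch):

	/*
	 * Still inside the local_irq_save() scope, so clear the flag
	 * before interrupts are re-enabled.
	 */
	vcpu->irq.guest_mode = 0;

	local_irq_restore(irq_flags);

and the spin_lock_irqsave()/spin_unlock_irqrestore() pair around the later
guest_mode = 0 assignment can then be dropped.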

>+      local_irq_restore(irq_flags);
>+
>       if (vcpu->fpu_active) {
>               fx_save(vcpu->guest_fx_image);
>               fx_restore(vcpu->host_fx_image);
>@@ -1710,6 +1746,13 @@ again:
>       reload_tss(vcpu);
>
>       /*
>+       * Signal that we have transitioned back to host mode
>+       */
>+      spin_lock_irqsave(&vcpu->irq.lock, irq_flags);
>+      vcpu->irq.guest_mode = 0;
>+      spin_unlock_irqrestore(&vcpu->irq.lock, irq_flags);
>+
