On 04/01/2011 09:17 PM, Scott Wood wrote:
This is done lazily.  The SPE save will be done only if the guest has
used SPE since the last preemption or heavyweight exit.  Restore will be
done only on demand, when enabling MSR_SPE in the shadow MSR, in response
to an SPE fault or mtmsr emulation.

For SPEFSCR, Linux already switches it on context switch (non-lazily), so
the only remaining bit is to save it between qemu and the guest.

Signed-off-by: Liu Yu <yu....@freescale.com>
Signed-off-by: Scott Wood <scottw...@freescale.com>
---
v5: Clear shadow MSR[SPE] if guest clears MSR[SPE].

  arch/powerpc/include/asm/kvm_host.h  |    6 ++
  arch/powerpc/include/asm/reg_booke.h |    1 +
  arch/powerpc/kernel/asm-offsets.c    |    6 ++
  arch/powerpc/kvm/booke.c             |   84 +++++++++++++++++++++++++++++++++-
  arch/powerpc/kvm/booke.h             |   22 +++------
  arch/powerpc/kvm/booke_interrupts.S  |   40 ++++++++++++++++
  arch/powerpc/kvm/e500.c              |   15 ++----
  7 files changed, 149 insertions(+), 25 deletions(-)
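
To make the lazy scheme described in the commit message concrete, here is a minimal, self-contained C sketch of the intended lifecycle. The names (struct lazy_spe, on_spe_unavail_exit, on_vcpu_put) are made up for illustration; the real work is done by the handlers in the diff below.

#include <stdbool.h>

/* Toy model of the lazy SPE switch: who currently owns the SPE state. */
enum spe_owner { SPE_HOST, SPE_GUEST };

struct lazy_spe {
	enum spe_owner owner;	/* owner of the EVRs/ACC/SPEFSCR            */
	bool guest_msr_spe;	/* guest-visible MSR[SPE]                   */
	bool shadow_msr_spe;	/* MSR[SPE] actually in force in guest mode */
};

/* Guest executed an SPE insn while shadow MSR[SPE] was clear. */
static void on_spe_unavail_exit(struct lazy_spe *s)
{
	if (s->guest_msr_spe) {
		/* restore guest SPE state on demand, then retry the insn */
		s->owner = SPE_GUEST;
		s->shadow_msr_spe = true;
	}
	/* else: reflect the SPE-unavailable exception into the guest */
}

/* Preemption or heavyweight exit: save only if the guest used SPE. */
static void on_vcpu_put(struct lazy_spe *s)
{
	if (s->shadow_msr_spe) {
		/* write guest SPE state back into the vcpu struct */
		s->owner = SPE_HOST;
		s->shadow_msr_spe = false;
	}
}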

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 072ec7b..a3810ab 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -195,6 +195,12 @@ struct kvm_vcpu_arch {
        u64 fpr[32];
        u64 fpscr;

+#ifdef CONFIG_SPE
+       ulong evr[32];
+       ulong spefscr;
+       ulong host_spefscr;
+       u64 acc;
+#endif
  #ifdef CONFIG_ALTIVEC
        vector128 vr[32];
        vector128 vscr;
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 3b1a9b7..2705f9a 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -312,6 +312,7 @@
  #define ESR_ILK               0x00100000      /* Instr. Cache Locking */
  #define ESR_PUO               0x00040000      /* Unimplemented Operation exception */
  #define ESR_BO                0x00020000      /* Byte Ordering */
+#define ESR_SPV                0x00000080      /* Signal Processing operation */

  /* Bit definitions related to the DBCR0. */
  #if defined(CONFIG_40x)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 5120a63..4d39f2d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -494,6 +494,12 @@ int main(void)
        DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3));
        DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7));
  #endif
+#ifdef CONFIG_SPE

Shouldn't this be #if defined(CONFIG_KVM) && defined(CONFIG_SPE)?

+       DEFINE(VCPU_EVR, offsetof(struct kvm_vcpu, arch.evr[0]));
+       DEFINE(VCPU_ACC, offsetof(struct kvm_vcpu, arch.acc));
+       DEFINE(VCPU_SPEFSCR, offsetof(struct kvm_vcpu, arch.spefscr));
+       DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
+#endif /* CONFIG_SPE */

  #ifdef CONFIG_KVM_EXIT_TIMING
        DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 1204e1d..965e1d8 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -13,6 +13,7 @@
   * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
   *
   * Copyright IBM Corp. 2007
+ * Copyright 2010-2011 Freescale Semiconductor, Inc.
   *
   * Authors: Hollis Blanchard <holl...@us.ibm.com>
   *          Christian Ehrhardt <ehrha...@linux.vnet.ibm.com>
@@ -78,6 +79,57 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
        }
  }

+#ifdef CONFIG_SPE
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
+{
+       preempt_disable();
+       enable_kernel_spe();
+       kvmppc_save_guest_spe(vcpu);
+       vcpu->arch.shadow_msr &= ~MSR_SPE;
+       preempt_enable();
+}
+
+static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
+{
+       preempt_disable();
+       enable_kernel_spe();
+       kvmppc_load_guest_spe(vcpu);
+       vcpu->arch.shadow_msr |= MSR_SPE;
+       preempt_enable();
+}
+
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.shared->msr & MSR_SPE) {
+               if (!(vcpu->arch.shadow_msr & MSR_SPE))
+                       kvmppc_vcpu_enable_spe(vcpu);
+       } else if (vcpu->arch.shadow_msr & MSR_SPE) {
+               kvmppc_vcpu_disable_spe(vcpu);

So what if shared->msr & MSR_SPE && shadow_msr & MSR_SPE? Do you disable it then?

+       }
+}
+#else
+static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
+{
+}
+#endif
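
For what it is worth, the four cases kvmppc_vcpu_sync_spe() can see work out as below. This is a standalone sketch of the decision only (MSR_SPE_MASK is a stand-in constant here), not the kernel function itself:

/* +1: enable (load guest SPE state), -1: disable (save it), 0: no change */
#define MSR_SPE_MASK (1u << 25)	/* stand-in for the real MSR_SPE bit */

static int sync_spe_action(unsigned int guest_msr, unsigned int shadow_msr)
{
	if (guest_msr & MSR_SPE_MASK) {
		if (!(shadow_msr & MSR_SPE_MASK))
			return 1;	/* guest turned SPE on: load state  */
		return 0;		/* both set: already enabled        */
	} else if (shadow_msr & MSR_SPE_MASK) {
		return -1;		/* guest turned SPE off: save state */
	}
	return 0;			/* both clear: nothing to do        */
}

In particular, when both the guest MSR and the shadow MSR have SPE set, the function leaves things alone: the unit is already enabled for the guest, so there is nothing to switch.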
+
+/* Helper function for "full" MSR writes. No need to call this if only EE is
+ * changing. */
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+       if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
+               kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
+
+       vcpu->arch.shared->msr = new_msr;
+
+       if (vcpu->arch.shared->msr & MSR_WE) {
+               kvm_vcpu_block(vcpu);
+               kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
+       };
+
+       kvmppc_vcpu_sync_spe(vcpu);
+}
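
Since kvmppc_set_msr() is now out of line, its callers do not change; a call site in the mtmsr/rfi emulation paths would look roughly like the fragment below (illustrative only, not a quote from booke_emulate.c):

	/* e.g. when emulating mtmsr: rs names the guest source register */
	kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));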
+
  static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                         unsigned int priority)
  {
@@ -344,10 +396,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                r = RESUME_GUEST;
                break;

-       case BOOKE_INTERRUPT_SPE_UNAVAIL:
-               kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
+#ifdef CONFIG_SPE
+       case BOOKE_INTERRUPT_SPE_UNAVAIL: {
+               if (vcpu->arch.shared->msr & MSR_SPE)
+                       kvmppc_vcpu_enable_spe(vcpu);
+               else
+                       kvmppc_booke_queue_irqprio(vcpu,
+                                                  BOOKE_IRQPRIO_SPE_UNAVAIL);
                r = RESUME_GUEST;
                break;
+       }

        case BOOKE_INTERRUPT_SPE_FP_DATA:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
@@ -358,6 +416,28 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                r = RESUME_GUEST;
                break;
+#else
+       case BOOKE_INTERRUPT_SPE_UNAVAIL:
+               /*
+                * Guest wants SPE, but host kernel doesn't support it.  Send
+                * an "unimplemented operation" program check to the guest.
+                */
+               kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
+               r = RESUME_GUEST;
+               break;
+
+       /*
+        * These really should never happen without CONFIG_SPE,
+        * as we should never enable the real MSR[SPE] in the guest.
+        */
+       case BOOKE_INTERRUPT_SPE_FP_DATA:
+       case BOOKE_INTERRUPT_SPE_FP_ROUND:
+               printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
+                      __func__, exit_nr, vcpu->arch.pc);
+               run->hw.hardware_exit_reason = exit_nr;
+               r = RESUME_HOST;
+               break;
+#endif
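
As a worked value: with the ESR bits added earlier in this patch, and assuming the queued ESR flags are delivered into the guest's ESR as usual, the guest's program check reports ESR_PUO | ESR_SPV = 0x00040000 | 0x00000080 = 0x00040080.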

        case BOOKE_INTERRUPT_DATA_STORAGE:
                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 492bb70..0fa1732 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -52,24 +52,18 @@

  extern unsigned long kvmppc_booke_handlers;

-/* Helper function for "full" MSR writes. No need to call this if only EE is
- * changing. */
-static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
-{
-       if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR))
-               kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
-
-       vcpu->arch.shared->msr = new_msr;
-
-       if (vcpu->arch.shared->msr & MSR_WE) {
-               kvm_vcpu_block(vcpu);
-               kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
-       };
-}
+void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);

  int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                              unsigned int inst, int *advance);
  int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
  int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs);

+/* low-level asm code to transfer guest state */
+void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
+void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
+
+/* high-level function, manages flags, host state */
+void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
+
  #endif /* __KVM_BOOKE_H__ */
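
The split mirrors the intended usage: the low-level asm routines only move register state, while the high-level wrapper handles preemption, enable_kernel_spe() and the shadow MSR. A caller that wants to hand the SPE unit back to the host, for instance on the vcpu_put/preemption path the commit message mentions, would do something like this fragment (an illustration of the interface, not a quote from the rest of the diff):

	if (vcpu->arch.shadow_msr & MSR_SPE)
		kvmppc_vcpu_disable_spe(vcpu);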
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 307771c..f85a7e0 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -13,6 +13,7 @@
   * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
   *
   * Copyright IBM Corp. 2007
+ * Copyright 2011 Freescale Semiconductor, Inc.
   *
   * Authors: Hollis Blanchard <holl...@us.ibm.com>
   */
@@ -239,6 +240,14 @@ _GLOBAL(kvmppc_resume_host)
  heavyweight_exit:
        /* Not returning to guest. */

+#ifdef CONFIG_SPE
+       /* save guest SPEFSCR and load host SPEFSCR */
+       mfspr   r9, SPRN_SPEFSCR
+       stw     r9, VCPU_SPEFSCR(r4)
+       lwz     r9, VCPU_HOST_SPEFSCR(r4)
+       mtspr   SPRN_SPEFSCR, r9
+#endif
+
        /* We already saved guest volatile register state; now save the
         * non-volatiles. */
        stw     r15, VCPU_GPR(r15)(r4)
@@ -340,6 +349,14 @@ _GLOBAL(__kvmppc_vcpu_run)
        lwz     r30, VCPU_GPR(r30)(r4)
        lwz     r31, VCPU_GPR(r31)(r4)

+#ifdef CONFIG_SPE
+       /* save host SPEFSCR and load guest SPEFSCR */
+       mfspr   r3, SPRN_SPEFSCR
+       stw     r3, VCPU_HOST_SPEFSCR(r4)
+       lwz     r3, VCPU_SPEFSCR(r4)
+       mtspr   SPRN_SPEFSCR, r3
+#endif
+
  lightweight_exit:
        stw     r2, HOST_R2(r1)

@@ -426,3 +443,26 @@ lightweight_exit:
        lwz     r3, VCPU_GPR(r3)(r4)
        lwz     r4, VCPU_GPR(r4)(r4)
        rfi
+
+#ifdef CONFIG_SPE
+_GLOBAL(kvmppc_save_guest_spe)
+       cmpi    0,r3,0
+       beqlr-
+       addi    r5,r3,VCPU_EVR
+       SAVE_32EVRS(0, r4, r5, 0)
+       evxor   evr6, evr6, evr6
+       evmwumiaa evr6, evr6, evr6
+       li      r4,VCPU_ACC
+       evstddx evr6, r4, r3            /* save acc */

I'm not sure I fully understand SPE instructions yet, but isn't evr6 just r6 plus its upper 32 bits? Then wouldn't it make sense to work in evr3/evr4 and only copy the upper 32 bits over to another register? Not that it should matter - I'm only being curious here :)


Alex
