Re: [PATCH 36/39] powerpc/kvm/book3s_hv: Add little endian guest support

2013-09-30 Thread Alexander Graf

On 09/23/2013 04:05 AM, Anton Blanchard wrote:

Add support for the H_SET_MODE hcall so we can select the
endianness of our exceptions.

We create a guest MSR from scratch when delivering exceptions in
a few places and instead of extracting the LPCR[ILE] and inserting
it into MSR_LE each time simply create a new variable intr_msr which
contains the entire MSR to use.

Signed-off-by: Anton Blanchard <an...@samba.org>
Cc: Alexander Graf <ag...@suse.de>
---
  arch/powerpc/include/asm/kvm_host.h |  1 +
  arch/powerpc/kernel/asm-offsets.c   |  1 +
  arch/powerpc/kvm/book3s_64_mmu_hv.c |  2 +-
  arch/powerpc/kvm/book3s_hv.c| 44 +
  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 15 ---
  5 files changed, 52 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index 3328353..50f204e 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -605,6 +605,7 @@ struct kvm_vcpu_arch {
spinlock_t tbacct_lock;
u64 busy_stolen;
u64 busy_preempt;
+   unsigned long intr_msr;
  #endif
  };

diff --git a/arch/powerpc/kernel/asm-offsets.c 
b/arch/powerpc/kernel/asm-offsets.c
index d8958be..3967b15 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -490,6 +490,7 @@ int main(void)
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
+   DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
  #endif
  #ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c 
b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 043eec8..30459bd 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -266,7 +266,7 @@ void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)

  static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
  {
-   kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
+   kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
  }

  /*
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 62a2b5a..c9b90b8 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -503,6 +503,43 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
vcpu->arch.dtl.dirty = true;
  }

+static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
+unsigned long resource, unsigned long value1,
+unsigned long value2)
+{
+   struct kvm *kvm = vcpu->kvm;
+   struct kvm_vcpu *v;
+   int n;
+
+   if (resource == 4) {
+   if (value1)
+   return H_P3;
+   if (value2)
+   return H_P4;
+
+   switch (mflags) {
+   case 0:
+   kvm->arch.lpcr &= ~LPCR_ILE;


Can we live migrate this properly?


Alex

___
Linuxppc-dev mailing list
Linuxppc-dev@lists.ozlabs.org
https://lists.ozlabs.org/listinfo/linuxppc-dev


[PATCH 36/39] powerpc/kvm/book3s_hv: Add little endian guest support

2013-09-22 Thread Anton Blanchard
Add support for the H_SET_MODE hcall so we can select the
endianness of our exceptions.

We create a guest MSR from scratch when delivering exceptions in
a few places and instead of extracting the LPCR[ILE] and inserting
it into MSR_LE each time simply create a new variable intr_msr which
contains the entire MSR to use.

Signed-off-by: Anton Blanchard <an...@samba.org>
Cc: Alexander Graf <ag...@suse.de>
---
 arch/powerpc/include/asm/kvm_host.h |  1 +
 arch/powerpc/kernel/asm-offsets.c   |  1 +
 arch/powerpc/kvm/book3s_64_mmu_hv.c |  2 +-
 arch/powerpc/kvm/book3s_hv.c| 44 +
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 15 ---
 5 files changed, 52 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h 
b/arch/powerpc/include/asm/kvm_host.h
index 3328353..50f204e 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -605,6 +605,7 @@ struct kvm_vcpu_arch {
spinlock_t tbacct_lock;
u64 busy_stolen;
u64 busy_preempt;
+   unsigned long intr_msr;
 #endif
 };
 
diff --git a/arch/powerpc/kernel/asm-offsets.c 
b/arch/powerpc/kernel/asm-offsets.c
index d8958be..3967b15 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -490,6 +490,7 @@ int main(void)
DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
+   DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c 
b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 043eec8..30459bd 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -266,7 +266,7 @@ void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 
 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 {
-   kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
+   kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
 }
 
 /*
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 62a2b5a..c9b90b8 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -503,6 +503,43 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
vcpu->arch.dtl.dirty = true;
 }
 
+static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
+unsigned long resource, unsigned long value1,
+unsigned long value2)
+{
+   struct kvm *kvm = vcpu->kvm;
+   struct kvm_vcpu *v;
+   int n;
+
+   if (resource == 4) {
+   if (value1)
+   return H_P3;
+   if (value2)
+   return H_P4;
+
+   switch (mflags) {
+   case 0:
+   kvm->arch.lpcr &= ~LPCR_ILE;
+   kvm_for_each_vcpu(n, v, kvm)
+   v->arch.intr_msr &= ~MSR_LE;
+   kick_all_cpus_sync();
+   return H_SUCCESS;
+
+   case 1:
+   kvm->arch.lpcr |= LPCR_ILE;
+   kvm_for_each_vcpu(n, v, kvm)
+   v->arch.intr_msr |= MSR_LE;
+   kick_all_cpus_sync();
+   return H_SUCCESS;
+
+   default:
+   return H_UNSUPPORTED_FLAG_START;
+   }
+   }
+
+   return H_P2;
+}
+
 int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 {
unsigned long req = kvmppc_get_gpr(vcpu, 3);
@@ -557,6 +594,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 
/* Send the error out to userspace via KVM_RUN */
return rc;
+   case H_SET_MODE:
+   ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
+   kvmppc_get_gpr(vcpu, 5),
+   kvmppc_get_gpr(vcpu, 6),
+   kvmppc_get_gpr(vcpu, 7));
+   break;
 
case H_XIRR:
case H_CPPR:
@@ -924,6 +967,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, 
unsigned int id)
spin_lock_init(&vcpu->arch.vpa_update_lock);
spin_lock_init(&vcpu->arch.tbacct_lock);
vcpu->arch.busy_preempt = TB_NIL;
+   vcpu->arch.intr_msr = MSR_SF | MSR_ME;
 
kvmppc_mmu_book3s_hv_init(vcpu);
 
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S 
b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 294b7af..7b7bcea 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -521,8 +521,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 12:mr  r6,r10
mr  r10,r0
mr  r7,r11
-   li  r11,(MSR_ME << 1) | 1