Re: [RFC PATCH 2/4] KVM: PPC: Book3E: Handle LRAT error exception

2014-07-07 Thread Scott Wood
On Fri, 2014-07-04 at 10:15 +0200, Alexander Graf wrote:
 On 03.07.14 16:45, Mihai Caraman wrote:
  diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
  index a192975..ab1077f 100644
  --- a/arch/powerpc/kvm/booke.c
  +++ b/arch/powerpc/kvm/booke.c
  @@ -1286,6 +1286,46 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
  break;
  }

  +#ifdef CONFIG_KVM_BOOKE_HV
  +   case BOOKE_INTERRUPT_LRAT_ERROR:
  +   {
  +   gfn_t gfn;
  +
  +   /*
  +    * Guest TLB management instructions (EPCR.DGTMI == 0) is not
  +    * supported for now
  +    */
  +   if (!(vcpu->arch.fault_esr & ESR_PT)) {
  +           WARN(1, "%s: Guest TLB management instructions not supported!\n", __func__);
 
 Wouldn't this allow a guest to flood the host's kernel log?

It shouldn't be possible for this to happen, since the host will never
set EPCR[DGTMI] -- but yes, it should be WARN_ONCE or ratelimited.
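
E.g. either of the following would avoid the flood (sketch only, reusing the message from the patch; both macros are the standard printk.h helpers):

	/* once per boot, with a backtrace */
	WARN_ONCE(1, "%s: Guest TLB management instructions not supported!\n",
		  __func__);

	/* or ratelimited, without a backtrace */
	pr_warn_ratelimited("%s: Guest TLB management instructions not supported!\n",
			    __func__);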

  +{
  +   int this, next;
  +
  +   this = local_paca->tcd.lrat_next;
  +   next = (this + 1) % local_paca->tcd.lrat_max;
 
 Can we assume that lrat_max is always a power of 2? IIRC modulo 
 functions with variables can be quite expensive. So if we can instead do
 
 next = (this + 1) & local_paca->tcd.lrat_mask;
 
 we should be faster and not rely on division helpers.

Architecturally we can't assume that, though it's true on the only
existing implementation.

Why not do something similar to what is done for tlb1:

unsigned int sesel = vcpu_e500->host_tlb1_nv++;

if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
	vcpu_e500->host_tlb1_nv = 0;

...and while we're at it, use local_paca->tcd for tlb1 as well (except
on 32-bit).

Also, please use get_paca() rather than local_paca so that the
preemption-disabled check is retained.
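
Roughly, combining the two suggestions above (untested sketch; lrat_next and
lrat_max are the per-core tcd fields this patch adds):

static inline int lrat_next(void)
{
	struct tlb_core_data *tcd = &get_paca()->tcd;
	int this = tcd->lrat_next;

	/* increment and wrap with a compare, as host_tlb1_nv does,
	   rather than a modulo -- no power-of-two assumption needed */
	tcd->lrat_next = this + 1;
	if (unlikely(tcd->lrat_next >= tcd->lrat_max))
		tcd->lrat_next = 0;

	return this;
}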

  +void write_host_lrate(int tsize, gfn_t gfn, unsigned long pfn, uint32_t lpid,
  +		      int valid, int lrat_entry)
  +{
  +   struct kvm_book3e_206_tlb_entry stlbe;
  +   int esel = lrat_entry;
  +   unsigned long flags;
  +
  +   stlbe.mas1 = (valid ? MAS1_VALID : 0) | MAS1_TSIZE(tsize);
  +   stlbe.mas2 = ((u64)gfn << PAGE_SHIFT);
  +   stlbe.mas7_3 = ((u64)pfn << PAGE_SHIFT);
  +   stlbe.mas8 = MAS8_TGS | lpid;
  +
  +   local_irq_save(flags);
  +   /* book3e_tlb_lock(); */
 
 Hm?

Indeed.

  +
  +   if (esel == -1)
  +   esel = lrat_next();
  +   __write_host_tlbe(&stlbe, MAS0_ATSEL | MAS0_ESEL(esel));

Where do you call this function with lrat_entry != -1?  Why rename it to
esel at function entry?

  +   down_read(&current->mm->mmap_sem);
  +   vma = find_vma(current->mm, hva);
  +   if (vma && (hva >= vma->vm_start)) {
  +           psize = vma_kernel_pagesize(vma);
  +   } else {
  +           pr_err_ratelimited("%s: couldn't find virtual memory address for gfn %lx!\n", __func__, (long)gfn);

While output strings should not be linewrapped, the arguments that come
after the long string should be.
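
I.e. keep the format string intact and break before the arguments, along
these lines (illustrative):

	pr_err_ratelimited("%s: couldn't find virtual memory address for gfn %lx!\n",
			   __func__, (long)gfn);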

-Scott




Re: [RFC PATCH 2/4] KVM: PPC: Book3E: Handle LRAT error exception

2014-07-04 Thread Alexander Graf


On 03.07.14 16:45, Mihai Caraman wrote:

Handle LRAT error exception with support for lrat mapping and invalidation.

Signed-off-by: Mihai Caraman <mihai.cara...@freescale.com>
---
  arch/powerpc/include/asm/kvm_host.h   |   1 +
  arch/powerpc/include/asm/kvm_ppc.h|   2 +
  arch/powerpc/include/asm/mmu-book3e.h |   3 +
  arch/powerpc/include/asm/reg_booke.h  |  13 
  arch/powerpc/kernel/asm-offsets.c |   1 +
  arch/powerpc/kvm/booke.c  |  40 +++
  arch/powerpc/kvm/bookehv_interrupts.S |   9 ++-
  arch/powerpc/kvm/e500_mmu_host.c  | 125 ++
  arch/powerpc/kvm/e500mc.c |   2 +
  9 files changed, 195 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bb66d8b..7b6b2ec 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -433,6 +433,7 @@ struct kvm_vcpu_arch {
u32 eplc;
u32 epsc;
u32 oldpir;
+   u64 fault_lper;
  #endif
  
  #if defined(CONFIG_BOOKE)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9c89cdd..2730a29 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -86,6 +86,8 @@ extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
gva_t eaddr);
  extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
  extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_lrat_map(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern void kvmppc_lrat_invalidate(struct kvm_vcpu *vcpu);
  
  extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
						  unsigned int id);
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 088fd9f..ac6acf7 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,6 +40,8 @@
  
  /* MAS registers bit definitions */
  
+#define MAS0_ATSEL		0x80000000
+#define MAS0_ATSEL_SHIFT	31
 #define MAS0_TLBSEL_MASK	0x30000000
 #define MAS0_TLBSEL_SHIFT	28
 #define MAS0_TLBSEL(x)		(((x) << MAS0_TLBSEL_SHIFT) & MAS0_TLBSEL_MASK)
@@ -53,6 +55,7 @@
 #define MAS0_WQ_CLR_RSRV	0x00002000
 
 #define MAS1_VALID		0x80000000
+#define MAS1_VALID_SHIFT	31
 #define MAS1_IPROT		0x40000000
 #define MAS1_TID(x)		(((x) << 16) & 0x3FFF0000)
 #define MAS1_IND		0x00002000
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 75bda23..783d617 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -43,6 +43,8 @@
  
  /* Special Purpose Registers (SPRNs)*/

  #define SPRN_DECAR	0x036	/* Decrementer Auto Reload Register */
+#define SPRN_LPER  0x038   /* Logical Page Exception Register */
+#define SPRN_LPERU 0x039   /* Logical Page Exception Register Upper */
  #define SPRN_IVPR 0x03F   /* Interrupt Vector Prefix Register */
  #define SPRN_USPRG0   0x100   /* User Special Purpose Register General 0 */
  #define SPRN_SPRG3R   0x103   /* Special Purpose Register General 3 Read */
@@ -358,6 +360,9 @@
 #define ESR_ILK		0x00100000	/* Instr. Cache Locking */
 #define ESR_PUO		0x00040000	/* Unimplemented Operation exception */
 #define ESR_BO		0x00020000	/* Byte Ordering */
+#define ESR_DATA	0x0400	/* Page Table Data Access */
+#define ESR_TLBI	0x0200	/* Page Table TLB Ineligible */
+#define ESR_PT		0x0100	/* Page Table Translation */
 #define ESR_SPV		0x00000080	/* Signal Processing operation */
  
  /* Bit definitions related to the DBCR0. */

@@ -649,6 +654,14 @@
 #define EPC_EPID		0x00003fff
 #define EPC_EPID_SHIFT		0
  
+/* Bit definitions for LPER */

+#define LPER_ALPN  0x000FF000ULL
+#define LPER_ALPN_SHIFT12
+#define LPER_WIMGE 0x0F80
+#define LPER_WIMGE_SHIFT   7
+#define LPER_LPS   0x000F
+#define LPER_LPS_SHIFT 0
+
  /*
   * The IBM-403 is an even more odd special case, as it is much
   * older than the IBM-405 series.  We put these down here incase someone
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index f5995a9..be6e329 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -713,6 +713,7 @@ int main(void)
DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4));
DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6));
DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc));
+   DEFINE(VCPU_FAULT_LPER, offsetof(struct kvm_vcpu, arch.fault_lper));
  #endif
  
  #ifdef CONFIG_KVM_EXIT_TIMING

diff --git 

[RFC PATCH 2/4] KVM: PPC: Book3E: Handle LRAT error exception

2014-07-03 Thread Mihai Caraman
Handle LRAT error exception with support for lrat mapping and invalidation.

Signed-off-by: Mihai Caraman <mihai.cara...@freescale.com>
---
 arch/powerpc/include/asm/kvm_host.h   |   1 +
 arch/powerpc/include/asm/kvm_ppc.h|   2 +
 arch/powerpc/include/asm/mmu-book3e.h |   3 +
 arch/powerpc/include/asm/reg_booke.h  |  13 
 arch/powerpc/kernel/asm-offsets.c |   1 +
 arch/powerpc/kvm/booke.c  |  40 +++
 arch/powerpc/kvm/bookehv_interrupts.S |   9 ++-
 arch/powerpc/kvm/e500_mmu_host.c  | 125 ++
 arch/powerpc/kvm/e500mc.c |   2 +
 9 files changed, 195 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bb66d8b..7b6b2ec 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -433,6 +433,7 @@ struct kvm_vcpu_arch {
u32 eplc;
u32 epsc;
u32 oldpir;
+   u64 fault_lper;
 #endif
 
 #if defined(CONFIG_BOOKE)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9c89cdd..2730a29 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -86,6 +86,8 @@ extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
   gva_t eaddr);
 extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
+extern void kvmppc_lrat_map(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern void kvmppc_lrat_invalidate(struct kvm_vcpu *vcpu);
 
 extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
 unsigned int id);
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 088fd9f..ac6acf7 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,6 +40,8 @@
 
 /* MAS registers bit definitions */
 
+#define MAS0_ATSEL		0x80000000
+#define MAS0_ATSEL_SHIFT	31
 #define MAS0_TLBSEL_MASK	0x30000000
 #define MAS0_TLBSEL_SHIFT	28
 #define MAS0_TLBSEL(x)		(((x) << MAS0_TLBSEL_SHIFT) & MAS0_TLBSEL_MASK)
@@ -53,6 +55,7 @@
 #define MAS0_WQ_CLR_RSRV	0x00002000
 
 #define MAS1_VALID		0x80000000
+#define MAS1_VALID_SHIFT	31
 #define MAS1_IPROT		0x40000000
 #define MAS1_TID(x)		(((x) << 16) & 0x3FFF0000)
 #define MAS1_IND		0x00002000
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 75bda23..783d617 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -43,6 +43,8 @@
 
 /* Special Purpose Registers (SPRNs)*/
 #define SPRN_DECAR 0x036   /* Decrementer Auto Reload Register */
+#define SPRN_LPER  0x038   /* Logical Page Exception Register */
+#define SPRN_LPERU 0x039   /* Logical Page Exception Register Upper */
 #define SPRN_IVPR  0x03F   /* Interrupt Vector Prefix Register */
 #define SPRN_USPRG0	0x100	/* User Special Purpose Register General 0 */
 #define SPRN_SPRG3R	0x103	/* Special Purpose Register General 3 Read */
@@ -358,6 +360,9 @@
 #define ESR_ILK		0x00100000	/* Instr. Cache Locking */
 #define ESR_PUO		0x00040000	/* Unimplemented Operation exception */
 #define ESR_BO		0x00020000	/* Byte Ordering */
+#define ESR_DATA	0x0400	/* Page Table Data Access */
+#define ESR_TLBI	0x0200	/* Page Table TLB Ineligible */
+#define ESR_PT		0x0100	/* Page Table Translation */
 #define ESR_SPV		0x00000080	/* Signal Processing operation */
 
 /* Bit definitions related to the DBCR0. */
@@ -649,6 +654,14 @@
 #define EPC_EPID		0x00003fff
 #define EPC_EPID_SHIFT		0
 
+/* Bit definitions for LPER */
+#define LPER_ALPN  0x000FF000ULL
+#define LPER_ALPN_SHIFT12
+#define LPER_WIMGE 0x0F80
+#define LPER_WIMGE_SHIFT   7
+#define LPER_LPS   0x000F
+#define LPER_LPS_SHIFT 0
+
 /*
  * The IBM-403 is an even more odd special case, as it is much
  * older than the IBM-405 series.  We put these down here incase someone
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index f5995a9..be6e329 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -713,6 +713,7 @@ int main(void)
DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4));
DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6));
DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc));
+   DEFINE(VCPU_FAULT_LPER, offsetof(struct kvm_vcpu, arch.fault_lper));
 #endif
 
 #ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index a192975..ab1077f 100644