Re: [PATCH 25/37] KVM: arm64: Prepare to handle traps on remaining deferred EL1 sysregs

2017-12-03 Thread Christoffer Dall
On Mon, Nov 13, 2017 at 07:56:14PM +0100, Andrew Jones wrote:
> On Thu, Oct 12, 2017 at 12:41:29PM +0200, Christoffer Dall wrote:
> > Handle accesses during traps to any remaining EL1 registers which can be
> > deferred to vcpu_load and vcpu_put, by either accessing them directly on
> > the physical CPU when the latest version is stored there, or by
> > synchronizing the memory representation with the CPU state.
> > 
> > Signed-off-by: Christoffer Dall 
> > ---
> >  arch/arm64/include/asm/kvm_emulate.h | 14 ---
> >  arch/arm64/kvm/inject_fault.c| 79 
> > 
> >  arch/arm64/kvm/sys_regs.c|  6 ++-
> >  3 files changed, 76 insertions(+), 23 deletions(-)
> > 
> > diff --git a/arch/arm64/include/asm/kvm_emulate.h 
> > b/arch/arm64/include/asm/kvm_emulate.h
> > index 630dd60..69bb40d 100644
> > --- a/arch/arm64/include/asm/kvm_emulate.h
> > +++ b/arch/arm64/include/asm/kvm_emulate.h
> > @@ -66,11 +66,6 @@ static inline unsigned long *vcpu_pc(const struct 
> > kvm_vcpu *vcpu)
> > return (unsigned long *)_gp_regs(vcpu)->regs.pc;
> >  }
> >  
> > -static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
> > -{
> > -   return (unsigned long *)_gp_regs(vcpu)->elr_el1;
> > -}
> > -
> >  static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
> >  {
> > return (unsigned long *)_gp_regs(vcpu)->regs.pstate;
> > @@ -120,15 +115,6 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, 
> > u8 reg_num,
> > vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
> >  }
> >  
> > -/* Get vcpu SPSR for current mode */
> > -static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
> > -{
> > -   if (vcpu_mode_is_32bit(vcpu))
> > -   return vcpu_spsr32(vcpu);
> > -
> > -   return (unsigned long *)_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
> > -}
> > -
> >  static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
> >  {
> > u32 mode;
> > diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
> > index 45c7026..f4513fc 100644
> > --- a/arch/arm64/kvm/inject_fault.c
> > +++ b/arch/arm64/kvm/inject_fault.c
> > @@ -23,6 +23,7 @@
> >  
> >  #include <linux/kvm_host.h>
> >  #include <asm/kvm_emulate.h>
> > +#include <asm/kvm_hyp.h>
> >  #include <asm/esr.h>
> >  
> >  #define PSTATE_FAULT_BITS_64   (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT 
> > | \
> > @@ -33,13 +34,55 @@
> >  #define LOWER_EL_AArch64_VECTOR	0x400
> >  #define LOWER_EL_AArch32_VECTOR	0x600
> >  
> > +static u64 vcpu_get_vbar_el1(struct kvm_vcpu *vcpu)
> > +{
> > +   unsigned long vbar;
> > +
> > +   if (vcpu->arch.sysregs_loaded_on_cpu)
> > +   vbar = read_sysreg_el1(vbar);
> > +   else
> > +   vbar = vcpu_sys_reg(vcpu, VBAR_EL1);
> > +
> > +   if (vcpu_el1_is_32bit(vcpu))
> > +   return lower_32_bits(vbar);
> > +   return vbar;
> > +}
> > +
> > +static void vcpu_set_elr_el1(struct kvm_vcpu *vcpu, u64 val)
> > +{
> > +   if (vcpu->arch.sysregs_loaded_on_cpu)
> > +   write_sysreg_el1(val, elr);
> > +   else
> > +   vcpu_gp_regs(vcpu)->elr_el1 = val;
> > +}
> > +
> > +/* Set the SPSR for the current mode */
> > +static void vcpu_set_spsr(struct kvm_vcpu *vcpu, u64 val)
> > +{
> > +   if (vcpu_mode_is_32bit(vcpu))
> > +   *vcpu_spsr32(vcpu) = val;
> > +
> > +   if (vcpu->arch.sysregs_loaded_on_cpu)
> > +   write_sysreg_el1(val, spsr);
> > +   else
> > +   vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = val;
> > +}
> > +
> > +static u32 vcpu_get_c1_sctlr(struct kvm_vcpu *vcpu)
> > +{
> > +   if (vcpu->arch.sysregs_loaded_on_cpu)
> > +   return lower_32_bits(read_sysreg_el1(sctlr));
> > +   else
> > +   return vcpu_cp15(vcpu, c1_SCTLR);
> > +}
> > +
> >  static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 
> > vect_offset)
> >  {
> > unsigned long cpsr;
> > unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
> > bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
> > u32 return_offset = (is_thumb) ? 4 : 0;
> > -   u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
> > +   u32 sctlr = vcpu_get_c1_sctlr(vcpu);
> >  
> > cpsr = mode | COMPAT_PSR_I_BIT;
> >  
> > @@ -51,14 +94,14 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 
> > mode, u32 vect_offset)
> > *vcpu_cpsr(vcpu) = cpsr;
> >  
> > /* Note: These now point to the banked copies */
> > -   *vcpu_spsr(vcpu) = new_spsr_value;
> > +   vcpu_set_spsr(vcpu, new_spsr_value);
> > *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
> >  
> > /* Branch to exception vector */
> > if (sctlr & (1 << 13))
> > vect_offset += 0xffff0000;
> > else /* always have security exceptions */
> > -   vect_offset += vcpu_cp15(vcpu, c12_VBAR);
> > +   vect_offset += vcpu_get_vbar_el1(vcpu);
> >  
> > *vcpu_pc(vcpu) = vect_offset;
> >  }
> > @@ -79,6 +122,20 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool 
> > is_pabt,
> > u32 *far, *fsr;
> > 

Re: [PATCH 25/37] KVM: arm64: Prepare to handle traps on remaining deferred EL1 sysregs

2017-11-13 Thread Andrew Jones
On Thu, Oct 12, 2017 at 12:41:29PM +0200, Christoffer Dall wrote:
> Handle accesses during traps to any remaining EL1 registers which can be
> deferred to vcpu_load and vcpu_put, by either accessing them directly on
> the physical CPU when the latest version is stored there, or by
> synchronizing the memory representation with the CPU state.
> 
> Signed-off-by: Christoffer Dall 
> ---
>  arch/arm64/include/asm/kvm_emulate.h | 14 ---
>  arch/arm64/kvm/inject_fault.c| 79 
> 
>  arch/arm64/kvm/sys_regs.c|  6 ++-
>  3 files changed, 76 insertions(+), 23 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_emulate.h 
> b/arch/arm64/include/asm/kvm_emulate.h
> index 630dd60..69bb40d 100644
> --- a/arch/arm64/include/asm/kvm_emulate.h
> +++ b/arch/arm64/include/asm/kvm_emulate.h
> @@ -66,11 +66,6 @@ static inline unsigned long *vcpu_pc(const struct kvm_vcpu 
> *vcpu)
>   return (unsigned long *)_gp_regs(vcpu)->regs.pc;
>  }
>  
> -static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
> -{
> - return (unsigned long *)_gp_regs(vcpu)->elr_el1;
> -}
> -
>  static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
>  {
>   return (unsigned long *)_gp_regs(vcpu)->regs.pstate;
> @@ -120,15 +115,6 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, 
> u8 reg_num,
>   vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
>  }
>  
> -/* Get vcpu SPSR for current mode */
> -static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
> -{
> - if (vcpu_mode_is_32bit(vcpu))
> - return vcpu_spsr32(vcpu);
> -
> - return (unsigned long *)_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
> -}
> -
>  static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
>  {
>   u32 mode;
> diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
> index 45c7026..f4513fc 100644
> --- a/arch/arm64/kvm/inject_fault.c
> +++ b/arch/arm64/kvm/inject_fault.c
> @@ -23,6 +23,7 @@
>  
>  #include <linux/kvm_host.h>
>  #include <asm/kvm_emulate.h>
> +#include <asm/kvm_hyp.h>
>  #include <asm/esr.h>
>  
>  #define PSTATE_FAULT_BITS_64 (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT 
> | \
> @@ -33,13 +34,55 @@
>  #define LOWER_EL_AArch64_VECTOR  0x400
>  #define LOWER_EL_AArch32_VECTOR  0x600
>  
> +static u64 vcpu_get_vbar_el1(struct kvm_vcpu *vcpu)
> +{
> + unsigned long vbar;
> +
> + if (vcpu->arch.sysregs_loaded_on_cpu)
> + vbar = read_sysreg_el1(vbar);
> + else
> + vbar = vcpu_sys_reg(vcpu, VBAR_EL1);
> +
> + if (vcpu_el1_is_32bit(vcpu))
> + return lower_32_bits(vbar);
> + return vbar;
> +}
> +
> +static void vcpu_set_elr_el1(struct kvm_vcpu *vcpu, u64 val)
> +{
> + if (vcpu->arch.sysregs_loaded_on_cpu)
> + write_sysreg_el1(val, elr);
> + else
> + vcpu_gp_regs(vcpu)->elr_el1 = val;
> +}
> +
> +/* Set the SPSR for the current mode */
> +static void vcpu_set_spsr(struct kvm_vcpu *vcpu, u64 val)
> +{
> + if (vcpu_mode_is_32bit(vcpu))
> + *vcpu_spsr32(vcpu) = val;
> +
> + if (vcpu->arch.sysregs_loaded_on_cpu)
> + write_sysreg_el1(val, spsr);
> + else
> + vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = val;
> +}
> +
> +static u32 vcpu_get_c1_sctlr(struct kvm_vcpu *vcpu)
> +{
> + if (vcpu->arch.sysregs_loaded_on_cpu)
> + return lower_32_bits(read_sysreg_el1(sctlr));
> + else
> + return vcpu_cp15(vcpu, c1_SCTLR);
> +}
> +
>  static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
>  {
>   unsigned long cpsr;
>   unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
>   bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
>   u32 return_offset = (is_thumb) ? 4 : 0;
> - u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
> + u32 sctlr = vcpu_get_c1_sctlr(vcpu);
>  
>   cpsr = mode | COMPAT_PSR_I_BIT;
>  
> @@ -51,14 +94,14 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 
> mode, u32 vect_offset)
>   *vcpu_cpsr(vcpu) = cpsr;
>  
>   /* Note: These now point to the banked copies */
> - *vcpu_spsr(vcpu) = new_spsr_value;
> + vcpu_set_spsr(vcpu, new_spsr_value);
>   *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
>  
>   /* Branch to exception vector */
>   if (sctlr & (1 << 13))
>   vect_offset += 0xffff0000;
>   else /* always have security exceptions */
> - vect_offset += vcpu_cp15(vcpu, c12_VBAR);
> + vect_offset += vcpu_get_vbar_el1(vcpu);
>  
>   *vcpu_pc(vcpu) = vect_offset;
>  }
> @@ -79,6 +122,20 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool 
> is_pabt,
>   u32 *far, *fsr;
>   bool is_lpae;
>  
> + /*
> +  * We are going to need the latest values of the following system
> +  * regiters:

registers

> +  *   DFAR:  mapped to FAR_EL1

FAR_EL1[31:0]

> +  *   IFAR:  mapped to FAR_EL1

FAR_EL1[63:32]


[PATCH 25/37] KVM: arm64: Prepare to handle traps on remaining deferred EL1 sysregs

2017-10-12 Thread Christoffer Dall
Handle accesses during traps to any remaining EL1 registers which can be
deferred to vcpu_load and vcpu_put, by either accessing them directly on
the physical CPU when the latest version is stored there, or by
synchronizing the memory representation with the CPU state.

Signed-off-by: Christoffer Dall 
---
 arch/arm64/include/asm/kvm_emulate.h | 14 ---
 arch/arm64/kvm/inject_fault.c| 79 
 arch/arm64/kvm/sys_regs.c|  6 ++-
 3 files changed, 76 insertions(+), 23 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h 
b/arch/arm64/include/asm/kvm_emulate.h
index 630dd60..69bb40d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -66,11 +66,6 @@ static inline unsigned long *vcpu_pc(const struct kvm_vcpu 
*vcpu)
return (unsigned long *)_gp_regs(vcpu)->regs.pc;
 }
 
-static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
-{
-   return (unsigned long *)_gp_regs(vcpu)->elr_el1;
-}
-
 static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
 {
return (unsigned long *)_gp_regs(vcpu)->regs.pstate;
@@ -120,15 +115,6 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 
reg_num,
vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
 }
 
-/* Get vcpu SPSR for current mode */
-static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
-{
-   if (vcpu_mode_is_32bit(vcpu))
-   return vcpu_spsr32(vcpu);
-
-   return (unsigned long *)_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
-}
-
 static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
 {
u32 mode;
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 45c7026..f4513fc 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -23,6 +23,7 @@
 
 #include <linux/kvm_host.h>
 #include <asm/kvm_emulate.h>
+#include <asm/kvm_hyp.h>
 #include <asm/esr.h>
 
 #define PSTATE_FAULT_BITS_64   (PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
@@ -33,13 +34,55 @@
 #define LOWER_EL_AArch64_VECTOR	0x400
 #define LOWER_EL_AArch32_VECTOR	0x600
 
+static u64 vcpu_get_vbar_el1(struct kvm_vcpu *vcpu)
+{
+   unsigned long vbar;
+
+   if (vcpu->arch.sysregs_loaded_on_cpu)
+   vbar = read_sysreg_el1(vbar);
+   else
+   vbar = vcpu_sys_reg(vcpu, VBAR_EL1);
+
+   if (vcpu_el1_is_32bit(vcpu))
+   return lower_32_bits(vbar);
+   return vbar;
+}
+
+static void vcpu_set_elr_el1(struct kvm_vcpu *vcpu, u64 val)
+{
+   if (vcpu->arch.sysregs_loaded_on_cpu)
+   write_sysreg_el1(val, elr);
+   else
+   vcpu_gp_regs(vcpu)->elr_el1 = val;
+}
+
+/* Set the SPSR for the current mode */
+static void vcpu_set_spsr(struct kvm_vcpu *vcpu, u64 val)
+{
+   if (vcpu_mode_is_32bit(vcpu))
+   *vcpu_spsr32(vcpu) = val;
+
+   if (vcpu->arch.sysregs_loaded_on_cpu)
+   write_sysreg_el1(val, spsr);
+   else
+   vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = val;
+}
+
+static u32 vcpu_get_c1_sctlr(struct kvm_vcpu *vcpu)
+{
+   if (vcpu->arch.sysregs_loaded_on_cpu)
+   return lower_32_bits(read_sysreg_el1(sctlr));
+   else
+   return vcpu_cp15(vcpu, c1_SCTLR);
+}
+
 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
unsigned long cpsr;
unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
u32 return_offset = (is_thumb) ? 4 : 0;
-   u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+   u32 sctlr = vcpu_get_c1_sctlr(vcpu);
 
cpsr = mode | COMPAT_PSR_I_BIT;
 
@@ -51,14 +94,14 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 
mode, u32 vect_offset)
*vcpu_cpsr(vcpu) = cpsr;
 
/* Note: These now point to the banked copies */
-   *vcpu_spsr(vcpu) = new_spsr_value;
+   vcpu_set_spsr(vcpu, new_spsr_value);
*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
/* Branch to exception vector */
if (sctlr & (1 << 13))
vect_offset += 0xffff0000;
else /* always have security exceptions */
-   vect_offset += vcpu_cp15(vcpu, c12_VBAR);
+   vect_offset += vcpu_get_vbar_el1(vcpu);
 
*vcpu_pc(vcpu) = vect_offset;
 }
@@ -79,6 +122,20 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool 
is_pabt,
u32 *far, *fsr;
bool is_lpae;
 
+   /*
+* We are going to need the latest values of the following system
+* regiters:
+*   DFAR:  mapped to FAR_EL1
+*   IFAR:  mapped to FAR_EL1
+*   DFSR:  mapped to ESR_EL1
+*   TTBCR: mapped to TCR_EL1
+*/
+   if (vcpu->arch.sysregs_loaded_on_cpu) {
+   vcpu->arch.ctxt.sys_regs[FAR_EL1] = read_sysreg_el1(far);
+   vcpu->arch.ctxt.sys_regs[ESR_EL1] = read_sysreg_el1(esr);
+