Re: [PATCH 15/21] arm64: KVM: Add panic handling

2015-11-16 Thread Marc Zyngier
On 16/11/15 15:53, Ard Biesheuvel wrote:
> On 16 November 2015 at 14:11, Marc Zyngier  wrote:
>> Add the panic handler, together with the small bits of assembly
>> code to call the kernel's panic implementation.
>>
>> Signed-off-by: Marc Zyngier 
>> ---
>>  arch/arm64/kvm/hyp/hyp-entry.S | 11 ++-
>>  arch/arm64/kvm/hyp/hyp.h   |  1 +
>>  arch/arm64/kvm/hyp/switch.c| 35 +++
>>  3 files changed, 46 insertions(+), 1 deletion(-)
>>
>> diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
>> index e11a129..7218eed 100644
>> --- a/arch/arm64/kvm/hyp/hyp-entry.S
>> +++ b/arch/arm64/kvm/hyp/hyp-entry.S
>> @@ -141,7 +141,16 @@ el1_irq:
>> mov x1, #ARM_EXCEPTION_IRQ
>> b   __guest_exit
>>
>> -.macro invalid_vector  label, target = __kvm_hyp_panic
>> +ENTRY(__hyp_do_panic)
>> +   mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
>> + PSR_MODE_EL1h)
>> +   msr spsr_el2, lr
>> +   ldr lr, =panic
>> +   msr elr_el2, lr
>> +   eret
>> +ENDPROC(__hyp_do_panic)
>> +
>> +.macro invalid_vector  label, target = __hyp_panic
>> .align  2
>>  \label:
>> b \target
>> diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
>> index 240fb79..d5d500d 100644
>> --- a/arch/arm64/kvm/hyp/hyp.h
>> +++ b/arch/arm64/kvm/hyp/hyp.h
>> @@ -74,6 +74,7 @@ void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
>>  void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
>>
>>  u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
>> +void __noreturn __hyp_do_panic(unsigned long, ...);
>>
>>  #endif /* __ARM64_KVM_HYP_H__ */
>>
>> diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
>> index 06d3e20..cdc2a96 100644
>> --- a/arch/arm64/kvm/hyp/switch.c
>> +++ b/arch/arm64/kvm/hyp/switch.c
>> @@ -140,3 +140,38 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
>>
>> return exit_code;
>>  }
>> +
>> +static const char *__hyp_panic_string = "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n";
>> +
> 
> Re separating the HYP text from the kernel proper: this is exactly the
> thing that is likely to cause trouble when you execute the kernel text
> from HYP.
> 
> __hyp_panic_string is a non-const char pointer containing the absolute
> address of the string in the initializer, as seen from the high kernel
> virtual mapping.
> Better use 'static const char __hyp_panic_string[]' instead.

Definitely.

> (If it currently works fine, it is only because the compiler optimizes
> the entire variable away, and performs a relative access in the place
> where the variable is referenced.)

That, and the fact that only panic() gets passed a pointer to this
string, so it doesn't really matter where it lives in this case.

But you do have a point here, and I'll address this for the next round.

Thanks,

M.
-- 
Jazz is not dead. It just smells funny...


Re: [PATCH 15/21] arm64: KVM: Add panic handling

2015-11-16 Thread Ard Biesheuvel
On 16 November 2015 at 14:11, Marc Zyngier  wrote:
> Add the panic handler, together with the small bits of assembly
> code to call the kernel's panic implementation.
>
> Signed-off-by: Marc Zyngier 
> ---
>  arch/arm64/kvm/hyp/hyp-entry.S | 11 ++-
>  arch/arm64/kvm/hyp/hyp.h   |  1 +
>  arch/arm64/kvm/hyp/switch.c| 35 +++
>  3 files changed, 46 insertions(+), 1 deletion(-)
>
> diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
> index e11a129..7218eed 100644
> --- a/arch/arm64/kvm/hyp/hyp-entry.S
> +++ b/arch/arm64/kvm/hyp/hyp-entry.S
> @@ -141,7 +141,16 @@ el1_irq:
> mov x1, #ARM_EXCEPTION_IRQ
> b   __guest_exit
>
> -.macro invalid_vector  label, target = __kvm_hyp_panic
> +ENTRY(__hyp_do_panic)
> +   mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
> + PSR_MODE_EL1h)
> +   msr spsr_el2, lr
> +   ldr lr, =panic
> +   msr elr_el2, lr
> +   eret
> +ENDPROC(__hyp_do_panic)
> +
> +.macro invalid_vector  label, target = __hyp_panic
> .align  2
>  \label:
> b \target
> diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
> index 240fb79..d5d500d 100644
> --- a/arch/arm64/kvm/hyp/hyp.h
> +++ b/arch/arm64/kvm/hyp/hyp.h
> @@ -74,6 +74,7 @@ void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
>  void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
>
>  u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
> +void __noreturn __hyp_do_panic(unsigned long, ...);
>
>  #endif /* __ARM64_KVM_HYP_H__ */
>
> diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
> index 06d3e20..cdc2a96 100644
> --- a/arch/arm64/kvm/hyp/switch.c
> +++ b/arch/arm64/kvm/hyp/switch.c
> @@ -140,3 +140,38 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
>
> return exit_code;
>  }
> +
> +static const char *__hyp_panic_string = "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n";
> +

Re separating the HYP text from the kernel proper: this is exactly the
thing that is likely to cause trouble when you execute the kernel text
from HYP.

__hyp_panic_string is a non-const char pointer containing the absolute
address of the string in the initializer, as seen from the high kernel
virtual mapping.
Better use 'static const char __hyp_panic_string[]' instead.

(If it currently works fine, it is only because the compiler optimizes
the entire variable away, and performs a relative access in the place
where the variable is referenced.)
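
To make the distinction concrete, a minimal sketch (illustrative only,
not from the patch):

    /* Pointer form: a .data slot holds the string's absolute,
     * link-time kernel VA. Loading through that slot from the HYP
     * mapping yields an address that is only valid in the kernel's
     * high mapping.
     */
    static const char *panic_str_ptr = "HYP panic";

    /* Array form: the symbol *is* the string, so only the bytes are
     * emitted and references compile to PC-relative adrp/add, which
     * stays valid from whatever mapping the code runs in.
     */
    static const char panic_str_arr[] = "HYP panic";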


> +void __hyp_text __noreturn __hyp_panic(void)
> +{
> +   u64 spsr = read_sysreg(spsr_el2);
> +   u64 elr = read_sysreg(elr_el2);
> +   u64 par = read_sysreg(par_el1);
> +
> +   if (read_sysreg(vttbr_el2)) {
> +   struct kvm_vcpu *vcpu;
> +   struct kvm_cpu_context *host_ctxt;
> +
> +   vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
> +   host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> +   __deactivate_traps(vcpu);
> +   __deactivate_vm(vcpu);
> +   __sysreg_restore_state(host_ctxt);
> +
> +   write_sysreg(host_ctxt->gp_regs.sp_el1, sp_el1);
> +   }
> +
> +   /* Call panic for real */
> +   while (1) {
> +   unsigned long str_va = (unsigned long)__hyp_panic_string;
> +
> +   str_va -= HYP_PAGE_OFFSET;
> +   str_va += PAGE_OFFSET;
> +   __hyp_do_panic(str_va,
> +  spsr,  elr,
> +  read_sysreg(esr_el2),   read_sysreg(far_el2),
> +  read_sysreg(hpfar_el2), par,
> +  read_sysreg(tpidr_el2));
> +   }
> +}
> --
> 2.1.4


Re: [PATCH 15/21] arm64: KVM: Add panic handling

2015-11-16 Thread Marc Zyngier
On 16/11/15 14:32, Mark Rutland wrote:
>>>> +  /* Call panic for real */
>>>> +  while (1) {
>>>> +  unsigned long str_va = (unsigned long)__hyp_panic_string;
>>>> +
>>>> +  str_va -= HYP_PAGE_OFFSET;
>>>> +  str_va += PAGE_OFFSET;
>>>> +  __hyp_do_panic(str_va,
>>>> + spsr,  elr,
>>>> + read_sysreg(esr_el2),   read_sysreg(far_el2),
>>>> + read_sysreg(hpfar_el2), par,
>>>> + read_sysreg(tpidr_el2));
>>>> +  }
>>>> +}
>>>
>>> I think the while (1) here is confusing.
>>>
>>> Can we not just declare str_va at the start of the function and get rid
>>> of the loop?
>>
>> The while(1) is to prevent GCC from screaming (it otherwise believes
>> that the function actually returns, despite the __noreturn attribute).
> 
> Aha!
> 
> Perhaps a comment to that effect...?
> 
>> Or were you thinking of something else?
> 
> I just failed to derive the __noreturn problem from first principles.
> 
> Perhaps follow the __hyp_do_panic() call with an unreachable(), with the
> comment as to GCC failing to reason about the __noreturn? That would be
> less confusing than the loop, assuming that it works.

Worth giving it a try.

M.
-- 
Jazz is not dead. It just smells funny...


Re: [PATCH 17/21] arm64: KVM: Map the kernel RO section into HYP

2015-11-16 Thread Marc Zyngier
On 16/11/15 14:27, Mark Rutland wrote:
> On Mon, Nov 16, 2015 at 01:11:55PM +, Marc Zyngier wrote:
>> In order to run C code in HYP, we must make sure that the kernel's
>> RO section is mapped into HYP (otherwise things break badly).
> 
> Somewhat tangential, but do we have any strong guarantees that the hyp
> text is otherwise safe in its address space which differs from that of
> the kernel proper?
> 
> i.e. do we need something like we did for the EFI stub in commit
> e8f3010f7326c003 ("arm64/efi: isolate EFI stub from the kernel proper")?

Probably. That will make things more difficult for VHE, where there are
function calls between the kernel and the "hypervisor" (kvm_call_hyp()
and panic() are the most obvious ones).

I'll have a look, thanks for the pointer.

M.
-- 
Jazz is not dead. It just smells funny...


Re: [PATCH 15/21] arm64: KVM: Add panic handling

2015-11-16 Thread Mark Rutland
> >> +  /* Call panic for real */
> >> +  while (1) {
> >> +  unsigned long str_va = (unsigned long)__hyp_panic_string;
> >> +
> >> +  str_va -= HYP_PAGE_OFFSET;
> >> +  str_va += PAGE_OFFSET;
> >> +  __hyp_do_panic(str_va,
> >> + spsr,  elr,
> >> + read_sysreg(esr_el2),   read_sysreg(far_el2),
> >> + read_sysreg(hpfar_el2), par,
> >> + read_sysreg(tpidr_el2));
> >> +  }
> >> +}
> > 
> > I think the while (1) here is confusing.
> > 
> > Can we not just declare str_va at the start of the function and get rid
> > of the loop?
> 
> The while(1) is to prevent GCC from screaming (it otherwise believes
> that the function actually returns, despite the __noreturn attribute).

Aha!

Perhaps a comment to that effect...?

> Or were you thinking of something else?

I just failed to derive the __noreturn problem from first principles.

Perhaps follow the __hyp_do_panic() call with an unreachable(), with the
comment as to GCC failing to reason about the __noreturn? That would be
less confusing than the loop, assuming that it works.
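
Something along these lines (an untested sketch; unreachable() is the
helper from the kernel's compiler.h):

    /* Call panic for real */
    unsigned long str_va = (unsigned long)__hyp_panic_string;

    str_va -= HYP_PAGE_OFFSET;
    str_va += PAGE_OFFSET;
    __hyp_do_panic(str_va,
                   spsr, elr,
                   read_sysreg(esr_el2),   read_sysreg(far_el2),
                   read_sysreg(hpfar_el2), par,
                   read_sysreg(tpidr_el2));

    /* GCC doesn't see that __hyp_do_panic never returns despite its
     * __noreturn attribute, so state it explicitly.
     */
    unreachable();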

Mark.


Re: [PATCH 17/21] arm64: KVM: Map the kernel RO section into HYP

2015-11-16 Thread Mark Rutland
On Mon, Nov 16, 2015 at 01:11:55PM +, Marc Zyngier wrote:
> In order to run C code in HYP, we must make sure that the kernel's
> RO section is mapped into HYP (otherwise things break badly).

Somewhat tangential, but do we have any strong guarantees that the hyp
text is otherwise safe in its address space which differs from that of
the kernel proper?

i.e. do we need something like we did for the EFI stub in commit
e8f3010f7326c003 ("arm64/efi: isolate EFI stub from the kernel proper")?

Mark.

> Signed-off-by: Marc Zyngier 
> ---
>  arch/arm/kvm/arm.c | 7 +++
>  1 file changed, 7 insertions(+)
> 
> diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
> index eab83b2..6c4549a 100644
> --- a/arch/arm/kvm/arm.c
> +++ b/arch/arm/kvm/arm.c
> @@ -44,6 +44,7 @@
>  #include 
>  #include 
>  #include 
> +#include 
>  
>  #ifdef REQUIRES_VIRT
>  __asm__(".arch_extension virt");
> @@ -1072,6 +1073,12 @@ static int init_hyp_mode(void)
>   goto out_free_mappings;
>   }
>  
> + err = create_hyp_mappings(__start_rodata, __end_rodata);
> + if (err) {
> + kvm_err("Cannot map rodata section\n");
> + goto out_free_mappings;
> + }
> +
>   /*
>* Map the Hyp stack pages
>*/
> -- 
> 2.1.4
> 


Re: [PATCH 15/21] arm64: KVM: Add panic handling

2015-11-16 Thread Marc Zyngier
On 16/11/15 14:16, Mark Rutland wrote:
>> diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
>> index 06d3e20..cdc2a96 100644
>> --- a/arch/arm64/kvm/hyp/switch.c
>> +++ b/arch/arm64/kvm/hyp/switch.c
>> @@ -140,3 +140,38 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
>>  
>>  return exit_code;
>>  }
>> +
>> +static const char *__hyp_panic_string = "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n";
> 
> I assume that if [1] goes in we'll update this to match.

Definitely.

>> +
>> +void __hyp_text __noreturn __hyp_panic(void)
>> +{
>> +u64 spsr = read_sysreg(spsr_el2);
>> +u64 elr = read_sysreg(elr_el2);
>> +u64 par = read_sysreg(par_el1);
>> +
>> +if (read_sysreg(vttbr_el2)) {
>> +struct kvm_vcpu *vcpu;
>> +struct kvm_cpu_context *host_ctxt;
>> +
>> +vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
>> +host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
>> +__deactivate_traps(vcpu);
>> +__deactivate_vm(vcpu);
>> +__sysreg_restore_state(host_ctxt);
>> +
>> +write_sysreg(host_ctxt->gp_regs.sp_el1, sp_el1);
> 
> __sysreg_restore_state restores the host sp_el1, no?

Better safe than sorry! ;-) Looks like a leftover from some ancient
version... I'll fix that.

>> +}
>> +
>> +/* Call panic for real */
>> +while (1) {
>> +unsigned long str_va = (unsigned long)__hyp_panic_string;
>> +
>> +str_va -= HYP_PAGE_OFFSET;
>> +str_va += PAGE_OFFSET;
>> +__hyp_do_panic(str_va,
>> +   spsr,  elr,
>> +   read_sysreg(esr_el2),   read_sysreg(far_el2),
>> +   read_sysreg(hpfar_el2), par,
>> +   read_sysreg(tpidr_el2));
>> +}
>> +}
> 
> I think the while (1) here is confusing.
> 
> Can we not just declare str_va at the start of the function and get rid
> of the loop?

The while(1) is to prevent GCC from screaming (it otherwise believes
that the function actually returns, despite the __noreturn attribute).

Or were you thinking of something else?

M.
-- 
Jazz is not dead. It just smells funny...


Re: [PATCH 15/21] arm64: KVM: Add panic handling

2015-11-16 Thread Mark Rutland
> diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
> index 06d3e20..cdc2a96 100644
> --- a/arch/arm64/kvm/hyp/switch.c
> +++ b/arch/arm64/kvm/hyp/switch.c
> @@ -140,3 +140,38 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
>  
>   return exit_code;
>  }
> +
> +static const char *__hyp_panic_string = "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n";

I assume that if [1] goes in we'll update this to match.

> +
> +void __hyp_text __noreturn __hyp_panic(void)
> +{
> + u64 spsr = read_sysreg(spsr_el2);
> + u64 elr = read_sysreg(elr_el2);
> + u64 par = read_sysreg(par_el1);
> +
> + if (read_sysreg(vttbr_el2)) {
> + struct kvm_vcpu *vcpu;
> + struct kvm_cpu_context *host_ctxt;
> +
> + vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
> + host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> + __deactivate_traps(vcpu);
> + __deactivate_vm(vcpu);
> + __sysreg_restore_state(host_ctxt);
> +
> + write_sysreg(host_ctxt->gp_regs.sp_el1, sp_el1);

__sysreg_restore_state restores the host sp_el1, no?

> + }
> +
> + /* Call panic for real */
> + while (1) {
> + unsigned long str_va = (unsigned long)__hyp_panic_string;
> +
> + str_va -= HYP_PAGE_OFFSET;
> + str_va += PAGE_OFFSET;
> + __hyp_do_panic(str_va,
> +spsr,  elr,
> +read_sysreg(esr_el2),   read_sysreg(far_el2),
> +read_sysreg(hpfar_el2), par,
> +read_sysreg(tpidr_el2));
> + }
> +}

I think the while (1) here is confusing.

Can we not just declare str_va at the start of the function and get rid
of the loop?

Thanks,
Mark.

[1] 
http://lists.infradead.org/pipermail/linux-arm-kernel/2015-November/385199.html


[PATCH] arm64: kvm: report original PAR_EL1 upon panic

2015-11-16 Thread Mark Rutland
If we call __kvm_hyp_panic while a guest context is active, we call
__restore_sysregs before acquiring the system register values for the
panic, in the process throwing away the PAR_EL1 value at the point of
the panic.

This patch modifies __kvm_hyp_panic to stash the PAR_EL1 value prior to
restoring host register values, enabling us to report the original
values at the point of the panic.

Signed-off-by: Mark Rutland 
Cc: Marc Zyngier 
Cc: Christoffer Dall 
---
 arch/arm64/kvm/hyp.S | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 11183ce..2a8a4aa 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -864,6 +864,10 @@ ENTRY(__kvm_flush_vm_context)
 ENDPROC(__kvm_flush_vm_context)
 
 __kvm_hyp_panic:
+   // Stash PAR_EL1 before corrupting it in __restore_sysregs
+   mrs x0, par_el1
   push x0, xzr
+
// Guess the context by looking at VTTBR:
// If zero, then we're already a host.
// Otherwise restore a minimal host context before panicing.
@@ -898,7 +902,7 @@ __kvm_hyp_panic:
mrs x3, esr_el2
mrs x4, far_el2
mrs x5, hpfar_el2
-   mrs x6, par_el1
+   pop x6, xzr // active context PAR_EL1
mrs x7, tpidr_el2
 
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
-- 
1.9.1



[PATCH] arm64: kvm: avoid %p in __kvm_hyp_panic

2015-11-16 Thread Mark Rutland
Currently __kvm_hyp_panic uses %p for values which are not pointers,
such as the ESR value. This can confusingly lead to "(null)" being
printed for the value.

Use %x instead, and only use %p for host pointers.

Signed-off-by: Mark Rutland 
Acked-by: Marc Zyngier 
Cc: Christoffer Dall 
---
 arch/arm64/kvm/hyp.S | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 1599701..11183ce 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -914,7 +914,7 @@ __kvm_hyp_panic:
 ENDPROC(__kvm_hyp_panic)
 
 __hyp_panic_str:
-   .ascii  "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"
+   .ascii  "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
 
.align  2
 
-- 
1.9.1
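
For context, the failure mode being fixed (a hedged illustration, not
part of the patch): the kernel's %p handling prints the literal string
"(null)" for a NULL pointer, so a register that legitimately reads as
zero is rendered as text instead of a number:

    printk("ESR:%p\n",   (void *)0UL);  /* -> "ESR:(null)"   */
    printk("ESR:%08x\n", 0U);           /* -> "ESR:00000000" */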



[PATCH 09/21] arm64: KVM: Implement guest entry

2015-11-16 Thread Marc Zyngier
Contrary to the previous patch, the guest entry is fairly different
from its assembly counterpart, mostly because it is only concerned
with saving/restoring the GP registers, and nothing else.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/Makefile |   1 +
 arch/arm64/kvm/hyp/entry.S  | 155 
 arch/arm64/kvm/hyp/hyp.h|   2 +
 3 files changed, 158 insertions(+)
 create mode 100644 arch/arm64/kvm/hyp/entry.S

diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index ec14cac..1e1ff06 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += entry.o
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
new file mode 100644
index 000..2c4449a
--- /dev/null
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#define CPU_GP_REG_OFFSET(x)   (CPU_GP_REGS + x)
+#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+
+   .text
   .pushsection .hyp.text, "ax"
+
+.macro save_common_regs ctxt
+   stp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+   stp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+   stp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+   stp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+   stp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+   stp x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+.macro restore_common_regs ctxt
+   ldp x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+   ldp x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+   ldp x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+   ldp x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+   ldp x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+   ldp x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+.macro save_host_regs reg
+   save_common_regs \reg
+.endm
+
+.macro restore_host_regs reg
+   restore_common_regs \reg
+.endm
+
+.macro save_guest_regs
+   // x0 is the vcpu address
+   // x1 is the return code, do not corrupt!
+   // x2 is the cpu context
+   // x3 is a tmp register
+   // Guest's x0-x3 are on the stack
+
+   add x2, x0, #VCPU_CONTEXT
+
+   // Compute base to save registers
+   stp x4, x5,   [x2, #CPU_XREG_OFFSET(4)]
+   stp x6, x7,   [x2, #CPU_XREG_OFFSET(6)]
+   stp x8, x9,   [x2, #CPU_XREG_OFFSET(8)]
+   stp x10, x11, [x2, #CPU_XREG_OFFSET(10)]
+   stp x12, x13, [x2, #CPU_XREG_OFFSET(12)]
+   stp x14, x15, [x2, #CPU_XREG_OFFSET(14)]
+   stp x16, x17, [x2, #CPU_XREG_OFFSET(16)]
+   str x18,  [x2, #CPU_XREG_OFFSET(18)]
+
+   pop x6, x7  // x2, x3
+   pop x4, x5  // x0, x1
+
+   stp x4, x5, [x2, #CPU_XREG_OFFSET(0)]
+   stp x6, x7, [x2, #CPU_XREG_OFFSET(2)]
+
+   save_common_regs x2
+.endm
+
+.macro restore_guest_regs
+   // Assume vcpu in x0, clobbers everything else
+
+   add x2, x0, #VCPU_CONTEXT
+
+   // Prepare x0-x3 for later restore
+   ldp x4, x5, [x2, #CPU_XREG_OFFSET(0)]
+   ldp x6, x7, [x2, #CPU_XREG_OFFSET(2)]
+   push x4, x5 // Push x0-x3 on the stack
+   push x6, x7
+
+   // x4-x18
+   ldp x4, x5,   [x2, #CPU_XREG_OFFSET(4)] 
+   ldp x6, x7,   [x2, #CPU_XREG_OFFSET(6)] 
+   ldp x8, x9,   [x2, #CPU_XREG_OFFSET(8)] 
+   ldp x10, x11, [x2, #CPU_XREG_OFFSET(10)]
+   ldp x12, x13, [x2, #CPU_XREG_OFFSET(12)]
+   ldp x14, x15, [x2, #CPU_XREG_OFFSET(14)]
+   ldp x16, x17, [x2, #CPU_XREG_OFFSET(16)]
+   ldr x18,  [x2, #CPU_XREG_OFFSET(18)]
+
+   // x19-x29, lr
+   restore_common_regs x2
+
+   // Last bits of the 64bit state
+   pop x2, x3
+   pop x0, x1
+
+   // Do not touch any register after this!
+.endm
+
+/*
+ * u64 __guest_enter(struct kvm_vcpu *vcpu,
+ *  struct kvm_cpu_context *host_ctxt);
+ */
+ENTRY(__guest_enter)
+   // x0: vcpu
+   // x1: host context

[PATCH 17/21] arm64: KVM: Map the kernel RO section into HYP

2015-11-16 Thread Marc Zyngier
In order to run C code in HYP, we must make sure that the kernel's
RO section is mapped into HYP (otherwise things break badly).

Signed-off-by: Marc Zyngier 
---
 arch/arm/kvm/arm.c | 7 +++
 1 file changed, 7 insertions(+)

diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index eab83b2..6c4549a 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -44,6 +44,7 @@
 #include 
 #include 
 #include 
+#include 
 
 #ifdef REQUIRES_VIRT
 __asm__(".arch_extension   virt");
@@ -1072,6 +1073,12 @@ static int init_hyp_mode(void)
goto out_free_mappings;
}
 
+   err = create_hyp_mappings(__start_rodata, __end_rodata);
+   if (err) {
+   kvm_err("Cannot map rodata section\n");
+   goto out_free_mappings;
+   }
+
/*
 * Map the Hyp stack pages
 */
-- 
2.1.4



[PATCH 15/21] arm64: KVM: Add panic handling

2015-11-16 Thread Marc Zyngier
Add the panic handler, together with the small bits of assembly
code to call the kernel's panic implementation.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/hyp-entry.S | 11 ++-
 arch/arm64/kvm/hyp/hyp.h   |  1 +
 arch/arm64/kvm/hyp/switch.c| 35 +++
 3 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index e11a129..7218eed 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -141,7 +141,16 @@ el1_irq:
mov x1, #ARM_EXCEPTION_IRQ
b   __guest_exit
 
-.macro invalid_vector  label, target = __kvm_hyp_panic
+ENTRY(__hyp_do_panic)
+   mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
+ PSR_MODE_EL1h)
+   msr spsr_el2, lr
+   ldr lr, =panic
+   msr elr_el2, lr
+   eret
+ENDPROC(__hyp_do_panic)
+
+.macro invalid_vector  label, target = __hyp_panic
.align  2
 \label:
b \target
diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
index 240fb79..d5d500d 100644
--- a/arch/arm64/kvm/hyp/hyp.h
+++ b/arch/arm64/kvm/hyp/hyp.h
@@ -74,6 +74,7 @@ void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
 void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
 
 u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
+void __noreturn __hyp_do_panic(unsigned long, ...);
 
 #endif /* __ARM64_KVM_HYP_H__ */
 
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 06d3e20..cdc2a96 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -140,3 +140,38 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 
return exit_code;
 }
+
+static const char *__hyp_panic_string = "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n";
+
+void __hyp_text __noreturn __hyp_panic(void)
+{
+   u64 spsr = read_sysreg(spsr_el2);
+   u64 elr = read_sysreg(elr_el2);
+   u64 par = read_sysreg(par_el1);
+
+   if (read_sysreg(vttbr_el2)) {
+   struct kvm_vcpu *vcpu;
+   struct kvm_cpu_context *host_ctxt;
+
+   vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
+   host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+   __deactivate_traps(vcpu);
+   __deactivate_vm(vcpu);
+   __sysreg_restore_state(host_ctxt);
+
+   write_sysreg(host_ctxt->gp_regs.sp_el1, sp_el1);
+   }
+
+   /* Call panic for real */
+   while (1) {
+   unsigned long str_va = (unsigned long)__hyp_panic_string;
+
+   str_va -= HYP_PAGE_OFFSET;
+   str_va += PAGE_OFFSET;
+   __hyp_do_panic(str_va,
+  spsr,  elr,
+  read_sysreg(esr_el2),   read_sysreg(far_el2),
+  read_sysreg(hpfar_el2), par,
+  read_sysreg(tpidr_el2));
+   }
+}
-- 
2.1.4
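
The address juggling in __hyp_panic relies on a fixed relationship
between the two mappings: the string is linked at PAGE_OFFSET but
referenced from code running at HYP_PAGE_OFFSET, so the conversion is
pure offset arithmetic. As a sketch (assuming exactly that mapping
relationship):

    /* Convert a symbol address as seen from HYP back into the kernel
     * VA that panic(), running back at EL1, can dereference.
     */
    static inline unsigned long hyp_to_kern_va(unsigned long hyp_va)
    {
        return hyp_va - HYP_PAGE_OFFSET + PAGE_OFFSET;
    }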



[PATCH 10/21] arm64: KVM: Add patchable function selector

2015-11-16 Thread Marc Zyngier
KVM so far relies on code patching, and is likely to use it more
in the future. The main issue is that our alternative system works
at the instruction level, while we'd like to have alternatives at
the function level.

In order to cope with this, add the "hyp_alternate_select" macro that
outputs a brief sequence of code that in turn can be patched, allowing
an alternative function to be selected.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/hyp.h | 16 
 1 file changed, 16 insertions(+)

diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
index 2937552..bf13238 100644
--- a/arch/arm64/kvm/hyp/hyp.h
+++ b/arch/arm64/kvm/hyp/hyp.h
@@ -27,6 +27,22 @@
 
 #define kern_hyp_va(v) (typeof(v))((unsigned long)v & HYP_PAGE_OFFSET_MASK)
 
+/*
+ * Generates patchable code sequences that are used to switch between
+ * two implementations of a function, depending on the availability of
+ * a feature.
+ */
+#define hyp_alternate_select(fname, orig, alt, cond)   \
+typeof(orig) * __hyp_text fname(void)  \
+{  \
+   typeof(alt) *val = orig;\
+   asm volatile(ALTERNATIVE("nop   \n",\
+"mov   %0, %1  \n",\
+cond)  \
+: "+r" (val) : "r" (alt)); \
+   return val; \
+}
+
 void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
 void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
 
-- 
2.1.4
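
A usage sketch (function names invented for illustration; patch 11 of
this series has the real call sites):

    /* Two implementations of the same save operation: */
    static void __hyp_text __gic_v2_save(struct kvm_vcpu *vcpu) { /* ... */ }
    static void __hyp_text __gic_v3_save(struct kvm_vcpu *vcpu) { /* ... */ }

    /* Emits a small patchable selector: it returns __gic_v2_save
     * until the ARM64_HAS_SYSREG_GIC_CPUIF capability patches the nop
     * into a mov, after which it returns __gic_v3_save.
     */
    static hyp_alternate_select(__gic_call_save,
                                __gic_v2_save, __gic_v3_save,
                                ARM64_HAS_SYSREG_GIC_CPUIF);

    /* The selector returns a function pointer, hence the double call: */
    __gic_call_save()(vcpu);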



[PATCH 19/21] arm64: KVM: Turn system register numbers to an enum

2015-11-16 Thread Marc Zyngier
Having the system register numbers as #defines has been a pain
since day one, as the ordering is pretty fragile, and moving
things around leads to renumbering and epic conflict resolutions.

Now that we're mostly accessing the sysreg file in C, an enum is
a much better type to use, and we can clean things up a bit.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/include/asm/kvm_asm.h | 76 -
 arch/arm64/include/asm/kvm_emulate.h |  1 -
 arch/arm64/include/asm/kvm_host.h| 81 +++-
 arch/arm64/include/asm/kvm_mmio.h|  1 -
 arch/arm64/kernel/asm-offsets.c  |  1 +
 arch/arm64/kvm/guest.c   |  1 -
 arch/arm64/kvm/handle_exit.c |  1 +
 arch/arm64/kvm/hyp/debug-sr.c|  1 +
 arch/arm64/kvm/hyp/entry.S   |  3 +-
 arch/arm64/kvm/hyp/sysreg-sr.c   |  1 +
 arch/arm64/kvm/sys_regs.c|  1 +
 virt/kvm/arm/vgic-v3.c   |  1 +
 12 files changed, 87 insertions(+), 82 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 5e37710..52b777b 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -20,82 +20,6 @@
 
 #include 
 
-/*
- * 0 is reserved as an invalid value.
- * Order *must* be kept in sync with the hyp switch code.
- */
-#define MPIDR_EL1   1   /* MultiProcessor Affinity Register */
-#define CSSELR_EL1  2   /* Cache Size Selection Register */
-#define SCTLR_EL1   3   /* System Control Register */
-#define ACTLR_EL1   4   /* Auxiliary Control Register */
-#define CPACR_EL1   5   /* Coprocessor Access Control */
-#define TTBR0_EL1   6   /* Translation Table Base Register 0 */
-#define TTBR1_EL1   7   /* Translation Table Base Register 1 */
-#define TCR_EL1     8   /* Translation Control Register */
-#define ESR_EL1     9   /* Exception Syndrome Register */
-#define AFSR0_EL1   10  /* Auxilary Fault Status Register 0 */
-#define AFSR1_EL1   11  /* Auxilary Fault Status Register 1 */
-#define FAR_EL1     12  /* Fault Address Register */
-#define MAIR_EL1    13  /* Memory Attribute Indirection Register */
-#define VBAR_EL1    14  /* Vector Base Address Register */
-#define CONTEXTIDR_EL1  15  /* Context ID Register */
-#define TPIDR_EL0   16  /* Thread ID, User R/W */
-#define TPIDRRO_EL0 17  /* Thread ID, User R/O */
-#define TPIDR_EL1   18  /* Thread ID, Privileged */
-#define AMAIR_EL1   19  /* Aux Memory Attribute Indirection Register */
-#define CNTKCTL_EL1 20  /* Timer Control Register (EL1) */
-#define PAR_EL1     21  /* Physical Address Register */
-#define MDSCR_EL1   22  /* Monitor Debug System Control Register */
-#define MDCCINT_EL1 23  /* Monitor Debug Comms Channel Interrupt Enable Reg */
-
-/* 32bit specific registers. Keep them at the end of the range */
-#define DACR32_EL2  24  /* Domain Access Control Register */
-#define IFSR32_EL2  25  /* Instruction Fault Status Register */
-#define FPEXC32_EL2 26  /* Floating-Point Exception Control Register */
-#define DBGVCR32_EL2    27  /* Debug Vector Catch Register */
-#define NR_SYS_REGS 28
-
-/* 32bit mapping */
-#define c0_MPIDR   (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
-#define c0_CSSELR  (CSSELR_EL1 * 2)    /* Cache Size Selection Register */
-#define c1_SCTLR   (SCTLR_EL1 * 2) /* System Control Register */
-#define c1_ACTLR   (ACTLR_EL1 * 2) /* Auxiliary Control Register */
-#define c1_CPACR   (CPACR_EL1 * 2) /* Coprocessor Access Control */
-#define c2_TTBR0   (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
-#define c2_TTBR0_high  (c2_TTBR0 + 1)  /* TTBR0 top 32 bits */
-#define c2_TTBR1   (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
-#define c2_TTBR1_high  (c2_TTBR1 + 1)  /* TTBR1 top 32 bits */
-#define c2_TTBCR   (TCR_EL1 * 2)   /* Translation Table Base Control R. */
-#define c3_DACR    (DACR32_EL2 * 2)    /* Domain Access Control Register */
-#define c5_DFSR    (ESR_EL1 * 2)   /* Data Fault Status Register */
-#define c5_IFSR    (IFSR32_EL2 * 2)    /* Instruction Fault Status Register */
-#define c5_ADFSR   (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
-#define c5_AIFSR   (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
-#define c6_DFAR    (FAR_EL1 * 2)   /* Data Fault Address Register */
-#define c6_IFAR    (c6_DFAR + 1)   /* Instruction Fault Address Register */
-#define c7_PAR     (PAR_EL1 * 2)   /* Physical Address Register */
-#define c7_PAR_high    (c7_PAR + 1)    /* PAR top 32 bits */
-#define c10_PRRR   (MAI
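
The message is truncated here. The replacement the patch introduces is
an enum along these lines (a sketch of the shape, not the verbatim hunk):

    enum vcpu_sysreg {
        __INVALID_SYSREG__,     /* 0 is reserved as an invalid value */
        MPIDR_EL1,              /* MultiProcessor Affinity Register */
        CSSELR_EL1,             /* Cache Size Selection Register */
        SCTLR_EL1,              /* System Control Register */
        /* ... */
        NR_SYS_REGS             /* Nothing after this line! */
    };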

[PATCH 13/21] arm64: KVM: Implement TLB handling

2015-11-16 Thread Marc Zyngier
Implement the TLB handling as a direct translation of the assembly
code version.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/Makefile |  1 +
 arch/arm64/kvm/hyp/tlb.c| 72 +
 2 files changed, 73 insertions(+)
 create mode 100644 arch/arm64/kvm/hyp/tlb.c

diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 56238d0..1a529f5 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += switch.o
 obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
+obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
new file mode 100644
index 000..d4a07d0
--- /dev/null
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hyp.h"
+
+void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+{
+   dsb(ishst);
+
+   /* Switch to requested VMID */
+   kvm = kern_hyp_va(kvm);
+   write_sysreg(kvm->arch.vttbr, vttbr_el2);
+   isb();
+
+   /*
+* We could do so much better if we had the VA as well.
+* Instead, we invalidate Stage-2 for this IPA, and the
+* whole of Stage-1. Weep...
+*/
+   ipa >>= 12;
+   asm volatile("tlbi ipas2e1is, %0" : : "r" (ipa));
+   dsb(ish);
+   /*
+* We have to ensure completion of the invalidation at Stage-2,
+* since a table walk on another CPU could refill a TLB with a
+* complete (S1 + S2) walk based on the old Stage-2 mapping if
+* the Stage-1 invalidation happened first.
+*/
+   asm volatile("tlbi vmalle1is" : : );
+   dsb(ish);
+   isb();
+
+   write_sysreg(0, vttbr_el2);
+}
+
+void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
+{
+   dsb(ishst);
+
+   /* Switch to requested VMID */
+   kvm = kern_hyp_va(kvm);
+   write_sysreg(kvm->arch.vttbr, vttbr_el2);
+   isb();
+
+   asm volatile("tlbi vmalls12e1is" : : );
+   dsb(ish);
+   isb();
+
+   write_sysreg(0, vttbr_el2);
+}
+
+void __hyp_text __tlb_flush_vm_context(void)
+{
+   dsb(ishst);
+   asm volatile("tlbi alle1is  \n"
+"ic ialluis  ": : );
+   dsb(ish);
+}
-- 
2.1.4



[PATCH 11/21] arm64: KVM: Implement the core world switch

2015-11-16 Thread Marc Zyngier
Implement the core of the world switch in C. Not everything is there
yet, and there is nothing to re-enter the world switch either.

But this already outlines the code structure well enough.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/Makefile |   1 +
 arch/arm64/kvm/hyp/switch.c | 134 
 2 files changed, 135 insertions(+)
 create mode 100644 arch/arm64/kvm/hyp/switch.c

diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 1e1ff06..9c11b0f 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += switch.o
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
new file mode 100644
index 000..a3af81a
--- /dev/null
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hyp.h"
+
+static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
+{
+   u64 val;
+
+   /*
+* We are about to set CPTR_EL2.TFP to trap all floating point
+* register accesses to EL2, however, the ARM ARM clearly states that
+* traps are only taken to EL2 if the operation would not otherwise
+* trap to EL1.  Therefore, always make sure that for 32-bit guests,
+* we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
+*/
+   val = vcpu->arch.hcr_el2;
+   if (val & HCR_RW) {
+   write_sysreg(1 << 30, fpexc32_el2);
+   isb();
+   }
+   write_sysreg(val, hcr_el2);
+   write_sysreg(1 << 15, hstr_el2);
+   write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
+   write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
+}
+
+static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
+{
+   write_sysreg(HCR_RW, hcr_el2);
+   write_sysreg(0, hstr_el2);
+   write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
+   write_sysreg(0, cptr_el2);
+}
+
+static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
+{
+   struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+   write_sysreg(kvm->arch.vttbr, vttbr_el2);
+}
+
+static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
+{
+   write_sysreg(0, vttbr_el2);
+}
+
+static hyp_alternate_select(__vgic_call_save_state,
+   __vgic_v2_save_state, __vgic_v3_save_state,
+   ARM64_HAS_SYSREG_GIC_CPUIF);
+
+static hyp_alternate_select(__vgic_call_restore_state,
+   __vgic_v2_restore_state, __vgic_v3_restore_state,
+   ARM64_HAS_SYSREG_GIC_CPUIF);
+
+static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
+{
+   __vgic_call_save_state()(vcpu);
+   write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
+}
+
+static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
+{
+   u64 val;
+
+   val = read_sysreg(hcr_el2);
+   val |=  HCR_INT_OVERRIDE;
+   val |= vcpu->arch.irq_lines;
+   write_sysreg(val, hcr_el2);
+
+   __vgic_call_restore_state()(vcpu);
+}
+
+int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
+{
+   struct kvm_cpu_context *host_ctxt;
+   struct kvm_cpu_context *guest_ctxt;
+   u64 exit_code;
+
+   vcpu = kern_hyp_va(vcpu);
+   write_sysreg(vcpu, tpidr_el2);
+
+   host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+   guest_ctxt = &vcpu->arch.ctxt;
+
+   __sysreg_save_state(host_ctxt);
+   __debug_cond_save_state(vcpu, &vcpu->arch.host_debug_state, host_ctxt);
+
+   __activate_traps(vcpu);
+   __activate_vm(vcpu);
+
+   __vgic_restore_state(vcpu);
+   __timer_restore_state(vcpu);
+
+   /*
+* We must restore the 32-bit state before the sysregs, thanks
+* to Cortex-A57 erratum #852523.
+*/
+   __sysreg32_restore_state(vcpu);
+   __sysreg_restore_state(guest_ctxt);
+   __debug_restore_state(vcpu, &vcpu->arch.vcpu_debug_state, guest_ctxt);
+
+   /* Jump in the fire! */
+   exit_code = __guest_enter(vcpu, host_ctxt);
+   /* And we're baaack! */
+
+   __sysreg_save_state(guest_ctxt);
+   __sysreg32_save_state(vcpu);
+

[PATCH 08/21] arm64: KVM: Implement debug save/restore

2015-11-16 Thread Marc Zyngier
Implement the debug save restore as a direct translation of
the assembly code version.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/Makefile   |   1 +
 arch/arm64/kvm/hyp/debug-sr.c | 132 ++
 arch/arm64/kvm/hyp/hyp.h  |  13 +
 3 files changed, 146 insertions(+)
 create mode 100644 arch/arm64/kvm/hyp/debug-sr.c

diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index ec94200..ec14cac 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
new file mode 100644
index 000..118ea39
--- /dev/null
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include 
+#include 
+
+#include 
+
+#include "hyp.h"
+
+#define read_debug(r,n)    read_sysreg(r##n##_el1)
+#define write_debug(v,r,n) write_sysreg(v, r##n##_el1)
+
+#define save_debug(ptr,reg,nr) \
+   switch (nr) {   \
+   case 15:ptr[15] = read_debug(reg, 15);  \
+   case 14:ptr[14] = read_debug(reg, 14);  \
+   case 13:ptr[13] = read_debug(reg, 13);  \
+   case 12:ptr[12] = read_debug(reg, 12);  \
+   case 11:ptr[11] = read_debug(reg, 11);  \
+   case 10:ptr[10] = read_debug(reg, 10);  \
+   case 9: ptr[9] = read_debug(reg, 9);\
+   case 8: ptr[8] = read_debug(reg, 8);\
+   case 7: ptr[7] = read_debug(reg, 7);\
+   case 6: ptr[6] = read_debug(reg, 6);\
+   case 5: ptr[5] = read_debug(reg, 5);\
+   case 4: ptr[4] = read_debug(reg, 4);\
+   case 3: ptr[3] = read_debug(reg, 3);\
+   case 2: ptr[2] = read_debug(reg, 2);\
+   case 1: ptr[1] = read_debug(reg, 1);\
+   default:ptr[0] = read_debug(reg, 0);\
+   }
+
+#define restore_debug(ptr,reg,nr)  \
+   switch (nr) {   \
+   case 15:write_debug(ptr[15], reg, 15);  \
+   case 14:write_debug(ptr[14], reg, 14);  \
+   case 13:write_debug(ptr[13], reg, 13);  \
+   case 12:write_debug(ptr[12], reg, 12);  \
+   case 11:write_debug(ptr[11], reg, 11);  \
+   case 10:write_debug(ptr[10], reg, 10);  \
+   case 9: write_debug(ptr[9], reg, 9);\
+   case 8: write_debug(ptr[8], reg, 8);\
+   case 7: write_debug(ptr[7], reg, 7);\
+   case 6: write_debug(ptr[6], reg, 6);\
+   case 5: write_debug(ptr[5], reg, 5);\
+   case 4: write_debug(ptr[4], reg, 4);\
+   case 3: write_debug(ptr[3], reg, 3);\
+   case 2: write_debug(ptr[2], reg, 2);\
+   case 1: write_debug(ptr[1], reg, 1);\
+   default:write_debug(ptr[0], reg, 0);\
+   }
+
+void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
+  struct kvm_guest_debug_arch *dbg,
+  struct kvm_cpu_context *ctxt)
+{
+   if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY) {
+   u64 aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
+   int brps, wrps;
+
+   brps = (aa64dfr0 >> 12) & 0xf;
+   wrps = (aa64dfr0 >> 20) & 0xf;
+
+   save_debug(dbg->dbg_bcr, dbgbcr, brps);
+

[PATCH 16/21] arm64: KVM: Add compatibility aliases

2015-11-16 Thread Marc Zyngier
So far, we've implemented the new world switch with a completely
different namespace, so that we could have both implementations
compiled in.

Let's take things one step further by adding weak aliases that
have the same names as the original implementation. The weak
attribute allows the new implementation to be overridden by the
old one, and everything still works.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/debug-sr.c   | 3 +++
 arch/arm64/kvm/hyp/hyp-entry.S  | 3 +++
 arch/arm64/kvm/hyp/switch.c | 3 +++
 arch/arm64/kvm/hyp/tlb.c| 9 +
 arch/arm64/kvm/hyp/vgic-v3-sr.c | 3 +++
 5 files changed, 21 insertions(+)

diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 118ea39..042d074 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -130,3 +130,6 @@ u32 __hyp_text __debug_read_mdcr_el2(void)
 {
return read_sysreg(mdcr_el2);
 }
+
+__alias(__debug_read_mdcr_el2)
+u32 __weak __kvm_get_mdcr_el2(void);
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 7218eed..28de58f 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -175,6 +175,8 @@ ENDPROC(\label)
 
.align 11
 
+   .weak   __kvm_hyp_vector
+ENTRY(__kvm_hyp_vector)
 ENTRY(__hyp_vector)
ventry  el2t_sync_invalid   // Synchronous EL2t
ventry  el2t_irq_invalid// IRQ EL2t
@@ -196,3 +198,4 @@ ENTRY(__hyp_vector)
ventry  el1_fiq_invalid // FIQ 32-bit EL1
ventry  el1_error_invalid   // Error 32-bit EL1
 ENDPROC(__hyp_vector)
+ENDPROC(__kvm_hyp_vector)
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index cdc2a96..ef58066 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -141,6 +141,9 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
return exit_code;
 }
 
+__alias(__guest_run)
+int __weak __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
static const char *__hyp_panic_string = "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n";
 
 void __hyp_text __noreturn __hyp_panic(void)
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index d4a07d0..2c279a8 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -47,6 +47,9 @@ void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
write_sysreg(0, vttbr_el2);
 }
 
+__alias(__tlb_flush_vmid_ipa)
+void __weak __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+
 void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
 {
dsb(ishst);
@@ -63,6 +66,9 @@ void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
write_sysreg(0, vttbr_el2);
 }
 
+__alias(__tlb_flush_vmid)
+void __weak __kvm_tlb_flush_vmid(struct kvm *kvm);
+
 void __hyp_text __tlb_flush_vm_context(void)
 {
dsb(ishst);
@@ -70,3 +76,6 @@ void __hyp_text __tlb_flush_vm_context(void)
 "ic ialluis  ": : );
dsb(ish);
 }
+
+__alias(__tlb_flush_vm_context)
+void __weak __kvm_flush_vm_context(void);
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index f2289ab..0cf316c 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -220,3 +220,6 @@ u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void)
 {
return read_gicreg(ICH_VTR_EL2);
 }
+
+__alias(__vgic_v3_read_ich_vtr_el2)
+u64 __weak __vgic_v3_get_ich_vtr_el2(void);
-- 
2.1.4
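
The linker semantics being relied on, reduced to essentials (a sketch
with invented names, assuming GCC's alias and weak attributes):

    /* New C implementation, plus a weak alias under the legacy name: */
    u32 new_impl(void) { return 1; }
    __alias(new_impl) u32 __weak legacy_name(void);

    /* While the old code still provides a strong definition of
     * legacy_name, the linker prefers it and the weak alias loses.
     * Once the old implementation is deleted (patch 21), legacy_name
     * resolves to new_impl and the __weak can be dropped.
     */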



[PATCH 12/21] arm64: KVM: Implement fpsimd save/restore

2015-11-16 Thread Marc Zyngier
Implement the fpsimd save restore, keeping the lazy part in
assembler (as returning to C would be overkill).

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/Makefile |  1 +
 arch/arm64/kvm/hyp/entry.S  | 32 +++-
 arch/arm64/kvm/hyp/fpsimd.S | 33 +
 arch/arm64/kvm/hyp/hyp.h|  3 +++
 arch/arm64/kvm/hyp/switch.c |  8 
 5 files changed, 76 insertions(+), 1 deletion(-)
 create mode 100644 arch/arm64/kvm/hyp/fpsimd.S

diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 9c11b0f..56238d0 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -9,3 +9,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 2c4449a..7552922 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -27,6 +27,7 @@
 
 #define CPU_GP_REG_OFFSET(x)   (CPU_GP_REGS + x)
 #define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+#define CPU_SYSREG_OFFSET(x)   (CPU_SYSREGS + 8*x)
 
.text
   .pushsection .hyp.text, "ax"
@@ -152,4 +153,33 @@ ENTRY(__guest_exit)
ret
 ENDPROC(__guest_exit)
 
-   /* Insert fault handling here */
+ENTRY(__fpsimd_guest_restore)
+   push x4, lr
+
+   mrs x2, cptr_el2
+   bic x2, x2, #CPTR_EL2_TFP
+   msr cptr_el2, x2
+   isb
+
+   mrs x3, tpidr_el2
+
+   ldr x0, [x3, #VCPU_HOST_CONTEXT]
+   kern_hyp_va x0
+   add x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
+   bl  __fpsimd_save_state
+
+   add x2, x3, #VCPU_CONTEXT
+   add x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
+   bl  __fpsimd_restore_state
+
+   mrs x1, hcr_el2
+   tbnz x1, #HCR_RW_SHIFT, 1f
+   ldr x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]
+   msr fpexc32_el2, x4
+1:
+   pop x4, lr
+   pop x2, x3
+   pop x0, x1
+
+   eret
+ENDPROC(__fpsimd_guest_restore)
diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
new file mode 100644
index 000..da3f22c
--- /dev/null
+++ b/arch/arm64/kvm/hyp/fpsimd.S
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include 
+
+#include 
+
+   .text
   .pushsection .hyp.text, "ax"
+
+ENTRY(__fpsimd_save_state)
+   fpsimd_save x0, 1
+   ret
+ENDPROC(__fpsimd_save_state)
+
+ENTRY(__fpsimd_restore_state)
+   fpsimd_restore  x0, 1
+   ret
+ENDPROC(__fpsimd_restore_state)
diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
index bf13238..240fb79 100644
--- a/arch/arm64/kvm/hyp/hyp.h
+++ b/arch/arm64/kvm/hyp/hyp.h
@@ -70,6 +70,9 @@ void __debug_clear_restore_state(struct kvm_vcpu *vcpu,
 struct kvm_guest_debug_arch *dbg,
 struct kvm_cpu_context *ctxt);
 
+void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
+void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
+
 u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
 
 #endif /* __ARM64_KVM_HYP_H__ */
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index a3af81a..06d3e20 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -88,6 +88,7 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 {
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
+   bool fp_enabled;
u64 exit_code;
 
vcpu = kern_hyp_va(vcpu);
@@ -117,6 +118,8 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
exit_code = __guest_enter(vcpu, host_ctxt);
/* And we're baaack! */
 
+   fp_enabled = !!(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
+
__sysreg_save_state(guest_ctxt);
__sysreg32_save_state(vcpu);
__timer_save_state(vcpu);
@@ -127,6 +130,11 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 
__sysreg_restore_state(host_ctxt);
 
+   if (fp_enabled) {
+   __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
+   __fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
+   }
+
__debug_save_state(vcp

[PATCH 05/21] arm64: KVM: Implement timer save/restore

2015-11-16 Thread Marc Zyngier
Implement the timer save restore as a direct translation of
the assembly code version.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/Makefile   |  1 +
 arch/arm64/kvm/hyp/hyp.h  |  3 ++
 arch/arm64/kvm/hyp/timer-sr.c | 68 +++
 3 files changed, 72 insertions(+)
 create mode 100644 arch/arm64/kvm/hyp/timer-sr.c

diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index d1e38ce..455dc0a 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -4,3 +4,4 @@
 
 obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
index a31cb6e..86aa5a2 100644
--- a/arch/arm64/kvm/hyp/hyp.h
+++ b/arch/arm64/kvm/hyp/hyp.h
@@ -33,5 +33,8 @@ void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
 void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
 
+void __timer_save_state(struct kvm_vcpu *vcpu);
+void __timer_restore_state(struct kvm_vcpu *vcpu);
+
 #endif /* __ARM64_KVM_HYP_H__ */
 
diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/arch/arm64/kvm/hyp/timer-sr.c
new file mode 100644
index 000..1a1d2ac
--- /dev/null
+++ b/arch/arm64/kvm/hyp/timer-sr.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include 
+#include 
+
+#include 
+
+#include "hyp.h"
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
+{
+   struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+   struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+
+   if (kvm->arch.timer.enabled) {
+   timer->cntv_ctl = read_sysreg(cntv_ctl_el0);
+   isb();
+   timer->cntv_cval = read_sysreg(cntv_cval_el0);
+   }
+
+   /* Disable the virtual timer */
+   write_sysreg(0, cntv_ctl_el0);
+
+   /* Allow physical timer/counter access for the host */
+   write_sysreg(read_sysreg(cnthctl_el2) | 3, cnthctl_el2);
+
+   /* Clear cntvoff for the host */
+   write_sysreg(0, cntvoff_el2);
+}
+
+void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
+{
+   struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+   struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+   u64 val;
+
+   /*
+* Disallow physical timer access for the guest
+* Physical counter access is allowed
+*/
+   val = read_sysreg(cnthctl_el2);
+   val &= ~(1 << 1);
+   val |= 1;
+   write_sysreg(val, cnthctl_el2);
+
+   if (kvm->arch.timer.enabled) {
+   write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
+   write_sysreg(timer->cntv_cval, cntv_cval_el0);
+   isb();
+   write_sysreg(timer->cntv_ctl, cntv_ctl_el0);
+   }
+}
-- 
2.1.4
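
For readers decoding the raw bit twiddling on cnthctl_el2 above: bit 0
(EL1PCTEN) gates EL1 physical counter access and bit 1 (EL1PCEN) gates
EL1 physical timer access. A sketch with the bits named (macro names
invented here; the patch uses the raw values):

    #define CNTHCTL_EL1PCTEN    (1 << 0)    /* EL1 physical counter access */
    #define CNTHCTL_EL1PCEN     (1 << 1)    /* EL1 physical timer access */

    /* Host (save path): allow both, i.e. the "| 3" above. */
    write_sysreg(read_sysreg(cnthctl_el2) |
                 CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN, cnthctl_el2);

    /* Guest (restore path): counter allowed, timer trapped. */
    u64 val = read_sysreg(cnthctl_el2);
    val &= ~CNTHCTL_EL1PCEN;
    val |= CNTHCTL_EL1PCTEN;
    write_sysreg(val, cnthctl_el2);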



[PATCH 14/21] arm64: KVM: HYP mode entry points

2015-11-16 Thread Marc Zyngier
Add the entry points for HYP mode (both for hypercalls and
exception handling).

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/Makefile|   1 +
 arch/arm64/kvm/hyp/hyp-entry.S | 189 +
 2 files changed, 190 insertions(+)
 create mode 100644 arch/arm64/kvm/hyp/hyp-entry.S

diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 1a529f5..826032b 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += entry.o
 obj-$(CONFIG_KVM_ARM_HOST) += switch.o
 obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
 obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
new file mode 100644
index 000..e11a129
--- /dev/null
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+   .text
   .pushsection .hyp.text, "ax"
+
+el1_sync:  // Guest trapped into EL2
   push x0, x1
   push x2, x3

   mrs x1, esr_el2
   lsr x2, x1, #ESR_ELx_EC_SHIFT

   cmp x2, #ESR_ELx_EC_HVC64
   b.ne el1_trap

   mrs x3, vttbr_el2   // If vttbr is valid, the 64bit guest
   cbnz x3, el1_trap   // called HVC
+
+   /* Here, we're pretty sure the host called HVC. */
+   pop x2, x3
+   pop x0, x1
+
+   /* Check for __hyp_get_vectors */
   cbnz x0, 1f
+   mrs x0, vbar_el2
+   b   2f
+
1: push lr, xzr
+
+   /*
+* Compute the function address in EL2, and shuffle the parameters.
+*/
+   kern_hyp_va x0
+   mov lr, x0
+   mov x0, x1
+   mov x1, x2
+   mov x2, x3
+   blr lr
+
+   pop lr, xzr
+2: eret
+
+el1_trap:
+   /*
+* x1: ESR
+* x2: ESR_EC
+*/
+
+   /* Guest accessed VFP/SIMD registers, save host, restore Guest */
+   cmp x2, #ESR_ELx_EC_FP_ASIMD
   b.eq __fpsimd_guest_restore
+
+   cmp x2, #ESR_ELx_EC_DABT_LOW
+   mov x0, #ESR_ELx_EC_IABT_LOW
   ccmp x2, x0, #4, ne
   b.ne 1f  // Not an abort we care about
+
+   /* This is an abort. Check for permission fault */
+   and x2, x1, #ESR_ELx_FSC_TYPE
+   cmp x2, #FSC_PERM
   b.ne 1f  // Not a permission fault
+
+   /*
+* Check for Stage-1 page table walk, which is guaranteed
+* to give a valid HPFAR_EL2.
+*/
   tbnz x1, #7, 1f // S1PTW is set
+
+   /* Preserve PAR_EL1 */
+   mrs x3, par_el1
   push x3, xzr
+
+   /*
+* Permission fault, HPFAR_EL2 is invalid.
+* Resolve the IPA the hard way using the guest VA.
+* Stage-1 translation already validated the memory access rights.
+* As such, we can use the EL1 translation regime, and don't have
+* to distinguish between EL0 and EL1 access.
+*/
+   mrs x2, far_el2
+   at  s1e1r, x2
+   isb
+
+   /* Read result */
+   mrs x3, par_el1
+   pop x0, xzr // Restore PAR_EL1 from the stack
+   msr par_el1, x0
   tbnz x3, #0, 3f // Bail out if we failed the translation
   ubfx x3, x3, #12, #36   // Extract IPA
+   lsl x3, x3, #4  // and present it like HPFAR
+   b   2f
+
+1: mrs x3, hpfar_el2
+   mrs x2, far_el2
+
+2: mrs x0, tpidr_el2
+   str w1, [x0, #VCPU_ESR_EL2]
+   str x2, [x0, #VCPU_FAR_EL2]
+   str x3, [x0, #VCPU_HPFAR_EL2]
+
+   mov x1, #ARM_EXCEPTION_TRAP
+   b   __guest_exit
+
+   /*
+* Translation failed. Just return to the guest and
+* let it fault again. Another CPU is probably playing
+* behind our back.
+*/
+3: pop x2, x3
+   pop x0, x1
+
+   eret
+
+el1_irq:
   push x0, x1
   push x2, x3
+   mrs x0, tpidr_el2
+   mov x1, #ARM_EXCEPTION_IRQ
+   b   __guest_exit
+
+.macro invalid_vector  label, target = __kvm_
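
A note on the el1_sync HVC path above: the host enters HYP with x0 holding
either 0 (asking for the current vectors, i.e. __hyp_get_vectors) or a
kernel-VA function pointer, with up to three arguments that get shifted
down into x0-x2. A rough C-level sketch of that convention (an
illustration, not part of the patch):

	unsigned long hyp_dispatch(unsigned long fn, unsigned long a0,
				   unsigned long a1, unsigned long a2)
	{
		typedef unsigned long (*hyp_fn_t)(unsigned long,
						  unsigned long, unsigned long);

		if (!fn)
			return read_sysreg(vbar_el2);	/* __hyp_get_vectors */

		/* convert the kernel VA to a HYP VA before calling */
		return ((hyp_fn_t)kern_hyp_va(fn))(a0, a1, a2);
	}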

[PATCH 21/21] arm64: KVM: Remove weak attributes

2015-11-16 Thread Marc Zyngier
As we've now switched to the new world switch implementation,
remove the weak attributes, as nobody is supposed to override
them anymore.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/debug-sr.c   |  5 ++---
 arch/arm64/kvm/hyp/hyp-entry.S  |  3 ---
 arch/arm64/kvm/hyp/switch.c |  5 ++---
 arch/arm64/kvm/hyp/tlb.c| 16 +++-
 arch/arm64/kvm/hyp/vgic-v3-sr.c |  5 ++---
 5 files changed, 13 insertions(+), 21 deletions(-)

diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index c7da3ec..f8ec964 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -127,10 +127,9 @@ void __hyp_text __debug_clear_restore_state(struct kvm_vcpu *vcpu,
}
 }
 
-u32 __hyp_text __debug_read_mdcr_el2(void)
+static u32 __hyp_text __debug_read_mdcr_el2(void)
 {
return read_sysreg(mdcr_el2);
 }
 
-__alias(__debug_read_mdcr_el2)
-u32 __weak __kvm_get_mdcr_el2(void);
+__alias(__debug_read_mdcr_el2) u32 __kvm_get_mdcr_el2(void);
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 28de58f..658b9af 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -175,9 +175,7 @@ ENDPROC(\label)
 
.align 11
 
-   .weak   __kvm_hyp_vector
 ENTRY(__kvm_hyp_vector)
-ENTRY(__hyp_vector)
ventry  el2t_sync_invalid   // Synchronous EL2t
ventry  el2t_irq_invalid// IRQ EL2t
ventry  el2t_fiq_invalid// FIQ EL2t
@@ -197,5 +195,4 @@ ENTRY(__hyp_vector)
ventry  el1_irq // IRQ 32-bit EL1
ventry  el1_fiq_invalid // FIQ 32-bit EL1
ventry  el1_error_invalid   // Error 32-bit EL1
-ENDPROC(__hyp_vector)
 ENDPROC(__kvm_hyp_vector)
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index ef58066..a2885f5 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -84,7 +84,7 @@ static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
__vgic_call_restore_state()(vcpu);
 }
 
-int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
+static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 {
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
@@ -141,8 +141,7 @@ int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
return exit_code;
 }
 
-__alias(__guest_run)
-int __weak __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 static const char *__hyp_panic_string = "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n";
 
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 2c279a8..250e06c 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -17,7 +17,7 @@
 
 #include "hyp.h"
 
-void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
dsb(ishst);
 
@@ -47,10 +47,10 @@ void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
write_sysreg(0, vttbr_el2);
 }
 
-__alias(__tlb_flush_vmid_ipa)
-void __weak __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
+__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
+   phys_addr_t ipa);
 
-void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
+static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
 {
dsb(ishst);
 
@@ -66,10 +66,9 @@ void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
write_sysreg(0, vttbr_el2);
 }
 
-__alias(__tlb_flush_vmid)
-void __weak __kvm_tlb_flush_vmid(struct kvm *kvm);
+__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);
 
-void __hyp_text __tlb_flush_vm_context(void)
+static void __hyp_text __tlb_flush_vm_context(void)
 {
dsb(ishst);
asm volatile("tlbi alle1is  \n"
@@ -77,5 +76,4 @@ void __hyp_text __tlb_flush_vm_context(void)
dsb(ish);
 }
 
-__alias(__tlb_flush_vm_context)
-void __weak __kvm_flush_vm_context(void);
+__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 0cf316c..9189f71 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -216,10 +216,9 @@ void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
}
 }
 
-u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void)
+static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void)
 {
return read_gicreg(ICH_VTR_EL2);
 }
 
-__alias(__vgic_v3_read_ich_vtr_el2)
-u64 __weak __vgic_v3_get_ich_vtr_el2(void);
+__alias(__vgic_v3_read_ich_vtr_el2) u64 __vgic_v3_get_ich_vtr_el2(void);
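
For reference, __alias(x) is the kernel's shorthand for
__attribute__((alias(#x))): each __kvm_*/__vgic_* name becomes a second
symbol for the static __hyp_text implementation, with no wrapper call
involved. A stand-alone sketch (illustrative names):

	static u64 do_read(void) { return 0; }
	u64 exported_read(void) __attribute__((alias("do_read")));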
-- 
2.1.4



[PATCH 04/21] arm64: KVM: Implement vgic-v3 save/restore

2015-11-16 Thread Marc Zyngier
Implement the vgic-v3 save/restore as a direct translation of
the assembly code version.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/Makefile |   1 +
 arch/arm64/kvm/hyp/hyp.h|   3 +
 arch/arm64/kvm/hyp/vgic-v3-sr.c | 222 
 3 files changed, 226 insertions(+)
 create mode 100644 arch/arm64/kvm/hyp/vgic-v3-sr.c

diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index d8d5968..d1e38ce 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -3,3 +3,4 @@
 #
 
 obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
index 78f25c4..a31cb6e 100644
--- a/arch/arm64/kvm/hyp/hyp.h
+++ b/arch/arm64/kvm/hyp/hyp.h
@@ -30,5 +30,8 @@
 void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
 void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
 
+void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+
 #endif /* __ARM64_KVM_HYP_H__ */
 
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
new file mode 100644
index 000..f2289ab
--- /dev/null
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see .
+ */
+
+#include 
+#include 
+#include 
+
+#include 
+
+#include "hyp.h"
+
+/*
+ * We store LRs in reverse order to let the CPU deal with streaming
+ * access. Use this macro to make it look saner...
+ */
+#define LR_OFFSET(n)   (15 - (n))
+
+#define read_gicreg(r) \
+   ({  \
+   u64 reg;\
+   asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \
+   reg;\
+   })
+
+#define write_gicreg(v,r)  \
+   do {\
+   u64 __val = (v);\
+   asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
+   } while (0)
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
+{
+   struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+   u64 val;
+   u32 nr_lr, nr_pri;
+
+   /*
+* Make sure stores to the GIC via the memory mapped interface
+* are now visible to the system register interface.
+*/
+   dsb(st);
+
+   cpu_if->vgic_vmcr  = read_gicreg(ICH_VMCR_EL2);
+   cpu_if->vgic_misr  = read_gicreg(ICH_MISR_EL2);
+   cpu_if->vgic_eisr  = read_gicreg(ICH_EISR_EL2);
+   cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
+
+   write_gicreg(0, ICH_HCR_EL2);
+   val = read_gicreg(ICH_VTR_EL2);
+   nr_lr = val & 0xf;
+   nr_pri = ((u32)val >> 29) + 1;
+
+   switch (nr_lr) {
+   case 15:
+   cpu_if->vgic_lr[LR_OFFSET(15)] = read_gicreg(ICH_LR15_EL2);
+   case 14:
+   cpu_if->vgic_lr[LR_OFFSET(14)] = read_gicreg(ICH_LR14_EL2);
+   case 13:
+   cpu_if->vgic_lr[LR_OFFSET(13)] = read_gicreg(ICH_LR13_EL2);
+   case 12:
+   cpu_if->vgic_lr[LR_OFFSET(12)] = read_gicreg(ICH_LR12_EL2);
+   case 11:
+   cpu_if->vgic_lr[LR_OFFSET(11)] = read_gicreg(ICH_LR11_EL2);
+   case 10:
+   cpu_if->vgic_lr[LR_OFFSET(10)] = read_gicreg(ICH_LR10_EL2);
+   case 9:
+   cpu_if->vgic_lr[LR_OFFSET(9)] = read_gicreg(ICH_LR9_EL2);
+   case 8:
+   cpu_if->vgic_lr[LR_OFFSET(8)] = read_gicreg(ICH_LR8_EL2);
+   case 7:
+   cpu_if->vgic_lr[LR_OFFSET(7)] = read_gicreg(ICH_LR7_EL2);
+   case 6:
+   cpu_if->vgic_lr[LR_OFFSET(6)] = read_gicreg(ICH_LR6_EL2);
+   case 5:
+   cpu_if->vgic_lr[LR_OFFSET(5)] = read_gicreg(ICH_LR5_EL2);
+   case 4:
+   cpu_if->vgic_lr[LR_OFFSET(4)] = read_gicreg(ICH_LR4_EL2);
+   case 3:
+   cpu_if->vgic_lr[LR_OFFSET(3)] = read_gicreg(ICH_LR3_EL2);
+   case 2:
+   cpu_if->vgic_lr[LR_OFFSET(2)] = read_gicreg(ICH_LR2_EL2);
+   case 1:
+   cpu_if->vgic_lr
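
The read_gicreg/write_gicreg wrappers above exist because the GICv3 system
registers are unknown to older assemblers; mrs_s/msr_s (from asm/sysreg.h)
encode them by number. A usage sketch for the save path (my reading of the
architecture: ICH_VTR_EL2.ListRegs is the number of implemented list
registers minus one):

	u64 vtr = read_gicreg(ICH_VTR_EL2);
	int max_lr_idx = vtr & 0xf;	/* 15 on a fully populated 16-LR GIC */

and with LR_OFFSET(), LR_OFFSET(15) == 0, so ICH_LR15_EL2 lands in
vgic_lr[0], giving the CPU a nice streaming store pattern.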

[PATCH 20/21] arm64: KVM: Cleanup asm-offset.c

2015-11-16 Thread Marc Zyngier
As we've now rewritten most of our code-base in C, most of the
KVM-specific code in asm-offset.c is useless. Delete-time again!

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kernel/asm-offsets.c | 39 ---
 1 file changed, 39 deletions(-)

diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 4b72231..94090a6 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -108,50 +108,11 @@ int main(void)
   DEFINE(CPU_GP_REGS,  offsetof(struct kvm_cpu_context, gp_regs));
   DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
   DEFINE(CPU_FP_REGS,  offsetof(struct kvm_regs, fp_regs));
-  DEFINE(CPU_SP_EL1,   offsetof(struct kvm_regs, sp_el1));
-  DEFINE(CPU_ELR_EL1,  offsetof(struct kvm_regs, elr_el1));
-  DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr));
-  DEFINE(CPU_SYSREGS,  offsetof(struct kvm_cpu_context, sys_regs));
   DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
   DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2));
   DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
   DEFINE(VCPU_HPFAR_EL2,   offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
-  DEFINE(VCPU_DEBUG_FLAGS, offsetof(struct kvm_vcpu, arch.debug_flags));
-  DEFINE(VCPU_DEBUG_PTR,   offsetof(struct kvm_vcpu, arch.debug_ptr));
-  DEFINE(DEBUG_BCR,offsetof(struct kvm_guest_debug_arch, dbg_bcr));
-  DEFINE(DEBUG_BVR,offsetof(struct kvm_guest_debug_arch, dbg_bvr));
-  DEFINE(DEBUG_WCR,offsetof(struct kvm_guest_debug_arch, dbg_wcr));
-  DEFINE(DEBUG_WVR,offsetof(struct kvm_guest_debug_arch, dbg_wvr));
-  DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
-  DEFINE(VCPU_MDCR_EL2,offsetof(struct kvm_vcpu, arch.mdcr_el2));
-  DEFINE(VCPU_IRQ_LINES,   offsetof(struct kvm_vcpu, arch.irq_lines));
   DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
-  DEFINE(VCPU_HOST_DEBUG_STATE, offsetof(struct kvm_vcpu, arch.host_debug_state));
-  DEFINE(VCPU_TIMER_CNTV_CTL,  offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
-  DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
-  DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
-  DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
-  DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
-  DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
-  DEFINE(VGIC_V2_CPU_HCR,  offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
-  DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
-  DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
-  DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
-  DEFINE(VGIC_V2_CPU_ELRSR,offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
-  DEFINE(VGIC_V2_CPU_APR,  offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
-  DEFINE(VGIC_V2_CPU_LR,   offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
-  DEFINE(VGIC_V3_CPU_SRE,  offsetof(struct vgic_cpu, vgic_v3.vgic_sre));
-  DEFINE(VGIC_V3_CPU_HCR,  offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
-  DEFINE(VGIC_V3_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
-  DEFINE(VGIC_V3_CPU_MISR, offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
-  DEFINE(VGIC_V3_CPU_EISR, offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
-  DEFINE(VGIC_V3_CPU_ELRSR,offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
-  DEFINE(VGIC_V3_CPU_AP0R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
-  DEFINE(VGIC_V3_CPU_AP1R, offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
-  DEFINE(VGIC_V3_CPU_LR,   offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
-  DEFINE(VGIC_CPU_NR_LR,   offsetof(struct vgic_cpu, nr_lr));
-  DEFINE(KVM_VTTBR,offsetof(struct kvm, arch.vttbr));
-  DEFINE(KVM_VGIC_VCTRL,   offsetof(struct kvm, arch.vgic.vctrl_base));
 #endif
 #ifdef CONFIG_CPU_PM
   DEFINE(CPU_SUSPEND_SZ,   sizeof(struct cpu_suspend_ctx));
-- 
2.1.4



[PATCH 02/21] arm64: KVM: Add a HYP-specific header file

2015-11-16 Thread Marc Zyngier
In order to expose the various EL2 services that are private to
the hypervisor, add a new hyp.h file.

So far, it only contains mundane things such as section annotation
and VA manipulation.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/hyp.h | 31 +++
 1 file changed, 31 insertions(+)
 create mode 100644 arch/arm64/kvm/hyp/hyp.h

diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
new file mode 100644
index 000..dac843e
--- /dev/null
+++ b/arch/arm64/kvm/hyp/hyp.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see .
+ */
+
+#ifndef __ARM64_KVM_HYP_H__
+#define __ARM64_KVM_HYP_H__
+
+#include 
+#include 
+#include 
+#include 
+
+#define __hyp_text __section(.hyp.text) notrace
+
+#define kern_hyp_va(v) (typeof(v))((unsigned long)v & HYP_PAGE_OFFSET_MASK)
+
+#endif /* __ARM64_KVM_HYP_H__ */
+
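
kern_hyp_va() relies on the HYP mapping of the kernel's linear range being
the kernel VA with the upper bits masked off, so the conversion is a single
AND. Usage sketch (illustrative):

	struct kvm *kvm = kern_hyp_va(vcpu->kvm);	/* pointer usable at EL2 */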
-- 
2.1.4



[PATCH 18/21] arm64: KVM: Move away from the assembly version of the world switch

2015-11-16 Thread Marc Zyngier
This is it. We remove all of the code that has now been rewritten.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/Makefile |2 -
 arch/arm64/kvm/hyp.S| 1071 +--
 arch/arm64/kvm/vgic-v2-switch.S |  134 -
 arch/arm64/kvm/vgic-v3-switch.S |  269 --
 4 files changed, 1 insertion(+), 1475 deletions(-)
 delete mode 100644 arch/arm64/kvm/vgic-v2-switch.S
 delete mode 100644 arch/arm64/kvm/vgic-v3-switch.S

diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index d31e4e5..caee9ee 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -23,8 +23,6 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 1599701..0ccdcbb 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -17,906 +17,7 @@
 
 #include 
 
-#include 
-#include 
 #include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-#include 
-
-#define CPU_GP_REG_OFFSET(x)   (CPU_GP_REGS + x)
-#define CPU_XREG_OFFSET(x) CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
-#define CPU_SPSR_OFFSET(x) CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
-#define CPU_SYSREG_OFFSET(x)   (CPU_SYSREGS + 8*x)
-
-   .text
-   .pushsection .hyp.text, "ax"
-   .align  PAGE_SHIFT
-
-.macro save_common_regs
-   // x2: base address for cpu context
-   // x3: tmp register
-
-   add x3, x2, #CPU_XREG_OFFSET(19)
-   stp x19, x20, [x3]
-   stp x21, x22, [x3, #16]
-   stp x23, x24, [x3, #32]
-   stp x25, x26, [x3, #48]
-   stp x27, x28, [x3, #64]
-   stp x29, lr, [x3, #80]
-
-   mrs x19, sp_el0
-   mrs x20, elr_el2    // pc before entering el2
-   mrs x21, spsr_el2   // pstate before entering el2
-
-   stp x19, x20, [x3, #96]
-   str x21, [x3, #112]
-
-   mrs x22, sp_el1
-   mrs x23, elr_el1
-   mrs x24, spsr_el1
-
-   str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
-   str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
-   str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
-.endm
-
-.macro restore_common_regs
-   // x2: base address for cpu context
-   // x3: tmp register
-
-   ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
-   ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
-   ldr x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
-
-   msr sp_el1, x22
-   msr elr_el1, x23
-   msr spsr_el1, x24
-
-   add x3, x2, #CPU_XREG_OFFSET(31)    // SP_EL0
-   ldp x19, x20, [x3]
-   ldr x21, [x3, #16]
-
-   msr sp_el0, x19
-   msr elr_el2, x20    // pc on return from el2
-   msr spsr_el2, x21   // pstate on return from el2
-
-   add x3, x2, #CPU_XREG_OFFSET(19)
-   ldp x19, x20, [x3]
-   ldp x21, x22, [x3, #16]
-   ldp x23, x24, [x3, #32]
-   ldp x25, x26, [x3, #48]
-   ldp x27, x28, [x3, #64]
-   ldp x29, lr, [x3, #80]
-.endm
-
-.macro save_host_regs
-   save_common_regs
-.endm
-
-.macro restore_host_regs
-   restore_common_regs
-.endm
-
-.macro save_fpsimd
-   // x2: cpu context address
-   // x3, x4: tmp regs
-   add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
-   fpsimd_save x3, 4
-.endm
-
-.macro restore_fpsimd
-   // x2: cpu context address
-   // x3, x4: tmp regs
-   add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
-   fpsimd_restore x3, 4
-.endm
-
-.macro save_guest_regs
-   // x0 is the vcpu address
-   // x1 is the return code, do not corrupt!
-   // x2 is the cpu context
-   // x3 is a tmp register
-   // Guest's x0-x3 are on the stack
-
-   // Compute base to save registers
-   add x3, x2, #CPU_XREG_OFFSET(4)
-   stp x4, x5, [x3]
-   stp x6, x7, [x3, #16]
-   stp x8, x9, [x3, #32]
-   stp x10, x11, [x3, #48]
-   stp x12, x13, [x3, #64]
-   stp x14, x15, [x3, #80]
-   stp x16, x17, [x3, #96]
-   str x18, [x3, #112]
-
-   pop x6, x7  // x2, x3
-   pop x4, x5  // x0, x1
-
-   add x3, x2, #CPU_XREG_OFFSET(0)
-   stp x4, x5, [x3]
-   stp x6, x7, [x3, #16]
-
-   save_common_regs
-.endm
-
-.macro restore_guest_regs
-   // x0 is the vcpu address.
-   // x2 is the cpu context
-   // x3 is a tmp register
-
-

[PATCH 07/21] arm64: KVM: Implement 32bit system register save/restore

2015-11-16 Thread Marc Zyngier
Implement the 32bit system register save/restore as a direct
translation of the assembly code version.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/hyp.h   |  2 ++
 arch/arm64/kvm/hyp/sysreg-sr.c | 41 +
 2 files changed, 43 insertions(+)

diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
index 087d3a5..4639330 100644
--- a/arch/arm64/kvm/hyp/hyp.h
+++ b/arch/arm64/kvm/hyp/hyp.h
@@ -38,6 +38,8 @@ void __timer_restore_state(struct kvm_vcpu *vcpu);
 
 void __sysreg_save_state(struct kvm_cpu_context *ctxt);
 void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
+void __sysreg32_save_state(struct kvm_vcpu *vcpu);
+void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
 
 #endif /* __ARM64_KVM_HYP_H__ */
 
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index add8fcb..3f81a4d 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -88,3 +88,44 @@ void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
write_sysreg(ctxt->gp_regs.elr_el1, elr_el1);
write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1);
 }
+
+void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
+{
+   if (!(read_sysreg(hcr_el2) & HCR_RW)) {
+   u64 *spsr = vcpu->arch.ctxt.gp_regs.spsr;
+   u64 *sysreg = vcpu->arch.ctxt.sys_regs;
+
+   spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
+   spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
+   spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
+   spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);
+
+   sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
+   sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);
+
+   if (!(read_sysreg(cptr_el2) & CPTR_EL2_TFP))
+   sysreg[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
+
+   if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+   sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
+   }
+}
+
+void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
+{
+   if (!(read_sysreg(hcr_el2) & HCR_RW)) {
+   u64 *spsr = vcpu->arch.ctxt.gp_regs.spsr;
+   u64 *sysreg = vcpu->arch.ctxt.sys_regs;
+
+   write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
+   write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
+   write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
+   write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);
+
+   write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
+   write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);
+
+   if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+   write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
+   }
+}
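
Both functions key off HCR_EL2.RW: when that bit is clear, the guest's EL1
is AArch32 and the banked SPSRs and 32bit-only system registers actually
exist. A condensed sketch of the guard (an illustration):

	if (read_sysreg(hcr_el2) & HCR_RW)
		return;	/* 64bit guest: no AArch32-only state to touch */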
-- 
2.1.4



[PATCH 06/21] arm64: KVM: Implement system register save/restore

2015-11-16 Thread Marc Zyngier
Implement the system register save/restore as a direct translation of
the assembly code version.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/hyp/Makefile|  1 +
 arch/arm64/kvm/hyp/hyp.h   |  3 ++
 arch/arm64/kvm/hyp/sysreg-sr.c | 90 ++
 3 files changed, 94 insertions(+)
 create mode 100644 arch/arm64/kvm/hyp/sysreg-sr.c

diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 455dc0a..ec94200 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
index 86aa5a2..087d3a5 100644
--- a/arch/arm64/kvm/hyp/hyp.h
+++ b/arch/arm64/kvm/hyp/hyp.h
@@ -36,5 +36,8 @@ void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
 void __timer_save_state(struct kvm_vcpu *vcpu);
 void __timer_restore_state(struct kvm_vcpu *vcpu);
 
+void __sysreg_save_state(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
+
 #endif /* __ARM64_KVM_HYP_H__ */
 
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
new file mode 100644
index 000..add8fcb
--- /dev/null
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see .
+ */
+
+#include 
+#include 
+
+#include 
+
+#include "hyp.h"
+
+/* ctxt is already in the HYP VA space */
+void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+{
+   ctxt->sys_regs[MPIDR_EL1]   = read_sysreg(vmpidr_el2);
+   ctxt->sys_regs[CSSELR_EL1]  = read_sysreg(csselr_el1);
+   ctxt->sys_regs[SCTLR_EL1]   = read_sysreg(sctlr_el1);
+   ctxt->sys_regs[ACTLR_EL1]   = read_sysreg(actlr_el1);
+   ctxt->sys_regs[CPACR_EL1]   = read_sysreg(cpacr_el1);
+   ctxt->sys_regs[TTBR0_EL1]   = read_sysreg(ttbr0_el1);
+   ctxt->sys_regs[TTBR1_EL1]   = read_sysreg(ttbr1_el1);
+   ctxt->sys_regs[TCR_EL1] = read_sysreg(tcr_el1);
+   ctxt->sys_regs[ESR_EL1] = read_sysreg(esr_el1);
+   ctxt->sys_regs[AFSR0_EL1]   = read_sysreg(afsr0_el1);
+   ctxt->sys_regs[AFSR1_EL1]   = read_sysreg(afsr1_el1);
+   ctxt->sys_regs[FAR_EL1] = read_sysreg(far_el1);
+   ctxt->sys_regs[MAIR_EL1]= read_sysreg(mair_el1);
+   ctxt->sys_regs[VBAR_EL1]= read_sysreg(vbar_el1);
+   ctxt->sys_regs[CONTEXTIDR_EL1]  = read_sysreg(contextidr_el1);
+   ctxt->sys_regs[TPIDR_EL0]   = read_sysreg(tpidr_el0);
+   ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
+   ctxt->sys_regs[TPIDR_EL1]   = read_sysreg(tpidr_el1);
+   ctxt->sys_regs[AMAIR_EL1]   = read_sysreg(amair_el1);
+   ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg(cntkctl_el1);
+   ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
+   ctxt->sys_regs[MDSCR_EL1]   = read_sysreg(mdscr_el1);
+
+   ctxt->gp_regs.regs.sp   = read_sysreg(sp_el0);
+   ctxt->gp_regs.regs.pc   = read_sysreg(elr_el2);
+   ctxt->gp_regs.regs.pstate   = read_sysreg(spsr_el2);
+   ctxt->gp_regs.sp_el1= read_sysreg(sp_el1);
+   ctxt->gp_regs.elr_el1   = read_sysreg(elr_el1);
+   ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1);
+}
+
+void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+{
+   write_sysreg(ctxt->sys_regs[MPIDR_EL1],   vmpidr_el2);
+   write_sysreg(ctxt->sys_regs[CSSELR_EL1],  csselr_el1);
+   write_sysreg(ctxt->sys_regs[SCTLR_EL1],   sctlr_el1);
+   write_sysreg(ctxt->sys_regs[ACTLR_EL1],   actlr_el1);
+   write_sysreg(ctxt->sys_regs[CPACR_EL1],   cpacr_el1);
+   write_sysreg(ctxt->sys_regs[TTBR0_EL1],   ttbr0_el1);
+   write_sysreg(ctxt->sys_regs[TTBR1_EL1],   ttbr1_el1);
+   write_sysreg(ctxt->sys_regs[TCR_EL1], tcr_el1);
+   write_sysreg(ctxt->sys_regs[ESR_EL1], esr_el1);
+   write_sysreg(ctxt->sys_regs[AFSR0_EL1],   afsr0_el1);
+   write_sysreg(ctxt->sys_regs[AFSR1_EL1],   afsr1_el1);
+   write_sysreg(ctxt->sys_regs[FAR_EL1], far_el1);
+   write_sysreg(ctxt->sys_regs[MAIR_EL1],mair_el1);
+   write_sysreg(ctxt->sys_

[PATCH 01/21] arm64: add macros to read/write system registers

2015-11-16 Thread Marc Zyngier
From: Mark Rutland 

Rather than crafting custom macros for reading/writing each system
register, provide generic accessors, read_sysreg and write_sysreg, for
this purpose.

Unlike read_cpuid, calls to read_exception_reg are never expected
to be optimized away or replaced with synthetic values.

Signed-off-by: Mark Rutland 
Cc: Catalin Marinas 
Cc: Marc Zyngier 
Cc: Suzuki Poulose 
Cc: Will Deacon 
Signed-off-by: Marc Zyngier 
---
 arch/arm64/include/asm/sysreg.h | 17 +
 1 file changed, 17 insertions(+)

diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index d48ab5b..c9c283a 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -20,6 +20,8 @@
 #ifndef __ASM_SYSREG_H
 #define __ASM_SYSREG_H
 
+#include 
+
 #include 
 
 /*
@@ -208,6 +210,8 @@
 
 #else
 
+#include 
+
 asm(
 "  .irp
num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
 "  .equ__reg_num_x\\num, \\num\n"
@@ -232,6 +236,19 @@ static inline void config_sctlr_el1(u32 clear, u32 set)
val |= set;
asm volatile("msr sctlr_el1, %0" : : "r" (val));
 }
+
+#define read_sysreg(r) ({  \
+   u64 __val;  \
+   asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
+   __val;  \
+})
+
+#define write_sysreg(v, r) do {\
+   u64 __val = (u64)(v);   \
+   asm volatile("msr " __stringify(r) ", %0"   \
+: : "r" (__val));  \
+} while (0)
+
 #endif
 
 #endif /* __ASM_SYSREG_H */
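
Each use expands to a single mrs/msr around a register move. Usage sketch
(picking SCTLR_EL1.I, the I-cache enable, as an arbitrary example):

	u64 sctlr = read_sysreg(sctlr_el1);		/* mrs %0, sctlr_el1 */
	write_sysreg(sctlr | (1 << 12), sctlr_el1);	/* msr sctlr_el1, %0 */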
-- 
2.1.4



[PATCH 00/21] arm64: KVM: world switch in C

2015-11-16 Thread Marc Zyngier
Once upon a time, the KVM/arm64 world switch was a nice, clean, lean
and mean piece of hand-crafted assembly code. Over time, features have
crept in, the code has become harder to maintain, and the smallest
change is a pain to introduce. The VHE patches are a prime example of
why this doesn't work anymore.

This series rewrites most of the existing assembly code in C, but keeps
the existing code structure in place (most function names will look
familiar to the reader). The biggest change is that we don't have to
deal with a static register allocation (the compiler does it for us),
we can easily follow structure and pointers, and only the lowest level
is still in assembly code. Oh, and a negative diffstat.

There is still a healthy dose of inline assembly (system register
accessors, runtime code patching), but I've tried not to make it too
invasive. The generated code, while not exactly brilliant, doesn't
look too shabby. I do expect a small performance degradation, but I
believe this is something we can improve over time (my initial
measurements don't show any obvious regression).

Eventually (and assuming people are happy with the general approach
taken here), it should be possible to make the 32bit converge with
this and reuse some parts of the code.

Patches are against 4.4-rc1 (mostly), and I've pushed a branch out
(kvm-arm64/wsinc). This has been tested on Juno, Seattle and the FVP
model. I also have pushed out kvm-arm64/vhe-wsinc that implements VHE
on top of these patches.
M.

Marc Zyngier (20):
  arm64: KVM: Add a HYP-specific header file
  arm64: KVM: Implement vgic-v2 save/restore
  arm64: KVM: Implement vgic-v3 save/restore
  arm64: KVM: Implement timer save/restore
  arm64: KVM: Implement system register save/restore
  arm64: KVM: Implement 32bit system register save/restore
  arm64: KVM: Implement debug save/restore
  arm64: KVM: Implement guest entry
  arm64: KVM: Add patchable function selector
  arm64: KVM: Implement the core world switch
  arm64: KVM: Implement fpsimd save/restore
  arm64: KVM: Implement TLB handling
  arm64: KVM: HYP mode entry points
  arm64: KVM: Add panic handling
  arm64: KVM: Add compatibility aliases
  arm64: KVM: Map the kernel RO section into HYP
  arm64: KVM: Move away from the assembly version of the world switch
  arm64: KVM: Turn system register numbers to an enum
  arm64: KVM: Cleanup asm-offset.c
  arm64: KVM: Remove weak attributes

Mark Rutland (1):
  arm64: add macros to read/write system registers

 arch/arm/kvm/arm.c   |7 +
 arch/arm64/include/asm/kvm_asm.h |   76 ---
 arch/arm64/include/asm/kvm_emulate.h |1 -
 arch/arm64/include/asm/kvm_host.h|   81 ++-
 arch/arm64/include/asm/kvm_mmio.h|1 -
 arch/arm64/include/asm/sysreg.h  |   17 +
 arch/arm64/kernel/asm-offsets.c  |   40 +-
 arch/arm64/kvm/Makefile  |3 +-
 arch/arm64/kvm/guest.c   |1 -
 arch/arm64/kvm/handle_exit.c |1 +
 arch/arm64/kvm/hyp.S | 1071 +-
 arch/arm64/kvm/hyp/Makefile  |   14 +
 arch/arm64/kvm/hyp/debug-sr.c|  135 +
 arch/arm64/kvm/hyp/entry.S   |  184 ++
 arch/arm64/kvm/hyp/fpsimd.S  |   33 ++
 arch/arm64/kvm/hyp/hyp-entry.S   |  198 +++
 arch/arm64/kvm/hyp/hyp.h |   80 +++
 arch/arm64/kvm/hyp/switch.c  |  179 ++
 arch/arm64/kvm/hyp/sysreg-sr.c   |  132 +
 arch/arm64/kvm/hyp/timer-sr.c|   68 +++
 arch/arm64/kvm/hyp/tlb.c |   79 +++
 arch/arm64/kvm/hyp/vgic-v2-sr.c  |   85 +++
 arch/arm64/kvm/hyp/vgic-v3-sr.c  |  224 +++
 arch/arm64/kvm/sys_regs.c|1 +
 arch/arm64/kvm/vgic-v2-switch.S  |  134 -
 arch/arm64/kvm/vgic-v3-switch.S  |  269 -
 virt/kvm/arm/vgic-v3.c   |1 +
 27 files changed, 1521 insertions(+), 1594 deletions(-)
 create mode 100644 arch/arm64/kvm/hyp/Makefile
 create mode 100644 arch/arm64/kvm/hyp/debug-sr.c
 create mode 100644 arch/arm64/kvm/hyp/entry.S
 create mode 100644 arch/arm64/kvm/hyp/fpsimd.S
 create mode 100644 arch/arm64/kvm/hyp/hyp-entry.S
 create mode 100644 arch/arm64/kvm/hyp/hyp.h
 create mode 100644 arch/arm64/kvm/hyp/switch.c
 create mode 100644 arch/arm64/kvm/hyp/sysreg-sr.c
 create mode 100644 arch/arm64/kvm/hyp/timer-sr.c
 create mode 100644 arch/arm64/kvm/hyp/tlb.c
 create mode 100644 arch/arm64/kvm/hyp/vgic-v2-sr.c
 create mode 100644 arch/arm64/kvm/hyp/vgic-v3-sr.c
 delete mode 100644 arch/arm64/kvm/vgic-v2-switch.S
 delete mode 100644 arch/arm64/kvm/vgic-v3-switch.S

-- 
2.1.4



[PATCH 03/21] arm64: KVM: Implement vgic-v2 save/restore

2015-11-16 Thread Marc Zyngier
Implement the vgic-v2 save/restore as a direct translation of
the assembly code version.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/kvm/Makefile |  1 +
 arch/arm64/kvm/hyp/Makefile |  5 +++
 arch/arm64/kvm/hyp/hyp.h|  3 ++
 arch/arm64/kvm/hyp/vgic-v2-sr.c | 85 +
 4 files changed, 94 insertions(+)
 create mode 100644 arch/arm64/kvm/hyp/Makefile
 create mode 100644 arch/arm64/kvm/hyp/vgic-v2-sr.c

diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 1949fe5..d31e4e5 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -10,6 +10,7 @@ KVM=../../../virt/kvm
 ARM=../../../arch/arm/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp/
 
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
new file mode 100644
index 000..d8d5968
--- /dev/null
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for Kernel-based Virtual Machine module, HYP part
+#
+
+obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
index dac843e..78f25c4 100644
--- a/arch/arm64/kvm/hyp/hyp.h
+++ b/arch/arm64/kvm/hyp/hyp.h
@@ -27,5 +27,8 @@
 
 #define kern_hyp_va(v) (typeof(v))((unsigned long)v & HYP_PAGE_OFFSET_MASK)
 
+void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+
 #endif /* __ARM64_KVM_HYP_H__ */
 
diff --git a/arch/arm64/kvm/hyp/vgic-v2-sr.c b/arch/arm64/kvm/hyp/vgic-v2-sr.c
new file mode 100644
index 000..1382d2e
--- /dev/null
+++ b/arch/arm64/kvm/hyp/vgic-v2-sr.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see .
+ */
+
+#include 
+#include 
+#include 
+
+#include 
+
+#include "hyp.h"
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
+{
+   struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+   struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+   struct vgic_dist *vgic = &kvm->arch.vgic;
+   void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+   u32 __iomem *lr_base;
+   u32 eisr0, eisr1, elrsr0, elrsr1;
+   int i = 0, nr_lr;
+
+   if (!base)
+   return;
+
+   cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
+   cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
+   eisr0  = readl_relaxed(base + GICH_EISR0);
+   eisr1  = readl_relaxed(base + GICH_EISR1);
+   elrsr0 = readl_relaxed(base + GICH_ELRSR0);
+   elrsr1 = readl_relaxed(base + GICH_ELRSR1);
+#ifdef CONFIG_CPU_BIG_ENDIAN
+   cpu_if->vgic_eisr  = ((u64)eisr0 << 32) | eisr1;
+   cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
+#else
+   cpu_if->vgic_eisr  = ((u64)eisr1 << 32) | eisr0;
+   cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
+#endif
+   cpu_if->vgic_apr   = readl_relaxed(base + GICH_APR);
+
+   writel_relaxed(0, base + GICH_HCR);
+
+   lr_base = base + GICH_LR0;
+   nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+   do {
+   cpu_if->vgic_lr[i++] = readl_relaxed(lr_base++);
+   } while (--nr_lr);
+}
+
+void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
+{
+   struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+   struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+   struct vgic_dist *vgic = &kvm->arch.vgic;
+   void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+   u32 __iomem *lr_base;
+   unsigned int i = 0, nr_lr;
+
+   if (!base)
+   return;
+
+   writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
+   writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
+   writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
+
+   lr_base = base + GICH_LR0;
+   nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+   do {
+   writel_relaxed(cpu_if->vgic_lr[i++], lr_base++);
+   } while (--nr_lr);
+}
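
The pointer-bumping loop is equivalent to an indexed form, which also makes
the implicit assumption visible: nr_lr must be at least 1, or the do/while
underflows. A sketch (an illustration only):

	for (i = 0; i < vcpu->arch.vgic_cpu.nr_lr; i++)
		writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + 4 * i);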
-- 
2.1.4



[PATCH 1/3] kvm: arm: arm64: remove unreferenced S2_PGD_ORDER

2015-11-16 Thread Vladimir Murzin
Since commit a987370 "arm64: KVM: Fix stage-2 PGD allocation to have
per-page refcounting" there is no reference to S2_PGD_ORDER, so kill it
for good.

Signed-off-by: Vladimir Murzin 
---
 arch/arm/include/asm/kvm_arm.h   |1 -
 arch/arm/kvm/mmu.c   |6 +++---
 arch/arm64/include/asm/kvm_mmu.h |1 -
 3 files changed, 3 insertions(+), 5 deletions(-)

diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index dc641dd..b05bb5a 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -135,7 +135,6 @@
 #define KVM_PHYS_SIZE  (1ULL << KVM_PHYS_SHIFT)
 #define KVM_PHYS_MASK  (KVM_PHYS_SIZE - 1ULL)
 #define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30))
-#define S2_PGD_ORDER   get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
 
 /* Virtualization Translation Control Register (VTCR) bits */
 #define VTCR_SH0   (3 << 12)
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 6984342..15fbb62 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -652,9 +652,9 @@ static void *kvm_alloc_hwpgd(void)
  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  * @kvm:   The KVM struct pointer for the VM.
  *
- * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
- * support either full 40-bit input addresses or limited to 32-bit input
- * addresses). Clears the allocated pages.
+ * Allocates only the stage-2 HW PGD level table(s) (can support either full
+ * 40-bit input addresses or limited to 32-bit input addresses). Clears the
+ * allocated pages.
  *
  * Note we don't need locking here as this is only called when the VM is
  * created, which can only be done once.
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6150567..54cba80 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -158,7 +158,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 #define PTRS_PER_S2_PGD_SHIFT  (KVM_PHYS_SHIFT - PGDIR_SHIFT)
 #endif
 #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT)
-#define S2_PGD_ORDER   get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
 
 #define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
 
-- 
1.7.9.5



[PATCH 2/3] kvm: arm: make kvm_arm.h friendly to assembly code

2015-11-16 Thread Vladimir Murzin
kvm_arm.h is included from both C code and assembly code; however, some
definitions in this header are supplied with U/UL/ULL suffixes, which
might confuse the assembler once they get evaluated.
We have the _AC() macro for such cases, so just wrap the problem places
with it.
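
For reference, _AC(X, Y) (from uapi/linux/const.h) pastes X##Y when the
header is compiled as C and emits plain X when it is assembled, e.g.:

	#define KVM_PHYS_SIZE	(_AC(1, ULL) << KVM_PHYS_SHIFT)
	/* C sees:   (1ULL << 40) */
	/* asm sees: (1 << 40)    */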

Signed-off-by: Vladimir Murzin 
---
 arch/arm/include/asm/kvm_arm.h |   33 +
 1 file changed, 17 insertions(+), 16 deletions(-)

diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index b05bb5a..01d4d7a 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -19,6 +19,7 @@
 #ifndef __ARM_KVM_ARM_H__
 #define __ARM_KVM_ARM_H__
 
+#include 
 #include 
 
 /* Hyp Configuration Register (HCR) bits */
@@ -132,9 +133,9 @@
  * space.
  */
 #define KVM_PHYS_SHIFT (40)
-#define KVM_PHYS_SIZE  (1ULL << KVM_PHYS_SHIFT)
-#define KVM_PHYS_MASK  (KVM_PHYS_SIZE - 1ULL)
-#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30))
+#define KVM_PHYS_SIZE  (_AC(1, ULL) << KVM_PHYS_SHIFT)
+#define KVM_PHYS_MASK  (KVM_PHYS_SIZE - _AC(1, ULL))
+#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
 
 /* Virtualization Translation Control Register (VTCR) bits */
 #define VTCR_SH0   (3 << 12)
@@ -161,17 +162,17 @@
 #define VTTBR_X (5 - KVM_T0SZ)
 #endif
 #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
-#define VTTBR_VMID_SHIFT  (48LLU)
-#define VTTBR_VMID_MASK  (0xffLLU << VTTBR_VMID_SHIFT)
+#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT  _AC(48, ULL)
+#define VTTBR_VMID_MASK  (_AC(0xff, ULL) << VTTBR_VMID_SHIFT)
 
 /* Hyp Syndrome Register (HSR) bits */
 #define HSR_EC_SHIFT   (26)
-#define HSR_EC (0x3fU << HSR_EC_SHIFT)
-#define HSR_IL (1U << 25)
+#define HSR_EC (_AC(0x3f, UL) << HSR_EC_SHIFT)
+#define HSR_IL (_AC(1, UL) << 25)
 #define HSR_ISS (HSR_IL - 1)
 #define HSR_ISV_SHIFT  (24)
-#define HSR_ISV (1U << HSR_ISV_SHIFT)
+#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT)
 #define HSR_SRT_SHIFT  (16)
 #define HSR_SRT_MASK   (0xf << HSR_SRT_SHIFT)
 #define HSR_FSC (0x3f)
@@ -179,9 +180,9 @@
 #define HSR_SSE (1 << 21)
 #define HSR_WNR (1 << 6)
 #define HSR_CV_SHIFT   (24)
-#define HSR_CV (1U << HSR_CV_SHIFT)
+#define HSR_CV (_AC(1, UL) << HSR_CV_SHIFT)
 #define HSR_COND_SHIFT (20)
-#define HSR_COND   (0xfU << HSR_COND_SHIFT)
+#define HSR_COND   (_AC(0xf, UL) << HSR_COND_SHIFT)
 
 #define FSC_FAULT  (0x04)
 #define FSC_ACCESS (0x08)
@@ -209,13 +210,13 @@
 #define HSR_EC_DABT(0x24)
 #define HSR_EC_DABT_HYP(0x25)
 
-#define HSR_WFI_IS_WFE (1U << 0)
+#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
 
-#define HSR_HVC_IMM_MASK   ((1UL << 16) - 1)
+#define HSR_HVC_IMM_MASK   ((_AC(1, UL) << 16) - 1)
 
-#define HSR_DABT_S1PTW (1U << 7)
-#define HSR_DABT_CM(1U << 8)
-#define HSR_DABT_EA(1U << 9)
+#define HSR_DABT_S1PTW (_AC(1, UL) << 7)
+#define HSR_DABT_CM(_AC(1, UL) << 8)
+#define HSR_DABT_EA(_AC(1, UL) << 9)
 
 #define kvm_arm_exception_type \
{0, "RESET" },  \
-- 
1.7.9.5



[PATCH 3/3] arm64: KVM: Add support for 16-bit VMID

2015-11-16 Thread Vladimir Murzin
The ARMv8.1 architecture extension allows a choice between 8-bit and
16-bit VMIDs, so make use of this capability in KVM.

Signed-off-by: Vladimir Murzin 
---
 arch/arm/include/asm/kvm_arm.h   |2 +-
 arch/arm/include/asm/kvm_mmu.h   |5 +
 arch/arm/kvm/arm.c   |   10 --
 arch/arm64/include/asm/kvm_arm.h |3 ++-
 arch/arm64/include/asm/kvm_mmu.h |8 
 arch/arm64/kvm/hyp-init.S|9 +
 6 files changed, 33 insertions(+), 4 deletions(-)

diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
index 01d4d7a..e22089f 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -164,7 +164,7 @@
 #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
 #define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
 #define VTTBR_VMID_SHIFT  _AC(48, ULL)
-#define VTTBR_VMID_MASK  (_AC(0xff, ULL) << VTTBR_VMID_SHIFT)
+#define VTTBR_VMID_MASK(size)  (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
 /* Hyp Syndrome Register (HSR) bits */
 #define HSR_EC_SHIFT   (26)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 405aa18..9203c21 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -279,6 +279,11 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
   pgd_t *merged_hyp_pgd,
   unsigned long hyp_idmap_start) { }
 
+static inline unsigned int kvm_get_vmid_bits(void)
+{
+   return 8;
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index eab83b2..055980d 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -58,7 +58,8 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 
 /* The VMID used in the VTTBR */
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
-static u8 kvm_next_vmid;
+static u32 kvm_next_vmid;
+static unsigned int kvm_vmid_bits __read_mostly;
 static DEFINE_SPINLOCK(kvm_vmid_lock);
 
 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
@@ -433,11 +434,12 @@ static void update_vttbr(struct kvm *kvm)
kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
kvm->arch.vmid = kvm_next_vmid;
kvm_next_vmid++;
+   kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
 
/* update vttbr to be used with the new vmid */
pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
-   vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
+   vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
kvm->arch.vttbr = pgd_phys | vmid;
 
spin_unlock(&kvm_vmid_lock);
@@ -1132,6 +1134,10 @@ static int init_hyp_mode(void)
 
kvm_perf_init();
 
+   /* set size of VMID supported by CPU */
+   kvm_vmid_bits = kvm_get_vmid_bits();
+   kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
kvm_info("Hyp mode initialized successfully\n");
 
return 0;
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 5e6857b..738a95f 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -125,6 +125,7 @@
 #define VTCR_EL2_SL0_LVL1  (1 << 6)
 #define VTCR_EL2_T0SZ_MASK 0x3f
 #define VTCR_EL2_T0SZ_40B  24
+#define VTCR_EL2_VS 19
 
 /*
  * We configure the Stage-2 page tables to always restrict the IPA space to be
@@ -169,7 +170,7 @@
 #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
 #define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
 #define VTTBR_VMID_SHIFT  (UL(48))
-#define VTTBR_VMID_MASK  (UL(0xFF) << VTTBR_VMID_SHIFT)
+#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
 /* Hyp System Trap Register */
 #define HSTR_EL2_T(x)  (1 << x)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 54cba80..0bf8b43 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -20,6 +20,7 @@
 
 #include 
 #include 
+#include 
 
 /*
  * As we only have the TTBR0_EL2 register, we cannot express
@@ -301,5 +302,12 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
 }
 
+static inline unsigned int kvm_get_vmid_bits(void)
+{
+   int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);
+
+   return (cpuid_feature_extract_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 178ba22..3e568dc 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -94,6 +94,15 @@ __do_hyp_init:
 */
mrs x5, ID_AA64MMFR0_EL1
bfi x4, x5, #16, #3
+   /*
+* Read the 
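
For 16-bit-capable CPUs, __do_hyp_init goes on to read ID_AA64MMFR1_EL1
and set VTCR_EL2.VS accordingly. On the C side, the net effect on VTTBR
construction can be sketched as (an illustration, not the patch itself):

	u64 vmid_mask = ((u64)1 << kvm_vmid_bits) - 1;	/* 0xff or 0xffff */

	kvm->arch.vttbr = pgd_phys |
			  (((u64)kvm->arch.vmid & vmid_mask) << VTTBR_VMID_SHIFT);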

[PATCH 0/3] arm64: KVM: Support 16-bit VMID

2015-11-16 Thread Vladimir Murzin
Hi

This series brings support for the 16-bit VMIDs offered by the ARMv8.1
architecture extension into KVM.

The first two patches are clean-ups to make kvm_arm.h fit into assembly
code nicely. The third patch modifies the KVM code to recognise and use
16-bit VMIDs.

Thanks!

Vladimir Murzin (3):
  kvm: arm: arm64: remove unreferenced S2_PGD_ORDER
  kvm: arm: make kvm_arm.h friendly to assembly code
  arm64: KVM: Add support for 16-bit VMID

 arch/arm/include/asm/kvm_arm.h   |   34 +-
 arch/arm/include/asm/kvm_mmu.h   |5 +
 arch/arm/kvm/arm.c   |   10 --
 arch/arm/kvm/mmu.c   |6 +++---
 arch/arm64/include/asm/kvm_arm.h |3 ++-
 arch/arm64/include/asm/kvm_mmu.h |9 -
 arch/arm64/kvm/hyp-init.S|9 +
 7 files changed, 52 insertions(+), 24 deletions(-)

-- 
1.7.9.5



[PATCH 2/2] arm64: KVM: Add workaround for Cortex-A57 erratum 834220

2015-11-16 Thread Marc Zyngier
Cortex-A57 parts up to r1p2 can misreport Stage 2 translation faults
when a Stage 1 permission fault or device alignment fault should
have been reported.

This patch implements the workaround (which is to validate that the
Stage-1 translation actually succeeds) by using code patching.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/Kconfig  | 21 +
 arch/arm64/include/asm/cpufeature.h |  3 ++-
 arch/arm64/kernel/cpu_errata.c  |  9 +
 arch/arm64/kvm/hyp.S|  6 ++
 4 files changed, 38 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 9ac16a4..746d985 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -316,6 +316,27 @@ config ARM64_ERRATUM_832075
 
  If unsure, say Y.
 
+config ARM64_ERRATUM_834220
+   bool "Cortex-A57: 834220: Stage 2 translation fault might be 
incorrectly reported in presence of a Stage 1 fault"
+   depends on KVM
+   default y
+   help
+ This option adds an alternative code sequence to work around ARM
+ erratum 834220 on Cortex-A57 parts up to r1p2.
+
+ Affected Cortex-A57 parts might report a Stage 2 translation
+ fault as the result of a Stage 1 fault for a load crossing a
+ page boundary when there is a permission or device memory
+ alignment fault at Stage 1 and a translation fault at Stage 2.
+
+ The workaround is to verify that the Stage-1 translation
+ doesn't generate a fault before handling the Stage-2 fault.
+ Please note that this does not necessarily enable the workaround,
+ as it depends on the alternative framework, which will only patch
+ the kernel if an affected CPU is detected.
+
+ If unsure, say Y.
+
 config ARM64_ERRATUM_845719
bool "Cortex-A53: 845719: a load might read incorrect data"
depends on COMPAT
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 11d5bb0f..52722ee 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -29,8 +29,9 @@
 #define ARM64_HAS_PAN  4
 #define ARM64_HAS_LSE_ATOMICS  5
 #define ARM64_WORKAROUND_CAVIUM_23154  6
+#define ARM64_WORKAROUND_834220 7
 
-#define ARM64_NCAPS 7
+#define ARM64_NCAPS 8
 
 #ifndef __ASSEMBLY__
 
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 24926f2..feb6b4e 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -75,6 +75,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
   (1 << MIDR_VARIANT_SHIFT) | 2),
},
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_834220
+   {
+   /* Cortex-A57 r0p0 - r1p2 */
+   .desc = "ARM erratum 834220",
+   .capability = ARM64_WORKAROUND_834220,
+   MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+  (1 << MIDR_VARIANT_SHIFT) | 2),
+   },
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
{
/* Cortex-A53 r0p[01234] */
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 1599701..ff2e038 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -1015,9 +1015,15 @@ el1_trap:
   b.ne 1f  // Not an abort we care about
 
/* This is an abort. Check for permission fault */
+alternative_if_not ARM64_WORKAROUND_834220
and x2, x1, #ESR_ELx_FSC_TYPE
cmp x2, #FSC_PERM
   b.ne 1f  // Not a permission fault
+alternative_else
+   nop // Use the permission fault path to
+   nop // check for a valid S1 translation,
+   nop // regardless of the ESR value.
+alternative_endif
 
/*
 * Check for Stage-1 page table walk, which is guaranteed
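
For context: alternative_if_not/alternative_else emit both sequences into
the image, and the boot-time alternatives framework patches in the one
that applies. The effective logic is roughly (a C-level illustration, not
the patch):

	if (!cpus_have_cap(ARM64_WORKAROUND_834220) &&
	    (esr & ESR_ELx_FSC_TYPE) != FSC_PERM)
		goto unrelated_abort;	/* label name is illustrative */

	/* otherwise fall through and re-validate the Stage-1 translation */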
-- 
2.1.4



[PATCH 0/2] arm64: KVM: Fixes for 4.4-rc2

2015-11-16 Thread Marc Zyngier
Here's a couple of fixes for KVM/arm64:

- The first one addresses a misinterpretation of the architecture
  spec, leading to the mishandling of I/O accesses generated from an
  AArch32 guest using banked registers.

- The second one is a workaround for a Cortex-A57 erratum.

Both patches are based on v4.4-rc1.

Marc Zyngier (2):
  arm64: KVM: Fix AArch32 to AArch64 register mapping
  arm64: KVM: Add workaround for Cortex-A57 erratum 834220

 arch/arm64/Kconfig   | 21 +
 arch/arm64/include/asm/cpufeature.h  |  3 ++-
 arch/arm64/include/asm/kvm_emulate.h |  8 +---
 arch/arm64/kernel/cpu_errata.c   |  9 +
 arch/arm64/kvm/hyp.S |  6 ++
 arch/arm64/kvm/inject_fault.c|  2 +-
 6 files changed, 44 insertions(+), 5 deletions(-)

-- 
2.1.4



[PATCH 1/2] arm64: KVM: Fix AArch32 to AArch64 register mapping

2015-11-16 Thread Marc Zyngier
When running a 32bit guest under a 64bit hypervisor, the ARMv8
architecture defines a mapping of the 32bit registers in the 64bit
space. This includes banked registers that are being demultiplexed
over the 64bit ones.

On exception caused by an operation involving a 32bit register, the
HW exposes the register number in the ESR_EL2 register. It was so
far understood that SW had to compute which AArch64 register was
used (based on the current AArch32 mode and register
number).

It turns out that I misinterpreted the ARM ARM, and the clue is in
D1.20.1: "For some exceptions, the exception syndrome given in the
ESR_ELx identifies one or more register numbers from the issued
instruction that generated the exception. Where the exception is
taken from an Exception level using AArch32 these register numbers
give the AArch64 view of the register."

Which means that the HW is already giving us the translated version,
and that we shouldn't try to interpret it at all (for example, doing
an MMIO operation from the IRQ mode using the LR register leads to
very unexpected behaviours).

The fix is thus not to perform a call to vcpu_reg32() at all from
vcpu_reg(), and use whatever register number is supplied directly.
The only case we need to find out about the mapping is when we
actively generate a register access, which only occurs when injecting
a fault in a guest.

Signed-off-by: Marc Zyngier 
---
 arch/arm64/include/asm/kvm_emulate.h | 8 +---
 arch/arm64/kvm/inject_fault.c| 2 +-
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 17e92f0..3ca894e 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -99,11 +99,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
*vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
 }
 
+/*
+ * vcpu_reg should always be passed a register number coming from a
+ * read of ESR_EL2. Otherwise, it may give the wrong result on AArch32
+ * with banked registers.
+ */
 static inline unsigned long *vcpu_reg(const struct kvm_vcpu *vcpu, u8 reg_num)
 {
-   if (vcpu_mode_is_32bit(vcpu))
-   return vcpu_reg32(vcpu, reg_num);
-
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.regs[reg_num];
 }
 
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 85c5715..648112e 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -48,7 +48,7 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
/* Note: These now point to the banked copies */
*vcpu_spsr(vcpu) = new_spsr_value;
-   *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+   *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
/* Branch to exception vector */
if (sctlr & (1 << 13))
-- 
2.1.4
