Re: [PATCH 05/15] arm64: kvm: Build hyp-entry.S separately for VHE/nVHE

2020-05-07 Thread Marc Zyngier

On 2020-05-07 16:07, Marc Zyngier wrote:

On Thu, 30 Apr 2020 15:48:21 +0100,
David Brazdil  wrote:


[...]

hyp-entry.S contains implementation of KVM hyp vectors. This code is mostly
shared between VHE/nVHE, therefore compile it under both VHE and nVHE build
rules, with small differences hidden behind '#ifdef __HYPERVISOR__'. These are:

  * only nVHE should handle host HVCs, VHE will now panic,


That's not true. VHE does handle HVCs from the guest. If you make VHE
panic on guest exit, I'll come after you! ;-)


Duh, I can't read. "host HVCs". You can relax. ;-)

M.
--
Jazz is not dead. It just smells funny...
___
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm


Re: [PATCH 05/15] arm64: kvm: Build hyp-entry.S separately for VHE/nVHE

2020-05-07 Thread Marc Zyngier
On Thu, 30 Apr 2020 15:48:21 +0100,
David Brazdil  wrote:
> 
> This patch is part of a series which builds KVM's non-VHE hyp code separately
> from VHE and the rest of the kernel.

This sentence should be part of your cover letter, and not in the 5th
patch.

> 
> hyp-entry.S contains implementation of KVM hyp vectors. This code is mostly
> shared between VHE/nVHE, therefore compile it under both VHE and nVHE build
> rules, with small differences hidden behind '#ifdef __HYPERVISOR__'. These 
> are:
>   * only nVHE should handle host HVCs, VHE will now panic,

That's not true. VHE does handle HVCs from the guest. If you make VHE
panic on guest exit, I'll come after you! ;-)

>   * only nVHE needs kvm_hcall_table, so move host_hypcall.c to nvhe/,
>   * __smccc_workaround_1_smc is not needed by nVHE, only cpu_errata.c in
> kernel proper.

How comes? You certainly need to be able to use the generated code,
don't you? Or do you actually mean that the assembly code doesn't need
to live in the file that contains the vectors themselves (which I'd
agree with)?

> 
> Adjust code which selects which KVM hyp vecs to install to choose the correct
> VHE/nVHE symbol.
> 
> Signed-off-by: David Brazdil 
> ---
>  arch/arm64/include/asm/kvm_asm.h  |  7 +
>  arch/arm64/include/asm/kvm_mmu.h  | 13 +
>  arch/arm64/include/asm/mmu.h  |  7 -
>  arch/arm64/kernel/cpu_errata.c|  2 +-
>  arch/arm64/kernel/image-vars.h| 28 +++
>  arch/arm64/kvm/hyp/Makefile   |  2 +-
>  arch/arm64/kvm/hyp/hyp-entry.S| 27 --
>  arch/arm64/kvm/hyp/nvhe/Makefile  |  2 +-
>  .../arm64/kvm/hyp/{ => nvhe}/host_hypercall.c |  0
>  arch/arm64/kvm/va_layout.c|  2 +-
>  10 files changed, 65 insertions(+), 25 deletions(-)
>  rename arch/arm64/kvm/hyp/{ => nvhe}/host_hypercall.c (100%)
> 
> diff --git a/arch/arm64/include/asm/kvm_asm.h 
> b/arch/arm64/include/asm/kvm_asm.h
> index 99ab204519ca..cdaf3df8085d 100644
> --- a/arch/arm64/include/asm/kvm_asm.h
> +++ b/arch/arm64/include/asm/kvm_asm.h
> @@ -71,6 +71,13 @@ extern char __kvm_hyp_init[];
>  extern char __kvm_hyp_init_end[];
>  
>  extern char __kvm_hyp_vector[];
> +extern char kvm_nvhe_sym(__kvm_hyp_vector)[];

This is becoming pretty ugly. I'd rather we have a helper that emits
the declaration for both symbols. Or something.

> +
> +#ifdef CONFIG_KVM_INDIRECT_VECTORS
> +extern char __bp_harden_hyp_vecs[];
> +extern char kvm_nvhe_sym(__bp_harden_hyp_vecs)[];
> +extern atomic_t arm64_el2_vector_last_slot;
> +#endif
>  
>  extern void __kvm_flush_vm_context(void);
>  extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
> diff --git a/arch/arm64/include/asm/kvm_mmu.h 
> b/arch/arm64/include/asm/kvm_mmu.h
> index 30b0e8d6b895..0a5fa033422c 100644
> --- a/arch/arm64/include/asm/kvm_mmu.h
> +++ b/arch/arm64/include/asm/kvm_mmu.h
> @@ -468,7 +468,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, 
> gpa_t gpa,
>   * VHE, as we don't have hypervisor-specific mappings. If the system
>   * is VHE and yet selects this capability, it will be ignored.
>   */
> -#include 
> +#include 
>  
>  extern void *__kvm_bp_vect_base;
>  extern int __kvm_harden_el2_vector_slot;
> @@ -477,11 +477,11 @@ extern int __kvm_harden_el2_vector_slot;
>  static inline void *kvm_get_hyp_vector(void)
>  {
>   struct bp_hardening_data *data = arm64_get_bp_hardening_data();
> - void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
> + void *vect = kern_hyp_va(kvm_hyp_ref(__kvm_hyp_vector));

I find it pretty annoying (again) that I have to know where a symbol
lives (kernel or nVHE-EL2) to know which kvm_*_ref() helper to use.
Maybe there is no good solution to this, but still...

>   int slot = -1;
>  
>   if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
> - vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
> + vect = kern_hyp_va(kvm_hyp_ref(__bp_harden_hyp_vecs));
>   slot = data->hyp_vectors_slot;
>   }
>  
> @@ -510,12 +510,13 @@ static inline int kvm_map_vectors(void)
>   *  HBP +  HEL2 -> use hardened vectors and use exec mapping
>*/
>   if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
> - __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
> + __kvm_bp_vect_base = kvm_hyp_ref(__bp_harden_hyp_vecs);
>   __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
>   }
>  
>   if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
> - phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
> + phys_addr_t vect_pa =
> + __pa_symbol(kvm_nvhe_sym(__bp_harden_hyp_vecs));

Please keep the assignment on a single line (and screw checkpatch).

>   unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
>  
>   /*
> @@ -534

[PATCH 05/15] arm64: kvm: Build hyp-entry.S separately for VHE/nVHE

2020-04-30 Thread David Brazdil
This patch is part of a series which builds KVM's non-VHE hyp code separately
from VHE and the rest of the kernel.

hyp-entry.S contains implementation of KVM hyp vectors. This code is mostly
shared between VHE/nVHE, therefore compile it under both VHE and nVHE build
rules, with small differences hidden behind '#ifdef __HYPERVISOR__'. These are:
  * only nVHE should handle host HVCs, VHE will now panic,
  * only nVHE needs kvm_hcall_table, so move host_hypcall.c to nvhe/,
  * __smccc_workaround_1_smc is not needed by nVHE, only cpu_errata.c in
kernel proper.

Adjust code which selects which KVM hyp vecs to install to choose the correct
VHE/nVHE symbol.

Signed-off-by: David Brazdil 
---
 arch/arm64/include/asm/kvm_asm.h  |  7 +
 arch/arm64/include/asm/kvm_mmu.h  | 13 +
 arch/arm64/include/asm/mmu.h  |  7 -
 arch/arm64/kernel/cpu_errata.c|  2 +-
 arch/arm64/kernel/image-vars.h| 28 +++
 arch/arm64/kvm/hyp/Makefile   |  2 +-
 arch/arm64/kvm/hyp/hyp-entry.S| 27 --
 arch/arm64/kvm/hyp/nvhe/Makefile  |  2 +-
 .../arm64/kvm/hyp/{ => nvhe}/host_hypercall.c |  0
 arch/arm64/kvm/va_layout.c|  2 +-
 10 files changed, 65 insertions(+), 25 deletions(-)
 rename arch/arm64/kvm/hyp/{ => nvhe}/host_hypercall.c (100%)

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 99ab204519ca..cdaf3df8085d 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -71,6 +71,13 @@ extern char __kvm_hyp_init[];
 extern char __kvm_hyp_init_end[];
 
 extern char __kvm_hyp_vector[];
+extern char kvm_nvhe_sym(__kvm_hyp_vector)[];
+
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
+extern char __bp_harden_hyp_vecs[];
+extern char kvm_nvhe_sym(__bp_harden_hyp_vecs)[];
+extern atomic_t arm64_el2_vector_last_slot;
+#endif
 
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 30b0e8d6b895..0a5fa033422c 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -468,7 +468,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, 
gpa_t gpa,
  * VHE, as we don't have hypervisor-specific mappings. If the system
  * is VHE and yet selects this capability, it will be ignored.
  */
-#include 
+#include 
 
 extern void *__kvm_bp_vect_base;
 extern int __kvm_harden_el2_vector_slot;
@@ -477,11 +477,11 @@ extern int __kvm_harden_el2_vector_slot;
 static inline void *kvm_get_hyp_vector(void)
 {
struct bp_hardening_data *data = arm64_get_bp_hardening_data();
-   void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
+   void *vect = kern_hyp_va(kvm_hyp_ref(__kvm_hyp_vector));
int slot = -1;
 
if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
-   vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs));
+   vect = kern_hyp_va(kvm_hyp_ref(__bp_harden_hyp_vecs));
slot = data->hyp_vectors_slot;
}
 
@@ -510,12 +510,13 @@ static inline int kvm_map_vectors(void)
 *  HBP +  HEL2 -> use hardened vectors and use exec mapping
 */
if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
-   __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+   __kvm_bp_vect_base = kvm_hyp_ref(__bp_harden_hyp_vecs);
__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
}
 
if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
-   phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+   phys_addr_t vect_pa =
+   __pa_symbol(kvm_nvhe_sym(__bp_harden_hyp_vecs));
unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
 
/*
@@ -534,7 +535,7 @@ static inline int kvm_map_vectors(void)
 #else
 static inline void *kvm_get_hyp_vector(void)
 {
-   return kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
+   return kern_hyp_va(kvm_hyp_ref(__kvm_hyp_vector));
 }
 
 static inline int kvm_map_vectors(void)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 68140fdd89d6..4d913f6dd366 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -42,13 +42,6 @@ struct bp_hardening_data {
bp_hardening_cb_t   fn;
 };
 
-#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||\
- defined(CONFIG_HARDEN_EL2_VECTORS))
-
-extern char __bp_harden_hyp_vecs[];
-extern atomic_t arm64_el2_vector_last_slot;
-#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
-
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index a10232