From: Jintack Lim <jintack....@linaro.org>

Forward traps caused by FP/ASIMD register accesses to the virtual EL2 if
the virtual CPTR_EL2.TFP bit is set. Note that when TFP is set, FP/ASIMD
register accesses from EL2 as well as from NS EL0/1 trap to EL2, so we
don't need to check the VM's exception level.
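For reference, the forwarding decision introduced below boils down to
the following check in the EL2 exit path (a sketch only, using the
helpers referenced by this patch):

	/* FP/ASIMD trap taken while a nested guest is in use */
	if (nested_virt_in_use(vcpu) &&
	    (vcpu_read_sys_reg(vcpu, CPTR_EL2) & CPTR_EL2_TFP))
		/* reflect the trap back into the virtual EL2 */
		return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_hsr(vcpu));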

Signed-off-by: Jintack Lim <jintack....@linaro.org>
Signed-off-by: Christoffer Dall <christoffer.d...@arm.com>
Signed-off-by: Marc Zyngier <marc.zyng...@arm.com>
---
 arch/arm64/include/asm/kvm_emulate.h |  7 +++++++
 arch/arm64/kvm/handle_exit.c         | 16 ++++++++++++----
 arch/arm64/kvm/hyp/switch.c          | 11 +++++++++--
 3 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 2644258e96ba..73d8c54a52c6 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -29,6 +29,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmio.h>
+#include <asm/kvm_nested.h>
 #include <asm/ptrace.h>
 #include <asm/cputype.h>
 #include <asm/virt.h>
@@ -357,6 +358,12 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
        return mode != PSR_MODE_EL0t;
 }
 
+static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
+{
+       return nested_virt_in_use(vcpu) &&
+               (vcpu_read_sys_reg(vcpu, CPTR_EL2) & CPTR_EL2_TFP);
+}
+
 static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.fault.esr_el2;
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index ddba212fd6ec..39602a4c1d61 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -104,11 +104,19 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 /*
- * Guest access to FP/ASIMD registers are routed to this handler only
- * when the system doesn't support FP/ASIMD.
+ * This handles the cases where the system does not support FP/ASIMD or when
+ * we are running nested virtualization and the guest hypervisor is trapping
+ * FP/ASIMD accesses by its own guest.
+ *
+ * All other handling of guest vs. host FP/ASIMD register state is handled in
+ * fixup_guest_exit().
  */
-static int handle_no_fpsimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
+static int kvm_handle_fpasimd(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
+       if (guest_hyp_fpsimd_traps_enabled(vcpu))
+               return kvm_inject_nested_sync(vcpu, kvm_vcpu_get_hsr(vcpu));
+
+       /* This is the case when the system doesn't support FP/ASIMD. */
        kvm_inject_undefined(vcpu);
        return 1;
 }
@@ -277,7 +285,7 @@ static exit_handle_fn arm_exit_handlers[] = {
        [ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
        [ESR_ELx_EC_BKPT32]     = kvm_handle_guest_debug,
        [ESR_ELx_EC_BRK64]      = kvm_handle_guest_debug,
-       [ESR_ELx_EC_FP_ASIMD]   = handle_no_fpsimd,
+       [ESR_ELx_EC_FP_ASIMD]   = kvm_handle_fpasimd,
        [ESR_ELx_EC_PAC]        = kvm_handle_ptrauth,
 };
 
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 62359c7c3d6b..9b5129cdc26a 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -351,11 +351,18 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
            hsr_ec != ESR_ELx_EC_SVE)
                return false;
 
-       /* Don't handle SVE traps for non-SVE vcpus here: */
-       if (!sve_guest)
+       /*
+        * Don't handle SVE traps for non-SVE vcpus here. This
+        * includes NV guests for the time being.
+        */
+       if (!sve_guest) {
                if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
                        return false;
 
+               if (guest_hyp_fpsimd_traps_enabled(vcpu))
+                       return false;
+       }
+
        /* Valid trap.  Switch the context: */
 
        if (vhe) {
-- 
2.20.1
