From: Marc Zyngier <m...@kernel.org>

Implement lazy save/restore of the host FPSIMD register state at EL2.
This allows us to save/restore guest FPSIMD registers without involving
the host and means that we can avoid having to repopulate the shadow
register state on every flush.

Signed-off-by: Marc Zyngier <m...@kernel.org>
---
 arch/arm64/kvm/hyp/nvhe/hyp-main.c | 57 ++++++++++++++++++++++++++----
 1 file changed, 51 insertions(+), 6 deletions(-)

diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 2a12d6f710ef..228736a9ab40 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -20,6 +20,14 @@
 
 #include <linux/irqchip/arm-gic-v3.h>
 
+/*
+ * Host FPSIMD state. Written to when the guest accesses its own FPSIMD state,
+ * and read when the guest state is live and we need to switch back to the host.
+ *
+ * Only valid when the KVM_ARM64_FP_ENABLED flag is set in the shadow structure.
+ */
+static DEFINE_PER_CPU(struct user_fpsimd_state, loaded_host_fpsimd_state);
+
 DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
 void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
@@ -195,10 +203,8 @@ static void flush_shadow_state(struct kvm_shadow_vcpu_state *shadow_state)
 
        shadow_vcpu->arch.hcr_el2       = host_vcpu->arch.hcr_el2;
        shadow_vcpu->arch.mdcr_el2      = host_vcpu->arch.mdcr_el2;
-       shadow_vcpu->arch.cptr_el2      = host_vcpu->arch.cptr_el2;
 
        shadow_vcpu->arch.debug_ptr     = kern_hyp_va(host_vcpu->arch.debug_ptr);
-       shadow_vcpu->arch.host_fpsimd_state = host_vcpu->arch.host_fpsimd_state;
 
        shadow_vcpu->arch.vsesr_el2     = host_vcpu->arch.vsesr_el2;
 
@@ -235,7 +241,6 @@ static void sync_shadow_state(struct kvm_shadow_vcpu_state *shadow_state,
        host_vcpu->arch.ctxt            = shadow_vcpu->arch.ctxt;
 
        host_vcpu->arch.hcr_el2         = shadow_vcpu->arch.hcr_el2;
-       host_vcpu->arch.cptr_el2        = shadow_vcpu->arch.cptr_el2;
 
        sync_vgic_state(host_vcpu, shadow_vcpu);
        sync_timer_state(shadow_state);
@@ -262,6 +267,27 @@ static void sync_shadow_state(struct kvm_shadow_vcpu_state *shadow_state,
        shadow_state->exit_code = exit_reason;
 }
 
+static void fpsimd_host_restore(void)
+{
+       sysreg_clear_set(cptr_el2, CPTR_EL2_TZ | CPTR_EL2_TFP, 0);
+       isb();
+
+       if (unlikely(is_protected_kvm_enabled())) {
+               struct kvm_shadow_vcpu_state *shadow_state = pkvm_loaded_shadow_vcpu_state();
+               struct kvm_vcpu *shadow_vcpu = &shadow_state->shadow_vcpu;
+               struct user_fpsimd_state *host_fpsimd_state = this_cpu_ptr(&loaded_host_fpsimd_state);
+
+               __fpsimd_save_state(&shadow_vcpu->arch.ctxt.fp_regs);
+               __fpsimd_restore_state(host_fpsimd_state);
+
+               shadow_vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED;
+               shadow_vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+       }
+
+       if (system_supports_sve())
+               sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+}
+
 static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
 {
        DECLARE_REG(unsigned int, shadow_handle, host_ctxt, 1);
@@ -291,6 +317,9 @@ static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)
                *last_ran = shadow_vcpu->vcpu_id;
        }
 
+       shadow_vcpu->arch.host_fpsimd_state = this_cpu_ptr(&loaded_host_fpsimd_state);
+       shadow_vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+
        if (shadow_state_is_protected(shadow_state)) {
                /* Propagate WFx trapping flags, trap ptrauth */
                shadow_vcpu->arch.hcr_el2 &= ~(HCR_TWE | HCR_TWI |
@@ -310,6 +339,10 @@ static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)
 
        if (shadow_state) {
                struct kvm_vcpu *host_vcpu = shadow_state->host_vcpu;
+               struct kvm_vcpu *shadow_vcpu = &shadow_state->shadow_vcpu;
+
+               if (shadow_vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
+                       fpsimd_host_restore();
 
                if (!shadow_state_is_protected(shadow_state) &&
                        !(READ_ONCE(host_vcpu->arch.flags) & KVM_ARM64_PKVM_STATE_DIRTY))
@@ -377,6 +410,19 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
                ret = __kvm_vcpu_run(&shadow_state->shadow_vcpu);
 
                sync_shadow_state(shadow_state, ret);
+
+               if (shadow_state->shadow_vcpu.arch.flags & KVM_ARM64_FP_ENABLED) {
+                       /*
+                        * The guest has used the FP, trap all accesses
+                        * from the host (both FP and SVE).
+                        */
+                       u64 reg = CPTR_EL2_TFP;
+
+                       if (system_supports_sve())
+                               reg |= CPTR_EL2_TZ;
+
+                       sysreg_clear_set(cptr_el2, 0, reg);
+               }
        } else {
                ret = __kvm_vcpu_run(vcpu);
        }
@@ -707,10 +753,9 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
        case ESR_ELx_EC_SMC64:
                handle_host_smc(host_ctxt);
                break;
+       case ESR_ELx_EC_FP_ASIMD:
        case ESR_ELx_EC_SVE:
-               sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
-               isb();
-               sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
+               fpsimd_host_restore();
                break;
        case ESR_ELx_EC_IABT_LOW:
        case ESR_ELx_EC_DABT_LOW:
-- 
2.36.1.124.g0e6072fb45-goog

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to