Writes to the physical SVCR.SM and SVCR.ZA change the state of PSTATE.SM
and PSTATE.ZA, causing other floating point state to reset. Emulate this
behaviour for writes done via the KVM userspace ABI.

Setting PSTATE.ZA to 1 causes ZA and ZT0 to be reset to 0; these are stored
in sme_state. Setting PSTATE.ZA to 0 causes ZA and ZT0 to become
inaccessible, so no reset of them is needed.

Any change in PSTATE.SM causes the V, Z, P, FFR and FPMR registers to be
reset to 0 and FPSR to be reset to 0x800009f.

Rather than introduce a requirement that the vector configuration be
finalised before writing to SVCR, we check for finalisation before updating
the SVE and SME specific state; when finalisation does happen, that state
will be allocated with an initial value of 0.

Signed-off-by: Mark Brown <[email protected]>
---
 arch/arm64/include/asm/kvm_host.h | 24 ++++++++++++++++++++++++
 arch/arm64/include/asm/sysreg.h   |  2 ++
 arch/arm64/kvm/sys_regs.c         | 30 +++++++++++++++++++++++++++++-
 3 files changed, 55 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/kvm_host.h 
b/arch/arm64/include/asm/kvm_host.h
index 094cbf8e7022..aa0817eb1b48 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -1172,6 +1172,30 @@ struct kvm_vcpu_arch {
 
 #define vcpu_sve_state_size(vcpu) 
sve_state_size_from_vl((vcpu)->arch.max_vl[ARM64_VEC_SVE])
 
+#define vcpu_sme_state(vcpu) (kern_hyp_va((vcpu)->arch.sme_state))
+
+#define sme_state_size_from_vl(vl, sme2) ({                            \
+       size_t __size_ret;                                              \
+       unsigned int __vq;                                              \
+                                                                       \
+       if (WARN_ON(!sve_vl_valid(vl))) {                               \
+               __size_ret = 0;                                         \
+       } else {                                                        \
+               __vq = sve_vq_from_vl(vl);                              \
+               __size_ret = ZA_SIG_REGS_SIZE(__vq);                    \
+               if (sme2)                                               \
+                       __size_ret += ZT_SIG_REG_SIZE;                  \
+       }                                                               \
+                                                                       \
+       __size_ret;                                                     \
+})
+
+#define vcpu_sme_state_size(vcpu) ({                                   \
+       unsigned long __vl;                                             \
+       __vl = (vcpu)->arch.max_vl[ARM64_VEC_SME];                      \
+       sme_state_size_from_vl(__vl, vcpu_has_sme2(vcpu));              \
+})
+
 /*
  * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
  * memory backed version of a register, and not the one most recently
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index f4436ecc630c..90d398429d80 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -1101,6 +1101,8 @@
 #define gicr_insn(insn)                        
read_sysreg_s(GICV5_OP_GICR_##insn)
 #define gic_insn(v, insn)              write_sysreg_s(v, GICV5_OP_GIC_##insn)
 
+#define FPSR_RESET_VALUE       0x800009f
+
 #ifdef __ASSEMBLER__
 
        .macro  mrs_s, rt, sreg
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 0ddb89723819..8a9fd8d69d6e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -927,6 +927,34 @@ static unsigned int hidden_visibility(const struct 
kvm_vcpu *vcpu,
        return REG_HIDDEN;
 }
 
+static int set_svcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+                   u64 val)
+{
+       u64 old = __vcpu_sys_reg(vcpu, rd->reg);
+
+       if (val & SVCR_RES0)
+               return -EINVAL;
+
+       if ((val & SVCR_ZA) && !(old & SVCR_ZA) &&
+           kvm_arm_vcpu_vec_finalized(vcpu))
+               memset(vcpu->arch.sme_state, 0, vcpu_sme_state_size(vcpu));
+
+       if ((val & SVCR_SM) != (old & SVCR_SM)) {
+               memset(vcpu->arch.ctxt.fp_regs.vregs, 0,
+                      sizeof(vcpu->arch.ctxt.fp_regs.vregs));
+
+               if (kvm_arm_vcpu_vec_finalized(vcpu))
+                       memset(vcpu->arch.sve_state, 0,
+                              vcpu_sve_state_size(vcpu));
+
+               __vcpu_assign_sys_reg(vcpu, FPMR, 0);
+               vcpu->arch.ctxt.fp_regs.fpsr = FPSR_RESET_VALUE;
+       }
+
+       __vcpu_assign_sys_reg(vcpu, rd->reg, val);
+       return 0;
+}
+
 static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
                                   const struct sys_reg_desc *r)
 {
@@ -3535,7 +3563,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
                    CTR_EL0_DminLine_MASK |
                    CTR_EL0_L1Ip_MASK |
                    CTR_EL0_IminLine_MASK),
-       { SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = 
sme_visibility  },
+       { SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = 
sme_visibility, .set_user = set_svcr },
        { SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = 
fp8_visibility },
 
        { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,

-- 
2.47.3


Reply via email to