Since the function will grow as a result of adding SME support, move the
SVE code out of fpsimd_lazy_switch_to_host(). No functional change, just
code motion.

Signed-off-by: Mark Brown <[email protected]>
---
 arch/arm64/kvm/hyp/include/hyp/switch.h | 46 +++++++++++++++++++--------------
 1 file changed, 26 insertions(+), 20 deletions(-)

diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 4e38610be19a..5b99aa479c59 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -483,11 +483,11 @@ static inline void fpsimd_lazy_switch_to_guest(struct kvm_vcpu *vcpu)
        }
 }
 
-static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
+static inline void sve_lazy_switch_to_host(struct kvm_vcpu *vcpu)
 {
        u64 zcr_el1, zcr_el2;
 
-       if (!guest_owns_fp_regs())
+       if (!vcpu_has_sve(vcpu))
                return;
 
        /*
@@ -498,29 +498,35 @@ static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
         * synchronization event, we don't need an ISB here to avoid taking
         * traps for anything that was exposed to the guest.
         */
-       if (vcpu_has_sve(vcpu)) {
-               zcr_el1 = read_sysreg_el1(SYS_ZCR);
-               __vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1);
+       zcr_el1 = read_sysreg_el1(SYS_ZCR);
+       __vcpu_assign_sys_reg(vcpu, vcpu_sve_zcr_elx(vcpu), zcr_el1);
 
-               /*
-                * The guest's state is always saved using the guest's max VL.
-                * Ensure that the host has the guest's max VL active such that
-                * the host can save the guest's state lazily, but don't
-                * artificially restrict the host to the guest's max VL.
-                */
-               if (has_vhe()) {
-                       zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
-                       write_sysreg_el2(zcr_el2, SYS_ZCR);
-               } else {
-                       zcr_el2 = sve_vq_from_vl(kvm_host_max_vl[ARM64_VEC_SVE]) - 1;
-                       write_sysreg_el2(zcr_el2, SYS_ZCR);
+       /*
+        * The guest's state is always saved using the guest's max VL.
+        * Ensure that the host has the guest's max VL active such
+        * that the host can save the guest's state lazily, but don't
+        * artificially restrict the host to the guest's max VL.
+        */
+       if (has_vhe()) {
+               zcr_el2 = vcpu_sve_max_vq(vcpu) - 1;
+               write_sysreg_el2(zcr_el2, SYS_ZCR);
+       } else {
+               zcr_el2 = sve_vq_from_vl(kvm_host_max_vl[ARM64_VEC_SVE]) - 1;
+               write_sysreg_el2(zcr_el2, SYS_ZCR);
 
-                       zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
-                       write_sysreg_el1(zcr_el1, SYS_ZCR);
-               }
+               zcr_el1 = vcpu_sve_max_vq(vcpu) - 1;
+               write_sysreg_el1(zcr_el1, SYS_ZCR);
        }
 }
 
+static inline void fpsimd_lazy_switch_to_host(struct kvm_vcpu *vcpu)
+{
+       if (!guest_owns_fp_regs())
+               return;
+
+       sve_lazy_switch_to_host(vcpu);
+}
+
 static void kvm_hyp_save_fpsimd_host(struct kvm_vcpu *vcpu)
 {
        /*

-- 
2.47.3


Reply via email to