Rather than accessing struct kvm_vcpu_arch_run via the vcpu, pass it
explicitly as an argument where needed. This will allow a hyp-private
copy of the struct to be swapped in when running in protected mode.

Signed-off-by: Andrew Scull <asc...@google.com>
---
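Note (illustration only, not part of the patch): a minimal sketch of how the
explicit run argument might later let protected mode swap in a hyp-private
copy of the state. The pkvm_shadow_vcpu type, its fields and the helper name
below are hypothetical, used purely to show the idea:

	/* Hypothetical hyp-private container for a protected vCPU. */
	struct pkvm_shadow_vcpu {
		struct kvm_vcpu *host_vcpu;	/* host-shared vcpu */
		struct kvm_vcpu_arch_run run;	/* hyp-private copy */
	};

	/*
	 * Pick the run state to pass to __activate_traps() and
	 * fixup_guest_exit(): the hyp-private copy when running a protected
	 * guest, the host-shared one otherwise, so the host cannot tamper
	 * with the FP/SVE flags of a protected guest.
	 */
	static struct kvm_vcpu_arch_run *
	vcpu_run_state(struct kvm_vcpu *vcpu, struct pkvm_shadow_vcpu *shadow)
	{
		if (shadow)
			return &shadow->run;
		return &vcpu->arch.run;
	}
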
 arch/arm64/kvm/hyp/include/hyp/switch.h | 15 +++++++++------
 arch/arm64/kvm/hyp/nvhe/switch.c        |  8 ++++----
 arch/arm64/kvm/hyp/vhe/switch.c         |  2 +-
 3 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 3f299c7d42cd..53120cccd2a5 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -178,7 +178,8 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
 }
 
 /* Check for an FPSIMD/SVE trap and handle as appropriate */
-static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
+static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu,
+                                      struct kvm_vcpu_arch_run *run)
 {
        bool vhe, sve_guest, sve_host;
        u8 esr_ec;
@@ -227,7 +228,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 
        isb();
 
-       if (vcpu->arch.run.flags & KVM_ARM64_RUN_FP_HOST) {
+       if (run->flags & KVM_ARM64_RUN_FP_HOST) {
                /*
                 * In the SVE case, VHE is assumed: it is enforced by
                 * Kconfig and kvm_arch_init().
@@ -243,7 +244,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
                        __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
                }
 
-               vcpu->arch.run.flags &= ~KVM_ARM64_RUN_FP_HOST;
+               run->flags &= ~KVM_ARM64_RUN_FP_HOST;
        }
 
        if (sve_guest) {
@@ -261,7 +262,7 @@ static inline bool __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
 
        vcpu->arch.fpsimd_cpu = smp_processor_id();
 
-       vcpu->arch.run.flags |= KVM_ARM64_RUN_FP_ENABLED;
+       run->flags |= KVM_ARM64_RUN_FP_ENABLED;
 
        return true;
 }
@@ -389,7 +390,9 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
  * the guest, false when we should restore the host state and return to the
  * main run loop.
  */
-static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu,
+                                   struct kvm_vcpu_arch_run *run,
+                                   u64 *exit_code)
 {
        if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
                vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
@@ -430,7 +433,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
         * undefined instruction exception to the guest.
         * Similarly for trapped SVE accesses.
         */
-       if (__hyp_handle_fpsimd(vcpu))
+       if (__hyp_handle_fpsimd(vcpu, run))
                goto guest;
 
        if (__hyp_handle_ptrauth(vcpu))
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index f0a32c993ac4..076c2200324f 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -32,7 +32,7 @@ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
 DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
 DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
 
-static void __activate_traps(struct kvm_vcpu *vcpu)
+static void __activate_traps(struct kvm_vcpu *vcpu, struct kvm_vcpu_arch_run *run)
 {
        u64 val;
 
@@ -41,7 +41,7 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
 
        val = CPTR_EL2_DEFAULT;
        val |= CPTR_EL2_TTA | CPTR_EL2_TZ | CPTR_EL2_TAM;
-       if (!(vcpu->arch.run.flags & KVM_ARM64_RUN_FP_ENABLED)) {
+       if (!(run->flags & KVM_ARM64_RUN_FP_ENABLED)) {
                val |= CPTR_EL2_TFP;
                __activate_traps_fpsimd32(vcpu);
        }
@@ -206,7 +206,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
        __sysreg_restore_state_nvhe(guest_ctxt);
 
        __load_guest_stage2(kern_hyp_va(vcpu->arch.hw_mmu));
-       __activate_traps(vcpu);
+       __activate_traps(vcpu, &vcpu->arch.run);
 
        __hyp_vgic_restore_state(vcpu);
        __timer_enable_traps(vcpu);
@@ -218,7 +218,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
                exit_code = __guest_enter(vcpu);
 
                /* And we're baaack! */
-       } while (fixup_guest_exit(vcpu, &exit_code));
+       } while (fixup_guest_exit(vcpu, &vcpu->arch.run, &exit_code));
 
        __sysreg_save_state_nvhe(guest_ctxt);
        __sysreg32_save_state(vcpu);
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 5bb6a2cf574d..ff3ce150d636 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -143,7 +143,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
                exit_code = __guest_enter(vcpu);
 
                /* And we're baaack! */
-       } while (fixup_guest_exit(vcpu, &exit_code));
+       } while (fixup_guest_exit(vcpu, &vcpu->arch.run, &exit_code));
 
        sysreg_save_guest_state_vhe(guest_ctxt);
 
-- 
2.30.1.766.gb4fecdf3b7-goog
