From: Marc Zyngier <m...@kernel.org>

In preparation for save/restore of the timer state at EL2 for protected
VMs, introduce a couple of sync/flush primitives for the architected
timer, in much the same way as we have for the GIC.

Signed-off-by: Marc Zyngier <m...@kernel.org>
---
 arch/arm64/kvm/hyp/nvhe/hyp-main.c | 34 ++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 58515e5d24ec..32e7e1cad00f 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -63,6 +63,38 @@ static void sync_vgic_state(struct kvm_vcpu *host_vcpu,
                WRITE_ONCE(host_cpu_if->vgic_lr[i], shadow_cpu_if->vgic_lr[i]);
 }
 
+static void flush_timer_state(struct kvm_shadow_vcpu_state *shadow_state)
+{
+       struct kvm_vcpu *shadow_vcpu = &shadow_state->shadow_vcpu;
+
+       if (!shadow_state_is_protected(shadow_state))
+               return;
+
+       /*
+        * A shadow vcpu has no offset, and sees vtime == ptime. The
+        * ptimer is fully emulated by EL1 and cannot be trusted.
+        */
+       write_sysreg(0, cntvoff_el2);
+       isb();
+       write_sysreg_el0(__vcpu_sys_reg(shadow_vcpu, CNTV_CVAL_EL0), SYS_CNTV_CVAL);
+       write_sysreg_el0(__vcpu_sys_reg(shadow_vcpu, CNTV_CTL_EL0), SYS_CNTV_CTL);
+}
+
+static void sync_timer_state(struct kvm_shadow_vcpu_state *shadow_state)
+{
+       struct kvm_vcpu *shadow_vcpu = &shadow_state->shadow_vcpu;
+
+       if (!shadow_state_is_protected(shadow_state))
+               return;
+
+       /*
+        * Preserve the vtimer state so that it is always correct,
+        * even if the host tries to make a mess.
+        */
+       __vcpu_sys_reg(shadow_vcpu, CNTV_CVAL_EL0) = read_sysreg_el0(SYS_CNTV_CVAL);
+       __vcpu_sys_reg(shadow_vcpu, CNTV_CTL_EL0) = read_sysreg_el0(SYS_CNTV_CTL);
+}
+
 static void flush_shadow_state(struct kvm_shadow_vcpu_state *shadow_state)
 {
        struct kvm_vcpu *shadow_vcpu = &shadow_state->shadow_vcpu;
@@ -85,6 +117,7 @@ static void flush_shadow_state(struct kvm_shadow_vcpu_state *shadow_state)
        shadow_vcpu->arch.vsesr_el2     = host_vcpu->arch.vsesr_el2;
 
        flush_vgic_state(host_vcpu, shadow_vcpu);
+       flush_timer_state(shadow_state);
 }
 
 static void sync_shadow_state(struct kvm_shadow_vcpu_state *shadow_state)
@@ -102,6 +135,7 @@ static void sync_shadow_state(struct kvm_shadow_vcpu_state *shadow_state)
        host_vcpu->arch.flags           = shadow_vcpu->arch.flags;
 
        sync_vgic_state(host_vcpu, shadow_vcpu);
+       sync_timer_state(shadow_state);
 }
 
 static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
-- 
2.36.1.124.g0e6072fb45-goog

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to