From: Marc Zyngier <m...@kernel.org>

Protected vCPUs always run with a virtual counter offset of 0, so don't
bother trying to update it from the host.

Signed-off-by: Marc Zyngier <m...@kernel.org>
---
 arch/arm64/kvm/arch_timer.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
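
For reference, the net effect on the vtimer offset lookup, as a minimal
sketch (assumes kvm_vm_is_protected() and __vcpu_sys_reg() as used in
this tree; effective_vtimer_offset() is purely illustrative and not part
of the patch):

	static u64 effective_vtimer_offset(struct kvm_vcpu *vcpu)
	{
		/* Protected vCPUs: any host-provided offset is ignored */
		if (kvm_vm_is_protected(vcpu->kvm))
			return 0;

		return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
	}

In particular, a userspace write to KVM_REG_ARM_TIMER_CNT on a protected
VM no longer changes the offset seen by the guest.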

diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 6e542e2eae32..63d06f372eb1 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -88,7 +88,9 @@ static u64 timer_get_offset(struct arch_timer_context *ctxt)
 
        switch(arch_timer_ctx_index(ctxt)) {
        case TIMER_VTIMER:
-               return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+               if (likely(!kvm_vm_is_protected(vcpu->kvm)))
+                       return __vcpu_sys_reg(vcpu, CNTVOFF_EL2);
+               fallthrough;
        default:
                return 0;
        }
@@ -753,6 +755,9 @@ static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tmp;
 
+       if (unlikely(kvm_vm_is_protected(vcpu->kvm)))
+               cntvoff = 0;
+
        mutex_lock(&kvm->lock);
        kvm_for_each_vcpu(i, tmp, kvm)
                timer_set_offset(vcpu_vtimer(tmp), cntvoff);
-- 
2.36.1.124.g0e6072fb45-goog
