As all the VNCR-capable system registers are nicely separated
from the rest of the crowd, let's set HCR_EL2.NV2 on and get
the ball rolling.

Signed-off-by: Marc Zyngier <m...@kernel.org>
---
 arch/arm64/include/asm/kvm_arm.h     |  1 +
 arch/arm64/include/asm/kvm_emulate.h | 23 +++++++++++++----------
 arch/arm64/include/asm/sysreg.h      |  1 +
 arch/arm64/kvm/hyp/vhe/switch.c      | 14 +++++++++++++-
 4 files changed, 28 insertions(+), 11 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index aa3bdce1b166..19998cf067ce 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -20,6 +20,7 @@
 #define HCR_AMVOFFEN   (UL(1) << 51)
 #define HCR_FIEN       (UL(1) << 47)
 #define HCR_FWB                (UL(1) << 46)
+#define HCR_NV2                (UL(1) << 45)
 #define HCR_AT         (UL(1) << 44)
 #define HCR_NV1                (UL(1) << 43)
 #define HCR_NV         (UL(1) << 42)
diff --git a/arch/arm64/include/asm/kvm_emulate.h 
b/arch/arm64/include/asm/kvm_emulate.h
index 2128d623a8b3..7c09d36fd593 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -245,21 +245,24 @@ static inline bool is_hyp_ctxt(const struct kvm_vcpu 
*vcpu)
 
 static inline u64 __fixup_spsr_el2_write(struct kvm_cpu_context *ctxt, u64 val)
 {
-       if (!__vcpu_el2_e2h_is_set(ctxt)) {
-               /*
-                * Clear the .M field when writing SPSR to the CPU, so that we
-                * can detect when the CPU clobbered our SPSR copy during a
-                * local exception.
-                */
-               val &= ~0xc;
-       }
+       struct kvm_vcpu *vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+
+       if (vcpu_has_nv2(vcpu) || __vcpu_el2_e2h_is_set(ctxt))
+               return val;
 
-       return val;
+       /*
+        * Clear the .M field when writing SPSR to the CPU, so that we
+        * can detect when the CPU clobbered our SPSR copy during a
+        * local exception.
+        */
+       return val &= ~0xc;
 }
 
 static inline u64 __fixup_spsr_el2_read(const struct kvm_cpu_context *ctxt, 
u64 val)
 {
-       if (__vcpu_el2_e2h_is_set(ctxt))
+       struct kvm_vcpu *vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+
+       if (vcpu_has_nv2(vcpu) || __vcpu_el2_e2h_is_set(ctxt))
                return val;
 
        /*
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index ff6d3af8ed34..0a21be5263d9 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -551,6 +551,7 @@
 #define SYS_TCR_EL2                    sys_reg(3, 4, 2, 0, 2)
 #define SYS_VTTBR_EL2                  sys_reg(3, 4, 2, 1, 0)
 #define SYS_VTCR_EL2                   sys_reg(3, 4, 2, 1, 2)
+#define SYS_VNCR_EL2                   sys_reg(3, 4, 2, 2, 0)
 
 #define SYS_ZCR_EL2                    sys_reg(3, 4, 1, 2, 0)
 #define SYS_TRFCR_EL2                  sys_reg(3, 4, 1, 2, 1)
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 5e8eafac27c6..65120c9027d6 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -44,7 +44,13 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
                         * the EL1 virtual memory control register accesses
                         * as well as the AT S1 operations.
                         */
-                       hcr |= HCR_TVM | HCR_TRVM | HCR_AT | HCR_TTLB | HCR_NV1;
+                       if (vcpu_has_nv2(vcpu)) {
+                               hcr &= ~HCR_TVM;
+                       } else {
+                               hcr |= HCR_TVM | HCR_TRVM | HCR_TTLB;
+                       }
+
+                       hcr |= HCR_AT | HCR_NV1;
                } else {
                        /*
                         * For a guest hypervisor on v8.1 (VHE), allow to
@@ -78,6 +84,12 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
                        if (!vcpu_el2_tge_is_set(vcpu))
                                hcr |= HCR_AT | HCR_TTLB;
                }
+
+               if (vcpu_has_nv2(vcpu)) {
+                       hcr |= HCR_AT | HCR_TTLB | HCR_NV2;
+                       write_sysreg_s(vcpu->arch.ctxt.vncr_array,
+                                      SYS_VNCR_EL2);
+               }
        } else if (vcpu_has_nv(vcpu)) {
                u64 vhcr_el2 = __vcpu_sys_reg(vcpu, HCR_EL2);
 
-- 
2.30.2

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

Reply via email to