Define the new system registers that POE introduces and context-switch them.

Signed-off-by: Joey Gouly <joey.go...@arm.com>
Cc: Marc Zyngier <m...@kernel.org>
Cc: Oliver Upton <oliver.up...@linux.dev>
Cc: Catalin Marinas <catalin.mari...@arm.com>
Cc: Will Deacon <w...@kernel.org>
Reviewed-by: Marc Zyngier <m...@kernel.org>
---
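Note for reviewers (below the scissors, so not part of the commit message): the
save/restore added here is gated on both host support and the guest's view of
ID_AA64MMFR3_EL1.S1POE, following the same pattern as ctxt_has_tcrx(). As a
rough, self-contained sketch of that decision — with made-up names
(fake_guest, host_has_poe, guest_has_s1poe), not the kernel helpers:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model only: these are stand-ins, not the kernel's types. */
struct fake_guest {
        bool id_aa64mmfr3_s1poe;  /* guest sees ID_AA64MMFR3_EL1.S1POE >= IMP */
};

static bool host_has_poe(void)    /* stands in for system_supports_poe() */
{
        return true;              /* pretend the host CPU implements FEAT_S1POE */
}

/* Mirrors the shape of ctxt_has_s1poe(): host support AND guest feature. */
static bool guest_has_s1poe(const struct fake_guest *g)
{
        if (!host_has_poe())
                return false;

        return g->id_aa64mmfr3_s1poe;
}

int main(void)
{
        struct fake_guest g = { .id_aa64mmfr3_s1poe = true };

        if (guest_has_s1poe(&g))
                printf("save/restore POR_EL1 and POR_EL0 around guest entry/exit\n");
        else
                printf("leave POR_* alone; guest accesses are trapped via the nPOR_* FGU bits\n");

        return 0;
}

When the guest does not have S1POE, the last hunk in sys_regs.c moves the
nPOR_EL1/nPOR_EL0 bits from the always-set group into the conditional FGU set,
so the registers UNDEF for such guests.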
 arch/arm64/include/asm/kvm_host.h          |  4 ++++
 arch/arm64/include/asm/vncr_mapping.h      |  1 +
 arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 27 ++++++++++++++++++++++
 arch/arm64/kvm/sys_regs.c                  | 19 +++++++++++++--
 4 files changed, 49 insertions(+), 2 deletions(-)
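A second note on why POR_EL0 is handled in __sysreg_save/restore_common_state():
the register is writable from EL0 and changes the overlay permissions applied to
unprivileged accesses, which the host's uaccess primitives depend on, so the
guest value must be saved and the host value restored before any host uaccess
can run. For reference, a hypothetical EL0-side sketch of touching the register
directly — the raw encoding and the FEAT_S1POE requirement are assumptions
stated in the comment, and read_por_el0()/write_por_el0() are not kernel code:

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical EL0 snippet, not part of the patch. S3_3_C10_C2_4 is used on
 * the assumption that it is POR_EL0's encoding (op0=3, op1=3, CRn=10, CRm=2,
 * op2=4) and that the toolchain may not know the register by name. It will
 * UNDEF unless FEAT_S1POE is implemented and exposed at this exception level.
 */
static inline uint64_t read_por_el0(void)
{
        uint64_t v;

        asm volatile("mrs %0, S3_3_C10_C2_4" : "=r" (v));
        return v;
}

static inline void write_por_el0(uint64_t v)
{
        asm volatile("msr S3_3_C10_C2_4, %0" : : "r" (v));
        asm volatile("isb");
}

int main(void)
{
        /* Each 4-bit field selects the overlay permission for one POIndex. */
        uint64_t por = read_por_el0();

        printf("POR_EL0 = %#llx\n", (unsigned long long)por);
        write_por_el0(por);     /* write the same value back: a no-op */

        return 0;
}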

diff --git arch/arm64/include/asm/kvm_host.h arch/arm64/include/asm/kvm_host.h
index a33f5996ca9f..5c9de7692201 100644
--- arch/arm64/include/asm/kvm_host.h
+++ arch/arm64/include/asm/kvm_host.h
@@ -446,6 +446,8 @@ enum vcpu_sysreg {
        GCR_EL1,        /* Tag Control Register */
        TFSRE0_EL1,     /* Tag Fault Status Register (EL0) */
 
+       POR_EL0,        /* Permission Overlay Register 0 (EL0) */
+
        /* 32bit specific registers. */
        DACR32_EL2,     /* Domain Access Control Register */
        IFSR32_EL2,     /* Instruction Fault Status Register */
@@ -517,6 +519,8 @@ enum vcpu_sysreg {
        VNCR(PIR_EL1),   /* Permission Indirection Register 1 (EL1) */
        VNCR(PIRE0_EL1), /*  Permission Indirection Register 0 (EL1) */
 
+       VNCR(POR_EL1),  /* Permission Overlay Register 1 (EL1) */
+
        VNCR(HFGRTR_EL2),
        VNCR(HFGWTR_EL2),
        VNCR(HFGITR_EL2),
diff --git arch/arm64/include/asm/vncr_mapping.h arch/arm64/include/asm/vncr_mapping.h
index df2c47c55972..06f8ec0906a6 100644
--- arch/arm64/include/asm/vncr_mapping.h
+++ arch/arm64/include/asm/vncr_mapping.h
@@ -52,6 +52,7 @@
 #define VNCR_PIRE0_EL1         0x290
 #define VNCR_PIRE0_EL2         0x298
 #define VNCR_PIR_EL1           0x2A0
+#define VNCR_POR_EL1           0x2A8
 #define VNCR_ICH_LR0_EL2        0x400
 #define VNCR_ICH_LR1_EL2        0x408
 #define VNCR_ICH_LR2_EL2        0x410
diff --git arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index 4c0fdabaf8ae..1579a3c08a36 100644
--- arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -16,9 +16,15 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
+static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt);
+
 static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
        ctxt_sys_reg(ctxt, MDSCR_EL1)   = read_sysreg(mdscr_el1);
+
+       // POR_EL0 can affect uaccess, so must be saved/restored early.
+       if (ctxt_has_s1poe(ctxt))
+               ctxt_sys_reg(ctxt, POR_EL0)     = read_sysreg_s(SYS_POR_EL0);
 }
 
 static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
@@ -66,6 +72,17 @@ static inline bool ctxt_has_tcrx(struct kvm_cpu_context *ctxt)
        return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, TCRX, IMP);
 }
 
+static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt)
+{
+       struct kvm_vcpu *vcpu;
+
+       if (!system_supports_poe())
+               return false;
+
+       vcpu = ctxt_to_vcpu(ctxt);
+       return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1POE, IMP);
+}
+
 static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 {
        ctxt_sys_reg(ctxt, SCTLR_EL1)   = read_sysreg_el1(SYS_SCTLR);
@@ -80,6 +97,9 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
                        ctxt_sys_reg(ctxt, PIR_EL1)     = read_sysreg_el1(SYS_PIR);
                        ctxt_sys_reg(ctxt, PIRE0_EL1)   = read_sysreg_el1(SYS_PIRE0);
                }
+
+               if (ctxt_has_s1poe(ctxt))
+                       ctxt_sys_reg(ctxt, POR_EL1)     = read_sysreg_el1(SYS_POR);
        }
        ctxt_sys_reg(ctxt, ESR_EL1)     = read_sysreg_el1(SYS_ESR);
        ctxt_sys_reg(ctxt, AFSR0_EL1)   = read_sysreg_el1(SYS_AFSR0);
@@ -120,6 +140,10 @@ static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
 static inline void __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
 {
        write_sysreg(ctxt_sys_reg(ctxt, MDSCR_EL1),  mdscr_el1);
+
+       // POR_EL0 can affect uaccess, so must be saved/restored early.
+       if (ctxt_has_s1poe(ctxt))
+               write_sysreg_s(ctxt_sys_reg(ctxt, POR_EL0),     SYS_POR_EL0);
 }
 
 static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
@@ -158,6 +182,9 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
                        write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1),   SYS_PIR);
                        write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0);
                }
+
+               if (ctxt_has_s1poe(ctxt))
+                       write_sysreg_el1(ctxt_sys_reg(ctxt, POR_EL1),   SYS_POR);
        }
        write_sysreg_el1(ctxt_sys_reg(ctxt, ESR_EL1),   SYS_ESR);
        write_sysreg_el1(ctxt_sys_reg(ctxt, AFSR0_EL1), SYS_AFSR0);
diff --git arch/arm64/kvm/sys_regs.c arch/arm64/kvm/sys_regs.c
index c90324060436..e7208b59ea12 100644
--- arch/arm64/kvm/sys_regs.c
+++ arch/arm64/kvm/sys_regs.c
@@ -2255,6 +2255,15 @@ static bool access_zcr_el2(struct kvm_vcpu *vcpu,
        return true;
 }
 
+static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu,
+                                    const struct sys_reg_desc *rd)
+{
+       if (kvm_has_feat(vcpu->kvm, ID_AA64MMFR3_EL1, S1POE, IMP))
+               return 0;
+
+       return REG_HIDDEN;
+}
+
 /*
  * Architected system registers.
  * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -2492,6 +2501,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
        { SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1 },
        { SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1 },
+       { SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1,
+         .visibility = s1poe_visibility },
        { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
 
        { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
@@ -2578,6 +2589,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
          .access = access_pmovs, .reg = PMOVSSET_EL0,
          .get_user = get_pmreg, .set_user = set_pmreg },
 
+       { SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0,
+         .visibility = s1poe_visibility },
        { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
        { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
        { SYS_DESC(SYS_TPIDR2_EL0), undef_access },
@@ -4568,8 +4581,6 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
        kvm->arch.fgu[HFGxTR_GROUP] = (HFGxTR_EL2_nAMAIR2_EL1           |
                                       HFGxTR_EL2_nMAIR2_EL1            |
                                       HFGxTR_EL2_nS2POR_EL1            |
-                                      HFGxTR_EL2_nPOR_EL1              |
-                                      HFGxTR_EL2_nPOR_EL0              |
                                       HFGxTR_EL2_nACCDATA_EL1          |
                                       HFGxTR_EL2_nSMPRI_EL1_MASK       |
                                       HFGxTR_EL2_nTPIDR2_EL0_MASK);
@@ -4604,6 +4615,10 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
                kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 |
                                                HFGxTR_EL2_nPIR_EL1);
 
+       if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, S1POE, IMP))
+               kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPOR_EL1 |
+                                               HFGxTR_EL2_nPOR_EL0);
+
        if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
                kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
                                                  HAFGRTR_EL2_RES1);
-- 
2.25.1

