Instead of disabling interrupts by setting the PSR.I bit, use a priority
higher than the one used for interrupts to mask them via PMR.

When PMR is used to disable interrupts, the irqflags value is derived
from PMR rather than from PSR.[DAIF].
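
For illustration, the intent maps roughly to the following (a sketch
only, using the GIC_PRIO_IRQON/GIC_PRIO_IRQOFF values introduced by
this series and the write_sysreg_s() helper from <asm/sysreg.h>; the
actual code below is wrapped in alternatives so that the DAIF-based
sequences are kept when priority masking is not available):

	/* "enable" IRQs: let all priorities through the PMR filter */
	write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);

	/* "disable" IRQs: mask out the priority used for normal IRQs */
	write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);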

Signed-off-by: Julien Thierry <[email protected]>
Suggested-by: Daniel Thompson <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Ard Biesheuvel <[email protected]>
Cc: Oleg Nesterov <[email protected]>
---
 arch/arm64/include/asm/efi.h      |  11 ++++
 arch/arm64/include/asm/irqflags.h | 123 +++++++++++++++++++++++++++++---------
 2 files changed, 106 insertions(+), 28 deletions(-)

diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 7ed3208..134ff6e 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -44,6 +44,17 @@
 
 #define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
 
+#define arch_efi_save_flags(state_flags)               \
+       do {                                            \
+               (state_flags) = read_sysreg(daif);      \
+       } while (0)
+
+#define arch_efi_restore_flags(state_flags)            \
+       do {                                            \
+               write_sysreg(state_flags, daif);        \
+       } while (0)
+
+
 /* arch specific definitions used by the stub code */
 
 /*
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 24692ed..fa3b06f 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -18,7 +18,9 @@
 
 #ifdef __KERNEL__
 
+#include <asm/alternative.h>
 #include <asm/ptrace.h>
+#include <asm/sysreg.h>
 
 /*
  * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
@@ -36,47 +38,96 @@
 /*
  * CPU interrupt mask handling.
  */
-static inline unsigned long arch_local_irq_save(void)
-{
-       unsigned long flags;
-       asm volatile(
-               "mrs    %0, daif                // arch_local_irq_save\n"
-               "msr    daifset, #2"
-               : "=r" (flags)
-               :
-               : "memory");
-       return flags;
-}
-
 static inline void arch_local_irq_enable(void)
 {
-       asm volatile(
-               "msr    daifclr, #2             // arch_local_irq_enable"
-               :
+       unsigned long unmasked = GIC_PRIO_IRQON;
+
+       asm volatile(ALTERNATIVE(
+               "msr    daifclr, #2             // arch_local_irq_enable\n"
+               "nop",
+               "msr_s  " __stringify(SYS_ICC_PMR_EL1) ",%0\n"
+               "dsb    sy",
+               ARM64_HAS_IRQ_PRIO_MASKING)
                :
+               : "r" (unmasked)
                : "memory");
 }
 
 static inline void arch_local_irq_disable(void)
 {
-       asm volatile(
-               "msr    daifset, #2             // arch_local_irq_disable"
-               :
+       unsigned long masked = GIC_PRIO_IRQOFF;
+
+       asm volatile(ALTERNATIVE(
+               "msr    daifset, #2             // arch_local_irq_disable",
+               "msr_s  " __stringify(SYS_ICC_PMR_EL1) ", %0",
+               ARM64_HAS_IRQ_PRIO_MASKING)
                :
+               : "r" (masked)
                : "memory");
 }
 
 /*
+ * Having two ways to control interrupt status is a bit complicated. Some
+ * locations like exception entries will have PSR.I bit set by the architecture
+ * while PMR is unmasked.
+ * We need the irqflags to represent that interrupts are disabled in such cases.
+ *
+ * For this, we lower the value read from PMR when the I bit is set so it is
+ * considered as an irq masking priority. (With PMR, lower value means masking
+ * more interrupts).
+ */
+#define _get_irqflags(daif_bits, pmr)                                  \
+({                                                                     \
+       unsigned long flags;                                            \
+                                                                       \
+       BUILD_BUG_ON(GIC_PRIO_IRQOFF < (GIC_PRIO_IRQON & ~PSR_I_BIT));  \
+       asm volatile(ALTERNATIVE(                                       \
+               "mov    %0, %1\n"                                       \
+               "nop\n"                                                 \
+               "nop",                                                  \
+               "and    %0, %1, #" __stringify(PSR_I_BIT) "\n"          \
+               "mvn    %0, %0\n"                                       \
+               "and    %0, %0, %2",                                    \
+               ARM64_HAS_IRQ_PRIO_MASKING)                             \
+               : "=&r" (flags)                                         \
+               : "r" (daif_bits), "r" (pmr)                            \
+               : "memory");                                            \
+                                                                       \
+       flags;                                                          \
+})
+
+/*
  * Save the current interrupt enable state.
  */
 static inline unsigned long arch_local_save_flags(void)
 {
-       unsigned long flags;
-       asm volatile(
-               "mrs    %0, daif                // arch_local_save_flags"
-               : "=r" (flags)
+       unsigned long daif_bits;
+       unsigned long pmr; // Only used if alternative is on
+
+       daif_bits = read_sysreg(daif);
+
+       // Get PMR
+       asm volatile(ALTERNATIVE(
+                       "nop",
+                       "mrs_s  %0, " __stringify(SYS_ICC_PMR_EL1),
+                       ARM64_HAS_IRQ_PRIO_MASKING)
+               : "=&r" (pmr)
                :
                : "memory");
+
+       return _get_irqflags(daif_bits, pmr);
+}
+
+#undef _get_irqflags
+
+static inline unsigned long arch_local_irq_save(void)
+{
+       unsigned long flags;
+
+       flags = arch_local_save_flags();
+
+       arch_local_irq_disable();
+
        return flags;
 }
 
@@ -85,16 +136,32 @@ static inline unsigned long arch_local_save_flags(void)
  */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-       asm volatile(
-               "msr    daif, %0                // arch_local_irq_restore"
-       :
-       : "r" (flags)
-       : "memory");
+       asm volatile(ALTERNATIVE(
+                       "msr    daif, %0\n"
+                       "nop",
+                       "msr_s  " __stringify(SYS_ICC_PMR_EL1) ", %0\n"
+                       "dsb    sy",
+                       ARM64_HAS_IRQ_PRIO_MASKING)
+               : "+r" (flags)
+               :
+               : "memory");
 }
 
 static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-       return flags & PSR_I_BIT;
+       int res;
+
+       asm volatile(ALTERNATIVE(
+                       "and    %w0, %w1, #" __stringify(PSR_I_BIT) "\n"
+                       "nop",
+                       "cmp    %w1, #" __stringify(GIC_PRIO_IRQOFF) "\n"
+                       "cset   %w0, ls",
+                       ARM64_HAS_IRQ_PRIO_MASKING)
+               : "=&r" (res)
+               : "r" ((int) flags)
+               : "memory");
+
+       return res;
 }
 #endif
 #endif
-- 
1.9.1
