To support arm64 pseudo-NMI, arch_local_irq_save() and
arch_local_irq_restore() now operate on ICC_PMR_EL1 instead of DAIF.
However, the save/restore logic looks suspicious:

arch_local_irq_save():
daif.i_on   pmr_on   ->   flag.i_on
    1          0      |       0
    1          1      |       1
    0          1      |       0       --[1]
    0          0      |       0

arch_local_irq_restore():
daif.i_on   pmr_on   <-   flag.i_on
    x          0      |       0
    x          1      |       1

As shown above, condition [1] is never restored faithfully. When
function_graph tracing gic_handle_irq(), the local_irq_save() /
local_irq_restore() pair called from trace_graph_entry() hits exactly
this condition, so the IRQ can never be handled and the system hangs
(see the sketch below).
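
A rough C sketch of the old mapping (the function names here are made
up for illustration; PSR_I_BIT is the kernel macro, the GIC_PRIO_*
values are only indicative, the real ones live in asm/ptrace.h):

  #define PSR_I_BIT        0x00000080UL        /* PSTATE.I, as in asm/ptrace.h */
  #define GIC_PRIO_IRQON   0xf0                /* indicative value only */
  #define GIC_PRIO_IRQOFF  (GIC_PRIO_IRQON & ~0x80)

  /* old save: the "DAIF.I set, PMR unmasked" state is collapsed to IRQOFF */
  static unsigned long old_save_flags(unsigned long daif_bits, unsigned long pmr)
  {
          return (daif_bits & PSR_I_BIT) ? GIC_PRIO_IRQOFF : pmr;
  }

  /* old restore: the saved value goes straight into ICC_PMR_EL1, so
   * condition [1] comes back with the PMR masked instead of unmasked */
  static unsigned long old_restore_pmr(unsigned long flags)
  {
          return flags;
  }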

This patch makes the save and restore exact by keeping both the DAIF
bits and the PMR value in the saved flags, and makes
arch_irqs_disabled_flags() return the correct result.
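
A minimal sketch of the new encoding, reusing the definitions from the
sketch above (again illustrative C, not the exact kernel code): the
upper 32 bits of the saved flags carry the DAIF bits and the lower 32
bits carry ICC_PMR_EL1, so the restore path can write back the exact
PMR value and arch_irqs_disabled_flags() can check both sources of
masking.

  /* new save: keep both DAIF and PMR instead of collapsing them */
  static unsigned long new_save_flags(unsigned long daif_bits, unsigned long pmr)
  {
          return (daif_bits << 32) | pmr;
  }

  /* new restore: ICC_PMR_EL1 gets back exactly the value it had at save time */
  static unsigned long new_restore_pmr(unsigned long flags)
  {
          return (unsigned int)flags;
  }

  /* disabled if DAIF.I was set or PMR was at/below IRQOFF, matching the
   * "and" + "cmp"/"cinc" sequence in the hunk below */
  static int new_irqs_disabled(unsigned long flags)
  {
          return ((flags >> 32) & PSR_I_BIT) ||
                 ((unsigned int)flags <= GIC_PRIO_IRQOFF);
  }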

Fixes: 4a503217ce37 ("arm64: irqflags: Use ICC_PMR_EL1 for interrupt masking")
Signed-off-by: Wei Li <liwei...@huawei.com>
---
 arch/arm64/include/asm/irqflags.h | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index 43d8366c1e87..7bc6a79f31c4 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -76,8 +76,7 @@ static inline unsigned long arch_local_save_flags(void)
         * The asm is logically equivalent to:
         *
         * if (system_uses_irq_prio_masking())
-        *      flags = (daif_bits & PSR_I_BIT) ?
-        *                      GIC_PRIO_IRQOFF :
+        *      flags = (daif_bits << 32) |
         *                      read_sysreg_s(SYS_ICC_PMR_EL1);
         * else
         *      flags = daif_bits;
@@ -87,11 +86,11 @@ static inline unsigned long arch_local_save_flags(void)
                        "nop\n"
                        "nop",
                        "mrs_s  %0, " __stringify(SYS_ICC_PMR_EL1) "\n"
-                       "ands   %1, %1, " __stringify(PSR_I_BIT) "\n"
-                       "csel   %0, %0, %2, eq",
+                       "lsl    %1, %1, #32\n"
+                       "orr    %0, %0, %1",
                        ARM64_HAS_IRQ_PRIO_MASKING)
                : "=&r" (flags), "+r" (daif_bits)
-               : "r" ((unsigned long) GIC_PRIO_IRQOFF)
+               :
                : "memory");
 
        return flags;
@@ -119,8 +118,8 @@ static inline void arch_local_irq_restore(unsigned long flags)
                        "msr_s  " __stringify(SYS_ICC_PMR_EL1) ", %0\n"
                        "dsb    sy",
                        ARM64_HAS_IRQ_PRIO_MASKING)
-               : "+r" (flags)
                :
+               : "r" ((int)flags)
                : "memory");
 }
 
@@ -130,12 +129,14 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
 
        asm volatile(ALTERNATIVE(
                        "and    %w0, %w1, #" __stringify(PSR_I_BIT) "\n"
+                       "nop\n"
                        "nop",
+                       "and    %w0, %w2, #" __stringify(PSR_I_BIT) "\n"
                        "cmp    %w1, #" __stringify(GIC_PRIO_IRQOFF) "\n"
-                       "cset   %w0, ls",
+                       "cinc   %w0, %w0, ls",
                        ARM64_HAS_IRQ_PRIO_MASKING)
                : "=&r" (res)
-               : "r" ((int) flags)
+               : "r" ((int) flags), "r" (flags >> 32)
                : "memory");
 
        return res;
-- 
2.17.1
