__find_linux_pte() returns a page table entry pointer after walking the
page table without holding locks. To make the walk safe against a
concurrent THP split or collapse, we disable interrupts around it, and
we must keep interrupts disabled for as long as we use the returned
page table entry pointer. addr_to_pfn() currently re-enables interrupts
before dereferencing the pte; fix it to restore them only after the pfn
has been computed.
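
For reference, the pattern the fix enforces looks roughly like this.
This is a minimal sketch, not the patched function itself;
lockless_walk_pfn() is a hypothetical name, while __find_linux_pte(),
local_irq_save()/local_irq_restore() and pte_pfn() are the interfaces
used in the diff below:

  static unsigned long lockless_walk_pfn(struct mm_struct *mm,
                                         unsigned long addr)
  {
          unsigned long flags, pfn = ULONG_MAX;
          unsigned int shift;
          pte_t *ptep;

          /*
           * The pte pointer returned by the lockless walk is only
           * stable while interrupts stay disabled: dereference *ptep
           * and compute the pfn before re-enabling them.
           */
          local_irq_save(flags);
          ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
          if (ptep && !pte_special(*ptep))
                  pfn = pte_pfn(*ptep);
          local_irq_restore(flags);

          /* ptep must not be dereferenced past this point */
          return pfn;
  }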

Cc: Balbir Singh <bsinghar...@gmail.com>
Cc: Reza Arbab <ar...@linux.ibm.com>
Cc: Santosh Sivaraj <sant...@fossix.org>
Fixes: ba41e1e1ccb9 ("powerpc/mce: Hookup derror (load/store) UE errors")
Signed-off-by: Aneesh Kumar K.V <aneesh.ku...@linux.ibm.com>
---
 arch/powerpc/kernel/mce_power.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index 356e7b99f661..585c37dc1b18 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -28,6 +28,7 @@
 unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
 {
        pte_t *ptep;
+       unsigned long pfn;
        unsigned int shift;
        unsigned long flags;
        struct mm_struct *mm;
@@ -39,18 +40,22 @@ unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
 
        local_irq_save(flags);
        ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
-       local_irq_restore(flags);
 
-       if (!ptep || pte_special(*ptep))
-               return ULONG_MAX;
+       if (!ptep || pte_special(*ptep)) {
+               pfn = ULONG_MAX;
+               goto err_out;
+       }
 
        if (shift > PAGE_SHIFT) {
                unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
 
-               return pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
-       }
-
-       return pte_pfn(*ptep);
+               pfn = pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
+       } else {
+               pfn = pte_pfn(*ptep);
+       }
+err_out:
+       local_irq_restore(flags);
+       return pfn;
 }
 
 /* flush SLBs and reload */
-- 
2.21.0
