This moves the 64s/hash context tracking from hash_page_mm() to
__do_hash_fault(), so it is no longer run by the OCXL / SPU
accelerator callers. Doing context tracking there was certainly
wrong: those callers are not low level interrupt handlers, so they
should already have entered kernel context tracking by the time
they call hash_page_mm().

Then stay in kernel context for the duration of the fault, rather
than doing an enter/exit pair for the hash fault and then another
enter/exit pair for the page fault, which is pointless.

Even so, calling exception_enter/exit in __do_hash_fault seems
questionable, because it touches per-cpu variables, tracing, etc.,
which might themselves have been interrupted by this hash fault or
might themselves cause hash faults. But maybe I am missing
something, because hash_page_mm very deliberately calls
trace_hash_fault too, for example. So for now go with it; in this
regard it is no worse than before.
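
For reference, a minimal standalone sketch of the change in
enter/exit pairing. exception_enter/exception_exit are stubbed here
and the old_hash_fault/new_hash_fault helpers are invented purely
for illustration; the real placement is in the diff below:

  #include <stdio.h>

  enum ctx_state { CONTEXT_USER, CONTEXT_KERNEL };
  static enum ctx_state cur = CONTEXT_USER;

  /* Stand-ins for the kernel's context tracking entry/exit. */
  static enum ctx_state exception_enter(void)
  {
          enum ctx_state prev = cur;

          cur = CONTEXT_KERNEL;
          printf("  enter\n");
          return prev;
  }

  static void exception_exit(enum ctx_state prev)
  {
          cur = prev;
          printf("  exit\n");
  }

  /* Old scheme: hash_page_mm() and do_page_fault() each did a pair. */
  static void old_hash_fault(void)
  {
          enum ctx_state prev = exception_enter();  /* hash_page_mm() */
          exception_exit(prev);
          prev = exception_enter();                 /* do_page_fault() */
          exception_exit(prev);
  }

  /*
   * New scheme: one pair in __do_hash_fault(); the page fault fallback
   * goes via hash__do_page_fault(), which adds no extra pair.
   */
  static void new_hash_fault(void)
  {
          enum ctx_state prev = exception_enter();  /* __do_hash_fault() */
          exception_exit(prev);
          /* hash__do_page_fault(): no additional enter/exit */
  }

  int main(void)
  {
          printf("old (hash fault falls back to page fault):\n");
          old_hash_fault();
          printf("new:\n");
          new_hash_fault();
          return 0;
  }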

Signed-off-by: Nicholas Piggin <npig...@gmail.com>
---
 arch/powerpc/include/asm/bug.h        |  1 +
 arch/powerpc/mm/book3s64/hash_utils.c |  7 ++++---
 arch/powerpc/mm/fault.c               | 30 +++++++++++++++++++++------
 3 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h
index c10ae0a9bbaf..d1635ffbb179 100644
--- a/arch/powerpc/include/asm/bug.h
+++ b/arch/powerpc/include/asm/bug.h
@@ -112,6 +112,7 @@
 
 struct pt_regs;
 long do_page_fault(struct pt_regs *);
+long hash__do_page_fault(struct pt_regs *);
 void bad_page_fault(struct pt_regs *, int);
 void __bad_page_fault(struct pt_regs *regs, int sig);
 void do_bad_page_fault_segv(struct pt_regs *regs);
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 824fcb2627e4..b7b78aeea3eb 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -1289,7 +1289,6 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
                 unsigned long flags)
 {
        bool is_thp;
-       enum ctx_state prev_state = exception_enter();
        pgd_t *pgdir;
        unsigned long vsid;
        pte_t *ptep;
@@ -1491,7 +1490,6 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea,
        DBG_LOW(" -> rc=%d\n", rc);
 
 bail:
-       exception_exit(prev_state);
        return rc;
 }
 EXPORT_SYMBOL_GPL(hash_page_mm);
@@ -1515,6 +1513,7 @@ EXPORT_SYMBOL_GPL(hash_page);
 
 DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
 {
+       enum ctx_state prev_state = exception_enter();
        unsigned long ea = regs->dar;
        unsigned long dsisr = regs->dsisr;
        unsigned long access = _PAGE_PRESENT | _PAGE_READ;
@@ -1563,6 +1562,8 @@ DEFINE_INTERRUPT_HANDLER_RET(__do_hash_fault)
                err = 0;
        }
 
+       exception_exit(prev_state);
+
        return err;
 }
 
@@ -1599,7 +1600,7 @@ DEFINE_INTERRUPT_HANDLER_RAW(do_hash_fault)
        err = __do_hash_fault(regs);
        if (err) {
 page_fault:
-               err = do_page_fault(regs);
+               err = hash__do_page_fault(regs);
        }
 
        return err;
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index eef7281507f9..620ff623b2c6 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -387,7 +387,7 @@ static void sanity_check_fault(bool is_write, bool is_user,
  * The return value is 0 if the fault was handled, or the signal
  * number if this is a kernel fault that can't be handled here.
  */
-static int __do_page_fault(struct pt_regs *regs, unsigned long address,
+static int ___do_page_fault(struct pt_regs *regs, unsigned long address,
                           unsigned long error_code)
 {
        struct vm_area_struct * vma;
@@ -537,15 +537,13 @@ static int __do_page_fault(struct pt_regs *regs, unsigned long address,
 
        return 0;
 }
-NOKPROBE_SYMBOL(__do_page_fault);
+NOKPROBE_SYMBOL(___do_page_fault);
 
-DEFINE_INTERRUPT_HANDLER_RET(do_page_fault)
+static long __do_page_fault(struct pt_regs *regs)
 {
-       enum ctx_state prev_state;
        long err;
 
-       prev_state = exception_enter();
-       err = __do_page_fault(regs, regs->dar, regs->dsisr);
+       err = ___do_page_fault(regs, regs->dar, regs->dsisr);
        if (unlikely(err)) {
                const struct exception_table_entry *entry;
 
@@ -560,12 +558,32 @@ DEFINE_INTERRUPT_HANDLER_RET(do_page_fault)
                }
        }
 
+       return err;
+}
+NOKPROBE_SYMBOL(__do_page_fault);
+
+DEFINE_INTERRUPT_HANDLER_RET(do_page_fault)
+{
+       enum ctx_state prev_state = exception_enter();
+       long err;
+
+       err = __do_page_fault(regs);
+
        exception_exit(prev_state);
 
        return err;
 }
 NOKPROBE_SYMBOL(do_page_fault);
 
+#ifdef CONFIG_PPC_BOOK3S_64
+/* Same as do_page_fault but interrupt entry has already run in do_hash_fault */
+long hash__do_page_fault(struct pt_regs *regs)
+{
+       return __do_page_fault(regs);
+}
+NOKPROBE_SYMBOL(hash__do_page_fault);
+#endif
+
 /*
  * bad_page_fault is called when we have a bad access from the kernel.
  * It is called from the DSI and ISI handlers in head.S and from some
-- 
2.23.0
