The existing patching alias page setup and teardown sections can be
simplified to make use of the new open_patch_window() abstraction.

This eliminates the _mm variants of the helpers: consumers no longer
need to check mm_patch_enabled(), and no longer need to worry about
synchronisation and flushing beyond the changes they make within the
patching window.

Signed-off-by: Benjamin Gray <bg...@linux.ibm.com>
---
 arch/powerpc/lib/code-patching.c | 180 +++----------------------------
 1 file changed, 16 insertions(+), 164 deletions(-)

diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index d1b812f84154..fd6f8576033a 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -66,40 +66,6 @@ static bool mm_patch_enabled(void)
        return IS_ENABLED(CONFIG_SMP) && radix_enabled();
 }
 
-/*
- * The following applies for Radix MMU. Hash MMU has different requirements,
- * and so is not supported.
- *
- * Changing mm requires context synchronising instructions on both sides of
- * the context switch, as well as a hwsync between the last instruction for
- * which the address of an associated storage access was translated using
- * the current context.
- *
- * switch_mm_irqs_off() performs an isync after the context switch. It is
- * the responsibility of the caller to perform the CSI and hwsync before
- * starting/stopping the temp mm.
- */
-static struct mm_struct *start_using_temp_mm(struct mm_struct *temp_mm)
-{
-       struct mm_struct *orig_mm = current->active_mm;
-
-       lockdep_assert_irqs_disabled();
-       switch_mm_irqs_off(orig_mm, temp_mm, current);
-
-       WARN_ON(!mm_is_thread_local(temp_mm));
-
-       suspend_breakpoints();
-       return orig_mm;
-}
-
-static void stop_using_temp_mm(struct mm_struct *temp_mm,
-                              struct mm_struct *orig_mm)
-{
-       lockdep_assert_irqs_disabled();
-       switch_mm_irqs_off(temp_mm, orig_mm, current);
-       restore_breakpoints();
-}
-
 static int text_area_cpu_up(unsigned int cpu)
 {
        struct vm_struct *area;
@@ -389,73 +355,20 @@ static void close_patch_window(struct patch_window *ctx)
        pte_unmap_unlock(ctx->ptep, ctx->ptl);
 }
 
-static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
-{
-       int err;
-       u32 *patch_addr;
-       unsigned long text_poke_addr;
-       pte_t *pte;
-       unsigned long pfn = get_patch_pfn(addr);
-       struct mm_struct *patching_mm;
-       struct mm_struct *orig_mm;
-       spinlock_t *ptl;
-
-       patching_mm = __this_cpu_read(cpu_patching_context.mm);
-       text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
-       patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
-
-       pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
-       if (!pte)
-               return -ENOMEM;
-
-       __set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
-
-       /* order PTE update before use, also serves as the hwsync */
-       asm volatile("ptesync": : :"memory");
-
-       /* order context switch after arbitrary prior code */
-       isync();
-
-       orig_mm = start_using_temp_mm(patching_mm);
-
-       err = __patch_instruction(addr, instr, patch_addr);
-
-       /* context synchronisation performed by __patch_instruction (isync or exception) */
-       stop_using_temp_mm(patching_mm, orig_mm);
-
-       pte_clear(patching_mm, text_poke_addr, pte);
-       /*
-        * ptesync to order PTE update before TLB invalidation done
-        * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
-        */
-       local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);
-
-       pte_unmap_unlock(pte, ptl);
-
-       return err;
-}
-
 static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
 {
-       int err;
+       struct patch_window ctx;
        u32 *patch_addr;
-       unsigned long text_poke_addr;
-       pte_t *pte;
-       unsigned long pfn = get_patch_pfn(addr);
-
-       text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
-       patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+       int err;
 
-       pte = __this_cpu_read(cpu_patching_context.pte);
-       __set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
-       /* See ptesync comment in radix__set_pte_at() */
-       if (radix_enabled())
-               asm volatile("ptesync": : :"memory");
+       err = open_patch_window(addr, &ctx);
+       if (err)
+               return err;
 
+       patch_addr = (u32 *)(ctx.text_poke_addr + offset_in_page(addr));
        err = __patch_instruction(addr, instr, patch_addr);
 
-       pte_clear(&init_mm, text_poke_addr, pte);
-       flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
+       close_patch_window(&ctx);
 
        return err;
 }
@@ -475,10 +388,7 @@ int patch_instruction(u32 *addr, ppc_inst_t instr)
                return raw_patch_instruction(addr, instr);
 
        local_irq_save(flags);
-       if (mm_patch_enabled())
-               err = __do_patch_instruction_mm(addr, instr);
-       else
-               err = __do_patch_instruction(addr, instr);
+       err = __do_patch_instruction(addr, instr);
        local_irq_restore(flags);
 
        return err;
@@ -545,80 +455,25 @@ static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool rep
        return err;
 }
 
-/*
- * A page is mapped and instructions that fit the page are patched.
- * Assumes 'len' to be (PAGE_SIZE - offset_in_page(addr)) or below.
- */
-static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool repeat_instr)
-{
-       struct mm_struct *patching_mm, *orig_mm;
-       unsigned long pfn = get_patch_pfn(addr);
-       unsigned long text_poke_addr;
-       spinlock_t *ptl;
-       u32 *patch_addr;
-       pte_t *pte;
-       int err;
-
-       patching_mm = __this_cpu_read(cpu_patching_context.mm);
-       text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
-       patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
-
-       pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
-       if (!pte)
-               return -ENOMEM;
-
-       __set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
-
-       /* order PTE update before use, also serves as the hwsync */
-       asm volatile("ptesync" ::: "memory");
-
-       /* order context switch after arbitrary prior code */
-       isync();
-
-       orig_mm = start_using_temp_mm(patching_mm);
-
-       err = __patch_instructions(patch_addr, code, len, repeat_instr);
-
-       /* context synchronisation performed by __patch_instructions */
-       stop_using_temp_mm(patching_mm, orig_mm);
-
-       pte_clear(patching_mm, text_poke_addr, pte);
-       /*
-        * ptesync to order PTE update before TLB invalidation done
-        * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
-        */
-       local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);
-
-       pte_unmap_unlock(pte, ptl);
-
-       return err;
-}
-
 /*
  * A page is mapped and instructions that fit the page are patched.
  * Assumes 'len' to be (PAGE_SIZE - offset_in_page(addr)) or below.
  */
 static int __do_patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
 {
-       unsigned long pfn = get_patch_pfn(addr);
-       unsigned long text_poke_addr;
+       struct patch_window ctx;
        u32 *patch_addr;
-       pte_t *pte;
        int err;
 
-       text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
-       patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
-
-       pte = __this_cpu_read(cpu_patching_context.pte);
-       __set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
-       /* See ptesync comment in radix__set_pte_at() */
-       if (radix_enabled())
-               asm volatile("ptesync" ::: "memory");
+       err = open_patch_window(addr, &ctx);
+       if (err)
+               return err;
 
+       patch_addr = (u32 *)(ctx.text_poke_addr + offset_in_page(addr));
        err = __patch_instructions(patch_addr, code, len, repeat_instr);
 
-       pte_clear(&init_mm, text_poke_addr, pte);
-       flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
+       /* context synchronisation performed by __patch_instructions */
+       close_patch_window(&ctx);
 
        return err;
 }
@@ -639,10 +494,7 @@ int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
                plen = min_t(size_t, PAGE_SIZE - offset_in_page(addr), len);
 
                local_irq_save(flags);
-               if (mm_patch_enabled())
-                       err = __do_patch_instructions_mm(addr, code, plen, repeat_instr);
-               else
-                       err = __do_patch_instructions(addr, code, plen, repeat_instr);
+               err = __do_patch_instructions(addr, code, plen, repeat_instr);
                local_irq_restore(flags);
                if (err)
                        return err;
-- 
2.44.0
