To keep balance in future, __update_tlb() remembers to pte_unmap() after
pte_offset_map().  This is an odd case, since the caller has already done
pte_offset_map_lock(), then mips forgets the address and recalculates it;
but my two naive attempts to clean that up did more harm than good.

Tested-by: Nathan Chancellor <nat...@kernel.org>
Signed-off-by: Hugh Dickins <hu...@google.com>
---
Andrew, please replace my mips patch, and its build warning fix patch,
in mm-unstable by this less ambitious but working replacement - thanks.

 arch/mips/mm/tlb-r4k.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 1b939abbe4ca..93c2d695588a 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -297,7 +297,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        p4d_t *p4dp;
        pud_t *pudp;
        pmd_t *pmdp;
-       pte_t *ptep;
+       pte_t *ptep, *ptemap = NULL;
        int idx, pid;
 
        /*
@@ -344,7 +344,12 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        } else
 #endif
        {
-               ptep = pte_offset_map(pmdp, address);
+               ptemap = ptep = pte_offset_map(pmdp, address);
+               /*
+                * update_mmu_cache() is called between pte_offset_map_lock()
+                * and pte_unmap_unlock(), so we can assume that ptep is not
+                * NULL here: and what should be done below if it were NULL?
+                */
 
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 #ifdef CONFIG_XPA
@@ -373,6 +378,9 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        tlbw_use_hazard();
        htw_start();
        flush_micro_tlb_vm(vma);
+
+       if (ptemap)
+               pte_unmap(ptemap);
        local_irq_restore(flags);
 }
 
-- 
2.35.3

Reply via email to