From: Arnd Bergmann <[email protected]>

Arm has stopped setting ARCH_NEEDS_KMAP_HIGH_GET, so the kmap_lock
wrappers now expand to the same plain spin_lock()/spin_unlock() calls
on all architectures, which leaves room for simplification.

Replace the lock_kmap()/unlock_kmap() and lock_kmap_any()/unlock_kmap_any()
wrappers with open-coded spin_lock()/spin_unlock() calls, remove the now
unused kmap_high_get(), and drop the arch_kmap_local_high_get() and
kmap_high_unmap_local() helpers, which no longer do anything useful.

Signed-off-by: Arnd Bergmann <[email protected]>
---
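Note for reviewers, not part of the commit message: with
ARCH_NEEDS_KMAP_HIGH_GET unset everywhere, the old wrappers already reduced
to plain spinlock operations, so the conversion is mechanical. A minimal
before/after sketch of a kunmap_high()-style call site, for illustration
only (the "..." stands for the code run under the lock):

        /* before: the wrapper carries an unused 'flags' argument */
        unsigned long flags;

        lock_kmap_any(flags);    /* expanded to spin_lock(&kmap_lock); (void)(flags); */
        /* ... update pkmap_count[] under the lock ... */
        unlock_kmap_any(flags);  /* expanded to spin_unlock(&kmap_lock); (void)(flags); */

        /* after: open-coded, the flags variable goes away */
        spin_lock(&kmap_lock);
        /* ... update pkmap_count[] under the lock ... */
        spin_unlock(&kmap_lock);
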
 mm/highmem.c | 100 ++++++---------------------------------------------
 1 file changed, 10 insertions(+), 90 deletions(-)

diff --git a/mm/highmem.c b/mm/highmem.c
index b5c8e4c2d5d4..bdeec56471c9 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -143,25 +143,6 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);
 
 pte_t *pkmap_page_table;
 
-/*
- * Most architectures have no use for kmap_high_get(), so let's abstract
- * the disabling of IRQ out of the locking in that case to save on a
- * potential useless overhead.
- */
-#ifdef ARCH_NEEDS_KMAP_HIGH_GET
-#define lock_kmap()             spin_lock_irq(&kmap_lock)
-#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
-#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
-#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
-#else
-#define lock_kmap()             spin_lock(&kmap_lock)
-#define unlock_kmap()           spin_unlock(&kmap_lock)
-#define lock_kmap_any(flags)    \
-               do { spin_lock(&kmap_lock); (void)(flags); } while (0)
-#define unlock_kmap_any(flags)  \
-               do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
-#endif
-
 struct page *__kmap_to_page(void *vaddr)
 {
        unsigned long base = (unsigned long) vaddr & PAGE_MASK;
@@ -237,9 +218,9 @@ static void flush_all_zero_pkmaps(void)
 
 void __kmap_flush_unused(void)
 {
-       lock_kmap();
+       spin_lock(&kmap_lock);
        flush_all_zero_pkmaps();
-       unlock_kmap();
+       spin_unlock(&kmap_lock);
 }
 
 static inline unsigned long map_new_virtual(struct page *page)
@@ -273,10 +254,10 @@ static inline unsigned long map_new_virtual(struct page *page)
 
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(pkmap_map_wait, &wait);
-                       unlock_kmap();
+                       spin_unlock(&kmap_lock);
                        schedule();
                        remove_wait_queue(pkmap_map_wait, &wait);
-                       lock_kmap();
+                       spin_lock(&kmap_lock);
 
                        /* Somebody else might have mapped it while we slept */
                        if (page_address(page))
@@ -312,60 +293,32 @@ void *kmap_high(struct page *page)
         * For highmem pages, we can't trust "virtual" until
         * after we have the lock.
         */
-       lock_kmap();
+       spin_lock(&kmap_lock);
        vaddr = (unsigned long)page_address(page);
        if (!vaddr)
                vaddr = map_new_virtual(page);
        pkmap_count[PKMAP_NR(vaddr)]++;
        BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
-       unlock_kmap();
+       spin_unlock(&kmap_lock);
        return (void *) vaddr;
 }
 EXPORT_SYMBOL(kmap_high);
 
-#ifdef ARCH_NEEDS_KMAP_HIGH_GET
-/**
- * kmap_high_get - pin a highmem page into memory
- * @page: &struct page to pin
- *
- * Returns the page's current virtual memory address, or NULL if no mapping
- * exists.  If and only if a non null address is returned then a
- * matching call to kunmap_high() is necessary.
- *
- * This can be called from any context.
- */
-void *kmap_high_get(const struct page *page)
-{
-       unsigned long vaddr, flags;
-
-       lock_kmap_any(flags);
-       vaddr = (unsigned long)page_address(page);
-       if (vaddr) {
-               BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
-               pkmap_count[PKMAP_NR(vaddr)]++;
-       }
-       unlock_kmap_any(flags);
-       return (void *) vaddr;
-}
-#endif
-
 /**
  * kunmap_high - unmap a highmem page into memory
  * @page: &struct page to unmap
  *
- * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
- * only from user context.
+ * This may be called only from user context.
  */
 void kunmap_high(const struct page *page)
 {
        unsigned long vaddr;
        unsigned long nr;
-       unsigned long flags;
        int need_wakeup;
        unsigned int color = get_pkmap_color(page);
        wait_queue_head_t *pkmap_map_wait;
 
-       lock_kmap_any(flags);
+       spin_lock(&kmap_lock);
        vaddr = (unsigned long)page_address(page);
        BUG_ON(!vaddr);
        nr = PKMAP_NR(vaddr);
@@ -392,7 +345,7 @@ void kunmap_high(const struct page *page)
                pkmap_map_wait = get_pkmap_wait_queue_head(color);
                need_wakeup = waitqueue_active(pkmap_map_wait);
        }
-       unlock_kmap_any(flags);
+       spin_unlock(&kmap_lock);
 
        /* do wake-up, if needed, race-free outside of the spin lock */
        if (need_wakeup)
@@ -507,30 +460,11 @@ static inline void kmap_local_idx_pop(void)
 #define arch_kmap_local_unmap_idx(idx, vaddr)  kmap_local_calc_idx(idx)
 #endif
 
-#ifndef arch_kmap_local_high_get
-static inline void *arch_kmap_local_high_get(const struct page *page)
-{
-       return NULL;
-}
-#endif
-
 #ifndef arch_kmap_local_set_pte
 #define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev) \
        set_pte_at(mm, vaddr, ptep, ptev)
 #endif
 
-/* Unmap a local mapping which was obtained by kmap_high_get() */
-static inline bool kmap_high_unmap_local(unsigned long vaddr)
-{
-#ifdef ARCH_NEEDS_KMAP_HIGH_GET
-       if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
-               kunmap_high(pte_page(ptep_get(&pkmap_page_table[PKMAP_NR(vaddr)])));
-               return true;
-       }
-#endif
-       return false;
-}
-
 static pte_t *__kmap_pte;
 
 static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
@@ -574,8 +508,6 @@ EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
 
 void *__kmap_local_page_prot(const struct page *page, pgprot_t prot)
 {
-       void *kmap;
-
        /*
         * To broaden the usage of the actual kmap_local() machinery always map
         * pages when debugging is enabled and the architecture has no problems
@@ -584,11 +516,6 @@ void *__kmap_local_page_prot(const struct page *page, pgprot_t prot)
        if (!IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) && !PageHighMem(page))
                return page_address(page);
 
-       /* Try kmap_high_get() if architecture has it enabled */
-       kmap = arch_kmap_local_high_get(page);
-       if (kmap)
-               return kmap;
-
        return __kmap_local_pfn_prot(page_to_pfn(page), prot);
 }
 EXPORT_SYMBOL(__kmap_local_page_prot);
@@ -606,14 +533,7 @@ void kunmap_local_indexed(const void *vaddr)
                        WARN_ON_ONCE(1);
                        return;
                }
-               /*
-                * Handle mappings which were obtained by kmap_high_get()
-                * first as the virtual address of such mappings is below
-                * PAGE_OFFSET. Warn for all other addresses which are in
-                * the user space part of the virtual address space.
-                */
-               if (!kmap_high_unmap_local(addr))
-                       WARN_ON_ONCE(addr < PAGE_OFFSET);
+               WARN_ON_ONCE(addr < PAGE_OFFSET);
                return;
        }
 
-- 
2.39.5