__vunmap() looks up the vmap area twice for no obvious reason:
first directly via find_vm_area() to get the area pointer, and a
second time indirectly via vm_remove_mappings()->remove_vm_area(),
which searches for the same area again.

To remove this redundancy, let's split remove_vm_area() into
__remove_vm_area(struct vmap_area *), which performs the actual area
removal, and a remove_vm_area(const void *addr) wrapper, which can be
used everywhere it was used before. Let's also pass a pointer to the
vmap_area instead of the vm_struct to vm_remove_mappings(), so it can
pass it on to __remove_vm_area() and avoid the redundant area
lookup.
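
To illustrate, the resulting __vunmap() path performs a single lookup
and hands the vmap_area down (simplified sketch of the code in the diff
below, with debug checks and page freeing omitted):

	va = find_vmap_area((unsigned long)addr);	/* the only lookup */
	if (unlikely(!va || !(va->flags & VM_VM_AREA)))
		return;

	area = va->vm;
	vm_remove_mappings(va, deallocate_pages);	/* calls __remove_vm_area(va) */
	kfree(area);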

On my test setup, I get a 5-10% speedup when vfree()'ing 1000000
4-page vmalloc blocks.

Perf report before:
  29.44%  cat      [kernel.kallsyms]  [k] free_unref_page
  11.88%  cat      [kernel.kallsyms]  [k] find_vmap_area
   9.28%  cat      [kernel.kallsyms]  [k] __free_pages
   7.44%  cat      [kernel.kallsyms]  [k] __slab_free
   7.28%  cat      [kernel.kallsyms]  [k] vunmap_page_range
   4.56%  cat      [kernel.kallsyms]  [k] __vunmap
   3.64%  cat      [kernel.kallsyms]  [k] __purge_vmap_area_lazy
   3.04%  cat      [kernel.kallsyms]  [k] __free_vmap_area

Perf report after:
  32.41%  cat      [kernel.kallsyms]  [k] free_unref_page
   7.79%  cat      [kernel.kallsyms]  [k] find_vmap_area
   7.40%  cat      [kernel.kallsyms]  [k] __slab_free
   7.31%  cat      [kernel.kallsyms]  [k] vunmap_page_range
   6.84%  cat      [kernel.kallsyms]  [k] __free_pages
   6.01%  cat      [kernel.kallsyms]  [k] __vunmap
   3.98%  cat      [kernel.kallsyms]  [k] smp_call_function_single
   3.81%  cat      [kernel.kallsyms]  [k] __purge_vmap_area_lazy
   2.77%  cat      [kernel.kallsyms]  [k] __free_vmap_area

Signed-off-by: Roman Gushchin <g...@fb.com>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Matthew Wilcox <wi...@infradead.org>
Cc: Vlastimil Babka <vba...@suse.cz>
---
 mm/vmalloc.c | 52 +++++++++++++++++++++++++++++-----------------------
 1 file changed, 29 insertions(+), 23 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c42872ed82ac..8d4907865614 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2075,6 +2075,22 @@ struct vm_struct *find_vm_area(const void *addr)
        return NULL;
 }
 
+static struct vm_struct *__remove_vm_area(struct vmap_area *va)
+{
+       struct vm_struct *vm = va->vm;
+
+       spin_lock(&vmap_area_lock);
+       va->vm = NULL;
+       va->flags &= ~VM_VM_AREA;
+       va->flags |= VM_LAZY_FREE;
+       spin_unlock(&vmap_area_lock);
+
+       kasan_free_shadow(vm);
+       free_unmap_vmap_area(va);
+
+       return vm;
+}
+
 /**
  * remove_vm_area - find and remove a continuous kernel virtual area
  * @addr:          base address
@@ -2087,26 +2103,14 @@ struct vm_struct *find_vm_area(const void *addr)
  */
 struct vm_struct *remove_vm_area(const void *addr)
 {
+       struct vm_struct *vm = NULL;
        struct vmap_area *va;
 
-       might_sleep();
-
        va = find_vmap_area((unsigned long)addr);
-       if (va && va->flags & VM_VM_AREA) {
-               struct vm_struct *vm = va->vm;
-
-               spin_lock(&vmap_area_lock);
-               va->vm = NULL;
-               va->flags &= ~VM_VM_AREA;
-               va->flags |= VM_LAZY_FREE;
-               spin_unlock(&vmap_area_lock);
-
-               kasan_free_shadow(vm);
-               free_unmap_vmap_area(va);
+       if (va && va->flags & VM_VM_AREA)
+               vm = __remove_vm_area(va);
 
-               return vm;
-       }
-       return NULL;
+       return vm;
 }
 
 static inline void set_area_direct_map(const struct vm_struct *area,
@@ -2119,9 +2123,10 @@ static inline void set_area_direct_map(const struct vm_struct *area,
                        set_direct_map(area->pages[i]);
 }
 
-/* Handle removing and resetting vm mappings related to the vm_struct. */
-static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
+/* Handle removing and resetting vm mappings related to the va->vm vm_struct. */
+static void vm_remove_mappings(struct vmap_area *va, int deallocate_pages)
 {
+       struct vm_struct *area = va->vm;
        unsigned long addr = (unsigned long)area->addr;
        unsigned long start = ULONG_MAX, end = 0;
        int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
@@ -2138,7 +2143,7 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
                set_memory_rw(addr, area->nr_pages);
        }
 
-       remove_vm_area(area->addr);
+       __remove_vm_area(va);
 
        /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
        if (!flush_reset)
@@ -2178,6 +2183,7 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
 static void __vunmap(const void *addr, int deallocate_pages)
 {
        struct vm_struct *area;
+       struct vmap_area *va;
 
        if (!addr)
                return;
@@ -2186,17 +2192,18 @@ static void __vunmap(const void *addr, int deallocate_pages)
                        addr))
                return;
 
-       area = find_vm_area(addr);
-       if (unlikely(!area)) {
+       va = find_vmap_area((unsigned long)addr);
+       if (unlikely(!va || !(va->flags & VM_VM_AREA))) {
                WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
                                addr);
                return;
        }
 
+       area = va->vm;
        debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
        debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
 
-       vm_remove_mappings(area, deallocate_pages);
+       vm_remove_mappings(va, deallocate_pages);
 
        if (deallocate_pages) {
                int i;
@@ -2212,7 +2219,6 @@ static void __vunmap(const void *addr, int deallocate_pages)
        }
 
        kfree(area);
-       return;
 }
 
 static inline void __vfree_deferred(const void *addr)
-- 
2.20.1
