Objective
---------
The current implementation of struct vmap_area wastes space.

After this commit, sizeof(struct vmap_area) is reduced from
11 words to 8 words.
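
For reference, a minimal userspace sketch (not kernel code) that
reproduces the before/after layouts on a 64-bit machine; rb_node,
list_head and llist_node are replaced by stand-ins of the same
word counts (3, 2 and 1 words):

  #include <stdio.h>

  /* Simplified stand-ins for the kernel types (64-bit word counts). */
  struct rb_node    { unsigned long parent_color; void *left, *right; };
  struct list_head  { void *next, *prev; };
  struct llist_node { void *next; };

  struct vmap_area_old {                  /* before: 11 words */
          unsigned long va_start, va_end;
          unsigned long subtree_max_size;
          unsigned long flags;
          struct rb_node rb_node;
          struct list_head list;
          struct llist_node purge_list;
          void *vm;
  };

  struct vmap_area_new {                  /* after: 8 words */
          unsigned long va_start, va_end;
          struct rb_node rb_node;
          struct list_head list;
          union {                         /* one shared word */
                  unsigned long subtree_max_size; /* "free" tree */
                  void *vm;                       /* "busy" tree */
                  struct llist_node purge_list;   /* purge list  */
          };
  };

  int main(void)
  {
          printf("old: %zu words\n", sizeof(struct vmap_area_old) / sizeof(long));
          printf("new: %zu words\n", sizeof(struct vmap_area_new) / sizeof(long));
          return 0;
  }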

Description
-----------
1) Pack "subtree_max_size", "vm" and "purge_list" into a union.
This is safe because a vmap_area is only ever in one of three
places at a time, and each member is used in exactly one of them:
    A) "subtree_max_size" is only used when the vmap_area is in
       the "free" tree
    B) "vm" is only used when the vmap_area is in the "busy" tree
    C) "purge_list" is only used when the vmap_area is in
       vmap_purge_list

2) Eliminate "flags".
Only one flag, VM_VM_AREA, is ever set, and the same information
can be obtained by checking whether "vm" is NULL, so "flags" can
be removed entirely.

Signed-off-by: Pengfei Li <lpf.vec...@gmail.com>
Suggested-by: Uladzislau Rezki (Sony) <ure...@gmail.com>
---
 include/linux/vmalloc.h | 20 +++++++++++++-------
 mm/vmalloc.c            | 24 ++++++++++--------------
 2 files changed, 23 insertions(+), 21 deletions(-)

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 9b21d0047710..a1334bd18ef1 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -51,15 +51,21 @@ struct vmap_area {
        unsigned long va_start;
        unsigned long va_end;
 
-       /*
-        * Largest available free size in subtree.
-        */
-       unsigned long subtree_max_size;
-       unsigned long flags;
        struct rb_node rb_node;         /* address sorted rbtree */
        struct list_head list;          /* address sorted list */
-       struct llist_node purge_list;    /* "lazy purge" list */
-       struct vm_struct *vm;
+
+       /*
+        * The following three variables can be packed, because
+        * a vmap_area object is always in one of the three states:
+        *    1) in "free" tree (root is free_vmap_area_root)
+        *    2) in "busy" tree (root is vmap_area_root)
+        *    3) in purge list  (head is vmap_purge_list)
+        */
+       union {
+               unsigned long subtree_max_size; /* in "free" tree */
+               struct vm_struct *vm;           /* in "busy" tree */
+               struct llist_node purge_list;   /* in purge list */
+       };
 };
 
 /*
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 71d8040a8a0b..2f7edc0466e7 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -329,7 +329,6 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
 
-#define VM_VM_AREA     0x04
 
 static DEFINE_SPINLOCK(vmap_area_lock);
 /* Export for kexec only */
@@ -1115,7 +1114,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 
        va->va_start = addr;
        va->va_end = addr + size;
-       va->flags = 0;
+       va->vm = NULL;
        insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
 
        spin_unlock(&vmap_area_lock);
@@ -1922,7 +1921,6 @@ void __init vmalloc_init(void)
                if (WARN_ON_ONCE(!va))
                        continue;
 
-               va->flags = VM_VM_AREA;
                va->va_start = (unsigned long)tmp->addr;
                va->va_end = va->va_start + tmp->size;
                va->vm = tmp;
@@ -2020,7 +2018,6 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
        vm->size = va->va_end - va->va_start;
        vm->caller = caller;
        va->vm = vm;
-       va->flags |= VM_VM_AREA;
        spin_unlock(&vmap_area_lock);
 }
 
@@ -2125,10 +2122,10 @@ struct vm_struct *find_vm_area(const void *addr)
        struct vmap_area *va;
 
        va = find_vmap_area((unsigned long)addr);
-       if (va && va->flags & VM_VM_AREA)
-               return va->vm;
+       if (!va)
+               return NULL;
 
-       return NULL;
+       return va->vm;
 }
 
 /**
@@ -2149,11 +2146,10 @@ struct vm_struct *remove_vm_area(const void *addr)
 
        spin_lock(&vmap_area_lock);
        va = __find_vmap_area((unsigned long)addr);
-       if (va && va->flags & VM_VM_AREA) {
+       if (va && va->vm) {
                struct vm_struct *vm = va->vm;
 
                va->vm = NULL;
-               va->flags &= ~VM_VM_AREA;
                spin_unlock(&vmap_area_lock);
 
                kasan_free_shadow(vm);
@@ -2856,7 +2852,7 @@ long vread(char *buf, char *addr, unsigned long count)
                if (!count)
                        break;
 
-               if (!(va->flags & VM_VM_AREA))
+               if (!va->vm)
                        continue;
 
                vm = va->vm;
@@ -2936,7 +2932,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
                if (!count)
                        break;
 
-               if (!(va->flags & VM_VM_AREA))
+               if (!va->vm)
                        continue;
 
                vm = va->vm;
@@ -3466,10 +3462,10 @@ static int s_show(struct seq_file *m, void *p)
        va = list_entry(p, struct vmap_area, list);
 
        /*
-        * s_show can encounter race with remove_vm_area, !VM_VM_AREA on
-        * behalf of vmap area is being tear down or vm_map_ram allocation.
+        * s_show can encounter a race with remove_vm_area: !vm means the
+        * vmap area is being torn down or is a vm_map_ram allocation.
         */
-       if (!(va->flags & VM_VM_AREA)) {
+       if (!va->vm) {
                seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
                        (void *)va->va_start, (void *)va->va_end,
                        va->va_end - va->va_start);
-- 
2.21.0
