Currently they are no-ops and just introduce unnecessary
changes in "Initial patch". Kill them.

Please merge this into "Initial patch" on rebase.

Signed-off-by: Kirill Tkhai <ktk...@virtuozzo.com>
---
 fs/exec.c          |    4 ++--
 include/linux/mm.h |    4 +---
 kernel/fork.c      |    8 ++++----
 mm/mmap.c          |   22 +++++++++++-----------
 4 files changed, 18 insertions(+), 20 deletions(-)

diff --git a/fs/exec.c b/fs/exec.c
index c8450118925..bbbe293ea24 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -257,7 +257,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
                                NULL, UB_SOFT))
                goto err_charge;
 
-       bprm->vma = vma = allocate_vma(mm, GFP_KERNEL | __GFP_ZERO);
+       bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma)
                goto err_alloc;
 
@@ -289,7 +289,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
 err:
        up_write(&mm->mmap_sem);
        bprm->vma = NULL;
-       free_vma(mm, vma);
+       kmem_cache_free(vm_area_cachep, vma);
 err_alloc:
        ub_memory_uncharge(mm, PAGE_SIZE, VM_STACK_FLAGS | mm->def_flags, NULL);
 err_charge:
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 897d7cfd226..57a63882fff 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -93,9 +93,7 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
  * mmap() functions).
  */
 
-extern struct kmem_cache *__vm_area_cachep;
-#define allocate_vma(mm, gfp_flags)    kmem_cache_alloc(__vm_area_cachep, gfp_flags)
-#define free_vma(mm, vma)              kmem_cache_free(__vm_area_cachep, vma)
+extern struct kmem_cache *vm_area_cachep;
 
 #ifndef CONFIG_MMU
 extern struct rb_root nommu_region_tree;
diff --git a/kernel/fork.c b/kernel/fork.c
index 3ad0977c698..78a47e0f471 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -235,7 +235,7 @@ struct kmem_cache *files_cachep;
 struct kmem_cache *fs_cachep;
 
 /* SLAB cache for vm_area_struct structures */
-struct kmem_cache *__vm_area_cachep;
+struct kmem_cache *vm_area_cachep;
 
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
@@ -458,7 +458,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                                goto fail_nomem;
                        charge = len;
                }
-               tmp = allocate_vma(mm, GFP_KERNEL);
+               tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (!tmp)
                        goto fail_nomem;
                *tmp = *mpnt;
@@ -538,7 +538,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 fail_nomem_anon_vma_fork:
        mpol_put(pol);
 fail_nomem_policy:
-       free_vma(mm, tmp);
+       kmem_cache_free(vm_area_cachep, tmp);
 fail_nomem:
        ub_memory_uncharge(mm, mpnt->vm_end - mpnt->vm_start,
                        mpnt->vm_flags & ~VM_LOCKED, mpnt->vm_file);
@@ -1923,7 +1923,7 @@ void __init proc_caches_init(void)
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
                        NULL);
-       __vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
+       vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
        mmap_init();
        nsproxy_cache_init();
 }
diff --git a/mm/mmap.c b/mm/mmap.c
index 3e9eeea3522..b66e5f23b54 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -290,7 +290,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        if (vma->vm_file)
                fput(vma->vm_file);
        mpol_put(vma_policy(vma));
-       free_vma(vma->vm_mm, vma);
+       kmem_cache_free(vm_area_cachep, vma);
        return next;
 }
 
@@ -1001,7 +1001,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
                        anon_vma_merge(vma, next);
                mm->map_count--;
                mpol_put(vma_policy(next));
-               free_vma(mm, next);
+               kmem_cache_free(vm_area_cachep, next);
                /*
                 * In mprotect's case 6 (see comments on vma_merge),
                 * we must remove another next too. It would clutter
@@ -1742,7 +1742,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
         * specific mapper. the address has already been validated, but
         * not unmapped, but the maps are removed from the list.
         */
-       vma = allocate_vma(mm, GFP_KERNEL | __GFP_ZERO);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma) {
                error = -ENOMEM;
                goto unacct_error;
@@ -1866,7 +1866,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        if (vm_flags & VM_DENYWRITE)
                allow_write_access(file);
 free_vma:
-       free_vma(mm, vma);
+       kmem_cache_free(vm_area_cachep, vma);
 unacct_error:
        if (ub_charged)
                ub_memory_uncharge(mm, len, vm_flags, file);
@@ -2679,7 +2679,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
                                        ~(huge_page_mask(hstate_vma(vma)))))
                return -EINVAL;
 
-       new = allocate_vma(mm, GFP_KERNEL);
+       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!new)
                goto out_err;
 
@@ -2730,7 +2730,7 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
  out_free_mpol:
        mpol_put(pol);
  out_free_vma:
-       free_vma(mm, new);
+       kmem_cache_free(vm_area_cachep, new);
  out_err:
        return err;
 }
@@ -3046,7 +3046,7 @@ static unsigned long do_brk_flags(unsigned long addr, unsigned long len,
        /*
         * create a vma struct for an anonymous mapping
         */
-       vma = allocate_vma(mm, GFP_KERNEL | __GFP_ZERO);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (!vma)
                goto fail_alloc;
 
@@ -3243,7 +3243,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                }
                *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
        } else {
-               new_vma = allocate_vma(mm, GFP_KERNEL);
+               new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (new_vma) {
                        *new_vma = *vma;
                        new_vma->vm_start = addr;
@@ -3269,7 +3269,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
  out_free_mempol:
        mpol_put(pol);
  out_free_vma:
-       free_vma(mm, new_vma);
+       kmem_cache_free(vm_area_cachep, new_vma);
        return NULL;
 }
 
@@ -3345,7 +3345,7 @@ int install_special_mapping(struct mm_struct *mm,
        int ret;
        struct vm_area_struct *vma;
 
-       vma = allocate_vma(mm, GFP_KERNEL | __GFP_ZERO);
+       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
        if (unlikely(vma == NULL))
                return -ENOMEM;
 
@@ -3371,7 +3371,7 @@ int install_special_mapping(struct mm_struct *mm,
        return 0;
 
 out:
-       free_vma(mm, vma);
+       kmem_cache_free(vm_area_cachep, vma);
        return ret;
 }
 

_______________________________________________
Devel mailing list
Devel@openvz.org
https://lists.openvz.org/mailman/listinfo/devel

Reply via email to