The commit is pushed to "branch-rh7-3.10.0-327.18.2.vz7.14.x-ovz" and will 
appear at https://src.openvz.org/scm/ovz/vzkernel.git
after rh7-3.10.0-327.18.2.vz7.14.9
------>
commit 3813b61da45aff9ce1324dd0201ae9e59b1e5a26
Author: Vladimir Davydov <vdavy...@virtuozzo.com>
Date:   Tue May 31 13:14:07 2016 +0400

    mm: Drop alloc_kmem_pages and friends
    
    Patchset description:
    Some kmemcg-related fixes
    
    This patch set backports some changes from the following patch set
    submitted upstream:
    
      lkml.kernel.org/r/cover.1464079537.git.vdavy...@virtuozzo.com
    
    [hasn't been merged yet]
    
    namely:
     - move kmemcg charge/uncharge to generic allocator paths (see the
       sketch after this list)
     - fix pipe buffer stealing
     - avoid charging kernel page tables
     - account unix socket buffers to kmemcg (PSBM-34562)
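
    Moving the charge to the generic allocator paths means, roughly,
    that the page allocator itself charges any page allocated with
    __GFP_ACCOUNT set. A minimal sketch of the idea, following the
    upstream series being backported and using the upstream function
    names (the vz7 equivalents, memcg_kmem_newpage_charge() and
    memcg_kmem_commit_charge(), show up in the code removed below);
    the exact hunk may differ:

        /* sketch: charge accounted pages right in the allocator */
        page = __alloc_pages_nodemask(gfp_mask, order, zonelist, nodemask);
        if (page && (gfp_mask & __GFP_ACCOUNT) &&
            unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
                __free_pages(page, order);
                page = NULL;
        }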
    
    Vladimir Davydov (7):
      Drop alloc_kmem_pages and friends
      mm: memcontrol: drop memcg_kmem_commit_charge
      Move PageBalloon and PageBuddy helpers to page-flags.h
      mm: charge/uncharge kmemcg from generic page allocator paths
      af_unix: charge buffers to kmemcg
      pipe: uncharge page on ->steal
      arch: x86: don't charge kernel page tables to kmemcg
    
    =======================
    This patch description:
    
    These functions work exactly like alloc_pages and friends, except
    that they charge the allocated page to the current memcg when
    __GFP_ACCOUNT is passed. In the next patch I'm going to move
    charge/uncharge to the generic allocation paths, so these special
    helpers won't be necessary anymore.
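
    A hedged before/after sketch of a typical call site (illustrative
    only, not a hunk from this patch; GFP_KERNEL_ACCOUNT expands to
    GFP_KERNEL | __GFP_ACCOUNT):

        /* before: the special kmem helper pair */
        page = alloc_kmem_pages(GFP_KERNEL_ACCOUNT, 0);
        ...
        __free_kmem_pages(page, 0);

        /* after: plain page allocator calls; charging will be keyed
         * off __GFP_ACCOUNT once it moves into the generic paths */
        page = alloc_pages(GFP_KERNEL_ACCOUNT, 0);
        ...
        __free_page(page);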
    
    Signed-off-by: Vladimir Davydov <vdavy...@virtuozzo.com>
---
 arch/x86/include/asm/pgalloc.h    | 14 ++++-----
 arch/x86/kernel/ldt.c             |  6 ++--
 arch/x86/mm/pgtable.c             | 19 +++++-------
 fs/pipe.c                         | 11 +++----
 include/linux/gfp.h               |  8 -----
 kernel/fork.c                     |  6 ++--
 mm/memcontrol.c                   |  1 -
 mm/page_alloc.c                   | 65 ---------------------------------------
 mm/slab_common.c                  |  2 +-
 mm/slub.c                         |  4 +--
 mm/vmalloc.c                      |  6 ++--
 net/netfilter/nf_conntrack_core.c |  6 ++--
 net/packet/af_packet.c            |  8 ++---
 13 files changed, 38 insertions(+), 118 deletions(-)

diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index f589758..58e4567 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -48,7 +48,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 static inline void pte_free(struct mm_struct *mm, struct page *pte)
 {
        pgtable_page_dtor(pte);
-       __free_kmem_pages(pte, 0);
+       __free_page(pte);
 }
 
 extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
@@ -81,11 +81,11 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
        struct page *page;
-       page = alloc_kmem_pages(GFP_KERNEL_ACCOUNT | __GFP_REPEAT | __GFP_ZERO, 0);
+       page = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_REPEAT | __GFP_ZERO, 0);
        if (!page)
                return NULL;
        if (!pgtable_pmd_page_ctor(page)) {
-               __free_kmem_pages(page, 0);
+               __free_page(page);
                return NULL;
        }
        return (pmd_t *)page_address(page);
@@ -95,7 +95,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
        BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
        pgtable_pmd_page_dtor(virt_to_page(pmd));
-       free_kmem_pages((unsigned long)pmd, 0);
+       free_page((unsigned long)pmd);
 }
 
 extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
@@ -125,14 +125,14 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return (pud_t *)__get_free_kmem_pages(GFP_KERNEL_ACCOUNT|__GFP_REPEAT|
-                                             __GFP_ZERO, 0);
+       return (pud_t *)__get_free_page(GFP_KERNEL_ACCOUNT|__GFP_REPEAT|
+                                       __GFP_ZERO);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
        BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-       free_kmem_pages((unsigned long)pud, 0);
+       free_page((unsigned long)pud);
 }
 
 extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 4a6c8fe..942b0a4 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -44,7 +44,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
        if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
                newldt = vmalloc_account(mincount * LDT_ENTRY_SIZE);
        else
-               newldt = (void *)__get_free_kmem_pages(GFP_KERNEL_ACCOUNT, 0);
+               newldt = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
 
        if (!newldt)
                return -ENOMEM;
@@ -83,7 +83,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
                if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
                        vfree(oldldt);
                else
-                       __free_kmem_pages(virt_to_page(oldldt), 0);
+                       __free_page(virt_to_page(oldldt));
        }
        return 0;
 }
@@ -138,7 +138,7 @@ void destroy_context(struct mm_struct *mm)
                if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
                        vfree(mm->context.ldt);
                else
-                       __free_kmem_pages(virt_to_page(mm->context.ldt), 0);
+                       __free_page(virt_to_page(mm->context.ldt));
                mm->context.size = 0;
        }
 }
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 02ec624..ba13ef8 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -25,11 +25,11 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        struct page *pte;
 
-       pte = alloc_kmem_pages(__userpte_alloc_gfp, 0);
+       pte = alloc_pages(__userpte_alloc_gfp, 0);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
-               __free_kmem_pages(pte, 0);
+               __free_page(pte);
                return NULL;
        }
        return pte;
@@ -56,7 +56,6 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
 {
        pgtable_page_dtor(pte);
        paravirt_release_pte(page_to_pfn(pte));
-       memcg_kmem_uncharge_pages(pte, 0);
        tlb_remove_page(tlb, pte);
 }
 
@@ -73,7 +72,6 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
        tlb->need_flush_all = 1;
 #endif
        pgtable_pmd_page_dtor(page);
-       memcg_kmem_uncharge_pages(page, 0);
        tlb_remove_page(tlb, page);
 }
 
@@ -83,7 +81,6 @@ void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
        struct page *page = virt_to_page(pud);
 
        paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
-       memcg_kmem_uncharge_pages(page, 0);
        tlb_remove_page(tlb, page);
 }
 #endif /* PAGETABLE_LEVELS > 3 */
@@ -203,7 +200,7 @@ static void free_pmds(pmd_t *pmds[])
        for(i = 0; i < PREALLOCATED_PMDS; i++)
                if (pmds[i]) {
                        pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
-                       free_kmem_pages((unsigned long)pmds[i], 0);
+                       free_page((unsigned long)pmds[i]);
                }
 }
 
@@ -213,11 +210,11 @@ static int preallocate_pmds(pmd_t *pmds[])
        bool failed = false;
 
        for(i = 0; i < PREALLOCATED_PMDS; i++) {
-               pmd_t *pmd = (pmd_t *)__get_free_kmem_pages(PGALLOC_GFP, 0);
+               pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
                if (!pmd)
                        failed = true;
                if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
-                       free_kmem_pages((unsigned long)pmd, 0);
+                       free_page((unsigned long)pmd);
                        pmd = NULL;
                        failed = true;
                }
@@ -284,7 +281,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
        pgd_t *pgd;
        pmd_t *pmds[PREALLOCATED_PMDS];
 
-       pgd = (pgd_t *)__get_free_kmem_pages(PGALLOC_GFP, 0);
+       pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
 
        if (pgd == NULL)
                goto out;
@@ -314,7 +311,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 out_free_pmds:
        free_pmds(pmds);
 out_free_pgd:
-       free_kmem_pages((unsigned long)pgd, 0);
+       free_page((unsigned long)pgd);
 out:
        return NULL;
 }
@@ -324,7 +321,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        paravirt_pgd_free(mm, pgd);
-       free_kmem_pages((unsigned long)pgd, 0);
+       free_page((unsigned long)pgd);
 }
 
 /*
diff --git a/fs/pipe.c b/fs/pipe.c
index d038ff8..975fe3f 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -223,11 +223,8 @@ static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
         * temporary page, let's keep track of it as a one-deep
         * allocation cache. (Otherwise just release our reference to it)
         */
-       if (page_count(page) == 1) {
-               if (!pipe->tmp_page)
-                       pipe->tmp_page = page;
-               else
-                       __free_kmem_pages(page, 0);
+       if (page_count(page) == 1 && !pipe->tmp_page) {
+               pipe->tmp_page = page;
        } else
                page_cache_release(page);
 }
@@ -590,7 +587,7 @@ redo1:
                        size_t remaining;
 
                        if (!page) {
-                               page = alloc_kmem_pages(GFP_HIGHUSER | __GFP_ACCOUNT, 0);
+                               page = alloc_pages(GFP_HIGHUSER | __GFP_ACCOUNT, 0);
                                if (unlikely(!page)) {
                                        ret = ret ? : -ENOMEM;
                                        break;
@@ -827,7 +824,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
                        buf->ops->release(pipe, buf);
        }
        if (pipe->tmp_page)
-               __free_kmem_pages(pipe->tmp_page, 0);
+               __free_page(pipe->tmp_page);
        kfree(pipe->bufs);
        kfree(pipe);
 }
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index b454948..8452f50 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -354,12 +354,7 @@ extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 #define alloc_page_vma_node(gfp_mask, vma, addr, node)         \
        alloc_pages_vma(gfp_mask, 0, vma, addr, node)
 
-extern struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order);
-extern struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask,
-                                         unsigned int order);
-
 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
-extern unsigned long __get_free_kmem_pages(gfp_t gfp_mask, unsigned int order);
 extern unsigned long get_zeroed_page(gfp_t gfp_mask);
 
 void *alloc_pages_exact(size_t size, gfp_t gfp_mask);
@@ -378,9 +373,6 @@ extern void free_pages(unsigned long addr, unsigned int order);
 extern void free_hot_cold_page(struct page *page, bool cold);
 extern void free_hot_cold_page_list(struct list_head *list, bool cold);
 
-extern void __free_kmem_pages(struct page *page, unsigned int order);
-extern void free_kmem_pages(unsigned long addr, unsigned int order);
-
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(addr) free_pages((addr), 0)
 
diff --git a/kernel/fork.c b/kernel/fork.c
index 1c84066..2fcde98 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -154,15 +154,15 @@ void __weak arch_release_thread_info(struct thread_info *ti)
 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
                                                  int node)
 {
-       struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
-                                                 THREAD_SIZE_ORDER);
+       struct page *page = alloc_pages_node(node, THREADINFO_GFP,
+                                            THREAD_SIZE_ORDER);
 
        return page ? page_address(page) : NULL;
 }
 
 static inline void free_thread_info(struct thread_info *ti)
 {
-       free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+       free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
 }
 # else
 static struct kmem_cache *thread_info_cache;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index af2c14b..50154da 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -3617,7 +3617,6 @@ struct mem_cgroup *__mem_cgroup_from_kmem(void *ptr)
                if (!is_root_cache(cachep))
                        memcg = cachep->memcg_params.memcg;
        } else {
-               /* page allocated by alloc_kmem_pages */
                pc = lookup_page_cgroup(page);
                if (PageCgroupUsed(pc))
                        memcg = pc->mem_cgroup;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1ae4ab3..9f02d80 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2828,19 +2828,6 @@ unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 }
 EXPORT_SYMBOL(__get_free_pages);
 
-unsigned long __get_free_kmem_pages(gfp_t gfp_mask, unsigned int order)
-{
-       struct page *page;
-
-       VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
-
-       page = alloc_kmem_pages(gfp_mask, order);
-       if (!page)
-               return 0;
-       return (unsigned long) page_address(page);
-}
-EXPORT_SYMBOL(__get_free_kmem_pages);
-
 unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
        return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
@@ -2869,58 +2856,6 @@ void free_pages(unsigned long addr, unsigned int order)
 
 EXPORT_SYMBOL(free_pages);
 
-/*
- * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
- * of the current memory cgroup if __GFP_ACCOUNT is set, other than that it is
- * equivalent to alloc_pages.
- *
- * It should be used when the caller would like to use kmalloc, but since the
- * allocation is large, it has to fall back to the page allocator.
- */
-struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
-{
-       struct page *page;
-       struct mem_cgroup *memcg = NULL;
-
-       if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
-               return NULL;
-       page = alloc_pages(gfp_mask, order);
-       memcg_kmem_commit_charge(page, memcg, order);
-       return page;
-}
-
-struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
-{
-       struct page *page;
-       struct mem_cgroup *memcg = NULL;
-
-       if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
-               return NULL;
-       page = alloc_pages_node(nid, gfp_mask, order);
-       memcg_kmem_commit_charge(page, memcg, order);
-       return page;
-}
-
-/*
- * __free_kmem_pages and free_kmem_pages will free pages allocated with
- * alloc_kmem_pages.
- */
-void __free_kmem_pages(struct page *page, unsigned int order)
-{
-       memcg_kmem_uncharge_pages(page, order);
-       __free_pages(page, order);
-}
-EXPORT_SYMBOL(__free_kmem_pages);
-
-void free_kmem_pages(unsigned long addr, unsigned int order)
-{
-       if (addr != 0) {
-               VM_BUG_ON(!virt_addr_valid((void *)addr));
-               __free_kmem_pages(virt_to_page((void *)addr), order);
-       }
-}
-EXPORT_SYMBOL(free_kmem_pages);
-
 static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
 {
        if (addr) {
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 18226a6..abad07d 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -784,7 +784,7 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
        struct page *page;
 
        flags |= __GFP_COMP;
-       page = alloc_kmem_pages(flags, order);
+       page = alloc_pages(flags, order);
        ret = page ? page_address(page) : NULL;
        kmemleak_alloc(ret, size, 1, flags);
        kasan_kmalloc_large(ret, size);
diff --git a/mm/slub.c b/mm/slub.c
index c62e381..da1dbca 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3308,7 +3308,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
        void *ptr = NULL;
 
        flags |= __GFP_COMP | __GFP_NOTRACK;
-       page = alloc_kmem_pages_node(node, flags, get_order(size));
+       page = alloc_pages_node(node, flags, get_order(size));
        if (page)
                ptr = page_address(page);
 
@@ -3426,7 +3426,7 @@ void kfree(const void *x)
                BUG_ON(!PageCompound(page));
                kmemleak_free(x);
                kasan_kfree_large(x);
-               __free_kmem_pages(page, compound_order(page));
+               __free_pages(page, compound_order(page));
                return;
        }
        slab_free(page->slab_cache, page, object, _RET_IP_);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a8c2b28..b74bc34 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1496,7 +1496,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
                        struct page *page = area->pages[i];
 
                        BUG_ON(!page);
-                       __free_kmem_pages(page, 0);
+                       __free_page(page);
                }
 
                if (area->flags & VM_VPAGES)
@@ -1629,9 +1629,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
 
                if (node < 0)
-                       page = alloc_kmem_pages(tmp_mask, order);
+                       page = alloc_pages(tmp_mask, order);
                else
-                       page = alloc_kmem_pages_node(node, tmp_mask, order);
+                       page = alloc_pages_node(node, tmp_mask, order);
 
                if (unlikely(!page)) {
                        /* Successfully allocated i pages, free them in __vunmap() */
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index a59a108..0c94c3a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1469,8 +1469,8 @@ void nf_ct_free_hashtable(void *hash, unsigned int size)
        if (is_vmalloc_addr(hash))
                vfree(hash);
        else
-               free_kmem_pages((unsigned long)hash,
-                               get_order(sizeof(struct hlist_head) * size));
+               free_pages((unsigned long)hash,
+                          get_order(sizeof(struct hlist_head) * size));
 }
 EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
 
@@ -1597,7 +1597,7 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
        BUILD_BUG_ON(sizeof(struct hlist_nulls_head) != sizeof(struct hlist_head));
        nr_slots = *sizep = roundup(*sizep, PAGE_SIZE / sizeof(struct hlist_nulls_head));
        sz = nr_slots * sizeof(struct hlist_nulls_head);
-       hash = (void *)__get_free_kmem_pages(GFP_KERNEL_ACCOUNT | __GFP_NOWARN | __GFP_ZERO,
+       hash = (void *)__get_free_pages(GFP_KERNEL_ACCOUNT | __GFP_NOWARN | __GFP_ZERO,
                                        get_order(sz));
        if (!hash) {
                printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 52d0d42..ecb5464 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3701,8 +3701,8 @@ static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
                        if (is_vmalloc_addr(pg_vec[i].buffer))
                                vfree(pg_vec[i].buffer);
                        else
-                               free_kmem_pages((unsigned long)pg_vec[i].buffer,
-                                               order);
+                               free_pages((unsigned long)pg_vec[i].buffer,
+                                          order);
                        pg_vec[i].buffer = NULL;
                }
        }
@@ -3715,7 +3715,7 @@ static char *alloc_one_pg_vec_page(unsigned long order)
        gfp_t gfp_flags = GFP_KERNEL_ACCOUNT | __GFP_COMP |
                          __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
 
-       buffer = (char *) __get_free_kmem_pages(gfp_flags, order);
+       buffer = (char *) __get_free_pages(gfp_flags, order);
 
        if (buffer)
                return buffer;
@@ -3732,7 +3732,7 @@ static char *alloc_one_pg_vec_page(unsigned long order)
         * vmalloc failed, lets dig into swap here
         */
        gfp_flags &= ~__GFP_NORETRY;
-       buffer = (char *)__get_free_kmem_pages(gfp_flags, order);
+       buffer = (char *)__get_free_pages(gfp_flags, order);
        if (buffer)
                return buffer;
 