charge_slab_page() and uncharge_slab_page() are no longer related
to memcg charging and uncharging. In order to make their names
less confusing, let's rename them to account_slab_page() and
unaccount_slab_page() respectively.
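
For context, all that remains in these helpers is generic per-node
vmstat accounting, plus releasing the page's obj_cgroups vector on the
free path. A condensed sketch of the renamed helpers as they end up in
mm/slab.h (the negative vmstat update in unaccount_slab_page() sits just
past the quoted hunk below and is reconstructed here):

	static __always_inline void account_slab_page(struct page *page, int order,
						      struct kmem_cache *s)
	{
		/* Add the slab's pages to the per-node slab vmstat counter. */
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    PAGE_SIZE << order);
	}

	static __always_inline void unaccount_slab_page(struct page *page, int order,
							struct kmem_cache *s)
	{
		/* Free the per-object cgroup vector if kmem accounting is on. */
		if (memcg_kmem_enabled())
			memcg_free_page_obj_cgroups(page);

		/* Subtract the pages again when the slab page is freed. */
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(PAGE_SIZE << order));
	}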

Signed-off-by: Roman Gushchin <g...@fb.com>
---
 mm/slab.c | 4 ++--
 mm/slab.h | 8 ++++----
 mm/slub.c | 4 ++--
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index fafd46877504..300adfb67245 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1379,7 +1379,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
                return NULL;
        }
 
-       charge_slab_page(page, cachep->gfporder, cachep);
+       account_slab_page(page, cachep->gfporder, cachep);
        __SetPageSlab(page);
        /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
        if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1403,7 +1403,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += 1 << order;
-       uncharge_slab_page(page, order, cachep);
+       unaccount_slab_page(page, order, cachep);
        __free_pages(page, order);
 }
 
diff --git a/mm/slab.h b/mm/slab.h
index 704a65713f81..72ff7cd933db 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -440,15 +440,15 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
        return page->slab_cache;
 }
 
-static __always_inline void charge_slab_page(struct page *page, int order,
-                                            struct kmem_cache *s)
+static __always_inline void account_slab_page(struct page *page, int order,
+                                             struct kmem_cache *s)
 {
        mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                            PAGE_SIZE << order);
 }
 
-static __always_inline void uncharge_slab_page(struct page *page, int order,
-                                              struct kmem_cache *s)
+static __always_inline void unaccount_slab_page(struct page *page, int order,
+                                               struct kmem_cache *s)
 {
        if (memcg_kmem_enabled())
                memcg_free_page_obj_cgroups(page);
diff --git a/mm/slub.c b/mm/slub.c
index d9b33a935e58..5cffd3fd440b 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1621,7 +1621,7 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
                page = __alloc_pages_node(node, flags, order);
 
        if (page)
-               charge_slab_page(page, order, s);
+               account_slab_page(page, order, s);
 
        return page;
 }
@@ -1844,7 +1844,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
        page->mapping = NULL;
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += pages;
-       uncharge_slab_page(page, order, s);
+       unaccount_slab_page(page, order, s);
        __free_pages(page, order);
 }
 
-- 
2.26.2
