Josef's redesign of the balancing between slab caches and the page
cache requires slab cache statistics at the lruvec level.

The lruvec helper updates both the node and the memcg counters in one
call, so switch the slab page allocation and free paths over to
mod_lruvec_page_state() and drop the now-redundant
mod_memcg_page_state() calls from memcg_charge_slab() and
memcg_uncharge_slab().
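
For reference, a minimal sketch of what the helper is assumed to do
here, based purely on the two updates it replaces in this patch (the
node-level update in the slab allocators and the memcg-level update in
mm/slab.h); the in-tree helper comes from the per-lruvec stats
infrastructure and may differ in detail:

    /*
     * Sketch only, not the in-tree implementation: assumed to fold
     * the node-wide and per-cgroup counter updates into one call so
     * the two can never drift apart at a callsite.
     */
    static inline void mod_lruvec_page_state(struct page *page,
                                             enum node_stat_item idx,
                                             int val)
    {
            /* node-wide counter, as mod_node_page_state() did before */
            mod_node_page_state(page_pgdat(page), idx, val);
            /* per-cgroup counter, as the dropped mod_memcg_page_state() did */
            mod_memcg_page_state(page, idx, val);
    }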

Signed-off-by: Johannes Weiner <han...@cmpxchg.org>
---
 mm/slab.c | 12 ++++--------
 mm/slab.h | 18 +-----------------
 mm/slub.c |  4 ++--
 3 files changed, 7 insertions(+), 27 deletions(-)

diff --git a/mm/slab.c b/mm/slab.c
index b55853399559..908908aa8250 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1425,11 +1425,9 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 
        nr_pages = (1 << cachep->gfporder);
        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-               add_node_page_state(page_pgdat(page),
-                       NR_SLAB_RECLAIMABLE, nr_pages);
+               mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, nr_pages);
        else
-               add_node_page_state(page_pgdat(page),
-                       NR_SLAB_UNRECLAIMABLE, nr_pages);
+               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, nr_pages);
 
        __SetPageSlab(page);
        /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
@@ -1459,11 +1457,9 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
        kmemcheck_free_shadow(page, order);
 
        if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
-               sub_node_page_state(page_pgdat(page),
-                               NR_SLAB_RECLAIMABLE, nr_freed);
+               mod_lruvec_page_state(page, NR_SLAB_RECLAIMABLE, -nr_freed);
        else
-               sub_node_page_state(page_pgdat(page),
-                               NR_SLAB_UNRECLAIMABLE, nr_freed);
+               mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE, -nr_freed);
 
        BUG_ON(!PageSlab(page));
        __ClearPageSlabPfmemalloc(page);
diff --git a/mm/slab.h b/mm/slab.h
index 7b84e3839dfe..6885e1192ec5 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -274,22 +274,11 @@ static __always_inline int memcg_charge_slab(struct page *page,
                                             gfp_t gfp, int order,
                                             struct kmem_cache *s)
 {
-       int ret;
-
        if (!memcg_kmem_enabled())
                return 0;
        if (is_root_cache(s))
                return 0;
-
-       ret = memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
-       if (ret)
-               return ret;
-
-       mod_memcg_page_state(page,
-                            (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-                            NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-                            1 << order);
-       return 0;
+       return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
 }
 
 static __always_inline void memcg_uncharge_slab(struct page *page, int order,
@@ -297,11 +286,6 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 {
        if (!memcg_kmem_enabled())
                return;
-
-       mod_memcg_page_state(page,
-                            (s->flags & SLAB_RECLAIM_ACCOUNT) ?
-                            NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-                            -(1 << order));
        memcg_kmem_uncharge(page, order);
 }
 
diff --git a/mm/slub.c b/mm/slub.c
index 673e72698d9b..edaf102284e8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1615,7 +1615,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
        if (!page)
                return NULL;
 
-       mod_node_page_state(page_pgdat(page),
+       mod_lruvec_page_state(page,
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
                1 << oo_order(oo));
@@ -1655,7 +1655,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 
        kmemcheck_free_shadow(page, compound_order(page));
 
-       mod_node_page_state(page_pgdat(page),
+       mod_lruvec_page_state(page,
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
                -pages);
-- 
2.12.2
