The SLUB per-CPU partial cache is a linked list of partial slabs used to
accelerate object allocation. However, the current statistics code only
counts the objects of the first slab on each per-CPU partial list instead
of traversing the whole list.
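
Both hunks below walk the list with the same idiom. For reference, here is
a minimal standalone sketch of that idiom (a hypothetical 'node' type
standing in for struct page, with made-up counts; not kernel code):

	#include <stdio.h>

	/* Hypothetical stand-in for the per-CPU partial list: each
	 * entry carries its own counts and is linked through ->next,
	 * like the SLUB fields in struct page. */
	struct node {
		struct node *next;
		int pages;
		int pobjects;
	};

	int main(void)
	{
		struct node n2 = { NULL, 1, 10 };
		struct node n1 = { &n2,  2, 20 };
		struct node n0 = { &n1,  3, 30 };

		struct node *page = &n0, *p;
		int pages = 0, objects = 0;

		/* Capture the current entry in p, advance the head,
		 * then accumulate, so every entry is visited rather
		 * than just the first. */
		while ((p = page)) {
			page = p->next;
			pages += p->pages;
			objects += p->pobjects;
		}

		printf("%d(%d)\n", objects, pages);	/* prints 60(6) */
		return 0;
	}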

Signed-off-by: Wanpeng Li <liw...@linux.vnet.ibm.com>
---
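Example of the resulting slabs_cpu_partial output, assuming two online
CPUs (made-up numbers; the "total(pages) C<cpu>=objects(pages)" format
comes from the sprintf calls in the second hunk):

  132(4) C0=45(1) C1=87(3)
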
 mm/slub.c |   32 +++++++++++++++++++++++---------
 1 files changed, 23 insertions(+), 9 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 545a170..799bfdc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4280,7 +4280,7 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                        struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
                                                               cpu);
                        int node;
-                       struct page *page;
+                       struct page *page, *p;
 
                        page = ACCESS_ONCE(c->page);
                        if (!page)
@@ -4298,8 +4298,9 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                        nodes[node] += x;
 
                        page = ACCESS_ONCE(c->partial);
-                       if (page) {
-                               x = page->pobjects;
+                       while ((p = page)) {
+                               page = p->next;
+                               x = p->pobjects;
                                total += x;
                                nodes[node] += x;
                        }
@@ -4520,13 +4521,15 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
        int pages = 0;
        int cpu;
        int len;
+       struct page *p;
 
        for_each_online_cpu(cpu) {
                struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
 
-               if (page) {
-                       pages += page->pages;
-                       objects += page->pobjects;
+               while ((p = page)) {
+                       page = p->next;
+                       pages += p->pages;
+                       objects += p->pobjects;
                }
        }
 
@@ -4535,10 +4538,21 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
 #ifdef CONFIG_SMP
        for_each_online_cpu(cpu) {
                struct page *page = per_cpu_ptr(s->cpu_slab, cpu) ->partial;
+               objects = 0;
+               pages = 0;
+
+               if (!page)
+                       continue;
+
+               while ((p = page)) {
+                       page = p->next;
+                       pages += p->pages;
+                       objects += p->pobjects;
+               }
 
-               if (page && len < PAGE_SIZE - 20)
-                       len += sprintf(buf + len, " C%d=%d(%d)", cpu,
-                               page->pobjects, page->pages);
+               if (len < PAGE_SIZE - 20)
+                       len += sprintf(buf + len, " C%d=%d(%d)", cpu,
+                               objects, pages);
        }
 #endif
        return len + sprintf(buf + len, "\n");
-- 
1.7.7.6
