Currently we use the page->lru list for maintaining lists of slabs.  We
have a list member in the page structure (slab_list) that can be used
for this purpose.  Using slab_list makes the code cleaner since we are
no longer overloading the lru list.
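
For background, the two names refer to the same storage: struct page
(include/linux/mm_types.h) overlays the page cache fields and the slab
fields in a union, roughly as below (a simplified sketch as of this
series, with unrelated members and ifdefs omitted):

    struct page {
        unsigned long flags;
        union {
            struct {    /* Page cache and anonymous pages */
                struct list_head lru;
                struct address_space *mapping;
                pgoff_t index;
            };
            struct {    /* slab, slob and slub */
                union {
                    struct list_head slab_list;
                    struct {    /* Partial pages */
                        struct page *next;
                        int pages;
                        int pobjects;
                    };
                };
                struct kmem_cache *slab_cache;
            };
        };
    };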

Use the slab_list instead of the lru list for maintaining lists of
slabs.
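
Because slab_list and lru live in overlaying union branches, the switch
changes no layout; both members resolve to the same offset.  A minimal
stand-alone sketch of the union pattern (a mock for illustration, not
the kernel's definition) that checks this:

    /* mock.c -- build with: cc -std=c11 -c mock.c */
    #include <assert.h>
    #include <stddef.h>

    struct list_head {
        struct list_head *next, *prev;
    };

    struct mock_page {
        unsigned long flags;
        union {
            struct {    /* page cache / anonymous pages */
                struct list_head lru;
            };
            struct {    /* slab allocators */
                struct list_head slab_list;
            };
        };
    };

    /* The two union members overlay the same bytes. */
    static_assert(offsetof(struct mock_page, lru) ==
                  offsetof(struct mock_page, slab_list),
                  "lru and slab_list alias");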

Acked-by: Christoph Lameter <c...@linux.com>
Signed-off-by: Tobin C. Harding <to...@kernel.org>
---
 mm/slub.c | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/mm/slub.c b/mm/slub.c
index 8fbba4ff6c67..d17f117830a9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1023,7 +1023,7 @@ static void add_full(struct kmem_cache *s,
                return;
 
        lockdep_assert_held(&n->list_lock);
-       list_add(&page->lru, &n->full);
+       list_add(&page->slab_list, &n->full);
 }
 
 static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
@@ -1032,7 +1032,7 @@ static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct
                return;
 
        lockdep_assert_held(&n->list_lock);
-       list_del(&page->lru);
+       list_del(&page->slab_list);
 }
 
 /* Tracking of the number of slabs for debugging purposes */
@@ -1773,9 +1773,9 @@ __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
 {
        n->nr_partial++;
        if (tail == DEACTIVATE_TO_TAIL)
-               list_add_tail(&page->lru, &n->partial);
+               list_add_tail(&page->slab_list, &n->partial);
        else
-               list_add(&page->lru, &n->partial);
+               list_add(&page->slab_list, &n->partial);
 }
 
 static inline void add_partial(struct kmem_cache_node *n,
@@ -1789,7 +1789,7 @@ static inline void remove_partial(struct kmem_cache_node *n,
                                        struct page *page)
 {
        lockdep_assert_held(&n->list_lock);
-       list_del(&page->lru);
+       list_del(&page->slab_list);
        n->nr_partial--;
 }
 
@@ -1863,7 +1863,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
                return NULL;
 
        spin_lock(&n->list_lock);
-       list_for_each_entry_safe(page, page2, &n->partial, lru) {
+       list_for_each_entry_safe(page, page2, &n->partial, slab_list) {
                void *t;
 
                if (!pfmemalloc_match(page, flags))
@@ -2407,7 +2407,7 @@ static unsigned long count_partial(struct kmem_cache_node *n,
        struct page *page;
 
        spin_lock_irqsave(&n->list_lock, flags);
-       list_for_each_entry(page, &n->partial, lru)
+       list_for_each_entry(page, &n->partial, slab_list)
                x += get_count(page);
        spin_unlock_irqrestore(&n->list_lock, flags);
        return x;
@@ -3705,10 +3705,10 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 
        BUG_ON(irqs_disabled());
        spin_lock_irq(&n->list_lock);
-       list_for_each_entry_safe(page, h, &n->partial, lru) {
+       list_for_each_entry_safe(page, h, &n->partial, slab_list) {
                if (!page->inuse) {
                        remove_partial(n, page);
-                       list_add(&page->lru, &discard);
+                       list_add(&page->slab_list, &discard);
                } else {
                        list_slab_objects(s, page,
                        "Objects remaining in %s on __kmem_cache_shutdown()");
@@ -3716,7 +3716,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
        }
        spin_unlock_irq(&n->list_lock);
 
-       list_for_each_entry_safe(page, h, &discard, lru)
+       list_for_each_entry_safe(page, h, &discard, slab_list)
                discard_slab(s, page);
 }
 
@@ -3996,7 +3996,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
                 * Note that concurrent frees may occur while we hold the
                 * list_lock. page->inuse here is the upper limit.
                 */
-               list_for_each_entry_safe(page, t, &n->partial, lru) {
+               list_for_each_entry_safe(page, t, &n->partial, slab_list) {
                        int free = page->objects - page->inuse;
 
                        /* Do not reread page->inuse */
@@ -4006,10 +4006,10 @@ int __kmem_cache_shrink(struct kmem_cache *s)
                        BUG_ON(free <= 0);
 
                        if (free == page->objects) {
-                               list_move(&page->lru, &discard);
+                               list_move(&page->slab_list, &discard);
                                n->nr_partial--;
                        } else if (free <= SHRINK_PROMOTE_MAX)
-                               list_move(&page->lru, promote + free - 1);
+                               list_move(&page->slab_list, promote + free - 1);
                }
 
                /*
@@ -4022,7 +4022,7 @@ int __kmem_cache_shrink(struct kmem_cache *s)
                spin_unlock_irqrestore(&n->list_lock, flags);
 
                /* Release empty slabs */
-               list_for_each_entry_safe(page, t, &discard, lru)
+               list_for_each_entry_safe(page, t, &discard, slab_list)
                        discard_slab(s, page);
 
                if (slabs_node(s, node))
@@ -4214,11 +4214,11 @@ static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
        for_each_kmem_cache_node(s, node, n) {
                struct page *p;
 
-               list_for_each_entry(p, &n->partial, lru)
+               list_for_each_entry(p, &n->partial, slab_list)
                        p->slab_cache = s;
 
 #ifdef CONFIG_SLUB_DEBUG
-               list_for_each_entry(p, &n->full, lru)
+               list_for_each_entry(p, &n->full, slab_list)
                        p->slab_cache = s;
 #endif
        }
@@ -4435,7 +4435,7 @@ static int validate_slab_node(struct kmem_cache *s,
 
        spin_lock_irqsave(&n->list_lock, flags);
 
-       list_for_each_entry(page, &n->partial, lru) {
+       list_for_each_entry(page, &n->partial, slab_list) {
                validate_slab_slab(s, page, map);
                count++;
        }
@@ -4446,7 +4446,7 @@ static int validate_slab_node(struct kmem_cache *s,
        if (!(s->flags & SLAB_STORE_USER))
                goto out;
 
-       list_for_each_entry(page, &n->full, lru) {
+       list_for_each_entry(page, &n->full, slab_list) {
                validate_slab_slab(s, page, map);
                count++;
        }
@@ -4642,9 +4642,9 @@ static int list_locations(struct kmem_cache *s, char *buf,
                        continue;
 
                spin_lock_irqsave(&n->list_lock, flags);
-               list_for_each_entry(page, &n->partial, lru)
+               list_for_each_entry(page, &n->partial, slab_list)
                        process_slab(&t, s, page, alloc, map);
-               list_for_each_entry(page, &n->full, lru)
+               list_for_each_entry(page, &n->full, slab_list)
                        process_slab(&t, s, page, alloc, map);
                spin_unlock_irqrestore(&n->list_lock, flags);
        }
-- 
2.21.0
