The active_nodes mask lets us skip empty nodes when walking over
list_lru items from all nodes in list_lru_count/walk. However, these
functions are never called from really hot paths, so this kind of
optimization does not seem necessary there. OTOH, removing the mask
will make it easier to make list_lru per-memcg.
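
For reference, a minimal sketch of what the node iteration in
list_lru_count() reduces to after this change (assembled from the hunks
below; empty nodes are still visited, they just contribute zero):

	static inline unsigned long list_lru_count(struct list_lru *lru)
	{
		long count = 0;
		int nid;

		/*
		 * Walk every node with normal memory instead of consulting
		 * the old active_nodes mask; an empty per-node list simply
		 * adds nothing to the total.
		 */
		for_each_node_state(nid, N_NORMAL_MEMORY)
			count += list_lru_count_node(lru, nid);

		return count;
	}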

Signed-off-by: Vladimir Davydov <[email protected]>
---
 include/linux/list_lru.h |    5 ++---
 mm/list_lru.c            |   10 +++-------
 2 files changed, 5 insertions(+), 10 deletions(-)

diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index f500a2e39b13..53c1d6b78270 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -31,7 +31,6 @@ struct list_lru_node {
 
 struct list_lru {
        struct list_lru_node    *node;
-       nodemask_t              active_nodes;
 };
 
 void list_lru_destroy(struct list_lru *lru);
@@ -94,7 +93,7 @@ static inline unsigned long list_lru_count(struct list_lru *lru)
        long count = 0;
        int nid;
 
-       for_each_node_mask(nid, lru->active_nodes)
+       for_each_node_state(nid, N_NORMAL_MEMORY)
                count += list_lru_count_node(lru, nid);
 
        return count;
@@ -142,7 +141,7 @@ list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
        long isolated = 0;
        int nid;
 
-       for_each_node_mask(nid, lru->active_nodes) {
+       for_each_node_state(nid, N_NORMAL_MEMORY) {
                isolated += list_lru_walk_node(lru, nid, isolate,
                                               cb_arg, &nr_to_walk);
                if (nr_to_walk <= 0)
diff --git a/mm/list_lru.c b/mm/list_lru.c
index f1a0db194173..07e198c77888 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -19,8 +19,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item)
        WARN_ON_ONCE(nlru->nr_items < 0);
        if (list_empty(item)) {
                list_add_tail(item, &nlru->list);
-               if (nlru->nr_items++ == 0)
-                       node_set(nid, lru->active_nodes);
+               nlru->nr_items++;
                spin_unlock(&nlru->lock);
                return true;
        }
@@ -37,8 +36,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item)
        spin_lock(&nlru->lock);
        if (!list_empty(item)) {
                list_del_init(item);
-               if (--nlru->nr_items == 0)
-                       node_clear(nid, lru->active_nodes);
+               nlru->nr_items--;
                WARN_ON_ONCE(nlru->nr_items < 0);
                spin_unlock(&nlru->lock);
                return true;
@@ -90,8 +88,7 @@ restart:
                case LRU_REMOVED_RETRY:
                        assert_spin_locked(&nlru->lock);
                case LRU_REMOVED:
-                       if (--nlru->nr_items == 0)
-                               node_clear(nid, lru->active_nodes);
+                       nlru->nr_items--;
                        WARN_ON_ONCE(nlru->nr_items < 0);
                        isolated++;
                        /*
@@ -133,7 +130,6 @@ int list_lru_init_key(struct list_lru *lru, struct lock_class_key *key)
        if (!lru->node)
                return -ENOMEM;
 
-       nodes_clear(lru->active_nodes);
        for (i = 0; i < nr_node_ids; i++) {
                spin_lock_init(&lru->node[i].lock);
                if (key)
-- 
1.7.10.4
