This patch adds the new rcu field to struct list_lru_memcg and updates
the kmalloc() call sites to allocate memory for it.
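
For reference only (not part of the patch), a minimal userspace sketch of
the header-plus-array allocation pattern the patch switches to, with
malloc() standing in for kmalloc(); everything except the list_lru_memcg
layout itself is hypothetical:

/* Illustrative sketch, not kernel code: one allocation covers the
 * struct header (including the new rcu field) plus the per-memcg
 * pointer array that follows it.
 */
#include <stdlib.h>
#include <string.h>

/* Stand-in for the kernel's struct rcu_head from <linux/types.h>. */
struct rcu_head {
	struct rcu_head *next;
	void (*func)(struct rcu_head *);
};

struct list_lru_one;

struct list_lru_memcg {
	struct rcu_head		rcu;	/* new field added by the patch */
	struct list_lru_one	*lru[];	/* kernel header uses lru[0] */
};

static struct list_lru_memcg *alloc_memcg_lrus(int size)
{
	/* Mirrors kmalloc(sizeof(struct list_lru_memcg) +
	 * size * sizeof(void *), GFP_KERNEL) from the patch.
	 */
	struct list_lru_memcg *memcg_lrus;

	memcg_lrus = malloc(sizeof(*memcg_lrus) + size * sizeof(void *));
	if (memcg_lrus)
		memset(memcg_lrus->lru, 0, size * sizeof(void *));
	return memcg_lrus;
}

int main(void)
{
	struct list_lru_memcg *p = alloc_memcg_lrus(16);

	free(p);
	return 0;
}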

Signed-off-by: Kirill Tkhai <ktk...@virtuozzo.com>
---
 include/linux/list_lru.h |    1 +
 mm/list_lru.c            |    7 ++++---
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index fa7fd03cb5f9..b65505b32a3d 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -31,6 +31,7 @@ struct list_lru_one {
 };
 
 struct list_lru_memcg {
+       struct rcu_head         rcu;
        /* array of per cgroup lists, indexed by memcg_cache_id */
        struct list_lru_one     *lru[0];
 };
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 7a40fa2be858..a726e321bf3e 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -325,7 +325,8 @@ static int memcg_init_list_lru_node(struct list_lru_node *nlru)
 {
        int size = memcg_nr_cache_ids;
 
-       nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL);
+       nlru->memcg_lrus = kmalloc(sizeof(struct list_lru_memcg) +
+                                  size * sizeof(void *), GFP_KERNEL);
        if (!nlru->memcg_lrus)
                return -ENOMEM;
 
@@ -351,7 +352,7 @@ static int memcg_update_list_lru_node(struct list_lru_node *nlru,
        BUG_ON(old_size > new_size);
 
        old = nlru->memcg_lrus;
-       new = kmalloc(new_size * sizeof(void *), GFP_KERNEL);
+       new = kmalloc(sizeof(*new) + new_size * sizeof(void *), GFP_KERNEL);
        if (!new)
                return -ENOMEM;
 
@@ -360,7 +361,7 @@ static int memcg_update_list_lru_node(struct list_lru_node *nlru,
                return -ENOMEM;
        }
 
-       memcpy(new, old, old_size * sizeof(void *));
+       memcpy(&new->lru, &old->lru, old_size * sizeof(void *));
 
        /*
         * The lock guarantees that we won't race with a reader
