[PATCH v4] memcg: refactor mem_cgroup_resize_limit()

2018-01-08 Thread Yu Zhao
mem_cgroup_resize_limit() and mem_cgroup_resize_memsw_limit() have
identical logics. Refactor code so we don't need to keep two pieces
of code that does same thing.

Signed-off-by: Yu Zhao 
Acked-by: Vladimir Davydov 
Acked-by: Michal Hocko 
---
 mm/memcontrol.c | 77 +
 1 file changed, 17 insertions(+), 60 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ac2ffd5e02b9..9af733fa9381 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2467,13 +2467,15 @@ static inline int 
mem_cgroup_move_swap_account(swp_entry_t entry,
 static DEFINE_MUTEX(memcg_limit_mutex);
 
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
-  unsigned long limit)
+  unsigned long limit, bool memsw)
 {
unsigned long curusage;
unsigned long oldusage;
bool enlarge = false;
int retry_count;
int ret;
+   bool limits_invariant;
+   struct page_counter *counter = memsw ? >memsw : >memory;
 
/*
 * For keeping hierarchical_reclaim simple, how long we should retry
@@ -2483,7 +2485,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup 
*memcg,
retry_count = MEM_CGROUP_RECLAIM_RETRIES *
  mem_cgroup_count_children(memcg);
 
-   oldusage = page_counter_read(&memcg->memory);
+   oldusage = page_counter_read(counter);
 
do {
if (signal_pending(current)) {
@@ -2492,73 +2494,28 @@ static int mem_cgroup_resize_limit(struct mem_cgroup 
*memcg,
}
 
	mutex_lock(&memcg_limit_mutex);
-   if (limit > memcg->memsw.limit) {
-   mutex_unlock(&memcg_limit_mutex);
-   ret = -EINVAL;
-   break;
-   }
-   if (limit > memcg->memory.limit)
-   enlarge = true;
-   ret = page_counter_limit(&memcg->memory, limit);
-   mutex_unlock(&memcg_limit_mutex);
-
-   if (!ret)
-   break;
-
-   try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
-
-   curusage = page_counter_read(&memcg->memory);
-   /* Usage is reduced ? */
-   if (curusage >= oldusage)
-   retry_count--;
-   else
-   oldusage = curusage;
-   } while (retry_count);
-
-   if (!ret && enlarge)
-   memcg_oom_recover(memcg);
-
-   return ret;
-}
-
-static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
-unsigned long limit)
-{
-   unsigned long curusage;
-   unsigned long oldusage;
-   bool enlarge = false;
-   int retry_count;
-   int ret;
-
-   /* see mem_cgroup_resize_res_limit */
-   retry_count = MEM_CGROUP_RECLAIM_RETRIES *
- mem_cgroup_count_children(memcg);
-
-   oldusage = page_counter_read(&memcg->memsw);
-
-   do {
-   if (signal_pending(current)) {
-   ret = -EINTR;
-   break;
-   }
-
-   mutex_lock(&memcg_limit_mutex);
-   if (limit < memcg->memory.limit) {
+   /*
+* Make sure that the new limit (memsw or memory limit) doesn't
+* break our basic invariant rule memory.limit <= memsw.limit.
+*/
+   limits_invariant = memsw ? limit >= memcg->memory.limit :
+  limit <= memcg->memsw.limit;
+   if (!limits_invariant) {
	mutex_unlock(&memcg_limit_mutex);
	ret = -EINVAL;
	break;
	}
-   if (limit > memcg->memsw.limit)
+   if (limit > counter->limit)
	enlarge = true;
-   ret = page_counter_limit(&memcg->memsw, limit);
+   ret = page_counter_limit(counter, limit);
	mutex_unlock(&memcg_limit_mutex);
 
if (!ret)
break;
 
-   try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
+   try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, !memsw);
 
-   curusage = page_counter_read(&memcg->memsw);
+   curusage = page_counter_read(counter);
/* Usage is reduced ? */
if (curusage >= oldusage)
retry_count--;
@@ -3020,10 +2977,10 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file 
*of,
}
switch (MEMFILE_TYPE(of_cft(of)->private)) {
case _MEM:
-   ret = mem_cgroup_resize_limit(memcg, nr_pages);
+   ret = mem_cgroup_resize_limit(memcg, nr_pages, false);
break;
case _MEMSWAP:
- 

[PATCH v4] memcg: refactor mem_cgroup_resize_limit()

2018-01-08 Thread Yu Zhao
mem_cgroup_resize_limit() and mem_cgroup_resize_memsw_limit() have
identical logics. Refactor code so we don't need to keep two pieces
of code that does same thing.

Signed-off-by: Yu Zhao 
Acked-by: Vladimir Davydov 
Acked-by: Michal Hocko 
---
 mm/memcontrol.c | 77 +
 1 file changed, 17 insertions(+), 60 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ac2ffd5e02b9..9af733fa9381 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2467,13 +2467,15 @@ static inline int 
mem_cgroup_move_swap_account(swp_entry_t entry,
 static DEFINE_MUTEX(memcg_limit_mutex);
 
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
-  unsigned long limit)
+  unsigned long limit, bool memsw)
 {
unsigned long curusage;
unsigned long oldusage;
bool enlarge = false;
int retry_count;
int ret;
+   bool limits_invariant;
+   struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
 
/*
 * For keeping hierarchical_reclaim simple, how long we should retry
@@ -2483,7 +2485,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup 
*memcg,
retry_count = MEM_CGROUP_RECLAIM_RETRIES *
  mem_cgroup_count_children(memcg);
 
-   oldusage = page_counter_read(&memcg->memory);
+   oldusage = page_counter_read(counter);
 
do {
if (signal_pending(current)) {
@@ -2492,73 +2494,28 @@ static int mem_cgroup_resize_limit(struct mem_cgroup 
*memcg,
}
 
	mutex_lock(&memcg_limit_mutex);
-   if (limit > memcg->memsw.limit) {
-   mutex_unlock(&memcg_limit_mutex);
-   ret = -EINVAL;
-   break;
-   }
-   if (limit > memcg->memory.limit)
-   enlarge = true;
-   ret = page_counter_limit(&memcg->memory, limit);
-   mutex_unlock(&memcg_limit_mutex);
-
-   if (!ret)
-   break;
-
-   try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
-
-   curusage = page_counter_read(&memcg->memory);
-   /* Usage is reduced ? */
-   if (curusage >= oldusage)
-   retry_count--;
-   else
-   oldusage = curusage;
-   } while (retry_count);
-
-   if (!ret && enlarge)
-   memcg_oom_recover(memcg);
-
-   return ret;
-}
-
-static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
-unsigned long limit)
-{
-   unsigned long curusage;
-   unsigned long oldusage;
-   bool enlarge = false;
-   int retry_count;
-   int ret;
-
-   /* see mem_cgroup_resize_res_limit */
-   retry_count = MEM_CGROUP_RECLAIM_RETRIES *
- mem_cgroup_count_children(memcg);
-
-   oldusage = page_counter_read(&memcg->memsw);
-
-   do {
-   if (signal_pending(current)) {
-   ret = -EINTR;
-   break;
-   }
-
-   mutex_lock(&memcg_limit_mutex);
-   if (limit < memcg->memory.limit) {
+   /*
+* Make sure that the new limit (memsw or memory limit) doesn't
+* break our basic invariant rule memory.limit <= memsw.limit.
+*/
+   limits_invariant = memsw ? limit >= memcg->memory.limit :
+  limit <= memcg->memsw.limit;
+   if (!limits_invariant) {
	mutex_unlock(&memcg_limit_mutex);
	ret = -EINVAL;
	break;
	}
-   if (limit > memcg->memsw.limit)
+   if (limit > counter->limit)
	enlarge = true;
-   ret = page_counter_limit(&memcg->memsw, limit);
+   ret = page_counter_limit(counter, limit);
	mutex_unlock(&memcg_limit_mutex);
 
if (!ret)
break;
 
-   try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
+   try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, !memsw);
 
-   curusage = page_counter_read(&memcg->memsw);
+   curusage = page_counter_read(counter);
/* Usage is reduced ? */
if (curusage >= oldusage)
retry_count--;
@@ -3020,10 +2977,10 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file 
*of,
}
switch (MEMFILE_TYPE(of_cft(of)->private)) {
case _MEM:
-   ret = mem_cgroup_resize_limit(memcg, nr_pages);
+   ret = mem_cgroup_resize_limit(memcg, nr_pages, false);
break;
case _MEMSWAP:
-   ret = mem_cgroup_resize_memsw_limit(memcg, 

[PATCH v4] memcg: refactor mem_cgroup_resize_limit()

2017-06-14 Thread Yu Zhao
mem_cgroup_resize_limit() and mem_cgroup_resize_memsw_limit() have
identical logics. Refactor code so we don't need to keep two pieces
of code that does same thing.

Signed-off-by: Yu Zhao 
Acked-by: Vladimir Davydov 
Acked-by: Michal Hocko 
---
Changelog since v1:
* minor style change
Changelog since v2:
* fix build error
Changelog since v3:
* minor style change

 mm/memcontrol.c | 77 +
 1 file changed, 17 insertions(+), 60 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 94172089f52f..401f64a3dda1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2422,13 +2422,15 @@ static inline int 
mem_cgroup_move_swap_account(swp_entry_t entry,
 static DEFINE_MUTEX(memcg_limit_mutex);
 
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
-  unsigned long limit)
+  unsigned long limit, bool memsw)
 {
unsigned long curusage;
unsigned long oldusage;
bool enlarge = false;
int retry_count;
int ret;
+   bool limits_invariant;
+   struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
 
/*
 * For keeping hierarchical_reclaim simple, how long we should retry
@@ -2438,7 +2440,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup 
*memcg,
retry_count = MEM_CGROUP_RECLAIM_RETRIES *
  mem_cgroup_count_children(memcg);
 
-   oldusage = page_counter_read(&memcg->memory);
+   oldusage = page_counter_read(counter);
 
do {
if (signal_pending(current)) {
@@ -2447,73 +2449,28 @@ static int mem_cgroup_resize_limit(struct mem_cgroup 
*memcg,
}
 
	mutex_lock(&memcg_limit_mutex);
-   if (limit > memcg->memsw.limit) {
-   mutex_unlock(&memcg_limit_mutex);
-   ret = -EINVAL;
-   break;
-   }
-   if (limit > memcg->memory.limit)
-   enlarge = true;
-   ret = page_counter_limit(&memcg->memory, limit);
-   mutex_unlock(&memcg_limit_mutex);
-
-   if (!ret)
-   break;
-
-   try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
-
-   curusage = page_counter_read(&memcg->memory);
-   /* Usage is reduced ? */
-   if (curusage >= oldusage)
-   retry_count--;
-   else
-   oldusage = curusage;
-   } while (retry_count);
-
-   if (!ret && enlarge)
-   memcg_oom_recover(memcg);
-
-   return ret;
-}
-
-static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
-unsigned long limit)
-{
-   unsigned long curusage;
-   unsigned long oldusage;
-   bool enlarge = false;
-   int retry_count;
-   int ret;
-
-   /* see mem_cgroup_resize_res_limit */
-   retry_count = MEM_CGROUP_RECLAIM_RETRIES *
- mem_cgroup_count_children(memcg);
-
-   oldusage = page_counter_read(&memcg->memsw);
-
-   do {
-   if (signal_pending(current)) {
-   ret = -EINTR;
-   break;
-   }
-
-   mutex_lock(&memcg_limit_mutex);
-   if (limit < memcg->memory.limit) {
+   /*
+* Make sure that the new limit (memsw or memory limit) doesn't
+* break our basic invariant rule memory.limit <= memsw.limit.
+*/
+   limits_invariant = memsw ? limit >= memcg->memory.limit :
+  limit <= memcg->memsw.limit;
+   if (!limits_invariant) {
	mutex_unlock(&memcg_limit_mutex);
	ret = -EINVAL;
	break;
	}
-   if (limit > memcg->memsw.limit)
+   if (limit > counter->limit)
	enlarge = true;
-   ret = page_counter_limit(&memcg->memsw, limit);
+   ret = page_counter_limit(counter, limit);
	mutex_unlock(&memcg_limit_mutex);
 
if (!ret)
break;
 
-   try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
+   try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, !memsw);
 
-   curusage = page_counter_read(&memcg->memsw);
+   curusage = page_counter_read(counter);
/* Usage is reduced ? */
if (curusage >= oldusage)
retry_count--;
@@ -2975,10 +2932,10 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file 
*of,
}
switch (MEMFILE_TYPE(of_cft(of)->private)) {
case _MEM:
-   ret = mem_cgroup_resize_limit(memcg, nr_pages);
+   ret = 

[PATCH v4] memcg: refactor mem_cgroup_resize_limit()

2017-06-14 Thread Yu Zhao
mem_cgroup_resize_limit() and mem_cgroup_resize_memsw_limit() have
identical logics. Refactor code so we don't need to keep two pieces
of code that does same thing.

Signed-off-by: Yu Zhao 
Acked-by: Vladimir Davydov 
Acked-by: Michal Hocko 
---
Changelog since v1:
* minor style change
Changelog since v2:
* fix build error
Changelog since v3:
* minor style change

 mm/memcontrol.c | 77 +
 1 file changed, 17 insertions(+), 60 deletions(-)

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 94172089f52f..401f64a3dda1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2422,13 +2422,15 @@ static inline int 
mem_cgroup_move_swap_account(swp_entry_t entry,
 static DEFINE_MUTEX(memcg_limit_mutex);
 
 static int mem_cgroup_resize_limit(struct mem_cgroup *memcg,
-  unsigned long limit)
+  unsigned long limit, bool memsw)
 {
unsigned long curusage;
unsigned long oldusage;
bool enlarge = false;
int retry_count;
int ret;
+   bool limits_invariant;
+   struct page_counter *counter = memsw ? &memcg->memsw : &memcg->memory;
 
/*
 * For keeping hierarchical_reclaim simple, how long we should retry
@@ -2438,7 +2440,7 @@ static int mem_cgroup_resize_limit(struct mem_cgroup 
*memcg,
retry_count = MEM_CGROUP_RECLAIM_RETRIES *
  mem_cgroup_count_children(memcg);
 
-   oldusage = page_counter_read(&memcg->memory);
+   oldusage = page_counter_read(counter);
 
do {
if (signal_pending(current)) {
@@ -2447,73 +2449,28 @@ static int mem_cgroup_resize_limit(struct mem_cgroup 
*memcg,
}
 
	mutex_lock(&memcg_limit_mutex);
-   if (limit > memcg->memsw.limit) {
-   mutex_unlock(&memcg_limit_mutex);
-   ret = -EINVAL;
-   break;
-   }
-   if (limit > memcg->memory.limit)
-   enlarge = true;
-   ret = page_counter_limit(&memcg->memory, limit);
-   mutex_unlock(&memcg_limit_mutex);
-
-   if (!ret)
-   break;
-
-   try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true);
-
-   curusage = page_counter_read(&memcg->memory);
-   /* Usage is reduced ? */
-   if (curusage >= oldusage)
-   retry_count--;
-   else
-   oldusage = curusage;
-   } while (retry_count);
-
-   if (!ret && enlarge)
-   memcg_oom_recover(memcg);
-
-   return ret;
-}
-
-static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg,
-unsigned long limit)
-{
-   unsigned long curusage;
-   unsigned long oldusage;
-   bool enlarge = false;
-   int retry_count;
-   int ret;
-
-   /* see mem_cgroup_resize_res_limit */
-   retry_count = MEM_CGROUP_RECLAIM_RETRIES *
- mem_cgroup_count_children(memcg);
-
-   oldusage = page_counter_read(&memcg->memsw);
-
-   do {
-   if (signal_pending(current)) {
-   ret = -EINTR;
-   break;
-   }
-
-   mutex_lock(&memcg_limit_mutex);
-   if (limit < memcg->memory.limit) {
+   /*
+* Make sure that the new limit (memsw or memory limit) doesn't
+* break our basic invariant rule memory.limit <= memsw.limit.
+*/
+   limits_invariant = memsw ? limit >= memcg->memory.limit :
+  limit <= memcg->memsw.limit;
+   if (!limits_invariant) {
	mutex_unlock(&memcg_limit_mutex);
	ret = -EINVAL;
	break;
	}
-   if (limit > memcg->memsw.limit)
+   if (limit > counter->limit)
	enlarge = true;
-   ret = page_counter_limit(&memcg->memsw, limit);
+   ret = page_counter_limit(counter, limit);
	mutex_unlock(&memcg_limit_mutex);
 
if (!ret)
break;
 
-   try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false);
+   try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, !memsw);
 
-   curusage = page_counter_read(&memcg->memsw);
+   curusage = page_counter_read(counter);
/* Usage is reduced ? */
if (curusage >= oldusage)
retry_count--;
@@ -2975,10 +2932,10 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file 
*of,
}
switch (MEMFILE_TYPE(of_cft(of)->private)) {
case _MEM:
-   ret = mem_cgroup_resize_limit(memcg, nr_pages);
+   ret = mem_cgroup_resize_limit(memcg, nr_pages, false);