On Tue 22-10-19 10:48:02, Johannes Weiner wrote:
> This function is getting long and unwieldy, split out the memcg bits.
> 
> The updated shrink_node() handles the generic (node) reclaim aspects:
>   - global vmpressure notifications
>   - writeback and congestion throttling
>   - reclaim/compaction management
>   - kswapd giving up on unreclaimable nodes
> 
> It then calls a new shrink_node_memcgs() which handles cgroup specifics:
>   - the cgroup tree traversal
>   - memory.low considerations
>   - per-cgroup slab shrinking callbacks
>   - per-cgroup vmpressure notifications
> 
> Signed-off-by: Johannes Weiner <han...@cmpxchg.org>

Acked-by: Michal Hocko <mho...@suse.com>

> ---
>  mm/vmscan.c | 28 ++++++++++++++++++----------
>  1 file changed, 18 insertions(+), 10 deletions(-)
> 
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index db073b40c432..65baa89740dd 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -2722,18 +2722,10 @@ static bool pgdat_memcg_congested(pg_data_t *pgdat, struct mem_cgroup *memcg)
>               (memcg && memcg_congested(pgdat, memcg));
>  }
>  
> -static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
> +static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
>  {
> -     struct reclaim_state *reclaim_state = current->reclaim_state;
>       struct mem_cgroup *root = sc->target_mem_cgroup;
> -     unsigned long nr_reclaimed, nr_scanned;
> -     bool reclaimable = false;
>       struct mem_cgroup *memcg;
> -again:
> -     memset(&sc->nr, 0, sizeof(sc->nr));
> -
> -     nr_reclaimed = sc->nr_reclaimed;
> -     nr_scanned = sc->nr_scanned;
>  
>       memcg = mem_cgroup_iter(root, NULL, NULL);
>       do {
> @@ -2786,6 +2778,22 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
>                          sc->nr_reclaimed - reclaimed);
>  
>       } while ((memcg = mem_cgroup_iter(root, memcg, NULL)));
> +}
> +
> +static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
> +{
> +     struct reclaim_state *reclaim_state = current->reclaim_state;
> +     struct mem_cgroup *root = sc->target_mem_cgroup;
> +     unsigned long nr_reclaimed, nr_scanned;
> +     bool reclaimable = false;
> +
> +again:
> +     memset(&sc->nr, 0, sizeof(sc->nr));
> +
> +     nr_reclaimed = sc->nr_reclaimed;
> +     nr_scanned = sc->nr_scanned;
> +
> +     shrink_node_memcgs(pgdat, sc);
>  
>       if (reclaim_state) {
>               sc->nr_reclaimed += reclaim_state->reclaimed_slab;
> @@ -2793,7 +2801,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
>       }
>  
>       /* Record the subtree's reclaim efficiency */
> -     vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true,
> +     vmpressure(sc->gfp_mask, root, true,
>                  sc->nr_scanned - nr_scanned,
>                  sc->nr_reclaimed - nr_reclaimed);
>  
> -- 
> 2.23.0
> 

-- 
Michal Hocko
SUSE Labs

Reply via email to