>  static bool shrink_zone(struct zone *zone, struct scan_control *sc,
>                       bool is_classzone)
>  {
> +     struct reclaim_state *reclaim_state = current->reclaim_state;
>       unsigned long nr_reclaimed, nr_scanned;
>       bool reclaimable = false;
> 
> @@ -2318,16 +2357,22 @@ static bool shrink_zone(struct zone *zone, struct 
> scan_control *sc,
> 
>               memcg = mem_cgroup_iter(root, NULL, &reclaim);
>               do {
> -                     unsigned long lru_pages;
> +                     unsigned long lru_pages, scanned;
>                       struct lruvec *lruvec;
>                       int swappiness;
> 
>                       lruvec = mem_cgroup_zone_lruvec(zone, memcg);
>                       swappiness = mem_cgroup_swappiness(memcg);
> +                     scanned = sc->nr_scanned;
> 
>                       shrink_lruvec(lruvec, swappiness, sc, &lru_pages);
>                       zone_lru_pages += lru_pages;
> 
> +                     if (memcg && is_classzone)
> +                             shrink_slab(sc->gfp_mask, zone_to_nid(zone),
> +                                         memcg, sc->nr_scanned - scanned,
> +                                         lru_pages);
> +
It looks like sc->nr_reclaimed also has to be updated for "limit reclaim"
(memcg limit reclaim): the new per-memcg shrink_slab() call inside the
iteration loop no longer folds reclaim_state->reclaimed_slab into
sc->nr_reclaimed, since that accounting only happens after the loop now.

Hillf
>                       /*
>                        * Direct reclaim and kswapd have to scan all memory
>                        * cgroups to fulfill the overall scan target for the
> @@ -2350,19 +2395,14 @@ static bool shrink_zone(struct zone *zone, struct 
> scan_control *sc,
>                * Shrink the slab caches in the same proportion that
>                * the eligible LRU pages were scanned.
>                */
> -             if (global_reclaim(sc) && is_classzone) {
> -                     struct reclaim_state *reclaim_state;
> -
> -                     shrink_node_slabs(sc->gfp_mask, zone_to_nid(zone),
> -                                       sc->nr_scanned - nr_scanned,
> -                                       zone_lru_pages);
> -
> -                     reclaim_state = current->reclaim_state;
> -                     if (reclaim_state) {
> -                             sc->nr_reclaimed +=
> -                                     reclaim_state->reclaimed_slab;
> -                             reclaim_state->reclaimed_slab = 0;
> -                     }
> +             if (global_reclaim(sc) && is_classzone)
> +                     shrink_slab(sc->gfp_mask, zone_to_nid(zone), NULL,
> +                                 sc->nr_scanned - nr_scanned,
> +                                 zone_lru_pages);
> +
> +             if (reclaim_state) {
> +                     sc->nr_reclaimed += reclaim_state->reclaimed_slab;
> +                     reclaim_state->reclaimed_slab = 0;
>               }
> 
>               vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
> --
> 1.7.10.4

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/

Reply via email to