The balance gap was introduced to apply equal pressure to all zones when
reclaiming for a higher zone.  With node-based LRU, the balance gap is no
longer needed and the code is dead, so remove it.
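
For reference, the gap being removed was computed per zone as roughly 1% of
zone->managed_pages (KSWAPD_ZONE_BALANCE_GAP_RATIO is 100), capped by the
low watermark; see the compaction_ready() hunk below:

        balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
                        zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));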

[vba...@suse.cz: Also remove KSWAPD_ZONE_BALANCE_GAP_RATIO]
Link: http://lkml.kernel.org/r/1466518566-30034-8-git-send-email-mgor...@techsingularity.net
Signed-off-by: Mel Gorman <mgor...@techsingularity.net>
Acked-by: Vlastimil Babka <vba...@suse.cz>
Cc: Johannes Weiner <han...@cmpxchg.org>
Cc: Rik van Riel <r...@surriel.com>
Signed-off-by: Andrew Morton <a...@linux-foundation.org>
---
 include/linux/swap.h |  9 ---------
 mm/vmscan.c          | 19 ++++++++-----------
 2 files changed, 8 insertions(+), 20 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index c82f916008b7..916e2eddecd6 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -157,15 +157,6 @@ enum {
 #define SWAP_CLUSTER_MAX 32UL
 #define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
 
-/*
- * Ratio between zone->managed_pages and the "gap" that above the per-zone
- * "high_wmark". While balancing nodes, We allow kswapd to shrink zones that
- * do not meet the (high_wmark + gap) watermark, even which already met the
- * high_wmark, in order to provide better per-zone lru behavior. We are ok to
- * spend not more than 1% of the memory for this zone balancing "gap".
- */
-#define KSWAPD_ZONE_BALANCE_GAP_RATIO 100
-
 #define SWAP_MAP_MAX   0x3e    /* Max duplication count, in first swap_map */
 #define SWAP_MAP_BAD   0x3f    /* Note pageblock is bad, in first swap_map */
 #define SWAP_HAS_CACHE 0x40    /* Flag page is cached, in first swap_map */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7b382b90b145..a52167eabc96 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2518,7 +2518,7 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc,
  */
 static inline bool compaction_ready(struct zone *zone, int order, int classzone_idx)
 {
-       unsigned long balance_gap, watermark;
+       unsigned long watermark;
        bool watermark_ok;
 
        /*
@@ -2527,9 +2527,7 @@ static inline bool compaction_ready(struct zone *zone, int order, int classzone_
         * there is a buffer of free pages available to give compaction
         * a reasonable chance of completing and allocating the page
         */
-       balance_gap = min(low_wmark_pages(zone), DIV_ROUND_UP(
-                       zone->managed_pages, KSWAPD_ZONE_BALANCE_GAP_RATIO));
-       watermark = high_wmark_pages(zone) + balance_gap + (2UL << order);
+       watermark = high_wmark_pages(zone) + (2UL << order);
        watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, classzone_idx);
 
        /*
@@ -3000,10 +2998,9 @@ static void age_active_anon(struct pglist_data *pgdat,
        } while (memcg);
 }
 
-static bool zone_balanced(struct zone *zone, int order,
-                       unsigned long balance_gap, int classzone_idx)
+static bool zone_balanced(struct zone *zone, int order, int classzone_idx)
 {
-       unsigned long mark = high_wmark_pages(zone) + balance_gap;
+       unsigned long mark = high_wmark_pages(zone);
 
        return zone_watermark_ok_safe(zone, order, mark, classzone_idx);
 }
@@ -3045,7 +3042,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
                if (!populated_zone(zone))
                        continue;
 
-               if (zone_balanced(zone, order, 0, classzone_idx))
+               if (zone_balanced(zone, order, classzone_idx))
                        return true;
        }
 
@@ -3148,7 +3145,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
                                break;
                        }
 
-                       if (!zone_balanced(zone, order, 0, 0)) {
+                       if (!zone_balanced(zone, order, 0)) {
                                classzone_idx = i;
                                break;
                        } else {
@@ -3216,7 +3213,7 @@ static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone_balanced(zone, sc.order, 0, classzone_idx)) {
+                       if (zone_balanced(zone, sc.order, classzone_idx)) {
                                clear_bit(PGDAT_CONGESTED, &pgdat->flags);
                                clear_bit(PGDAT_DIRTY, &pgdat->flags);
                                goto out;
@@ -3427,7 +3424,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
        }
        if (!waitqueue_active(&pgdat->kswapd_wait))
                return;
-       if (zone_balanced(zone, order, 0, 0))
+       if (zone_balanced(zone, order, 0))
                return;
 
        trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
-- 
2.6.4
