[Qemu-devel] [PATCH 4/6] Revert "mm: have order > 0 compaction start off where it left"

2012-09-20 Thread Mel Gorman
This reverts commit 7db8889a ("mm: have order > 0 compaction start off
where it left") and commit de74f1cc ("mm: have order > 0 compaction start
near a pageblock with free pages). These patches were a good idea and
tests confirmed that they massively reduced the amount of scanning but
the implementation is complex and tricky to understand. A later patch
will cache what pageblocks should be skipped and reimplements the
concept of compact_cached_free_pfn on top for both migration and
free scanners.

Signed-off-by: Mel Gorman mgor...@suse.de
---
 include/linux/mmzone.h |4 ---
 mm/compaction.c|   65 
 mm/internal.h  |6 -
 mm/page_alloc.c|5 
 4 files changed, 5 insertions(+), 75 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 2daa54f..603d0b5 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -368,10 +368,6 @@ struct zone {
 */
spinlock_t  lock;
int all_unreclaimable; /* All pages pinned */
-#if defined CONFIG_COMPACTION || defined CONFIG_CMA
-   /* pfn where the last incremental compaction isolated free pages */
-   unsigned long   compact_cached_free_pfn;
-#endif
 #ifdef CONFIG_MEMORY_HOTPLUG
/* see spanned/present_pages for more description */
seqlock_t   span_seqlock;
diff --git a/mm/compaction.c b/mm/compaction.c
index 70c7cbd..6058822 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -481,20 +481,6 @@ next_pageblock:
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
 /*
- * Returns the start pfn of the last page block in a zone.  This is the starting
- * point for full compaction of a zone.  Compaction searches for free pages from
- * the end of each zone, while isolate_freepages_block scans forward inside each
- * page block.
- */
-static unsigned long start_free_pfn(struct zone *zone)
-{
-	unsigned long free_pfn;
-	free_pfn = zone->zone_start_pfn + zone->spanned_pages;
-	free_pfn &= ~(pageblock_nr_pages-1);
-	return free_pfn;
-}
-
-/*
  * Based on information in the current compact_control, find blocks
  * suitable for isolating free pages from and then isolate them.
  */
@@ -562,19 +548,8 @@ static void isolate_freepages(struct zone *zone,
 * looking for free pages, the search will restart here as
 * page migration may have returned some pages to the allocator
 */
-   if (isolated) {
+   if (isolated)
high_pfn = max(high_pfn, pfn);
-
-   /*
-* If the free scanner has wrapped, update
-* compact_cached_free_pfn to point to the highest
-* pageblock with free pages. This reduces excessive
-* scanning of full pageblocks near the end of the
-* zone
-*/
-			if (cc->order > 0 && cc->wrapped)
-				zone->compact_cached_free_pfn = high_pfn;
-   }
}
 
/* split_free_page does not map the pages */
@@ -582,11 +557,6 @@ static void isolate_freepages(struct zone *zone,
 
 	cc->free_pfn = high_pfn;
 	cc->nr_freepages = nr_freepages;
-
-	/* If compact_cached_free_pfn is reset then set it now */
-	if (cc->order > 0 && !cc->wrapped &&
-			zone->compact_cached_free_pfn == start_free_pfn(zone))
-		zone->compact_cached_free_pfn = high_pfn;
 }
 
 /*
@@ -682,26 +652,8 @@ static int compact_finished(struct zone *zone,
if (fatal_signal_pending(current))
return COMPACT_PARTIAL;
 
-	/*
-	 * A full (order == -1) compaction run starts at the beginning and
-	 * end of a zone; it completes when the migrate and free scanner meet.
-	 * A partial (order > 0) compaction can start with the free scanner
-	 * at a random point in the zone, and may have to restart.
-	 */
-	if (cc->free_pfn <= cc->migrate_pfn) {
-		if (cc->order > 0 && !cc->wrapped) {
-			/* We started partway through; restart at the end. */
-			unsigned long free_pfn = start_free_pfn(zone);
-			zone->compact_cached_free_pfn = free_pfn;
-			cc->free_pfn = free_pfn;
-			cc->wrapped = 1;
-			return COMPACT_CONTINUE;
-		}
-		return COMPACT_COMPLETE;
-	}
-
-	/* We wrapped around and ended up where we started. */
-	if (cc->wrapped && cc->free_pfn <= cc->start_free_pfn)
+	/* Compaction run completes if the migrate and free scanner meet */
+	if (cc->free_pfn <= cc->migrate_pfn)
 		return COMPACT_COMPLETE;
 
/*
@@ -799,15 +751,8 @@ static int compact_zone(struct zone *zone, struct 

Re: [Qemu-devel] [PATCH 4/6] Revert "mm: have order > 0 compaction start off where it left"

2012-09-20 Thread Rik van Riel

On 09/20/2012 10:04 AM, Mel Gorman wrote:

This reverts commit 7db8889a ("mm: have order > 0 compaction start off
where it left") and commit de74f1cc ("mm: have order > 0 compaction start
near a pageblock with free pages). These patches were a good idea and
tests confirmed that they massively reduced the amount of scanning but
the implementation is complex and tricky to understand. A later patch
will cache what pageblocks should be skipped and reimplements the
concept of compact_cached_free_pfn on top for both migration and
free scanners.

Signed-off-by: Mel Gorman mgor...@suse.de


Sure, it makes your next patches easier...

Acked-by: Rik van Riel r...@redhat.com