Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-26 Thread Joonsoo Kim
On Wed, Jun 25, 2014 at 10:59:19AM +0200, Vlastimil Babka wrote:
> On 06/25/2014 02:53 AM, Joonsoo Kim wrote:
> >On Tue, Jun 24, 2014 at 05:42:50PM +0200, Vlastimil Babka wrote:
> >>On 06/24/2014 10:33 AM, Joonsoo Kim wrote:
> >>>On Fri, Jun 20, 2014 at 05:49:34PM +0200, Vlastimil Babka wrote:
> isolate_migratepages_range() is the main function of the compaction scanner,
> called either on a single pageblock by isolate_migratepages() during regular
> compaction, or on an arbitrary range by CMA's __alloc_contig_migrate_range().
> It currently perfoms two pageblock-wide compaction suitability checks, and
> because of the CMA callpath, it tracks if it crossed a pageblock boundary in
> order to repeat those checks.
> 
> However, closer inspection shows that those checks are always true for CMA:
> - isolation_suitable() is true because CMA sets cc->ignore_skip_hint to true
> - migrate_async_suitable() check is skipped because CMA uses sync compaction
> 
> We can therefore move the checks to isolate_migratepages(), reducing variables
> and simplifying isolate_migratepages_range(). The update_pageblock_skip()
> function also no longer needs set_unsuitable parameter.
> 
> Furthermore, going back to compact_zone() and compact_finished() when pageblock
> is unsuitable is wasteful - the checks are meant to skip pageblocks quickly.
> The patch therefore also introduces a simple loop into isolate_migratepages()
> so that it does not return immediately on pageblock checks, but keeps going
> until isolate_migratepages_range() gets called once. Similarily to
> isolate_freepages(), the function periodically checks if it needs to reschedule
> or abort async compaction.
> 
> Signed-off-by: Vlastimil Babka 
> Cc: Minchan Kim 
> Cc: Mel Gorman 
> Cc: Joonsoo Kim 
> Cc: Michal Nazarewicz 
> Cc: Naoya Horiguchi 
> Cc: Christoph Lameter 
> Cc: Rik van Riel 
> Cc: David Rientjes 
> ---
>   mm/compaction.c | 112 +---
>   1 file changed, 59 insertions(+), 53 deletions(-)
> 
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 3064a7f..ebe30c9 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -132,7 +132,7 @@ void reset_isolation_suitable(pg_data_t *pgdat)
>    */
>   static void update_pageblock_skip(struct compact_control *cc,
>   struct page *page, unsigned long nr_isolated,
> - bool set_unsuitable, bool migrate_scanner)
> + bool migrate_scanner)
>   {
>   struct zone *zone = cc->zone;
>   unsigned long pfn;
> @@ -146,12 +146,7 @@ static void update_pageblock_skip(struct compact_control *cc,
>   if (nr_isolated)
>   return;
> 
> - /*
> -  * Only skip pageblocks when all forms of compaction will be known to
> -  * fail in the near future.
> -  */
> - if (set_unsuitable)
> - set_pageblock_skip(page);
> + set_pageblock_skip(page);
> 
>   pfn = page_to_pfn(page);
> 
> @@ -180,7 +175,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
> 
>   static void update_pageblock_skip(struct compact_control *cc,
>   struct page *page, unsigned long nr_isolated,
> - bool set_unsuitable, bool migrate_scanner)
> + bool migrate_scanner)
>   {
>   }
>   #endif /* CONFIG_COMPACTION */
> @@ -345,8 +340,7 @@ isolate_fail:
> 
>   /* Update the pageblock-skip if the whole pageblock was scanned */
>   if (blockpfn == end_pfn)
> - update_pageblock_skip(cc, valid_page, total_isolated, true,
> -   false);
> + update_pageblock_skip(cc, valid_page, total_isolated, false);
> 
>   count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
>   if (total_isolated)
> @@ -474,14 +468,12 @@ unsigned long
>   isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
>   unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
>   {
> - unsigned long last_pageblock_nr = 0, pageblock_nr;
>   unsigned long nr_scanned = 0, nr_isolated = 0;
>   struct list_head *migratelist = &cc->migratepages;
>   struct lruvec *lruvec;
>   unsigned long flags;
>   bool locked = false;
>   struct page *page = NULL, *valid_page = NULL;
> - bool set_unsuitable = true;
>   const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
>   
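For reference, a minimal sketch of the loop the changelog above describes,
with the pageblock checks moved up into isolate_migratepages(). This is an
illustration only, not the committed code: it borrows names from the quoted
diff, assumes the surrounding mm/compaction.c context, and simplifies the
reschedule/abort handling.

	/* Illustrative sketch of the new isolate_migratepages() loop */
	for (; low_pfn < end_pfn; low_pfn = block_end_pfn) {
		block_end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

		/*
		 * Periodically check whether we need to reschedule,
		 * similarly to isolate_freepages(); async compaction
		 * would abort here instead (simplified).
		 */
		cond_resched();

		/* Do not scan within a memory hole */
		if (!pfn_valid(low_pfn))
			continue;

		page = pfn_to_page(low_pfn);

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* For async migration, only scan MOVABLE pageblocks */
		if (cc->mode == MIGRATE_ASYNC &&
		    !migrate_async_suitable(get_pageblock_migratetype(page)))
			continue;

		/* A suitable pageblock: scan it once, then stop looping */
		low_pfn = isolate_migratepages_range(zone, cc, low_pfn,
					min(block_end_pfn, end_pfn), false);
		if (!low_pfn || cc->contended)
			return ISOLATE_ABORT;
		break;
	}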

Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-25 Thread Naoya Horiguchi
On Wed, Jun 25, 2014 at 10:50:51AM +0200, Vlastimil Babka wrote:
> On 06/24/2014 06:58 PM, Naoya Horiguchi wrote:
> >On Tue, Jun 24, 2014 at 05:34:32PM +0200, Vlastimil Babka wrote:
> >>On 06/24/2014 06:52 AM, Naoya Horiguchi wrote:
> - low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
> - if (!low_pfn || cc->contended)
> - return ISOLATE_ABORT;
> + /* Do not scan within a memory hole */
> + if (!pfn_valid(low_pfn))
> + continue;
> +
> + page = pfn_to_page(low_pfn);
> >>>
> >>>Can we move (page_zone != zone) check here as isolate_freepages() does?
> >>
> >>Duplicate perhaps, not sure about move.
> >
> >Sorry for being unclear.
> >I meant that we had better do this check in the per-pageblock loop (as the
> >free scanner does) instead of in the per-pfn loop (as we do now).
> 
> Hm I see, the migration and free scanners really do this differently. The
> free scanner checks per-pageblock, but the migration scanner per-page.
> Can we assume that zones will never overlap within a single pageblock?

Maybe not, we have no such assumption.

> The example dc9086004 seems to be overlapping at an even higher alignment,
> so it should be safe to check only the first page in a pageblock.
> And if that weren't the case, then I guess the freepage scanner would
> already hit some errors on such a system?

That's right. Such systems might be rare, so nobody has detected it, I guess.
So I was wrong, and the page_zone check should be done in the per-pfn loop in
both scanners?

I just think it might be good if we had an iterator that runs over the pfns
of a given zone only (rather than checking the page zone on every page), but
that introduces more complexity in the scanners, so we don't have to do it
in this series.

> But if that's true, why does page_is_buddy test if pages are in the same
> zone?

Yeah, this is why we think we can't have the above mentioned assumption.

Thanks,
Naoya Horiguchi
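A hypothetical shape for such a zone-limited iterator, purely to illustrate
the idea (the helper below does not exist in the tree; its name and form are
made up):

	/*
	 * Hypothetical helper, illustration only: advance pfn to the
	 * next page that both exists (no memory hole) and belongs to
	 * @zone, so callers would not need a page_zone() check.
	 */
	static unsigned long next_pfn_in_zone(unsigned long pfn,
					      struct zone *zone)
	{
		unsigned long end = zone_end_pfn(zone);

		for (; pfn < end; pfn++) {
			if (!pfn_valid(pfn))
				continue;	/* memory hole */
			if (page_zone(pfn_to_page(pfn)) != zone)
				continue;	/* overlapping zone/node */
			return pfn;
		}
		return end;	/* no more pfns in this zone */
	}

A scanner could then step with pfn = next_pfn_in_zone(pfn + 1, zone), at the
cost of the extra complexity mentioned above.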


Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-25 Thread Vlastimil Babka

On 06/25/2014 02:53 AM, Joonsoo Kim wrote:

On Tue, Jun 24, 2014 at 05:42:50PM +0200, Vlastimil Babka wrote:

On 06/24/2014 10:33 AM, Joonsoo Kim wrote:

On Fri, Jun 20, 2014 at 05:49:34PM +0200, Vlastimil Babka wrote:

isolate_migratepages_range() is the main function of the compaction scanner,
called either on a single pageblock by isolate_migratepages() during regular
compaction, or on an arbitrary range by CMA's __alloc_contig_migrate_range().
It currently perfoms two pageblock-wide compaction suitability checks, and
because of the CMA callpath, it tracks if it crossed a pageblock boundary in
order to repeat those checks.

However, closer inspection shows that those checks are always true for CMA:
- isolation_suitable() is true because CMA sets cc->ignore_skip_hint to true
- migrate_async_suitable() check is skipped because CMA uses sync compaction

We can therefore move the checks to isolate_migratepages(), reducing variables
and simplifying isolate_migratepages_range(). The update_pageblock_skip()
function also no longer needs set_unsuitable parameter.

Furthermore, going back to compact_zone() and compact_finished() when pageblock
is unsuitable is wasteful - the checks are meant to skip pageblocks quickly.
The patch therefore also introduces a simple loop into isolate_migratepages()
so that it does not return immediately on pageblock checks, but keeps going
until isolate_migratepages_range() gets called once. Similarily to
isolate_freepages(), the function periodically checks if it needs to reschedule
or abort async compaction.

Signed-off-by: Vlastimil Babka 
Cc: Minchan Kim 
Cc: Mel Gorman 
Cc: Joonsoo Kim 
Cc: Michal Nazarewicz 
Cc: Naoya Horiguchi 
Cc: Christoph Lameter 
Cc: Rik van Riel 
Cc: David Rientjes 
---
  mm/compaction.c | 112 +---
  1 file changed, 59 insertions(+), 53 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 3064a7f..ebe30c9 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -132,7 +132,7 @@ void reset_isolation_suitable(pg_data_t *pgdat)
   */
  static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated,
-   bool set_unsuitable, bool migrate_scanner)
+   bool migrate_scanner)
  {
struct zone *zone = cc->zone;
unsigned long pfn;
@@ -146,12 +146,7 @@ static void update_pageblock_skip(struct compact_control *cc,
if (nr_isolated)
return;

-   /*
-* Only skip pageblocks when all forms of compaction will be known to
-* fail in the near future.
-*/
-   if (set_unsuitable)
-   set_pageblock_skip(page);
+   set_pageblock_skip(page);

pfn = page_to_pfn(page);

@@ -180,7 +175,7 @@ static inline bool isolation_suitable(struct compact_control *cc,

  static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated,
-   bool set_unsuitable, bool migrate_scanner)
+   bool migrate_scanner)
  {
  }
  #endif /* CONFIG_COMPACTION */
@@ -345,8 +340,7 @@ isolate_fail:

/* Update the pageblock-skip if the whole pageblock was scanned */
if (blockpfn == end_pfn)
-   update_pageblock_skip(cc, valid_page, total_isolated, true,
- false);
+   update_pageblock_skip(cc, valid_page, total_isolated, false);

count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
if (total_isolated)
@@ -474,14 +468,12 @@ unsigned long
  isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
  {
-   unsigned long last_pageblock_nr = 0, pageblock_nr;
unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
struct lruvec *lruvec;
unsigned long flags;
bool locked = false;
struct page *page = NULL, *valid_page = NULL;
-   bool set_unsuitable = true;
const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
ISOLATE_ASYNC_MIGRATE : 0) |
(unevictable ? ISOLATE_UNEVICTABLE : 0);
@@ -545,28 +537,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
if (!valid_page)
valid_page = page;

-   /* If isolation recently failed, do not retry */
-   pageblock_nr = low_pfn >> pageblock_order;
-   if (last_pageblock_nr != pageblock_nr) {
-   int mt;
-
-   last_pageblock_nr = pageblock_nr;
-   if (!isolation_suitable(cc, page))
-   goto next_pageblock;
-
-   /*
-* For async 

Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-25 Thread Vlastimil Babka

On 06/24/2014 06:58 PM, Naoya Horiguchi wrote:

On Tue, Jun 24, 2014 at 05:34:32PM +0200, Vlastimil Babka wrote:

On 06/24/2014 06:52 AM, Naoya Horiguchi wrote:

-   low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
-   if (!low_pfn || cc->contended)
-   return ISOLATE_ABORT;
+   /* Do not scan within a memory hole */
+   if (!pfn_valid(low_pfn))
+   continue;
+
+   page = pfn_to_page(low_pfn);


Can we move (page_zone != zone) check here as isolate_freepages() does?


Duplicate perhaps, not sure about move.


Sorry for being unclear.
I meant that we had better do this check in the per-pageblock loop (as the
free scanner does) instead of in the per-pfn loop (as we do now).


Hm I see, the migration and free scanners really do this differently.
The free scanner checks per-pageblock, but the migration scanner per-page.

Can we assume that zones will never overlap within a single pageblock?
The example dc9086004 seems to be overlapping at an even higher alignment,
so it should be safe to check only the first page in a pageblock.
And if that weren't the case, then I guess the freepage scanner would
already hit some errors on such a system?


But if that's true, why does page_is_buddy test if pages are in the same 
zone?



Does CMA make sure that all pages
are in the same zone?


It seems not, CMA just specifies start pfn and end pfn, so it can cover
multiple zones.
And we also have a case of node overlapping as commented in commit dc9086004
"mm: compaction: check for overlapping nodes during isolation for migration".
So we need this check on the compaction side.

Thanks,
Naoya Horiguchi


Common sense tells me it would be useless otherwise,
but I haven't checked if we can rely on it.
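The page_is_buddy() zone test referred to above boils down to the following
(a paraphrase for illustration, not the verbatim mm/page_alloc.c source):
two pages may only be treated as buddies when their zone ids match, which is
exactly the guard that matters if zones could overlap within a pageblock.

	/* Paraphrased essence of the zone test in page_is_buddy() */
	static inline bool buddy_in_same_zone(struct page *page,
					      struct page *buddy)
	{
		return page_zone_id(page) == page_zone_id(buddy);
	}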




Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-24 Thread Joonsoo Kim
On Tue, Jun 24, 2014 at 05:42:50PM +0200, Vlastimil Babka wrote:
> On 06/24/2014 10:33 AM, Joonsoo Kim wrote:
> >On Fri, Jun 20, 2014 at 05:49:34PM +0200, Vlastimil Babka wrote:
> >>isolate_migratepages_range() is the main function of the compaction scanner,
> >>called either on a single pageblock by isolate_migratepages() during regular
> >>compaction, or on an arbitrary range by CMA's __alloc_contig_migrate_range().
> >>It currently perfoms two pageblock-wide compaction suitability checks, and
> >>because of the CMA callpath, it tracks if it crossed a pageblock boundary in
> >>order to repeat those checks.
> >>
> >>However, closer inspection shows that those checks are always true for CMA:
> >>- isolation_suitable() is true because CMA sets cc->ignore_skip_hint to true
> >>- migrate_async_suitable() check is skipped because CMA uses sync compaction
> >>
> >>We can therefore move the checks to isolate_migratepages(), reducing variables
> >>and simplifying isolate_migratepages_range(). The update_pageblock_skip()
> >>function also no longer needs set_unsuitable parameter.
> >>
> >>Furthermore, going back to compact_zone() and compact_finished() when pageblock
> >>is unsuitable is wasteful - the checks are meant to skip pageblocks quickly.
> >>The patch therefore also introduces a simple loop into isolate_migratepages()
> >>so that it does not return immediately on pageblock checks, but keeps going
> >>until isolate_migratepages_range() gets called once. Similarily to
> >>isolate_freepages(), the function periodically checks if it needs to reschedule
> >>or abort async compaction.
> >>
> >>Signed-off-by: Vlastimil Babka 
> >>Cc: Minchan Kim 
> >>Cc: Mel Gorman 
> >>Cc: Joonsoo Kim 
> >>Cc: Michal Nazarewicz 
> >>Cc: Naoya Horiguchi 
> >>Cc: Christoph Lameter 
> >>Cc: Rik van Riel 
> >>Cc: David Rientjes 
> >>---
> >>  mm/compaction.c | 112 +---
> >>  1 file changed, 59 insertions(+), 53 deletions(-)
> >>
> >>diff --git a/mm/compaction.c b/mm/compaction.c
> >>index 3064a7f..ebe30c9 100644
> >>--- a/mm/compaction.c
> >>+++ b/mm/compaction.c
> >>@@ -132,7 +132,7 @@ void reset_isolation_suitable(pg_data_t *pgdat)
> >>   */
> >>  static void update_pageblock_skip(struct compact_control *cc,
> >>struct page *page, unsigned long nr_isolated,
> >>-   bool set_unsuitable, bool migrate_scanner)
> >>+   bool migrate_scanner)
> >>  {
> >>struct zone *zone = cc->zone;
> >>unsigned long pfn;
> >>@@ -146,12 +146,7 @@ static void update_pageblock_skip(struct compact_control *cc,
> >>if (nr_isolated)
> >>return;
> >>
> >>-   /*
> >>-* Only skip pageblocks when all forms of compaction will be known to
> >>-* fail in the near future.
> >>-*/
> >>-   if (set_unsuitable)
> >>-   set_pageblock_skip(page);
> >>+   set_pageblock_skip(page);
> >>
> >>pfn = page_to_pfn(page);
> >>
> >>@@ -180,7 +175,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
> >>
> >>  static void update_pageblock_skip(struct compact_control *cc,
> >>struct page *page, unsigned long nr_isolated,
> >>-   bool set_unsuitable, bool migrate_scanner)
> >>+   bool migrate_scanner)
> >>  {
> >>  }
> >>  #endif /* CONFIG_COMPACTION */
> >>@@ -345,8 +340,7 @@ isolate_fail:
> >>
> >>/* Update the pageblock-skip if the whole pageblock was scanned */
> >>if (blockpfn == end_pfn)
> >>-   update_pageblock_skip(cc, valid_page, total_isolated, true,
> >>- false);
> >>+   update_pageblock_skip(cc, valid_page, total_isolated, false);
> >>
> >>count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
> >>if (total_isolated)
> >>@@ -474,14 +468,12 @@ unsigned long
> >>  isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
> >>unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
> >>  {
> >>-   unsigned long last_pageblock_nr = 0, pageblock_nr;
> >>unsigned long nr_scanned = 0, nr_isolated = 0;
> >>	struct list_head *migratelist = &cc->migratepages;
> >>struct lruvec *lruvec;
> >>unsigned long flags;
> >>bool locked = false;
> >>struct page *page = NULL, *valid_page = NULL;
> >>-   bool set_unsuitable = true;
> >>const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
> >>ISOLATE_ASYNC_MIGRATE : 0) |
> >>(unevictable ? ISOLATE_UNEVICTABLE : 0);
> >>@@ -545,28 +537,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
> >>if (!valid_page)
> >>valid_page = page;
> >>
> >>-   /* If isolation recently failed, do not retry */
> >>-   pageblock_nr = low_pfn >> pageblock_order;
> >>-   if (last_pageblock_nr != pageblock_nr) {
> >>- 

Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-24 Thread Naoya Horiguchi
On Tue, Jun 24, 2014 at 05:34:32PM +0200, Vlastimil Babka wrote:
> On 06/24/2014 06:52 AM, Naoya Horiguchi wrote:
> >>-   low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
> >>-   if (!low_pfn || cc->contended)
> >>-   return ISOLATE_ABORT;
> >>+   /* Do not scan within a memory hole */
> >>+   if (!pfn_valid(low_pfn))
> >>+   continue;
> >>+
> >>+   page = pfn_to_page(low_pfn);
> >
> >Can we move (page_zone != zone) check here as isolate_freepages() does?
> 
> Duplicate perhaps, not sure about move.

Sorry for being unclear.
I meant that we had better do this check in the per-pageblock loop (as the
free scanner does) instead of in the per-pfn loop (as we do now).

> Does CMA make sure that all pages
> are in the same zone?

It seems not, CMA just specifies start pfn and end pfn, so it can cover
multiple zones.
And we also have a case of node overlapping as commented in commit dc9086004
"mm: compaction: check for overlapping nodes during isolation for migration".
So we need this check on the compaction side.

Thanks,
Naoya Horiguchi

> Common sense tells me it would be useless otherwise,
> but I haven't checked if we can rely on it.
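Concretely, the per-pfn guard being discussed would look roughly like this
inside the migration scanner's loop (a sketch based on the quoted diff and
the dc9086004 rationale, not a verbatim patch):

	/* Sketch: per-pfn guard against overlapping zones/nodes */
	for (; low_pfn < end_pfn; low_pfn++) {
		if (!pfn_valid(low_pfn))
			continue;		/* memory hole */

		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;		/* pfn from another zone */

		/* ... proceed with isolation checks on this page ... */
	}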


Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-24 Thread Vlastimil Babka

On 06/24/2014 10:33 AM, Joonsoo Kim wrote:

On Fri, Jun 20, 2014 at 05:49:34PM +0200, Vlastimil Babka wrote:

isolate_migratepages_range() is the main function of the compaction scanner,
called either on a single pageblock by isolate_migratepages() during regular
compaction, or on an arbitrary range by CMA's __alloc_contig_migrate_range().
It currently perfoms two pageblock-wide compaction suitability checks, and
because of the CMA callpath, it tracks if it crossed a pageblock boundary in
order to repeat those checks.

However, closer inspection shows that those checks are always true for CMA:
- isolation_suitable() is true because CMA sets cc->ignore_skip_hint to true
- migrate_async_suitable() check is skipped because CMA uses sync compaction

We can therefore move the checks to isolate_migratepages(), reducing variables
and simplifying isolate_migratepages_range(). The update_pageblock_skip()
function also no longer needs set_unsuitable parameter.

Furthermore, going back to compact_zone() and compact_finished() when pageblock
is unsuitable is wasteful - the checks are meant to skip pageblocks quickly.
The patch therefore also introduces a simple loop into isolate_migratepages()
so that it does not return immediately on pageblock checks, but keeps going
until isolate_migratepages_range() gets called once. Similarily to
isolate_freepages(), the function periodically checks if it needs to reschedule
or abort async compaction.

Signed-off-by: Vlastimil Babka 
Cc: Minchan Kim 
Cc: Mel Gorman 
Cc: Joonsoo Kim 
Cc: Michal Nazarewicz 
Cc: Naoya Horiguchi 
Cc: Christoph Lameter 
Cc: Rik van Riel 
Cc: David Rientjes 
---
  mm/compaction.c | 112 +---
  1 file changed, 59 insertions(+), 53 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 3064a7f..ebe30c9 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -132,7 +132,7 @@ void reset_isolation_suitable(pg_data_t *pgdat)
   */
  static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated,
-   bool set_unsuitable, bool migrate_scanner)
+   bool migrate_scanner)
  {
struct zone *zone = cc->zone;
unsigned long pfn;
@@ -146,12 +146,7 @@ static void update_pageblock_skip(struct compact_control *cc,
if (nr_isolated)
return;

-   /*
-* Only skip pageblocks when all forms of compaction will be known to
-* fail in the near future.
-*/
-   if (set_unsuitable)
-   set_pageblock_skip(page);
+   set_pageblock_skip(page);

pfn = page_to_pfn(page);

@@ -180,7 +175,7 @@ static inline bool isolation_suitable(struct compact_control *cc,

  static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated,
-   bool set_unsuitable, bool migrate_scanner)
+   bool migrate_scanner)
  {
  }
  #endif /* CONFIG_COMPACTION */
@@ -345,8 +340,7 @@ isolate_fail:

/* Update the pageblock-skip if the whole pageblock was scanned */
if (blockpfn == end_pfn)
-   update_pageblock_skip(cc, valid_page, total_isolated, true,
- false);
+   update_pageblock_skip(cc, valid_page, total_isolated, false);

count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
if (total_isolated)
@@ -474,14 +468,12 @@ unsigned long
  isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
  {
-   unsigned long last_pageblock_nr = 0, pageblock_nr;
unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
struct lruvec *lruvec;
unsigned long flags;
bool locked = false;
struct page *page = NULL, *valid_page = NULL;
-   bool set_unsuitable = true;
const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
ISOLATE_ASYNC_MIGRATE : 0) |
(unevictable ? ISOLATE_UNEVICTABLE : 0);
@@ -545,28 +537,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
if (!valid_page)
valid_page = page;

-   /* If isolation recently failed, do not retry */
-   pageblock_nr = low_pfn >> pageblock_order;
-   if (last_pageblock_nr != pageblock_nr) {
-   int mt;
-
-   last_pageblock_nr = pageblock_nr;
-   if (!isolation_suitable(cc, page))
-   goto next_pageblock;
-
-   /*
-* For async migration, also only scan in MOVABLE
-* blocks. Async migration is optimistic to see 
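For context, the removed hunk cut off above continued roughly as follows in
the pre-patch code (reconstructed from the fragments quoted in this thread;
consult the actual tree for the verbatim text):

	/* If isolation recently failed, do not retry */
	pageblock_nr = low_pfn >> pageblock_order;
	if (last_pageblock_nr != pageblock_nr) {
		int mt;

		last_pageblock_nr = pageblock_nr;
		if (!isolation_suitable(cc, page))
			goto next_pageblock;

		/*
		 * For async migration, also only scan in MOVABLE
		 * blocks. Async migration is optimistic to see if
		 * the minimum amount of work satisfies the allocation.
		 */
		mt = get_pageblock_migratetype(page);
		if (cc->mode == MIGRATE_ASYNC &&
		    !migrate_async_suitable(mt)) {
			set_unsuitable = false;
			goto next_pageblock;
		}
	}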

Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-24 Thread Vlastimil Babka

On 06/24/2014 06:52 AM, Naoya Horiguchi wrote:

-   low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
-   if (!low_pfn || cc->contended)
-   return ISOLATE_ABORT;
+   /* Do not scan within a memory hole */
+   if (!pfn_valid(low_pfn))
+   continue;
+
+   page = pfn_to_page(low_pfn);


Can we move (page_zone != zone) check here as isolate_freepages() does?


Duplicate perhaps, not sure about move. Does CMA make sure that all 
pages are in the same zone? Common sense tells me it would be useless 
otherwise, but I haven't checked if we can rely on it.



Thanks,
Naoya Horiguchi





Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-24 Thread Joonsoo Kim
On Fri, Jun 20, 2014 at 05:49:34PM +0200, Vlastimil Babka wrote:
> isolate_migratepages_range() is the main function of the compaction scanner,
> called either on a single pageblock by isolate_migratepages() during regular
> compaction, or on an arbitrary range by CMA's __alloc_contig_migrate_range().
> It currently perfoms two pageblock-wide compaction suitability checks, and
> because of the CMA callpath, it tracks if it crossed a pageblock boundary in
> order to repeat those checks.
> 
> However, closer inspection shows that those checks are always true for CMA:
> - isolation_suitable() is true because CMA sets cc->ignore_skip_hint to true
> - migrate_async_suitable() check is skipped because CMA uses sync compaction
> 
> We can therefore move the checks to isolate_migratepages(), reducing variables
> and simplifying isolate_migratepages_range(). The update_pageblock_skip()
> function also no longer needs set_unsuitable parameter.
> 
> Furthermore, going back to compact_zone() and compact_finished() when pageblock
> is unsuitable is wasteful - the checks are meant to skip pageblocks quickly.
> The patch therefore also introduces a simple loop into isolate_migratepages()
> so that it does not return immediately on pageblock checks, but keeps going
> until isolate_migratepages_range() gets called once. Similarily to
> isolate_freepages(), the function periodically checks if it needs to reschedule
> or abort async compaction.
> 
> Signed-off-by: Vlastimil Babka 
> Cc: Minchan Kim 
> Cc: Mel Gorman 
> Cc: Joonsoo Kim 
> Cc: Michal Nazarewicz 
> Cc: Naoya Horiguchi 
> Cc: Christoph Lameter 
> Cc: Rik van Riel 
> Cc: David Rientjes 
> ---
>  mm/compaction.c | 112 +---
>  1 file changed, 59 insertions(+), 53 deletions(-)
> 
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 3064a7f..ebe30c9 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -132,7 +132,7 @@ void reset_isolation_suitable(pg_data_t *pgdat)
>   */
>  static void update_pageblock_skip(struct compact_control *cc,
>   struct page *page, unsigned long nr_isolated,
> - bool set_unsuitable, bool migrate_scanner)
> + bool migrate_scanner)
>  {
>   struct zone *zone = cc->zone;
>   unsigned long pfn;
> @@ -146,12 +146,7 @@ static void update_pageblock_skip(struct compact_control *cc,
>   if (nr_isolated)
>   return;
>  
> - /*
> -  * Only skip pageblocks when all forms of compaction will be known to
> -  * fail in the near future.
> -  */
> - if (set_unsuitable)
> - set_pageblock_skip(page);
> + set_pageblock_skip(page);
>  
>   pfn = page_to_pfn(page);
>  
> @@ -180,7 +175,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
>  
>  static void update_pageblock_skip(struct compact_control *cc,
>   struct page *page, unsigned long nr_isolated,
> - bool set_unsuitable, bool migrate_scanner)
> + bool migrate_scanner)
>  {
>  }
>  #endif /* CONFIG_COMPACTION */
> @@ -345,8 +340,7 @@ isolate_fail:
>  
>   /* Update the pageblock-skip if the whole pageblock was scanned */
>   if (blockpfn == end_pfn)
> - update_pageblock_skip(cc, valid_page, total_isolated, true,
> -   false);
> + update_pageblock_skip(cc, valid_page, total_isolated, false);
>  
>   count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
>   if (total_isolated)
> @@ -474,14 +468,12 @@ unsigned long
>  isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
>   unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
>  {
> - unsigned long last_pageblock_nr = 0, pageblock_nr;
>   unsigned long nr_scanned = 0, nr_isolated = 0;
>   struct list_head *migratelist = &cc->migratepages;
>   struct lruvec *lruvec;
>   unsigned long flags;
>   bool locked = false;
>   struct page *page = NULL, *valid_page = NULL;
> - bool set_unsuitable = true;
>   const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
>   ISOLATE_ASYNC_MIGRATE : 0) |
>   (unevictable ? ISOLATE_UNEVICTABLE : 0);
> @@ -545,28 +537,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
>   if (!valid_page)
>   valid_page = page;
>  
> - /* If isolation recently failed, do not retry */
> - pageblock_nr = low_pfn >> pageblock_order;
> - if (last_pageblock_nr != pageblock_nr) {
> - int mt;
> -
> - last_pageblock_nr = pageblock_nr;
> - if (!isolation_suitable(cc, page))
> - goto next_pageblock;
> -
> - /*
> -  * For async migration, also 

Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-24 Thread Joonsoo Kim
On Fri, Jun 20, 2014 at 05:49:34PM +0200, Vlastimil Babka wrote:
 isolate_migratepages_range() is the main function of the compaction scanner,
 called either on a single pageblock by isolate_migratepages() during regular
 compaction, or on an arbitrary range by CMA's __alloc_contig_migrate_range().
 It currently perfoms two pageblock-wide compaction suitability checks, and
 because of the CMA callpath, it tracks if it crossed a pageblock boundary in
 order to repeat those checks.
 
 However, closer inspection shows that those checks are always true for CMA:
 - isolation_suitable() is true because CMA sets cc-ignore_skip_hint to true
 - migrate_async_suitable() check is skipped because CMA uses sync compaction
 
 We can therefore move the checks to isolate_migratepages(), reducing variables
 and simplifying isolate_migratepages_range(). The update_pageblock_skip()
 function also no longer needs set_unsuitable parameter.
 
 Furthermore, going back to compact_zone() and compact_finished() when 
 pageblock
 is unsuitable is wasteful - the checks are meant to skip pageblocks quickly.
 The patch therefore also introduces a simple loop into isolate_migratepages()
 so that it does not return immediately on pageblock checks, but keeps going
 until isolate_migratepages_range() gets called once. Similarily to
 isolate_freepages(), the function periodically checks if it needs to 
 reschedule
 or abort async compaction.
 
 Signed-off-by: Vlastimil Babka vba...@suse.cz
 Cc: Minchan Kim minc...@kernel.org
 Cc: Mel Gorman mgor...@suse.de
 Cc: Joonsoo Kim iamjoonsoo@lge.com
 Cc: Michal Nazarewicz min...@mina86.com
 Cc: Naoya Horiguchi n-horigu...@ah.jp.nec.com
 Cc: Christoph Lameter c...@linux.com
 Cc: Rik van Riel r...@redhat.com
 Cc: David Rientjes rient...@google.com
 ---
  mm/compaction.c | 112 
 +---
  1 file changed, 59 insertions(+), 53 deletions(-)
 
 diff --git a/mm/compaction.c b/mm/compaction.c
 index 3064a7f..ebe30c9 100644
 --- a/mm/compaction.c
 +++ b/mm/compaction.c
 @@ -132,7 +132,7 @@ void reset_isolation_suitable(pg_data_t *pgdat)
   */
  static void update_pageblock_skip(struct compact_control *cc,
   struct page *page, unsigned long nr_isolated,
 - bool set_unsuitable, bool migrate_scanner)
 + bool migrate_scanner)
  {
   struct zone *zone = cc-zone;
   unsigned long pfn;
 @@ -146,12 +146,7 @@ static void update_pageblock_skip(struct compact_control 
 *cc,
   if (nr_isolated)
   return;
  
 - /*
 -  * Only skip pageblocks when all forms of compaction will be known to
 -  * fail in the near future.
 -  */
 - if (set_unsuitable)
 - set_pageblock_skip(page);
 + set_pageblock_skip(page);
  
   pfn = page_to_pfn(page);
  
 @@ -180,7 +175,7 @@ static inline bool isolation_suitable(struct 
 compact_control *cc,
  
  static void update_pageblock_skip(struct compact_control *cc,
   struct page *page, unsigned long nr_isolated,
 - bool set_unsuitable, bool migrate_scanner)
 + bool migrate_scanner)
  {
  }
  #endif /* CONFIG_COMPACTION */
 @@ -345,8 +340,7 @@ isolate_fail:
  
   /* Update the pageblock-skip if the whole pageblock was scanned */
   if (blockpfn == end_pfn)
 - update_pageblock_skip(cc, valid_page, total_isolated, true,
 -   false);
 + update_pageblock_skip(cc, valid_page, total_isolated, false);
  
   count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
   if (total_isolated)
 @@ -474,14 +468,12 @@ unsigned long
  isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
   unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
  {
 - unsigned long last_pageblock_nr = 0, pageblock_nr;
   unsigned long nr_scanned = 0, nr_isolated = 0;
   struct list_head *migratelist = cc-migratepages;
   struct lruvec *lruvec;
   unsigned long flags;
   bool locked = false;
   struct page *page = NULL, *valid_page = NULL;
 - bool set_unsuitable = true;
   const isolate_mode_t mode = (cc-mode == MIGRATE_ASYNC ?
   ISOLATE_ASYNC_MIGRATE : 0) |
   (unevictable ? ISOLATE_UNEVICTABLE : 0);
 @@ -545,28 +537,6 @@ isolate_migratepages_range(struct zone *zone, struct 
 compact_control *cc,
   if (!valid_page)
   valid_page = page;
  
 - /* If isolation recently failed, do not retry */
 - pageblock_nr = low_pfn  pageblock_order;
 - if (last_pageblock_nr != pageblock_nr) {
 - int mt;
 -
 - last_pageblock_nr = pageblock_nr;
 - if (!isolation_suitable(cc, page))
 - goto next_pageblock;
 -
 - /*
 - 

Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-24 Thread Vlastimil Babka

On 06/24/2014 06:52 AM, Naoya Horiguchi wrote:

-   low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
-   if (!low_pfn || cc-contended)
-   return ISOLATE_ABORT;
+   /* Do not scan within a memory hole */
+   if (!pfn_valid(low_pfn))
+   continue;
+
+   page = pfn_to_page(low_pfn);


Can we move (page_zone != zone) check here as isolate_freepages() does?


Duplicate perhaps, not sure about move. Does CMA make sure that all 
pages are in the same zone? Common sense tells me it would be useless 
otherwise, but I haven't checked if we can rely on it.



Thanks,
Naoya Horiguchi



--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-24 Thread Vlastimil Babka

On 06/24/2014 10:33 AM, Joonsoo Kim wrote:

On Fri, Jun 20, 2014 at 05:49:34PM +0200, Vlastimil Babka wrote:

isolate_migratepages_range() is the main function of the compaction scanner,
called either on a single pageblock by isolate_migratepages() during regular
compaction, or on an arbitrary range by CMA's __alloc_contig_migrate_range().
It currently perfoms two pageblock-wide compaction suitability checks, and
because of the CMA callpath, it tracks if it crossed a pageblock boundary in
order to repeat those checks.

However, closer inspection shows that those checks are always true for CMA:
- isolation_suitable() is true because CMA sets cc-ignore_skip_hint to true
- migrate_async_suitable() check is skipped because CMA uses sync compaction

We can therefore move the checks to isolate_migratepages(), reducing variables
and simplifying isolate_migratepages_range(). The update_pageblock_skip()
function also no longer needs set_unsuitable parameter.

Furthermore, going back to compact_zone() and compact_finished() when pageblock
is unsuitable is wasteful - the checks are meant to skip pageblocks quickly.
The patch therefore also introduces a simple loop into isolate_migratepages()
so that it does not return immediately on pageblock checks, but keeps going
until isolate_migratepages_range() gets called once. Similarily to
isolate_freepages(), the function periodically checks if it needs to reschedule
or abort async compaction.

Signed-off-by: Vlastimil Babka vba...@suse.cz
Cc: Minchan Kim minc...@kernel.org
Cc: Mel Gorman mgor...@suse.de
Cc: Joonsoo Kim iamjoonsoo@lge.com
Cc: Michal Nazarewicz min...@mina86.com
Cc: Naoya Horiguchi n-horigu...@ah.jp.nec.com
Cc: Christoph Lameter c...@linux.com
Cc: Rik van Riel r...@redhat.com
Cc: David Rientjes rient...@google.com
---
  mm/compaction.c | 112 +---
  1 file changed, 59 insertions(+), 53 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 3064a7f..ebe30c9 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -132,7 +132,7 @@ void reset_isolation_suitable(pg_data_t *pgdat)
   */
  static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated,
-   bool set_unsuitable, bool migrate_scanner)
+   bool migrate_scanner)
  {
struct zone *zone = cc-zone;
unsigned long pfn;
@@ -146,12 +146,7 @@ static void update_pageblock_skip(struct compact_control 
*cc,
if (nr_isolated)
return;

-   /*
-* Only skip pageblocks when all forms of compaction will be known to
-* fail in the near future.
-*/
-   if (set_unsuitable)
-   set_pageblock_skip(page);
+   set_pageblock_skip(page);

pfn = page_to_pfn(page);

@@ -180,7 +175,7 @@ static inline bool isolation_suitable(struct 
compact_control *cc,

  static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated,
-   bool set_unsuitable, bool migrate_scanner)
+   bool migrate_scanner)
  {
  }
  #endif /* CONFIG_COMPACTION */
@@ -345,8 +340,7 @@ isolate_fail:

/* Update the pageblock-skip if the whole pageblock was scanned */
if (blockpfn == end_pfn)
-   update_pageblock_skip(cc, valid_page, total_isolated, true,
- false);
+   update_pageblock_skip(cc, valid_page, total_isolated, false);

count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
if (total_isolated)
@@ -474,14 +468,12 @@ unsigned long
  isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
  {
-   unsigned long last_pageblock_nr = 0, pageblock_nr;
unsigned long nr_scanned = 0, nr_isolated = 0;
struct list_head *migratelist = cc-migratepages;
struct lruvec *lruvec;
unsigned long flags;
bool locked = false;
struct page *page = NULL, *valid_page = NULL;
-   bool set_unsuitable = true;
const isolate_mode_t mode = (cc-mode == MIGRATE_ASYNC ?
ISOLATE_ASYNC_MIGRATE : 0) |
(unevictable ? ISOLATE_UNEVICTABLE : 0);
@@ -545,28 +537,6 @@ isolate_migratepages_range(struct zone *zone, struct 
compact_control *cc,
if (!valid_page)
valid_page = page;

-   /* If isolation recently failed, do not retry */
-   pageblock_nr = low_pfn  pageblock_order;
-   if (last_pageblock_nr != pageblock_nr) {
-   int mt;
-
-   last_pageblock_nr = pageblock_nr;
-   if (!isolation_suitable(cc, page))
-   goto next_pageblock;
-
- 

Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-24 Thread Naoya Horiguchi
On Tue, Jun 24, 2014 at 05:34:32PM +0200, Vlastimil Babka wrote:
 On 06/24/2014 06:52 AM, Naoya Horiguchi wrote:
 -   low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
 -   if (!low_pfn || cc-contended)
 -   return ISOLATE_ABORT;
 +   /* Do not scan within a memory hole */
 +   if (!pfn_valid(low_pfn))
 +   continue;
 +
 +   page = pfn_to_page(low_pfn);
 
 Can we move (page_zone != zone) check here as isolate_freepages() does?
 
 Duplicate perhaps, not sure about move.

Sorry for my unclearness.
I meant that we had better do this check in per-pageblock loop (as the free
scanner does) instead of in per-pfn loop (as we do now.)

 Does CMA make sure that all pages
 are in the same zone?

It seems not, CMA just specifies start pfn and end pfn, so it can cover
multiple zones.
And we also have a case of node overlapping as commented in commit dc9086004
mm: compaction: check for overlapping nodes during isolation for migration.
So we need this check in compaction side.

Thanks,
Naoya Horiguchi

 Common sense tells me it would be useless otherwise,
 but I haven't checked if we can rely on it.
--
To unsubscribe from this list: send the line unsubscribe linux-kernel in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/


Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-24 Thread Joonsoo Kim
On Tue, Jun 24, 2014 at 05:42:50PM +0200, Vlastimil Babka wrote:
 On 06/24/2014 10:33 AM, Joonsoo Kim wrote:
 On Fri, Jun 20, 2014 at 05:49:34PM +0200, Vlastimil Babka wrote:
 isolate_migratepages_range() is the main function of the compaction scanner,
 called either on a single pageblock by isolate_migratepages() during regular
 compaction, or on an arbitrary range by CMA's 
 __alloc_contig_migrate_range().
 It currently perfoms two pageblock-wide compaction suitability checks, and
 because of the CMA callpath, it tracks if it crossed a pageblock boundary in
 order to repeat those checks.
 
 However, closer inspection shows that those checks are always true for CMA:
 - isolation_suitable() is true because CMA sets cc-ignore_skip_hint to true
 - migrate_async_suitable() check is skipped because CMA uses sync compaction
 
 We can therefore move the checks to isolate_migratepages(), reducing 
 variables
 and simplifying isolate_migratepages_range(). The update_pageblock_skip()
 function also no longer needs set_unsuitable parameter.
 
 Furthermore, going back to compact_zone() and compact_finished() when 
 pageblock
 is unsuitable is wasteful - the checks are meant to skip pageblocks quickly.
 The patch therefore also introduces a simple loop into 
 isolate_migratepages()
 so that it does not return immediately on pageblock checks, but keeps going
 until isolate_migratepages_range() gets called once. Similarily to
 isolate_freepages(), the function periodically checks if it needs to 
 reschedule
 or abort async compaction.
 
 Signed-off-by: Vlastimil Babka vba...@suse.cz
 Cc: Minchan Kim minc...@kernel.org
 Cc: Mel Gorman mgor...@suse.de
 Cc: Joonsoo Kim iamjoonsoo@lge.com
 Cc: Michal Nazarewicz min...@mina86.com
 Cc: Naoya Horiguchi n-horigu...@ah.jp.nec.com
 Cc: Christoph Lameter c...@linux.com
 Cc: Rik van Riel r...@redhat.com
 Cc: David Rientjes rient...@google.com
 ---
   mm/compaction.c | 112 
  +---
   1 file changed, 59 insertions(+), 53 deletions(-)
 
 diff --git a/mm/compaction.c b/mm/compaction.c
 index 3064a7f..ebe30c9 100644
 --- a/mm/compaction.c
 +++ b/mm/compaction.c
 @@ -132,7 +132,7 @@ void reset_isolation_suitable(pg_data_t *pgdat)
*/
   static void update_pageblock_skip(struct compact_control *cc,
 struct page *page, unsigned long nr_isolated,
 -   bool set_unsuitable, bool migrate_scanner)
 +   bool migrate_scanner)
   {
 struct zone *zone = cc-zone;
 unsigned long pfn;
 @@ -146,12 +146,7 @@ static void update_pageblock_skip(struct 
 compact_control *cc,
 if (nr_isolated)
 return;
 
 -   /*
 -* Only skip pageblocks when all forms of compaction will be known to
 -* fail in the near future.
 -*/
 -   if (set_unsuitable)
 -   set_pageblock_skip(page);
 +   set_pageblock_skip(page);
 
 pfn = page_to_pfn(page);
 
 @@ -180,7 +175,7 @@ static inline bool isolation_suitable(struct 
 compact_control *cc,
 
   static void update_pageblock_skip(struct compact_control *cc,
 struct page *page, unsigned long nr_isolated,
 -   bool set_unsuitable, bool migrate_scanner)
 +   bool migrate_scanner)
   {
   }
   #endif /* CONFIG_COMPACTION */
 @@ -345,8 +340,7 @@ isolate_fail:
 
 /* Update the pageblock-skip if the whole pageblock was scanned */
 if (blockpfn == end_pfn)
 -   update_pageblock_skip(cc, valid_page, total_isolated, true,
 - false);
 +   update_pageblock_skip(cc, valid_page, total_isolated, false);
 
 count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
 if (total_isolated)
 @@ -474,14 +468,12 @@ unsigned long
   isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
   {
 -   unsigned long last_pageblock_nr = 0, pageblock_nr;
 unsigned long nr_scanned = 0, nr_isolated = 0;
 struct list_head *migratelist = &cc->migratepages;
 struct lruvec *lruvec;
 unsigned long flags;
 bool locked = false;
 struct page *page = NULL, *valid_page = NULL;
 -   bool set_unsuitable = true;
 const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
 ISOLATE_ASYNC_MIGRATE : 0) |
 (unevictable ? ISOLATE_UNEVICTABLE : 0);
 @@ -545,28 +537,6 @@ isolate_migratepages_range(struct zone *zone, struct 
 compact_control *cc,
 if (!valid_page)
 valid_page = page;
 
 -   /* If isolation recently failed, do not retry */
 -   pageblock_nr = low_pfn >> pageblock_order;
 -   if (last_pageblock_nr != pageblock_nr) {
 -   int mt;
 -
 -   last_pageblock_nr = pageblock_nr;
 -   if (!isolation_suitable(cc, page))
 -   goto next_pageblock;
 -
 -  

Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-23 Thread Naoya Horiguchi
On Fri, Jun 20, 2014 at 05:49:34PM +0200, Vlastimil Babka wrote:
> isolate_migratepages_range() is the main function of the compaction scanner,
> called either on a single pageblock by isolate_migratepages() during regular
> compaction, or on an arbitrary range by CMA's __alloc_contig_migrate_range().
> It currently perfoms two pageblock-wide compaction suitability checks, and

(nit-picking) s/perfoms/performs/

> because of the CMA callpath, it tracks if it crossed a pageblock boundary in
> order to repeat those checks.
> 
> However, closer inspection shows that those checks are always true for CMA:
> - isolation_suitable() is true because CMA sets cc->ignore_skip_hint to true
> - migrate_async_suitable() check is skipped because CMA uses sync compaction
> 
> We can therefore move the checks to isolate_migratepages(), reducing variables
> and simplifying isolate_migratepages_range(). The update_pageblock_skip()
> function also no longer needs set_unsuitable parameter.
> 
> Furthermore, going back to compact_zone() and compact_finished() when 
> pageblock
> is unsuitable is wasteful - the checks are meant to skip pageblocks quickly.
> The patch therefore also introduces a simple loop into isolate_migratepages()
> so that it does not return immediately on pageblock checks, but keeps going
> until isolate_migratepages_range() gets called once. Similarly to
> isolate_freepages(), the function periodically checks if it needs to 
> reschedule
> or abort async compaction.

This looks like a good direction to me.
One thing below ...

> Signed-off-by: Vlastimil Babka 
> Cc: Minchan Kim 
> Cc: Mel Gorman 
> Cc: Joonsoo Kim 
> Cc: Michal Nazarewicz 
> Cc: Naoya Horiguchi 
> Cc: Christoph Lameter 
> Cc: Rik van Riel 
> Cc: David Rientjes 
> ---
>  mm/compaction.c | 112 
> +---
>  1 file changed, 59 insertions(+), 53 deletions(-)
> 
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 3064a7f..ebe30c9 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
...
> @@ -840,34 +809,74 @@ typedef enum {
>  } isolate_migrate_t;
>  
>  /*
> - * Isolate all pages that can be migrated from the block pointed to by
> - * the migrate scanner within compact_control.
> + * Isolate all pages that can be migrated from the first suitable block,
> + * starting at the block pointed to by the migrate scanner pfn within
> + * compact_control.
>   */
>  static isolate_migrate_t isolate_migratepages(struct zone *zone,
>   struct compact_control *cc)
>  {
>   unsigned long low_pfn, end_pfn;
> + struct page *page;
>  
> - /* Do not scan outside zone boundaries */
> - low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
> + /* Start at where we last stopped, or beginning of the zone */
> + low_pfn = cc->migrate_pfn;
>  
>   /* Only scan within a pageblock boundary */
>   end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);
>  
> - /* Do not cross the free scanner or scan within a memory hole */
> - if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
> - cc->migrate_pfn = end_pfn;
> - return ISOLATE_NONE;
> - }
> + /*
> +  * Iterate over whole pageblocks until we find the first suitable.
> +  * Do not cross the free scanner.
> +  */
> + for (; end_pfn <= cc->free_pfn;
> + low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {
> +
> + /*
> +  * This can potentially iterate a massively long zone with
> +  * many pageblocks unsuitable, so periodically check if we
> +  * need to schedule, or even abort async compaction.
> +  */
> + if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
> + && compact_should_abort(cc))
> + break;
>  
> - /* Perform the isolation */
> - low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
> - if (!low_pfn || cc->contended)
> - return ISOLATE_ABORT;
> + /* Do not scan within a memory hole */
> + if (!pfn_valid(low_pfn))
> + continue;
> +
> + page = pfn_to_page(low_pfn);

Can we move the (page_zone(page) != zone) check here, as isolate_freepages() does?
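
Something like the following, using the names from the hunk above; just a
sketch of the placement I mean, mirroring the test that isolate_freepages()
already does:

	page = pfn_to_page(low_pfn);
	/* Skip blocks that belong to a different zone of an overlapping node */
	if (page_zone(page) != zone)
		continue;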

Thanks,
Naoya Horiguchi

> + /* If isolation recently failed, do not retry */
> + if (!isolation_suitable(cc, page))
> + continue;
>  
> + /*
> +  * For async compaction, also only scan in MOVABLE blocks.
> +  * Async compaction is optimistic to see if the minimum amount
> +  * of work satisfies the allocation.
> +  */
> + if (cc->mode == MIGRATE_ASYNC &&
> + !migrate_async_suitable(get_pageblock_migratetype(page)))
> + continue;
> +
> + /* Perform the isolation */
> + low_pfn = 

Re: [PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-23 Thread Zhang Yanfei
On 06/20/2014 11:49 PM, Vlastimil Babka wrote:
> isolate_migratepages_range() is the main function of the compaction scanner,
> called either on a single pageblock by isolate_migratepages() during regular
> compaction, or on an arbitrary range by CMA's __alloc_contig_migrate_range().
> It currently perfoms two pageblock-wide compaction suitability checks, and
> because of the CMA callpath, it tracks if it crossed a pageblock boundary in
> order to repeat those checks.
> 
> However, closer inspection shows that those checks are always true for CMA:
> - isolation_suitable() is true because CMA sets cc->ignore_skip_hint to true
> - migrate_async_suitable() check is skipped because CMA uses sync compaction
> 
> We can therefore move the checks to isolate_migratepages(), reducing variables
> and simplifying isolate_migratepages_range(). The update_pageblock_skip()
> function also no longer needs set_unsuitable parameter.
> 
> Furthermore, going back to compact_zone() and compact_finished() when 
> pageblock
> is unsuitable is wasteful - the checks are meant to skip pageblocks quickly.
> The patch therefore also introduces a simple loop into isolate_migratepages()
> so that it does not return immediately on pageblock checks, but keeps going
> until isolate_migratepages_range() gets called once. Similarly to
> isolate_freepages(), the function periodically checks if it needs to 
> reschedule
> or abort async compaction.
> 
> Signed-off-by: Vlastimil Babka 
> Cc: Minchan Kim 
> Cc: Mel Gorman 
> Cc: Joonsoo Kim 
> Cc: Michal Nazarewicz 
> Cc: Naoya Horiguchi 
> Cc: Christoph Lameter 
> Cc: Rik van Riel 
> Cc: David Rientjes 

I think this is a good clean-up that makes the code clearer.

Reviewed-by: Zhang Yanfei zhangyan...@cn.fujitsu.com

Only a tiny nit-pick below.

> ---
>  mm/compaction.c | 112 
> +---
>  1 file changed, 59 insertions(+), 53 deletions(-)
> 
> diff --git a/mm/compaction.c b/mm/compaction.c
> index 3064a7f..ebe30c9 100644
> --- a/mm/compaction.c
> +++ b/mm/compaction.c
> @@ -132,7 +132,7 @@ void reset_isolation_suitable(pg_data_t *pgdat)
>   */
>  static void update_pageblock_skip(struct compact_control *cc,
>   struct page *page, unsigned long nr_isolated,
> - bool set_unsuitable, bool migrate_scanner)
> + bool migrate_scanner)
>  {
>   struct zone *zone = cc->zone;
>   unsigned long pfn;
> @@ -146,12 +146,7 @@ static void update_pageblock_skip(struct compact_control 
> *cc,
>   if (nr_isolated)
>   return;
>  
> - /*
> -  * Only skip pageblocks when all forms of compaction will be known to
> -  * fail in the near future.
> -  */
> - if (set_unsuitable)
> - set_pageblock_skip(page);
> + set_pageblock_skip(page);
>  
>   pfn = page_to_pfn(page);
>  
> @@ -180,7 +175,7 @@ static inline bool isolation_suitable(struct 
> compact_control *cc,
>  
>  static void update_pageblock_skip(struct compact_control *cc,
>   struct page *page, unsigned long nr_isolated,
> - bool set_unsuitable, bool migrate_scanner)
> + bool migrate_scanner)
>  {
>  }
>  #endif /* CONFIG_COMPACTION */
> @@ -345,8 +340,7 @@ isolate_fail:
>  
>   /* Update the pageblock-skip if the whole pageblock was scanned */
>   if (blockpfn == end_pfn)
> - update_pageblock_skip(cc, valid_page, total_isolated, true,
> -   false);
> + update_pageblock_skip(cc, valid_page, total_isolated, false);
>  
>   count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
>   if (total_isolated)
> @@ -474,14 +468,12 @@ unsigned long
>  isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
>   unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
>  {
> - unsigned long last_pageblock_nr = 0, pageblock_nr;
>   unsigned long nr_scanned = 0, nr_isolated = 0;
>   struct list_head *migratelist = &cc->migratepages;
>   struct lruvec *lruvec;
>   unsigned long flags;
>   bool locked = false;
>   struct page *page = NULL, *valid_page = NULL;
> - bool set_unsuitable = true;
>   const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
>   ISOLATE_ASYNC_MIGRATE : 0) |
>   (unevictable ? ISOLATE_UNEVICTABLE : 0);
> @@ -545,28 +537,6 @@ isolate_migratepages_range(struct zone *zone, struct 
> compact_control *cc,
>   if (!valid_page)
>   valid_page = page;
>  
> - /* If isolation recently failed, do not retry */
> - pageblock_nr = low_pfn >> pageblock_order;
> - if (last_pageblock_nr != pageblock_nr) {
> - int mt;
> -
> - last_pageblock_nr = pageblock_nr;
> - if (!isolation_suitable(cc, page))
> - goto 

[PATCH v3 04/13] mm, compaction: move pageblock checks up from isolate_migratepages_range()

2014-06-20 Thread Vlastimil Babka
isolate_migratepages_range() is the main function of the compaction scanner,
called either on a single pageblock by isolate_migratepages() during regular
compaction, or on an arbitrary range by CMA's __alloc_contig_migrate_range().
It currently perfoms two pageblock-wide compaction suitability checks, and
because of the CMA callpath, it tracks if it crossed a pageblock boundary in
order to repeat those checks.

However, closer inspection shows that those checks are always true for CMA:
- isolation_suitable() is true because CMA sets cc->ignore_skip_hint to true
- migrate_async_suitable() check is skipped because CMA uses sync compaction

We can therefore move the checks to isolate_migratepages(), reducing variables
and simplifying isolate_migratepages_range(). The update_pageblock_skip()
function also no longer needs set_unsuitable parameter.

Furthermore, going back to compact_zone() and compact_finished() when pageblock
is unsuitable is wasteful - the checks are meant to skip pageblocks quickly.
The patch therefore also introduces a simple loop into isolate_migratepages()
so that it does not return immediately on pageblock checks, but keeps going
until isolate_migratepages_range() gets called once. Similarly to
isolate_freepages(), the function periodically checks if it needs to reschedule
or abort async compaction.
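
For reference, the two helpers behind the CMA observations above currently
look roughly like this; simplified sketches of the mm/compaction.c code,
quoted only for context and not part of the diff below:

	static inline bool isolation_suitable(struct compact_control *cc,
						struct page *page)
	{
		/* CMA sets ignore_skip_hint, making this trivially true */
		if (cc->ignore_skip_hint)
			return true;

		return !get_pageblock_skip(page);
	}

	static inline bool migrate_async_suitable(int migratetype)
	{
		/* Only consulted in async mode; CMA compacts synchronously */
		return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
	}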

Signed-off-by: Vlastimil Babka vba...@suse.cz
Cc: Minchan Kim minc...@kernel.org
Cc: Mel Gorman mgor...@suse.de
Cc: Joonsoo Kim iamjoonsoo@lge.com
Cc: Michal Nazarewicz min...@mina86.com
Cc: Naoya Horiguchi n-horigu...@ah.jp.nec.com
Cc: Christoph Lameter c...@linux.com
Cc: Rik van Riel r...@redhat.com
Cc: David Rientjes rient...@google.com
---
 mm/compaction.c | 112 +---
 1 file changed, 59 insertions(+), 53 deletions(-)

diff --git a/mm/compaction.c b/mm/compaction.c
index 3064a7f..ebe30c9 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -132,7 +132,7 @@ void reset_isolation_suitable(pg_data_t *pgdat)
  */
 static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated,
-   bool set_unsuitable, bool migrate_scanner)
+   bool migrate_scanner)
 {
struct zone *zone = cc->zone;
unsigned long pfn;
@@ -146,12 +146,7 @@ static void update_pageblock_skip(struct compact_control 
*cc,
if (nr_isolated)
return;
 
-   /*
-* Only skip pageblocks when all forms of compaction will be known to
-* fail in the near future.
-*/
-   if (set_unsuitable)
-   set_pageblock_skip(page);
+   set_pageblock_skip(page);
 
pfn = page_to_pfn(page);
 
@@ -180,7 +175,7 @@ static inline bool isolation_suitable(struct 
compact_control *cc,
 
 static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated,
-   bool set_unsuitable, bool migrate_scanner)
+   bool migrate_scanner)
 {
 }
 #endif /* CONFIG_COMPACTION */
@@ -345,8 +340,7 @@ isolate_fail:
 
/* Update the pageblock-skip if the whole pageblock was scanned */
if (blockpfn == end_pfn)
-   update_pageblock_skip(cc, valid_page, total_isolated, true,
- false);
+   update_pageblock_skip(cc, valid_page, total_isolated, false);
 
count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
if (total_isolated)
@@ -474,14 +468,12 @@ unsigned long
 isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
 {
-   unsigned long last_pageblock_nr = 0, pageblock_nr;
unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
struct lruvec *lruvec;
unsigned long flags;
bool locked = false;
struct page *page = NULL, *valid_page = NULL;
-   bool set_unsuitable = true;
const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ?
ISOLATE_ASYNC_MIGRATE : 0) |
(unevictable ? ISOLATE_UNEVICTABLE : 0);
@@ -545,28 +537,6 @@ isolate_migratepages_range(struct zone *zone, struct 
compact_control *cc,
if (!valid_page)
valid_page = page;
 
-   /* If isolation recently failed, do not retry */
-   pageblock_nr = low_pfn >> pageblock_order;
-   if (last_pageblock_nr != pageblock_nr) {
-   int mt;
-
-   last_pageblock_nr = pageblock_nr;
-   if (!isolation_suitable(cc, page))
-   goto next_pageblock;
-
-   /*
-* For async migration, also only scan in MOVABLE
-* blocks. Async migration is optimistic to see if
-* the minimum amount of work satisfies the allocation
-*/
- 
