[PATCH 1/6] Use zonelists instead of zones when direct reclaiming pages

2007-12-11, Mel Gorman

The allocator deals with zonelists which indicate the order in which zones
should be targeted for an allocation. Similarly, direct reclaim of pages
iterates over an array of zones. For consistency, this patch converts direct
reclaim to use a zonelist. No functionality is changed by this patch. This
simplifies zonelist iterators in the next patch.

Signed-off-by: Mel Gorman <[EMAIL PROTECTED]>
Acked-by: Christoph Lameter <[EMAIL PROTECTED]>
---
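
To make the conversion concrete, here is a minimal, hypothetical userspace C sketch of the calling-convention change. The structures are cut-down stand-ins for the 2.6.24-era definitions (a zonelist holds a NULL-terminated, preference-ordered array of zone pointers), and reclaim_from_zones()/reclaim_from_zonelist() are illustrative helpers, not the kernel's do_try_to_free_pages():

#include <stdio.h>

struct zone { const char *name; };

#define NR_SKETCH_ZONES 4	/* placeholder sizing, not the kernel constant */

struct zonelist {
	/* NULL-terminated, preference-ordered array, as in 2.6.24-rc */
	struct zone *zones[NR_SKETCH_ZONES + 1];
};

/* Old convention: callers unpack the bare array out of the zonelist. */
static unsigned long reclaim_from_zones(struct zone **zones)
{
	unsigned long nr = 0;
	int i;

	for (i = 0; zones[i] != NULL; i++) {
		printf("reclaiming from %s\n", zones[i]->name);
		nr++;
	}
	return nr;
}

/*
 * New convention: callers pass the zonelist itself and the array is
 * unpacked internally, so a later patch can change how the list is
 * walked without touching every caller.
 */
static unsigned long reclaim_from_zonelist(struct zonelist *zonelist)
{
	return reclaim_from_zones(zonelist->zones);
}

int main(void)
{
	struct zone normal = { "Normal" }, dma = { "DMA" };
	struct zonelist zl = { .zones = { &normal, &dma, NULL } };

	reclaim_from_zones(zl.zones);	/* before this patch */
	reclaim_from_zonelist(&zl);	/* after this patch  */
	return 0;
}

Passing the opaque zonelist keeps the zone array an implementation detail of reclaim, which is what the iterator change in the next patch relies on.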

 fs/buffer.c          |    8 ++++----
 include/linux/swap.h |    2 +-
 mm/page_alloc.c      |    2 +-
 mm/vmscan.c          |   21 ++++++++++++---------
 4 files changed, 18 insertions(+), 15 deletions(-)

diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.24-rc4-mm1-clean/fs/buffer.c linux-2.6.24-rc4-mm1-005_freepages_zonelist/fs/buffer.c
--- linux-2.6.24-rc4-mm1-clean/fs/buffer.c	2007-12-07 12:14:06.000000000 +0000
+++ linux-2.6.24-rc4-mm1-005_freepages_zonelist/fs/buffer.c	2007-12-07 15:13:16.000000000 +0000
@@ -368,16 +368,16 @@ void invalidate_bdev(struct block_device
  */
 static void free_more_memory(void)
 {
-   struct zone **zones;
+   struct zonelist *zonelist;
pg_data_t *pgdat;
 
wakeup_pdflush(1024);
yield();
 
for_each_online_pgdat(pgdat) {
-   zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
-   if (*zones)
-   try_to_free_pages(zones, 0, GFP_NOFS);
+   zonelist = &pgdat->node_zonelists[gfp_zone(GFP_NOFS)];
+   if (zonelist->zones[0])
+   try_to_free_pages(zonelist, 0, GFP_NOFS);
}
 }
 
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.24-rc4-mm1-clean/include/linux/swap.h linux-2.6.24-rc4-mm1-005_freepages_zonelist/include/linux/swap.h
--- linux-2.6.24-rc4-mm1-clean/include/linux/swap.h	2007-12-07 12:14:07.000000000 +0000
+++ linux-2.6.24-rc4-mm1-005_freepages_zonelist/include/linux/swap.h	2007-12-07 12:17:22.000000000 +0000
@@ -181,7 +181,7 @@ extern int rotate_reclaimable_page(struc
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **zones, int order,
+extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
gfp_t gfp_mask);
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.24-rc4-mm1-clean/mm/page_alloc.c linux-2.6.24-rc4-mm1-005_freepages_zonelist/mm/page_alloc.c
--- linux-2.6.24-rc4-mm1-clean/mm/page_alloc.c	2007-12-07 12:14:07.000000000 +0000
+++ linux-2.6.24-rc4-mm1-005_freepages_zonelist/mm/page_alloc.c	2007-12-07 12:17:22.000000000 +0000
@@ -1624,7 +1624,7 @@ nofail_alloc:
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
 
-   did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
+   did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
p->reclaim_state = NULL;
p->flags &= ~PF_MEMALLOC;
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.24-rc4-mm1-clean/mm/vmscan.c linux-2.6.24-rc4-mm1-005_freepages_zonelist/mm/vmscan.c
--- linux-2.6.24-rc4-mm1-clean/mm/vmscan.c	2007-12-07 12:14:07.000000000 +0000
+++ linux-2.6.24-rc4-mm1-005_freepages_zonelist/mm/vmscan.c	2007-12-07 12:19:14.000000000 +0000
@@ -1267,10 +1267,11 @@ static unsigned long shrink_zone(int pri
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
 {
unsigned long nr_reclaimed = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
 
@@ -1322,8 +1323,8 @@ static unsigned long shrink_zones(int pr
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
- struct scan_control *sc)
+static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+   gfp_t gfp_mask, struct scan_control *sc)
 {
int priority;
int ret = 0;
@@ -1331,6 +1332,7 @@ static unsigned long do_try_to_free_page
unsigned long nr_reclaimed = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long lru_pages = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
count_vm_event(ALLOCSTALL);
@@ -1354,7 +1356,7 @@ static unsigned long do_try_to_free_page
sc->nr_io_pages = 0;
if (!priority)
disable_swap_token();
-   

[PATCH 1/6] Use zonelists instead of zones when direct reclaiming pages

2007-11-20, Mel Gorman

The allocator deals with zonelists which indicate the order in which zones
should be targeted for an allocation. Similarly, direct reclaim of pages
iterates over an array of zones. For consistency, this patch converts direct
reclaim to use a zonelist. No functionality is changed by this patch. This
simplifies zonelist iterators in the next patch.

Signed-off-by: Mel Gorman <[EMAIL PROTECTED]>
Acked-by: Christoph Lameter <[EMAIL PROTECTED]>
---

 include/linux/swap.h |    2 +-
 mm/page_alloc.c      |    2 +-
 mm/vmscan.c          |   21 ++++++++++++---------
 3 files changed, 14 insertions(+), 11 deletions(-)

diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.24-rc2-mm1-hotfixes/include/linux/swap.h linux-2.6.24-rc2-mm1-005_freepages_zonelist/include/linux/swap.h
--- linux-2.6.24-rc2-mm1-hotfixes/include/linux/swap.h	2007-11-15 11:28:03.000000000 +0000
+++ linux-2.6.24-rc2-mm1-005_freepages_zonelist/include/linux/swap.h	2007-11-20 23:25:22.000000000 +0000
@@ -181,7 +181,7 @@ extern int rotate_reclaimable_page(struc
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **zones, int order,
+extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
gfp_t gfp_mask);
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.24-rc2-mm1-hotfixes/mm/page_alloc.c linux-2.6.24-rc2-mm1-005_freepages_zonelist/mm/page_alloc.c
--- linux-2.6.24-rc2-mm1-hotfixes/mm/page_alloc.c	2007-11-15 11:28:11.000000000 +0000
+++ linux-2.6.24-rc2-mm1-005_freepages_zonelist/mm/page_alloc.c	2007-11-20 23:25:22.000000000 +0000
@@ -1619,7 +1619,7 @@ nofail_alloc:
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
 
-   did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
+   did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
p->reclaim_state = NULL;
p->flags &= ~PF_MEMALLOC;
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.24-rc2-mm1-hotfixes/mm/vmscan.c linux-2.6.24-rc2-mm1-005_freepages_zonelist/mm/vmscan.c
--- linux-2.6.24-rc2-mm1-hotfixes/mm/vmscan.c	2007-11-15 11:28:11.000000000 +0000
+++ linux-2.6.24-rc2-mm1-005_freepages_zonelist/mm/vmscan.c	2007-11-20 23:25:22.000000000 +0000
@@ -1216,10 +1216,11 @@ static unsigned long shrink_zone(int pri
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
 {
unsigned long nr_reclaimed = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
sc->all_unreclaimable = 1;
@@ -1257,8 +1258,8 @@ static unsigned long shrink_zones(int pr
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
- struct scan_control *sc)
+static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+   gfp_t gfp_mask, struct scan_control *sc)
 {
int priority;
int ret = 0;
@@ -1266,6 +1267,7 @@ static unsigned long do_try_to_free_page
unsigned long nr_reclaimed = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long lru_pages = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
count_vm_event(ALLOCSTALL);
@@ -1285,7 +1287,7 @@ static unsigned long do_try_to_free_page
sc->nr_io_pages = 0;
if (!priority)
disable_swap_token();
-   nr_reclaimed += shrink_zones(priority, zones, sc);
+   nr_reclaimed += shrink_zones(priority, zonelist, sc);
/*
 * Don't shrink slabs when reclaiming memory from
 * over limit cgroups
@@ -1344,7 +1346,8 @@ out:
return ret;
 }
 
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+   gfp_t gfp_mask)
 {
struct scan_control sc = {
.gfp_mask = gfp_mask,
@@ -1357,7 +1360,7 @@ unsigned long try_to_free_pages(struct z
.isolate_pages = isolate_pages_global,
};
 
-   return do_try_to_free_pages(zones, gfp_mask, &sc);
+   return do_try_to_free_pages(zonelist, gfp_mask, &sc);
 }
 
 #ifdef CONFIG_CGROUP_MEM_CONT
@@ -1376,11 +1379,11 @@ unsigned long try_to_free_mem_cgroup_pag
 

[PATCH 1/6] Use zonelists instead of zones when direct reclaiming pages

2007-11-09, Mel Gorman

The allocator deals with zonelists which indicate the order in which zones
should be targeted for an allocation. Similarly, direct reclaim of pages
iterates over an array of zones. For consistency, this patch converts direct
reclaim to use a zonelist. No functionality is changed by this patch. This
simplifies zonelist iterators in the next patch.

Signed-off-by: Mel Gorman <[EMAIL PROTECTED]>
Acked-by: Christoph Lameter <[EMAIL PROTECTED]>
---

 include/linux/swap.h |    2 +-
 mm/page_alloc.c      |    2 +-
 mm/vmscan.c          |   21 ++++++++++++---------
 3 files changed, 14 insertions(+), 11 deletions(-)

diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.24-rc1-mm-b1106/include/linux/swap.h linux-2.6.24-rc1-mm-005_freepages_zonelist/include/linux/swap.h
--- linux-2.6.24-rc1-mm-b1106/include/linux/swap.h	2007-11-08 19:04:09.000000000 +0000
+++ linux-2.6.24-rc1-mm-005_freepages_zonelist/include/linux/swap.h	2007-11-08 19:05:07.000000000 +0000
@@ -181,7 +181,7 @@ extern int rotate_reclaimable_page(struc
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **zones, int order,
+extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
gfp_t gfp_mask);
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.24-rc1-mm-b1106/mm/page_alloc.c linux-2.6.24-rc1-mm-005_freepages_zonelist/mm/page_alloc.c
--- linux-2.6.24-rc1-mm-b1106/mm/page_alloc.c	2007-11-08 19:04:17.000000000 +0000
+++ linux-2.6.24-rc1-mm-005_freepages_zonelist/mm/page_alloc.c	2007-11-08 19:05:07.000000000 +0000
@@ -1647,7 +1647,7 @@ nofail_alloc:
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
 
-   did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
+   did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
p->reclaim_state = NULL;
p->flags &= ~PF_MEMALLOC;
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.24-rc1-mm-b1106/mm/vmscan.c linux-2.6.24-rc1-mm-005_freepages_zonelist/mm/vmscan.c
--- linux-2.6.24-rc1-mm-b1106/mm/vmscan.c	2007-11-08 19:04:17.000000000 +0000
+++ linux-2.6.24-rc1-mm-005_freepages_zonelist/mm/vmscan.c	2007-11-08 19:06:49.000000000 +0000
@@ -1216,10 +1216,11 @@ static unsigned long shrink_zone(int pri
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
 {
unsigned long nr_reclaimed = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
sc->all_unreclaimable = 1;
@@ -1257,8 +1258,8 @@ static unsigned long shrink_zones(int pr
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
- struct scan_control *sc)
+static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+   gfp_t gfp_mask, struct scan_control *sc)
 {
int priority;
int ret = 0;
@@ -1266,6 +1267,7 @@ static unsigned long do_try_to_free_page
unsigned long nr_reclaimed = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long lru_pages = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
count_vm_event(ALLOCSTALL);
@@ -1285,7 +1287,7 @@ static unsigned long do_try_to_free_page
sc->nr_io_pages = 0;
if (!priority)
disable_swap_token();
-   nr_reclaimed += shrink_zones(priority, zones, sc);
+   nr_reclaimed += shrink_zones(priority, zonelist, sc);
/*
 * Don't shrink slabs when reclaiming memory from
 * over limit cgroups
@@ -1344,7 +1346,8 @@ out:
return ret;
 }
 
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+   gfp_t gfp_mask)
 {
struct scan_control sc = {
.gfp_mask = gfp_mask,
@@ -1357,7 +1360,7 @@ unsigned long try_to_free_pages(struct z
.isolate_pages = isolate_pages_global,
};
 
-   return do_try_to_free_pages(zones, gfp_mask, &sc);
+   return do_try_to_free_pages(zonelist, gfp_mask, &sc);
 }
 
 #ifdef CONFIG_CGROUP_MEM_CONT
@@ -1376,11 +1379,11 @@ unsigned long try_to_free_mem_cgroup_pag

[PATCH 1/6] Use zonelists instead of zones when direct reclaiming pages

2007-09-28, Mel Gorman

The allocator deals with zonelists which indicate the order in which zones
should be targeted for an allocation. Similarly, direct reclaim of pages
iterates over an array of zones. For consistency, this patch converts direct
reclaim to use a zonelist. No functionality is changed by this patch. This
simplifies zonelist iterators in the next patch.

Signed-off-by: Mel Gorman <[EMAIL PROTECTED]>
Acked-by: Christoph Lameter <[EMAIL PROTECTED]>
---

 include/linux/swap.h |    2 +-
 mm/page_alloc.c      |    2 +-
 mm/vmscan.c          |   21 ++++++++++++---------
 3 files changed, 14 insertions(+), 11 deletions(-)

diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.23-rc8-mm2-clean/include/linux/swap.h linux-2.6.23-rc8-mm2-005_freepages_zonelist/include/linux/swap.h
--- linux-2.6.23-rc8-mm2-clean/include/linux/swap.h	2007-09-27 14:41:05.000000000 +0100
+++ linux-2.6.23-rc8-mm2-005_freepages_zonelist/include/linux/swap.h	2007-09-28 15:48:35.000000000 +0100
@@ -185,7 +185,7 @@ extern void move_tail_pages(void);
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **zones, int order,
+extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
gfp_t gfp_mask);
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.23-rc8-mm2-clean/mm/page_alloc.c linux-2.6.23-rc8-mm2-005_freepages_zonelist/mm/page_alloc.c
--- linux-2.6.23-rc8-mm2-clean/mm/page_alloc.c	2007-09-27 14:41:05.000000000 +0100
+++ linux-2.6.23-rc8-mm2-005_freepages_zonelist/mm/page_alloc.c	2007-09-28 15:48:35.000000000 +0100
@@ -1668,7 +1668,7 @@ nofail_alloc:
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
 
-   did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
+   did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
p->reclaim_state = NULL;
p->flags &= ~PF_MEMALLOC;
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.23-rc8-mm2-clean/mm/vmscan.c linux-2.6.23-rc8-mm2-005_freepages_zonelist/mm/vmscan.c
--- linux-2.6.23-rc8-mm2-clean/mm/vmscan.c	2007-09-27 14:41:05.000000000 +0100
+++ linux-2.6.23-rc8-mm2-005_freepages_zonelist/mm/vmscan.c	2007-09-28 15:48:35.000000000 +0100
@@ -1204,10 +1204,11 @@ static unsigned long shrink_zone(int pri
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
 {
unsigned long nr_reclaimed = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
sc->all_unreclaimable = 1;
@@ -1245,8 +1246,8 @@ static unsigned long shrink_zones(int pr
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
- struct scan_control *sc)
+static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+   gfp_t gfp_mask, struct scan_control *sc)
 {
int priority;
int ret = 0;
@@ -1254,6 +1255,7 @@ static unsigned long do_try_to_free_page
unsigned long nr_reclaimed = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long lru_pages = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
count_vm_event(ALLOCSTALL);
@@ -1272,7 +1274,7 @@ static unsigned long do_try_to_free_page
sc->nr_scanned = 0;
if (!priority)
disable_swap_token();
-   nr_reclaimed += shrink_zones(priority, zones, sc);
+   nr_reclaimed += shrink_zones(priority, zonelist, sc);
/*
 * Don't shrink slabs when reclaiming memory from
 * over limit cgroups
@@ -1330,7 +1332,8 @@ out:
return ret;
 }
 
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+   gfp_t gfp_mask)
 {
struct scan_control sc = {
.gfp_mask = gfp_mask,
@@ -1343,7 +1346,7 @@ unsigned long try_to_free_pages(struct z
.isolate_pages = isolate_pages_global,
};
 
-   return do_try_to_free_pages(zones, gfp_mask, &sc);
+   return do_try_to_free_pages(zonelist, gfp_mask, &sc);
 }
 
 #ifdef CONFIG_CGROUP_MEM_CONT
@@ -1362,12 +1365,12 @@ unsigned long try_to_free_mem_cgroup_pag

[PATCH 1/6] Use zonelists instead of zones when direct reclaiming pages

2007-09-13, Mel Gorman

The allocator deals with zonelists which indicate the order in which zones
should be targeted for an allocation. Similarly, direct reclaim of pages
iterates over an array of zones. For consistency, this patch converts direct
reclaim to use a zonelist. No functionality is changed by this patch. This
simplifies zonelist iterators in the next patch.

Signed-off-by: Mel Gorman <[EMAIL PROTECTED]>
Acked-by: Christoph Lameter <[EMAIL PROTECTED]>
---

 include/linux/swap.h |    2 +-
 mm/page_alloc.c      |    2 +-
 mm/vmscan.c          |   19 +++++++++++--------
 3 files changed, 13 insertions(+), 10 deletions(-)

diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.23-rc4-mm1-fix-pcnet32/include/linux/swap.h linux-2.6.23-rc4-mm1-005_freepages_zonelist/include/linux/swap.h
--- linux-2.6.23-rc4-mm1-fix-pcnet32/include/linux/swap.h	2007-09-10 09:29:14.000000000 +0100
+++ linux-2.6.23-rc4-mm1-005_freepages_zonelist/include/linux/swap.h	2007-09-13 11:57:20.000000000 +0100
@@ -189,7 +189,7 @@ extern int rotate_reclaimable_page(struc
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **zones, int order,
+extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask);
 extern unsigned long try_to_free_mem_container_pages(struct mem_container *mem);
 extern int __isolate_lru_page(struct page *page, int mode);
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.23-rc4-mm1-fix-pcnet32/mm/page_alloc.c linux-2.6.23-rc4-mm1-005_freepages_zonelist/mm/page_alloc.c
--- linux-2.6.23-rc4-mm1-fix-pcnet32/mm/page_alloc.c	2007-09-10 09:29:14.000000000 +0100
+++ linux-2.6.23-rc4-mm1-005_freepages_zonelist/mm/page_alloc.c	2007-09-13 11:57:20.000000000 +0100
@@ -1667,7 +1667,7 @@ nofail_alloc:
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
 
-   did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
+   did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
p->reclaim_state = NULL;
p->flags &= ~PF_MEMALLOC;
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.23-rc4-mm1-fix-pcnet32/mm/vmscan.c linux-2.6.23-rc4-mm1-005_freepages_zonelist/mm/vmscan.c
--- linux-2.6.23-rc4-mm1-fix-pcnet32/mm/vmscan.c	2007-09-10 09:29:14.000000000 +0100
+++ linux-2.6.23-rc4-mm1-005_freepages_zonelist/mm/vmscan.c	2007-09-13 11:57:20.000000000 +0100
@@ -1207,10 +1207,11 @@ static unsigned long shrink_zone(int pri
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
 {
unsigned long nr_reclaimed = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
sc->all_unreclaimable = 1;
@@ -1248,7 +1249,7 @@ static unsigned long shrink_zones(int pr
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
+unsigned long do_try_to_free_pages(struct zonelist *zonelist, gfp_t gfp_mask,
struct scan_control *sc)
 {
int priority;
@@ -1257,6 +1258,7 @@ unsigned long do_try_to_free_pages(struc
unsigned long nr_reclaimed = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long lru_pages = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
count_vm_event(ALLOCSTALL);
@@ -1275,7 +1277,7 @@ unsigned long do_try_to_free_pages(struc
sc->nr_scanned = 0;
if (!priority)
disable_swap_token();
-   nr_reclaimed += shrink_zones(priority, zones, sc);
+   nr_reclaimed += shrink_zones(priority, zonelist, sc);
/*
 * Don't shrink slabs when reclaiming memory from
 * over limit containers
@@ -1333,7 +1335,8 @@ out:
return ret;
 }
 
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+   gfp_t gfp_mask)
 {
struct scan_control sc = {
.gfp_mask = gfp_mask,
@@ -1346,7 +1349,7 @@ unsigned long try_to_free_pages(struct z
.isolate_pages = isolate_pages_global,
};
 
-   return do_try_to_free_pages(zones, gfp_mask, &sc);
+   return do_try_to_free_pages(zonelist, gfp_mask, &sc);
 }
 
 #ifdef CONFIG_CONTAINER_MEM_CONT
@@ -1370,11 +1373,11 @@ unsigned long try_to_free_mem_container_
.isolate_pages = mem_container_isolate_pages,
};

[PATCH 1/6] Use zonelists instead of zones when direct reclaiming pages

2007-09-12, Mel Gorman

The allocator deals with zonelists which indicate the order in which zones
should be targeted for an allocation. Similarly, direct reclaim of pages
iterates over an array of zones. For consistency, this patch converts direct
reclaim to use a zonelist. No functionality is changed by this patch. This
simplifies zonelist iterators in the next patch.

Signed-off-by: Mel Gorman <[EMAIL PROTECTED]>
Acked-by: Christoph Lameter <[EMAIL PROTECTED]>
---

 include/linux/swap.h |    2 +-
 mm/page_alloc.c      |    2 +-
 mm/vmscan.c          |   13 ++++++++-----
 3 files changed, 10 insertions(+), 7 deletions(-)

diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.23-rc4-mm1-fix-pcnet32/include/linux/swap.h linux-2.6.23-rc4-mm1-005_freepages_zonelist/include/linux/swap.h
--- linux-2.6.23-rc4-mm1-fix-pcnet32/include/linux/swap.h	2007-09-10 09:29:14.000000000 +0100
+++ linux-2.6.23-rc4-mm1-005_freepages_zonelist/include/linux/swap.h	2007-09-12 16:05:11.000000000 +0100
@@ -189,7 +189,7 @@ extern int rotate_reclaimable_page(struc
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **zones, int order,
+extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask);
 extern unsigned long try_to_free_mem_container_pages(struct mem_container *mem);
 extern int __isolate_lru_page(struct page *page, int mode);
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.23-rc4-mm1-fix-pcnet32/mm/page_alloc.c linux-2.6.23-rc4-mm1-005_freepages_zonelist/mm/page_alloc.c
--- linux-2.6.23-rc4-mm1-fix-pcnet32/mm/page_alloc.c	2007-09-10 09:29:14.000000000 +0100
+++ linux-2.6.23-rc4-mm1-005_freepages_zonelist/mm/page_alloc.c	2007-09-12 16:05:11.000000000 +0100
@@ -1667,7 +1667,7 @@ nofail_alloc:
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
 
-   did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
+   did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
p->reclaim_state = NULL;
p->flags &= ~PF_MEMALLOC;
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.23-rc4-mm1-fix-pcnet32/mm/vmscan.c linux-2.6.23-rc4-mm1-005_freepages_zonelist/mm/vmscan.c
--- linux-2.6.23-rc4-mm1-fix-pcnet32/mm/vmscan.c	2007-09-10 09:29:14.000000000 +0100
+++ linux-2.6.23-rc4-mm1-005_freepages_zonelist/mm/vmscan.c	2007-09-12 16:05:11.000000000 +0100
@@ -1207,10 +1207,11 @@ static unsigned long shrink_zone(int pri
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
 {
unsigned long nr_reclaimed = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
sc->all_unreclaimable = 1;
@@ -1248,7 +1249,7 @@ static unsigned long shrink_zones(int pr
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
+unsigned long do_try_to_free_pages(struct zonelist *zonelist, gfp_t gfp_mask,
struct scan_control *sc)
 {
int priority;
@@ -1257,6 +1258,7 @@ unsigned long do_try_to_free_pages(struc
unsigned long nr_reclaimed = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long lru_pages = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
count_vm_event(ALLOCSTALL);
@@ -1275,7 +1277,7 @@ unsigned long do_try_to_free_pages(struc
sc->nr_scanned = 0;
if (!priority)
disable_swap_token();
-   nr_reclaimed += shrink_zones(priority, zones, sc);
+   nr_reclaimed += shrink_zones(priority, zonelist, sc);
/*
 * Don't shrink slabs when reclaiming memory from
 * over limit containers
@@ -1333,7 +1335,8 @@ out:
return ret;
 }
 
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+   gfp_t gfp_mask)
 {
struct scan_control sc = {
.gfp_mask = gfp_mask,
@@ -1346,7 +1349,7 @@ unsigned long try_to_free_pages(struct z
.isolate_pages = isolate_pages_global,
};
 
-   return do_try_to_free_pages(zones, gfp_mask, &sc);
+   return do_try_to_free_pages(zonelist, gfp_mask, &sc);
 }
 
 #ifdef CONFIG_CONTAINER_MEM_CONT

[PATCH 1/6] Use zonelists instead of zones when direct reclaiming pages

2007-09-11, Mel Gorman

The allocator deals with zonelists which indicate the order in which zones
should be targeted for an allocation. Similarly, direct reclaim of pages
iterates over an array of zones. For consistency, this patch converts direct
reclaim to use a zonelist. No functionality is changed by this patch. This
simplifies zonelist iterators in the next patch.

Signed-off-by: Mel Gorman <[EMAIL PROTECTED]>
Acked-by: Christoph Lameter <[EMAIL PROTECTED]>
---

 include/linux/swap.h |    2 +-
 mm/page_alloc.c      |    2 +-
 mm/vmscan.c          |   13 ++++++++-----
 3 files changed, 10 insertions(+), 7 deletions(-)

diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.23-rc4-mm1-fix-pcnet32/include/linux/swap.h linux-2.6.23-rc4-mm1-005_freepages_zonelist/include/linux/swap.h
--- linux-2.6.23-rc4-mm1-fix-pcnet32/include/linux/swap.h	2007-09-10 09:29:14.000000000 +0100
+++ linux-2.6.23-rc4-mm1-005_freepages_zonelist/include/linux/swap.h	2007-09-10 16:06:06.000000000 +0100
@@ -189,7 +189,7 @@ extern int rotate_reclaimable_page(struc
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **zones, int order,
+extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask);
 extern unsigned long try_to_free_mem_container_pages(struct mem_container *mem);
 extern int __isolate_lru_page(struct page *page, int mode);
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.23-rc4-mm1-fix-pcnet32/mm/page_alloc.c linux-2.6.23-rc4-mm1-005_freepages_zonelist/mm/page_alloc.c
--- linux-2.6.23-rc4-mm1-fix-pcnet32/mm/page_alloc.c	2007-09-10 09:29:14.000000000 +0100
+++ linux-2.6.23-rc4-mm1-005_freepages_zonelist/mm/page_alloc.c	2007-09-10 16:06:06.000000000 +0100
@@ -1667,7 +1667,7 @@ nofail_alloc:
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
 
-   did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
+   did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
p->reclaim_state = NULL;
p->flags &= ~PF_MEMALLOC;
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.23-rc4-mm1-fix-pcnet32/mm/vmscan.c linux-2.6.23-rc4-mm1-005_freepages_zonelist/mm/vmscan.c
--- linux-2.6.23-rc4-mm1-fix-pcnet32/mm/vmscan.c	2007-09-10 09:29:14.000000000 +0100
+++ linux-2.6.23-rc4-mm1-005_freepages_zonelist/mm/vmscan.c	2007-09-10 16:06:06.000000000 +0100
@@ -1207,10 +1207,11 @@ static unsigned long shrink_zone(int pri
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
 {
unsigned long nr_reclaimed = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
sc->all_unreclaimable = 1;
@@ -1248,7 +1249,7 @@ static unsigned long shrink_zones(int pr
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
+unsigned long do_try_to_free_pages(struct zonelist *zonelist, gfp_t gfp_mask,
struct scan_control *sc)
 {
int priority;
@@ -1257,6 +1258,7 @@ unsigned long do_try_to_free_pages(struc
unsigned long nr_reclaimed = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long lru_pages = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
count_vm_event(ALLOCSTALL);
@@ -1275,7 +1277,7 @@ unsigned long do_try_to_free_pages(struc
sc->nr_scanned = 0;
if (!priority)
disable_swap_token();
-   nr_reclaimed += shrink_zones(priority, zones, sc);
+   nr_reclaimed += shrink_zones(priority, zonelist, sc);
/*
 * Don't shrink slabs when reclaiming memory from
 * over limit containers
@@ -1333,7 +1335,8 @@ out:
return ret;
 }
 
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+   gfp_t gfp_mask)
 {
struct scan_control sc = {
.gfp_mask = gfp_mask,
@@ -1346,7 +1349,7 @@ unsigned long try_to_free_pages(struct z
.isolate_pages = isolate_pages_global,
};
 
-   return do_try_to_free_pages(zones, gfp_mask, &sc);
+   return do_try_to_free_pages(zonelist, gfp_mask, &sc);
 }
 
 #ifdef CONFIG_CONTAINER_MEM_CONT

[PATCH 1/6] Use zonelists instead of zones when direct reclaiming pages

2007-08-31 Thread Mel Gorman

The allocator deals with zonelists which indicate the order in which zones
should be targeted for an allocation. Similarly, direct reclaim of pages
iterates over an array of zones. For consistency, this patch converts direct
reclaim to use a zonelist. No functionality is changed by this patch. This
simplifies zonelist iterators in the next patch.

Signed-off-by: Mel Gorman <[EMAIL PROTECTED]>
Acked-by: Christoph Lameter <[EMAIL PROTECTED]>
---

 include/linux/swap.h |2 +-
 mm/page_alloc.c  |2 +-
 mm/vmscan.c  |9 ++---
 3 files changed, 8 insertions(+), 5 deletions(-)

diff -rup -X /usr/src/patchset-0.6/bin//dontdiff 
linux-2.6.23-rc3-mm1-clean/include/linux/swap.h 
linux-2.6.23-rc3-mm1-005_freepages_zonelist/include/linux/swap.h
--- linux-2.6.23-rc3-mm1-clean/include/linux/swap.h 2007-08-22 
11:32:13.0 +0100
+++ linux-2.6.23-rc3-mm1-005_freepages_zonelist/include/linux/swap.h
2007-08-31 16:54:44.0 +0100
@@ -189,7 +189,7 @@ extern int rotate_reclaimable_page(struc
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **zones, int order,
+extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff 
linux-2.6.23-rc3-mm1-clean/mm/page_alloc.c 
linux-2.6.23-rc3-mm1-005_freepages_zonelist/mm/page_alloc.c
--- linux-2.6.23-rc3-mm1-clean/mm/page_alloc.c  2007-08-22 11:32:13.0 
+0100
+++ linux-2.6.23-rc3-mm1-005_freepages_zonelist/mm/page_alloc.c 2007-08-31 
16:54:44.0 +0100
@@ -1665,7 +1665,7 @@ nofail_alloc:
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
 
-   did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
+   did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
p->reclaim_state = NULL;
p->flags &= ~PF_MEMALLOC;
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff 
linux-2.6.23-rc3-mm1-clean/mm/vmscan.c 
linux-2.6.23-rc3-mm1-005_freepages_zonelist/mm/vmscan.c
--- linux-2.6.23-rc3-mm1-clean/mm/vmscan.c  2007-08-22 11:32:13.0 
+0100
+++ linux-2.6.23-rc3-mm1-005_freepages_zonelist/mm/vmscan.c 2007-08-31 
16:54:44.0 +0100
@@ -1180,10 +1180,11 @@ static unsigned long shrink_zone(int pri
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
 {
unsigned long nr_reclaimed = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
sc->all_unreclaimable = 1;
@@ -1221,7 +1222,8 @@ static unsigned long shrink_zones(int pr
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+   gfp_t gfp_mask)
 {
int priority;
int ret = 0;
@@ -1229,6 +1231,7 @@ unsigned long try_to_free_pages(struct z
unsigned long nr_reclaimed = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long lru_pages = 0;
+   struct zone **zones = zonelist->zones;
int i;
struct scan_control sc = {
.gfp_mask = gfp_mask,
@@ -1256,7 +1259,7 @@ unsigned long try_to_free_pages(struct z
sc.nr_scanned = 0;
if (!priority)
disable_swap_token();
-   nr_reclaimed += shrink_zones(priority, zones, &sc);
+   nr_reclaimed += shrink_zones(priority, zonelist, &sc);
shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
if (reclaim_state) {
nr_reclaimed += reclaim_state->reclaimed_slab;

[PATCH 1/6] Use zonelists instead of zones when direct reclaiming pages

2007-08-17 Thread Mel Gorman

The allocator deals with zonelists which indicate the order in which zones
should be targeted for an allocation. Similarly, direct reclaim of pages
iterates over an array of zones. For consistency, this patch converts direct
reclaim to use a zonelist. No functionality is changed by this patch. This
simplifies zonelist iterators in the next patch.

Signed-off-by: Mel Gorman <[EMAIL PROTECTED]>
Acked-by: Christoph Lameter <[EMAIL PROTECTED]>
---

 include/linux/swap.h |2 +-
 mm/page_alloc.c  |2 +-
 mm/vmscan.c  |9 ++---
 3 files changed, 8 insertions(+), 5 deletions(-)

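The vmscan hunks below also lean on the scan_control pattern: reclaim
parameters are gathered into a structure on the caller's stack with
designated initializers and passed down by address as &sc. A standalone
sketch of that pattern (field set reduced to what the example needs;
gfp_t is a stand-in typedef; not code from this patch):

#include <stdio.h>

typedef unsigned int gfp_t;     /* stand-in for the kernel typedef */

struct scan_control {
        gfp_t gfp_mask;
        int may_swap;
        unsigned long nr_scanned;
};

static unsigned long shrink_zones_sketch(int priority, struct scan_control *sc)
{
        sc->nr_scanned += 32;           /* pretend a batch of pages was scanned */
        return priority == 0 ? 16 : 0;  /* reclaim only at the highest pressure */
}

static unsigned long try_to_free_pages_sketch(gfp_t gfp_mask)
{
        struct scan_control sc = {
                .gfp_mask = gfp_mask,
                .may_swap = 1,
        };
        unsigned long nr_reclaimed = 0;
        int priority;

        /* Priority counts down from DEF_PRIORITY (12) as pressure rises */
        for (priority = 12; priority >= 0; priority--)
                nr_reclaimed += shrink_zones_sketch(priority, &sc);

        printf("scanned %lu, reclaimed %lu\n", sc.nr_scanned, nr_reclaimed);
        return nr_reclaimed;
}

int main(void)
{
        try_to_free_pages_sketch(0);
        return 0;
}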
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff 
linux-2.6.23-rc3-clean/include/linux/swap.h 
linux-2.6.23-rc3-005_freepages_zonelist/include/linux/swap.h
--- linux-2.6.23-rc3-clean/include/linux/swap.h 2007-08-13 05:25:24.0 
+0100
+++ linux-2.6.23-rc3-005_freepages_zonelist/include/linux/swap.h
2007-08-17 16:35:48.0 +0100
@@ -188,7 +188,7 @@ extern int rotate_reclaimable_page(struc
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **zones, int order,
+extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff 
linux-2.6.23-rc3-clean/mm/page_alloc.c 
linux-2.6.23-rc3-005_freepages_zonelist/mm/page_alloc.c
--- linux-2.6.23-rc3-clean/mm/page_alloc.c  2007-08-13 05:25:24.0 
+0100
+++ linux-2.6.23-rc3-005_freepages_zonelist/mm/page_alloc.c 2007-08-17 
16:35:48.0 +0100
@@ -1326,7 +1326,7 @@ nofail_alloc:
reclaim_state.reclaimed_slab = 0;
p->reclaim_state = &reclaim_state;
 
-   did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
+   did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
p->reclaim_state = NULL;
p->flags &= ~PF_MEMALLOC;
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff 
linux-2.6.23-rc3-clean/mm/vmscan.c 
linux-2.6.23-rc3-005_freepages_zonelist/mm/vmscan.c
--- linux-2.6.23-rc3-clean/mm/vmscan.c  2007-08-13 05:25:24.0 +0100
+++ linux-2.6.23-rc3-005_freepages_zonelist/mm/vmscan.c 2007-08-17 
16:35:48.0 +0100
@@ -1075,10 +1075,11 @@ static unsigned long shrink_zone(int pri
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
 {
unsigned long nr_reclaimed = 0;
+   struct zone **zones = zonelist->zones;
int i;
 
sc->all_unreclaimable = 1;
@@ -1116,7 +1117,8 @@ static unsigned long shrink_zones(int pr
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+   gfp_t gfp_mask)
 {
int priority;
int ret = 0;
@@ -1124,6 +1126,7 @@ unsigned long try_to_free_pages(struct z
unsigned long nr_reclaimed = 0;
struct reclaim_state *reclaim_state = current->reclaim_state;
unsigned long lru_pages = 0;
+   struct zone **zones = zonelist->zones;
int i;
struct scan_control sc = {
.gfp_mask = gfp_mask,
@@ -1150,7 +1153,7 @@ unsigned long try_to_free_pages(struct z
sc.nr_scanned = 0;
if (!priority)
disable_swap_token();
-   nr_reclaimed += shrink_zones(priority, zones, &sc);
+   nr_reclaimed += shrink_zones(priority, zonelist, &sc);
shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
if (reclaim_state) {
nr_reclaimed += reclaim_state->reclaimed_slab;