[V5 PATCH 17/26] page_alloc: use N_MEMORY instead of N_HIGH_MEMORY and change the node_states initialization

From: Lai Jiangshan
Date: 2012-10-29
N_HIGH_MEMORY stands for the nodes that have normal or high memory.
N_MEMORY stands for the nodes that have any memory.

The code here needs to handle the nodes which have memory, so we should
use N_MEMORY instead.

Since we introduced N_MEMORY, also update the initialization of node_states.
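
For reference, N_MEMORY is introduced by an earlier patch in this series so
that it collapses onto the narrower states when the corresponding zones are
not configured. A rough sketch of the nodemask.h definition (the exact
comments and ifdefs are in that patch):

	enum node_states {
		...
		N_NORMAL_MEMORY,	/* The node has regular memory */
	#ifdef CONFIG_HIGHMEM
		N_HIGH_MEMORY,		/* The node has regular or high memory */
	#else
		N_HIGH_MEMORY = N_NORMAL_MEMORY,
	#endif
	#ifdef CONFIG_MOVABLE_NODE
		N_MEMORY,		/* The node has memory(regular, high, movable) */
	#else
		N_MEMORY = N_HIGH_MEMORY,
	#endif
		...
	};

Because the aliasing is resolved at compile time, checks such as
"if (N_MEMORY != N_NORMAL_MEMORY)" below are free: when the states alias,
the compiler discards the redundant node_clear_state()/node_set_state()
calls.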

Signed-off-by: Lai Jiangshan <la...@cn.fujitsu.com>
---
 arch/x86/mm/init_64.c |    4 +++-
 mm/page_alloc.c       |   40 ++++++++++++++++++++++++------------------
 2 files changed, 25 insertions(+), 19 deletions(-)

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 3baff25..2ead3c8 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -630,7 +630,9 @@ void __init paging_init(void)
 *   numa support is not compiled in, and later node_set_state
 *   will not set it back.
 */
-   node_clear_state(0, N_NORMAL_MEMORY);
+   node_clear_state(0, N_MEMORY);
+   if (N_MEMORY != N_NORMAL_MEMORY)
+   node_clear_state(0, N_NORMAL_MEMORY);
 
zone_sizes_init();
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b1ef9b0..b70c929 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1692,7 +1692,7 @@ bool zone_watermark_ok_safe(struct zone *z, int order, unsigned long mark,
  *
  * If the zonelist cache is present in the passed in zonelist, then
  * returns a pointer to the allowed node mask (either the current
- * tasks mems_allowed, or node_states[N_HIGH_MEMORY].)
+ * tasks mems_allowed, or node_states[N_MEMORY].)
  *
  * If the zonelist cache is not available for this zonelist, does
  * nothing and returns NULL.
@@ -1721,7 +1721,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
 
allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
    &cpuset_current_mems_allowed :
-   &node_states[N_HIGH_MEMORY];
+   &node_states[N_MEMORY];
return allowednodes;
 }
 
@@ -3194,7 +3194,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
return node;
}
 
-   for_each_node_state(n, N_HIGH_MEMORY) {
+   for_each_node_state(n, N_MEMORY) {
 
/* Don't want a node to appear more than once */
if (node_isset(n, *used_node_mask))
@@ -3336,7 +3336,7 @@ static int default_zonelist_order(void)
 * local memory, NODE_ORDER may be suitable.
  */
average_size = total_size /
-   (nodes_weight(node_states[N_HIGH_MEMORY]) + 1);
+   (nodes_weight(node_states[N_MEMORY]) + 1);
for_each_online_node(nid) {
low_kmem_size = 0;
total_size = 0;
@@ -4669,7 +4669,7 @@ unsigned long __init find_min_pfn_with_active_regions(void)
 /*
  * early_calculate_totalpages()
  * Sum pages in active regions for movable zone.
- * Populate N_HIGH_MEMORY for calculating usable_nodes.
+ * Populate N_MEMORY for calculating usable_nodes.
  */
 static unsigned long __init early_calculate_totalpages(void)
 {
@@ -4682,7 +4682,7 @@ static unsigned long __init early_calculate_totalpages(void)
 
totalpages += pages;
if (pages)
-   node_set_state(nid, N_HIGH_MEMORY);
+   node_set_state(nid, N_MEMORY);
}
return totalpages;
 }
@@ -4699,9 +4699,9 @@ static void __init find_zone_movable_pfns_for_nodes(void)
unsigned long usable_startpfn;
unsigned long kernelcore_node, kernelcore_remaining;
/* save the state before borrow the nodemask */
-   nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
+   nodemask_t saved_node_state = node_states[N_MEMORY];
unsigned long totalpages = early_calculate_totalpages();
-   int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
+   int usable_nodes = nodes_weight(node_states[N_MEMORY]);
 
/*
 * If movablecore was specified, calculate what size of
@@ -4736,7 +4736,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 restart:
/* Spread kernelcore memory as evenly as possible throughout nodes */
kernelcore_node = required_kernelcore / usable_nodes;
-   for_each_node_state(nid, N_HIGH_MEMORY) {
+   for_each_node_state(nid, N_MEMORY) {
unsigned long start_pfn, end_pfn;
 
/*
@@ -4828,23 +4828,27 @@ restart:
 
 out:
/* restore the node_state */
-   node_states[N_HIGH_MEMORY] = saved_node_state;
+   node_states[N_MEMORY] = saved_node_state;
 }
 
-/* Any regular memory on that node ? */
-static void __init check_for_regular_memory(pg_data_t *pgdat)
+/* Any regular or high memory on that node ? */
+static void check_for_memory(pg_data_t *pgdat, int nid)
 {
-#ifdef CONFIG_HIGHMEM
enum zone_type zone_type;
 
-   for (zone_type = 0; zone_type <= ZONE_NORMAL; zone_type++) {
+   if (N_MEMORY == N_NORMAL_MEMORY)
