From: Yinghai Lu <ying...@kernel.org>

As requested by hpa, add a comment explaining why 5 was chosen as the
step size shift.
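
For reference, the arithmetic behind the 1+1+512 figure in the comment:
mapping a 1G range entirely with 4k PTEs takes 512 PTE-table pages (one
per 2M of the range) plus two pages for the upper-level tables, i.e.
514 pages or 2M + 8k of page tables. With a shift of 5 the step grows
2M -> 64M -> 2G, so the page tables needed to map each new step fit
comfortably inside the range mapped by the previous one.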

Signed-off-by: Yinghai Lu <ying...@kernel.org>
Reviewed-by: Tang Chen <tangc...@cn.fujitsu.com>
Tested-by: Tang Chen <tangc...@cn.fujitsu.com>
---
 arch/x86/mm/init.c |   21 ++++++++++++++++++---
 1 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 3c21f16..5f38e72 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -395,8 +395,23 @@ static unsigned long __init init_range_memory_mapping(
        return mapped_ram_size;
 }
 
-/* (PUD_SHIFT-PMD_SHIFT)/2 */
-#define STEP_SIZE_SHIFT 5
+static unsigned long __init get_new_step_size(unsigned long step_size)
+{
+       /*
+        * Initial mapped size is PMD_SIZE (2M).
+        * We cannot set step_size to PUD_SIZE (1G) yet.
+        * In the worst case, when a 1G range crosses the 1G boundary and
+        * PG_LEVEL_2M is not set, we need 1+1+512 pages (2M + 8k)
+        * to map that 1G range with PTEs. Use 5 as the shift for now.
+        */
+       unsigned long new_step_size = step_size << 5;
+
+       if (new_step_size > step_size)
+               step_size = new_step_size;
+
+       return step_size;
+}
+
 void __init init_mem_mapping(void)
 {
        unsigned long end, real_end, start, last_start;
@@ -445,7 +460,7 @@ void __init init_mem_mapping(void)
                min_pfn_mapped = last_start >> PAGE_SHIFT;
                /* only increase step_size after big range get mapped */
                if (new_mapped_ram_size > mapped_ram_size)
-                       step_size <<= STEP_SIZE_SHIFT;
+                       step_size = get_new_step_size(step_size);
                mapped_ram_size += new_mapped_ram_size;
        }
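
Not part of the patch, just for illustration: a minimal userspace
sketch of the growth behaviour (the main() driver, loop count and
printed units are made up here; only get_new_step_size() mirrors the
patch):

#include <stdio.h>

static unsigned long get_new_step_size(unsigned long step_size)
{
	unsigned long new_step_size = step_size << 5;

	/*
	 * On 32-bit, where unsigned long is 32 bits, the shift can
	 * wrap around (2G << 5 becomes 0); the check below keeps the
	 * old step size instead of going backwards.
	 */
	if (new_step_size > step_size)
		step_size = new_step_size;

	return step_size;
}

int main(void)
{
	unsigned long step = 2UL << 20;	/* initial step: PMD_SIZE, 2M */
	int i;

	/*
	 * On 64-bit this prints 2M, 64M, 2G, 64G; on 32-bit the last
	 * shift wraps and the guard keeps the step at 2G.
	 */
	for (i = 0; i < 4; i++) {
		printf("step %d: %lu kB\n", i, step >> 10);
		step = get_new_step_size(step);
	}
	return 0;
}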
 
-- 
1.7.1
