If the 1G-aligned pfn already exceeds the range, we are sure there cannot
be a possible 1G range, so we can jump directly to splitting the 2M range.

Signed-off-by: Wei Yang <richardw.y...@linux.intel.com>
---
 arch/x86/mm/init.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 87275238dbb0..cbb105388f24 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -394,10 +394,10 @@ static int __meminit split_mem_range(struct map_range *mr,
         * Range (B):
         * big page (2M) range
         */
-       start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
        end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
        if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))
-               end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));
+               goto no_1G_range;
+       start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                page_size_mask & (1<<PG_LEVEL_2M));
@@ -416,6 +416,7 @@ static int __meminit split_mem_range(struct map_range *mr,
                                 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
                pfn = end_pfn;
        }
+no_1G_range:
 #endif
 
        /*
-- 
2.19.1

Reply via email to