Currently the kernel is not allowed to be loaded above 512G: startup_64
compares the physical address of _text against PGDIR_SIZE (1 << 39 = 512G)
and treats anything above that as a bad address.

We only need to add one extra spare page, used as a level3 table, so that
level4 can point at another 512G range.

Check which 512G slot _text falls into, point the corresponding level4
entry at that spare level3 page, and point the level3 entry at the spare
level2 page, so the extra mapping covers [_text, _end].

We need this to be able to load a relocatable bzImage above 512G.
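
For illustration only (not part of the patch): a rough, standalone C
sketch of the index arithmetic the new assembly performs, assuming the
usual x86-64 4-level paging constants; the 520G load address is just a
made-up example value.

  #include <stdio.h>

  /* constants as on x86-64 with 4-level paging */
  #define PGDIR_SHIFT   39
  #define PUD_SHIFT     30
  #define PTRS_PER_PGD  512
  #define PTRS_PER_PUD  512

  int main(void)
  {
          /* hypothetical physical address of _text, above the old 512G limit */
          unsigned long load_addr = 520UL << 30;          /* 520G */
          unsigned long pgd_idx = (load_addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
          unsigned long pud_idx = (load_addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);

          /*
           * pgd_idx != 0 means _text is above 512G: level4 entry pgd_idx
           * (and its __PAGE_OFFSET alias) is pointed at level3_spare_pgt,
           * and entry pud_idx of that spare page is pointed at
           * level2_spare_pgt.  pgd_idx == 0 keeps the old path and only
           * fills level3_ident_pgt[pud_idx] when pud_idx != 0.
           */
          printf("pgd index %lu, pud index %lu\n", pgd_idx, pud_idx);
          return 0;
  }

Here this prints "pgd index 1, pud index 8", i.e. the second level4 slot
and the ninth slot of the spare level3 page.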

Signed-off-by: Yinghai Lu <ying...@kernel.org>
Cc: "Eric W. Biederman" <ebied...@xmission.com>
---
 arch/x86/kernel/head_64.S |   34 +++++++++++++++++++++++++++-------
 1 files changed, 27 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index efc0c08..32fa9d0 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -78,12 +78,6 @@ startup_64:
        testl   %eax, %eax
        jnz     bad_address
 
-       /* Is the address too large? */
-       leaq    _text(%rip), %rdx
-       movq    $PGDIR_SIZE, %rax
-       cmpq    %rax, %rdx
-       jae     bad_address
-
        /* Fixup the physical addresses in the page table
         */
        addq    %rbp, init_level4_pgt + 0(%rip)
@@ -102,12 +96,35 @@ startup_64:
        andq    $PMD_PAGE_MASK, %rdi
 
        movq    %rdi, %rax
+       shrq    $PGDIR_SHIFT, %rax
+       andq    $(PTRS_PER_PGD - 1), %rax
+       jz      skip_level3_spare
+
+       /* Set the level4 entries to the spare level3 page first */
+       leaq    (level3_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
+       leaq    init_level4_pgt(%rip), %rbx
+       movq    %rdx, 0(%rbx, %rax, 8)
+       addq    $L4_PAGE_OFFSET, %rax
+       movq    %rdx, 0(%rbx, %rax, 8)
+
+       /* always need to set level2 */
+       movq    %rdi, %rax
+       shrq    $PUD_SHIFT, %rax
+       andq    $(PTRS_PER_PUD - 1), %rax
+       leaq    level3_spare_pgt(%rip), %rbx
+       jmp     set_level2_spare
+
+skip_level3_spare:
+       movq    %rdi, %rax
        shrq    $PUD_SHIFT, %rax
        andq    $(PTRS_PER_PUD - 1), %rax
        jz      ident_complete
 
-       leaq    (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
+       /* no level3 spare needed, only set level2 */
        leaq    level3_ident_pgt(%rip), %rbx
+
+set_level2_spare:
+       leaq    (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
        movq    %rdx, 0(%rbx, %rax, 8)
 
        movq    %rdi, %rax
@@ -435,6 +452,9 @@ NEXT_PAGE(level2_kernel_pgt)
        PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
                KERNEL_IMAGE_SIZE/PMD_SIZE)
 
+NEXT_PAGE(level3_spare_pgt)
+       .fill   512, 8, 0
+
 NEXT_PAGE(level2_spare_pgt)
        .fill   512, 8, 0
 
-- 
1.7.7
