Module Name: src
Committed By: manu
Date: Tue Dec 10 02:06:07 UTC 2019
Modified Files:
src/sys/arch/amd64/amd64: locore.S machdep.c
src/sys/arch/amd64/conf: GENERIC files.amd64 kern.ldscript
src/sys/arch/x86/x86: efi.c multiboot2.c
Log Message:
Add multiboot 2 support to amd64 kernel
To generate a diff of this commit:
cvs rdiff -u -r1.192 -r1.193 src/sys/arch/amd64/amd64/locore.S
cvs rdiff -u -r1.342 -r1.343 src/sys/arch/amd64/amd64/machdep.c
cvs rdiff -u -r1.548 -r1.549 src/sys/arch/amd64/conf/GENERIC
cvs rdiff -u -r1.113 -r1.114 src/sys/arch/amd64/conf/files.amd64
cvs rdiff -u -r1.27 -r1.28 src/sys/arch/amd64/conf/kern.ldscript
cvs rdiff -u -r1.20 -r1.21 src/sys/arch/x86/x86/efi.c
cvs rdiff -u -r1.2 -r1.3 src/sys/arch/x86/x86/multiboot2.c
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/sys/arch/amd64/amd64/locore.S
diff -u src/sys/arch/amd64/amd64/locore.S:1.192 src/sys/arch/amd64/amd64/locore.S:1.193
--- src/sys/arch/amd64/amd64/locore.S:1.192 Fri Nov 22 23:36:25 2019
+++ src/sys/arch/amd64/amd64/locore.S Tue Dec 10 02:06:07 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.S,v 1.192 2019/11/22 23:36:25 ad Exp $ */
+/* $NetBSD: locore.S,v 1.193 2019/12/10 02:06:07 manu Exp $ */
/*
* Copyright-o-rama!
@@ -158,6 +158,7 @@
#include "opt_compat_netbsd.h"
#include "opt_compat_netbsd32.h"
+#include "opt_multiboot.h"
#include "opt_xen.h"
#include "opt_svs.h"
@@ -177,6 +178,13 @@
#include <machine/frameasm.h>
#include <machine/cputypes.h>
+#ifndef XENPV
+#include <arch/i386/include/multiboot.h>
+#endif
+
+#define CODE_SEGMENT 0x08
+#define DATA_SEGMENT 0x10
+
#if NLAPIC > 0
#include <machine/i82489reg.h>
#endif
@@ -424,6 +432,50 @@ END(farjmp64)
.space 512
tmpstk:
+.section multiboot,"ax",@progbits
+#if defined(MULTIBOOT)
+ .align 8
+ .globl Multiboot2_Header
+_C_LABEL(Multiboot2_Header):
+ .int MULTIBOOT2_HEADER_MAGIC
+ .int MULTIBOOT2_ARCHITECTURE_I386
+ .int Multiboot2_Header_end - Multiboot2_Header
+ .int -(MULTIBOOT2_HEADER_MAGIC + MULTIBOOT2_ARCHITECTURE_I386 \
+ + (Multiboot2_Header_end - Multiboot2_Header))
+
+ .int 1 /* MULTIBOOT_HEADER_TAG_INFORMATION_REQUEST */
+ .int 12 /* sizeof(multiboot_header_tag_information_request) */
+ /* + sizeof(uint32_t) * requests */
+ .int 4 /* MULTIBOOT_TAG_TYPE_BASIC_MEMINFO */
+ .align 8
+
+ .int 3 /* MULTIBOOT_HEADER_TAG_ENTRY_ADDRESS */
+ .int 16 /* sizeof(struct multiboot_tag_efi64) */
+ .quad (multiboot2_entry - KERNBASE)
+ .align 8
+
+ .int 9 /* MULTIBOOT_HEADER_TAG_ENTRY_ADDRESS_EFI64 */
+ .int 16 /* sizeof(struct multiboot_tag_efi64) */
+ .quad (multiboot2_entry - KERNBASE)
+ .align 8
+
+#if notyet
+ /*
+ * Could be used to get an early console for debug,
+ * but this is broken.
+ */
+ .int 7 /* MULTIBOOT_HEADER_TAG_EFI_BS */
+ .int 8 /* sizeof(struct multiboot_tag) */
+ .align 8
+#endif
+
+ .int 0 /* MULTIBOOT_HEADER_TAG_END */
+ .int 8 /* sizeof(struct multiboot_tag) */
+ .align 8
+ .globl Multiboot2_Header_end
+_C_LABEL(Multiboot2_Header_end):
+#endif /* MULTIBOOT */
+
/*
* Some hackage to deal with 64bit symbols in 32 bit mode.
* This may not be needed if things are cleaned up a little.
@@ -440,6 +492,700 @@ ENTRY(start)
/* Warm boot */
movw $0x1234,0x472
+#if defined(MULTIBOOT)
+ jmp .Lnative_loader
+
+
+multiboot2_entry:
+ .code64
+ /*
+ * multiboot2 entry point. We are left here without
+ * stack and with no idea of where we were loaded in memory.
+ * The only inputs are
+ * %eax MULTIBOOT2_BOOTLOADER_MAGIC
+ * %ebx pointer to multiboot_info
+ *
+ * Here we will:
+ * - copy the kernel to 0x200000 (KERNTEXTOFF - KERNBASE)
+ * as almost all the code in locore.S assume it is there.
+ * This is derived from
+ * src/sys/arch/i386/stand/efiboot/bootx64/startprog64.S
+ * - copy multiboot_info, as done in multiboot_pre_reloc() from
+ * src/sys/arch/x86/x86/multiboot2.c
+ * Unfortunately we cannot call that function as there is
+ * no simple way to build it as 32 bit code in a 64 bit kernel.
+ * - Copy ELF symbols, also as in multiboot_pre_reloc()
+ */
+
+ cli
+
+ /*
+ * Discover our load address and use it to get start address
+ */
+ mov $_RELOC(tmpstk),%rsp
+ call next
+next: pop %r8
+ sub $(next - start), %r8
+
+ /*
+ * Save multiboot_info for later. We cannot use
+ * temporary stack for that since we are going to
+ * overwrite it.
+ */
+ movl %ebx, (multiboot2_info_ptr - start)(%r8)
+
+ /*
+ * Get relocated multiboot2_loader entry point in %r9
+ */
+ mov $(KERNTEXTOFF - KERNBASE), %r9
+ add $(multiboot2_loader - kernel_text), %r9
+
+ /* Copy kernel */
+ mov $(KERNTEXTOFF - KERNBASE), %rdi /* dest */
+ mov %r8, %rsi
+ sub $(start - kernel_text), %rsi /* src */
+ mov $(__kernel_end - kernel_text), %rcx /* size */
+ mov %rcx, %r12
+ movq %rdi, %r11 /* for misaligned check */
+
+#if !defined(NO_OVERLAP)
+ movq %rdi, %r13
+ subq %rsi, %r13
+#endif
+
+ shrq $3, %rcx /* count for copy by words */
+ jz 8f /* j if less than 8 bytes */
+
+ lea -8(%rdi, %r12), %r14 /* target address of last 8 */
+ mov -8(%rsi, %r12), %r15 /* get last word */
+#if !defined(NO_OVERLAP)
+ cmpq %r12, %r13 /* overlapping? */
+ jb 10f
+#endif
+
+/*
+ * Non-overlapping, copy forwards.
+ * Newer Intel cpus (Nehalem) will do 16byte read/write transfers
+ * if %ecx is more than 76.
+ * AMD might do something similar some day.
+ */
+ and $7, %r11 /* destination misaligned ? */
+ jnz 12f
+ rep
+ movsq
+ mov %r15, (%r14) /* write last word */
+ jmp .Lcopy_done
+
+/*
+ * Destination misaligned
+ * AMD say it is better to align the destination (not the source).
+ * This will also re-align copies if the source and dest are both
+ * misaligned by the same amount)
+ * (I think Nehalem will use its accelerated copy if the source
+ * and destination have the same alignment.)
+ */
+12:
+ lea -9(%r11, %r12), %rcx /* post re-alignment count */
+ neg %r11 /* now -1 .. -7 */
+ mov (%rsi), %r12 /* get first word */
+ mov %rdi, %r13 /* target for first word */
+ lea 8(%rsi, %r11), %rsi
+ lea 8(%rdi, %r11), %rdi
+ shr $3, %rcx
+ rep
+ movsq
+ mov %r12, (%r13) /* write first word */
+ mov %r15, (%r14) /* write last word */
+ jmp .Lcopy_done
+
+#if !defined(NO_OVERLAP)
+/* Must copy backwards.
+ * Reverse copy is probably easy to code faster than 'rep movds'
+ * since that requires (IIRC) an extra clock every 3 iterations (AMD).
+ * However I don't suppose anything cares that much!
+ * The big cost is the std/cld pair - reputedly 50+ cycles on Netburst P4.
+ * The copy is aligned with the buffer start (more likely to
+ * be a multiple of 8 than the end).
+ */
+10:
+ lea -8(%rsi, %rcx, 8), %rsi
+ lea -8(%rdi, %rcx, 8), %rdi
+ std
+ rep
+ movsq
+ cld
+ mov %r15, (%r14) /* write last bytes */
+ jmp .Lcopy_done
+#endif
+
+/* Less than 8 bytes to copy, copy by bytes */
+/* Intel Nehalem optimise 'rep movsb' for <= 7 bytes (9-15 clocks).
+ * For longer transfers it is 50+ !
+ */
+8: mov %r12, %rcx
+
+#if !defined(NO_OVERLAP)
+ cmpq %r12, %r13 /* overlapping? */
+ jb 81f
+#endif
+
+ /* nope, copy forwards. */
+ rep
+ movsb
+ jmp .Lcopy_done
+
+#if !defined(NO_OVERLAP)
+/* Must copy backwards */
+81:
+ lea -1(%rsi, %rcx), %rsi
+ lea -1(%rdi, %rcx), %rdi
+ std
+ rep
+ movsb
+ cld
+#endif
+ /* End of copy kernel */
+.Lcopy_done:
+
+ mov %r8, %rdi /* %rdi: loaded start address */
+ mov %r9, %rsi /* %rsi: kernel entry address */
+
+ /* Prepare jump address */
+ lea (multiboot2_loader32a - start)(%rdi), %rax
+ movl %eax, (multiboot2_loader32r - start)(%rdi)
+
+ /* Setup GDT */
+ lea (gdt - start)(%rdi), %rax
+ mov %rax, (gdtrr - start)(%rdi)
+ lgdt (gdtr - start)(%rdi)
+
+ /* Jump to set %cs */
+ ljmp *(multiboot2_loader32r - start)(%rdi)
+
+ .align 4
+ .code32
+multiboot2_loader32a:
+ movl $DATA_SEGMENT, %eax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %fs
+ movw %ax, %gs
+ movw %ax, %ss
+
+ /* Already set new stack pointer */
+ movl %esp, %ebp
+
+ /* Disable Paging in CR0 */
+ movl %cr0, %eax
+ andl $(~CR0_PG), %eax
+ movl %eax, %cr0
+
+ /* Disable PAE in CR4 */
+ movl %cr4, %eax
+ andl $(~CR4_PAE), %eax
+ movl %eax, %cr4
+
+ jmp multiboot2_loader32b
+
+ .align 4
+multiboot2_loader32b:
+ xor %eax, %eax
+
+ /*
+ * Reload multiboot info from target location
+ */
+ movl _RELOC(multiboot2_info_ptr), %ebx
+ call *%esi
+
+ .align 16
+multiboot2_loader32r:
+ .long 0
+ .long CODE_SEGMENT
+ .align 16
+gdt:
+ .long 0, 0
+ .byte 0xff, 0xff, 0x00, 0x00, 0x00, 0x9f, 0xcf, 0x00
+ .byte 0xff, 0xff, 0x00, 0x00, 0x00, 0x93, 0xcf, 0x00
+gdtr:
+ .word gdtr - gdt
+gdtrr:
+ .quad 0
+
+multiboot2_info_ptr:
+ .long 0
+
+ .align 16
+multiboot2_loader:
+ /*
+ * Here we would like to call multiboot2_pre_reloc() but
+ * we do not yet run in long mode, which means we need
+ * a 32 bit version of that function. Unfortunately,
+ * mixing 32-bit and 64-bit object file at link time
+ * does not work. As a result, we need to do the job
+ * of multiboot2_pre_reloc() here in assembly.
+ */
+#if multiboot2_pre_reloc_would_be_built_as_ia32
+ movl $_RELOC(tmpstk),%esp
+ mov %ebx,%edi /* Address of Multiboot information */
+ call _C_LABEL(multiboot2_pre_reloc)
+#else
+ /*
+ * Copy multiboot_info
+ */
+ movl $_RELOC(multiboot_info),%edi
+ movl %ebx,%esi
+ movl (%ebx),%ecx
+ shr $2,%ecx
+ rep
+ movsl
+
+ /*
+ * Set multiboot2_enabled
+ */
+ movl $1,%eax
+ movl %eax,RELOC(multiboot2_enabled)
+
+ /*
+ * Look for MULTIBOOT_TAG_TYPE_ELF_SECTIONS
+ */
+ movl $_RELOC(multiboot_info),%esi
+ movl (%esi),%ecx /* multiboot_info size */
+ movl %esi,%edx
+ addl %ecx,%edx /* %edx: end of multiboot_info */
+ addl $8,%esi /* skip two words of multiboot_info header */
+mbt_loop:
+ movl (%esi),%ebx /* mbt->type */
+ cmpl $9,%ebx /* 9 for MULTIBOOT_TAG_TYPE_ELF_SECTIONS */
+ je found_elf_sections
+
+ movl 4(%esi),%eax /* mbt->size */
+ addl %eax,%esi
+ addl $7,%esi /* roundup(%esi,8) */
+ andl $~7,%esi
+
+ cmpl %edx,%esi
+ jle mbt_loop
+ jmp elf_sections_done
+
+found_elf_sections:
+ movl $0,%eax
+ movl %esp,%ebp /* %ebp is esymp */
+ push %eax
+ push $KERNBASE_LO /* kernbase */
+ push $_RELOC(end) /* void *end */
+ push %ebp /* int **esymp */
+ push $_RELOC(has_syms) /* bool *has_symsp */
+ push $_RELOC(Multiboot_Symbols)/* struct multiboot_symbol *ms */
+ push %esi /* struct multiboot_tag_elf_sections *mbt_elf */
+ call multiboot2_copy_syms32
+
+ /* Adjust esym as a 64 bit pointer if esymp was set */
+ movl (%ebp),%eax
+ testl %eax,%eax /* esymp = NULL? */
+ jz elf_sections_done
+
+ movl $RELOC(esym),%ebp
+ movl %eax,(%ebp)
+ movl $KERNBASE_HI,4(%ebp)
+
+ jmp elf_sections_done
+
+ /*
+ * This is multiboot2_copy_syms() from
+ * src/sys/arch/x86/x86/multiboot2.c
+ * built with -m32 -mcmodel=32 -D_LOCORE_64
+ */
+multiboot2_copy_syms32:
+ push %ebp
+ mov %esp,%ebp
+ push %edi
+ push %esi
+ push %ebx
+ sub $0x20,%esp
+ mov 0x8(%ebp),%esi
+ /* for (i = 0; i < mbt_elf->num && symtabp == NULL && */
+ mov 0x8(%esi),%ebx
+ test %ebx,%ebx
+ je copy_syms_4ce
+ add $0x14,%esi
+ mov %esi,%eax
+ xor %edx,%edx
+ jmp copy_syms_3a0
+copy_syms_395:
+ cmp %edx,%ebx
+ jbe copy_syms_4ce
+copy_syms_39d:
+ add $0x40,%eax
+copy_syms_3a0:
+ add $0x1,%edx
+ /* if ((shdrp->sh_type == SHT_SYMTAB) && */
+ cmpl $0x2,0x4(%eax)
+ jne copy_syms_395
+ /* shdrp->sh_link != SHN_UNDEF) { */
+ mov 0x28(%eax),%ecx
+ /* if ((shdrp->sh_type == SHT_SYMTAB) && */
+ test %ecx,%ecx
+ je copy_syms_395
+ /* [shdrp->sh_link]; */
+ shl $0x6,%ecx
+ /* shdrp2 = &((locore_Elf_Shdr *)mbt_elf->sections) */
+ add %esi,%ecx
+ /* if (shdrp2->sh_type == SHT_STRTAB) { */
+ cmpl $0x3,0x4(%ecx)
+ jne copy_syms_395
+ /* for (i = 0; i < mbt_elf->num && symtabp == NULL && */
+ cmp %ebx,%edx
+ jae copy_syms_6d1
+ test %eax,%eax
+ je copy_syms_608
+ /* if (symtabp == NULL || strtabp == NULL) */
+copy_syms_3cb:
+ test %ecx,%ecx
+ lea 0x0(%esi),%esi
+ je copy_syms_4ce
+ /* symaddr = symtabp->sh_addr; */
+ mov 0x10(%eax),%edi
+ mov %edi,-0x10(%ebp)
+ mov 0x14(%eax),%ebx
+ mov %ebx,-0x18(%ebp)
+ /* straddr = strtabp->sh_addr; */
+ mov 0x10(%ecx),%esi
+ mov %esi,-0x14(%ebp)
+ mov 0x14(%ecx),%ebx
+ mov %ebx,-0x20(%ebp)
+ /* symsize = symtabp->sh_size; */
+ mov 0x20(%eax),%ebx
+ /* strsize = strtabp->sh_size; */
+ mov 0x20(%ecx),%eax
+ mov %eax,-0x1c(%ebp)
+ cmp 0x18(%ebp),%edi
+ jae copy_syms_4d6
+ cmp %esi,0x18(%ebp)
+ ja copy_syms_4e0
+ jae copy_syms_54d
+ /* cp2dst = (locore_Elf_Addr)(uintptr_t)endp + cp1size; */
+copy_syms_40f:
+ mov -0x1c(%ebp),%ecx
+ mov %ecx,%eax
+ xor %edx,%edx
+ /* cp1dst = (locore_Elf_Addr)(uintptr_t)endp; */
+ mov 0x18(%ebp),%esi
+ xor %edi,%edi
+ /* cp2dst = (locore_Elf_Addr)(uintptr_t)endp + cp1size; */
+ add %esi,%eax
+ adc %edi,%edx
+ mov %eax,-0x2c(%ebp)
+ mov %edx,-0x28(%ebp)
+ /* (void)memcpy((void *)(uintptr_t)cp1dst, */
+ mov %ecx,%eax
+ mov 0x18(%ebp),%edi
+ mov -0x14(%ebp),%esi
+ cmp $0x4,%ecx
+ jae copy_syms_5e8
+copy_syms_436:
+ test $0x2,%al
+ je copy_syms_43c
+ movsw %ds:(%esi),%es:(%edi)
+copy_syms_43c:
+ test $0x1,%al
+ je copy_syms_441
+ movsb %ds:(%esi),%es:(%edi)
+ /* (void)memcpy((void *)(uintptr_t)cp2dst, */
+copy_syms_441:
+ mov %ebx,%eax
+ mov 0x18(%ebp),%edi
+ mov -0x1c(%ebp),%esi
+ add %esi,%edi
+ mov -0x10(%ebp),%esi
+ cmp $0x4,%ebx
+ jae copy_syms_5c4
+copy_syms_457:
+ test $0x2,%al
+ je copy_syms_45d
+ movsw %ds:(%esi),%es:(%edi)
+copy_syms_45d:
+ test $0x1,%al
+ je copy_syms_462
+ movsb %ds:(%esi),%es:(%edi)
+ /* symstart = (cp1src == symaddr) ? cp1dst : cp2dst; */
+copy_syms_462:
+ mov -0x18(%ebp),%edx
+ mov -0x20(%ebp),%edi
+ xor %edi,%edx
+ mov -0x10(%ebp),%eax
+ mov -0x14(%ebp),%ecx
+ xor %ecx,%eax
+ or %eax,%edx
+ je copy_syms_6ba
+ mov -0x2c(%ebp),%eax
+ mov %eax,-0x24(%ebp)
+ mov %ecx,-0x10(%ebp)
+ mov %edi,-0x18(%ebp)
+ /* strstart = (cp1src == straddr) ? cp1dst : cp2dst; */
+copy_syms_486:
+ mov -0x20(%ebp),%edx
+ xor -0x18(%ebp),%edx
+ mov -0x14(%ebp),%eax
+ xor -0x10(%ebp),%eax
+ or %eax,%edx
+ je copy_syms_545
+copy_syms_49a:
+ mov -0x2c(%ebp),%esi
+ /* ms->s_symstart = symstart + kernbase; */
+copy_syms_49d:
+ mov -0x24(%ebp),%eax
+ add 0x1c(%ebp),%eax
+ mov 0xc(%ebp),%edi
+ mov %eax,(%edi)
+ /* ms->s_symsize = symsize; */
+ mov %edi,%eax
+ mov %ebx,0x4(%edi)
+ /* ms->s_strstart = strstart + kernbase; */
+ add 0x1c(%ebp),%esi
+ mov %esi,0x8(%edi)
+ /* ms->s_strsize = strsize; */
+ mov -0x1c(%ebp),%edi
+ mov %edi,0xc(%eax)
+ /* *has_symsp = true; */
+ mov 0x10(%ebp),%eax
+ movb $0x1,(%eax)
+ /* *esymp = (int *)((uintptr_t)endp + symsize + strsize + kernbase); */
+ mov 0x18(%ebp),%eax
+ add 0x1c(%ebp),%eax
+ add %eax,%ebx
+ add %edi,%ebx
+ mov 0x14(%ebp),%eax
+ mov %ebx,(%eax)
+copy_syms_4ce:
+ add $0x20,%esp
+ pop %ebx
+ pop %esi
+ pop %edi
+ pop %ebp
+ ret
+copy_syms_4d6:
+ jbe copy_syms_54d
+ mov -0x14(%ebp),%eax
+ cmp %eax,0x18(%ebp)
+ jbe copy_syms_54d
+ /* cp1dst = (locore_Elf_Addr)(uintptr_t)endp; */
+copy_syms_4e0:
+ mov 0x18(%ebp),%eax
+ mov %eax,-0x24(%ebp)
+ /* cp2dst = (locore_Elf_Addr)(uintptr_t)endp + cp1size; */
+ mov %ebx,%eax
+ xor %edx,%edx
+ /* cp1dst = (locore_Elf_Addr)(uintptr_t)endp; */
+ mov 0x18(%ebp),%esi
+ xor %edi,%edi
+ /* cp2dst = (locore_Elf_Addr)(uintptr_t)endp + cp1size; */
+ add %esi,%eax
+ adc %edi,%edx
+ mov %eax,-0x2c(%ebp)
+ mov %edx,-0x28(%ebp)
+ /* (void)memcpy((void *)(uintptr_t)cp1dst, */
+ mov %ebx,%eax
+ mov 0x18(%ebp),%edi
+ mov -0x10(%ebp),%esi
+ cmp $0x4,%ebx
+ jae copy_syms_5a8
+copy_syms_50a:
+ test $0x2,%al
+ jne copy_syms_57b
+ test $0x1,%al
+ jne copy_syms_578
+ /* (void)memcpy((void *)(uintptr_t)cp2dst, */
+copy_syms_512:
+ mov -0x1c(%ebp),%ecx
+ mov %ecx,%eax
+ mov 0x18(%ebp),%edi
+ add %ebx,%edi
+ mov -0x14(%ebp),%esi
+ cmp $0x4,%ecx
+ jae copy_syms_584
+copy_syms_524:
+ test $0x2,%al
+ jne copy_syms_56c
+ test $0x1,%al
+ je copy_syms_486
+copy_syms_530:
+ movsb %ds:(%esi),%es:(%edi)
+ /* strstart = (cp1src == straddr) ? cp1dst : cp2dst; */
+ mov -0x20(%ebp),%edx
+ xor -0x18(%ebp),%edx
+ mov -0x14(%ebp),%eax
+ xor -0x10(%ebp),%eax
+ or %eax,%edx
+ jne copy_syms_49a
+copy_syms_545:
+ mov 0x18(%ebp),%esi
+ jmp copy_syms_49d
+ /* if (symaddr < straddr) { */
+copy_syms_54d:
+ mov -0x20(%ebp),%edi
+ cmp %edi,-0x18(%ebp)
+ jb copy_syms_4e0
+ ja copy_syms_40f
+ mov -0x14(%ebp),%edi
+ cmp %edi,-0x10(%ebp)
+ jb copy_syms_4e0
+ jmp copy_syms_40f
+ /* (void)memcpy((void *)(uintptr_t)cp2dst, */
+copy_syms_56c:
+ movsw %ds:(%esi),%es:(%edi)
+ test $0x1,%al
+ je copy_syms_486
+ jmp copy_syms_530
+ /* (void)memcpy((void *)(uintptr_t)cp1dst, */
+copy_syms_578:
+ movsb %ds:(%esi),%es:(%edi)
+ jmp copy_syms_512
+copy_syms_57b:
+ movsw %ds:(%esi),%es:(%edi)
+ test $0x1,%al
+ nop
+ je copy_syms_512
+ jmp copy_syms_578
+ /* (void)memcpy((void *)(uintptr_t)cp2dst, */
+copy_syms_584:
+ test $0x1,%edi
+ jne copy_syms_650
+copy_syms_590:
+ test $0x2,%edi
+ jne copy_syms_63c
+copy_syms_59c:
+ mov %eax,%ecx
+ shr $0x2,%ecx
+ rep movsl %ds:(%esi),%es:(%edi)
+ jmp copy_syms_524
+ /* (void)memcpy((void *)(uintptr_t)cp1dst, */
+copy_syms_5a8:
+ test $0x1,%edi
+ jne copy_syms_626
+copy_syms_5b0:
+ test $0x2,%edi
+ jne copy_syms_615
+copy_syms_5b8:
+ mov %eax,%ecx
+ shr $0x2,%ecx
+ rep movsl %ds:(%esi),%es:(%edi)
+ jmp copy_syms_50a
+ /* (void)memcpy((void *)(uintptr_t)cp2dst, */
+copy_syms_5c4:
+ test $0x1,%edi
+ jne copy_syms_666
+copy_syms_5d0:
+ test $0x2,%edi
+ jne copy_syms_6a6
+copy_syms_5dc:
+ mov %eax,%ecx
+ shr $0x2,%ecx
+ rep movsl %ds:(%esi),%es:(%edi)
+ jmp copy_syms_457
+ /* (void)memcpy((void *)(uintptr_t)cp1dst, */
+copy_syms_5e8:
+ test $0x1,%edi
+ jne copy_syms_68d
+copy_syms_5f4:
+ test $0x2,%edi
+ jne copy_syms_679
+copy_syms_5fc:
+ mov %eax,%ecx
+ shr $0x2,%ecx
+ rep movsl %ds:(%esi),%es:(%edi)
+ jmp copy_syms_436
+ /* for (i = 0; i < mbt_elf->num && symtabp == NULL && */
+copy_syms_608:
+ test %ecx,%ecx
+ jne copy_syms_4ce
+ jmp copy_syms_39d
+ /* (void)memcpy((void *)(uintptr_t)cp1dst, */
+copy_syms_615:
+ movzwl (%esi),%edx
+ mov %dx,(%edi)
+ add $0x2,%edi
+ add $0x2,%esi
+ sub $0x2,%eax
+ jmp copy_syms_5b8
+copy_syms_626:
+ movzbl (%esi),%eax
+ mov %al,(%edi)
+ mov 0x18(%ebp),%eax
+ lea 0x1(%eax),%edi
+ add $0x1,%esi
+ lea -0x1(%ebx),%eax
+ jmp copy_syms_5b0
+ /* (void)memcpy((void *)(uintptr_t)cp2dst, */
+copy_syms_63c:
+ movzwl (%esi),%edx
+ mov %dx,(%edi)
+ add $0x2,%edi
+ add $0x2,%esi
+ sub $0x2,%eax
+ jmp copy_syms_59c
+copy_syms_650:
+ movzbl (%esi),%eax
+ mov %al,(%edi)
+ add $0x1,%edi
+ add $0x1,%esi
+ mov -0x1c(%ebp),%eax
+ sub $0x1,%eax
+ jmp copy_syms_590
+copy_syms_666:
+ movzbl (%esi),%eax
+ mov %al,(%edi)
+ add $0x1,%edi
+ add $0x1,%esi
+ lea -0x1(%ebx),%eax
+ jmp copy_syms_5d0
+ /* (void)memcpy((void *)(uintptr_t)cp1dst, */
+copy_syms_679:
+ movzwl (%esi),%edx
+ mov %dx,(%edi)
+ add $0x2,%edi
+ add $0x2,%esi
+ sub $0x2,%eax
+ jmp copy_syms_5fc
+copy_syms_68d:
+ movzbl (%esi),%eax
+ mov %al,(%edi)
+ mov 0x18(%ebp),%eax
+ lea 0x1(%eax),%edi
+ add $0x1,%esi
+ mov -0x1c(%ebp),%eax
+ sub $0x1,%eax
+ jmp copy_syms_5f4
+ /* (void)memcpy((void *)(uintptr_t)cp2dst, */
+copy_syms_6a6:
+ movzwl (%esi),%edx
+ mov %dx,(%edi)
+ add $0x2,%edi
+ add $0x2,%esi
+ sub $0x2,%eax
+ jmp copy_syms_5dc
+copy_syms_6ba:
+ mov -0x14(%ebp),%eax
+ mov %eax,-0x10(%ebp)
+ mov -0x20(%ebp),%eax
+ mov %eax,-0x18(%ebp)
+ /* cp1dst = (locore_Elf_Addr)(uintptr_t)endp; */
+ mov 0x18(%ebp),%eax
+ mov %eax,-0x24(%ebp)
+ jmp copy_syms_486
+ /* if (symtabp == NULL || strtabp == NULL) */
+copy_syms_6d1:
+ test %eax,%eax
+ jne copy_syms_3cb
+ jmp copy_syms_4ce
+elf_sections_done:
+#endif
+
+ jmp .Lbegin
+
+
+#endif /* MULTIBOOT */
+
+.Lnative_loader:
/*
* Load parameters from the stack (32 bits):
* boothowto, [bootdev], bootinfo, esym, biosextmem, biosbasemem
@@ -562,6 +1308,7 @@ ENTRY(start)
* Done with the parameters!
*/
+.Lbegin:
/* First, reset the PSL. */
pushl $PSL_MBO
popfl
@@ -877,6 +1624,16 @@ longmode_hi:
leaq (USPACE-FRAMESIZE)(%rax),%rsp
xorq %rbp,%rbp /* mark end of frames */
+#if defined(MULTIBOOT)
+ /* It is now safe to parse the Multiboot information structure
+ * we saved before from C code. Note that we cannot delay its
+ * parsing any more because initgdt (called below) needs to make
+ * use of this information.
+ */
+ pushq %rsi
+ call _C_LABEL(multiboot2_post_reloc)
+ popq %rsi
+#endif
xorw %ax,%ax
movw %ax,%gs
movw %ax,%fs
Index: src/sys/arch/amd64/amd64/machdep.c
diff -u src/sys/arch/amd64/amd64/machdep.c:1.342 src/sys/arch/amd64/amd64/machdep.c:1.343
--- src/sys/arch/amd64/amd64/machdep.c:1.342 Fri Dec 6 08:35:21 2019
+++ src/sys/arch/amd64/amd64/machdep.c Tue Dec 10 02:06:07 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: machdep.c,v 1.342 2019/12/06 08:35:21 maxv Exp $ */
+/* $NetBSD: machdep.c,v 1.343 2019/12/10 02:06:07 manu Exp $ */
/*
* Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -110,9 +110,10 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.342 2019/12/06 08:35:21 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.343 2019/12/10 02:06:07 manu Exp $");
#include "opt_modular.h"
+#include "opt_multiboot.h"
#include "opt_user_ldt.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"
@@ -184,6 +185,8 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v
#include <x86/cpuvar.h>
#include <x86/machdep.h>
+#include <arch/i386/include/multiboot.h>
+
#include <x86/x86/tsc.h>
#include <dev/isa/isareg.h>
@@ -371,6 +374,10 @@ cpu_startup(void)
initmsgbuf((void *)msgbuf_vaddr, round_page(sz));
+#ifdef MULTIBOOT
+ multiboot2_print_info();
+#endif
+
minaddr = 0;
/*
@@ -1504,6 +1511,11 @@ init_x86_64_ksyms(void)
db_machine_init();
#endif
+#if defined(MULTIBOOT)
+ if (multiboot2_ksyms_addsyms_elf())
+ return;
+#endif
+
#ifndef XENPV
symtab = lookup_bootinfo(BTINFO_SYMTAB);
if (symtab) {
Index: src/sys/arch/amd64/conf/GENERIC
diff -u src/sys/arch/amd64/conf/GENERIC:1.548 src/sys/arch/amd64/conf/GENERIC:1.549
--- src/sys/arch/amd64/conf/GENERIC:1.548 Thu Dec 5 22:05:05 2019
+++ src/sys/arch/amd64/conf/GENERIC Tue Dec 10 02:06:07 2019
@@ -1,4 +1,4 @@
-# $NetBSD: GENERIC,v 1.548 2019/12/05 22:05:05 sevan Exp $
+# $NetBSD: GENERIC,v 1.549 2019/12/10 02:06:07 manu Exp $
#
# GENERIC machine description file
#
@@ -22,10 +22,12 @@ include "arch/amd64/conf/std.amd64"
options INCLUDE_CONFIG_FILE # embed config file in kernel binary
-#ident "GENERIC-$Revision: 1.548 $"
+#ident "GENERIC-$Revision: 1.549 $"
maxusers 64 # estimated number of users
+options MULTIBOOT # Multiboot support (see multiboot(8))
+
# delay between "rebooting ..." message and hardware reset, in milliseconds
#options CPURESET_DELAY=2000
Index: src/sys/arch/amd64/conf/files.amd64
diff -u src/sys/arch/amd64/conf/files.amd64:1.113 src/sys/arch/amd64/conf/files.amd64:1.114
--- src/sys/arch/amd64/conf/files.amd64:1.113 Sat Sep 7 18:56:01 2019
+++ src/sys/arch/amd64/conf/files.amd64 Tue Dec 10 02:06:07 2019
@@ -1,4 +1,4 @@
-# $NetBSD: files.amd64,v 1.113 2019/09/07 18:56:01 maxv Exp $
+# $NetBSD: files.amd64,v 1.114 2019/12/10 02:06:07 manu Exp $
#
# new style config file for amd64 architecture
#
@@ -30,6 +30,10 @@ defflag opt_spectre.h SPECTRE_V2_GCC_MIT
defflag USER_LDT
defflag eisa.h EISA
+# Multiboot support
+defflag opt_multiboot.h MULTIBOOT
+file arch/x86/x86/multiboot2.c multiboot
+
# Start code
file arch/amd64/amd64/locore.S machdep
file arch/amd64/amd64/vector.S machdep
Index: src/sys/arch/amd64/conf/kern.ldscript
diff -u src/sys/arch/amd64/conf/kern.ldscript:1.27 src/sys/arch/amd64/conf/kern.ldscript:1.28
--- src/sys/arch/amd64/conf/kern.ldscript:1.27 Mon Aug 20 15:04:51 2018
+++ src/sys/arch/amd64/conf/kern.ldscript Tue Dec 10 02:06:07 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: kern.ldscript,v 1.27 2018/08/20 15:04:51 maxv Exp $ */
+/* $NetBSD: kern.ldscript,v 1.28 2019/12/10 02:06:07 manu Exp $ */
#include "assym.h"
@@ -13,9 +13,13 @@ __LARGE_PAGE_SIZE = 0x200000 ;
ENTRY(_start)
SECTIONS
{
+ multiboot 0x4000 :
+ {
+ KEEP(*(multiboot));
+ }
.text : AT (ADDR(.text) & 0x0fffffff)
{
- . = ALIGN(__PAGE_SIZE);
+ . = ALIGN(__LARGE_PAGE_SIZE);
__text_user_start = . ;
*(.text.user)
. = ALIGN(__PAGE_SIZE);
Index: src/sys/arch/x86/x86/efi.c
diff -u src/sys/arch/x86/x86/efi.c:1.20 src/sys/arch/x86/x86/efi.c:1.21
--- src/sys/arch/x86/x86/efi.c:1.20 Fri Oct 18 00:56:25 2019
+++ src/sys/arch/x86/x86/efi.c Tue Dec 10 02:06:07 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: efi.c,v 1.20 2019/10/18 00:56:25 manu Exp $ */
+/* $NetBSD: efi.c,v 1.21 2019/12/10 02:06:07 manu Exp $ */
/*-
* Copyright (c) 2016 The NetBSD Foundation, Inc.
@@ -27,7 +27,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: efi.c,v 1.20 2019/10/18 00:56:25 manu Exp $");
+__KERNEL_RCSID(0, "$NetBSD: efi.c,v 1.21 2019/12/10 02:06:07 manu Exp $");
#include <sys/kmem.h>
#include <sys/param.h>
@@ -77,11 +77,6 @@ efi_getva(paddr_t pa)
vaddr_t va;
int rv;
-#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
- if (mm_md_direct_mapped_phys(pa, &va))
- return va;
-#endif
-
rv = _x86_memio_map(x86_bus_space_mem, pa,
PAGE_SIZE, 0, (bus_space_handle_t *)&va);
if (rv != 0) {
@@ -98,15 +93,6 @@ efi_getva(paddr_t pa)
static void
efi_relva(paddr_t pa, vaddr_t va)
{
-
-#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
- vaddr_t va0 __diagused;
- if (mm_md_direct_mapped_phys(pa, &va0)) {
- KASSERT(va0 == va);
- return;
- }
-#endif
-
(void)_x86_memio_unmap(x86_bus_space_mem, (bus_space_handle_t)va,
PAGE_SIZE, NULL);
}
Index: src/sys/arch/x86/x86/multiboot2.c
diff -u src/sys/arch/x86/x86/multiboot2.c:1.2 src/sys/arch/x86/x86/multiboot2.c:1.3
--- src/sys/arch/x86/x86/multiboot2.c:1.2 Fri Oct 18 14:59:22 2019
+++ src/sys/arch/x86/x86/multiboot2.c Tue Dec 10 02:06:07 2019
@@ -1,4 +1,4 @@
-/* $NetBSD: multiboot2.c,v 1.2 2019/10/18 14:59:22 hannken Exp $ */
+/* $NetBSD: multiboot2.c,v 1.3 2019/12/10 02:06:07 manu Exp $ */
/*-
* Copyright (c) 2005, 2006 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: multiboot2.c,v 1.2 2019/10/18 14:59:22 hannken Exp $");
+__KERNEL_RCSID(0, "$NetBSD: multiboot2.c,v 1.3 2019/12/10 02:06:07 manu Exp $");
#include "opt_multiboot.h"
@@ -48,7 +48,19 @@ __KERNEL_RCSID(0, "$NetBSD: multiboot2.c
#include <x86/efi.h>
#include <machine/bootinfo.h>
-#include <machine/multiboot2.h>
+#include <arch/i386/include/multiboot2.h>
+
+#ifdef _LOCORE64
+typedef uint64_t locore_vaddr_t;
+typedef Elf64_Shdr locore_Elf_Shdr;
+typedef Elf64_Word locore_Elf_Word;
+typedef Elf64_Addr locore_Elf_Addr;
+#else
+typedef vaddr_t locore_vaddr_t;
+typedef Elf_Shdr locore_Elf_Shdr;
+typedef Elf_Word locore_Elf_Word;
+typedef Elf_Addr locore_Elf_Addr;
+#endif
#if !defined(MULTIBOOT)
# error "MULTIBOOT not defined; this cannot happen."
@@ -65,12 +77,15 @@ __KERNEL_RCSID(0, "$NetBSD: multiboot2.c
(efi_char *)__UNCONST(wstring))
struct multiboot_symbols {
- void * s_symstart;
- size_t s_symsize;
- void * s_strstart;
- size_t s_strsize;
+ uint32_t s_symstart;
+ uint32_t s_symsize;
+ uint32_t s_strstart;
+ uint32_t s_strsize;
};
+void multiboot2_copy_syms(struct multiboot_tag_elf_sections *,
+ struct multiboot_symbols *,
+ bool *, int **, void *, vaddr_t);
/*
* Because of clashes between multiboot.h and multiboot2.h we
* cannot include both, and we need to redefine here:
@@ -82,21 +97,24 @@ bool multiboot2_ksyms_addsyms
extern int biosbasemem;
extern int biosextmem;
+#ifdef __i386__
extern int biosmem_implicit;
+#endif
extern int boothowto;
extern struct bootinfo bootinfo;
extern int end;
extern int * esym;
+extern char start;
/*
* There is no way to perform dynamic allocation
* at this time, hence we need to waste memory,
* with the hope data will fit.
*/
-static char multiboot_info[16384] = "\0\0\0\0";
-static bool multiboot2_enabled = false;
-static bool has_syms = false;
-static struct multiboot_symbols Multiboot_Symbols;
+char multiboot_info[16384] = "\0\0\0\0";
+bool multiboot2_enabled = false;
+bool has_syms = false;
+struct multiboot_symbols Multiboot_Symbols;
#define RELOC(type, x) ((type)((vaddr_t)(x) - KERNBASE))
@@ -151,17 +169,21 @@ exit_bs:
return;
}
-static void
-copy_syms(struct multiboot_tag_elf_sections *mbt_elf)
+void
+multiboot2_copy_syms(struct multiboot_tag_elf_sections *mbt_elf,
+ struct multiboot_symbols *ms,
+ bool *has_symsp, int **esymp, void *endp,
+ vaddr_t kernbase)
{
int i;
- struct multiboot_symbols *ms;
- Elf32_Shdr *symtabp, *strtabp;
- Elf32_Word symsize, strsize;
- Elf32_Addr symaddr, straddr;
- Elf32_Addr symstart, strstart;
-
- ms = RELOC(struct multiboot_symbols *, &Multiboot_Symbols);
+ locore_Elf_Shdr *symtabp, *strtabp;
+ locore_Elf_Word symsize, strsize;
+ locore_Elf_Addr symaddr, straddr;
+ locore_Elf_Addr symstart, strstart;
+ locore_Elf_Addr cp1src, cp1dst;
+ locore_Elf_Word cp1size;
+ locore_Elf_Addr cp2src, cp2dst;
+ locore_Elf_Word cp2size;
/*
* Locate a symbol table and its matching string table in the
@@ -171,20 +193,20 @@ copy_syms(struct multiboot_tag_elf_secti
symtabp = strtabp = NULL;
for (i = 0; i < mbt_elf->num && symtabp == NULL &&
strtabp == NULL; i++) {
- Elf32_Shdr *shdrp;
+ locore_Elf_Shdr *shdrp;
- shdrp = &((Elf32_Shdr *)mbt_elf->sections)[i];
+ shdrp = &((locore_Elf_Shdr *)mbt_elf->sections)[i];
if ((shdrp->sh_type == SHT_SYMTAB) &&
shdrp->sh_link != SHN_UNDEF) {
- Elf32_Shdr *shdrp2;
+ locore_Elf_Shdr *shdrp2;
- shdrp2 = &((Elf32_Shdr *)mbt_elf->sections)
+ shdrp2 = &((locore_Elf_Shdr *)mbt_elf->sections)
[shdrp->sh_link];
if (shdrp2->sh_type == SHT_STRTAB) {
- symtabp = shdrp;
- strtabp = shdrp2;
+ symtabp = (locore_Elf_Shdr *)shdrp;
+ strtabp = (locore_Elf_Shdr *)shdrp2;
}
}
}
@@ -208,47 +230,48 @@ copy_syms(struct multiboot_tag_elf_secti
* that if the tables start before the kernel's end address,
* they will not grow over this address.
*/
- if ((void *)symtabp < RELOC(void *, &end) &&
- (void *)strtabp < RELOC(void *, &end)) {
- symstart = RELOC(Elf32_Addr, &end);
- strstart = symstart + symsize;
- memcpy((void *)symstart, (void *)symaddr, symsize);
- memcpy((void *)strstart, (void *)straddr, strsize);
- } else if ((void *)symtabp > RELOC(void *, &end) &&
- (void *)strtabp < RELOC(void *, &end)) {
- symstart = RELOC(Elf32_Addr, &end);
- strstart = symstart + symsize;
- memcpy((void *)symstart, (void *)symaddr, symsize);
- memcpy((void *)strstart, (void *)straddr, strsize);
- } else if ((void *)symtabp < RELOC(void *, &end) &&
- (void *)strtabp > RELOC(void *, &end)) {
- strstart = RELOC(Elf32_Addr, &end);
- symstart = strstart + strsize;
- memcpy((void *)strstart, (void *)straddr, strsize);
- memcpy((void *)symstart, (void *)symaddr, symsize);
+ if ((void *)(uintptr_t)symaddr < endp &&
+ (void *)(uintptr_t)straddr < endp) {
+ cp1src = symaddr; cp1size = symsize;
+ cp2src = straddr; cp2size = strsize;
+ } else if ((void *)(uintptr_t)symaddr > endp &&
+ (void *)(uintptr_t)straddr < endp) {
+ cp1src = symaddr; cp1size = symsize;
+ cp2src = straddr; cp2size = strsize;
+ } else if ((void *)(uintptr_t)symaddr < endp &&
+ (void *)(uintptr_t)straddr > endp) {
+ cp1src = straddr; cp1size = strsize;
+ cp2src = symaddr; cp2size = symsize;
} else {
- /* symtabp and strtabp are both over end */
- if (symtabp < strtabp) {
- symstart = RELOC(Elf32_Addr, &end);
- strstart = symstart + symsize;
- memcpy((void *)symstart, (void *)symaddr, symsize);
- memcpy((void *)strstart, (void *)straddr, strsize);
+ /* symaddr and straddr are both over end */
+ if (symaddr < straddr) {
+ cp1src = symaddr; cp1size = symsize;
+ cp2src = straddr; cp2size = strsize;
} else {
- strstart = RELOC(Elf32_Addr, &end);
- symstart = strstart + strsize;
- memcpy((void *)strstart, (void *)straddr, strsize);
- memcpy((void *)symstart, (void *)symaddr, symsize);
+ cp1src = straddr; cp1size = strsize;
+ cp2src = symaddr; cp2size = symsize;
}
}
- *RELOC(bool *, &has_syms) = true;
- *RELOC(int *, &esym) =
- (int)(symstart + symsize + strsize + KERNBASE);
+ cp1dst = (locore_Elf_Addr)(uintptr_t)endp;
+ cp2dst = (locore_Elf_Addr)(uintptr_t)endp + cp1size;
+
+ (void)memcpy((void *)(uintptr_t)cp1dst,
+ (void *)(uintptr_t)cp1src, cp1size);
+ (void)memcpy((void *)(uintptr_t)cp2dst,
+ (void *)(uintptr_t)cp2src, cp2size);
+
+ symstart = (cp1src == symaddr) ? cp1dst : cp2dst;
+ strstart = (cp1src == straddr) ? cp1dst : cp2dst;
- ms->s_symstart = (void *)(symstart + KERNBASE);
+ ms->s_symstart = symstart + kernbase;
ms->s_symsize = symsize;
- ms->s_strstart = (void *)(strstart + KERNBASE);
+ ms->s_strstart = strstart + kernbase;
ms->s_strsize = strsize;
+
+ *has_symsp = true;
+ *esymp = (int *)((uintptr_t)endp + symsize + strsize + kernbase);
+
}
void
@@ -313,7 +336,12 @@ multiboot2_pre_reloc(char *mbi)
efi_exit_bs(efi_systbl, efi_ih);
if (mbt_elf)
- copy_syms(mbt_elf);
+ multiboot2_copy_syms(mbt_elf,
+ RELOC(struct multiboot_symbols *, &Multiboot_Symbols),
+ RELOC(bool *, &has_syms),
+ RELOC(int **, &esym),
+ RELOC(void *, &end),
+ KERNBASE);
return;
}
@@ -460,11 +488,15 @@ mbi_basic_meminfo(struct multiboot_tag_b
/* Make sure we don't override user-set variables. */
if (biosbasemem == 0) {
biosbasemem = mbt->mem_lower;
+#ifdef __i386__
biosmem_implicit = 1;
+#endif
}
if (biosextmem == 0) {
biosextmem = mbt->mem_upper;
+#ifdef __i386__
biosmem_implicit = 1;
+#endif
}
return;
@@ -985,28 +1017,45 @@ bool
multiboot2_ksyms_addsyms_elf(void)
{
struct multiboot_symbols *ms = &Multiboot_Symbols;
+ vaddr_t symstart = (vaddr_t)ms->s_symstart;
+ vaddr_t strstart = (vaddr_t)ms->s_strstart;
+ Elf_Ehdr ehdr;
- if (!multiboot2_enabled)
+ if (!multiboot2_enabled || !has_syms)
return false;
- if (has_syms) {
- Elf32_Ehdr ehdr;
+ KASSERT(esym != 0);
- KASSERT(esym != 0);
+#ifdef __LP64__
+ /* Adjust pointer as 64 bits */
+ symstart &= 0xffffffff;
+ symstart |= ((vaddr_t)KERNBASE_HI << 32);
+ strstart &= 0xffffffff;
+ strstart |= ((vaddr_t)KERNBASE_HI << 32);
+#endif
- memcpy(ehdr.e_ident, ELFMAG, SELFMAG);
- ehdr.e_ident[EI_CLASS] = ELFCLASS32;
- ehdr.e_ident[EI_DATA] = ELFDATA2LSB;
- ehdr.e_ident[EI_VERSION] = EV_CURRENT;
- ehdr.e_type = ET_EXEC;
- ehdr.e_machine = EM_386;
- ehdr.e_version = 1;
- ehdr.e_ehsize = sizeof(ehdr);
-
- ksyms_addsyms_explicit((void *)&ehdr,
- ms->s_symstart, ms->s_symsize,
- ms->s_strstart, ms->s_strsize);
- }
+ memset(&ehdr, 0, sizeof(ehdr));
+ memcpy(ehdr.e_ident, ELFMAG, SELFMAG);
+ ehdr.e_ident[EI_CLASS] = ELFCLASS;
+ ehdr.e_ident[EI_DATA] = ELFDATA2LSB;
+ ehdr.e_ident[EI_VERSION] = EV_CURRENT;
+ ehdr.e_ident[EI_OSABI] = ELFOSABI_SYSV;
+ ehdr.e_ident[EI_ABIVERSION] = 0;
+ ehdr.e_type = ET_EXEC;
+#ifdef __amd64__
+ ehdr.e_machine = EM_X86_64;
+#elif __i386__
+ ehdr.e_machine = EM_386;
+#else
+ #error "Unknown ELF machine type"
+#endif
+ ehdr.e_version = 1;
+ ehdr.e_entry = (Elf_Addr)&start;
+ ehdr.e_ehsize = sizeof(ehdr);
+
+ ksyms_addsyms_explicit((void *)&ehdr,
+ (void *)symstart, ms->s_symsize,
+ (void *)strstart, ms->s_strsize);
- return has_syms;
+ return true;
}