Module Name:    src
Committed By:   manu
Date:           Thu Jan  9 00:42:24 UTC 2020

Modified Files:
        src/sys/arch/amd64/amd64: locore.S machdep.c
        src/sys/arch/amd64/conf: GENERIC files.amd64 kern.ldscript

Log Message:
Roll back multiboot2 support for amd64, as requested by core


To generate a diff of this commit:
cvs rdiff -u -r1.197 -r1.198 src/sys/arch/amd64/amd64/locore.S
cvs rdiff -u -r1.344 -r1.345 src/sys/arch/amd64/amd64/machdep.c
cvs rdiff -u -r1.553 -r1.554 src/sys/arch/amd64/conf/GENERIC
cvs rdiff -u -r1.114 -r1.115 src/sys/arch/amd64/conf/files.amd64
cvs rdiff -u -r1.30 -r1.31 src/sys/arch/amd64/conf/kern.ldscript

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/locore.S
diff -u src/sys/arch/amd64/amd64/locore.S:1.197 src/sys/arch/amd64/amd64/locore.S:1.198
--- src/sys/arch/amd64/amd64/locore.S:1.197	Wed Jan  8 20:59:18 2020
+++ src/sys/arch/amd64/amd64/locore.S	Thu Jan  9 00:42:24 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.197 2020/01/08 20:59:18 skrll Exp $	*/
+/*	$NetBSD: locore.S,v 1.198 2020/01/09 00:42:24 manu Exp $	*/
 
 /*
  * Copyright-o-rama!
@@ -158,7 +158,6 @@
 
 #include "opt_compat_netbsd.h"
 #include "opt_compat_netbsd32.h"
-#include "opt_multiboot.h"
 #include "opt_xen.h"
 #include "opt_svs.h"
 
@@ -178,13 +177,6 @@
 #include <machine/frameasm.h>
 #include <machine/cputypes.h>
 
-#ifndef XENPV
-#include <arch/i386/include/multiboot.h>
-#endif 
-
-#define CODE_SEGMENT	0x08
-#define DATA_SEGMENT	0x10
-
 #if NLAPIC > 0
 #include <machine/i82489reg.h>
 #endif
@@ -432,50 +424,6 @@ END(farjmp64)
 	.space	512
 tmpstk:
 
-.section multiboot,"a"
-#if defined(MULTIBOOT)
-	.align	8
-	.globl	Multiboot2_Header
-_C_LABEL(Multiboot2_Header):
-	.int	MULTIBOOT2_HEADER_MAGIC
-	.int	MULTIBOOT2_ARCHITECTURE_I386
-	.int	Multiboot2_Header_end - Multiboot2_Header
-	.int	-(MULTIBOOT2_HEADER_MAGIC + MULTIBOOT2_ARCHITECTURE_I386 \
-		+ (Multiboot2_Header_end - Multiboot2_Header))
-
-	.int	1	/* MULTIBOOT_HEADER_TAG_INFORMATION_REQUEST */
-	.int	12	/* sizeof(multiboot_header_tag_information_request) */
-			/* + sizeof(uint32_t) * requests */
-	.int	4	/* MULTIBOOT_TAG_TYPE_BASIC_MEMINFO */
-	.align	8
-
-	.int	3	/* MULTIBOOT_HEADER_TAG_ENTRY_ADDRESS */
-	.int	16	/* sizeof(struct multiboot_tag_efi64) */
-	.quad	(multiboot2_entry - KERNBASE)
-	.align	8
-
-	.int	9	/* MULTIBOOT_HEADER_TAG_ENTRY_ADDRESS_EFI64 */
-	.int	16	/* sizeof(struct multiboot_tag_efi64) */
-	.quad	(multiboot2_entry - KERNBASE)
-	.align	8
-
-#if notyet
-	/*
-	 * Could be used to get an early console for debug,
-	 * but this is broken.
-	 */
-	.int	7	/* MULTIBOOT_HEADER_TAG_EFI_BS */
-	.int	8	/* sizeof(struct multiboot_tag) */
-	.align	8
-#endif
-
-	.int	0	/* MULTIBOOT_HEADER_TAG_END */
-	.int	8	/* sizeof(struct multiboot_tag) */
-	.align	8
-	.globl	Multiboot2_Header_end
-_C_LABEL(Multiboot2_Header_end):
-#endif	/* MULTIBOOT */
-
 /*
  * Some hackage to deal with 64bit symbols in 32 bit mode.
  * This may not be needed if things are cleaned up a little.
@@ -492,700 +440,6 @@ ENTRY(start)
 	/* Warm boot */
 	movw	$0x1234,0x472
 
-#if defined(MULTIBOOT)
-	jmp	.Lnative_loader
-
-
-multiboot2_entry:
-	.code64
-	/*
-	 * multiboot2 entry point. We are left here without
-	 * stack and with no idea of where we were loaded in memory.
-	 * The only inputs are
-	 * %eax MULTIBOOT2_BOOTLOADER_MAGIC
-	 * %ebx pointer to multiboot_info
-	 *
-	 * Here we will:
-	 * - copy the kernel to 0x200000 (KERNTEXTOFF - KERNBASE)
-	 *	as almost all the code in locore.S assume it is there. 
-	 *	This is derived from 
-	 *	src/sys/arch/i386/stand/efiboot/bootx64/startprog64.S
-	 * - copy multiboot_info, as done in multiboot_pre_reloc() from
-	 *	src/sys/arch/x86/x86/multiboot2.c
-	 *	Unfortunately we cannot call that function as there is 
-	 *	no simple way to build it as 32 bit code in a 64 bit kernel.
-	 * - Copy ELF symbols, also as in multiboot_pre_reloc()
-	 */
-
-	cli
-
-	/*
-	 * Discover our load address and use it to get start address
-	 */
-	mov	$_RELOC(tmpstk),%rsp
-	call	next
-next:	pop	%r8
-	sub	$(next - start), %r8
-
-	/*
-	 * Save multiboot_info for later. We cannot use	
-	 * temporary stack for that since we are going to
-	 * overwrite it.
-	 */
-	movl	%ebx, (multiboot2_info_ptr - start)(%r8)
-
-	/*
-	 * Get relocated multiboot2_loader entry point in %r9
-	 */
-	mov	$(KERNTEXTOFF - KERNBASE), %r9
-	add	$(multiboot2_loader - kernel_text), %r9
-
-	/* Copy kernel */
-	mov	$(KERNTEXTOFF - KERNBASE), %rdi			/* dest */
-	mov	%r8, %rsi		
-	sub	$(start - kernel_text), %rsi			/* src */
-	mov	$(__kernel_end - kernel_text), %rcx		/* size */
-	mov	%rcx, %r12		
-	movq	%rdi, %r11		/* for misaligned check */
-
-#if !defined(NO_OVERLAP)
-	movq	%rdi, %r13
-	subq	%rsi, %r13
-#endif
-
-	shrq	$3, %rcx		/* count for copy by words */
-	jz	8f			/* j if less than 8 bytes */
-
-	lea	-8(%rdi, %r12), %r14	/* target address of last 8 */
-	mov	-8(%rsi, %r12), %r15	/* get last word */
-#if !defined(NO_OVERLAP)
-	cmpq	%r12, %r13		/* overlapping? */
-	jb	10f
-#endif
-
-/*
- * Non-overlaping, copy forwards.
- * Newer Intel cpus (Nehalem) will do 16byte read/write transfers
- * if %ecx is more than 76.
- * AMD might do something similar some day.
- */
-	and	$7, %r11		/* destination misaligned ? */
-	jnz	12f
-	rep
-	movsq
-	mov	%r15, (%r14)		/* write last word */
-	jmp	.Lcopy_done
-
-/*
- * Destination misaligned
- * AMD say it is better to align the destination (not the source).
- * This will also re-align copies if the source and dest are both
- * misaligned by the same amount)
- * (I think Nehalem will use its accelerated copy if the source
- * and destination have the same alignment.)
- */
-12:
-	lea	-9(%r11, %r12), %rcx	/* post re-alignment count */
-	neg	%r11			/* now -1 .. -7 */
-	mov	(%rsi), %r12		/* get first word */
-	mov	%rdi, %r13		/* target for first word */
-	lea	8(%rsi, %r11), %rsi
-	lea	8(%rdi, %r11), %rdi
-	shr	$3, %rcx
-	rep
-	movsq
-	mov	%r12, (%r13)		/* write first word */
-	mov	%r15, (%r14)		/* write last word */
-	jmp	.Lcopy_done
-
-#if !defined(NO_OVERLAP)
-/* Must copy backwards.
- * Reverse copy is probably easy to code faster than 'rep movds'
- * since that requires (IIRC) an extra clock every 3 iterations (AMD).
- * However I don't suppose anything cares that much!
- * The big cost is the std/cld pair - reputedly 50+ cycles on Netburst P4.
- * The copy is aligned with the buffer start (more likely to
- * be a multiple of 8 than the end).
- */
-10:
-	lea	-8(%rsi, %rcx, 8), %rsi
-	lea	-8(%rdi, %rcx, 8), %rdi
-	std
-	rep
-	movsq
-	cld
-	mov	%r15, (%r14)	/* write last bytes */
-	jmp	.Lcopy_done
-#endif
-
-/* Less than 8 bytes to copy, copy by bytes */
-/* Intel Nehalem optimise 'rep movsb' for <= 7 bytes (9-15 clocks).
- * For longer transfers it is 50+ !
- */
-8:	mov	%r12, %rcx
-
-#if !defined(NO_OVERLAP)
-	cmpq	%r12, %r13	/* overlapping? */
-	jb	81f
-#endif
-
-	/* nope, copy forwards. */
-	rep
-	movsb
-	jmp	.Lcopy_done
-
-#if !defined(NO_OVERLAP)
-/* Must copy backwards */
-81:
-	lea	-1(%rsi, %rcx), %rsi
-	lea	-1(%rdi, %rcx), %rdi
-	std
-	rep
-	movsb
-	cld
-#endif
-	/* End of copy kernel */
-.Lcopy_done:
-
-	mov	%r8, %rdi	/* %rdi: loaded start address */
-	mov	%r9, %rsi	/* %rsi: kernel entry address */
-
-	/* Prepare jump address */
-	lea	(multiboot2_loader32a - start)(%rdi), %rax
-	movl	%eax, (multiboot2_loader32r - start)(%rdi)
-
-	/* Setup GDT */
-	lea	(gdt - start)(%rdi), %rax
-	mov	%rax, (gdtrr - start)(%rdi)
-	lgdt	(gdtr - start)(%rdi)
-
-	/* Jump to set %cs */
-	ljmp	*(multiboot2_loader32r - start)(%rdi)
-
-	.align	4
-	.code32
-multiboot2_loader32a:
-	movl	$DATA_SEGMENT, %eax
-	movw	%ax, %ds
-	movw	%ax, %es
-	movw	%ax, %fs
-	movw	%ax, %gs
-	movw	%ax, %ss
-
-	/* Already set new stack pointer */
-	movl	%esp, %ebp
-
-	/* Disable Paging in CR0 */
-	movl	%cr0, %eax
-	andl	$(~CR0_PG), %eax
-	movl	%eax, %cr0
-
-	/* Disable PAE in CR4 */
-	movl	%cr4, %eax
-	andl	$(~CR4_PAE), %eax
-	movl	%eax, %cr4
-
-	jmp	multiboot2_loader32b
-
-	.align	4
-multiboot2_loader32b:
-	xor	%eax, %eax
-
-	/* 
-	* Reload multiboot info from target location
-	*/	
-	movl	_RELOC(multiboot2_info_ptr), %ebx
-	call	*%esi
-
-	.align	16
-multiboot2_loader32r:
-	.long	0
-	.long	CODE_SEGMENT
-	.align	16
-gdt:
-	.long	0, 0
-	.byte	0xff, 0xff, 0x00, 0x00, 0x00, 0x9f, 0xcf, 0x00
-	.byte	0xff, 0xff, 0x00, 0x00, 0x00, 0x93, 0xcf, 0x00
-gdtr:
-	.word	gdtr - gdt
-gdtrr:
-	.quad	0
-
-multiboot2_info_ptr:
-	.long	0
-	
-	.align 16
-multiboot2_loader:
-	/*
-	 * Here we would like to call multiboot2_pre_reloc() but
-	 * we do not yet run in long mode, which means we need
-	 * a 32 bit version of that function. Unfortunately, 
-	 * mixing 32-bit and 64-bit object file at link time
-	 * does not work. As a result, we need to do the job
-	 * of multiboot2_pre_reloc() here in assembly.
-	 */
-#if multiboot2_pre_reloc_would_be_built_as_ia32
-	movl	$_RELOC(tmpstk),%esp
-	mov	%ebx,%edi	/* Address of Multiboot information */
-	call	_C_LABEL(multiboot2_pre_reloc)
-#else
-	/*
-	 * Copy multiboot_info
-	 */
-	movl	$_RELOC(multiboot_info),%edi
-	movl	%ebx,%esi
-	movl	(%ebx),%ecx
-	shr	$2,%ecx
-	rep
-	movsl
-
-	/*
-	 * Set multiboot2_enabled
-	 */
-	movl	$1,%eax
-	movl	%eax,RELOC(multiboot2_enabled)
-
-	/*
-	 * Look for MULTIBOOT_TAG_TYPE_ELF_SECTIONS
-	 */
-	movl	$_RELOC(multiboot_info),%esi
-	movl	(%esi),%ecx	/* multiboot_info size */
-	movl	%esi,%edx
-	addl	%ecx,%edx	/* %edx: end of multiboot_info */
-	addl	$8,%esi		/* skip two words of multiboot_info header */
-mbt_loop:
-	movl	(%esi),%ebx	/* mbt->type */
-	cmpl	$9,%ebx		/* 9 for MULTIBOOT_TAG_TYPE_ELF_SECTIONS */
-	je	found_elf_sections
-
-	movl	4(%esi),%eax	/* mbt->size */
-	addl	%eax,%esi
-	addl	$7,%esi		/* roundup(%esi,8) */
-	andl	$~7,%esi
-
-	cmpl	%edx,%esi
-	jle	mbt_loop
-	jmp	elf_sections_done
-
-found_elf_sections:
-	movl	$0,%eax
-	movl	%esp,%ebp			/* %ebp is esymp */
-	push	%eax
-	push	$KERNBASE_LO			/* kernbase */
-	push	$_RELOC(end)			/* void *end */
-	push	%ebp				/* int **esymp */
-	push	$_RELOC(has_syms)		/* bool *has_symsp */
-	push	$_RELOC(Multiboot_Symbols)/* struct multiboot_symbol *ms */
-	push	%esi		/* struct multiboot_tag_elf_sections *mbt_elf */
-	call	multiboot2_copy_syms32
-
-	/* Asjust esym as a 64 bit pointer if esymp was set */
-	movl	(%ebp),%eax
-	testl	%eax,%eax		/* esymp = NULL? */
-	jz	elf_sections_done
-
-	movl	$RELOC(esym),%ebp
-	movl	%eax,(%ebp)
-	movl	$KERNBASE_HI,4(%ebp)
-
-	jmp	elf_sections_done
-
-	/*
-	 * This is multiboot2_copy_syms() from 
-	 * src/sys/arch/x86/x86/multiboot2.c
-	 * built with -m32 -mcmodel=32 -D_LOCORE_64
-	 */
-multiboot2_copy_syms32:
-	push	%ebp
-	mov	%esp,%ebp
-	push	%edi
-	push	%esi
-	push	%ebx
-	sub	$0x20,%esp
-	mov	0x8(%ebp),%esi
-	/* for (i = 0; i < mbt_elf->num && symtabp == NULL && */
-	mov	0x8(%esi),%ebx
-	test	%ebx,%ebx
-	je	copy_syms_4ce
-	add	$0x14,%esi
-	mov	%esi,%eax
-	xor	%edx,%edx
-	jmp	copy_syms_3a0
-copy_syms_395:
-	cmp	%edx,%ebx
-	jbe	copy_syms_4ce
-copy_syms_39d:
-	add	$0x40,%eax
-copy_syms_3a0:
-	add	$0x1,%edx
-	/* 	if ((shdrp->sh_type == SHT_SYMTAB) && */
-	cmpl	$0x2,0x4(%eax)
-	jne	copy_syms_395
-	/* 		shdrp->sh_link != SHN_UNDEF) { */
-	mov	0x28(%eax),%ecx
-	/* 	if ((shdrp->sh_type == SHT_SYMTAB) && */
-	test	%ecx,%ecx
-	je	copy_syms_395
-	/* 			[shdrp->sh_link]; */
-	shl	$0x6,%ecx
-	/* 		shdrp2 = &((locore_Elf_Shdr *)mbt_elf->sections) */
-	add	%esi,%ecx
-	/* 		if (shdrp2->sh_type == SHT_STRTAB) { */
-	cmpl	$0x3,0x4(%ecx)
-	jne	copy_syms_395
-	/* for (i = 0; i < mbt_elf->num && symtabp == NULL && */
-	cmp	%ebx,%edx
-	jae	copy_syms_6d1
-	test	%eax,%eax
-	je	copy_syms_608
-	/* if (symtabp == NULL || strtabp == NULL) */
-copy_syms_3cb:
-	test	%ecx,%ecx
-	lea	0x0(%esi),%esi
-	je	copy_syms_4ce
-	/* symaddr = symtabp->sh_addr; */
-	mov	0x10(%eax),%edi
-	mov	%edi,-0x10(%ebp)
-	mov	0x14(%eax),%ebx
-	mov	%ebx,-0x18(%ebp)
-	/* straddr = strtabp->sh_addr; */
-	mov	0x10(%ecx),%esi
-	mov	%esi,-0x14(%ebp)
-	mov	0x14(%ecx),%ebx
-	mov	%ebx,-0x20(%ebp)
-	/* symsize = symtabp->sh_size; */
-	mov	0x20(%eax),%ebx
-	/* strsize = strtabp->sh_size; */
-	mov	0x20(%ecx),%eax
-	mov	%eax,-0x1c(%ebp)
-	cmp	0x18(%ebp),%edi
-	jae	copy_syms_4d6
-	cmp	%esi,0x18(%ebp)
-	ja	copy_syms_4e0
-	jae	copy_syms_54d
-	/* cp2dst = (locore_Elf_Addr)(uintptr_t)endp + cp1size; */
-copy_syms_40f:
-	mov	-0x1c(%ebp),%ecx
-	mov	%ecx,%eax
-	xor	%edx,%edx
-	/* cp1dst = (locore_Elf_Addr)(uintptr_t)endp; */
-	mov	0x18(%ebp),%esi
-	xor	%edi,%edi
-	/* cp2dst = (locore_Elf_Addr)(uintptr_t)endp + cp1size; */
-	add	%esi,%eax
-	adc	%edi,%edx
-	mov	%eax,-0x2c(%ebp)
-	mov	%edx,-0x28(%ebp)
-	/* (void)memcpy((void *)(uintptr_t)cp1dst, */
-	mov	%ecx,%eax
-	mov	0x18(%ebp),%edi
-	mov	-0x14(%ebp),%esi
-	cmp	$0x4,%ecx
-	jae	copy_syms_5e8
-copy_syms_436:
-	test	$0x2,%al
-	je	copy_syms_43c
-	movsw	%ds:(%esi),%es:(%edi)
-copy_syms_43c:
-	test	$0x1,%al
-	je	copy_syms_441
-	movsb	%ds:(%esi),%es:(%edi)
-	/* (void)memcpy((void *)(uintptr_t)cp2dst, */
-copy_syms_441:
-	mov	%ebx,%eax
-	mov	0x18(%ebp),%edi
-	mov	-0x1c(%ebp),%esi
-	add	%esi,%edi
-	mov	-0x10(%ebp),%esi
-	cmp	$0x4,%ebx
-	jae	copy_syms_5c4
-copy_syms_457:
-	test	$0x2,%al
-	je	copy_syms_45d
-	movsw	%ds:(%esi),%es:(%edi)
-copy_syms_45d:
-	test	$0x1,%al
-	je	copy_syms_462
-	movsb	%ds:(%esi),%es:(%edi)
-	/* symstart = (cp1src == symaddr) ? cp1dst : cp2dst; */
-copy_syms_462:
-	mov	-0x18(%ebp),%edx
-	mov	-0x20(%ebp),%edi
-	xor	%edi,%edx
-	mov	-0x10(%ebp),%eax
-	mov	-0x14(%ebp),%ecx
-	xor	%ecx,%eax
-	or	%eax,%edx
-	je	copy_syms_6ba
-	mov	-0x2c(%ebp),%eax
-	mov	%eax,-0x24(%ebp)
-	mov	%ecx,-0x10(%ebp)
-	mov	%edi,-0x18(%ebp)
-	/* strstart = (cp1src == straddr) ? cp1dst : cp2dst; */
-copy_syms_486:
-	mov	-0x20(%ebp),%edx
-	xor	-0x18(%ebp),%edx
-	mov	-0x14(%ebp),%eax
-	xor	-0x10(%ebp),%eax
-	or	%eax,%edx
-	je	copy_syms_545
-copy_syms_49a:
-	mov	-0x2c(%ebp),%esi
-	/* ms->s_symstart = symstart + kernbase; */
-copy_syms_49d:
-	mov	-0x24(%ebp),%eax
-	add	0x1c(%ebp),%eax
-	mov	0xc(%ebp),%edi
-	mov	%eax,(%edi)
-	/* ms->s_symsize	= symsize; */
-	mov	%edi,%eax
-	mov	%ebx,0x4(%edi)
-	/* ms->s_strstart = strstart + kernbase; */
-	add	0x1c(%ebp),%esi
-	mov	%esi,0x8(%edi)
-	/* ms->s_strsize	= strsize; */
-	mov	-0x1c(%ebp),%edi
-	mov	%edi,0xc(%eax)
-	/* *has_symsp = true; */
-	mov	0x10(%ebp),%eax
-	movb	$0x1,(%eax)
-	/* *esymp = (int *)((uintptr_t)endp + symsize + strsize + kernbase); */
-	mov	0x18(%ebp),%eax
-	add	0x1c(%ebp),%eax
-	add	%eax,%ebx
-	add	%edi,%ebx
-	mov	0x14(%ebp),%eax
-	mov	%ebx,(%eax)
-copy_syms_4ce:
-	add	$0x20,%esp
-	pop	%ebx
-	pop	%esi
-	pop	%edi
-	pop	%ebp
-	ret	
-copy_syms_4d6:
-	jbe	copy_syms_54d
-	mov	-0x14(%ebp),%eax
-	cmp	%eax,0x18(%ebp)
-	jbe	copy_syms_54d
-	/* cp1dst = (locore_Elf_Addr)(uintptr_t)endp; */
-copy_syms_4e0:
-	mov	0x18(%ebp),%eax
-	mov	%eax,-0x24(%ebp)
-	/* cp2dst = (locore_Elf_Addr)(uintptr_t)endp + cp1size; */
-	mov	%ebx,%eax
-	xor	%edx,%edx
-	/* cp1dst = (locore_Elf_Addr)(uintptr_t)endp; */
-	mov	0x18(%ebp),%esi
-	xor	%edi,%edi
-	/* cp2dst = (locore_Elf_Addr)(uintptr_t)endp + cp1size; */
-	add	%esi,%eax
-	adc	%edi,%edx
-	mov	%eax,-0x2c(%ebp)
-	mov	%edx,-0x28(%ebp)
-	/* (void)memcpy((void *)(uintptr_t)cp1dst, */
-	mov	%ebx,%eax
-	mov	0x18(%ebp),%edi
-	mov	-0x10(%ebp),%esi
-	cmp	$0x4,%ebx
-	jae	copy_syms_5a8
-copy_syms_50a:
-	test	$0x2,%al
-	jne	copy_syms_57b
-	test	$0x1,%al
-	jne	copy_syms_578
-	/* (void)memcpy((void *)(uintptr_t)cp2dst, */
-copy_syms_512:
-	mov	-0x1c(%ebp),%ecx
-	mov	%ecx,%eax
-	mov	0x18(%ebp),%edi
-	add	%ebx,%edi
-	mov	-0x14(%ebp),%esi
-	cmp	$0x4,%ecx
-	jae	copy_syms_584
-copy_syms_524:
-	test	$0x2,%al
-	jne	copy_syms_56c
-	test	$0x1,%al
-	je	copy_syms_486
-copy_syms_530:
-	movsb	%ds:(%esi),%es:(%edi)
-	/* strstart = (cp1src == straddr) ? cp1dst : cp2dst; */
-	mov	-0x20(%ebp),%edx
-	xor	-0x18(%ebp),%edx
-	mov	-0x14(%ebp),%eax
-	xor	-0x10(%ebp),%eax
-	or	%eax,%edx
-	jne	copy_syms_49a
-copy_syms_545:
-	mov	0x18(%ebp),%esi
-	jmp	copy_syms_49d
-	/* 	if (symaddr < straddr) { */
-copy_syms_54d:
-	mov	-0x20(%ebp),%edi
-	cmp	%edi,-0x18(%ebp)
-	jb	copy_syms_4e0
-	ja	copy_syms_40f
-	mov	-0x14(%ebp),%edi
-	cmp	%edi,-0x10(%ebp)
-	jb	copy_syms_4e0
-	jmp	copy_syms_40f
-	/* (void)memcpy((void *)(uintptr_t)cp2dst, */
-copy_syms_56c:
-	movsw	%ds:(%esi),%es:(%edi)
-	test	$0x1,%al
-	je	copy_syms_486
-	jmp	copy_syms_530
-	/* (void)memcpy((void *)(uintptr_t)cp1dst, */
-copy_syms_578:
-	movsb	%ds:(%esi),%es:(%edi)
-	jmp	copy_syms_512
-copy_syms_57b:
-	movsw	%ds:(%esi),%es:(%edi)
-	test	$0x1,%al
-	nop
-	je	copy_syms_512
-	jmp	copy_syms_578
-	/* (void)memcpy((void *)(uintptr_t)cp2dst, */
-copy_syms_584:
-	test	$0x1,%edi
-	jne	copy_syms_650
-copy_syms_590:
-	test	$0x2,%edi
-	jne	copy_syms_63c
-copy_syms_59c:
-	mov	%eax,%ecx
-	shr	$0x2,%ecx
-	rep movsl %ds:(%esi),%es:(%edi)
-	jmp	copy_syms_524
-	/* (void)memcpy((void *)(uintptr_t)cp1dst, */
-copy_syms_5a8:
-	test	$0x1,%edi
-	jne	copy_syms_626
-copy_syms_5b0:
-	test	$0x2,%edi
-	jne	copy_syms_615
-copy_syms_5b8:
-	mov	%eax,%ecx
-	shr	$0x2,%ecx
-	rep movsl %ds:(%esi),%es:(%edi)
-	jmp	copy_syms_50a
-	/* (void)memcpy((void *)(uintptr_t)cp2dst, */
-copy_syms_5c4:
-	test	$0x1,%edi
-	jne	copy_syms_666
-copy_syms_5d0:
-	test	$0x2,%edi
-	jne	copy_syms_6a6
-copy_syms_5dc:
-	mov	%eax,%ecx
-	shr	$0x2,%ecx
-	rep movsl %ds:(%esi),%es:(%edi)
-	jmp	copy_syms_457
-	/* (void)memcpy((void *)(uintptr_t)cp1dst, */
-copy_syms_5e8:
-	test	$0x1,%edi
-	jne	copy_syms_68d
-copy_syms_5f4:
-	test	$0x2,%edi
-	jne	copy_syms_679
-copy_syms_5fc:
-	mov	%eax,%ecx
-	shr	$0x2,%ecx
-	rep movsl %ds:(%esi),%es:(%edi)
-	jmp	copy_syms_436
-	/* for (i = 0; i < mbt_elf->num && symtabp == NULL && */
-copy_syms_608:
-	test	%ecx,%ecx
-	jne	copy_syms_4ce
-	jmp	copy_syms_39d
-	/* (void)memcpy((void *)(uintptr_t)cp1dst, */
-copy_syms_615:
-	movzwl (%esi),%edx
-	mov	%dx,(%edi)
-	add	$0x2,%edi
-	add	$0x2,%esi
-	sub	$0x2,%eax
-	jmp	copy_syms_5b8
-copy_syms_626:
-	movzbl (%esi),%eax
-	mov	%al,(%edi)
-	mov	0x18(%ebp),%eax
-	lea	0x1(%eax),%edi
-	add	$0x1,%esi
-	lea	-0x1(%ebx),%eax
-	jmp	copy_syms_5b0
-	/* (void)memcpy((void *)(uintptr_t)cp2dst, */
-copy_syms_63c:
-	movzwl (%esi),%edx
-	mov	%dx,(%edi)
-	add	$0x2,%edi
-	add	$0x2,%esi
-	sub	$0x2,%eax
-	jmp	copy_syms_59c
-copy_syms_650:
-	movzbl (%esi),%eax
-	mov	%al,(%edi)
-	add	$0x1,%edi
-	add	$0x1,%esi
-	mov	-0x1c(%ebp),%eax
-	sub	$0x1,%eax
-	jmp	copy_syms_590
-copy_syms_666:
-	movzbl (%esi),%eax
-	mov	%al,(%edi)
-	add	$0x1,%edi
-	add	$0x1,%esi
-	lea	-0x1(%ebx),%eax
-	jmp	copy_syms_5d0
-	/* (void)memcpy((void *)(uintptr_t)cp1dst, */
-copy_syms_679:
-	movzwl (%esi),%edx
-	mov	%dx,(%edi)
-	add	$0x2,%edi
-	add	$0x2,%esi
-	sub	$0x2,%eax
-	jmp	copy_syms_5fc
-copy_syms_68d:
-	movzbl (%esi),%eax
-	mov	%al,(%edi)
-	mov	0x18(%ebp),%eax
-	lea	0x1(%eax),%edi
-	add	$0x1,%esi
-	mov	-0x1c(%ebp),%eax
-	sub	$0x1,%eax
-	jmp	copy_syms_5f4
-	/* (void)memcpy((void *)(uintptr_t)cp2dst, */
-copy_syms_6a6:
-	movzwl (%esi),%edx
-	mov	%dx,(%edi)
-	add	$0x2,%edi
-	add	$0x2,%esi
-	sub	$0x2,%eax
-	jmp	copy_syms_5dc
-copy_syms_6ba:
-	mov	-0x14(%ebp),%eax
-	mov	%eax,-0x10(%ebp)
-	mov	-0x20(%ebp),%eax
-	mov	%eax,-0x18(%ebp)
-	/* cp1dst = (locore_Elf_Addr)(uintptr_t)endp; */
-	mov	0x18(%ebp),%eax
-	mov	%eax,-0x24(%ebp)
-	jmp	copy_syms_486
-	/* if (symtabp == NULL || strtabp == NULL) */
-copy_syms_6d1:
-	test	%eax,%eax
-	jne	copy_syms_3cb
-	jmp	copy_syms_4ce
-elf_sections_done:
-#endif
-
-	jmp	.Lbegin
-
-
-#endif /* MULTIBOOT */
-
-.Lnative_loader:
 	/*
 	 * Load parameters from the stack (32 bits):
 	 *     boothowto, [bootdev], bootinfo, esym, biosextmem, biosbasemem
@@ -1308,7 +562,6 @@ elf_sections_done:
 	 * Done with the parameters!
 	 */
 
-.Lbegin:
 	/* First, reset the PSL. */
 	pushl	$PSL_MBO
 	popfl
@@ -1624,16 +877,6 @@ longmode_hi:
 	leaq	(USPACE-FRAMESIZE)(%rax),%rsp
 	xorq	%rbp,%rbp			/* mark end of frames */
 
-#if defined(MULTIBOOT)
-	/* It is now safe to parse the Multiboot information structure
-	 * we saved before from C code.  Note that we cannot delay its
-	 * parsing any more because initgdt (called below) needs to make
-	 * use of this information.
-	 */
-	pushq	%rsi
-	call	_C_LABEL(multiboot2_post_reloc)
-	popq	%rsi
-#endif 
 	xorw	%ax,%ax
 	movw	%ax,%gs
 	movw	%ax,%fs
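
[Editor's note, not part of the commit] For readers skimming the removed
locore.S hunks: the ".section multiboot" block emitted a Multiboot2 header by
hand -- magic, architecture, total length, checksum, then a list of 8-byte
aligned tags terminated by a type-0 tag.  The checksum is chosen so the first
four 32-bit words sum to zero modulo 2^32, which is why the assembly stores
the negated sum.  Below is a rough stand-alone C rendering of that layout;
the struct and function names are illustrative and not from the tree, and
only the constants mirror the spec values visible in the diff.

	#include <stdint.h>
	#include <stdio.h>

	#define MULTIBOOT2_HEADER_MAGIC		0xe85250d6u	/* spec value */
	#define MULTIBOOT2_ARCHITECTURE_I386	0u		/* i386 protected mode */

	struct mb2_header {		/* hypothetical name, not in the tree */
		uint32_t magic;
		uint32_t architecture;
		uint32_t header_length;	/* Multiboot2_Header_end - Multiboot2_Header */
		uint32_t checksum;	/* makes the four words sum to 0 mod 2^32 */
		/* 8-byte aligned tags follow; a type-0, size-8 tag ends the list */
	};

	static uint32_t
	mb2_checksum(uint32_t header_length)
	{
		return -(MULTIBOOT2_HEADER_MAGIC + MULTIBOOT2_ARCHITECTURE_I386 +
		    header_length);
	}

	int
	main(void)
	{
		struct mb2_header h = {
			.magic = MULTIBOOT2_HEADER_MAGIC,
			.architecture = MULTIBOOT2_ARCHITECTURE_I386,
			.header_length = sizeof(h),	/* toy example: no tags */
			.checksum = mb2_checksum(sizeof(h)),
		};

		/* Prints 0: a loader verifies the header the same way. */
		printf("words sum to %#x\n",
		    h.magic + h.architecture + h.header_length + h.checksum);
		return 0;
	}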

Index: src/sys/arch/amd64/amd64/machdep.c
diff -u src/sys/arch/amd64/amd64/machdep.c:1.344 src/sys/arch/amd64/amd64/machdep.c:1.345
--- src/sys/arch/amd64/amd64/machdep.c:1.344	Fri Dec 13 20:14:25 2019
+++ src/sys/arch/amd64/amd64/machdep.c	Thu Jan  9 00:42:24 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.344 2019/12/13 20:14:25 ad Exp $	*/
+/*	$NetBSD: machdep.c,v 1.345 2020/01/09 00:42:24 manu Exp $	*/
 
 /*
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -110,10 +110,9 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.344 2019/12/13 20:14:25 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.345 2020/01/09 00:42:24 manu Exp $");
 
 #include "opt_modular.h"
-#include "opt_multiboot.h"
 #include "opt_user_ldt.h"
 #include "opt_ddb.h"
 #include "opt_kgdb.h"
@@ -185,8 +184,6 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v 
 #include <x86/cpuvar.h>
 #include <x86/machdep.h>
 
-#include <arch/i386/include/multiboot.h>
-
 #include <x86/x86/tsc.h>
 
 #include <dev/isa/isareg.h>
@@ -374,10 +371,6 @@ cpu_startup(void)
 
 	initmsgbuf((void *)msgbuf_vaddr, round_page(sz));
 
-#ifdef MULTIBOOT
-	multiboot2_print_info();
-#endif
-
 	minaddr = 0;
 
 	/*
@@ -1511,11 +1504,6 @@ init_x86_64_ksyms(void)
 	db_machine_init();
 #endif
 
-#if defined(MULTIBOOT)
-	if (multiboot2_ksyms_addsyms_elf())
-		return;
-#endif
-
 #ifndef XENPV
 	symtab = lookup_bootinfo(BTINFO_SYMTAB);
 	if (symtab) {
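
[Editor's note, not part of the commit] The init_x86_64_ksyms() hunk above
removes the hook that gave multiboot2-supplied ELF symbols priority over the
bootinfo symbol table.  A minimal stand-alone mock of that ordering follows;
the stubs are mine and only the control flow mirrors the diff.

	#include <stdbool.h>
	#include <stdio.h>

	/* Stub for the removed multiboot2_ksyms_addsyms_elf() hook. */
	static bool
	multiboot2_ksyms_addsyms_elf(void)
	{
		return false;	/* pretend the loader passed no ELF-sections tag */
	}

	/* Stub for the BTINFO_SYMTAB fallback path kept by the commit. */
	static void
	bootinfo_symtab_fallback(void)
	{
		printf("using symbol table from bootinfo\n");
	}

	static void
	init_ksyms_like(void)
	{
		/* Removed by this commit: prefer multiboot2-provided symbols. */
		if (multiboot2_ksyms_addsyms_elf())
			return;
		/* Otherwise use the symbol table handed over via bootinfo. */
		bootinfo_symtab_fallback();
	}

	int
	main(void)
	{
		init_ksyms_like();
		return 0;
	}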

Index: src/sys/arch/amd64/conf/GENERIC
diff -u src/sys/arch/amd64/conf/GENERIC:1.553 src/sys/arch/amd64/conf/GENERIC:1.554
--- src/sys/arch/amd64/conf/GENERIC:1.553	Wed Jan  1 10:36:43 2020
+++ src/sys/arch/amd64/conf/GENERIC	Thu Jan  9 00:42:24 2020
@@ -1,4 +1,4 @@
-# $NetBSD: GENERIC,v 1.553 2020/01/01 10:36:43 ryo Exp $
+# $NetBSD: GENERIC,v 1.554 2020/01/09 00:42:24 manu Exp $
 #
 # GENERIC machine description file
 #
@@ -22,12 +22,10 @@ include 	"arch/amd64/conf/std.amd64"
 
 options 	INCLUDE_CONFIG_FILE	# embed config file in kernel binary
 
-#ident		"GENERIC-$Revision: 1.553 $"
+#ident		"GENERIC-$Revision: 1.554 $"
 
 maxusers	64		# estimated number of users
 
-#options 	MULTIBOOT	# Multiboot support (see multiboot(8)) 
-
 # delay between "rebooting ..." message and hardware reset, in milliseconds
 #options 	CPURESET_DELAY=2000
 

Index: src/sys/arch/amd64/conf/files.amd64
diff -u src/sys/arch/amd64/conf/files.amd64:1.114 src/sys/arch/amd64/conf/files.amd64:1.115
--- src/sys/arch/amd64/conf/files.amd64:1.114	Tue Dec 10 02:06:07 2019
+++ src/sys/arch/amd64/conf/files.amd64	Thu Jan  9 00:42:24 2020
@@ -1,4 +1,4 @@
-#	$NetBSD: files.amd64,v 1.114 2019/12/10 02:06:07 manu Exp $
+#	$NetBSD: files.amd64,v 1.115 2020/01/09 00:42:24 manu Exp $
 #
 # new style config file for amd64 architecture
 #
@@ -30,10 +30,6 @@ defflag opt_spectre.h	SPECTRE_V2_GCC_MIT
 defflag			USER_LDT
 defflag eisa.h		EISA
 
-# Multiboot support
-defflag	opt_multiboot.h	MULTIBOOT
-file	arch/x86/x86/multiboot2.c		multiboot
-
 # Start code
 file	arch/amd64/amd64/locore.S		machdep
 file	arch/amd64/amd64/vector.S		machdep
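
[Editor's note, not part of the commit] The two removed lines were the
config(1) glue for the option: the defflag line told config to emit an
opt_multiboot.h header (consumed by the #include "opt_multiboot.h" lines
removed from locore.S and machdep.c), and the file line compiled
arch/x86/x86/multiboot2.c only when MULTIBOOT was selected.  As a rough
illustration -- the exact wording is config's, not quoted from a build --
the generated header looks approximately like this:

	/* kernel config with "options MULTIBOOT": */
	#define	MULTIBOOT	1

	/* kernel config without it: */
	/* option `MULTIBOOT' not defined */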

Index: src/sys/arch/amd64/conf/kern.ldscript
diff -u src/sys/arch/amd64/conf/kern.ldscript:1.30 src/sys/arch/amd64/conf/kern.ldscript:1.31
--- src/sys/arch/amd64/conf/kern.ldscript:1.30	Sun Dec 15 02:56:40 2019
+++ src/sys/arch/amd64/conf/kern.ldscript	Thu Jan  9 00:42:24 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: kern.ldscript,v 1.30 2019/12/15 02:56:40 manu Exp $	*/
+/*	$NetBSD: kern.ldscript,v 1.31 2020/01/09 00:42:24 manu Exp $	*/
 
 #include "assym.h"
 
@@ -13,17 +13,7 @@ __LARGE_PAGE_SIZE = 0x200000 ;
 ENTRY(_start)
 SECTIONS
 {
-	/*
-	 * multiboot (file_offset) : AT (load_address) 
-	 * file_offset must be below 32k for multiboot 2 specification
-	 * BIOS boot requires load_address above 0x200000
-	 */
-	multiboot 0x1000 : AT (0x200000)
-	{
-		. = ALIGN(8);
-		KEEP(*(multiboot));
-	}
-	.text : AT (0x200000 + SIZEOF(multiboot))
+	.text : AT (ADDR(.text) & 0x0fffffff)
 	{
 		. = ALIGN(__PAGE_SIZE);
 		__text_user_start = . ;
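
[Editor's note, not part of the commit] The removed output section satisfied
the two constraints named in its comment: the Multiboot2 spec requires the
header to sit, 64-bit aligned, within the first 32768 bytes of the image
(hence the 0x1000 file offset), while BIOS boot wants the kernel loaded at or
above 0x200000 (hence the AT(0x200000)).  With the section gone, .text is
again loaded at its link address masked down to a physical address
(ADDR(.text) & 0x0fffffff).  To confirm that a given kernel image no longer
advertises a Multiboot2 header after this change, a small stand-alone checker
along these lines would do; the tool is mine, not part of the tree, and it
only scans the spec's 32 KiB, 8-byte-aligned window for the magic, so a hit
is a strong hint rather than full validation.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define MB2_MAGIC	0xe85250d6u	/* Multiboot2 header magic */
	#define MB2_WINDOW	32768u		/* spec: header within first 32 KiB */

	int
	main(int argc, char **argv)
	{
		unsigned char buf[MB2_WINDOW];
		size_t n, off;
		FILE *fp;

		if (argc != 2 || (fp = fopen(argv[1], "rb")) == NULL) {
			fprintf(stderr, "usage: mb2scan <kernel-image>\n");
			return 1;
		}
		n = fread(buf, 1, sizeof(buf), fp);
		fclose(fp);

		/* The spec requires 64-bit alignment, so step by 8 bytes. */
		for (off = 0; off + 4 <= n; off += 8) {
			uint32_t v;
			memcpy(&v, buf + off, sizeof(v));
			if (v == MB2_MAGIC) {
				printf("multiboot2 magic at offset %#zx\n", off);
				return 0;
			}
		}
		printf("no multiboot2 header in first 32 KiB\n");
		return 0;
	}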
