Module Name:	src
Committed By:	maxv
Date:		Sun Oct 29 11:28:30 UTC 2017

Modified Files:
	src/sys/arch/amd64/stand/prekern: elf.c locore.S mm.c prekern.c
	    prekern.h

Log Message:
Randomize the kernel segments independently. That is to say, put text,
rodata and data at different addresses (and in a random order). To achieve
that, the mapping order in the prekern is changed.

Until now, we were creating the kernel map the following way:
	-> choose a random VA
	-> map [kernpa_start; kernpa_end[ at this VA
	-> parse the ELF structures from there
	-> determine where exactly the kernel segments are located
	-> relocate, etc.

Now, we are doing:
	-> create a read-only view of [kernpa_start; kernpa_end[
	-> from this view, compute the size of the "head" region
	-> choose a random VA in the HEAD window, and map the head there
	-> for each region in (text, rodata, data, boot):
		-> compute the size of the region from the RO view
		-> choose a random VA in the KASLR window
		-> map the region there
	-> relocate, etc.

Each time we map a region, we initialize its bootspace fields right away.

The "head" region must be put before the other regions in memory, because
the kernel uses (headva + sh_offset) to get the addresses of the symbols,
and the offset is unsigned. Given that the head does not have an mcmodel
constraint, its location is randomized in a window located below the KASLR
window. Since the rest of the regions share the KASLR window, we need to
detect collisions between them.

Note that the module map is embedded in the "boot" region, so its location
is randomized too.


To generate a diff of this commit:
cvs rdiff -u -r1.3 -r1.4 src/sys/arch/amd64/stand/prekern/elf.c
cvs rdiff -u -r1.2 -r1.3 src/sys/arch/amd64/stand/prekern/locore.S \
    src/sys/arch/amd64/stand/prekern/prekern.c \
    src/sys/arch/amd64/stand/prekern/prekern.h
cvs rdiff -u -r1.5 -r1.6 src/sys/arch/amd64/stand/prekern/mm.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
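
For reference, a minimal standalone sketch of the placement scheme described
above: each region gets a page-aligned random VA inside the KASLR window, and
a candidate is retried if it overlaps a region that was already placed. The
window constants mirror KASLR_WINDOW_BASE/KASLR_WINDOW_SIZE from prekern.h,
but pick_random(), place_region() and the region array are illustrative
stand-ins rather than the prekern API; the real logic is mm_randva_kregion()
in mm.c below.

#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096ULL
/* Illustrative 2GB window, mirroring KASLR_WINDOW_BASE/SIZE in prekern.h. */
#define WINDOW_BASE	0xffffffff80000000ULL
#define WINDOW_SIZE	(2ULL * 1024 * 1024 * 1024)

struct region {
	uint64_t sva;	/* start VA of an already placed region */
	uint64_t eva;	/* end VA (exclusive) */
};

/* Toy entropy source; the prekern currently uses rdtsc() as a placeholder. */
static uint64_t
pick_random(void)
{
	return ((uint64_t)rand() << 32) ^ (uint64_t)rand();
}

/* Does [va, va+size[ overlap one of the regions placed so far? */
static bool
overlaps(uint64_t va, size_t size, const struct region *regs, size_t nregs)
{
	size_t i;

	for (i = 0; i < nregs; i++) {
		if (va < regs[i].eva && regs[i].sva < va + size)
			return true;
	}
	return false;
}

/*
 * Pick a page-aligned VA for a region of 'size' bytes inside the window,
 * retrying until it does not collide with the regions already placed.
 */
static uint64_t
place_region(size_t size, struct region *regs, size_t *nregs)
{
	uint64_t rnd, va;

	do {
		rnd = pick_random();
		va = WINDOW_BASE + rnd % (WINDOW_SIZE - size);
		va &= ~(PAGE_SIZE - 1);		/* round down to a page */
	} while (overlaps(va, size, regs, *nregs));

	regs[*nregs].sva = va;
	regs[*nregs].eva = va + size;
	(*nregs)++;
	return va;
}

int
main(void)
{
	struct region placed[4];
	size_t nplaced = 0;

	srand(12345);	/* deterministic toy seed */

	/* Four regions standing in for text, rodata, data and boot. */
	printf("text   @ 0x%" PRIx64 "\n",
	    place_region(8 * 1024 * 1024, placed, &nplaced));
	printf("rodata @ 0x%" PRIx64 "\n",
	    place_region(2 * 1024 * 1024, placed, &nplaced));
	printf("data   @ 0x%" PRIx64 "\n",
	    place_region(1 * 1024 * 1024, placed, &nplaced));
	printf("boot   @ 0x%" PRIx64 "\n",
	    place_region(32 * 1024 * 1024, placed, &nplaced));

	return 0;
}

Retrying on collision is all that is needed here, since only a handful of
regions ever have to fit into a 2GB window.
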
Modified files: Index: src/sys/arch/amd64/stand/prekern/elf.c diff -u src/sys/arch/amd64/stand/prekern/elf.c:1.3 src/sys/arch/amd64/stand/prekern/elf.c:1.4 --- src/sys/arch/amd64/stand/prekern/elf.c:1.3 Sun Oct 29 10:07:08 2017 +++ src/sys/arch/amd64/stand/prekern/elf.c Sun Oct 29 11:28:30 2017 @@ -1,4 +1,4 @@ -/* $NetBSD: elf.c,v 1.3 2017/10/29 10:07:08 maxv Exp $ */ +/* $NetBSD: elf.c,v 1.4 2017/10/29 11:28:30 maxv Exp $ */ /* * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved. @@ -56,6 +56,8 @@ struct elfinfo { } data; }; +extern paddr_t kernpa_start, kernpa_end; + static struct elfinfo eif; static const char entrypoint[] = "start_prekern"; @@ -256,6 +258,37 @@ elf_apply_reloc(uintptr_t relocbase, con } } +/* -------------------------------------------------------------------------- */ + +size_t +elf_get_head_size(vaddr_t headva) +{ + Elf_Ehdr *ehdr; + Elf_Shdr *shdr; + size_t size; + + ehdr = (Elf_Ehdr *)headva; + shdr = (Elf_Shdr *)((uint8_t *)ehdr + ehdr->e_shoff); + + size = (vaddr_t)shdr + (vaddr_t)(ehdr->e_shnum * sizeof(Elf_Shdr)) - + (vaddr_t)ehdr; + + return roundup(size, PAGE_SIZE); +} + +void +elf_build_head(vaddr_t headva) +{ + memset(&eif, 0, sizeof(struct elfinfo)); + + eif.ehdr = (Elf_Ehdr *)headva; + eif.shdr = (Elf_Shdr *)((uint8_t *)eif.ehdr + eif.ehdr->e_shoff); + + if (elf_check_header() == -1) { + fatal("elf_build_info: wrong kernel ELF header"); + } +} + static bool elf_section_is_text(Elf_Shdr *shdr) { @@ -296,20 +329,180 @@ elf_section_is_data(Elf_Shdr *shdr) return true; } -static void -elf_build_info(vaddr_t baseva) +void +elf_get_text(paddr_t *pa, size_t *sz) { - vaddr_t secva, minva, maxva; - size_t secsz; - size_t i, j; + const paddr_t basepa = kernpa_start; + paddr_t minpa, maxpa, secpa; + size_t i, secsz; - memset(&eif, 0, sizeof(struct elfinfo)); + minpa = 0xFFFFFFFFFFFFFFFF, maxpa = 0; + for (i = 0; i < eif.ehdr->e_shnum; i++) { + if (!elf_section_is_text(&eif.shdr[i])) { + continue; + } + secpa = basepa + eif.shdr[i].sh_offset; + secsz = eif.shdr[i].sh_size; + if (secpa < minpa) { + minpa = secpa; + } + if (secpa + secsz > maxpa) { + maxpa = secpa + secsz; + } + } + ASSERT(minpa % PAGE_SIZE == 0); - eif.ehdr = (Elf_Ehdr *)baseva; - eif.shdr = (Elf_Shdr *)((uint8_t *)eif.ehdr + eif.ehdr->e_shoff); + *pa = minpa; + *sz = roundup(maxpa - minpa, PAGE_SIZE); +} - if (elf_check_header(&eif) == -1) { - fatal("elf_build_info: wrong kernel ELF header"); +void +elf_build_text(vaddr_t textva, paddr_t textpa, size_t textsz) +{ + const paddr_t basepa = kernpa_start; + const vaddr_t headva = (vaddr_t)eif.ehdr; + size_t i, offtext; + + eif.text.va = textva; + eif.text.sz = textsz; + + for (i = 0; i < eif.ehdr->e_shnum; i++) { + if (!elf_section_is_text(&eif.shdr[i])) { + continue; + } + + /* Offset of the section within the text segment. */ + offtext = basepa + eif.shdr[i].sh_offset - textpa; + + /* We want (headva + sh_offset) to be the VA of the section. 
*/ + eif.shdr[i].sh_offset = (eif.text.va + offtext - headva); + } +} + +void +elf_get_rodata(paddr_t *pa, size_t *sz) +{ + const paddr_t basepa = kernpa_start; + paddr_t minpa, maxpa, secpa; + size_t i, secsz; + + minpa = 0xFFFFFFFFFFFFFFFF, maxpa = 0; + for (i = 0; i < eif.ehdr->e_shnum; i++) { + if (!elf_section_is_rodata(&eif.shdr[i])) { + continue; + } + secpa = basepa + eif.shdr[i].sh_offset; + secsz = eif.shdr[i].sh_size; + if (secpa < minpa) { + minpa = secpa; + } + if (secpa + secsz > maxpa) { + maxpa = secpa + secsz; + } + } + ASSERT(minpa % PAGE_SIZE == 0); + + *pa = minpa; + *sz = roundup(maxpa - minpa, PAGE_SIZE); +} + +void +elf_build_rodata(vaddr_t rodatava, paddr_t rodatapa, size_t rodatasz) +{ + const paddr_t basepa = kernpa_start; + const vaddr_t headva = (vaddr_t)eif.ehdr; + size_t i, offrodata; + + eif.rodata.va = rodatava; + eif.rodata.sz = rodatasz; + + for (i = 0; i < eif.ehdr->e_shnum; i++) { + if (!elf_section_is_rodata(&eif.shdr[i])) { + continue; + } + + /* Offset of the section within the rodata segment. */ + offrodata = basepa + eif.shdr[i].sh_offset - rodatapa; + + /* We want (headva + sh_offset) to be the VA of the section. */ + eif.shdr[i].sh_offset = (eif.rodata.va + offrodata - headva); + } +} + +void +elf_get_data(paddr_t *pa, size_t *sz) +{ + const paddr_t basepa = kernpa_start; + paddr_t minpa, maxpa, secpa; + size_t i, secsz; + + minpa = 0xFFFFFFFFFFFFFFFF, maxpa = 0; + for (i = 0; i < eif.ehdr->e_shnum; i++) { + if (!elf_section_is_data(&eif.shdr[i])) { + continue; + } + secpa = basepa + eif.shdr[i].sh_offset; + secsz = eif.shdr[i].sh_size; + if (secpa < minpa) { + minpa = secpa; + } + if (secpa + secsz > maxpa) { + maxpa = secpa + secsz; + } + } + ASSERT(minpa % PAGE_SIZE == 0); + + *pa = minpa; + *sz = roundup(maxpa - minpa, PAGE_SIZE); +} + +void +elf_build_data(vaddr_t datava, paddr_t datapa, size_t datasz) +{ + const paddr_t basepa = kernpa_start; + const vaddr_t headva = (vaddr_t)eif.ehdr; + size_t i, offdata; + + eif.data.va = datava; + eif.data.sz = datasz; + + for (i = 0; i < eif.ehdr->e_shnum; i++) { + if (!elf_section_is_data(&eif.shdr[i])) { + continue; + } + + /* Offset of the section within the data segment. */ + offdata = basepa + eif.shdr[i].sh_offset - datapa; + + /* We want (headva + sh_offset) to be the VA of the section. */ + eif.shdr[i].sh_offset = (eif.data.va + offdata - headva); + } +} + +void +elf_build_boot(vaddr_t bootva, paddr_t bootpa) +{ + const paddr_t basepa = kernpa_start; + const vaddr_t headva = (vaddr_t)eif.ehdr; + size_t i, j, offboot; + + for (i = 0; i < eif.ehdr->e_shnum; i++) { + if (eif.shdr[i].sh_type != SHT_STRTAB && + eif.shdr[i].sh_type != SHT_REL && + eif.shdr[i].sh_type != SHT_RELA && + eif.shdr[i].sh_type != SHT_SYMTAB) { + continue; + } + if (eif.shdr[i].sh_offset == 0) { + /* hasn't been loaded */ + continue; + } + + /* Offset of the section within the boot region. */ + offboot = basepa + eif.shdr[i].sh_offset - bootpa; + + /* We want (headva + sh_offset) to be the VA of the region. */ + eif.shdr[i].sh_offset = (bootva + offboot - headva); } /* Locate the section names */ @@ -344,80 +537,16 @@ elf_build_info(vaddr_t baseva) } eif.strtab = (char *)((uint8_t *)eif.ehdr + eif.shdr[j].sh_offset); eif.strsz = eif.shdr[j].sh_size; - - /* - * Save the locations of the kernel segments. Attention: there is a - * difference between "segment" and "section". A segment can contain - * several sections. 
- */ - - /* text */ - minva = 0xFFFFFFFFFFFFFFFF, maxva = 0; - for (i = 0; i < eif.ehdr->e_shnum; i++) { - if (!elf_section_is_text(&eif.shdr[i])) { - continue; - } - secva = baseva + eif.shdr[i].sh_offset; - secsz = eif.shdr[i].sh_size; - if (secva < minva) { - minva = secva; - } - if (secva + secsz > maxva) { - maxva = secva + secsz; - } - } - eif.text.va = minva; - eif.text.sz = roundup(maxva - minva, PAGE_SIZE); - ASSERT(eif.text.va % PAGE_SIZE == 0); - - /* rodata */ - minva = 0xFFFFFFFFFFFFFFFF, maxva = 0; - for (i = 0; i < eif.ehdr->e_shnum; i++) { - if (!elf_section_is_rodata(&eif.shdr[i])) { - continue; - } - secva = baseva + eif.shdr[i].sh_offset; - secsz = eif.shdr[i].sh_size; - if (secva < minva) { - minva = secva; - } - if (secva + secsz > maxva) { - maxva = secva + secsz; - } - } - eif.rodata.va = minva; - eif.rodata.sz = roundup(maxva - minva, PAGE_SIZE); - ASSERT(eif.rodata.va % PAGE_SIZE == 0); - - /* data */ - minva = 0xFFFFFFFFFFFFFFFF, maxva = 0; - for (i = 0; i < eif.ehdr->e_shnum; i++) { - if (!elf_section_is_data(&eif.shdr[i])) { - continue; - } - secva = baseva + eif.shdr[i].sh_offset; - secsz = eif.shdr[i].sh_size; - if (secva < minva) { - minva = secva; - } - if (secva + secsz > maxva) { - maxva = secva + secsz; - } - } - eif.data.va = minva; - eif.data.sz = roundup(maxva - minva, PAGE_SIZE); - ASSERT(eif.data.va % PAGE_SIZE == 0); } vaddr_t -elf_kernel_reloc(vaddr_t baseva) +elf_kernel_reloc() { + const vaddr_t baseva = (vaddr_t)eif.ehdr; vaddr_t secva, ent; Elf_Sym *sym; size_t i, j; - elf_build_info(baseva); - print_state(true, "ELF info created"); /* @@ -523,26 +652,3 @@ elf_kernel_reloc(vaddr_t baseva) return ent; } -void -elf_get_text(vaddr_t *va, paddr_t *pa, size_t *sz) -{ - *va = eif.text.va; - *pa = mm_vatopa(eif.text.va); - *sz = eif.text.sz; -} - -void -elf_get_rodata(vaddr_t *va, paddr_t *pa, size_t *sz) -{ - *va = eif.rodata.va; - *pa = mm_vatopa(eif.rodata.va); - *sz = eif.rodata.sz; -} - -void -elf_get_data(vaddr_t *va, paddr_t *pa, size_t *sz) -{ - *va = eif.data.va; - *pa = mm_vatopa(eif.data.va); - *sz = eif.data.sz; -} Index: src/sys/arch/amd64/stand/prekern/locore.S diff -u src/sys/arch/amd64/stand/prekern/locore.S:1.2 src/sys/arch/amd64/stand/prekern/locore.S:1.3 --- src/sys/arch/amd64/stand/prekern/locore.S:1.2 Wed Oct 11 16:18:11 2017 +++ src/sys/arch/amd64/stand/prekern/locore.S Sun Oct 29 11:28:30 2017 @@ -1,4 +1,4 @@ -/* $NetBSD: locore.S,v 1.2 2017/10/11 16:18:11 maxv Exp $ */ +/* $NetBSD: locore.S,v 1.3 2017/10/29 11:28:30 maxv Exp $ */ /* * Copyright (c) 1998, 2000, 2007, 2008, 2016, 2017 The NetBSD Foundation, Inc. @@ -444,12 +444,13 @@ no_NOX: orl $(PG_V|PG_KW),%eax fillkpt_nox - /* Map some blank space, to keep pa = va. */ + /* Map a RO view of the kernel. */ movl $_C_LABEL(__prekern_end),%eax movl %esi,%ecx /* start of BOOTSTRAP TABLES */ subl %eax,%ecx shrl $PGSHIFT,%ecx - fillkpt_blank + orl $(PG_V|PG_KR),%eax + fillkpt_nox /* Map the BOOTSTRAP TABLES RW. */ movl %esi,%eax /* start of BOOTSTRAP TABLES */ Index: src/sys/arch/amd64/stand/prekern/prekern.c diff -u src/sys/arch/amd64/stand/prekern/prekern.c:1.2 src/sys/arch/amd64/stand/prekern/prekern.c:1.3 --- src/sys/arch/amd64/stand/prekern/prekern.c:1.2 Sun Oct 29 10:01:22 2017 +++ src/sys/arch/amd64/stand/prekern/prekern.c Sun Oct 29 11:28:30 2017 @@ -1,4 +1,4 @@ -/* $NetBSD: prekern.c,v 1.2 2017/10/29 10:01:22 maxv Exp $ */ +/* $NetBSD: prekern.c,v 1.3 2017/10/29 11:28:30 maxv Exp $ */ /* * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved. 
@@ -219,8 +219,6 @@ static void init_idt() /* -------------------------------------------------------------------------- */ -struct bootspace bootspace; - struct prekern_args { int boothowto; void *bootinfo; @@ -239,34 +237,9 @@ struct prekern_args { struct prekern_args pkargs; static void -init_bootspace(vaddr_t baseva) -{ - extern vaddr_t iom_base; - extern uint64_t PDPpaddr; - - elf_get_text(&bootspace.text.va, &bootspace.text.pa, - &bootspace.text.sz); - elf_get_rodata(&bootspace.rodata.va, &bootspace.rodata.pa, - &bootspace.rodata.sz); - elf_get_data(&bootspace.data.va, &bootspace.data.pa, - &bootspace.data.sz); - - bootspace.head.va = baseva; - bootspace.head.pa = mm_vatopa(bootspace.head.va); - bootspace.head.sz = bootspace.text.va - baseva; - - bootspace.boot.va = bootspace.data.va + bootspace.data.sz; - bootspace.boot.pa = mm_vatopa(bootspace.boot.va); - bootspace.boot.sz = (size_t)(iom_base + IOM_SIZE) - - (size_t)bootspace.boot.va; - bootspace.spareva = baseva + NKL2_KIMG_ENTRIES * NBPD_L2; - bootspace.pdir = baseva + (PDPpaddr - kernpa_start); - bootspace.emodule = baseva + NKL2_KIMG_ENTRIES * NBPD_L2; -} - -static void -init_prekern_args(vaddr_t baseva) +init_prekern_args() { + extern struct bootspace bootspace; extern int esym; extern int biosextmem; extern int biosbasemem; @@ -288,7 +261,7 @@ init_prekern_args(vaddr_t baseva) pkargs.nox_flag = nox_flag; pkargs.PDPpaddr = PDPpaddr; pkargs.atdevbase = iom_base; - pkargs.lwp0uarea = baseva + (stkpa - kernpa_start); + pkargs.lwp0uarea = bootspace.boot.va + (stkpa - bootspace.boot.pa); pkargs.first_avail = pa_avail; extern vaddr_t stkva; @@ -322,7 +295,7 @@ exec_kernel(vaddr_t ent) void init_prekern(paddr_t pa_start) { - vaddr_t baseva, ent; + vaddr_t ent; init_cons(); print_banner(); @@ -363,18 +336,13 @@ init_prekern(paddr_t pa_start) /* * Relocate the kernel. */ - baseva = mm_map_kernel(); - ent = elf_kernel_reloc(baseva); - - /* - * Build the bootspace. - */ - init_bootspace(baseva); + mm_map_kernel(); + ent = elf_kernel_reloc(); /* * Build the arguments. */ - init_prekern_args(baseva); + init_prekern_args(); /* * Finally, jump into the kernel. Index: src/sys/arch/amd64/stand/prekern/prekern.h diff -u src/sys/arch/amd64/stand/prekern/prekern.h:1.2 src/sys/arch/amd64/stand/prekern/prekern.h:1.3 --- src/sys/arch/amd64/stand/prekern/prekern.h:1.2 Sun Oct 29 10:01:22 2017 +++ src/sys/arch/amd64/stand/prekern/prekern.h Sun Oct 29 11:28:30 2017 @@ -1,4 +1,4 @@ -/* $NetBSD: prekern.h,v 1.2 2017/10/29 10:01:22 maxv Exp $ */ +/* $NetBSD: prekern.h,v 1.3 2017/10/29 11:28:30 maxv Exp $ */ /* * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved. 
@@ -52,6 +52,9 @@ typedef uint64_t pte_prot_t; #define RED_ON_BLACK 0x04 #define GREEN_ON_BLACK 0x02 +#define HEAD_WINDOW_BASE (KERNBASE - NBPD_L3) +#define HEAD_WINDOW_SIZE NBPD_L3 + #define KASLR_WINDOW_BASE KERNBASE /* max - 2GB */ #define KASLR_WINDOW_SIZE (2LLU * (1 << 30)) /* 2GB */ @@ -96,10 +99,16 @@ void print_state(bool, char *); void print_banner(); /* elf.c */ -vaddr_t elf_kernel_reloc(vaddr_t); -void elf_get_text(vaddr_t *, paddr_t *, size_t *); -void elf_get_rodata(vaddr_t *, paddr_t *, size_t *); -void elf_get_data(vaddr_t *, paddr_t *, size_t *); +size_t elf_get_head_size(vaddr_t); +void elf_build_head(vaddr_t); +void elf_get_text(paddr_t *, size_t *); +void elf_build_text(vaddr_t, paddr_t, size_t); +void elf_get_rodata(paddr_t *, size_t *); +void elf_build_rodata(vaddr_t, paddr_t, size_t); +void elf_get_data(paddr_t *, size_t *); +void elf_build_data(vaddr_t, paddr_t, size_t); +void elf_build_boot(vaddr_t, paddr_t); +vaddr_t elf_kernel_reloc(); /* locore.S */ void lidt(void *); @@ -110,7 +119,7 @@ void jump_kernel(); void mm_init(paddr_t); paddr_t mm_vatopa(vaddr_t); void mm_mprotect(vaddr_t, size_t, int); -vaddr_t mm_map_kernel(); +void mm_map_kernel(); /* prekern.c */ void fatal(char *); Index: src/sys/arch/amd64/stand/prekern/mm.c diff -u src/sys/arch/amd64/stand/prekern/mm.c:1.5 src/sys/arch/amd64/stand/prekern/mm.c:1.6 --- src/sys/arch/amd64/stand/prekern/mm.c:1.5 Sat Oct 28 19:28:11 2017 +++ src/sys/arch/amd64/stand/prekern/mm.c Sun Oct 29 11:28:30 2017 @@ -1,4 +1,4 @@ -/* $NetBSD: mm.c,v 1.5 2017/10/28 19:28:11 maxv Exp $ */ +/* $NetBSD: mm.c,v 1.6 2017/10/29 11:28:30 maxv Exp $ */ /* * Copyright (c) 2017 The NetBSD Foundation, Inc. All rights reserved. @@ -37,6 +37,8 @@ static const pt_entry_t protection_codes /* RWX does not exist */ }; +struct bootspace bootspace; + extern paddr_t kernpa_start, kernpa_end; vaddr_t iom_base; @@ -168,62 +170,222 @@ mm_map_tree(vaddr_t startva, vaddr_t end } } -/* - * Select a random VA, and create a page tree. The size of this tree is - * actually hard-coded, and matches the one created by the generic NetBSD - * locore. - */ +static uint64_t +mm_rand_num64() +{ + /* XXX: yes, this is ridiculous, will be fixed soon */ + return rdtsc(); +} + +static void +mm_map_head() +{ + size_t i, npages, size; + uint64_t rnd; + vaddr_t randva; + + /* + * To get the size of the head, we give a look at the read-only + * mapping of the kernel we created in locore. We're identity mapped, + * so kernpa = kernva. 
+ */ + size = elf_get_head_size((vaddr_t)kernpa_start); + npages = size / PAGE_SIZE; + + rnd = mm_rand_num64(); + randva = rounddown(HEAD_WINDOW_BASE + rnd % (HEAD_WINDOW_SIZE - size), + PAGE_SIZE); + mm_map_tree(randva, randva + size); + + /* Enter the area and build the ELF info */ + for (i = 0; i < npages; i++) { + mm_enter_pa(kernpa_start + i * PAGE_SIZE, + randva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE); + } + elf_build_head(randva); + + /* Register the values in bootspace */ + bootspace.head.va = randva; + bootspace.head.pa = kernpa_start; + bootspace.head.sz = size; +} + static vaddr_t -mm_rand_base() +mm_randva_kregion(size_t size) { + static struct { + vaddr_t sva; + vaddr_t eva; + } regions[4]; + static size_t idx = 0; vaddr_t randva; uint64_t rnd; - size_t size; + size_t i; + bool ok; - size = (NKL2_KIMG_ENTRIES + 1) * NBPD_L2; + ASSERT(idx < 4); - /* XXX: yes, this is ridiculous, will be fixed soon */ - rnd = rdtsc(); - randva = rounddown(KASLR_WINDOW_BASE + rnd % (KASLR_WINDOW_SIZE - size), - PAGE_SIZE); + while (1) { + rnd = mm_rand_num64(); + randva = rounddown(KASLR_WINDOW_BASE + + rnd % (KASLR_WINDOW_SIZE - size), PAGE_SIZE); + + /* Detect collisions */ + ok = true; + for (i = 0; i < idx; i++) { + if ((regions[i].sva <= randva) && + (randva < regions[i].eva)) { + ok = false; + break; + } + if ((regions[i].sva < randva + size) && + (randva + size <= regions[i].eva)) { + ok = false; + break; + } + } + if (ok) { + break; + } + } + + regions[idx].eva = randva; + regions[idx].sva = randva + size; + idx++; mm_map_tree(randva, randva + size); return randva; } -/* - * Virtual address space of the kernel: - * +---------------+---------------------+------------------+-------------+ - * | KERNEL + SYMS | [PRELOADED MODULES] | BOOTSTRAP TABLES | ISA I/O MEM | - * +---------------+---------------------+------------------+-------------+ - * We basically choose a random VA, and map everything contiguously starting - * from there. Note that the physical pages allocated by mm_palloc are part - * of the BOOTSTRAP TABLES. - */ -vaddr_t -mm_map_kernel() +static void +mm_map_segments() { size_t i, npages, size; - vaddr_t baseva; + vaddr_t randva; + paddr_t pa; - size = (pa_avail - kernpa_start); - baseva = mm_rand_base(); + /* + * Kernel text segment. + */ + elf_get_text(&pa, &size); + randva = mm_randva_kregion(size); npages = size / PAGE_SIZE; - /* Enter the whole area linearly */ + /* Enter the area and build the ELF info */ for (i = 0; i < npages; i++) { - mm_enter_pa(kernpa_start + i * PAGE_SIZE, - baseva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE); + mm_enter_pa(pa + i * PAGE_SIZE, + randva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE); } + elf_build_text(randva, pa, size); + + /* Register the values in bootspace */ + bootspace.text.va = randva; + bootspace.text.pa = pa; + bootspace.text.sz = size; + + /* + * Kernel rodata segment. + */ + elf_get_rodata(&pa, &size); + randva = mm_randva_kregion(size); + npages = size / PAGE_SIZE; + + /* Enter the area and build the ELF info */ + for (i = 0; i < npages; i++) { + mm_enter_pa(pa + i * PAGE_SIZE, + randva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE); + } + elf_build_rodata(randva, pa, size); + + /* Register the values in bootspace */ + bootspace.rodata.va = randva; + bootspace.rodata.pa = pa; + bootspace.rodata.sz = size; + + /* + * Kernel data segment. 
+ */ + elf_get_data(&pa, &size); + randva = mm_randva_kregion(size); + npages = size / PAGE_SIZE; + + /* Enter the area and build the ELF info */ + for (i = 0; i < npages; i++) { + mm_enter_pa(pa + i * PAGE_SIZE, + randva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE); + } + elf_build_data(randva, pa, size); + + /* Register the values in bootspace */ + bootspace.data.va = randva; + bootspace.data.pa = pa; + bootspace.data.sz = size; +} + +static void +mm_map_boot() +{ + size_t i, npages, size; + vaddr_t randva; + paddr_t bootpa; + + /* + * The "boot" region is special: its page tree has a fixed size, but + * the number of pages entered is lower. + */ + + /* Create the page tree */ + size = (NKL2_KIMG_ENTRIES + 1) * NBPD_L2; + randva = mm_randva_kregion(size); + + /* Enter the area and build the ELF info */ + bootpa = bootspace.data.pa + bootspace.data.sz; + size = (pa_avail - bootpa); + npages = size / PAGE_SIZE; + for (i = 0; i < npages; i++) { + mm_enter_pa(bootpa + i * PAGE_SIZE, + randva + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE); + } + elf_build_boot(randva, bootpa); /* Enter the ISA I/O MEM */ - iom_base = baseva + npages * PAGE_SIZE; + iom_base = randva + npages * PAGE_SIZE; npages = IOM_SIZE / PAGE_SIZE; for (i = 0; i < npages; i++) { mm_enter_pa(IOM_BEGIN + i * PAGE_SIZE, iom_base + i * PAGE_SIZE, MM_PROT_READ|MM_PROT_WRITE); } - return baseva; + /* Register the values in bootspace */ + bootspace.boot.va = randva; + bootspace.boot.pa = bootpa; + bootspace.boot.sz = (size_t)(iom_base + IOM_SIZE) - + (size_t)bootspace.boot.va; + + /* Initialize the values that are located in the "boot" region */ + extern uint64_t PDPpaddr; + bootspace.spareva = bootspace.boot.va + NKL2_KIMG_ENTRIES * NBPD_L2; + bootspace.pdir = bootspace.boot.va + (PDPpaddr - bootspace.boot.pa); + bootspace.emodule = bootspace.boot.va + NKL2_KIMG_ENTRIES * NBPD_L2; } + +/* + * There are five independent regions: head, text, rodata, data, boot. They are + * all mapped at random VAs. + * + * Head contains the ELF Header and ELF Section Headers, and we use them to + * map the rest of the regions. Head must be placed in memory *before* the + * other regions. + * + * At the end of this function, the bootspace structure is fully constructed. + */ +void +mm_map_kernel() +{ + memset(&bootspace, 0, sizeof(bootspace)); + mm_map_head(); + mm_map_segments(); + mm_map_boot(); +} +
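
As a closing illustration of the (headva + sh_offset) invariant mentioned in
the log message: the prekern rewrites each section's sh_offset to be the
distance from the head to the section's new, randomized VA, and the kernel
later recovers the VA by adding that offset back to headva. A hedged sketch,
assuming a userland <elf.h> for the Elf64_* types; encode_section_offset()
and decode_section_va() are illustrative names, not prekern or kernel
functions, and the example VAs are made up, merely chosen to sit in the HEAD
and KASLR windows respectively.

#include <assert.h>
#include <elf.h>
#include <stddef.h>
#include <stdint.h>

/*
 * After the prekern has rewritten the section headers, the invariant for
 * every mapped section i is:
 *
 *	section_va(i) == headva + shdr[i].sh_offset
 *
 * sh_offset has the unsigned type Elf64_Off, so it can only encode a
 * positive distance from the head.  This is why the head is mapped in a
 * window located *below* the KASLR window: every randomized section VA
 * must be greater than headva, otherwise the subtraction below would wrap.
 */
static Elf64_Off
encode_section_offset(uintptr_t headva, uintptr_t section_va)
{
	/* What the prekern stores into shdr[i].sh_offset. */
	return (Elf64_Off)(section_va - headva);
}

static uintptr_t
decode_section_va(uintptr_t headva, const Elf64_Shdr *shdr, size_t i)
{
	/* What the kernel computes later, e.g. for its symbol table. */
	return headva + shdr[i].sh_offset;
}

int
main(void)
{
	Elf64_Shdr shdr = { 0 };
	/* Example VAs: head below KERNBASE, text inside the KASLR window. */
	uintptr_t headva = 0xffffffff7ea00000UL;
	uintptr_t textva = 0xffffffff81200000UL;

	shdr.sh_offset = encode_section_offset(headva, textva);
	assert(decode_section_va(headva, &shdr, 0) == textva);
	return 0;
}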