Module Name: src
Committed By: skrll
Date: Tue Sep 20 07:18:24 UTC 2022
Modified Files:
src/sys/arch/riscv/include: locore.h pmap.h vmparam.h
src/sys/arch/riscv/riscv: genassym.cf locore.S pmap_machdep.c
riscv_machdep.c
Added Files:
src/sys/arch/riscv/include: machdep.h
Log Message:
Checkpoint WIP.
QEMU RV64 virt can boot into virtual mode
OpenSBI v1.0
____ _____ ____ _____
/ __ \ / ____| _ \_ _|
| | | |_ __ ___ _ __ | (___ | |_) || |
| | | | '_ \ / _ \ '_ \ \___ \| _ < | |
| |__| | |_) | __/ | | |____) | |_) || |_
\____/| .__/ \___|_| |_|_____/|____/_____|
| |
|_|
Platform Name : riscv-virtio,qemu
Platform Features : medeleg
Platform HART Count : 1
Platform IPI Device : aclint-mswi
Platform Timer Device : aclint-mtimer @ 10000000Hz
Platform Console Device : uart8250
Platform HSM Device : ---
Platform Reboot Device : sifive_test
Platform Shutdown Device : sifive_test
Firmware Base : 0x80000000
Firmware Size : 252 KB
Runtime SBI Version : 0.3
Domain0 Name : root
Domain0 Boot HART : 0
Domain0 HARTs : 0*
Domain0 Region00 : 0x0000000002000000-0x000000000200ffff (I)
Domain0 Region01 : 0x0000000080000000-0x000000008003ffff ()
Domain0 Region02 : 0x0000000000000000-0xffffffffffffffff (R,W,X)
Domain0 Next Address : 0x0000000080200000
Domain0 Next Arg1 : 0x00000000bfe00000
Domain0 Next Mode : S-mode
Domain0 SysReset : yes
Boot HART ID : 0
Boot HART Domain : root
Boot HART ISA : rv64imafdcsuh
Boot HART Features : scounteren,mcounteren,mcountinhibit,time
Boot HART PMP Count : 16
Boot HART PMP Granularity : 4
Boot HART PMP Address Bits: 54
Boot HART MHPM Count : 16
Boot HART MIDELEG : 0x0000000000001666
Boot HART MEDELEG : 0x0000000000f0b509
------------
NetBSD start
sp: 0x0000_0000_80a0_2000
pc: 0x0000_0000_8020_0090
hart: 0x0000_0000_0000_0000
dtb: 0x0000_0000_bfe0_0000
l1: 0x0000_0000_80a0_2000
l2: 0x0000_0000_80a0_3000
uspace: 0x0000_0000_80a0_0000
bootstk: 0x0000_0000_80a0_2000
vtopdiff:0xffff_ffbf_7fe0_0000
bss: 0x0000_0000_808a_8bdc - 0x0000_0000_80a0_4000
0x0000_0000_80a0_3800: 0x0000_0000_2028_0821
kern 0x0000_0000_80a0_2000: 0x0000_0000_2008_002f
kern 0x0000_0000_80a0_2008: 0x0000_0000_2010_002f
kern 0x0000_0000_80a0_2010: 0x0000_0000_2018_002f
kern 0x0000_0000_80a0_2018: 0x0000_0000_2020_002f
kern 0x0000_0000_80a0_2020: 0x0000_0000_2028_002f
kern 0x0000_0000_80a0_2028: 0x0000_0000_2030_002f
kern 0x0000_0000_80a0_2030: 0x0000_0000_2038_002f
kern 0x0000_0000_80a0_2038: 0x0000_0000_2040_002f
kern 0x0000_0000_80a0_2040: 0x0000_0000_2048_002f
kern 0x0000_0000_80a0_2048: 0x0000_0000_2050_002f
kern 0x0000_0000_80a0_2050: 0x0000_0000_2058_002f
kern 0x0000_0000_80a0_2058: 0x0000_0000_2060_002f
kern 0x0000_0000_80a0_2060: 0x0000_0000_2068_002f
kern 0x0000_0000_80a0_2068: 0x0000_0000_2070_002f
kern 0x0000_0000_80a0_2070: 0x0000_0000_2078_002f
kern 0x0000_0000_80a0_2078: 0x0000_0000_2080_002f
dtb 0x0000_0000_80a0_2080: 0x0000_0000_2ff8_0027
PM
[ 1.0000000] FDT<0xffffffc002000000>
[ 1.0000000] consinit ok
[ 1.0000000] NetBSD/riscv (fdt) booting ...
[ 1.0000000] FDT /memory @ 0x80000000 size 0x40000000
[ 1.0000000] init_riscv: memory start 80000000 end c0000000 (len 40000000)
[ 1.0000000] bootargs: root=ld4a -v -x
[ 1.0000000] bootflag 'r' not recognised
[ 1.0000000] bootflag 'o' not recognised
[ 1.0000000] bootflag 'o' not recognised
[ 1.0000000] bootflag 't' not recognised
[ 1.0000000] bootflag '=' not recognised
[ 1.0000000] bootflag 'l' not recognised
[ 1.0000000] bootflag ' ' not recognised
[ 1.0000000] bootflag ' ' not recognised
[ 1.0000000] ------------------------------------------
[ 1.0000000] kern_vtopdiff = 0xffffffbf7fe00000
[ 1.0000000] memory_start = 0x 80000000
[ 1.0000000] memory_end = 0x c0000000
[ 1.0000000] memory_size = 0x 40000000
[ 1.0000000] kernstart_phys = 0x 80200000
[ 1.0000000] kernend_phys = 0x 80a00000
[ 1.0000000] VM_MIN_KERNEL_ADDRESS = 0xffffffc000000000
[ 1.0000000] kernstart_mega = 0xffffffc000000000
[ 1.0000000] kernstart = 0xffffffc000000000
[ 1.0000000] kernend = 0xffffffc000800000
[ 1.0000000] kernend_mega = 0xffffffc000800000
[ 1.0000000] VM_MAX_KERNEL_ADDRESS = 0xffffffd000000000
[ 1.0000000] ------------------------------------------
[ 1.0000000] panic: kernel diagnostic assertion "msgbufaddr != 0" failed:
file "/home/nick/netbsd/nbcvs/src/sys/arch/riscv/riscv/riscv_machdep.c", line
564
To generate a diff of this commit:
cvs rdiff -u -r1.10 -r1.11 src/sys/arch/riscv/include/locore.h
cvs rdiff -u -r0 -r1.1 src/sys/arch/riscv/include/machdep.h
cvs rdiff -u -r1.9 -r1.10 src/sys/arch/riscv/include/pmap.h \
src/sys/arch/riscv/include/vmparam.h
cvs rdiff -u -r1.11 -r1.12 src/sys/arch/riscv/riscv/genassym.cf
cvs rdiff -u -r1.24 -r1.25 src/sys/arch/riscv/riscv/locore.S
cvs rdiff -u -r1.10 -r1.11 src/sys/arch/riscv/riscv/pmap_machdep.c
cvs rdiff -u -r1.17 -r1.18 src/sys/arch/riscv/riscv/riscv_machdep.c
Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:
Index: src/sys/arch/riscv/include/locore.h
diff -u src/sys/arch/riscv/include/locore.h:1.10 src/sys/arch/riscv/include/locore.h:1.11
--- src/sys/arch/riscv/include/locore.h:1.10 Tue Oct 5 11:01:49 2021
+++ src/sys/arch/riscv/include/locore.h Tue Sep 20 07:18:23 2022
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.h,v 1.10 2021/10/05 11:01:49 jmcneill Exp $ */
+/* $NetBSD: locore.h,v 1.11 2022/09/20 07:18:23 skrll Exp $ */
/*-
* Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -216,7 +216,6 @@ void cpu_lwp_trampoline(void);
void * cpu_sendsig_getframe(struct lwp *, int, bool *);
-void init_riscv(vaddr_t, vaddr_t);
#endif
#endif /* _RISCV_LOCORE_H_ */
Index: src/sys/arch/riscv/include/pmap.h
diff -u src/sys/arch/riscv/include/pmap.h:1.9 src/sys/arch/riscv/include/pmap.h:1.10
--- src/sys/arch/riscv/include/pmap.h:1.9 Sat May 1 07:41:24 2021
+++ src/sys/arch/riscv/include/pmap.h Tue Sep 20 07:18:23 2022
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.9 2021/05/01 07:41:24 skrll Exp $ */
+/* $NetBSD: pmap.h,v 1.10 2022/09/20 07:18:23 skrll Exp $ */
/*
* Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
@@ -117,12 +117,10 @@ pmap_procwr(struct proc *p, vaddr_t va,
#define __HAVE_PMAP_MD
struct pmap_md {
- paddr_t md_ptbr;
+ paddr_t md_ppn;
pd_entry_t *md_pdetab;
};
-void pmap_bootstrap(void);
-
struct vm_page *
pmap_md_alloc_poolpage(int flags);
vaddr_t pmap_md_map_poolpage(paddr_t, vsize_t);
@@ -132,13 +130,14 @@ bool pmap_md_io_vaddr_p(vaddr_t);
paddr_t pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t);
vaddr_t pmap_md_direct_map_paddr(paddr_t);
void pmap_md_init(void);
-bool pmap_md_tlb_check_entry(void *, vaddr_t, tlb_asid_t, pt_entry_t);
void pmap_md_xtab_activate(struct pmap *, struct lwp *);
void pmap_md_xtab_deactivate(struct pmap *);
void pmap_md_pdetab_init(struct pmap *);
bool pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);
+void pmap_bootstrap(vaddr_t kstart, vaddr_t kend);
+
extern vaddr_t pmap_direct_base;
extern vaddr_t pmap_direct_end;
#define PMAP_DIRECT_MAP(pa) (pmap_direct_base + (pa))
@@ -148,6 +147,14 @@ extern vaddr_t pmap_direct_end;
#define MEGAPAGE_ROUND(x) MEGAPAGE_TRUNC((x) + SEGOFSET)
#ifdef __PMAP_PRIVATE
+
+static inline bool
+pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
+{
+ // TLB not walked and so not called.
+ return false;
+}
+
static inline void
pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *kc)
{
Index: src/sys/arch/riscv/include/vmparam.h
diff -u src/sys/arch/riscv/include/vmparam.h:1.9 src/sys/arch/riscv/include/vmparam.h:1.10
--- src/sys/arch/riscv/include/vmparam.h:1.9 Sat May 1 07:41:24 2021
+++ src/sys/arch/riscv/include/vmparam.h Tue Sep 20 07:18:23 2022
@@ -1,4 +1,4 @@
-/* $NetBSD: vmparam.h,v 1.9 2021/05/01 07:41:24 skrll Exp $ */
+/* $NetBSD: vmparam.h,v 1.10 2022/09/20 07:18:23 skrll Exp $ */
/*-
* Copyright (c) 2014, 2020 The NetBSD Foundation, Inc.
@@ -126,8 +126,15 @@
#define VM_MAX_KERNEL_ADDRESS ((vaddr_t)-0x40000000) /* 0xffffffffc0000000 */
#endif
-#define VM_KERNEL_VM_BASE VM_MIN_KERNEL_ADDRESS
-#define VM_KERNEL_VM_SIZE 0x2000000 /* 32 MiB (8 / 16 megapages) */
+#define VM_KERNEL_BASE VM_MIN_KERNEL_ADDRESS
+#define VM_KERNEL_SIZE 0x2000000 /* 32 MiB (8 / 16 megapages) */
+#define VM_KERNEL_DTB_BASE (VM_KERNEL_BASE + VM_KERNEL_SIZE)
+#define VM_KERNEL_DTB_SIZE 0x2000000 /* 32 MiB (8 / 16 megapages) */
+
+#define VM_KERNEL_RESERVED (VM_KERNEL_SIZE + VM_KERNEL_DTB_SIZE)
+
+#define VM_KERNEL_VM_BASE (VM_MIN_KERNEL_ADDRESS + VM_KERNEL_RESERVED)
+#define VM_KERNEL_VM_SIZE (VM_MAX_KERNEL_ADDRESS - VM_KERNEL_VM_BASE)
#define VM_MAX_ADDRESS VM_MAXUSER_ADDRESS
#define VM_MAXUSER_ADDRESS32 ((vaddr_t)(1UL << 31))/* 0x0000000080000000 */
Index: src/sys/arch/riscv/riscv/genassym.cf
diff -u src/sys/arch/riscv/riscv/genassym.cf:1.11 src/sys/arch/riscv/riscv/genassym.cf:1.12
--- src/sys/arch/riscv/riscv/genassym.cf:1.11 Sun Sep 11 15:31:12 2022
+++ src/sys/arch/riscv/riscv/genassym.cf Tue Sep 20 07:18:23 2022
@@ -1,4 +1,4 @@
-# $NetBSD: genassym.cf,v 1.11 2022/09/11 15:31:12 skrll Exp $
+# $NetBSD: genassym.cf,v 1.12 2022/09/20 07:18:23 skrll Exp $
#-
# Copyright (c) 2014 The NetBSD Foundation, Inc.
@@ -49,18 +49,13 @@ include <uvm/uvm_extern.h>
include <riscv/locore.h>
include <riscv/sysreg.h>
-#define SR_IM SR_IM
-#define SR_IM_LSHIFT __SIZEOF_LONG__ * 8 - (ilog2(SR_IM) + 1)
-#define SR_IM_RSHIFT ilog2(__LOWEST_SET_BIT(SR_IM))
-#define SR_VM SR_VM
-#define SR_U64 SR_U64
-#define SR_S64 SR_S64
-#define SR_EF SR_EF
-#define SR_PEI SR_PEI
-#define SR_EI SR_EI
-#define SR_PS SR_PS
-#define SR_S SR_S
+
+define SR_SPP SR_SPP
define SR_SIE SR_SIE
+define SR_FS SR_FS
+# define SR_PS SR_PS
+# define SR_S SR_S
+define SR_SUM SR_SUM
define CAUSE_SYSCALL CAUSE_SYSCALL
@@ -161,6 +156,7 @@ define FB_SR offsetof(struct faultbuf,
define PAGE_SIZE PAGE_SIZE
define PAGE_MASK PAGE_MASK
define PAGE_SHIFT PAGE_SHIFT
+define UPAGES UPAGES
define USRSTACK USRSTACK
ifdef __HAVE_FAST_SOFTINTS
@@ -186,6 +182,8 @@ define RW_READER RW_READER
define VM_MIN_KERNEL_ADDRESS VM_MIN_KERNEL_ADDRESS
define VM_MAX_KERNEL_ADDRESS VM_MAX_KERNEL_ADDRESS
+define VM_KERNEL_BASE VM_KERNEL_BASE
+define VM_KERNEL_SIZE VM_KERNEL_SIZE
define USPACE USPACE
ifdef XSEGSHIFT
@@ -196,6 +194,7 @@ define PGSHIFT PGSHIFT
define NPDEPG NPDEPG
define NBSEG NBSEG
+# Constants from pte.h
define PTE_D PTE_D
define PTE_A PTE_A
define PTE_G PTE_G
@@ -205,8 +204,29 @@ define PTE_W PTE_W
define PTE_R PTE_R
define PTE_V PTE_V
+define PTE_KERN PTE_KERN
+
+define L0_SHIFT L0_SHIFT
+define L1_SHIFT L1_SHIFT
+define L1_SIZE L1_SIZE
+define L1_OFFSET L1_OFFSET
+define L2_SHIFT L2_SHIFT
+define L2_SIZE L2_SIZE
+define L2_OFFSET L2_OFFSET
+#define L3_SHIFT L3_SHIFT
+#define L3_SIZE L3_SIZE
+#define L3_OFFSET L3_OFFSET
+define Ln_ENTRIES Ln_ENTRIES
+define Ln_ADDR_MASK Ln_ADDR_MASK
+#define PTE_PPN0_S PTE_PPN0_S
+#define PTE_PPN1_S PTE_PPN1_S
+#define PTE_PPN2_S PTE_PPN2_S
+#define PTE_PPN3_S PTE_PPN3_S
+#define PTE_SIZE PTE_SIZE
+define PTE_PPN_SHIFT PTE_PPN_SHIFT
+
define PM_MD_PDETAB offsetof(struct pmap, pm_md.md_pdetab)
-define PM_MD_PTBR offsetof(struct pmap, pm_md.md_ptbr)
+define PM_MD_PPN offsetof(struct pmap, pm_md.md_ppn)
# for bus_space_asm
define BS_STRIDE offsetof(struct bus_space, bs_stride)
Index: src/sys/arch/riscv/riscv/locore.S
diff -u src/sys/arch/riscv/riscv/locore.S:1.24 src/sys/arch/riscv/riscv/locore.S:1.25
--- src/sys/arch/riscv/riscv/locore.S:1.24 Sun Apr 10 09:50:45 2022
+++ src/sys/arch/riscv/riscv/locore.S Tue Sep 20 07:18:23 2022
@@ -1,11 +1,11 @@
-/* $NetBSD: locore.S,v 1.24 2022/04/10 09:50:45 andvar Exp $ */
+/* $NetBSD: locore.S,v 1.25 2022/09/20 07:18:23 skrll Exp $ */
/*-
- * Copyright (c) 2014 The NetBSD Foundation, Inc.
+ * Copyright (c) 2014, 2022 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
- * by Matt Thomas of 3am Software Foundry.
+ * by Matt Thomas of 3am Software Foundry, and by Nick Hudson.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,125 +29,524 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include "opt_console.h"
+#include "opt_riscv_debug.h"
+
#include <machine/asm.h>
#include "assym.h"
.globl _C_LABEL(exception_userexit)
.globl _C_LABEL(cpu_Debugger_insn)
+#if defined(VERBOSE_INIT_RISCV)
+
+#define VPRINTS(string) \
+ call locore_prints ; \
+ .asciz string ; \
+ .align 3 ; \
+
+#define VPRINTX(regno) \
+ mv a0, regno ; \
+ call locore_printx
+
+#define VPRINTXNL(regno) \
+ mv a0, regno ; \
+ call locore_printxnl
+
+/* Need to turn relaxation off for VPRINTS */
+ .option norelax
+
+#else
+#define VPRINTS(string) /* nothing */
+#define VPRINTX(regno) /* nothing */
+#define VPRINTXNL(regno) /* nothing */
+#endif
+
+#if VM_MIN_KERNEL_ADDRESS != VM_KERNEL_BASE
+#error VM_MIN_KERNEL_ADDRESS assumed to match VM_KERNEL_BASE
+#endif
+
+/*
+ * Entry point, where:
+ * a0 is hartid
+ * a1 is pointer to dtb (PA)
+ */
ENTRY_NP(start)
- // We get loaded and starting running at or near 0, not where we
- // should be. We need to construct an initial PDETAB
+ csrw sie, zero // disable interrupts
+ csrw sip, zero // clear any pending
- li s11, VM_MAX_KERNEL_ADDRESS
- li s10, PAGE_SIZE
- li s9, USPACE
+ li s0, SR_FS
+ csrc sstatus, s0 // disable FP
/*
- * XXX XXX XXX: This is completely broken and wrong, we should map only
- * the kernel sections, and the direct map should be mapped later in C.
+ * atomically swap a non-zero value into hart_boot. If we see zero
+ * we won in race to become BP.
*/
-#if 0
-#if 0
- // The kernel doesn't use gp/_gp since we'd have to reload it on
- // each exception.
- PTR_LA gp, _C_LABEL(_gp)
-#endif
+ li s1, 1
+ la s0, hart_boot
+
+ amoswap.w s0, s1, (s0)
+ bnez s0, mpentry
+ /*
+ * The BP only executes from here on.
+ */
+ mv s0, a0 // copy hartid
+ mv s1, a1 // copy dtb PA
+
+ /* set the stack pointer for boot */
+ PTR_LA s8, _C_LABEL(bootstk)
+ mv sp, s8
+
+ VPRINTS("\n------------\nNetBSD start\n\n")
+ VPRINTS("sp: ")
+ VPRINTXNL(sp)
+
+ VPRINTS("pc: ")
+ auipc a0, 0
+ VPRINTXNL(a0)
+
+ VPRINTS("hart: ")
+ VPRINTXNL(s0)
+
+ VPRINTS("dtb: ")
+ VPRINTXNL(s1)
+
+ /*
+ * Calculate the difference between the VA and PA for start and
+ * keep in s8. Store this in kern_vtopdiff once the MMU is on.
+ */
+ PTR_LA s11, start
+ PTR_L s8, .Lstart
+
+ sub s8, s8, s11
+
+ /*
+ * Our load address is not fixed, but our VA is. We need to construct
+ * an initial PDETAB.
+ */
+
+ li s10, PAGE_SIZE
+ li s9, USPACE
+
+ PTR_LA s5, _C_LABEL(lwp0uspace)
+ PTR_LA s6, _C_LABEL(bootstk)
+
+ // The space for the initial page table is included in the kernel
+ // .bss size calculation so we know the space exists.
- PTR_LA a0, _C_LABEL(__bss_start)
- PTR_LA s1, _C_LABEL(_end)
li a1, 0
+ PTR_LA s2, _C_LABEL(l1_pte)
+ mv s4, s2 // last page table
+#ifdef _LP64
+ PTR_LA s3, _C_LABEL(l2_pte) // s3 = second PDE page (RV64 only)
+ mv s4, s3 // last page table
+#ifdef notyet
+ PTR_LA s4, _C_LABEL(l3_pte)
+#endif
+#endif
+ PTR_LA s7, _C_LABEL(mmutables_end)
+
- add s1, s1, s10 // PAGE_SIZE
- addi s1, s1, -1 // -1 == PAGE_MASK
- neg a1, s10 // -PAGE_SIZE
- and s1, s1, a1 // s1 is page aligned end of kernel
- // s1 = uarea
- add s2, s1, s9 // s2 = first PDE page
+ // s2 L1 PDE (SV32:4MiB megapages, SV{39,48}: 2MiB megapages)
+ // s3 L2 PDE (_LP64 SV39 only)
+ // s4 L3 PDE (_LP64 SV48 only)
+ // s5 lwp0uspace
+ // s6 bootstk
+ // s7 end of memory to clear
+
+ VPRINTS("l1: ")
+ VPRINTXNL(s2)
#ifdef _LP64
- add s3, s2, s10 // s3 = second PDE page (RV64 only)
-#else
- mv s3, s2
+ VPRINTS("l2: ")
+ VPRINTXNL(s3)
+#ifdef notyet
+ VPRINTS("l3: ")
+ VPRINTXNL(s4)
#endif
- add s4, s3, s10 // s4 = first kernel PTE page
- add s5, s1, s9 // s5 = kernel_end
- sub a2, s5, a0
- call memset // zero through kernel_end
-
- // As a temporary hack, word 0 contains the amount of memory in MB
- INT_L a7, (zero) // load memory size
- slli a7, a7, (20-PGSHIFT) // convert MB to pages
- PTR_LA t0, physmem
- INT_S a7, (t0) // store it in physmem
+#endif
+
+ VPRINTS("uspace: ")
+ VPRINTXNL(s5)
+ VPRINTS("bootstk: ")
+ VPRINTXNL(s6)
+
+ VPRINTS("vtopdiff:")
+ VPRINTXNL(s8)
+
+ VPRINTS("\n\r")
+
+ VPRINTS("bss: ")
+ PTR_LA a0, _C_LABEL(__bss_start)
+ VPRINTX(a0)
+ VPRINTS(" - ")
+ VPRINTXNL(s7)
+
+ VPRINTS("\n\r")
+
+ // a0 start of memory to clear
+ // a1 end of memory to clear
+ PTR_LA a0, _C_LABEL(__bss_start)
+ mv a1, s7
+
+ call clear_bss // zero through kernel_end (inc. stack)
+
+ li s7, PTE_KERN // for megapages
+
+ // We allocated the kernel first PDE page so let's insert in the
+ // page table.
+
+ // Need to setup tables so that for
+ // sv32 : s2
+ // sv39 : s3 -> s2
+ // sv48 : s4 -> s3 -> s2
- li t4, PTE_V | PTE_SX | PTE_SW | PTE_SR | PTE_G
#ifdef _LP64
- REG_S t4, 0(s2) // keep a mapping for the first 8GB.
- or t0, s3, t4 // point to next page
- or t0, t0, PTE_T // tranverse it.
- REG_S t0, -SZREG(s3) // store in highest first level PDE
-#endif
-
-#if (VM_MIN_KERNEL_ADDRESS >> XSEGSHIFT) != (VM_MAX_KERNEL_ADDRESS >> XSEGSHIFT)
-#error VM_MIN_KERNEL_ADDRESS not in same first level PDE as VM_MAX_KERNEL_ADDRESS
-#endif
- // We allocated the kernel first PTE page so let's insert in the
- // page table. For now, we assume it's in the same PDE page as the
- // direct-mapped memory.
- or t0, s4, t4
- or t0, t0, PTE_T
-#if ((VM_MIN_KERNEL_ADDRESS >> SEGSHIFT) & (NPDEPG-1)) * SZREG
- li t1, ((VM_MIN_KERNEL_ADDRESS >> SEGSHIFT) & (NPDEPG-1)) * SZREG
+ srli t0, s2, (PGSHIFT - PTE_PPN_SHIFT)
+ or t0, t0, s7 // Assumes s2[11:0] == 0
+#if ((VM_MIN_KERNEL_ADDRESS >> XSEGSHIFT) & (NPDEPG - 1)) * SZREG
+ li t1, ((VM_MIN_KERNEL_ADDRESS >> XSEGSHIFT) & (NPDEPG - 1)) * SZREG
+#ifdef notyet
+ add t1, t1, s4
+#else
add t1, t1, s3
+#endif
REG_S t0, 0(t1)
+
+ VPRINTX(t1)
+#else
+#ifdef notyet
+ REG_S t0, 0(s4)
#else
REG_S t0, 0(s3)
#endif
- li t0, ((VM_MAX_KERNEL_ADDRESS >> SEGSHIFT) & (NPDEPG-1)) * SZREG
- add s3, s3, t0
- srli a7, a7, (SEGSHIFT-PGSHIFT) // pages to segments
- li t3, NBSEG // load for ease
+ VPRINTX(s3)
+#endif
+#endif
+
+ VPRINTS(": ")
+ VPRINTXNL(t0)
+ VPRINTS("\n\r")
+
+#if PGSHIFT < PTE_PPN_SHIFT
+#error Code assumes PGSHIFT is greater than PTE_PPN_SHIFT
+#endif
+
+ li s5, (VM_KERNEL_SIZE >> SEGSHIFT) // # of megapages
+ li s6, (NBSEG >> (PGSHIFT - PTE_PPN_SHIFT)) // load for ease
+ li s7, PTE_KERN | PTE_R | PTE_W | PTE_X
//
- // Fill in the PDEs to direct map memory.
+ // Fill in the PDEs for kernel.
//
-.Lfill: REG_S t4, 0(s3) // store PDE
- add t4, t4, t3 // advance PA in PDE to next segment
- add s3, s3, SZREG // advance to next PDE slot
- addi a7, a7, -1 // count down segment
- bgtz a6, .Lfill // loop if more
+ PTR_LA s0, start
+ srli s0, s0, (PGSHIFT - PTE_PPN_SHIFT)
+ or s0, s0, s7
+.Lfill:
+ VPRINTS("kern ")
+ VPRINTX(s2)
+ VPRINTS(": ")
+ VPRINTXNL(s0)
+
+ REG_S s0, 0(s2) // store PDE
+ add s0, s0, s6 // advance PA in PDE to next segment
+ add s2, s2, SZREG // advance to next PDE slot
+ addi s5, s5, -1 // count down segment
+ bnez s5, .Lfill // loop if more
+
+ li s7, PTE_KERN | PTE_R | PTE_W
+
+ // DTB physical address
+ mv s0, s1
+ srli s0, s0, SEGSHIFT // round down to NBSEG, and shift in
+ slli s0, s0, (SEGSHIFT - PGSHIFT + PTE_PPN_SHIFT) // ... to PPN
+ or s0, s0, s7
+
+ VPRINTS("dtb ")
+ VPRINTX(s2)
+ VPRINTS(": ")
+ VPRINTXNL(s0)
+
+ REG_S s0, 0(s2)
+ add s2, s2, SZREG // advance to next PDE slot
+
+#ifdef CONSADDR
+ ld s0, .Lconsaddr
+ srli s0, s0, SEGSHIFT // round down to NBSEG, and shift in
+ slli s0, s0, (SEGSHIFT - PGSHIFT + PTE_PPN_SHIFT) // ... to PPN
+ or s0, s0, s7
+
+ VPRINTS("cons ")
+ VPRINTX(s2)
+ VPRINTS(": ")
+ VPRINTXNL(s0)
+
+ REG_S s0, 0(s2)
+ add s2, s2, SZREG // advance to next PDE slot
#endif
- // We should have a VM so let's start using our real addresses
+ li a0, 'P'
+ call _C_LABEL(uartputc)
+
+ /* Set supervisor trap vector base register */
PTR_LA t0, .Lmmu_on
+ add t0, t0, s8
+ csrw stvec, t0
+
+ /* Set supervisor address translation and protection register */
+ srli t1, s4, PGSHIFT
+#ifdef _LP64
+ li t0, SATP_MODE_SV39
+#else
+ li t0, SATP_MODE_SV32
+#endif
+ or t0, t0, t1
+ sfence.vma
+ csrw satp, t0
+ .align 2
.Lmmu_on:
// MMU is on!
csrw sscratch, zero // zero in sscratch to mark kernel
+ li a0, 'M'
+ call _C_LABEL(uartputc) // uartputc doesn't use stack
+ li a0, '\n'
+ call _C_LABEL(uartputc) // uartputc doesn't use stack
+ li a0, '\r'
+ call _C_LABEL(uartputc) // uartputc doesn't use stack
+
PTR_LA tp, _C_LABEL(lwp0) // put curlwp in tp
+ .global vstart
+vstart:
+
+ /* Set supervisor trap vector base register */
PTR_LA a0, _C_LABEL(cpu_exception_handler)
csrw stvec, a0
- PTR_S s1, L_PCB(tp) // set uarea of lwp (already zeroed)
+ PTR_LA s2, bootstk // top of lwp0uspace
+ PTR_S s2, L_PCB(tp) // set uarea of lwp (already zeroed)
addi sp, s2, -TF_LEN // switch to new stack
PTR_S sp, L_MD_UTF(tp) // store pointer to empty trapframe
PTR_LA t1, _C_LABEL(kernel_pmap_store)
- add t2, s2, s11 // PA -> VA
+ add t2, s4, s8 // PA -> VA
+ srli t3, s4, PGSHIFT
PTR_S t2, PM_MD_PDETAB(t1) // VA of kernel PDETAB
- PTR_S s2, PM_MD_PTBR(t1) // PA of kernel PDETAB
+ PTR_S t3, PM_MD_PPN(t1) // PPN of kernel PDETAB
+
+ /*
+ * Store kern_vtopdiff (the difference between the physical
+ * and virtual address of the "start" symbol).
+ */
+ PTR_LA s11, _C_LABEL(kern_vtopdiff)
+ PTR_S s8, 0(s11) /* kern_vtopdiff = start(virt) - start(phys) */
+
+#if notyet
+ mv a0, s1 // dtb
+ call _C_LABEL(init_mmu)
+#endif
+
+ li t0, VM_MIN_KERNEL_ADDRESS + VM_KERNEL_SIZE
+ li t1, NBSEG - 1
+ and t1, s1, t1
+ or t0, t0, t1
+
+ /* Set the global pointer */
+ .option push
+ .option norelax
+ lla gp, __global_pointer$
+ .option pop
// Now we should ready to start initializing the kernel.
- PTR_LA a0, _C_LABEL(start) // kernel_start
- add a1, s5, s11 // kernel_end
+ mv a0, s0 // hartid
+ mv a1, t0 // vdtb
+ //mv a1, s1 // dtb (physical)
call _C_LABEL(init_riscv) // do MD startup
tail _C_LABEL(main) // and transfer to main
- // not reached
+ /* No return from main */
END(start)
+
+ENTRY(mpentry)
+1:
+ wfi
+ j 1b
+END(mpentry)
+
+
+ .align 3
+.Lstart:
+#ifdef _LP64
+ .quad start
+#else
+ .word start
+#endif
+
+
+#ifdef CONSADDR
+ .align 3
+.Lconsaddr:
+#ifdef _LP64
+ .quad CONSADDR
+#else
+ .word CONSADDR
+#endif
+#endif
+
+
+ENTRY_NP(uartputc)
+#ifdef EARLYCONS
+ tail ___CONCAT(EARLYCONS, _platform_early_putchar)
+#else
+#define SBI_LEGACY_CONSOLE_PUTCHAR 1
+ li a7, SBI_LEGACY_CONSOLE_PUTCHAR
+ ecall
+ ret
+#endif
+END(uartputc)
+
+
+ENTRY_NP(clear_bss)
+ bgeu a0, a1, 1f
+2:
+ sb zero, 0(a0)
+ addi a0, a0, 1
+ bne a1, a0, 2b
+1:
+ ret
+END(clear_bss)
+
+
+#if defined(VERBOSE_INIT_RISCV)
+ENTRY_NP(locore_prints)
+ addi sp, sp, -(SZREG * 2)
+ REG_S s0, (0 * SZREG)(sp)
+ mv s0, ra
+1:
+ lbu a0, 0(s0)
+ beqz a0, 2f
+
+ call uartputc
+
+ addi s0, s0, 1
+ j 1b
+2:
+ addi s0, s0, 8 // s0 points to the null terminator
+ andi ra, s0, -8
+
+ REG_L s0, (0 * SZREG)(sp)
+ addi sp, sp, (SZREG * 2)
+ ret
+
+END(locore_prints)
+
+
+ENTRY_NP(locore_printx)
+ addi sp, sp, -(SZREG * 4)
+ REG_S s0, (0 * SZREG)(sp)
+ REG_S s1, (1 * SZREG)(sp)
+ REG_S s2, (2 * SZREG)(sp)
+ REG_S ra, (3 * SZREG)(sp)
+
+ mv s1, a0 // our print value
+ li s2, 10
+
+ li a0, '0'
+ call uartputc
+ li a0, 'x'
+ call uartputc
+
+ // Word size in bits
+ li s0, (SZREG * 8)
+1:
+ addi s0, s0, -4 // nibble shift
+
+ srl a0, s1, s0 // extract ...
+ andi a0, a0, 0xf
+
+ bltu a0, s2, 2f
+ addi a0, a0, ('a' - '0' - 10)
+2: addi a0, a0, '0'
+
+ call uartputc
+
+ beqz s0, 3f
+
+ and a0, s0, (16 - 1)
+ bnez a0, 1b
+
+ li a0, '_'
+ call uartputc
+
+ j 1b
+
+3:
+ REG_L s0, (0 * SZREG)(sp)
+ REG_L s1, (1 * SZREG)(sp)
+ REG_L s2, (2 * SZREG)(sp)
+ REG_L ra, (3 * SZREG)(sp)
+ addi sp, sp, (SZREG * 4)
+ ret
+END(locore_printx)
+
+
+ENTRY_NP(locore_printxnl)
+ addi sp, sp, -(SZREG * 2)
+ REG_S ra, (1 * SZREG)(sp)
+
+ call locore_printx
+ li a0, '\n'
+ call uartputc
+
+ li a0, '\r'
+ call uartputc
+
+ REG_L ra, (1 * SZREG)(sp)
+ addi sp, sp, (SZREG * 2)
+
+ ret
+END(locore_printxnl)
+#endif /* VERBOSE_INIT_RISCV */
+
+
+ .data
+ .align 2
+hart_boot:
+ .word 0
+
+ .section "_init_memory", "aw", %nobits
+ .align PGSHIFT
+ .global _C_LABEL(lwp0uspace)
+_C_LABEL(lwp0uspace):
+ .space UPAGES * PAGE_SIZE
+bootstk:
+
+ /*
+ * Allocate some memory after the kernel image for stacks and
+ * bootstrap L1PT
+ */
+ .align PGSHIFT
+
+ .section "_init_memory", "aw", %nobits
+ .align PGSHIFT
+mmutables_start:
+ .global _C_LABEL(l1_pte)
+l1_pte:
+ .space PAGE_SIZE
+#ifdef _LP64
+ .global _C_LABEL(l2_pte)
+l2_pte:
+ .space PAGE_SIZE
+#ifdef notyet
+l3_pte:
+ .space PAGE_SIZE
+#endif
+#endif
+mmutables_end:
+
+
//
// struct lwp *cpu_switchto(struct lwp *oldl, struct lwp *newl, bool returning);
//
Index: src/sys/arch/riscv/riscv/pmap_machdep.c
diff -u src/sys/arch/riscv/riscv/pmap_machdep.c:1.10 src/sys/arch/riscv/riscv/pmap_machdep.c:1.11
--- src/sys/arch/riscv/riscv/pmap_machdep.c:1.10 Sat Oct 30 07:18:46 2021
+++ src/sys/arch/riscv/riscv/pmap_machdep.c Tue Sep 20 07:18:23 2022
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap_machdep.c,v 1.10 2021/10/30 07:18:46 skrll Exp $ */
+/* $NetBSD: pmap_machdep.c,v 1.11 2022/09/20 07:18:23 skrll Exp $ */
/*
* Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
@@ -30,17 +30,26 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
+#include "opt_riscv_debug.h"
+
#define __PMAP_PRIVATE
#include <sys/cdefs.h>
-
-__RCSID("$NetBSD: pmap_machdep.c,v 1.10 2021/10/30 07:18:46 skrll Exp $");
+__RCSID("$NetBSD: pmap_machdep.c,v 1.11 2022/09/20 07:18:23 skrll Exp $");
#include <sys/param.h>
+#include <sys/buf.h>
#include <uvm/uvm.h>
-#include <riscv/locore.h>
+#include <riscv/machdep.h>
+#include <riscv/sysreg.h>
+
+#ifdef VERBOSE_INIT_RISCV
+#define VPRINTF(...) printf(__VA_ARGS__)
+#else
+#define VPRINTF(...) __nothing
+#endif
int riscv_poolpage_vmfreelist = VM_FREELIST_DEFAULT;
@@ -48,13 +57,6 @@ vaddr_t pmap_direct_base __read_mostly;
vaddr_t pmap_direct_end __read_mostly;
void
-pmap_bootstrap(void)
-{
-
- pmap_bootstrap_common();
-}
-
-void
pmap_zero_page(paddr_t pa)
{
#ifdef _LP64
@@ -124,7 +126,8 @@ pmap_md_direct_mapped_vaddr_to_paddr(vad
#ifdef PMAP_DIRECT_MAP
return PMAP_DIRECT_UNMAP(va);
#else
-#error "no direct map"
+ KASSERT(false);
+ return 0;
#endif
#else
KASSERT(false);
@@ -150,29 +153,122 @@ pmap_md_ok_to_steal_p(const uvm_physseg_
return true;
}
-bool
-pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
-{
- return false;
-}
void
pmap_md_xtab_activate(struct pmap *pmap, struct lwp *l)
{
- __asm("csrw\tsptbr, %0" :: "r"(pmap->pm_md.md_ptbr));
+ struct pmap_asid_info * const pai = PMAP_PAI(pmap, cpu_tlb_info(ci));
+
+ uint64_t satp =
+#ifdef _LP64
+ __SHIFTIN(SATP_MODE_SV39, SATP_MODE) |
+#else
+ __SHIFTIN(SATP_MODE_SV32, SATP_MODE) |
+#endif
+ __SHIFTIN(pai->pai_asid, SATP_ASID) |
+ __SHIFTIN(pmap->pm_md.md_ppn, SATP_PPN);
+
+ riscvreg_satp_write(satp);
}
void
pmap_md_xtab_deactivate(struct pmap *pmap)
{
+
+ riscvreg_satp_write(0);
}
void
pmap_md_pdetab_init(struct pmap *pmap)
{
+ KASSERT(pmap != NULL);
+
+ const vaddr_t pdetabva = (vaddr_t)pmap->pm_md.md_pdetab;
+ const paddr_t pdetabpa = pmap_md_direct_mapped_vaddr_to_paddr(pdetabva);
pmap->pm_md.md_pdetab[NPDEPG-1] = pmap_kernel()->pm_md.md_pdetab[NPDEPG-1];
- pmap->pm_md.md_ptbr =
- pmap_md_direct_mapped_vaddr_to_paddr((vaddr_t)pmap->pm_md.md_pdetab);
+ pmap->pm_md.md_ppn = pdetabpa >> PAGE_SHIFT;
+}
+
+void
+pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
+{
+ extern pd_entry_t l1_pte[512];
+ pmap_t pm = pmap_kernel();
+
+ pmap_bootstrap_common();
+
+ /* Use the tables we already built in init_mmu() */
+ pm->pm_md.md_pdetab = l1_pte;
+
+ /* Get the PPN for l1_pte */
+ pm->pm_md.md_ppn = atop(KERN_VTOPHYS((vaddr_t)l1_pte));
+
+ /* Setup basic info like pagesize=PAGE_SIZE */
+ uvm_md_init();
+
+ /* init the lock */
+ pmap_tlb_info_init(&pmap_tlb0_info);
+
+#ifdef MULTIPROCESSOR
+ VPRINTF("kcpusets ");
+
+ kcpuset_create(&pm->pm_onproc, true);
+ kcpuset_create(&pm->pm_active, true);
+ KASSERT(pm->pm_onproc != NULL);
+ KASSERT(pm->pm_active != NULL);
+ kcpuset_set(pm->pm_onproc, cpu_number());
+ kcpuset_set(pm->pm_active, cpu_number());
+#endif
+
+ VPRINTF("nkmempages ");
+ /*
+ * Compute the number of pages kmem_arena will have. This will also
+ * be called by uvm_km_bootstrap later, but that doesn't matter
+ */
+ kmeminit_nkmempages();
+
+ /* Get size of buffer cache and set an upper limit */
+ buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
+ vsize_t bufsz = buf_memcalc();
+ buf_setvalimit(bufsz);
+
+ vsize_t kvmsize = (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) +
+ bufsz + 16 * NCARGS + pager_map_size) +
+ /*(maxproc * UPAGES) + */nkmempages * NBPG;
+
+#ifdef SYSVSHM
+ kvmsize += shminfo.shmall;
+#endif
+
+ /* Calculate VA address space and roundup to NBSEG tables */
+ kvmsize = roundup(kvmsize, NBSEG);
+
+ /*
+ * Initialize `FYI' variables. Note we're relying on
+ * the fact that BSEARCH sorts the vm_physmem[] array
+ * for us. Must do this before uvm_pageboot_alloc()
+ * can be called.
+ */
+ pmap_limits.avail_start = ptoa(uvm_physseg_get_start(uvm_physseg_get_first()));
+ pmap_limits.avail_end = ptoa(uvm_physseg_get_end(uvm_physseg_get_last()));
+
+ /*
+ * Update the naive settings in pmap_limits to the actual KVA range.
+ */
+ pmap_limits.virtual_start = vstart;
+ pmap_limits.virtual_end = vend;
+
+ VPRINTF("\nlimits: %" PRIxVADDR " - %" PRIxVADDR "\n", vstart, vend);
+
+ /*
+ * Initialize the pools.
+ */
+ pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
+ &pool_allocator_nointr, IPL_NONE);
+ pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
+ &pmap_pv_page_allocator, IPL_NONE);
+
+ pmap_pvlist_lock_init(/*riscv_dcache_align*/ 64);
}
/* -------------------------------------------------------------------------- */
@@ -196,18 +292,37 @@ void tlb_invalidate_globals(void);
void
tlb_invalidate_asids(tlb_asid_t lo, tlb_asid_t hi)
{
- __asm __volatile("sfence.vm" ::: "memory");
+ for (; lo <= hi; lo++) {
+ __asm __volatile("sfence.vma zero, %[asid]"
+ : /* output operands */
+ : [asid] "r" (lo)
+ : "memory");
+ }
}
void
tlb_invalidate_addr(vaddr_t va, tlb_asid_t asid)
{
- __asm __volatile("sfence.vm" ::: "memory");
+ if (asid == KERNEL_PID) {
+ __asm __volatile("sfence.vma %[va]"
+ : /* output operands */
+ : [va] "r" (va)
+ : "memory");
+ } else {
+ __asm __volatile("sfence.vma %[va], %[asid]"
+ : /* output operands */
+ : [va] "r" (va), [asid] "r" (asid)
+ : "memory");
+ }
}
bool
tlb_update_addr(vaddr_t va, tlb_asid_t asid, pt_entry_t pte, bool insert_p)
{
- __asm __volatile("sfence.vm" ::: "memory");
+ KASSERT(asid != KERNEL_PID);
+ __asm __volatile("sfence.vma %[va], %[asid]"
+ : /* output operands */
+ : [va] "r" (va), [asid] "r" (asid)
+ : "memory");
return false;
}
@@ -216,13 +331,19 @@ tlb_record_asids(u_long *ptr, tlb_asid_t
{
memset(ptr, 0xff, PMAP_TLB_NUM_PIDS / NBBY);
ptr[0] = -2UL;
+
return PMAP_TLB_NUM_PIDS - 1;
}
+void
+tlb_walk(void *ctx, bool (*func)(void *, vaddr_t, tlb_asid_t, pt_entry_t))
+{
+ /* no way to view the TLB */
+}
+
#if 0
void tlb_enter_addr(size_t, const struct tlbmask *);
void tlb_read_entry(size_t, struct tlbmask *);
void tlb_write_entry(size_t, const struct tlbmask *);
-void tlb_walk(void *, bool (*)(void *, vaddr_t, tlb_asid_t, pt_entry_t));
void tlb_dump(void (*)(const char *, ...));
#endif
Index: src/sys/arch/riscv/riscv/riscv_machdep.c
diff -u src/sys/arch/riscv/riscv/riscv_machdep.c:1.17 src/sys/arch/riscv/riscv/riscv_machdep.c:1.18
--- src/sys/arch/riscv/riscv/riscv_machdep.c:1.17 Tue Sep 20 06:53:36 2022
+++ src/sys/arch/riscv/riscv/riscv_machdep.c Tue Sep 20 07:18:24 2022
@@ -1,11 +1,11 @@
-/* $NetBSD: riscv_machdep.c,v 1.17 2022/09/20 06:53:36 skrll Exp $ */
+/* $NetBSD: riscv_machdep.c,v 1.18 2022/09/20 07:18:24 skrll Exp $ */
/*-
- * Copyright (c) 2014, 2019 The NetBSD Foundation, Inc.
+ * Copyright (c) 2014, 2019, 2022 The NetBSD Foundation, Inc.
* All rights reserved.
*
* This code is derived from software contributed to The NetBSD Foundation
- * by Matt Thomas of 3am Software Foundry.
+ * by Matt Thomas of 3am Software Foundry, and by Nick Hudson.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,32 +29,73 @@
* POSSIBILITY OF SUCH DAMAGE.
*/
-#include <sys/cdefs.h>
-
#include "opt_modular.h"
-__RCSID("$NetBSD: riscv_machdep.c,v 1.17 2022/09/20 06:53:36 skrll Exp $");
+#include "opt_riscv_debug.h"
+
+#include <sys/cdefs.h>
+__RCSID("$NetBSD: riscv_machdep.c,v 1.18 2022/09/20 07:18:24 skrll Exp $");
#include <sys/param.h>
+#include <sys/boot_flag.h>
#include <sys/cpu.h>
#include <sys/exec.h>
#include <sys/kmem.h>
#include <sys/ktrace.h>
#include <sys/lwp.h>
#include <sys/module.h>
+#include <sys/msgbuf.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/systm.h>
+#include <dev/cons.h>
#include <uvm/uvm_extern.h>
#include <riscv/locore.h>
+#include <riscv/machdep.h>
+#include <riscv/pte.h>
int cpu_printfataltraps;
char machine[] = MACHINE;
char machine_arch[] = MACHINE_ARCH;
+#include <libfdt.h>
+#include <dev/fdt/fdtvar.h>
+#include <dev/fdt/fdt_memory.h>
+
+#ifdef VERBOSE_INIT_RISCV
+#define VPRINTF(...) printf(__VA_ARGS__)
+#else
+#define VPRINTF(...) __nothing
+#endif
+
+#ifndef FDT_MAX_BOOT_STRING
+#define FDT_MAX_BOOT_STRING 1024	/* max length of bootargs copied from the FDT */
+#endif
+
+/* Copy of the FDT /chosen "bootargs" property; boot_args points at it. */
+char bootargs[FDT_MAX_BOOT_STRING] = "";
+char *boot_args = NULL;
+
+/*
+ * Early console output: emit one character via the low-level
+ * uartputc() before the real console is attached.  'dev' is ignored.
+ */
+static void
+earlyconsputc(dev_t dev, int c)
+{
+	uartputc(c);
+}
+
+/*
+ * Early console input: no input is available before the real console
+ * attaches, so always return 0.
+ */
+static int
+earlyconsgetc(dev_t dev)
+{
+	return 0;
+}
+
+/* Minimal console device used until consinit() attaches the real one. */
+static struct consdev earlycons = {
+	.cn_putc = earlyconsputc,
+	.cn_getc = earlyconsgetc,
+	.cn_pollc = nullcnpollc,
+};
+
struct vm_map *phys_map;
struct trapframe cpu_ddb_regs;
@@ -70,6 +111,12 @@ const pcu_ops_t * const pcu_ops_md_defs[
#endif
};
+/*
+ * Offset between kernel virtual and physical addresses, used by
+ * KERN_PHYSTOV and KERN_VTOPHYS.  It is set before the BSS is zeroed,
+ * so keep it in the data section to avoid it being wiped.
+ */
+unsigned long kern_vtopdiff __attribute__((__section__(".data")));
+
void
delay(unsigned long us)
{
@@ -338,10 +385,191 @@ cpu_startup(void)
printf("avail memory = %s\n", pbuf);
}
+/*
+ * Attach the statically allocated uarea to lwp0 and carve the initial
+ * trapframe out of its top; md and pcb state start out zeroed.
+ */
+static void
+riscv_init_lwp0_uarea(void)
+{
+	extern char lwp0uspace[];
+
+	uvm_lwp_setuarea(&lwp0, (vaddr_t)lwp0uspace);
+	memset(&lwp0.l_md, 0, sizeof(lwp0.l_md));
+	memset(lwp_getpcb(&lwp0), 0, sizeof(struct pcb));
+
+	/* The trapframe sits at the very top of the uarea. */
+	struct trapframe *tf = (struct trapframe *)(lwp0uspace + USPACE) - 1;
+	memset(tf, 0, sizeof(struct trapframe));
+
+	lwp0.l_md.md_utf = lwp0.l_md.md_ktf = tf;
+}
+
+
+/* fdt_memory_foreach() callback: log one FDT /memory range (start, size). */
+static void
+riscv_print_memory(const struct fdt_memory *m, void *arg)
+{
+
+	VPRINTF("FDT /memory @ 0x%" PRIx64 " size 0x%" PRIx64 "\n",
+	    m->start, m->end - m->start);
+}
+
+
+/*
+ * Parse single-character boot flags from 'args' and OR the recognised
+ * ones into the global boothowto; unknown flags are reported but
+ * otherwise ignored.  A leading '-' is skipped.
+ */
+static void
+parse_bi_bootargs(char *args)
+{
+	int howto;
+
+	for (char *cp = args; *cp; cp++) {
+		/* Ignore superfluous '-', if there is one */
+		if (*cp == '-')
+			continue;
+
+		howto = 0;
+		BOOT_FLAG(*cp, howto);
+		if (!howto)
+			printf("bootflag '%c' not recognised\n", *cp);
+		else
+			boothowto |= howto;
+	}
+}
+
+
+/*
+ * init_riscv: machine-dependent early bootstrap.
+ *
+ * Entered with the boot hart id and the virtual address of the DTB
+ * handed on by the bootloader.  Sets up an early console, parses the
+ * FDT and boot arguments, determines the memory layout, and
+ * bootstraps the pmap and lwp0 before main() is entered.
+ *
+ * NOTE(review): 'hartid' is currently unused (WIP).
+ */
void
-init_riscv(vaddr_t kernstart, vaddr_t kernend)
+init_riscv(register_t hartid, vaddr_t vdtb)
{
-	/* Early VM bootstrap. */
-	pmap_bootstrap();
+	/* Set temporarily so printf()/panic() work even before consinit(). */
+	cn_tab = &earlycons;
+
+	/* Load FDT */
+	void *fdt_data = (void *)vdtb;
+	int error = fdt_check_header(fdt_data);
+	if (error != 0)
+		panic("fdt_check_header failed: %s", fdt_strerror(error));
+
+	fdtbus_init(fdt_data);
+
+#if 0
+	/* Lookup platform specific backend */
+	plat = riscv_fdt_platform();
+	if (plat == NULL)
+		panic("Kernel does not support this device");
+
+#endif
+	/* Early console may be available, announce ourselves. */
+	VPRINTF("FDT<%p>\n", fdt_data);
+
+	/* Fetch the kernel command line from the FDT /chosen node. */
+	const int chosen = OF_finddevice("/chosen");
+	if (chosen >= 0)
+		OF_getprop(chosen, "bootargs", bootargs, sizeof(bootargs));
+	boot_args = bootargs;
+
+#if 0
+	/*
+	 * If stdout-path is specified on the command line, override the
+	 * value in /chosen/stdout-path before initializing console.
+	 */
+	VPRINTF("stdout\n");
+	fdt_update_stdout_path();
+#endif
+
+	/*
+	 * Done making changes to the FDT.
+	 */
+	fdt_pack(fdt_data);
+
+	VPRINTF("consinit ");
+	consinit();
+	VPRINTF("ok\n");
+
+	/* Talk to the user */
+	printf("NetBSD/riscv (fdt) booting ...\n");
+
+#ifdef BOOT_ARGS
+	char mi_bootargs[] = BOOT_ARGS;
+	parse_bi_bootargs(mi_bootargs);
+#endif
+
+	/* SPAM me while testing */
+	boothowto |= AB_DEBUG;
+
+	uint64_t memory_start, memory_end;
+	fdt_memory_get(&memory_start, &memory_end);
+
+	fdt_memory_foreach(riscv_print_memory, NULL);
+
+	/* Cannot map memory above largest page number */
+	const uint64_t maxppn = __SHIFTOUT_MASK(PTE_PPN) - 1;
+	const uint64_t memory_limit = ptoa(maxppn);
+
+	if (memory_end > memory_limit) {
+		/*
+		 * NOTE(review): fdt_memory_remove_range() appears to take
+		 * (addr, size); if so the second argument should be
+		 * 'memory_end - memory_limit' -- verify against fdt_memory.h.
+		 */
+		fdt_memory_remove_range(memory_limit, memory_end);
+		memory_end = memory_limit;
+	}
+
+	uint64_t memory_size __unused = memory_end - memory_start;
+
+	VPRINTF("%s: memory start %" PRIx64 " end %" PRIx64 " (len %"
+	    PRIx64 ")\n", __func__, memory_start, memory_end, memory_size);
+
+	/* Perform PT build and VM init */
+	//cpu_kernel_vm_init();
+
+	VPRINTF("bootargs: %s\n", bootargs);
+
+	parse_bi_bootargs(boot_args);
+
+
+	// initarm_common
+	extern char __kernel_text[];
+	extern char _end[];
+
+	/* Page-aligned kernel extent and its physical counterpart. */
+	vaddr_t kernstart = trunc_page((vaddr_t)__kernel_text);
+	vaddr_t kernend = round_page((vaddr_t)_end);
+	paddr_t kernstart_phys __unused = KERN_VTOPHYS(kernstart);
+	paddr_t kernend_phys __unused = KERN_VTOPHYS(kernend);
+
+	vaddr_t kernelvmstart;
+
+	/* Megapage-aligned extent, used as the initial KVA start. */
+	vaddr_t kernstart_mega __unused = MEGAPAGE_TRUNC(kernstart);
+	vaddr_t kernend_mega = MEGAPAGE_ROUND(kernend);
+
+	kernelvmstart = kernend_mega;
+
+#define DPRINTF(v) VPRINTF("%24s = 0x%16lx\n", #v, (unsigned long)v);
+
+	VPRINTF("------------------------------------------\n");
+	DPRINTF(kern_vtopdiff);
+	DPRINTF(memory_start);
+	DPRINTF(memory_end);
+	DPRINTF(memory_size);
+	DPRINTF(kernstart_phys);
+	DPRINTF(kernend_phys)
+	DPRINTF(VM_MIN_KERNEL_ADDRESS);
+	DPRINTF(kernstart_mega);
+	DPRINTF(kernstart);
+	DPRINTF(kernend);
+	DPRINTF(kernend_mega);
+	DPRINTF(VM_MAX_KERNEL_ADDRESS);
+	VPRINTF("------------------------------------------\n");
+
+#undef DPRINTF
+
+	KASSERT(kernelvmstart < VM_KERNEL_VM_BASE);
+
+	kernelvmstart = VM_KERNEL_VM_BASE;
+
+	/*
+	 * msgbuf is allocated from the bottom of any one of memory blocks
+	 * to avoid corruption due to bootloader or changing kernel layout.
+	 *
+	 * NOTE(review): the allocation is not implemented yet (WIP), so
+	 * msgbufaddr stays 0 and the KASSERT below will always fire.
+	 */
+	paddr_t msgbufaddr = 0;
+
+	KASSERT(msgbufaddr != 0);	/* no space for msgbuf */
+#ifdef _LP64
+	initmsgbuf((void *)RISCV_PA_TO_KVA(msgbufaddr), MSGBUFSIZE);
+#endif
+
+	uvm_md_init();
+
+	pmap_bootstrap(kernelvmstart, VM_MAX_KERNEL_ADDRESS);
+
+	/* Finish setting up lwp0 on our end before we call main() */
+	riscv_init_lwp0_uarea();
}
Added files:
Index: src/sys/arch/riscv/include/machdep.h
diff -u /dev/null src/sys/arch/riscv/include/machdep.h:1.1
--- /dev/null Tue Sep 20 07:18:24 2022
+++ src/sys/arch/riscv/include/machdep.h Tue Sep 20 07:18:23 2022
@@ -0,0 +1,68 @@
+/* $NetBSD: machdep.h,v 1.1 2022/09/20 07:18:23 skrll Exp $ */
+
+/*-
+ * Copyright (c) 2022 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Nick Hudson
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RISCV_MACHDEP_H_
+#define _RISCV_MACHDEP_H_
+
+#include <sys/cdefs.h>
+__KERNEL_RCSID(0, "$NetBSD: machdep.h,v 1.1 2022/09/20 07:18:23 skrll Exp $");
+
+#include <sys/proc.h>
+#include <sys/lwp.h>
+#include <sys/siginfo.h>
+
+/* Translate a kernel virtual address to its physical address. */
+static inline paddr_t
+riscv_kern_vtophys(vaddr_t va)
+{
+	extern unsigned long kern_vtopdiff;
+
+	return va - kern_vtopdiff;
+}
+
+/* Translate a physical address to its kernel virtual address. */
+static inline vaddr_t
+riscv_kern_phystov(paddr_t pa)
+{
+	extern unsigned long kern_vtopdiff;
+
+	return pa + kern_vtopdiff;
+}
+
+/*
+ * Convenience wrappers around the inline translators above.  The
+ * argument is fully parenthesized so that expression arguments
+ * (e.g. base + off) are cast as a whole, not just their first term.
+ */
+#define KERN_VTOPHYS(va)	riscv_kern_vtophys((vaddr_t)(va))
+#define KERN_PHYSTOV(pa)	riscv_kern_phystov((paddr_t)(pa))
+
+
+void uartputc(int);
+
+paddr_t init_mmu(paddr_t);
+void init_riscv(register_t, vaddr_t);
+
+
+#endif /* _RISCV_MACHDEP_H_ */