Module Name:    src
Committed By:   maxv
Date:           Thu Jan 11 09:00:04 UTC 2018

Modified Files:
        src/sys/arch/amd64/amd64: locore.S machdep.c
        src/sys/arch/amd64/include: frameasm.h types.h

Log Message:
Declare new SVS_* variants: SVS_ENTER_NOSTACK and SVS_LEAVE_NOSTACK. Use
SVS_ENTER_NOSTACK in the syscall entry point, and put it before the code
that touches curlwp. (curlwp is located in the direct map.)
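
To illustrate the ordering, here is a rough C-level sketch, not the
committed assembly; svs_enter_nostack() is a hypothetical C wrapper for
the new macro:

/*
 * Rough sketch of the ordering constraint: the %cr3 switch has to
 * happen before anything reached through the direct map is
 * dereferenced, because the user page tables no longer map it.
 * svs_enter_nostack() is a made-up wrapper for SVS_ENTER_NOSTACK.
 */
void
syscall_entry_sketch(void)
{
	struct lwp *l;
	struct pcb *pcb;

	svs_enter_nostack();	/* %cr3 <- kernel page tables; only the
				 * per-CPU scratch slot is used, never the
				 * stack, which is still the user's %rsp */
	l = curlwp;		/* the lwp sits in the direct map, so it
				 * may only be dereferenced from here on */
	pcb = lwp_getpcb(l);

	/* ... switch to the kernel stack and build the trapframe ... */
}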

Then, disable __HAVE_CPU_UAREA_ROUTINES (to be removed later). This moves
the kernel stack into pmap_kernel() rather than the direct map. That's a
change I've always wanted to make: with the stack in the direct map we
can't put a redzone under it, so a stack overflow can run very far into
memory without being detected (as far as erasing all of the system's
memory).
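
As an illustration only, a guard page below a pmap_kernel()-backed stack
could look roughly like this; kva_reserve() and kva_backing_enter() are
made-up placeholder names, not actual NetBSD interfaces:

/*
 * Hypothetical sketch of what a pmap_kernel()-backed uarea allows:
 * leave an unmapped guard page below the stack so that an overflow
 * faults instead of silently walking down the direct map.
 */
vaddr_t
uarea_alloc_with_redzone(vsize_t usize)
{
	vaddr_t va;

	/* Reserve KVA for one guard page plus the uarea itself. */
	va = kva_reserve(PAGE_SIZE + usize);

	/*
	 * Back only the uarea with physical pages; the guard page at
	 * the bottom stays unmapped and turns a stack overflow into a
	 * page fault instead of a silent memory scribble.
	 */
	kva_backing_enter(va + PAGE_SIZE, usize);

	return va + PAGE_SIZE;
}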

Finally, unmap the direct map from userland.
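
Conceptually, each slot of the per-CPU user page directory is filtered
as below when it is rebuilt (a sketch reusing the variable names from
the machdep.c hunk that follows):

/*
 * A slot that backs the direct map, or the recursive PTE slot, is left
 * empty in the user page tables; everything else is copied from the
 * pmap.
 */
static inline bool
svs_slot_unmapped_for_user(size_t slot)
{
	extern size_t pmap_direct_pdpe, pmap_direct_npdp;

	if (slot >= pmap_direct_pdpe &&
	    slot < pmap_direct_pdpe + pmap_direct_npdp)
		return true;		/* direct map */
	return slot == PDIR_SLOT_PTE;	/* recursive mapping */
}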


To generate a diff of this commit:
cvs rdiff -u -r1.145 -r1.146 src/sys/arch/amd64/amd64/locore.S
cvs rdiff -u -r1.285 -r1.286 src/sys/arch/amd64/amd64/machdep.c
cvs rdiff -u -r1.27 -r1.28 src/sys/arch/amd64/include/frameasm.h
cvs rdiff -u -r1.53 -r1.54 src/sys/arch/amd64/include/types.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/locore.S
diff -u src/sys/arch/amd64/amd64/locore.S:1.145 src/sys/arch/amd64/amd64/locore.S:1.146
--- src/sys/arch/amd64/amd64/locore.S:1.145	Sun Jan  7 16:10:16 2018
+++ src/sys/arch/amd64/amd64/locore.S	Thu Jan 11 09:00:04 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.S,v 1.145 2018/01/07 16:10:16 maxv Exp $	*/
+/*	$NetBSD: locore.S,v 1.146 2018/01/11 09:00:04 maxv Exp $	*/
 
 /*
  * Copyright-o-rama!
@@ -1268,6 +1268,7 @@ IDTVEC(syscall)
 	 * is ignored as well.
 	 */
 	swapgs
+	SVS_ENTER_NOSTACK
 	movq	%r15,CPUVAR(SCRATCH)
 	movq	CPUVAR(CURLWP),%r15
 	movq	L_PCB(%r15),%r15
@@ -1295,7 +1296,6 @@ IDTVEC(syscall)
 	subq	$TF_REGSIZE,%rsp
 	cld
 #endif
-	SVS_ENTER
 	INTR_SAVE_GPRS
 	movw	$GSEL(GUDATA_SEL, SEL_UPL),TF_DS(%rsp)
 	movw	$GSEL(GUDATA_SEL, SEL_UPL),TF_ES(%rsp)

Index: src/sys/arch/amd64/amd64/machdep.c
diff -u src/sys/arch/amd64/amd64/machdep.c:1.285 src/sys/arch/amd64/amd64/machdep.c:1.286
--- src/sys/arch/amd64/amd64/machdep.c:1.285	Sun Jan  7 16:10:16 2018
+++ src/sys/arch/amd64/amd64/machdep.c	Thu Jan 11 09:00:04 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.285 2018/01/07 16:10:16 maxv Exp $	*/
+/*	$NetBSD: machdep.c,v 1.286 2018/01/11 09:00:04 maxv Exp $	*/
 
 /*
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -110,7 +110,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.285 2018/01/07 16:10:16 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.286 2018/01/11 09:00:04 maxv Exp $");
 
 /* #define XENDEBUG_LOW  */
 
@@ -2339,6 +2339,8 @@ svs_pte_atomic_read(struct pmap *pmap, s
 void
 svs_pdir_switch(struct pmap *pmap)
 {
+	extern size_t pmap_direct_pdpe;
+	extern size_t pmap_direct_npdp;
 	struct cpu_info *ci = curcpu();
 	pt_entry_t pte;
 	size_t i;
@@ -2351,8 +2353,14 @@ svs_pdir_switch(struct pmap *pmap)
 	mutex_enter(&ci->ci_svs_mtx);
 
 	for (i = 0; i < 512; i++) {
-		if (i == PDIR_SLOT_PTE) {
-			/* We don't want to have this mapped. */
+		/*
+		 * This is where we decide what to unmap from the user page
+		 * tables.
+		 */
+		if (pmap_direct_pdpe <= i &&
+		    i < pmap_direct_pdpe + pmap_direct_npdp) {
+			ci->ci_svs_updir[i] = 0;
+		} else if (i == PDIR_SLOT_PTE) {
 			ci->ci_svs_updir[i] = 0;
 		} else {
 			pte = svs_pte_atomic_read(pmap, i);

Index: src/sys/arch/amd64/include/frameasm.h
diff -u src/sys/arch/amd64/include/frameasm.h:1.27 src/sys/arch/amd64/include/frameasm.h:1.28
--- src/sys/arch/amd64/include/frameasm.h:1.27	Sun Jan  7 16:10:16 2018
+++ src/sys/arch/amd64/include/frameasm.h	Thu Jan 11 09:00:04 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: frameasm.h,v 1.27 2018/01/07 16:10:16 maxv Exp $	*/
+/*	$NetBSD: frameasm.h,v 1.28 2018/01/11 09:00:04 maxv Exp $	*/
 
 #ifndef _AMD64_MACHINE_FRAMEASM_H
 #define _AMD64_MACHINE_FRAMEASM_H
@@ -107,9 +107,21 @@
 	movq	CPUVAR(UPDIRPA),%rax	; \
 	movq	%rax,%cr3		; \
 	popq	%rax
+#define SVS_ENTER_NOSTACK \
+	movq	%rax,CPUVAR(SCRATCH)	; \
+	movq	CPUVAR(KPDIRPA),%rax	; \
+	movq	%rax,%cr3		; \
+	movq	CPUVAR(SCRATCH),%rax
+#define SVS_LEAVE_NOSTACK \
+	movq	%rax,CPUVAR(SCRATCH)	; \
+	movq	CPUVAR(UPDIRPA),%rax	; \
+	movq	%rax,%cr3		; \
+	movq	CPUVAR(SCRATCH),%rax
 #else
 #define SVS_ENTER	/* nothing */
 #define SVS_LEAVE	/* nothing */
+#define SVS_ENTER_NOSTACK	/* nothing */
+#define SVS_LEAVE_NOSTACK	/* nothing */
 #endif
 
 #define	INTRENTRY_L(kernel_trap, usertrap) \

Index: src/sys/arch/amd64/include/types.h
diff -u src/sys/arch/amd64/include/types.h:1.53 src/sys/arch/amd64/include/types.h:1.54
--- src/sys/arch/amd64/include/types.h:1.53	Fri Jan  5 08:04:21 2018
+++ src/sys/arch/amd64/include/types.h	Thu Jan 11 09:00:04 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: types.h,v 1.53 2018/01/05 08:04:21 maxv Exp $	*/
+/*	$NetBSD: types.h,v 1.54 2018/01/11 09:00:04 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1990 The Regents of the University of California.
@@ -106,7 +106,7 @@ typedef	unsigned char		__cpu_simple_lock
 #define	__HAVE_DIRECT_MAP 1
 #define	__HAVE_MM_MD_DIRECT_MAPPED_IO
 #define	__HAVE_MM_MD_DIRECT_MAPPED_PHYS
-#define	__HAVE_CPU_UAREA_ROUTINES
+/* #define	__HAVE_CPU_UAREA_ROUTINES */
 #if !defined(NO_PCI_MSI_MSIX)
 #define	__HAVE_PCI_MSI_MSIX
 #endif
