Module Name:    src
Committed By:   maxv
Date:           Fri Jan  5 08:04:21 UTC 2018

Modified Files:
        src/sys/arch/amd64/amd64: gdt.c machdep.c
        src/sys/arch/amd64/include: types.h
        src/sys/arch/x86/include: cpu.h pmap.h
        src/sys/arch/x86/x86: cpu.c pmap.c

Log Message:
Add a __HAVE_PCPU_AREA option, enabled by default on native amd64 but not
Xen.

With this option, the CPU structures that must always be present in the
CPU's page tables are moved to L4 slot 384, which corresponds to address
0xffffc00000000000.

A new pcpu_area structure is defined. It contains shared structures (IDT,
LDT), and then an array of pcpu_entry structures, indexed by cpu_index(ci).
Theoretically the LDT should be in the array, but this will be done later.

During the boot procedure, cpu0 calls pmap_init_pcpu, which creates a
page tree that is able to map the pcpu_area structure entirely. cpu0 then
immediately maps the shared structures. Later, every CPU goes through
cpu_pcpuarea_init, which allocates physical pages and kenters (maps) the
relevant pcpu_entry onto them. Finally, each pointer is updated to point
into pcpuarea.

The point of this change is to make sure that the structures that must
always be present in the page tables have their own L4 slot. Until now
their L4 slot was that of pmap_kernel, and making a distinction between
what must be mapped and what does not need to be was complicated.

Even in the non-speculative-bug case this change makes some sense: there
are several x86 instructions that leak the addresses of the CPU structures,
and putting these structures inside pmap_kernel actually offered a way to
compute the address of the kernel heap - which would have made ASLR on it
plainly useless, had we implemented that.

Note that, for now, pcpuarea does not contain rsp0.

Unfortunately this change adds many #ifdefs, and makes the code harder to
understand. There is also some duplication, but that will be solved later.


To generate a diff of this commit:
cvs rdiff -u -r1.44 -r1.45 src/sys/arch/amd64/amd64/gdt.c
cvs rdiff -u -r1.283 -r1.284 src/sys/arch/amd64/amd64/machdep.c
cvs rdiff -u -r1.52 -r1.53 src/sys/arch/amd64/include/types.h
cvs rdiff -u -r1.86 -r1.87 src/sys/arch/x86/include/cpu.h
cvs rdiff -u -r1.72 -r1.73 src/sys/arch/x86/include/pmap.h
cvs rdiff -u -r1.141 -r1.142 src/sys/arch/x86/x86/cpu.c
cvs rdiff -u -r1.275 -r1.276 src/sys/arch/x86/x86/pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/amd64/amd64/gdt.c
diff -u src/sys/arch/amd64/amd64/gdt.c:1.44 src/sys/arch/amd64/amd64/gdt.c:1.45
--- src/sys/arch/amd64/amd64/gdt.c:1.44	Thu Jan  4 20:38:30 2018
+++ src/sys/arch/amd64/amd64/gdt.c	Fri Jan  5 08:04:20 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: gdt.c,v 1.44 2018/01/04 20:38:30 maxv Exp $	*/
+/*	$NetBSD: gdt.c,v 1.45 2018/01/05 08:04:20 maxv Exp $	*/
 
 /*
  * Copyright (c) 1996, 1997, 2009 The NetBSD Foundation, Inc.
@@ -37,7 +37,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.44 2018/01/04 20:38:30 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: gdt.c,v 1.45 2018/01/05 08:04:20 maxv Exp $");
 
 #include "opt_multiprocessor.h"
 #include "opt_xen.h"
@@ -132,8 +132,6 @@ void
 gdt_init(void)
 {
 	char *old_gdt;
-	struct vm_page *pg;
-	vaddr_t va;
 	struct cpu_info *ci = &cpu_info_primary;
 
 	/* Initialize the global values */
@@ -142,6 +140,13 @@ gdt_init(void)
 
 	old_gdt = gdtstore;
 
+#ifdef __HAVE_PCPU_AREA
+	/* The GDT is part of the pcpuarea */
+	gdtstore = (char *)&pcpuarea->ent[cpu_index(ci)].gdt;
+#else
+	struct vm_page *pg;
+	vaddr_t va;
+
 	/* Allocate gdt_size bytes of memory. */
 	gdtstore = (char *)uvm_km_alloc(kernel_map, gdt_size, 0,
 	    UVM_KMF_VAONLY);
@@ -155,6 +160,7 @@ gdt_init(void)
 		    VM_PROT_READ | VM_PROT_WRITE, 0);
 	}
 	pmap_update(pmap_kernel());
+#endif
 
 	/* Copy the initial bootstrap GDT into the new area. */
 	memcpy(gdtstore, old_gdt, DYNSEL_START);
@@ -174,6 +180,9 @@ gdt_init(void)
 void
 gdt_alloc_cpu(struct cpu_info *ci)
 {
+#ifdef __HAVE_PCPU_AREA
+	ci->ci_gdt = (union descriptor *)&pcpuarea->ent[cpu_index(ci)].gdt;
+#else
 	struct vm_page *pg;
 	vaddr_t va;
 
@@ -189,6 +198,7 @@ gdt_alloc_cpu(struct cpu_info *ci)
 		    VM_PROT_READ | VM_PROT_WRITE, 0);
 	}
 	pmap_update(pmap_kernel());
+#endif
 
 	memcpy(ci->ci_gdt, gdtstore, gdt_size);
 }

Index: src/sys/arch/amd64/amd64/machdep.c
diff -u src/sys/arch/amd64/amd64/machdep.c:1.283 src/sys/arch/amd64/amd64/machdep.c:1.284
--- src/sys/arch/amd64/amd64/machdep.c:1.283	Thu Jan  4 13:36:30 2018
+++ src/sys/arch/amd64/amd64/machdep.c	Fri Jan  5 08:04:20 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: machdep.c,v 1.283 2018/01/04 13:36:30 maxv Exp $	*/
+/*	$NetBSD: machdep.c,v 1.284 2018/01/05 08:04:20 maxv Exp $	*/
 
 /*
  * Copyright (c) 1996, 1997, 1998, 2000, 2006, 2007, 2008, 2011
@@ -110,7 +110,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.283 2018/01/04 13:36:30 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.284 2018/01/05 08:04:20 maxv Exp $");
 
 /* #define XENDEBUG_LOW  */
 
@@ -393,6 +393,9 @@ cpu_startup(void)
 	x86_bus_space_mallocok();
 #endif
 
+#ifdef __HAVE_PCPU_AREA
+	cpu_pcpuarea_init(&cpu_info_primary);
+#endif
 	gdt_init();
 	x86_64_proc0_pcb_ldt_init();
 
@@ -502,21 +505,36 @@ x86_64_proc0_pcb_ldt_init(void)
 void
 cpu_init_tss(struct cpu_info *ci)
 {
+#ifdef __HAVE_PCPU_AREA
+	const cpuid_t cid = cpu_index(ci);
+#endif
 	struct cpu_tss *cputss;
 	uintptr_t p;
 
+#ifdef __HAVE_PCPU_AREA
+	cputss = (struct cpu_tss *)&pcpuarea->ent[cid].tss;
+#else
 	cputss = (struct cpu_tss *)uvm_km_alloc(kernel_map,
 	    sizeof(struct cpu_tss), 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
+#endif
 
 	cputss->tss.tss_iobase = IOMAP_INVALOFF << 16;
 	/* cputss->tss.tss_ist[0] is filled by cpu_intr_init */
 
 	/* double fault */
+#ifdef __HAVE_PCPU_AREA
+	p = (vaddr_t)&pcpuarea->ent[cid].ist1;
+#else
 	p = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED);
+#endif
 	cputss->tss.tss_ist[1] = p + PAGE_SIZE - 16;
 
 	/* NMI */
+#ifdef __HAVE_PCPU_AREA
+	p = (vaddr_t)&pcpuarea->ent[cid].ist2;
+#else
 	p = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED);
+#endif
 	cputss->tss.tss_ist[2] = p + PAGE_SIZE - 16;
 
 	ci->ci_tss = cputss;

Index: src/sys/arch/amd64/include/types.h
diff -u src/sys/arch/amd64/include/types.h:1.52 src/sys/arch/amd64/include/types.h:1.53
--- src/sys/arch/amd64/include/types.h:1.52	Thu Jan 26 15:55:09 2017
+++ src/sys/arch/amd64/include/types.h	Fri Jan  5 08:04:21 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: types.h,v 1.52 2017/01/26 15:55:09 christos Exp $	*/
+/*	$NetBSD: types.h,v 1.53 2018/01/05 08:04:21 maxv Exp $	*/
 
 /*-
  * Copyright (c) 1990 The Regents of the University of California.
@@ -102,6 +102,7 @@ typedef	unsigned char		__cpu_simple_lock
 
 #include "opt_xen.h"
 #if defined(__x86_64__) && !defined(XEN)
+#define	__HAVE_PCPU_AREA 1
 #define	__HAVE_DIRECT_MAP 1
 #define	__HAVE_MM_MD_DIRECT_MAPPED_IO
 #define	__HAVE_MM_MD_DIRECT_MAPPED_PHYS

Index: src/sys/arch/x86/include/cpu.h
diff -u src/sys/arch/x86/include/cpu.h:1.86 src/sys/arch/x86/include/cpu.h:1.87
--- src/sys/arch/x86/include/cpu.h:1.86	Thu Jan  4 13:36:30 2018
+++ src/sys/arch/x86/include/cpu.h	Fri Jan  5 08:04:21 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.86 2018/01/04 13:36:30 maxv Exp $	*/
+/*	$NetBSD: cpu.h,v 1.87 2018/01/05 08:04:21 maxv Exp $	*/
 
 /*
  * Copyright (c) 1990 The Regents of the University of California.
@@ -332,6 +332,8 @@ void cpu_load_pmap(struct pmap *, struct
 void cpu_broadcast_halt(void);
 void cpu_kick(struct cpu_info *);
 
+void cpu_pcpuarea_init(struct cpu_info *);
+
 #define	curcpu()		x86_curcpu()
 #define	curlwp			x86_curlwp()
 #define	curpcb			((struct pcb *)lwp_getpcb(curlwp))

Index: src/sys/arch/x86/include/pmap.h
diff -u src/sys/arch/x86/include/pmap.h:1.72 src/sys/arch/x86/include/pmap.h:1.73
--- src/sys/arch/x86/include/pmap.h:1.72	Thu Dec 28 14:34:39 2017
+++ src/sys/arch/x86/include/pmap.h	Fri Jan  5 08:04:21 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.72 2017/12/28 14:34:39 maxv Exp $	*/
+/*	$NetBSD: pmap.h,v 1.73 2018/01/05 08:04:21 maxv Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -153,6 +153,25 @@ struct bootspace {
 	vaddr_t emodule;
 };
 
+#ifndef MAXGDTSIZ
+#define MAXGDTSIZ 65536 /* XXX */
+#endif
+
+struct pcpu_entry {
+	uint8_t gdt[MAXGDTSIZ];
+	uint8_t tss[PAGE_SIZE];
+	uint8_t ist1[PAGE_SIZE];
+	uint8_t ist2[PAGE_SIZE];
+} __packed;
+
+struct pcpu_area {
+	uint8_t idt[PAGE_SIZE];
+	uint8_t ldt[PAGE_SIZE];
+	struct pcpu_entry ent[MAXCPUS];
+} __packed;
+
+extern struct pcpu_area *pcpuarea;
+
 /*
  * pmap data structures: see pmap.c for details of locking.
  */
@@ -526,6 +545,12 @@ void	pmap_free_ptps(struct vm_page *);
  */
 #define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))
 
+#ifdef __HAVE_PCPU_AREA
+extern struct pcpu_area *pcpuarea;
+#define PDIR_SLOT_PCPU		384
+#define PMAP_PCPU_BASE		(VA_SIGN_NEG((PDIR_SLOT_PCPU * NBPD_L4)))
+#endif
+
 #ifdef __HAVE_DIRECT_MAP
 
 extern vaddr_t pmap_direct_base;

Index: src/sys/arch/x86/x86/cpu.c
diff -u src/sys/arch/x86/x86/cpu.c:1.141 src/sys/arch/x86/x86/cpu.c:1.142
--- src/sys/arch/x86/x86/cpu.c:1.141	Sat Nov 11 11:00:46 2017
+++ src/sys/arch/x86/x86/cpu.c	Fri Jan  5 08:04:21 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.c,v 1.141 2017/11/11 11:00:46 maxv Exp $	*/
+/*	$NetBSD: cpu.c,v 1.142 2018/01/05 08:04:21 maxv Exp $	*/
 
 /*
  * Copyright (c) 2000-2012 NetBSD Foundation, Inc.
@@ -62,7 +62,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.141 2017/11/11 11:00:46 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.142 2018/01/05 08:04:21 maxv Exp $");
 
 #include "opt_ddb.h"
 #include "opt_mpbios.h"		/* for MPDEBUG */
@@ -221,6 +221,36 @@ cpu_match(device_t parent, cfdata_t matc
 	return 1;
 }
 
+#ifdef __HAVE_PCPU_AREA
+void
+cpu_pcpuarea_init(struct cpu_info *ci)
+{
+	struct vm_page *pg;
+	size_t i, npages;
+	vaddr_t base, va;
+	paddr_t pa;
+
+	CTASSERT(sizeof(struct pcpu_entry) % PAGE_SIZE == 0);
+
+	npages = sizeof(struct pcpu_entry) / PAGE_SIZE;
+	base = (vaddr_t)&pcpuarea->ent[cpu_index(ci)];
+
+	for (i = 0; i < npages; i++) {
+		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
+		if (pg == NULL) {
+			panic("failed to allocate pcpu PA");
+		}
+
+		va = base + i * PAGE_SIZE;
+		pa = VM_PAGE_TO_PHYS(pg);
+
+		pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE, 0);
+	}
+
+	pmap_update(pmap_kernel());
+}
+#endif
+
 static void
 cpu_vm_init(struct cpu_info *ci)
 {
@@ -358,6 +388,9 @@ cpu_attach(device_t parent, device_t sel
 			    "mi_cpu_attach failed with %d\n", error);
 			return;
 		}
+#ifdef __HAVE_PCPU_AREA
+		cpu_pcpuarea_init(ci);
+#endif
 		cpu_init_tss(ci);
 	} else {
 		KASSERT(ci->ci_data.cpu_idlelwp != NULL);

Index: src/sys/arch/x86/x86/pmap.c
diff -u src/sys/arch/x86/x86/pmap.c:1.275 src/sys/arch/x86/x86/pmap.c:1.276
--- src/sys/arch/x86/x86/pmap.c:1.275	Thu Jan  4 13:36:30 2018
+++ src/sys/arch/x86/x86/pmap.c	Fri Jan  5 08:04:21 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.275 2018/01/04 13:36:30 maxv Exp $	*/
+/*	$NetBSD: pmap.c,v 1.276 2018/01/05 08:04:21 maxv Exp $	*/
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017 The NetBSD Foundation, Inc.
@@ -170,7 +170,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.275 2018/01/04 13:36:30 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.276 2018/01/05 08:04:21 maxv Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -322,6 +322,8 @@ long nkptp[] = NKPTP_INITIALIZER;
 struct pmap_head pmaps;
 kmutex_t pmaps_lock;
 
+struct pcpu_area *pcpuarea __read_mostly;
+
 static vaddr_t pmap_maxkvaddr;
 
 /*
@@ -550,6 +552,9 @@ extern vaddr_t pentium_idt_vaddr;
  * Local prototypes
  */
 
+#ifdef __HAVE_PCPU_AREA
+static void pmap_init_pcpu(void);
+#endif
 #ifdef __HAVE_DIRECT_MAP
 static void pmap_init_directmap(struct pmap *);
 #endif
@@ -1315,6 +1320,10 @@ pmap_bootstrap(vaddr_t kva_start)
 	pmap_init_lapic();
 #endif /* !XEN */
 
+#ifdef __HAVE_PCPU_AREA
+	pmap_init_pcpu();
+#endif
+
 #ifdef __HAVE_DIRECT_MAP
 	pmap_init_directmap(kpm);
 #else
@@ -1364,13 +1373,21 @@ pmap_bootstrap(vaddr_t kva_start)
 	/*
 	 * Allocate space for the IDT, GDT and LDT.
 	 */
+#ifdef __HAVE_PCPU_AREA
+	idt_vaddr = (vaddr_t)&pcpuarea->idt;
+#else
 	idt_vaddr = pmap_bootstrap_valloc(1);
+#endif
 	idt_paddr = pmap_bootstrap_palloc(1);
 
 	gdt_vaddr = pmap_bootstrap_valloc(1);
 	gdt_paddr = pmap_bootstrap_palloc(1);
 
+#ifdef __HAVE_PCPU_AREA
+	ldt_vaddr = (vaddr_t)&pcpuarea->ldt;
+#else
 	ldt_vaddr = pmap_bootstrap_valloc(1);
+#endif
 	ldt_paddr = pmap_bootstrap_palloc(1);
 
 #if !defined(__x86_64__) && !defined(XEN)
@@ -1426,16 +1443,106 @@ pmap_init_lapic(void)
 }
 #endif
 
-#ifdef __HAVE_DIRECT_MAP
+#if defined(__HAVE_PCPU_AREA) || defined(__HAVE_DIRECT_MAP)
 static size_t
-pmap_dmap_nentries_range(vaddr_t startva, vaddr_t endva, size_t pgsz)
+pmap_pagetree_nentries_range(vaddr_t startva, vaddr_t endva, size_t pgsz)
 {
 	size_t npages;
 	npages = (roundup(endva, pgsz) / pgsz) -
 	    (rounddown(startva, pgsz) / pgsz);
 	return npages;
 }
+#endif
+
+#ifdef __HAVE_PCPU_AREA
+static void
+pmap_init_pcpu(void)
+{
+	const vaddr_t startva = PMAP_PCPU_BASE;
+	size_t nL4e, nL3e, nL2e, nL1e;
+	size_t L4e_idx, L3e_idx, L2e_idx, L1e_idx;
+	paddr_t pa;
+	vaddr_t endva;
+	vaddr_t tmpva;
+	pt_entry_t *pte;
+	size_t size;
+	int i;
+
+	const pd_entry_t pteflags = PG_V | PG_KW | pmap_pg_nx;
 
+	size = sizeof(struct pcpu_area);
+
+	endva = startva + size;
+
+	/* We will use this temporary va. */
+	tmpva = bootspace.spareva;
+	pte = PTE_BASE + pl1_i(tmpva);
+
+	/* Build L4 */
+	L4e_idx = pl4_i(startva);
+	nL4e = pmap_pagetree_nentries_range(startva, endva, NBPD_L4);
+	KASSERT(nL4e  == 1);
+	for (i = 0; i < nL4e; i++) {
+		KASSERT(L4_BASE[L4e_idx+i] == 0);
+
+		pa = pmap_bootstrap_palloc(1);
+		*pte = (pa & PG_FRAME) | pteflags;
+		pmap_update_pg(tmpva);
+		memset((void *)tmpva, 0, PAGE_SIZE);
+
+		L4_BASE[L4e_idx+i] = pa | pteflags | PG_U;
+	}
+
+	/* Build L3 */
+	L3e_idx = pl3_i(startva);
+	nL3e = pmap_pagetree_nentries_range(startva, endva, NBPD_L3);
+	for (i = 0; i < nL3e; i++) {
+		KASSERT(L3_BASE[L3e_idx+i] == 0);
+
+		pa = pmap_bootstrap_palloc(1);
+		*pte = (pa & PG_FRAME) | pteflags;
+		pmap_update_pg(tmpva);
+		memset((void *)tmpva, 0, PAGE_SIZE);
+
+		L3_BASE[L3e_idx+i] = pa | pteflags | PG_U;
+	}
+
+	/* Build L2 */
+	L2e_idx = pl2_i(startva);
+	nL2e = pmap_pagetree_nentries_range(startva, endva, NBPD_L2);
+	for (i = 0; i < nL2e; i++) {
+
+		KASSERT(L2_BASE[L2e_idx+i] == 0);
+
+		pa = pmap_bootstrap_palloc(1);
+		*pte = (pa & PG_FRAME) | pteflags;
+		pmap_update_pg(tmpva);
+		memset((void *)tmpva, 0, PAGE_SIZE);
+
+		L2_BASE[L2e_idx+i] = pa | pteflags | PG_U;
+	}
+
+	/* Build L1 */
+	L1e_idx = pl1_i(startva);
+	nL1e = pmap_pagetree_nentries_range(startva, endva, NBPD_L1);
+	for (i = 0; i < nL1e; i++) {
+		/*
+		 * Nothing to do, the PTEs will be entered via
+		 * pmap_kenter_pa.
+		 */
+		KASSERT(L1_BASE[L1e_idx+i] == 0);
+	}
+
+	*pte = 0;
+	pmap_update_pg(tmpva);
+
+	pcpuarea = (struct pcpu_area *)startva;
+
+	tlbflush();
+}
+#endif
+
+#ifdef __HAVE_DIRECT_MAP
 /*
  * Create the amd64 direct map. Called only once at boot time. We map all of
  * the physical memory contiguously using 2MB large pages, with RW permissions.
@@ -1487,7 +1594,7 @@ pmap_init_directmap(struct pmap *kpm)
 
 	/* Build L4 */
 	L4e_idx = pl4_i(startva);
-	nL4e = pmap_dmap_nentries_range(startva, endva, NBPD_L4);
+	nL4e = pmap_pagetree_nentries_range(startva, endva, NBPD_L4);
 	KASSERT(nL4e <= NL4_SLOT_DIRECT);
 	for (i = 0; i < nL4e; i++) {
 		KASSERT(L4_BASE[L4e_idx+i] == 0);
@@ -1502,7 +1609,7 @@ pmap_init_directmap(struct pmap *kpm)
 
 	/* Build L3 */
 	L3e_idx = pl3_i(startva);
-	nL3e = pmap_dmap_nentries_range(startva, endva, NBPD_L3);
+	nL3e = pmap_pagetree_nentries_range(startva, endva, NBPD_L3);
 	for (i = 0; i < nL3e; i++) {
 		KASSERT(L3_BASE[L3e_idx+i] == 0);
 
@@ -1516,7 +1623,7 @@ pmap_init_directmap(struct pmap *kpm)
 
 	/* Build L2 */
 	L2e_idx = pl2_i(startva);
-	nL2e = pmap_dmap_nentries_range(startva, endva, NBPD_L2);
+	nL2e = pmap_pagetree_nentries_range(startva, endva, NBPD_L2);
 	for (i = 0; i < nL2e; i++) {
 		KASSERT(L2_BASE[L2e_idx+i] == 0);
 
@@ -2231,6 +2338,9 @@ pmap_pdp_ctor(void *arg, void *v, int fl
 		pdir[idx] = PDP_BASE[idx];
 	}
 
+#ifdef __HAVE_PCPU_AREA
+	pdir[PDIR_SLOT_PCPU] = PDP_BASE[PDIR_SLOT_PCPU];
+#endif
 #ifdef __HAVE_DIRECT_MAP
 	memcpy(&pdir[pmap_direct_pdpe], &PDP_BASE[pmap_direct_pdpe],
 	    pmap_direct_npdp * sizeof(pd_entry_t));

Reply via email to