Module Name:    src
Committed By:   maxv
Date:           Thu Jul 26 17:20:09 UTC 2018

Modified Files:
        src/sys/arch/x86/x86: pmap.c
        src/sys/arch/xen/include: xenpmap.h
        src/sys/arch/xen/include/i386: hypercalls.h
        src/sys/arch/xen/x86: cpu.c x86_xpmap.c xen_pmap.c

Log Message:
Remove the non-PAE i386 code from Xen. The branches are reordered so that
__x86_64__ comes first, e.g.:

        #if defined(PAE)
                /* i386+PAE */
        #elif defined(__x86_64__)
                /* amd64 */
        #else
                /* i386 */
        #endif

becomes

        #ifdef __x86_64__
                /* amd64 */
        #else
                /* i386+PAE */
        #endif

Tested on i386pae-domU and amd64-dom0.


To generate a diff of this commit:
cvs rdiff -u -r1.294 -r1.295 src/sys/arch/x86/x86/pmap.c
cvs rdiff -u -r1.39 -r1.40 src/sys/arch/xen/include/xenpmap.h
cvs rdiff -u -r1.15 -r1.16 src/sys/arch/xen/include/i386/hypercalls.h
cvs rdiff -u -r1.123 -r1.124 src/sys/arch/xen/x86/cpu.c
cvs rdiff -u -r1.78 -r1.79 src/sys/arch/xen/x86/x86_xpmap.c
cvs rdiff -u -r1.26 -r1.27 src/sys/arch/xen/x86/xen_pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/x86/x86/pmap.c
diff -u src/sys/arch/x86/x86/pmap.c:1.294 src/sys/arch/x86/x86/pmap.c:1.295
--- src/sys/arch/x86/x86/pmap.c:1.294	Thu Jul 26 08:22:19 2018
+++ src/sys/arch/x86/x86/pmap.c	Thu Jul 26 17:20:08 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.294 2018/07/26 08:22:19 maxv Exp $	*/
+/*	$NetBSD: pmap.c,v 1.295 2018/07/26 17:20:08 maxv Exp $	*/
 
 /*
  * Copyright (c) 2008, 2010, 2016, 2017 The NetBSD Foundation, Inc.
@@ -157,7 +157,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.294 2018/07/26 08:22:19 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.295 2018/07/26 17:20:08 maxv Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -4540,7 +4540,7 @@ pmap_alloc_level(struct pmap *cpm, vaddr
 			pte = pmap_pa2pte(pa) | PG_V | PG_RW;
 			pmap_pte_set(&pdep[i], pte);
 
-#if defined(XEN) && (defined(PAE) || defined(__x86_64__))
+#ifdef XEN
 			if (level == PTP_LEVELS && i >= PDIR_SLOT_KERN) {
 				if (__predict_true(
 				    cpu_info_primary.ci_flags & CPUF_PRESENT)) {
@@ -4551,18 +4551,17 @@ pmap_alloc_level(struct pmap *cpm, vaddr
 					 * too early; update primary CPU
 					 * PMD only (without locks)
 					 */
-#ifdef PAE
-					pd_entry_t *cpu_pdep =
-					    &cpu_info_primary.ci_kpm_pdir[l2tol2(i)];
-#endif
 #ifdef __x86_64__
 					pd_entry_t *cpu_pdep =
 						&cpu_info_primary.ci_kpm_pdir[i];
+#else
+					pd_entry_t *cpu_pdep =
+					    &cpu_info_primary.ci_kpm_pdir[l2tol2(i)];
 #endif
 					pmap_pte_set(cpu_pdep, pte);
 				}
 			}
-#endif /* XEN && (PAE || __x86_64__) */
+#endif
 
 			KASSERT(level != PTP_LEVELS || nkptp[level - 1] +
 			    pl_i(VM_MIN_KERNEL_ADDRESS, level) == i);
@@ -4620,7 +4619,7 @@ pmap_growkernel(vaddr_t maxkvaddr)
 		needed_kptp[i] = target_nptp - nkptp[i];
 	}
 
-#if defined(XEN) && (defined(__x86_64__) || defined(PAE))
+#ifdef XEN
 	/* only pmap_kernel(), or the per-cpu map, has kernel entries */
 	cpm = kpm;
 #else
@@ -4643,21 +4642,6 @@ pmap_growkernel(vaddr_t maxkvaddr)
 		/* nothing, kernel entries are never entered in user pmap */
 #else
 		int pdkidx;
-#ifndef PAE
-		/*
-		 * for PAE this is not needed, because pmap_alloc_level()
-		 * already did update the per-CPU tables
-		 */
-		if (cpm != kpm) {
-			for (pdkidx = PDIR_SLOT_KERN + old;
-			    pdkidx < PDIR_SLOT_KERN + nkptp[PTP_LEVELS - 1];
-			    pdkidx++) {
-				pmap_pte_set(&kpm->pm_pdir[pdkidx],
-				    cpm->pm_pdir[pdkidx]);
-			}
-			pmap_pte_flush();
-		}
-#endif /* !PAE */
 
 		mutex_enter(&pmaps_lock);
 		LIST_FOREACH(pm, &pmaps, pm_list) {

Index: src/sys/arch/xen/include/xenpmap.h
diff -u src/sys/arch/xen/include/xenpmap.h:1.39 src/sys/arch/xen/include/xenpmap.h:1.40
--- src/sys/arch/xen/include/xenpmap.h:1.39	Wed Mar  8 18:00:49 2017
+++ src/sys/arch/xen/include/xenpmap.h	Thu Jul 26 17:20:08 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: xenpmap.h,v 1.39 2017/03/08 18:00:49 maxv Exp $	*/
+/*	$NetBSD: xenpmap.h,v 1.40 2018/07/26 17:20:08 maxv Exp $	*/
 
 /*
  *
@@ -61,9 +61,7 @@ void pmap_xen_suspend(void);
 void pmap_map_recursive_entries(void);
 void pmap_unmap_recursive_entries(void);
 
-#if defined(PAE) || defined(__x86_64__)
 void xen_kpm_sync(struct pmap *, int);
-#endif /* PAE || __x86_64__ */
 
 #define xpq_queue_pin_l1_table(pa)	\
 	xpq_queue_pin_table(pa, MMUEXT_PIN_L1_TABLE)
@@ -135,11 +133,7 @@ MULTI_update_va_mapping(
 	mcl->args[2] = flags;
 #else
 	mcl->args[1] = (new_val & 0xffffffff);
-#ifdef PAE
 	mcl->args[2] = (new_val >> 32);
-#else
-	mcl->args[2] = 0;
-#endif
 	mcl->args[3] = flags;
 #endif
 }
@@ -157,11 +151,7 @@ MULTI_update_va_mapping_otherdomain(
 	mcl->args[3] = domid;
 #else
 	mcl->args[1] = (new_val & 0xffffffff);
-#ifdef PAE
 	mcl->args[2] = (new_val >> 32);
-#else
-	mcl->args[2] = 0;
-#endif
 	mcl->args[3] = flags;
 	mcl->args[4] = domid;
 #endif

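For reference, the i386 path of MULTI_update_va_mapping now always splits
the 64-bit PAE PTE into two 32-bit halves. A minimal sketch of the resulting
i386 branch, reconstructed from the hunks above (the op/args[0] setup is
assumed from the unchanged context, not shown in this diff):

	static __inline void
	MULTI_update_va_mapping(multicall_entry_t *mcl, vaddr_t va,
	    pt_entry_t new_val, unsigned long flags)
	{
		mcl->op = __HYPERVISOR_update_va_mapping; /* assumed from context */
		mcl->args[0] = va;                        /* assumed from context */
		mcl->args[1] = (new_val & 0xffffffff); /* low 32 bits of the PAE PTE */
		mcl->args[2] = (new_val >> 32);        /* high 32 bits, no longer forced to 0 */
		mcl->args[3] = flags;
	}
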
Index: src/sys/arch/xen/include/i386/hypercalls.h
diff -u src/sys/arch/xen/include/i386/hypercalls.h:1.15 src/sys/arch/xen/include/i386/hypercalls.h:1.16
--- src/sys/arch/xen/include/i386/hypercalls.h:1.15	Wed Jun 27 00:37:09 2012
+++ src/sys/arch/xen/include/i386/hypercalls.h	Thu Jul 26 17:20:08 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: hypercalls.h,v 1.15 2012/06/27 00:37:09 jym Exp $	*/
+/*	$NetBSD: hypercalls.h,v 1.16 2018/07/26 17:20:08 maxv Exp $	*/
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -266,11 +266,7 @@ HYPERVISOR_update_va_mapping(unsigned lo
     unsigned long pte_low, pte_hi;
 
     pte_low = new_val & 0xffffffff;
-#ifdef PAE
     pte_hi = new_val >> 32;
-#else
-    pte_hi = 0;
-#endif
 
     _hypercall(__HYPERVISOR_update_va_mapping,
 	_harg("1" (page_nr), "2" (pte_low), "3" (pte_hi), "4" (flags)),
@@ -319,11 +315,7 @@ HYPERVISOR_update_va_mapping_otherdomain
     unsigned long pte_low, pte_hi;
 
     pte_low = new_val & 0xffffffff;
-#ifdef PAE
     pte_hi = new_val >> 32;
-#else
-    pte_hi = 0;
-#endif
 
     _hypercall(__HYPERVISOR_update_va_mapping_otherdomain,
 	_harg("1" (page_nr), "2" (pte_low), "3" (pte_hi), "4" (flags), "5" (domid)),

Index: src/sys/arch/xen/x86/cpu.c
diff -u src/sys/arch/xen/x86/cpu.c:1.123 src/sys/arch/xen/x86/cpu.c:1.124
--- src/sys/arch/xen/x86/cpu.c:1.123	Tue Jul 24 12:24:45 2018
+++ src/sys/arch/xen/x86/cpu.c	Thu Jul 26 17:20:09 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.c,v 1.123 2018/07/24 12:24:45 bouyer Exp $	*/
+/*	$NetBSD: cpu.c,v 1.124 2018/07/26 17:20:09 maxv Exp $	*/
 
 /*-
  * Copyright (c) 2000 The NetBSD Foundation, Inc.
@@ -65,7 +65,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.123 2018/07/24 12:24:45 bouyer Exp $");
+__KERNEL_RCSID(0, "$NetBSD: cpu.c,v 1.124 2018/07/26 17:20:09 maxv Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -423,9 +423,7 @@ cpu_attach_common(device_t parent, devic
 	/* No user PGD mapped for this CPU yet */
 	ci->ci_xen_current_user_pgd = 0;
 #endif
-#if defined(__x86_64__) || defined(PAE)
 	mutex_init(&ci->ci_kpm_mtx, MUTEX_DEFAULT, IPL_VM);
-#endif
 	pmap_reference(pmap_kernel());
 	ci->ci_pmap = pmap_kernel();
 	ci->ci_tlbstate = TLBSTATE_STALE;
@@ -984,11 +982,7 @@ xen_init_i386_vcpuctxt(struct cpu_info *
 	initctx->ctrlreg[0] = pcb->pcb_cr0;
 	initctx->ctrlreg[1] = 0; /* "resuming" from kernel - no User cr3. */
 	initctx->ctrlreg[2] = (vaddr_t)targeteip;
-#ifdef PAE
 	initctx->ctrlreg[3] = xen_pfn_to_cr3(x86_btop(xpmap_ptom(ci->ci_pae_l3_pdirpa)));
-#else
-	initctx->ctrlreg[3] = xen_pfn_to_cr3(x86_btop(xpmap_ptom(pcb->pcb_cr3)));
-#endif
 	initctx->ctrlreg[4] = /* CR4_PAE | */CR4_OSFXSR | CR4_OSXMMEXCPT;
 
 	/* Xen callbacks */
@@ -1118,19 +1112,16 @@ x86_cpu_idle_xen(void)
 void
 cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap)
 {
-	KASSERT(pmap != pmap_kernel());
-
-#if defined(__x86_64__) || defined(PAE)
 	struct cpu_info *ci = curcpu();
 	cpuid_t cid = cpu_index(ci);
 
+	KASSERT(pmap != pmap_kernel());
+
 	mutex_enter(&ci->ci_kpm_mtx);
 	/* make new pmap visible to xen_kpm_sync() */
 	kcpuset_atomic_set(pmap->pm_xen_ptp_cpus, cid);
-#endif
 
 #ifdef i386
-#ifdef PAE
 	{
 		int i;
 		paddr_t l3_pd = xpmap_ptom_masked(ci->ci_pae_l3_pdirpa);
@@ -1141,10 +1132,7 @@ cpu_load_pmap(struct pmap *pmap, struct 
 		}
 		tlbflush();
 	}
-#else /* PAE */
-	lcr3(pmap_pdirpa(pmap, 0));
-#endif /* PAE */
-#endif /* i386 */
+#endif
 
 #ifdef __x86_64__
 	{
@@ -1176,15 +1164,13 @@ cpu_load_pmap(struct pmap *pmap, struct 
 
 		tlbflush();
 	}
-#endif /* __x86_64__ */
+#endif
 
-#if defined(__x86_64__) || defined(PAE)
 	/* old pmap no longer visible to xen_kpm_sync() */
 	if (oldpmap != pmap_kernel()) {
 		kcpuset_atomic_clear(oldpmap->pm_xen_ptp_cpus, cid);
 	}
 	mutex_exit(&ci->ci_kpm_mtx);
-#endif
 }
 
 /*
@@ -1209,27 +1195,26 @@ cpu_load_pmap(struct pmap *pmap, struct 
 void
 pmap_cpu_init_late(struct cpu_info *ci)
 {
-#if defined(PAE) || defined(__x86_64__)
 	/*
 	 * The BP has already its own PD page allocated during early
 	 * MD startup.
 	 */
 
-#if defined(__x86_64__)
+#ifdef __x86_64__
 	/* Setup per-cpu normal_pdes */
 	int i;
 	extern pd_entry_t * const normal_pdes[];
 	for (i = 0;i < PTP_LEVELS - 1;i++) {
 		ci->ci_normal_pdes[i] = normal_pdes[i];
 	}
-#endif /* __x86_64__ */
+#endif
 
 	if (ci == &cpu_info_primary)
 		return;
 
 	KASSERT(ci != NULL);
 
-#if defined(PAE)
+#if defined(i386)
 	cpu_alloc_l3_page(ci);
 	KASSERT(ci->ci_pae_l3_pdirpa != 0);
 
@@ -1239,7 +1224,7 @@ pmap_cpu_init_late(struct cpu_info *ci)
 		ci->ci_pae_l3_pdir[i] =
 		    xpmap_ptom_masked(pmap_kernel()->pm_pdirpa[i]) | PG_V;
 	}
-#endif /* PAE */
+#endif
 
 	ci->ci_kpm_pdir = (pd_entry_t *)uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
 	    UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_NOWAIT);
@@ -1251,7 +1236,7 @@ pmap_cpu_init_late(struct cpu_info *ci)
 	ci->ci_kpm_pdirpa = vtophys((vaddr_t)ci->ci_kpm_pdir);
 	KASSERT(ci->ci_kpm_pdirpa != 0);
 
-#if defined(__x86_64__)
+#ifdef __x86_64__
 	extern pt_entry_t xpmap_pg_nx;
 
 	/* Copy over the pmap_kernel() shadow L4 entries */
@@ -1260,7 +1245,7 @@ pmap_cpu_init_late(struct cpu_info *ci)
 	/* Recursive kernel mapping */
 	ci->ci_kpm_pdir[PDIR_SLOT_PTE] = xpmap_ptom_masked(ci->ci_kpm_pdirpa)
 	    | PG_V | xpmap_pg_nx;
-#elif defined(PAE)
+#else
 	/* Copy over the pmap_kernel() shadow L2 entries */
 	memcpy(ci->ci_kpm_pdir, pmap_kernel()->pm_pdir + PDIR_SLOT_KERN,
 	    nkptp[PTP_LEVELS - 1] * sizeof(pd_entry_t));
@@ -1270,7 +1255,10 @@ pmap_cpu_init_late(struct cpu_info *ci)
 	pmap_protect(pmap_kernel(), (vaddr_t)ci->ci_kpm_pdir,
 	    (vaddr_t)ci->ci_kpm_pdir + PAGE_SIZE, VM_PROT_READ);
 	pmap_update(pmap_kernel());
-#if defined(PAE)
+
+#ifdef __x86_64__
+	xpq_queue_pin_l4_table(xpmap_ptom_masked(ci->ci_kpm_pdirpa));
+#else
 	/*
 	 * Initialize L3 entry 3. This mapping is shared across all pmaps and is
 	 * static, ie: loading a new pmap will not update this entry.
@@ -1283,11 +1271,7 @@ pmap_cpu_init_late(struct cpu_info *ci)
 	pmap_update(pmap_kernel());
 
 	xpq_queue_pin_l3_table(xpmap_ptom_masked(ci->ci_pae_l3_pdirpa));
-
-#elif defined(__x86_64__)
-	xpq_queue_pin_l4_table(xpmap_ptom_masked(ci->ci_kpm_pdirpa));
-#endif /* PAE , __x86_64__ */
-#endif /* defined(PAE) || defined(__x86_64__) */
+#endif
 }
 
 /*

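After this change, cpu_load_pmap takes the per-CPU kpm mutex unconditionally
on both architectures. A rough sketch of the resulting control flow, pieced
together from the hunks above (the per-arch reload bodies are elided):

	void
	cpu_load_pmap(struct pmap *pmap, struct pmap *oldpmap)
	{
		struct cpu_info *ci = curcpu();
		cpuid_t cid = cpu_index(ci);

		KASSERT(pmap != pmap_kernel());

		mutex_enter(&ci->ci_kpm_mtx);
		/* make new pmap visible to xen_kpm_sync() */
		kcpuset_atomic_set(pmap->pm_xen_ptp_cpus, cid);

		/* i386: update the four PAE L3 entries and flush;
		 * amd64: update the per-cpu L4 and user pgd, then flush. */

		/* old pmap no longer visible to xen_kpm_sync() */
		if (oldpmap != pmap_kernel())
			kcpuset_atomic_clear(oldpmap->pm_xen_ptp_cpus, cid);
		mutex_exit(&ci->ci_kpm_mtx);
	}
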
Index: src/sys/arch/xen/x86/x86_xpmap.c
diff -u src/sys/arch/xen/x86/x86_xpmap.c:1.78 src/sys/arch/xen/x86/x86_xpmap.c:1.79
--- src/sys/arch/xen/x86/x86_xpmap.c:1.78	Thu Jul 26 15:46:09 2018
+++ src/sys/arch/xen/x86/x86_xpmap.c	Thu Jul 26 17:20:09 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: x86_xpmap.c,v 1.78 2018/07/26 15:46:09 maxv Exp $	*/
+/*	$NetBSD: x86_xpmap.c,v 1.79 2018/07/26 17:20:09 maxv Exp $	*/
 
 /*
  * Copyright (c) 2017 The NetBSD Foundation, Inc.
@@ -95,7 +95,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.78 2018/07/26 15:46:09 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.79 2018/07/26 17:20:09 maxv Exp $");
 
 #include "opt_xen.h"
 #include "opt_ddb.h"
@@ -447,16 +447,14 @@ xpq_update_foreign(paddr_t ptr, pt_entry
 #define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
 #endif
 
-#ifdef PAE
+#ifdef __x86_64__
+static const int l2_4_count = PTP_LEVELS;
+#else
 /*
  * For PAE, we consider a single contiguous L2 "superpage" of 4 pages, all of
  * them mapped by the L3 page. We also need a shadow page for L3[3].
  */
 static const int l2_4_count = 6;
-#elif defined(__x86_64__)
-static const int l2_4_count = PTP_LEVELS;
-#else
-static const int l2_4_count = PTP_LEVELS - 1;
 #endif
 
 /*
@@ -698,7 +696,7 @@ xen_bootstrap_tables(vaddr_t old_pgd, va
 	addr = ((u_long)pde) - KERNBASE;
 	pdtpe[pl3_pi(KERNTEXTOFF)] =
 	    xpmap_ptom_masked(addr) | PG_V | PG_RW;
-#elif defined(PAE)
+#else
 	pdtpe = bt_pgd;
 
 	/*
@@ -723,9 +721,6 @@ xen_bootstrap_tables(vaddr_t old_pgd, va
 	}
 	addr += PAGE_SIZE;
 	pdtpe[3] = xpmap_ptom_masked(addr) | PG_V;
-#else
-	pdtpe = bt_pgd;
-	pde = bt_pgd;
 #endif
 
 	/* Level 1 */
@@ -816,7 +811,17 @@ xen_bootstrap_tables(vaddr_t old_pgd, va
 	}
 
 	/* Install recursive page tables mapping */
-#ifdef PAE
+#ifdef __x86_64__
+	/* Recursive entry in pmap_kernel(). */
+	bt_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE)
+	    | PG_RO | PG_V | xpmap_pg_nx;
+	/* Recursive entry in higher-level per-cpu PD. */
+	bt_cpu_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE)
+	    | PG_RO | PG_V | xpmap_pg_nx;
+
+	/* Mark tables RO */
+	xen_bt_set_readonly((vaddr_t)pde);
+#else
 	/* Copy L2 KERN into L2 KERN SHADOW, and reference the latter in cpu0. */
 	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
 	cpu_info_primary.ci_kpm_pdir = &pde[L2_SLOT_KERN + NPDPG];
@@ -844,23 +849,9 @@ xen_bootstrap_tables(vaddr_t old_pgd, va
 		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
 		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
 	}
-#else
-	/* Recursive entry in pmap_kernel(). */
-	bt_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE)
-	    | PG_RO | PG_V | xpmap_pg_nx;
-#ifdef __x86_64__
-	/* Recursive entry in higher-level per-cpu PD. */
-	bt_cpu_pgd[PDIR_SLOT_PTE] = xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE)
-	    | PG_RO | PG_V | xpmap_pg_nx;
 #endif
 
-	/* Mark tables RO */
-	xen_bt_set_readonly((vaddr_t)pde);
-#endif
-
-#if defined(__x86_64__) || defined(PAE)
 	xen_bt_set_readonly((vaddr_t)pdtpe);
-#endif
 #ifdef __x86_64__
 	xen_bt_set_readonly(new_pgd);
 #endif
@@ -868,24 +859,26 @@ xen_bootstrap_tables(vaddr_t old_pgd, va
 	/* Pin the PGD */
 #ifdef __x86_64__
 	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
-#elif PAE
-	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
 #else
-	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
+	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
 #endif
 
 	/* Save phys. addr of PDP, for libkvm. */
-#ifdef PAE
-	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
-#else
+#ifdef __x86_64__
 	PDPpaddr = (u_long)bt_pgd - KERNBASE;
+#else
+	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
 #endif
 
 	/* Switch to new tables */
 	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
 
-#ifdef PAE
 	if (final) {
+#ifdef __x86_64__
+		/* Save the address of the real per-cpu L4 page. */
+		cpu_info_primary.ci_kpm_pdir = bt_cpu_pgd;
+		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t)bt_cpu_pgd - KERNBASE);
+#else
 		/* Save the address of the L3 page */
 		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
 		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);
@@ -896,15 +889,8 @@ xen_bootstrap_tables(vaddr_t old_pgd, va
 		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
 		    xpmap_ptom_masked(addr) | PG_V);
 		xpq_flush_queue();
-	}
-#elif defined(__x86_64__)
-	if (final) {
-		/* Save the address of the real per-cpu L4 page. */
-		cpu_info_primary.ci_kpm_pdir = bt_cpu_pgd;
-		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t)bt_cpu_pgd - KERNBASE);
-	}
 #endif
-	__USE(pdtpe);
+	}
 
 	/*
 	 * Now we can safely reclaim the space taken by the old tables.

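The net effect in xen_bootstrap_tables is a clean two-way split: Xen pins
the root page table at the level matching the paging mode, which is now
always L4 (amd64) or L3 (i386 PAE). The resulting pin step, as implied by
the hunks above:

	/* Pin the PGD */
	#ifdef __x86_64__
		xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
	#else
		xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
	#endif
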
Index: src/sys/arch/xen/x86/xen_pmap.c
diff -u src/sys/arch/xen/x86/xen_pmap.c:1.26 src/sys/arch/xen/x86/xen_pmap.c:1.27
--- src/sys/arch/xen/x86/xen_pmap.c:1.26	Thu Mar 23 18:08:06 2017
+++ src/sys/arch/xen/x86/xen_pmap.c	Thu Jul 26 17:20:09 2018
@@ -1,4 +1,4 @@
-/*	$NetBSD: xen_pmap.c,v 1.26 2017/03/23 18:08:06 maxv Exp $	*/
+/*	$NetBSD: xen_pmap.c,v 1.27 2018/07/26 17:20:09 maxv Exp $	*/
 
 /*
  * Copyright (c) 2007 Manuel Bouyer.
@@ -101,7 +101,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.26 2017/03/23 18:08:06 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_pmap.c,v 1.27 2018/07/26 17:20:09 maxv Exp $");
 
 #include "opt_user_ldt.h"
 #include "opt_lockdebug.h"
@@ -322,8 +322,6 @@ pmap_unmap_recursive_entries(void)
 	}
 }
 
-#if defined(PAE) || defined(__x86_64__)
-
 static __inline void
 pmap_kpm_setpte(struct cpu_info *ci, struct pmap *pmap, int index)
 {
@@ -332,15 +330,17 @@ pmap_kpm_setpte(struct cpu_info *ci, str
 	if (pmap == pmap_kernel()) {
 		KASSERT(index >= PDIR_SLOT_KERN);
 	}
-#ifdef PAE
+
+#ifdef __x86_64__
 	xpq_queue_pte_update(
-	    xpmap_ptetomach(&ci->ci_kpm_pdir[l2tol2(index)]),
+	    xpmap_ptetomach(&ci->ci_kpm_pdir[index]),
 	    pmap->pm_pdir[index]);
-#elif defined(__x86_64__)
+#else
 	xpq_queue_pte_update(
-	    xpmap_ptetomach(&ci->ci_kpm_pdir[index]),
+	    xpmap_ptetomach(&ci->ci_kpm_pdir[l2tol2(index)]),
 	    pmap->pm_pdir[index]);
 #endif
+
 	xpq_flush_queue();
 }
 
@@ -377,5 +377,3 @@ xen_kpm_sync(struct pmap *pmap, int inde
 		mutex_exit(&ci->ci_kpm_mtx);
 	}
 }
-
-#endif /* PAE || __x86_64__ */
