Module Name:    src
Committed By:   thorpej
Date:           Wed Mar 11 13:30:31 UTC 2020

Modified Files:
        src/sys/arch/mips/mips: pmap_machdep.c
        src/sys/arch/powerpc/booke: booke_pmap.c
        src/sys/arch/powerpc/include/booke: pmap.h
        src/sys/arch/riscv/include: pmap.h
        src/sys/arch/riscv/riscv: pmap_machdep.c riscv_machdep.c
        src/sys/uvm/pmap: pmap.c pmap.h

Log Message:
With DEBUG defined, it's possible to execute a TLB-vs-segmap consistency
check from a (soft) interrupt handler.  But if a platform does not otherwise
require the pmap_tlb_miss_lock, then there will be a brief window of
inconsistency that, while harmless, will still fire an assertion in the
consistency check.

Fix this with the following changes:
1- Refactor the pmap_tlb_miss_lock into MI code and rename the hooks from
   pmap_md_tlb_miss_lock_{enter,exit}() to pmap_tlb_miss_lock_{enter,exit}().
   MD code can still define the "md" hooks as necessary, and if so, will
   override the common implementation (see the sketch below).
2- Provide a pmap_bootstrap_common() function to perform common pmap bootstrap
   operations, namely initializing the pmap_tlb_miss_lock if it's needed.
   If MD code overrides the implementation, it's responsible for initializing
   its own lock.
3- Call pmap_bootstrap_common() from the mips, powerpc booke, and riscv
   pmap_bootstrap() routines.  (This required adding one for riscv.)
4- Switch powerpc booke to the common pmap_tlb_miss_lock.
5- Enable pmap_tlb_miss_lock if DEBUG is defined, even if it's not otherwise
   required.

PR port-mips/55062 (Failed assertion in pmap_md_tlb_check_entry())
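
For illustration, an MD override amounts to something like the following
sketch.  The PMAP_MD_NEED_TLB_MISS_LOCK macro and the
pmap_md_tlb_miss_lock_{enter,exit}() hooks are the real interface (see the
powerpc booke and uvm/pmap diffs below); the lock name and exact placement
here are hypothetical:

	/* <machine/pmap.h>: opt in to providing the MD hooks. */
	#define	PMAP_MD_NEED_TLB_MISS_LOCK
	void	pmap_md_tlb_miss_lock_enter(void);
	void	pmap_md_tlb_miss_lock_exit(void);

	/* pmap_machdep.c: MD code owns this lock.  Because the MD hooks
	 * override the common implementation, pmap_bootstrap_common()
	 * will NOT initialize it; the port's pmap_bootstrap() must, e.g.:
	 *
	 *	mutex_init(&md_tlb_miss_lock, MUTEX_SPIN, IPL_HIGH);
	 */
	static kmutex_t md_tlb_miss_lock __cacheline_aligned;

	void
	pmap_md_tlb_miss_lock_enter(void)
	{

		mutex_spin_enter(&md_tlb_miss_lock);
	}

	void
	pmap_md_tlb_miss_lock_exit(void)
	{

		mutex_spin_exit(&md_tlb_miss_lock);
	}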


To generate a diff of this commit:
cvs rdiff -u -r1.26 -r1.27 src/sys/arch/mips/mips/pmap_machdep.c
cvs rdiff -u -r1.26 -r1.27 src/sys/arch/powerpc/booke/booke_pmap.c
cvs rdiff -u -r1.18 -r1.19 src/sys/arch/powerpc/include/booke/pmap.h
cvs rdiff -u -r1.3 -r1.4 src/sys/arch/riscv/include/pmap.h
cvs rdiff -u -r1.4 -r1.5 src/sys/arch/riscv/riscv/pmap_machdep.c
cvs rdiff -u -r1.8 -r1.9 src/sys/arch/riscv/riscv/riscv_machdep.c
cvs rdiff -u -r1.45 -r1.46 src/sys/uvm/pmap/pmap.c
cvs rdiff -u -r1.12 -r1.13 src/sys/uvm/pmap/pmap.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/mips/mips/pmap_machdep.c
diff -u src/sys/arch/mips/mips/pmap_machdep.c:1.26 src/sys/arch/mips/mips/pmap_machdep.c:1.27
--- src/sys/arch/mips/mips/pmap_machdep.c:1.26	Sun Oct 20 08:29:38 2019
+++ src/sys/arch/mips/mips/pmap_machdep.c	Wed Mar 11 13:30:31 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_machdep.c,v 1.26 2019/10/20 08:29:38 skrll Exp $	*/
+/*	$NetBSD: pmap_machdep.c,v 1.27 2020/03/11 13:30:31 thorpej Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.26 2019/10/20 08:29:38 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.27 2020/03/11 13:30:31 thorpej Exp $");
 
 /*
  *	Manages physical address maps.
@@ -358,6 +358,9 @@ pmap_bootstrap(void)
 	kcpuset_set(pm->pm_onproc, cpu_number());
 	kcpuset_set(pm->pm_active, cpu_number());
 #endif
+
+	pmap_bootstrap_common();
+
 	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */
 
 	/*

Index: src/sys/arch/powerpc/booke/booke_pmap.c
diff -u src/sys/arch/powerpc/booke/booke_pmap.c:1.26 src/sys/arch/powerpc/booke/booke_pmap.c:1.27
--- src/sys/arch/powerpc/booke/booke_pmap.c:1.26	Mon Sep  3 16:29:26 2018
+++ src/sys/arch/powerpc/booke/booke_pmap.c	Wed Mar 11 13:30:31 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: booke_pmap.c,v 1.26 2018/09/03 16:29:26 riastradh Exp $	*/
+/*	$NetBSD: booke_pmap.c,v 1.27 2020/03/11 13:30:31 thorpej Exp $	*/
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -38,7 +38,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.26 2018/09/03 16:29:26 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.27 2020/03/11 13:30:31 thorpej Exp $");
 
 #include <sys/param.h>
 #include <sys/kcore.h>
@@ -49,10 +49,6 @@ __KERNEL_RCSID(0, "$NetBSD: booke_pmap.c
 
 #include <machine/pmap.h>
 
-#if defined(MULTIPROCESSOR)
-kmutex_t pmap_tlb_miss_lock;
-#endif
-
 PMAP_COUNTER(zeroed_pages, "pages zeroed");
 PMAP_COUNTER(copied_pages, "pages copied");
 
@@ -154,13 +150,12 @@ pmap_bootstrap(vaddr_t startkernel, vadd
 
 	KASSERT(endkernel == trunc_page(endkernel));
 
+	/* common initialization */
+	pmap_bootstrap_common();
+
 	/* init the lock */
 	pmap_tlb_info_init(&pmap_tlb0_info);
 
-#if defined(MULTIPROCESSOR)
-	mutex_init(&pmap_tlb_miss_lock, MUTEX_SPIN, IPL_HIGH);
-#endif
-
 	/*
 	 * Compute the number of pages kmem_arena will have.
 	 */
@@ -422,18 +417,4 @@ pmap_md_tlb_info_attach(struct pmap_tlb_
 {
 	/* nothing */
 }
-
-void
-pmap_md_tlb_miss_lock_enter(void)
-{
-
-	mutex_spin_enter(&pmap_tlb_miss_lock);
-}
-
-void
-pmap_md_tlb_miss_lock_exit(void)
-{
-
-	mutex_spin_exit(&pmap_tlb_miss_lock);
-}
 #endif /* MULTIPROCESSOR */

Index: src/sys/arch/powerpc/include/booke/pmap.h
diff -u src/sys/arch/powerpc/include/booke/pmap.h:1.18 src/sys/arch/powerpc/include/booke/pmap.h:1.19
--- src/sys/arch/powerpc/include/booke/pmap.h:1.18	Thu Apr 19 21:50:07 2018
+++ src/sys/arch/powerpc/include/booke/pmap.h	Wed Mar 11 13:30:31 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.18 2018/04/19 21:50:07 christos Exp $	*/
+/*	$NetBSD: pmap.h,v 1.19 2020/03/11 13:30:31 thorpej Exp $	*/
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -94,9 +94,7 @@ void	pmap_md_init(void);
 bool	pmap_md_tlb_check_entry(void *, vaddr_t, tlb_asid_t, pt_entry_t);
 
 #ifdef MULTIPROCESSOR
-#define	PMAP_MD_NEED_TLB_MISS_LOCK
-void	pmap_md_tlb_miss_lock_enter(void);
-void	pmap_md_tlb_miss_lock_exit(void);
+#define	PMAP_NEED_TLB_MISS_LOCK
 #endif	/* MULTIPROCESSOR */
 
 #ifdef PMAP_MINIMALTLB

Index: src/sys/arch/riscv/include/pmap.h
diff -u src/sys/arch/riscv/include/pmap.h:1.3 src/sys/arch/riscv/include/pmap.h:1.4
--- src/sys/arch/riscv/include/pmap.h:1.3	Sun Jun 16 07:42:52 2019
+++ src/sys/arch/riscv/include/pmap.h	Wed Mar 11 13:30:31 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap.h,v 1.3 2019/06/16 07:42:52 maxv Exp $ */
+/* $NetBSD: pmap.h,v 1.4 2020/03/11 13:30:31 thorpej Exp $ */
 
 /*
  * Copyright (c) 2014, 2019 The NetBSD Foundation, Inc.
@@ -108,6 +108,8 @@ struct pmap_md {
 	pd_entry_t *md_pdetab;
 };
 
+void	pmap_bootstrap(void);
+
 struct vm_page *
         pmap_md_alloc_poolpage(int flags);
 vaddr_t pmap_md_map_poolpage(paddr_t, vsize_t);

Index: src/sys/arch/riscv/riscv/pmap_machdep.c
diff -u src/sys/arch/riscv/riscv/pmap_machdep.c:1.4 src/sys/arch/riscv/riscv/pmap_machdep.c:1.5
--- src/sys/arch/riscv/riscv/pmap_machdep.c:1.4	Sun Jun 16 07:42:52 2019
+++ src/sys/arch/riscv/riscv/pmap_machdep.c	Wed Mar 11 13:30:31 2020
@@ -1,4 +1,4 @@
-/* $NetBSD: pmap_machdep.c,v 1.4 2019/06/16 07:42:52 maxv Exp $ */
+/* $NetBSD: pmap_machdep.c,v 1.5 2020/03/11 13:30:31 thorpej Exp $ */
 
 /*
  * Copyright (c) 2014, 2019 The NetBSD Foundation, Inc.
@@ -33,7 +33,7 @@
 
 #include <sys/cdefs.h>
 
-__RCSID("$NetBSD: pmap_machdep.c,v 1.4 2019/06/16 07:42:52 maxv Exp $");
+__RCSID("$NetBSD: pmap_machdep.c,v 1.5 2020/03/11 13:30:31 thorpej Exp $");
 
 #include <sys/param.h>
 
@@ -47,6 +47,13 @@ vaddr_t pmap_direct_base __read_mostly;
 vaddr_t pmap_direct_end __read_mostly;
 
 void
+pmap_bootstrap(void)
+{
+
+	pmap_bootstrap_common();
+}
+
+void
 pmap_zero_page(paddr_t pa)
 {
 #ifdef PMAP_DIRECT_MAP

Index: src/sys/arch/riscv/riscv/riscv_machdep.c
diff -u src/sys/arch/riscv/riscv/riscv_machdep.c:1.8 src/sys/arch/riscv/riscv/riscv_machdep.c:1.9
--- src/sys/arch/riscv/riscv/riscv_machdep.c:1.8	Tue Dec 31 13:07:12 2019
+++ src/sys/arch/riscv/riscv/riscv_machdep.c	Wed Mar 11 13:30:31 2020
@@ -31,7 +31,7 @@
 
 #include "opt_modular.h"
 
-__RCSID("$NetBSD: riscv_machdep.c,v 1.8 2019/12/31 13:07:12 ad Exp $");
+__RCSID("$NetBSD: riscv_machdep.c,v 1.9 2020/03/11 13:30:31 thorpej Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -333,4 +333,7 @@ cpu_startup(void)
 void
 init_riscv(vaddr_t kernstart, vaddr_t kernend)
 {
+
+	/* Early VM bootstrap. */
+	pmap_bootstrap();
 }

Index: src/sys/uvm/pmap/pmap.c
diff -u src/sys/uvm/pmap/pmap.c:1.45 src/sys/uvm/pmap/pmap.c:1.46
--- src/sys/uvm/pmap/pmap.c:1.45	Wed Dec 18 10:55:50 2019
+++ src/sys/uvm/pmap/pmap.c	Wed Mar 11 13:30:31 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.45 2019/12/18 10:55:50 skrll Exp $	*/
+/*	$NetBSD: pmap.c,v 1.46 2020/03/11 13:30:31 thorpej Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.45 2019/12/18 10:55:50 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.46 2020/03/11 13:30:31 thorpej Exp $");
 
 /*
  *	Manages physical address maps.
@@ -262,10 +262,49 @@ struct pool_allocator pmap_pv_page_alloc
 #define	pmap_pv_alloc()		pool_get(&pmap_pv_pool, PR_NOWAIT)
 #define	pmap_pv_free(pv)	pool_put(&pmap_pv_pool, (pv))
 
-#if !defined(MULTIPROCESSOR) || !defined(PMAP_MD_NEED_TLB_MISS_LOCK)
-#define	pmap_md_tlb_miss_lock_enter()	do { } while(/*CONSTCOND*/0)
-#define	pmap_md_tlb_miss_lock_exit()	do { } while(/*CONSTCOND*/0)
-#endif /* !MULTIPROCESSOR || !PMAP_MD_NEED_TLB_MISS_LOCK */
+#ifndef PMAP_NEED_TLB_MISS_LOCK
+
+#if defined(PMAP_MD_NEED_TLB_MISS_LOCK) || defined(DEBUG)
+#define	PMAP_NEED_TLB_MISS_LOCK
+#endif /* PMAP_MD_NEED_TLB_MISS_LOCK || DEBUG */
+
+#endif /* PMAP_NEED_TLB_MISS_LOCK */
+
+#ifdef PMAP_NEED_TLB_MISS_LOCK
+
+#ifdef PMAP_MD_NEED_TLB_MISS_LOCK
+#define	pmap_tlb_miss_lock_init()	__nothing /* MD code deals with this */
+#define	pmap_tlb_miss_lock_enter()	pmap_md_tlb_miss_lock_enter()
+#define	pmap_tlb_miss_lock_exit()	pmap_md_tlb_miss_lock_exit()
+#else
+static kmutex_t pmap_tlb_miss_lock __cacheline_aligned;
+
+static void
+pmap_tlb_miss_lock_init(void)
+{
+	mutex_init(&pmap_tlb_miss_lock, MUTEX_SPIN, IPL_HIGH);
+}
+
+static inline void
+pmap_tlb_miss_lock_enter(void)
+{
+	mutex_spin_enter(&pmap_tlb_miss_lock);
+}
+
+static inline void
+pmap_tlb_miss_lock_exit(void)
+{
+	mutex_spin_exit(&pmap_tlb_miss_lock);
+}
+#endif /* PMAP_MD_NEED_TLB_MISS_LOCK */
+
+#else
+
+#define	pmap_tlb_miss_lock_init()	__nothing
+#define	pmap_tlb_miss_lock_enter()	__nothing
+#define	pmap_tlb_miss_lock_exit()	__nothing
+
+#endif /* PMAP_NEED_TLB_MISS_LOCK */
 
 #ifndef MULTIPROCESSOR
 kmutex_t pmap_pvlist_mutex	__cacheline_aligned;
@@ -522,6 +561,16 @@ pmap_steal_memory(vsize_t size, vaddr_t 
 }
 
 /*
+ *	Bootstrap the system enough to run with virtual memory.
+ *	(Common routine called by machine-dependent bootstrap code.)
+ */
+void
+pmap_bootstrap_common(void)
+{
+	pmap_tlb_miss_lock_init();
+}
+
+/*
  *	Initialize the pmap module.
  *	Called by vm_init, to initialize any structures that the pmap
  *	system needs to map virtual memory.
@@ -621,10 +670,10 @@ pmap_destroy(pmap_t pmap)
 	PMAP_COUNT(destroy);
 	KASSERT(pmap->pm_count == 0);
 	kpreempt_disable();
-	pmap_md_tlb_miss_lock_enter();
+	pmap_tlb_miss_lock_enter();
 	pmap_tlb_asid_release_all(pmap);
 	pmap_segtab_destroy(pmap, NULL, 0);
-	pmap_md_tlb_miss_lock_exit();
+	pmap_tlb_miss_lock_exit();
 
 #ifdef MULTIPROCESSOR
 	kcpuset_destroy(pmap->pm_active);
@@ -670,12 +719,12 @@ pmap_activate(struct lwp *l)
 	PMAP_COUNT(activate);
 
 	kpreempt_disable();
-	pmap_md_tlb_miss_lock_enter();
+	pmap_tlb_miss_lock_enter();
 	pmap_tlb_asid_acquire(pmap, l);
 	if (l == curlwp) {
 		pmap_segtab_activate(pmap, l);
 	}
-	pmap_md_tlb_miss_lock_exit();
+	pmap_tlb_miss_lock_exit();
 	kpreempt_enable();
 
 	UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
@@ -772,7 +821,7 @@ pmap_page_remove(struct vm_page *pg)
 			pmap->pm_stats.wired_count--;
 		pmap->pm_stats.resident_count--;
 
-		pmap_md_tlb_miss_lock_enter();
+		pmap_tlb_miss_lock_enter();
 		const pt_entry_t npte = pte_nv_entry(is_kernel_pmap_p);
 		pte_set(ptep, npte);
 		if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
@@ -781,7 +830,7 @@ pmap_page_remove(struct vm_page *pg)
 			 */
 			pmap_tlb_invalidate_addr(pmap, va);
 		}
-		pmap_md_tlb_miss_lock_exit();
+		pmap_tlb_miss_lock_exit();
 
 		/*
 		 * non-null means this is a non-pvh_first pv, so we should
@@ -820,13 +869,13 @@ pmap_deactivate(struct lwp *l)
 
 	kpreempt_disable();
 	KASSERT(l == curlwp || l->l_cpu == curlwp->l_cpu);
-	pmap_md_tlb_miss_lock_enter();
+	pmap_tlb_miss_lock_enter();
 	curcpu()->ci_pmap_user_segtab = PMAP_INVALID_SEGTAB_ADDRESS;
 #ifdef _LP64
 	curcpu()->ci_pmap_user_seg0tab = NULL;
 #endif
 	pmap_tlb_asid_deactivate(pmap);
-	pmap_md_tlb_miss_lock_exit();
+	pmap_tlb_miss_lock_exit();
 	kpreempt_enable();
 
 	UVMHIST_LOG(pmaphist, " <-- done (%ju:%ju)", l->l_proc->p_pid,
@@ -846,7 +895,7 @@ pmap_update(struct pmap *pmap)
 	if (pending && pmap_tlb_shootdown_bystanders(pmap))
 		PMAP_COUNT(shootdown_ipis);
 #endif
-	pmap_md_tlb_miss_lock_enter();
+	pmap_tlb_miss_lock_enter();
 #if defined(DEBUG) && !defined(MULTIPROCESSOR)
 	pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
 #endif /* DEBUG */
@@ -860,7 +909,7 @@ pmap_update(struct pmap *pmap)
 		pmap_tlb_asid_acquire(pmap, curlwp);
 		pmap_segtab_activate(pmap, curlwp);
 	}
-	pmap_md_tlb_miss_lock_exit();
+	pmap_tlb_miss_lock_exit();
 	kpreempt_enable();
 
 	UVMHIST_LOG(pmaphist, " <-- done (kernel=%#jx)",
@@ -905,7 +954,7 @@ pmap_pte_remove(pmap_t pmap, vaddr_t sva
 		if (__predict_true(pg != NULL)) {
 			pmap_remove_pv(pmap, sva, pg, pte_modified_p(pte));
 		}
-		pmap_md_tlb_miss_lock_enter();
+		pmap_tlb_miss_lock_enter();
 		pte_set(ptep, npte);
 		if (__predict_true(!(pmap->pm_flags & PMAP_DEFERRED_ACTIVATE))) {
 
@@ -914,7 +963,7 @@ pmap_pte_remove(pmap_t pmap, vaddr_t sva
 			 */
 			pmap_tlb_invalidate_addr(pmap, sva);
 		}
-		pmap_md_tlb_miss_lock_exit();
+		pmap_tlb_miss_lock_exit();
 	}
 
 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
@@ -1061,13 +1110,13 @@ pmap_pte_protect(pmap_t pmap, vaddr_t sv
 		}
 		pte = pte_prot_downgrade(pte, prot);
 		if (*ptep != pte) {
-			pmap_md_tlb_miss_lock_enter();
+			pmap_tlb_miss_lock_enter();
 			pte_set(ptep, pte);
 			/*
 			 * Update the TLB if needed.
 			 */
 			pmap_tlb_update_addr(pmap, sva, pte, PMAP_TLB_NEED_IPI);
-			pmap_md_tlb_miss_lock_exit();
+			pmap_tlb_miss_lock_exit();
 		}
 	}
 
@@ -1143,10 +1192,10 @@ pmap_page_cache(struct vm_page *pg, bool
 		pt_entry_t pte = *ptep;
 		if (pte_valid_p(pte)) {
 			pte = pte_cached_change(pte, cached);
-			pmap_md_tlb_miss_lock_enter();
+			pmap_tlb_miss_lock_enter();
 			pte_set(ptep, pte);
 			pmap_tlb_update_addr(pmap, va, pte, PMAP_TLB_NEED_IPI);
-			pmap_md_tlb_miss_lock_exit();
+			pmap_tlb_miss_lock_exit();
 		}
 	}
 
@@ -1277,10 +1326,10 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 
 	KASSERT(pte_valid_p(npte));
 
-	pmap_md_tlb_miss_lock_enter();
+	pmap_tlb_miss_lock_enter();
 	pte_set(ptep, npte);
 	pmap_tlb_update_addr(pmap, va, npte, update_flags);
-	pmap_md_tlb_miss_lock_exit();
+	pmap_tlb_miss_lock_exit();
 	kpreempt_enable();
 
 	if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) {
@@ -1362,10 +1411,10 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, v
 	 * We have the option to force this mapping into the TLB but we
 	 * don't.  Instead let the next reference to the page do it.
 	 */
-	pmap_md_tlb_miss_lock_enter();
+	pmap_tlb_miss_lock_enter();
 	pte_set(ptep, npte);
 	pmap_tlb_update_addr(pmap_kernel(), va, npte, 0);
-	pmap_md_tlb_miss_lock_exit();
+	pmap_tlb_miss_lock_exit();
 	kpreempt_enable();
 #if DEBUG > 1
 	for (u_int i = 0; i < PAGE_SIZE / sizeof(long); i++) {
@@ -1413,10 +1462,10 @@ pmap_pte_kremove(pmap_t pmap, vaddr_t sv
 		}
 #endif
 
-		pmap_md_tlb_miss_lock_enter();
+		pmap_tlb_miss_lock_enter();
 		pte_set(ptep, new_pte);
 		pmap_tlb_invalidate_addr(pmap, sva);
-		pmap_md_tlb_miss_lock_exit();
+		pmap_tlb_miss_lock_exit();
 	}
 
 	UVMHIST_LOG(pmaphist, " <-- done", 0, 0, 0, 0);
@@ -1453,7 +1502,7 @@ pmap_remove_all(struct pmap *pmap)
 	 * Free all of our ASIDs which means we can skip doing all the
 	 * tlb_invalidate_addrs().
 	 */
-	pmap_md_tlb_miss_lock_enter();
+	pmap_tlb_miss_lock_enter();
 #ifdef MULTIPROCESSOR
 	// This should be the last CPU with this pmap onproc
 	KASSERT(!kcpuset_isotherset(pmap->pm_onproc, cpu_index(curcpu())));
@@ -1464,7 +1513,7 @@ pmap_remove_all(struct pmap *pmap)
 	KASSERT(kcpuset_iszero(pmap->pm_onproc));
 #endif
 	pmap_tlb_asid_release_all(pmap);
-	pmap_md_tlb_miss_lock_exit();
+	pmap_tlb_miss_lock_exit();
 	pmap->pm_flags |= PMAP_DEFERRED_ACTIVATE;
 
 #ifdef PMAP_FAULTINFO
@@ -1506,9 +1555,9 @@ pmap_unwire(pmap_t pmap, vaddr_t va)
 	    pmap, va, pte_value(pte), ptep);
 
 	if (pte_wired_p(pte)) {
-		pmap_md_tlb_miss_lock_enter();
+		pmap_tlb_miss_lock_enter();
 		pte_set(ptep, pte_unwire_entry(pte));
-		pmap_md_tlb_miss_lock_exit();
+		pmap_tlb_miss_lock_exit();
 		pmap->pm_stats.wired_count--;
 	}
 #ifdef DIAGNOSTIC
@@ -1672,10 +1721,10 @@ pmap_clear_modify(struct vm_page *pg)
 		}
 		KASSERT(pte_valid_p(pte));
 		const uintptr_t gen = VM_PAGEMD_PVLIST_UNLOCK(mdpg);
-		pmap_md_tlb_miss_lock_enter();
+		pmap_tlb_miss_lock_enter();
 		pte_set(ptep, pte);
 		pmap_tlb_invalidate_addr(pmap, va);
-		pmap_md_tlb_miss_lock_exit();
+		pmap_tlb_miss_lock_exit();
 		pmap_update(pmap);
 		if (__predict_false(gen != VM_PAGEMD_PVLIST_READLOCK(mdpg))) {
 			/*

Index: src/sys/uvm/pmap/pmap.h
diff -u src/sys/uvm/pmap/pmap.h:1.12 src/sys/uvm/pmap/pmap.h:1.13
--- src/sys/uvm/pmap/pmap.h:1.12	Sat Jun  1 12:42:28 2019
+++ src/sys/uvm/pmap/pmap.h	Wed Mar 11 13:30:31 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.12 2019/06/01 12:42:28 maxv Exp $	*/
+/*	$NetBSD: pmap.h,v 1.13 2020/03/11 13:30:31 thorpej Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -103,6 +103,7 @@ typedef union pmap_segtab {
 struct pmap;
 typedef bool (*pte_callback_t)(struct pmap *, vaddr_t, vaddr_t,
 	pt_entry_t *, uintptr_t);
+void pmap_bootstrap_common(void);
 pt_entry_t *pmap_pte_lookup(struct pmap *, vaddr_t);
 pt_entry_t *pmap_pte_reserve(struct pmap *, vaddr_t, int);
 void pmap_pte_process(struct pmap *, vaddr_t, vaddr_t, pte_callback_t,
