Module Name:    src
Committed By:   matt
Date:           Fri Dec 23 22:31:30 UTC 2011

Modified Files:
        src/sys/arch/mips/conf [matt-nb5-mips64]: files.mips
        src/sys/arch/mips/include [matt-nb5-mips64]: pmap.h
        src/sys/arch/mips/mips [matt-nb5-mips64]: pmap.c pmap_tlb.c
Added Files:
        src/sys/arch/mips/mips [matt-nb5-mips64]: pmap_syncicache.c

Log Message:
Split the syncicache functions into a separate file: pmap_syncicache.c.
Support up to 1024 ASIDs.
Always use atomic ops for manipulating pm_shootdown_pending.
Nuke PMAP_POOLPAGE_DEBUG.
Add a defparam for MIPS_PAGE_SHIFT.
Track colors of execpages.
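
For reference, the pm_shootdown_pending change below replaces plain stores with
the atomic read-modify-write primitives from <sys/atomic.h>, so a shootdown
request set by one CPU cannot be lost to a racing store from another.  A minimal
sketch of the pattern (struct pmap_sketch is a hypothetical stand-in for the
real struct pmap; only the field name and the primitives come from the diff):

#include <sys/types.h>
#include <sys/atomic.h>

struct pmap_sketch {			/* abbreviated stand-in for struct pmap */
	volatile u_int pm_shootdown_pending;
};

static void
mark_shootdown(struct pmap_sketch *pm, u_int need_ipi)
{
	/*
	 * A plain "pm->pm_shootdown_pending = need_ipi" could overwrite a 1
	 * written by another CPU with a 0; OR-ing atomically never loses an
	 * earlier request (as in pmap_tlb_update_addr()).
	 */
	atomic_or_uint(&pm->pm_shootdown_pending, need_ipi);
}

static void
force_shootdown(struct pmap_sketch *pm)
{
	/*
	 * atomic_swap_uint() sets the flag with full read-modify-write
	 * semantics, matching the pmap_tlb_invalidate_addr() change.
	 */
	(void)atomic_swap_uint(&pm->pm_shootdown_pending, 1);
}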


To generate a diff of this commit:
cvs rdiff -u -r1.58.24.17 -r1.58.24.18 src/sys/arch/mips/conf/files.mips
cvs rdiff -u -r1.54.26.19 -r1.54.26.20 src/sys/arch/mips/include/pmap.h
cvs rdiff -u -r1.179.16.34 -r1.179.16.35 src/sys/arch/mips/mips/pmap.c
cvs rdiff -u -r0 -r1.1.2.1 src/sys/arch/mips/mips/pmap_syncicache.c
cvs rdiff -u -r1.1.2.20 -r1.1.2.21 src/sys/arch/mips/mips/pmap_tlb.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/mips/conf/files.mips
diff -u src/sys/arch/mips/conf/files.mips:1.58.24.17 src/sys/arch/mips/conf/files.mips:1.58.24.18
--- src/sys/arch/mips/conf/files.mips:1.58.24.17	Fri Dec  2 00:01:37 2011
+++ src/sys/arch/mips/conf/files.mips	Fri Dec 23 22:31:30 2011
@@ -3,8 +3,6 @@
 
 defflag	opt_cputype.h		NOFPU FPEMUL
 				MIPS64_SB1
-				ENABLE_MIPS_16KB_PAGE
-				ENABLE_MIPS_8KB_PAGE
 				ENABLE_MIPS_KSEGX
 				MIPS64_XLP MIPS64_XLR MIPS64_XLS
 					# and the rest...
@@ -15,9 +13,10 @@ defflag	opt_cputype.h		NOFPU FPEMUL
 					# ENABLE_MIPS_TX3900
 					# ENABLE_MIPS_R4700
 					# ENABLE_MIPS_R3NKK
-defflag	opt_mips_cache.h		MIPS3_NO_PV_UNCACHED
-					ENABLE_MIPS4_CACHE_R10K
-defflag opt_mips3_wired.h		ENABLE_MIPS3_WIRED_MAP
+defparam opt_cputype.h		MIPS_PAGE_SHIFT
+defflag	opt_mips_cache.h	MIPS3_NO_PV_UNCACHED
+				ENABLE_MIPS4_CACHE_R10K
+defflag opt_mips3_wired.h	ENABLE_MIPS3_WIRED_MAP
 
 defflag	opt_ddb.h		DDB_TRACE
 defflag	opt_ddb.h		MIPS_DDB_WATCH
@@ -47,6 +46,7 @@ file	arch/mips/mips/kgdb_machdep.c		kgdb
 file	arch/mips/mips/mem.c
 file	arch/mips/mips/pmap.c
 file	arch/mips/mips/pmap_segtab.c
+file	arch/mips/mips/pmap_syncicache.c
 file	arch/mips/mips/pmap_tlb.c
 file	arch/mips/mips/trap.c			# trap handlers
 file	arch/mips/mips/syscall.c		# syscall entries

Index: src/sys/arch/mips/include/pmap.h
diff -u src/sys/arch/mips/include/pmap.h:1.54.26.19 src/sys/arch/mips/include/pmap.h:1.54.26.20
--- src/sys/arch/mips/include/pmap.h:1.54.26.19	Sat Dec  3 01:56:55 2011
+++ src/sys/arch/mips/include/pmap.h	Fri Dec 23 22:31:30 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.54.26.19 2011/12/03 01:56:55 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.54.26.20 2011/12/23 22:31:30 matt Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -201,25 +201,26 @@ struct pmap_tlb_info {
 	uint32_t ti_asid_mask;
 	uint32_t ti_asid_max;
 	LIST_HEAD(, pmap_asid_info) ti_pais; /* list of active ASIDs */
+	uint32_t ti_syncicache_bitmap;	/* page indices needing a syncicache */
+	struct evcnt ti_evcnt_syncicache_asts;
+	struct evcnt ti_evcnt_syncicache_all;
+	struct evcnt ti_evcnt_syncicache_pages;
+	struct evcnt ti_evcnt_syncicache_desired;
+	struct evcnt ti_evcnt_syncicache_duplicate;
 #ifdef MULTIPROCESSOR
 	kmutex_t *ti_hwlock;
 	pmap_t ti_victim;
-	uint32_t ti_synci_page_bitmap;	/* page indices needing a syncicache */
 	uint32_t ti_cpu_mask;		/* bitmask of CPUs sharing this TLB */
 	enum tlb_invalidate_op ti_tlbinvop;
 	u_int ti_index;
 #define tlbinfo_index(ti)	((ti)->ti_index)
-	struct evcnt ti_evcnt_synci_asts;
-	struct evcnt ti_evcnt_synci_all;
-	struct evcnt ti_evcnt_synci_pages;
-	struct evcnt ti_evcnt_synci_deferred;
-	struct evcnt ti_evcnt_synci_desired;
-	struct evcnt ti_evcnt_synci_duplicate;
+	struct evcnt ti_evcnt_syncicache_deferred;
 #else
 #define tlbinfo_index(ti)	(0)
 #endif
 	struct evcnt ti_evcnt_asid_reinits;
-	u_long ti_asid_bitmap[256 / (sizeof(u_long) * 8)];
+	struct evcnt ti_evcnt_asid_reclaims;
+	u_long ti_asid_bitmap[1024 / (sizeof(u_long) * 8)];
 };
 
 #ifdef	_KERNEL
@@ -237,6 +238,8 @@ extern struct pmap_tlb_info pmap_tlb0_in
 extern struct pmap_tlb_info *pmap_tlbs[MAXCPUS];
 extern u_int pmap_ntlbs;
 #endif
+extern u_int pmap_syncicache_page_mask;
+extern u_int pmap_syncicache_map_mask;
 extern paddr_t mips_avail_start;
 extern paddr_t mips_avail_end;
 extern vaddr_t mips_virtual_end;
@@ -261,10 +264,12 @@ void	pmap_procwr(struct proc *, vaddr_t,
 void	pmap_tlb_shootdown_process(void);
 bool	pmap_tlb_shootdown_bystanders(pmap_t pmap);
 void	pmap_tlb_info_attach(struct pmap_tlb_info *, struct cpu_info *);
-void	pmap_tlb_syncicache_ast(struct cpu_info *);
-void	pmap_tlb_syncicache_wanted(struct cpu_info *);
-void	pmap_tlb_syncicache(vaddr_t, uint32_t);
+void	pmap_syncicache_wanted(struct cpu_info *);
+void	pmap_syncicache(uint32_t, uint32_t);
 #endif
+void	pmap_syncicache_page(struct vm_page *, uint32_t);
+void	pmap_syncicache_init(void);
+void	pmap_syncicache_ast(struct cpu_info *);
 void	pmap_tlb_info_init(struct pmap_tlb_info *);
 void	pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *);
 void	pmap_tlb_asid_acquire(pmap_t pmap, struct lwp *l);
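
The ti_asid_bitmap growth from 256 to 1024 bits keeps the one-bit-per-ASID
layout and simply quadruples the word array.  A small, self-contained sketch of
the sizing and of the bit operation that the __BITMAP_SET() macro in pmap_tlb.c
performs (the helper names and the test in main() are illustrative, not code
from the tree):

#include <stdio.h>
#include <limits.h>

#define MAX_ASIDS	1024
#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)

/* Same layout as ti_asid_bitmap[1024 / (sizeof(u_long) * 8)]. */
static unsigned long asid_bitmap[MAX_ASIDS / BITS_PER_WORD];

/* Equivalent of the __BITMAP_SET() macro in pmap_tlb.c. */
static void
bitmap_set(unsigned long *bm, unsigned int n)
{
	bm[n / BITS_PER_WORD] |= 1UL << (n % BITS_PER_WORD);
}

/* Illustrative query helper. */
static int
bitmap_isset(const unsigned long *bm, unsigned int n)
{
	return (bm[n / BITS_PER_WORD] >> (n % BITS_PER_WORD)) & 1;
}

int
main(void)
{
	/* ASID 0 stays reserved, as in pmap_tlb_asid_reinitialize(). */
	bitmap_set(asid_bitmap, 0);
	bitmap_set(asid_bitmap, 1023);
	printf("%zu words cover %d ASIDs; ASID 1023 set: %d\n",
	    sizeof(asid_bitmap) / sizeof(asid_bitmap[0]),
	    MAX_ASIDS, bitmap_isset(asid_bitmap, 1023));
	return 0;
}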

Index: src/sys/arch/mips/mips/pmap.c
diff -u src/sys/arch/mips/mips/pmap.c:1.179.16.34 src/sys/arch/mips/mips/pmap.c:1.179.16.35
--- src/sys/arch/mips/mips/pmap.c:1.179.16.34	Fri Dec 16 23:15:39 2011
+++ src/sys/arch/mips/mips/pmap.c	Fri Dec 23 22:31:30 2011
@@ -279,15 +279,6 @@ vaddr_t mips_virtual_end;	/* VA of last 
 pt_entry_t	*Sysmap;		/* kernel pte table */
 unsigned int	Sysmapsize;		/* number of pte's in Sysmap */
 
-#ifdef PMAP_POOLPAGE_DEBUG
-struct poolpage_info {
-	vaddr_t base;
-	vaddr_t size;
-	vaddr_t hint;
-	pt_entry_t *sysmap;
-} poolpage;
-#endif
-
 static void pmap_pvlist_lock_init(void);
 
 /*
@@ -367,38 +358,6 @@ pmap_set_mdpage_attributes(struct vm_pag
 #endif
 }
 
-static inline void
-pmap_page_syncicache(struct vm_page *pg)
-{
-	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
-#ifdef MULTIPROCESSOR
-	pv_entry_t pv = &md->pvh_first;
-	uint32_t onproc = 0;
-	(void)PG_MD_PVLIST_LOCK(md, false);
-	if (pv->pv_pmap != NULL) {
-		for (; pv != NULL; pv = pv->pv_next) {
-			onproc |= pv->pv_pmap->pm_onproc;
-			if (onproc == cpus_running)
-				break;
-		}
-	}
-	PG_MD_PVLIST_UNLOCK(md);
-	kpreempt_disable();
-	pmap_tlb_syncicache(md->pvh_first.pv_va, onproc);
-	kpreempt_enable();
-#else
-	if (MIPS_HAS_R4K_MMU) {
-		if (PG_MD_CACHED_P(md)) {
-			mips_icache_sync_range_index(
-			    md->pvh_first.pv_va, PAGE_SIZE);
-		}
-	} else {
-		mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)),
-		    PAGE_SIZE);
-	}
-#endif
-}
-
 static vaddr_t
 pmap_map_ephemeral_page(struct vm_page *pg, int prot, pt_entry_t *old_pt_entry_p)
 {
@@ -501,6 +460,8 @@ pmap_bootstrap(void)
 	if (MIPS_CACHE_VIRTUAL_ALIAS && uvmexp.ncolors)
 		pmap_page_colormask = (uvmexp.ncolors - 1) << PAGE_SHIFT;
 
+	KASSERT(uvmexp.ncolors <= 16 - PG_MD_EXECPAGE_SHIFT);
+
 	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */
 
 #ifdef ENABLE_MIPS_KSEGX
@@ -547,10 +508,6 @@ pmap_bootstrap(void)
 #ifdef KSEG2IOBUFSIZE
 	Sysmapsize += (KSEG2IOBUFSIZE >> PGSHIFT);
 #endif
-#ifdef PMAP_POOLPAGE_DEBUG
-	poolpage.size = nkmempages + MCLBYTES * nmbclusters;
-	Sysmapsize += poolpage.size;
-#endif
 #ifdef _LP64
 	/*
 	 * If we are using tmpfs, then we might want to use a great deal of
@@ -586,7 +543,11 @@ pmap_bootstrap(void)
 	}
 #endif
  
-	if (mips_virtual_end > VM_MAX_KERNEL_ADDRESS) {
+	if (mips_virtual_end > VM_MAX_KERNEL_ADDRESS
+	   || mips_virtual_end < VM_MIN_KERNEL_ADDRESS) {
+		printf("%s: chaning last kernel VA from %#"PRIxVADDR
+		    " to %#"PRIxVADDR"\n", __func__,
+		    mips_virtual_end, VM_MAX_KERNEL_ADDRESS);
 		mips_virtual_end = VM_MAX_KERNEL_ADDRESS;
 		Sysmapsize =
 		    (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / NBPG;
@@ -601,11 +562,6 @@ pmap_bootstrap(void)
 	Sysmap = (pt_entry_t *)
 	    uvm_pageboot_alloc(sizeof(pt_entry_t) * Sysmapsize);
 
-#ifdef PMAP_POOLPAGE_DEBUG
-	mips_virtual_end -= poolpage.size;
-	poolpage.base = mips_virtual_end;
-	poolpage.sysmap = Sysmap + atop(poolpage.size);
-#endif
 	/*
 	 * Initialize the pools.
 	 */
@@ -1151,7 +1107,7 @@ pmap_page_protect(struct vm_page *pg, vm
 		 * Do this first so that for each unmapping, pmap_remove_pv
 		 * won't try to sync the icache.
 		 */
-		if (pmap_clear_mdpage_attributes(md, PG_MD_EXECPAGE)) {
+		if (pmap_clear_mdpage_attributes(md, PG_MD_EXECPAGE_ANY)) {
 			PMAP_COUNT(exec_uncached_page_protect);
 		}
 		(void)PG_MD_PVLIST_LOCK(md, false);
@@ -1190,10 +1146,11 @@ pmap_pte_protect(pmap_t pmap, vaddr_t sv
 			    && MIPS_CACHE_VIRTUAL_ALIAS
 			    && PG_MD_CACHED_P(md))
 				mips_dcache_wbinv_range_index(sva, PAGE_SIZE);
-			if (PG_MD_EXECPAGE_P(md)) {
+			if (PG_MD_EXECPAGE_ANY_P(md)) {
 				KASSERT(md->pvh_first.pv_pmap != NULL);
 				if (PG_MD_CACHED_P(md)) {
-					pmap_page_syncicache(pg);
+					pmap_syncicache_page(pg,
+					    PG_MD_EXECPAGES(md));
 					PMAP_COUNT(exec_synced_protect);
 				}
 			}
@@ -1550,10 +1507,10 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 	    && prot == (VM_PROT_READ | VM_PROT_EXECUTE)) {
 		struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 		PMAP_COUNT(enter_exec_mapping);
-		if (!PG_MD_EXECPAGE_P(md)) {
+		if (!PG_MD_EXECPAGE_P(md, va)) {
 			mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(pa),
 			    PAGE_SIZE);
-			pmap_set_mdpage_attributes(md, PG_MD_EXECPAGE);
+			pmap_set_mdpage_attributes(md, PG_MD_EXECPAGE(va));
 			PMAP_COUNT(exec_syncicache_entry);
 		}
 	}
@@ -1679,9 +1636,9 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd
 			    va - NBPG, pa);
 #endif
 		PMAP_COUNT(exec_mappings);
-		if (!PG_MD_EXECPAGE_P(md) && PG_MD_CACHED_P(md)) {
-			pmap_page_syncicache(pg);
-			pmap_set_mdpage_attributes(md, PG_MD_EXECPAGE);
+		if (!PG_MD_EXECPAGE_P(md, va) && PG_MD_CACHED_P(md)) {
+			pmap_set_mdpage_attributes(md, PG_MD_EXECPAGE(va));
+			pmap_syncicache_page(pg, PG_MD_EXECPAGE(va));
 			PMAP_COUNT(exec_synced_mappings);
 		}
 	}
@@ -2061,12 +2018,12 @@ pmap_clear_modify(struct vm_page *pg)
 	if (pmapdebug & PDB_FOLLOW)
 		printf("pmap_clear_modify(%#"PRIxPADDR")\n", VM_PAGE_TO_PHYS(pg));
 #endif
-	if (PG_MD_EXECPAGE_P(md)) {
+	if (PG_MD_EXECPAGE_ANY_P(md)) {
 		if (pv->pv_pmap == NULL) {
-			pmap_clear_mdpage_attributes(md, PG_MD_EXECPAGE);
+			pmap_clear_mdpage_attributes(md, PG_MD_EXECPAGE_ANY);
 			PMAP_COUNT(exec_uncached_clear_modify);
 		} else {
-			pmap_page_syncicache(pg);
+			pmap_syncicache_page(pg, PG_MD_EXECPAGES(md));
 			PMAP_COUNT(exec_synced_clear_modify);
 		}
 	}
@@ -2319,7 +2276,7 @@ again:
 			    pmap, va);
 #endif
 		if (__predict_true(apv == NULL)) {
-#if defined(MULTIPROCESSOR) || !defined(_LP64) || defined(PMAP_POOLPAGE_DEBUG) || defined(LOCKDEBUG)
+#if defined(MULTIPROCESSOR) || !defined(_LP64) || defined(LOCKDEBUG)
 			/*
 			 * To allocate a PV, we have to release the PVLIST lock
 			 * so get the page generation.  We allocate the PV, and
@@ -2330,7 +2287,7 @@ again:
 			apv = (pv_entry_t)pmap_pv_alloc();
 			if (apv == NULL)
 				panic("pmap_enter_pv: pmap_pv_alloc() failed");
-#if defined(MULTIPROCESSOR) || !defined(_LP64) || defined(PMAP_POOLPAGE_DEBUG) || defined(LOCKDEBUG)
+#if defined(MULTIPROCESSOR) || !defined(_LP64) || defined(LOCKDEBUG)
 #ifdef MULTIPROCESSOR
 			/*
 			 * If the generation has changed, then someone else
@@ -2439,20 +2396,20 @@ pmap_remove_pv(pmap_t pmap, vaddr_t va, 
 	 */
 	if (npv)
 		pmap_pv_free(npv);
-	if (PG_MD_EXECPAGE_P(md) && dirty) {
+	if (PG_MD_EXECPAGE_P(md, va) && dirty) {
 		if (last) {
 			/*
 			 * If this was the page's last mapping, we no longer
 			 * care about its execness.
 			 */
-			pmap_clear_mdpage_attributes(md, PG_MD_EXECPAGE);
+			pmap_clear_mdpage_attributes(md, PG_MD_EXECPAGE_ANY);
 			PMAP_COUNT(exec_uncached_remove);
 		} else {
 			/*
 			 * Someone still has it mapped as an executable page
 			 * so we must sync it.
 			 */
-			pmap_page_syncicache(pg);
+			pmap_syncicache_page(pg, PG_MD_EXECPAGES(md));
 			PMAP_COUNT(exec_synced_remove);
 		}
 	}
@@ -2660,31 +2617,6 @@ mips_pmap_map_poolpage(paddr_t pa)
 	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
 	pmap_set_mdpage_attributes(md, PG_MD_POOLPAGE);
 
-#ifdef PMAP_POOLPAGE_DEBUG
-	KASSERT((poolpage.hint & MIPS_CACHE_ALIAS_MASK) == 0);
-	vaddr_t va_offset = poolpage.hint + mips_cache_indexof(pa);
-	pt_entry_t *pte = poolpage.sysmap + atop(va_offset);
-	const size_t va_inc = MIPS_CACHE_ALIAS_MASK + PAGE_SIZE;
-	const size_t pte_inc = atop(va_inc);
-
-	for (; va_offset < poolpage.size;
-	     va_offset += va_inc, pte += pte_inc) {
-		if (!mips_pg_v(pte->pt_entry))
-			break;
-	}
-	if (va_offset >= poolpage.size) {
-		for (va_offset -= poolpage.size, pte -= atop(poolpage.size);
-		     va_offset < poolpage.hint;
-		     va_offset += va_inc, pte += pte_inc) {
-			if (!mips_pg_v(pte->pt_entry))
-				break;
-		}
-	}
-	KASSERT(!mips_pg_v(pte->pt_entry));
-	va = poolpage.base + va_offset;
-	poolpage.hint = roundup2(va_offset + 1, va_inc);
-	pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PORT_WRITE);
-#else
 #ifdef _LP64
 	KASSERT(mips_options.mips3_xkphys_cached);
 	va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
@@ -2700,8 +2632,7 @@ mips_pmap_map_poolpage(paddr_t pa)
 	else
 		va = MIPS_PHYS_TO_KSEG0(pa);
 #endif
-#endif
-#if !defined(_LP64) || defined(PMAP_POOLPAGE_DEBUG)
+#if !defined(_LP64)
 	if (MIPS_CACHE_VIRTUAL_ALIAS) {
 		/*
 		 * If this page was last mapped with an address that might
@@ -2724,10 +2655,7 @@ paddr_t
 mips_pmap_unmap_poolpage(vaddr_t va)
 {
 	paddr_t pa;
-#ifdef PMAP_POOLPAGE_DEBUG
-	KASSERT(poolpage.base <= va && va < poolpage.base + poolpage.size);
-	pa = mips_tlbpfn_to_paddr(kvtopte(va)->pt_entry);
-#elif defined(_LP64)
+#if defined(_LP64)
 	KASSERT(MIPS_XKPHYS_P(va));
 	pa = MIPS_XKPHYS_TO_PHYS(va);
 #else
@@ -2752,9 +2680,6 @@ mips_pmap_unmap_poolpage(vaddr_t va)
 		mips_dcache_inv_range(va, PAGE_SIZE);
 	}
 #endif
-#ifdef PMAP_POOLPAGE_DEBUG
-	pmap_kremove(va, PAGE_SIZE);
-#endif
 	return pa;
 }
 

Index: src/sys/arch/mips/mips/pmap_tlb.c
diff -u src/sys/arch/mips/mips/pmap_tlb.c:1.1.2.20 src/sys/arch/mips/mips/pmap_tlb.c:1.1.2.21
--- src/sys/arch/mips/mips/pmap_tlb.c:1.1.2.20	Tue Dec  6 17:49:34 2011
+++ src/sys/arch/mips/mips/pmap_tlb.c	Fri Dec 23 22:31:30 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_tlb.c,v 1.1.2.20 2011/12/06 17:49:34 matt Exp $	*/
+/*	$NetBSD: pmap_tlb.c,v 1.1.2.21 2011/12/23 22:31:30 matt Exp $	*/
 
 /*-
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.1.2.20 2011/12/06 17:49:34 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.1.2.21 2011/12/23 22:31:30 matt Exp $");
 
 /*
  * Manages address spaces in a TLB.
@@ -170,8 +170,6 @@ struct pmap_tlb_info *pmap_tlbs[MAXCPUS]
 	[0] = &pmap_tlb0_info,
 };
 u_int pmap_ntlbs = 1;
-u_int pmap_tlb_synci_page_mask;
-u_int pmap_tlb_synci_map_mask;
 #endif
 #define	__BITMAP_SET(bm, n) \
 	((bm)[(n) / (8*sizeof(bm[0]))] |= 1LU << ((n) % (8*sizeof(bm[0]))))
@@ -223,34 +221,38 @@ pmap_pai_reset(struct pmap_tlb_info *ti,
 void
 pmap_tlb_info_evcnt_attach(struct pmap_tlb_info *ti)
 {
-#ifdef MULTIPROCESSOR
-	evcnt_attach_dynamic(&ti->ti_evcnt_synci_desired,
+	evcnt_attach_dynamic(&ti->ti_evcnt_syncicache_desired,
 	    EVCNT_TYPE_MISC, NULL,
 	    ti->ti_name, "icache syncs desired");
-	evcnt_attach_dynamic(&ti->ti_evcnt_synci_asts,
-	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
+	evcnt_attach_dynamic(&ti->ti_evcnt_syncicache_asts,
+	    EVCNT_TYPE_MISC, &ti->ti_evcnt_syncicache_desired,
 	    ti->ti_name, "icache sync asts");
-	evcnt_attach_dynamic(&ti->ti_evcnt_synci_all,
-	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_asts,
+	evcnt_attach_dynamic(&ti->ti_evcnt_syncicache_all,
+	    EVCNT_TYPE_MISC, &ti->ti_evcnt_syncicache_asts,
 	    ti->ti_name, "icache full syncs");
-	evcnt_attach_dynamic(&ti->ti_evcnt_synci_pages,
-	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_asts,
+	evcnt_attach_dynamic(&ti->ti_evcnt_syncicache_pages,
+	    EVCNT_TYPE_MISC, &ti->ti_evcnt_syncicache_asts,
 	    ti->ti_name, "icache pages synced");
-	evcnt_attach_dynamic(&ti->ti_evcnt_synci_duplicate,
-	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
+	evcnt_attach_dynamic(&ti->ti_evcnt_syncicache_duplicate,
+	    EVCNT_TYPE_MISC, &ti->ti_evcnt_syncicache_desired,
 	    ti->ti_name, "icache dup pages skipped");
-	evcnt_attach_dynamic(&ti->ti_evcnt_synci_deferred,
-	    EVCNT_TYPE_MISC, &ti->ti_evcnt_synci_desired,
+#ifdef MULTIPROCESSOR
+	evcnt_attach_dynamic(&ti->ti_evcnt_syncicache_deferred,
+	    EVCNT_TYPE_MISC, &ti->ti_evcnt_syncicache_desired,
 	    ti->ti_name, "icache pages deferred");
 #endif /* MULTIPROCESSOR */
 	evcnt_attach_dynamic(&ti->ti_evcnt_asid_reinits,
 	    EVCNT_TYPE_MISC, NULL,
 	    ti->ti_name, "asid pool reinit");
+	evcnt_attach_dynamic(&ti->ti_evcnt_asid_reclaims,
+	    EVCNT_TYPE_MISC, NULL,
+	    ti->ti_name, "asid pool reclaims");
 }
 
 void
 pmap_tlb_info_init(struct pmap_tlb_info *ti)
 {
+	const struct mips_options * const opts = &mips_options;
 #ifdef MULTIPROCESSOR
 	if (ti == &pmap_tlb0_info) {
 #endif /* MULTIPROCESSOR */
@@ -259,8 +261,11 @@ pmap_tlb_info_init(struct pmap_tlb_info 
 #ifdef MULTIPROCESSOR
 		mutex_init(ti->ti_hwlock, MUTEX_DEFAULT, IPL_SCHED);
 #endif
-		if (!CPUISMIPSNN || !__builtin_constant_p(MIPS_TLB_NUM_PIDS)) {
-			ti->ti_asid_max = mips_options.mips_num_tlb_entries - 1;
+		KASSERT(opts->mips_num_tlb_asids > 0);
+		if (!CPUISMIPSNN || !__builtin_constant_p(MIPS_TLB_NUM_PIDS)
+		    || ti->ti_asid_max != opts->mips_num_tlb_asids - 1) {
+			KASSERT(ti->ti_asid_max + 1 <= sizeof(ti->ti_asid_bitmap)*8);
+			ti->ti_asid_max = opts->mips_num_tlb_asids - 1;
 			ti->ti_asids_free = ti->ti_asid_max;
 			ti->ti_asid_mask = ti->ti_asid_max;
 			/*
@@ -271,16 +276,7 @@ pmap_tlb_info_init(struct pmap_tlb_info 
 				ti->ti_asid_mask |= ti->ti_asid_mask >> 1;
 			}
 		}
-#ifdef MULTIPROCESSOR
-		const u_int icache_way_pages =
-			mips_cache_info.mci_picache_way_size >> PGSHIFT;
-		KASSERT(icache_way_pages <= 8*sizeof(pmap_tlb_synci_page_mask));
-		pmap_tlb_synci_page_mask = icache_way_pages - 1;
-		pmap_tlb_synci_map_mask = ~(~0 << icache_way_pages);
-		printf("tlb0: synci page mask %#x and map mask %#x used for %u pages\n",
-		    pmap_tlb_synci_page_mask, pmap_tlb_synci_map_mask,
-		    icache_way_pages);
-#endif
+		pmap_syncicache_init();
 		return;
 #ifdef MULTIPROCESSOR
 	}
@@ -372,13 +368,20 @@ pmap_tlb_asid_reinitialize(struct pmap_t
 	ti->ti_asid_bitmap[0] = 1;
 	for (size_t word = 1; word <= asid_bitmap_words; word++)
 		ti->ti_asid_bitmap[word] = 0;
+#ifdef DIAGNOSTIC
+	for (size_t word = asid_bitmap_words + 1;
+	     word < __arraycount(ti->ti_asid_bitmap);
+	     word++) {
+		KASSERT(ti->ti_asid_bitmap[word] == 0);
+	}
+#endif
 
 	switch (op) {
 	case TLBINV_ALL:
 		tlb_invalidate_all();
 		break;
 	case TLBINV_ALLUSER:
-		tlb_invalidate_asids(1, ti->ti_asid_mask);
+		tlb_invalidate_asids(1, ti->ti_asid_max + 1);
 		break;
 	case TLBINV_NOBODY: {
 		/*
@@ -396,11 +399,13 @@ pmap_tlb_asid_reinitialize(struct pmap_t
 		pmap_tlb_asid_check();
 		KASSERT(asids_found == pmap_tlb_asid_count(ti));
 		if (__predict_false(asids_found >= ti->ti_asid_max / 2)) {
-			tlb_invalidate_asids(1, ti->ti_asid_mask);
+			tlb_invalidate_asids(1, ti->ti_asid_max + 1);
+			ti->ti_evcnt_asid_reinits.ev_count++;
 			ti->ti_asid_bitmap[0] = 1;
 			for (size_t word = 1; word <= asid_bitmap_words; word++)
 				ti->ti_asid_bitmap[word] = 0;
 		} else {
+			ti->ti_evcnt_asid_reclaims.ev_count++;
 			ti->ti_asids_free -= asids_found;
 		}
 		break;
@@ -643,7 +648,7 @@ pmap_tlb_update_addr(pmap_t pm, vaddr_t 
 		pmap_tlb_asid_check();
 	}
 #ifdef MULTIPROCESSOR
-	pm->pm_shootdown_pending = need_ipi;
+	atomic_or_uint(&pm->pm_shootdown_pending, need_ipi);
 #endif
 	TLBINFO_UNLOCK(ti);
 
@@ -666,7 +671,7 @@ pmap_tlb_invalidate_addr(pmap_t pm, vadd
 		pmap_tlb_asid_check();
 	}
 #ifdef MULTIPROCESSOR
-	pm->pm_shootdown_pending = 1;
+	(void) atomic_swap_uint(&pm->pm_shootdown_pending, 1);
 #endif
 	TLBINFO_UNLOCK(ti);
 }
@@ -789,9 +794,9 @@ pmap_tlb_asid_acquire(pmap_t pm, struct 
 		 * icache synched, make sure to do that before returning to
 		 * userland.
 		 */
-		if (ti->ti_synci_page_bitmap) {
+		if (ti->ti_syncicache_bitmap) {
 			l->l_md.md_astpending = 1; /* force call to ast() */
-			ci->ci_evcnt_synci_activate_rqst.ev_count++;
+			ci->ci_evcnt_syncicache_activate_rqst.ev_count++;
 		}
 		atomic_or_ulong(&ci->ci_flags, CPUF_USERPMAP);
 #endif /* MULTIPROCESSOR */
@@ -879,160 +884,6 @@ pmap_tlb_asid_release_all(struct pmap *p
 #endif /* MULTIPROCESSOR */
 }
 
-#ifdef MULTIPROCESSOR
-void
-pmap_tlb_syncicache_ast(struct cpu_info *ci)
-{
-	struct pmap_tlb_info * const ti = ci->ci_tlb_info;
-
-	kpreempt_disable();
-	uint32_t page_bitmap = atomic_swap_32(&ti->ti_synci_page_bitmap, 0);
-#if 0
-	printf("%s: need to sync %#x\n", __func__, page_bitmap);
-#endif
-	ti->ti_evcnt_synci_asts.ev_count++;
-	/*
-	 * If every bit is set in the bitmap, sync the entire icache.
-	 */
-	if (page_bitmap == pmap_tlb_synci_map_mask) {
-		mips_icache_sync_all();
-		ti->ti_evcnt_synci_all.ev_count++;
-		ti->ti_evcnt_synci_pages.ev_count += pmap_tlb_synci_page_mask+1;
-		kpreempt_enable();
-		return;
-	}
-
-	/*
-	 * Loop through the bitmap clearing each set of indices for each page.
-	 */
-	for (vaddr_t va = 0;
-	     page_bitmap != 0;
-	     page_bitmap >>= 1, va += PAGE_SIZE) {
-		if (page_bitmap & 1) {
-			/*
-			 * Each bit set represents a page to be synced.
-			 */
-			mips_icache_sync_range_index(va, PAGE_SIZE);
-			ti->ti_evcnt_synci_pages.ev_count++;
-		}
-	}
-
-	kpreempt_enable();
-}
-
-void
-pmap_tlb_syncicache(vaddr_t va, uint32_t page_onproc)
-{
-	KASSERT(kpreempt_disabled());
-	/*
-	 * We don't sync the icache here but let ast do it for us just before
-	 * returning to userspace.  We do this because we don't really know
-	 * on which CPU we will return to userspace and if we synch the icache
-	 * now it might not be on the CPU we need it on.  In addition, others
-	 * threads might sync the icache before we get to return to userland
-	 * so there's no reason for us to do it.
-	 *
-	 * Each TLB/cache keeps a synci sequence number which gets advanced
-	 * each that TLB/cache performs a mips_sync_icache_all.  When we
-	 * return to userland, we check the pmap's corresponding synci
-	 * sequence number for that TLB/cache.  If they match, it means that
-	 * no one has yet synched the icache so we much do it ourselves.  If
-	 * they don't match someone has already synced the icache for us.
-	 *
-	 * There is a small chance that the generation numbers will wrap and
-	 * then become equal but that's a one in 4 billion cache and will
-	 * just cause an extra sync of the icache.
-	 */
-	const uint32_t cpu_mask = 1L << cpu_index(curcpu());
-	const uint32_t page_mask =
-	    1L << ((va >> PGSHIFT) & pmap_tlb_synci_page_mask);
-	uint32_t onproc = 0;
-	for (size_t i = 0; i < pmap_ntlbs; i++) {
-		struct pmap_tlb_info * const ti = pmap_tlbs[0];
-		TLBINFO_LOCK(ti);
-		for (;;) {
-			uint32_t old_page_bitmap = ti->ti_synci_page_bitmap;
-			if (old_page_bitmap & page_mask) {
-				ti->ti_evcnt_synci_duplicate.ev_count++;
-				break;
-			}
-
-			uint32_t orig_page_bitmap = atomic_cas_32(
-			    &ti->ti_synci_page_bitmap, old_page_bitmap,
-			    old_page_bitmap | page_mask);
-
-			if (orig_page_bitmap == old_page_bitmap) {
-				if (old_page_bitmap == 0) {
-					onproc |= ti->ti_cpu_mask;
-				} else {
-					ti->ti_evcnt_synci_deferred.ev_count++;
-				}
-				ti->ti_evcnt_synci_desired.ev_count++;
-				break;
-			}
-		}
-#if 0
-		printf("%s: %s: %x to %x on cpus %#x\n", __func__,
-		    ti->ti_name, page_mask, ti->ti_synci_page_bitmap,
-		     onproc & page_onproc & ti->ti_cpu_mask);
-#endif
-		TLBINFO_UNLOCK(ti);
-	}
-	onproc &= page_onproc;
-	if (__predict_false(onproc != 0)) {
-		/*
-		 * If the cpu need to sync this page, tell the current lwp
-		 * to sync the icache before it returns to userspace.
-		 */
-		if (onproc & cpu_mask) {
-			if (curcpu()->ci_flags & CPUF_USERPMAP) {
-				curlwp->l_md.md_astpending = 1;	/* force call to ast() */
-				curcpu()->ci_evcnt_synci_onproc_rqst.ev_count++;
-			} else {
-				curcpu()->ci_evcnt_synci_deferred_rqst.ev_count++;
-			}
-			onproc ^= cpu_mask;
-		}
-
-		/*
-		 * For each cpu that is affect, send an IPI telling
-		 * that CPU that the current thread needs to sync its icache.
-		 * We might cause some spurious icache syncs but that's not
-		 * going to break anything.
-		 */
-		for (u_int n = ffs(onproc);
-		     onproc != 0;
-		     onproc >>= n, onproc <<= n, n = ffs(onproc)) {
-			cpu_send_ipi(cpu_lookup(n-1), IPI_SYNCICACHE);
-		}
-	}
-}
-
-void
-pmap_tlb_syncicache_wanted(struct cpu_info *ci)
-{
-	struct pmap_tlb_info * const ti = ci->ci_tlb_info;
-
-	KASSERT(cpu_intr_p());
-
-	TLBINFO_LOCK(ti);
-
-	/*
-	 * We might have been notified because another CPU changed an exec
-	 * page and now needs us to sync the icache so tell the current lwp
-	 * to do the next time it returns to userland (which should be very
-	 * soon).
-	 */
-	if (ti->ti_synci_page_bitmap && (ci->ci_flags & CPUF_USERPMAP)) {
-		curlwp->l_md.md_astpending = 1;	/* force call to ast() */
-		ci->ci_evcnt_synci_ipi_rqst.ev_count++;
-	}
-
-	TLBINFO_UNLOCK(ti);
-
-}
-#endif /* MULTIPROCESSOR */
-
 void
 pmap_tlb_asid_check(void)
 {
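
Two related details in pmap_tlb_info_init() and pmap_tlb_asid_reinitialize()
above: ti_asid_mask is still rounded up to an all-ones mask, but the upper
bound handed to tlb_invalidate_asids() is now ti_asid_max + 1 rather than the
mask, which reads as a half-open [1, asid_max + 1) range.  A standalone sketch
of that arithmetic (the rounding loop's condition is paraphrased, and the
exclusive-bound reading is an assumption drawn from the +1):

#include <stdio.h>

int
main(void)
{
	unsigned int num_asids = 1024;			/* per the log message */
	unsigned int asid_max = num_asids - 1;		/* 1023 */
	unsigned int asid_mask = asid_max;

	/* Round up to an all-ones mask, as pmap_tlb_info_init() does. */
	while (asid_mask & (asid_mask + 1))
		asid_mask |= asid_mask >> 1;		/* 0x3ff here */

	printf("asid_max=%u asid_mask=%#x invalidate ASIDs [1, %u)\n",
	    asid_max, asid_mask, asid_max + 1);
	return 0;
}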

Added files:

Index: src/sys/arch/mips/mips/pmap_syncicache.c
diff -u /dev/null src/sys/arch/mips/mips/pmap_syncicache.c:1.1.2.1
--- /dev/null	Fri Dec 23 22:31:30 2011
+++ src/sys/arch/mips/mips/pmap_syncicache.c	Fri Dec 23 22:31:30 2011
@@ -0,0 +1,290 @@
+/*	$NetBSD: pmap_syncicache.c,v 1.1.2.1 2011/12/23 22:31:30 matt Exp $	*/
+
+/*-
+ * Copyright (c) 2010 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas at 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+
+__KERNEL_RCSID(0, "$NetBSD: pmap_syncicache.c,v 1.1.2.1 2011/12/23 22:31:30 matt Exp $");
+
+/*
+ *
+ */
+
+#include "opt_multiprocessor.h"
+#include "opt_sysv.h"
+#include "opt_cputype.h"
+#include "opt_mips_cache.h"
+#include "opt_multiprocessor.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/mutex.h>
+#include <sys/atomic.h>
+#include <sys/kernel.h>			/* for cold */
+#include <sys/cpu.h>
+
+#include <uvm/uvm.h>
+
+#include <mips/cache.h>
+#include <mips/cpuregs.h>
+#include <mips/locore.h>
+#include <mips/pte.h>
+
+u_int pmap_syncicache_page_mask;
+u_int pmap_syncicache_map_mask;
+
+void
+pmap_syncicache_init(void)
+{
+	const u_int icache_way_pages =
+	    mips_cache_info.mci_picache_way_size >> PGSHIFT;
+	KASSERT(icache_way_pages <= 8*sizeof(pmap_syncicache_page_mask));
+	pmap_syncicache_page_mask = icache_way_pages - 1;
+	KASSERT(pmap_syncicache_page_mask == atop(MIPS_ICACHE_ALIAS_MASK));
+	pmap_syncicache_map_mask = ~(~0 << icache_way_pages);
+	KASSERT((pmap_syncicache_map_mask & ~(PG_MD_EXECPAGE_ANY >> PG_MD_EXECPAGE_SHIFT)) == 0);
+	printf("tlb0: synci page mask %#x and map mask %#x used for %u pages\n",
+	    pmap_syncicache_page_mask, pmap_syncicache_map_mask,
+	    icache_way_pages);
+}
+
+void
+pmap_syncicache_page(struct vm_page *pg, uint32_t colors)
+{
+	struct vm_page_md * const md = VM_PAGE_TO_MD(pg);
+#ifdef MULTIPROCESSOR
+	pv_entry_t pv = &md->pvh_first;
+	uint32_t onproc = 0;
+	(void)PG_MD_PVLIST_LOCK(md, false);
+	if (pv->pv_pmap != NULL) {
+		for (; pv != NULL; pv = pv->pv_next) {
+			onproc |= pv->pv_pmap->pm_onproc;
+			if (onproc == cpus_running)
+				break;
+		}
+	}
+	PG_MD_PVLIST_UNLOCK(md);
+	kpreempt_disable();
+	pmap_syncicache(colors & md->pvh_attrs, onproc);
+	kpreempt_enable();
+#else
+	if (!MIPS_HAS_R4K_MMU) {
+		mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)),
+		    PAGE_SIZE);
+	} else if (PG_MD_CACHED_P(md)) {
+#if 0
+		struct cpu_info * const ci = curcpu();
+		colors >>= PG_MD_EXECPAGE_SHIFT;
+		KASSERT(colors != 0);
+
+		/*
+		 * Record the colors to flushes the next time we return
+		 * to userspace.
+		 */
+		u_int old = ci->ci_icache_badcolors;
+		atomic_or_uint(&ci->ci_icache_badcolors, colors);
+		if ((old & colors) == colors) {
+			ci->ci_evcnt_syncicache_duplicate.ev_count++;
+		} else {
+			ci->ci_evcnt_syncicache_desired.ev_count++;
+		}
+#else
+		colors >>= PG_MD_EXECPAGE_SHIFT;
+		/*
+		 * If not all the colors are in use, just flush the
+		 * ones that are.
+		 */
+		for (vaddr_t va = MIPS_KSEG0_START;
+		     colors != 0;
+		     colors >>= 1, va += PAGE_SIZE) {
+			if (colors & 1) {
+				mips_icache_sync_range_index(va, PAGE_SIZE);
+			}
+		}
+#endif
+	}
+#endif
+}
+
+void
+pmap_syncicache_ast(struct cpu_info *ci)
+{
+	struct pmap_tlb_info * const ti = ci->ci_tlb_info;
+
+	kpreempt_disable();
+	uint32_t page_bitmap = atomic_swap_32(&ti->ti_syncicache_bitmap, 0);
+#if 0
+	printf("%s: need to sync %#x\n", __func__, page_bitmap);
+#endif
+	ti->ti_evcnt_syncicache_asts.ev_count++;
+	/*
+	 * If every bit is set in the bitmap, sync the entire icache.
+	 */
+	if (page_bitmap == pmap_syncicache_map_mask) {
+		mips_icache_sync_all();
+		ti->ti_evcnt_syncicache_all.ev_count++;
+		ti->ti_evcnt_syncicache_pages.ev_count += pmap_syncicache_page_mask+1;
+		kpreempt_enable();
+		return;
+	}
+
+	/*
+	 * Loop through the bitmap clearing each set of indices for each page.
+	 */
+	for (vaddr_t va = 0;
+	     page_bitmap != 0;
+	     page_bitmap >>= 1, va += PAGE_SIZE) {
+		if (page_bitmap & 1) {
+			/*
+			 * Each bit set represents a page to be synced.
+			 */
+			mips_icache_sync_range_index(va, PAGE_SIZE);
+			ti->ti_evcnt_syncicache_pages.ev_count++;
+		}
+	}
+
+	kpreempt_enable();
+}
+
+#ifdef MULTIPROCESSOR
+void
+pmap_syncicache(uint32_t colors, uint32_t page_onproc)
+{
+	KASSERT(kpreempt_disabled());
+	/*
+	 * We don't sync the icache here but let ast do it for us just before
+	 * returning to userspace.  We do this because we don't really know
+	 * on which CPU we will return to userspace and if we synch the icache
+	 * now it might not be on the CPU we need it on.  In addition, others
+	 * threads might sync the icache before we get to return to userland
+	 * so there's no reason for us to do it.
+	 *
+	 * Each TLB/cache keeps a synci sequence number which gets advanced
+	 * each that TLB/cache performs a mips_sync_icache_all.  When we
+	 * return to userland, we check the pmap's corresponding synci
+	 * sequence number for that TLB/cache.  If they match, it means that
+	 * no one has yet synched the icache so we much do it ourselves.  If
+	 * they don't match someone has already synced the icache for us.
+	 *
+	 * There is a small chance that the generation numbers will wrap and
+	 * then become equal but that's a one in 4 billion cache and will
+	 * just cause an extra sync of the icache.
+	 */
+	const uint32_t cpu_mask = 1L << cpu_index(curcpu());
+	const uint32_t page_mask = colors;
+	KASSERT((page_mask & ~pmap_syncicache_map_mask) == 0);
+	uint32_t onproc = 0;
+	for (size_t i = 0; i < pmap_ntlbs; i++) {
+		struct pmap_tlb_info * const ti = pmap_tlbs[0];
+		TLBINFO_LOCK(ti);
+		for (;;) {
+			uint32_t old_page_bitmap = ti->ti_syncicache_bitmap;
+
+			uint32_t orig_page_bitmap = atomic_cas_32(
+			    &ti->ti_syncicache_bitmap, old_page_bitmap,
+			    old_page_bitmap | page_mask);
+
+			if ((orig_page_bitmap & page_mask) == page_mask) {
+				ti->ti_evcnt_syncicache_duplicate.ev_count++;
+				break;
+			}
+
+			if (orig_page_bitmap == old_page_bitmap) {
+				if (old_page_bitmap == 0) {
+					onproc |= ti->ti_cpu_mask;
+				} else {
+					ti->ti_evcnt_syncicache_deferred.ev_count++;
+				}
+				ti->ti_evcnt_syncicache_desired.ev_count++;
+				break;
+			}
+		}
+#if 0
+		printf("%s: %s: %x to %x on cpus %#x\n", __func__,
+		    ti->ti_name, page_mask, ti->ti_syncicache_bitmap,
+		     onproc & page_onproc & ti->ti_cpu_mask);
+#endif
+		TLBINFO_UNLOCK(ti);
+	}
+	onproc &= page_onproc;
+	if (__predict_false(onproc != 0)) {
+		/*
+		 * If the cpu need to sync this page, tell the current lwp
+		 * to sync the icache before it returns to userspace.
+		 */
+		if (onproc & cpu_mask) {
+			if (curcpu()->ci_flags & CPUF_USERPMAP) {
+				curlwp->l_md.md_astpending = 1;	/* force call to ast() */
+				curcpu()->ci_evcnt_syncicache_onproc_rqst.ev_count++;
+			} else {
+				curcpu()->ci_evcnt_syncicache_deferred_rqst.ev_count++;
+			}
+			onproc ^= cpu_mask;
+		}
+
+		/*
+		 * For each cpu that is affect, send an IPI telling
+		 * that CPU that the current thread needs to sync its icache.
+		 * We might cause some spurious icache syncs but that's not
+		 * going to break anything.
+		 */
+		for (u_int n = ffs(onproc);
+		     onproc != 0;
+		     onproc >>= n, onproc <<= n, n = ffs(onproc)) {
+			cpu_send_ipi(cpu_lookup(n-1), IPI_SYNCICACHE);
+		}
+	}
+}
+
+void
+pmap_syncicache_wanted(struct cpu_info *ci)
+{
+	struct pmap_tlb_info * const ti = ci->ci_tlb_info;
+
+	KASSERT(cpu_intr_p());
+
+	TLBINFO_LOCK(ti);
+
+	/*
+	 * We might have been notified because another CPU changed an exec
+	 * page and now needs us to sync the icache so tell the current lwp
+	 * to do the next time it returns to userland (which should be very
+	 * soon).
+	 */
+	if (ti->ti_syncicache_bitmap && (ci->ci_flags & CPUF_USERPMAP)) {
+		curlwp->l_md.md_astpending = 1;	/* force call to ast() */
+		ci->ci_evcnt_syncicache_ipi_rqst.ev_count++;
+	}
+
+	TLBINFO_UNLOCK(ti);
+
+}
+#endif /* MULTIPROCESSOR */
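
To make the color tracking concrete: pmap_syncicache_init() converts the
primary-icache way size into a per-way page count, each executable mapping
records which icache page index (color) it lands on, and pmap_syncicache_ast()
flushes only those indices unless every bit is set.  A standalone arithmetic
sketch with hypothetical cache geometry (16KB way, 4KB pages; the numbers are
assumptions for illustration, not taken from any particular CPU):

#include <stdio.h>

int
main(void)
{
	unsigned int way_size = 16 * 1024;		/* assumed icache way size */
	unsigned int page_size = 4 * 1024;		/* assumed page size */
	unsigned int icache_way_pages = way_size / page_size;	/* 4 */

	/* The two masks pmap_syncicache_init() derives. */
	unsigned int page_mask = icache_way_pages - 1;		/* 0x3 */
	unsigned int map_mask = ~(~0U << icache_way_pages);	/* 0xf */

	/*
	 * An exec page of color 2 sets bit 2 in the bitmap; the AST handler
	 * then syncs only that icache index, or the whole icache if the
	 * bitmap equals map_mask.
	 */
	unsigned int color = 2;
	unsigned int bitmap = 1U << (color & page_mask);

	printf("page_mask=%#x map_mask=%#x bitmap=%#x full-sync=%d\n",
	    page_mask, map_mask, bitmap, bitmap == map_mask);
	return 0;
}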
