Module Name: src Committed By: matt Date: Thu Jan 12 18:50:33 UTC 2012
Modified Files: src/sys/arch/mips/include [matt-nb5-mips64]: cache.h src/sys/arch/mips/mips [matt-nb5-mips64]: cache.c pmap_syncicache.c Log Message: Add an optimization for UP systems with non-virtually tagged caches (which are most of them these days). If a page needs to have an icache_sync performed and the page has a direct map alias (XKPHYS or KSEG0), then don't do an index op; instead do a range op on the XKPHYS or KSEG0 address. This results in fewer unneeded cache line invalidations. To generate a diff of this commit: cvs rdiff -u -r1.9.96.6 -r1.9.96.7 src/sys/arch/mips/include/cache.h cvs rdiff -u -r1.33.96.10 -r1.33.96.11 src/sys/arch/mips/mips/cache.c cvs rdiff -u -r1.1.2.1 -r1.1.2.2 src/sys/arch/mips/mips/pmap_syncicache.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/arch/mips/include/cache.h diff -u src/sys/arch/mips/include/cache.h:1.9.96.6 src/sys/arch/mips/include/cache.h:1.9.96.7 --- src/sys/arch/mips/include/cache.h:1.9.96.6 Tue Dec 27 01:56:32 2011 +++ src/sys/arch/mips/include/cache.h Thu Jan 12 18:50:32 2012 @@ -170,6 +170,7 @@ struct mips_cache_info { u_int mci_picache_ways; u_int mci_picache_way_size; u_int mci_picache_way_mask; + bool mci_picache_vivt; /* virtually indexed and tagged */ u_int mci_pdcache_size; /* and unified */ u_int mci_pdcache_line_size; Index: src/sys/arch/mips/mips/cache.c diff -u src/sys/arch/mips/mips/cache.c:1.33.96.10 src/sys/arch/mips/mips/cache.c:1.33.96.11 --- src/sys/arch/mips/mips/cache.c:1.33.96.10 Tue Dec 27 16:08:17 2011 +++ src/sys/arch/mips/mips/cache.c Thu Jan 12 18:50:33 2012 @@ -965,6 +965,11 @@ mips_config_cache_modern(uint32_t cpu_id mci->mci_picache_ways = MIPSNN_CFG1_IA(cfg1) + 1; /* + * Is this Icache virtually indexed and virtually tagged? + */ + mci->mci_picache_vivt = (cfg & MIPSNN_CFG_VI) != 0; + + /* * Compute the total size and "way mask" for the * primary Dcache. */ Index: src/sys/arch/mips/mips/pmap_syncicache.c diff -u src/sys/arch/mips/mips/pmap_syncicache.c:1.1.2.1 src/sys/arch/mips/mips/pmap_syncicache.c:1.1.2.2 --- src/sys/arch/mips/mips/pmap_syncicache.c:1.1.2.1 Fri Dec 23 22:31:30 2011 +++ src/sys/arch/mips/mips/pmap_syncicache.c Thu Jan 12 18:50:33 2012 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap_syncicache.c,v 1.1.2.1 2011/12/23 22:31:30 matt Exp $ */ +/* $NetBSD: pmap_syncicache.c,v 1.1.2.2 2012/01/12 18:50:33 matt Exp $ */ /*- * Copyright (c) 2010 The NetBSD Foundation, Inc. 
@@ -31,7 +31,7 @@ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap_syncicache.c,v 1.1.2.1 2011/12/23 22:31:30 matt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap_syncicache.c,v 1.1.2.2 2012/01/12 18:50:33 matt Exp $"); /* * @@ -100,6 +100,26 @@ pmap_syncicache_page(struct vm_page *pg, mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)), PAGE_SIZE); } else if (PG_MD_CACHED_P(md)) { + /* + * The page may not be mapped so we can't use one of its + * virtual addresses. But if the cache is not vivt (meaning + * it's physically tagged), we can use its XKPHYS cached or + * KSEG0 (if it lies within) address to invalidate it. + */ + if (__predict_true(!mips_cache_info.mci_picache_vivt)) { + const paddr_t pa = VM_PAGE_TO_PHYS(pg); +#if _LP64 + mips_icache_sync_range(MIPS_PHYS_TO_XKPHYS_CACHED(pa), + PAGE_SIZE); + return; +#else + if (MIPS_KSEG0_P(pa)) { + mips_icache_sync_range(MIPS_PHYS_TO_KSEG0(pa), + PAGE_SIZE); + return; + } +#endif + } #if 0 struct cpu_info * const ci = curcpu(); colors >>= PG_MD_EXECPAGE_SHIFT;