Module Name:	src
Committed By:	matt
Date:		Thu Jun 23 02:33:44 UTC 2011
Modified Files:
	src/sys/arch/powerpc/booke: booke_pmap.c
	src/sys/arch/powerpc/include/booke: pmap.h
	src/sys/common/pmap/tlb: pmap.c pmap.h pmap_tlb.c

Log Message:
Move some MD parts back to the booke pmap.c.
Clean up initialization a bit.


To generate a diff of this commit:
cvs rdiff -u -r1.6 -r1.7 src/sys/arch/powerpc/booke/booke_pmap.c
cvs rdiff -u -r1.6 -r1.7 src/sys/arch/powerpc/include/booke/pmap.h
cvs rdiff -u -r1.6 -r1.7 src/sys/common/pmap/tlb/pmap.c
cvs rdiff -u -r1.5 -r1.6 src/sys/common/pmap/tlb/pmap.h \
    src/sys/common/pmap/tlb/pmap_tlb.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
Modified files:

Index: src/sys/arch/powerpc/booke/booke_pmap.c
diff -u src/sys/arch/powerpc/booke/booke_pmap.c:1.6 src/sys/arch/powerpc/booke/booke_pmap.c:1.7
--- src/sys/arch/powerpc/booke/booke_pmap.c:1.6	Mon Jun 20 20:24:28 2011
+++ src/sys/arch/powerpc/booke/booke_pmap.c	Thu Jun 23 02:33:44 2011
@@ -37,7 +37,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.6 2011/06/20 20:24:28 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.7 2011/06/23 02:33:44 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/kcore.h>
@@ -308,3 +308,24 @@
 	    && !(VM_MIN_KERNEL_ADDRESS <= va && va < VM_MAX_KERNEL_ADDRESS);
 }
 
+bool
+pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
+{
+	pmap_t pm = ctx;
+	struct pmap_asid_info * const pai = PMAP_PAI(pm, curcpu()->ci_tlb_info);
+
+	if (asid != pai->pai_asid)
+		return true;
+
+	const pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
+	KASSERT(ptep != NULL);
+	pt_entry_t xpte = *ptep;
+	xpte &= ~((xpte & (PTE_UNSYNCED|PTE_UNMODIFIED)) << 1);
+	xpte ^= xpte & (PTE_UNSYNCED|PTE_UNMODIFIED|PTE_WIRED);
+
+	KASSERTMSG(pte == xpte,
+	    ("pm=%p va=%#"PRIxVADDR" asid=%u: TLB pte (%#x) != real pte (%#x/%#x)",
+	     pm, va, asid, pte, xpte, *ptep));
+
+	return true;
+}

Index: src/sys/arch/powerpc/include/booke/pmap.h
diff -u src/sys/arch/powerpc/include/booke/pmap.h:1.6 src/sys/arch/powerpc/include/booke/pmap.h:1.7
--- src/sys/arch/powerpc/include/booke/pmap.h:1.6	Mon Jun 20 20:24:28 2011
+++ src/sys/arch/powerpc/include/booke/pmap.h	Thu Jun 23 02:33:44 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.6 2011/06/20 20:24:28 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.7 2011/06/23 02:33:44 matt Exp $	*/
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -75,6 +75,7 @@
 void	pmap_procwr(struct proc *, vaddr_t, size_t);
 #define	PMAP_NEED_PROCWR
 
+#ifdef __PMAP_PRIVATE
 struct vm_page *pmap_md_alloc_poolpage(int flags);
 vaddr_t	pmap_md_map_poolpage(paddr_t);
@@ -84,6 +85,9 @@
 vaddr_t	pmap_md_direct_map_paddr(paddr_t);
 void	pmap_md_init(void);
 
+bool	pmap_md_tlb_check_entry(void *, vaddr_t, tlb_asid_t, pt_entry_t);
+#endif
+
 void	pmap_md_page_syncicache(struct vm_page *, __cpuset_t);
 void	pmap_bootstrap(vaddr_t, vaddr_t, const phys_ram_seg_t *, size_t);
 bool	pmap_extract(struct pmap *, vaddr_t, paddr_t *);
@@ -101,6 +105,7 @@
 	return (paddr_t) -1;
 }
 
+#ifdef __PMAP_PRIVATE
 /*
  * Virtual Cache Alias helper routines.  Not a problem for Booke CPUs.
  */
@@ -121,6 +126,13 @@
 {
 }
 
+static inline size_t
+pmap_md_tlb_asid_max(void)
+{
+	return PMAP_TLB_NUM_PIDS - 1;
+}
+#endif
+
 #define	POOL_VTOPHYS(va)	((paddr_t)(vaddr_t)(va))
 #define	POOL_PHYSTOV(pa)	((vaddr_t)(paddr_t)(pa))

Index: src/sys/common/pmap/tlb/pmap.c
diff -u src/sys/common/pmap/tlb/pmap.c:1.6 src/sys/common/pmap/tlb/pmap.c:1.7
--- src/sys/common/pmap/tlb/pmap.c:1.6	Mon Jun 20 20:24:29 2011
+++ src/sys/common/pmap/tlb/pmap.c	Thu Jun 23 02:33:44 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.6 2011/06/20 20:24:29 matt Exp $	*/
+/*	$NetBSD: pmap.c,v 1.7 2011/06/23 02:33:44 matt Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.6 2011/06/20 20:24:29 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.7 2011/06/23 02:33:44 matt Exp $");
 
 /*
  *	Manages physical address maps.
@@ -591,7 +591,7 @@
 	PMAP_COUNT(shootdown_ipis);
 #endif
 #ifdef DEBUG
-	pmap_tlb_check(pmap);
+	pmap_tlb_check(pmap, pmap_md_tlb_check_entry);
 #endif /* DEBUG */
 
 	/*

Index: src/sys/common/pmap/tlb/pmap.h
diff -u src/sys/common/pmap/tlb/pmap.h:1.5 src/sys/common/pmap/tlb/pmap.h:1.6
--- src/sys/common/pmap/tlb/pmap.h:1.5	Mon Jun 20 20:24:29 2011
+++ src/sys/common/pmap/tlb/pmap.h	Thu Jun 23 02:33:44 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.5 2011/06/20 20:24:29 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.6 2011/06/23 02:33:44 matt Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -245,7 +245,7 @@
 #define	PMAP_TLB_NEED_IPI	0x01
 #define	PMAP_TLB_INSERT		0x02
 void	pmap_tlb_invalidate_addr(pmap_t, vaddr_t);
-void	pmap_tlb_check(pmap_t);
+void	pmap_tlb_check(pmap_t, bool (*)(void *, vaddr_t, tlb_asid_t, pt_entry_t));
 
 uint16_t pmap_pvlist_lock(struct vm_page_md *, bool);

Index: src/sys/common/pmap/tlb/pmap_tlb.c
diff -u src/sys/common/pmap/tlb/pmap_tlb.c:1.5 src/sys/common/pmap/tlb/pmap_tlb.c:1.6
--- src/sys/common/pmap/tlb/pmap_tlb.c:1.5	Thu Jun 23 01:27:21 2011
+++ src/sys/common/pmap/tlb/pmap_tlb.c	Thu Jun 23 02:33:44 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_tlb.c,v 1.5 2011/06/23 01:27:21 matt Exp $	*/
+/*	$NetBSD: pmap_tlb.c,v 1.6 2011/06/23 02:33:44 matt Exp $	*/
 
 /*-
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.5 2011/06/23 01:27:21 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.6 2011/06/23 02:33:44 matt Exp $");
 
 /*
  * Manages address spaces in a TLB.
@@ -134,7 +134,7 @@
 
 #include <uvm/uvm.h>
 
-static kmutex_t pmap_tlb0_mutex __aligned(32);
+static kmutex_t pmap_tlb0_mutex __cacheline_aligned;
 #ifdef MULTIPROCESSOR
 static struct pmap_tlb_info *pmap_tlbs[MAXCPUS] = {
 	[0] = &pmap_tlb_info,
@@ -142,12 +142,14 @@
 static u_int pmap_ntlbs = 1;
 #endif
 
+#define	IFCONSTANT(x)	(__builtin_constant_p((x)) ? (x) : 0)
+
 struct pmap_tlb_info pmap_tlb0_info = {
+	.ti_name = "tlb0",
 	.ti_asid_hint = KERNEL_PID + 1,
 #ifdef PMAP_TLB_NUM_PIDS
-	.ti_asid_mask = PMAP_TLB_NUM_PIDS - 1,
-	.ti_asid_max = PMAP_TLB_NUM_PIDS - 1,
-	.ti_asids_free = PMAP_TLB_NUM_PIDS - 1 - KERNEL_PID,
+	.ti_asid_max = IFCONSTANT(PMAP_TLB_NUM_PIDS - 1),
+	.ti_asids_free = IFCONSTANT(PMAP_TLB_NUM_PIDS - (KERNEL_PID + 1)),
 #endif
 	.ti_asid_bitmap[0] = (2 << KERNEL_PID) - 1,
 #ifdef PMAP_TLB_WIRED_UPAGES
@@ -161,6 +163,8 @@
 #endif
 };
 
+#undef	IFCONSTANT
+
 #define	__BITMAP_SET(bm, n) \
 	((bm)[(n) / (8*sizeof(bm[0]))] |= 1LU << ((n) % (8*sizeof(bm[0]))))
 #define	__BITMAP_CLR(bm, n) \
@@ -231,42 +235,39 @@
 pmap_tlb_info_init(struct pmap_tlb_info *ti)
 {
 #ifdef MULTIPROCESSOR
-	if (ti == &pmap_tlb0_info) {
-		mutex_init(ti->ti_lock, MUTEX_DEFAULT, IPL_SCHED);
-		return;
-	}
+	if (ti != &pmap_tlb0_info) {
 
-	KASSERT(pmap_tlbs[pmap_ntlbs] == NULL);
+		KASSERT(pmap_tlbs[pmap_ntlbs] == NULL);
 
-	ti->ti_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
-	ti->ti_asid_bitmap[0] = (2 << KERNEL_PID) - 1;
-	ti->ti_asid_hint = KERNEL_PID + 1;
-	ti->ti_asid_max = pmap_tlbs[0]->ti_asid_max;
-	ti->ti_asid_mask = pmap_tlbs[0]->ti_asid_mask;
-	ti->ti_asids_free = ti->ti_asid_max - KERNEL_PID;
-	ti->ti_tlbinvop = TLBINV_NOBODY,
-	ti->ti_victim = NULL;
-	ti->ti_cpu_mask = 0;
-	ti->ti_index = pmap_ntlbs++;
-	ti->ti_wired = 0;
-	pmap_tlbs[ti->ti_index] = ti;
-#else
-	KASSERT(ti == &pmap_tlb0_info);
-	mutex_init(ti->ti_lock, MUTEX_DEFAULT, IPL_SCHED);
-#if 0
-	if (!CPUISMIPSNN) {
-		ti->ti_asid_max = mips_options.mips_num_tlb_entries - 1;
+		ti->ti_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
+		ti->ti_asid_bitmap[0] = (2 << KERNEL_PID) - 1;
+		ti->ti_asid_hint = KERNEL_PID + 1;
+		ti->ti_asid_max = pmap_tlbs[0]->ti_asid_max;
+		ti->ti_asid_mask = pmap_tlbs[0]->ti_asid_mask;
 		ti->ti_asids_free = ti->ti_asid_max - KERNEL_PID;
-		ti->ti_asid_mask = ti->ti_asid_max;
-		/*
-		 * Now figure out what mask we need to focus on asid_max.
-		 */
-		while ((ti->ti_asid_mask + 1) & ti->ti_asid_mask) {
-			ti->ti_asid_mask |= ti->ti_asid_mask >> 1;
-		}
+		ti->ti_tlbinvop = TLBINV_NOBODY,
+		ti->ti_victim = NULL;
+		ti->ti_cpu_mask = 0;
+		ti->ti_index = pmap_ntlbs++;
+		ti->ti_wired = 0;
+		pmap_tlbs[ti->ti_index] = ti;
+		return;
 	}
 #endif
-#endif /* MULTIPROCESSOR */
+	KASSERT(ti == &pmap_tlb0_info);
+	mutex_init(ti->ti_lock, MUTEX_DEFAULT, IPL_SCHED);
+	if (ti->ti_asid_max == 0) {
+		ti->ti_asid_max = pmap_md_tlb_asid_max();
+		ti->ti_asids_free = ti->ti_asid_max - (KERNEL_PID + 1);
+	}
+	/*
+	 * Now figure out what mask we need to focus on asid_max.
+	 */
+	ti->ti_asid_mask = ~0U >> __builtin_clz(ti->ti_asid_max);
+
+	KASSERT(ti->ti_asid_max < sizeof(ti->ti_asid_bitmap)*8);
+	KASSERT(ti->ti_asid_max <= ti->ti_asid_mask);
+	KASSERT(((ti->ti_asid_mask + 1) & ti->ti_asid_mask) == 0);
 }
 
 #ifdef MULTIPROCESSOR
@@ -781,37 +782,16 @@
 	TLBINFO_UNLOCK(ti);
 #endif /* MULTIPROCESSOR */
 }
-#ifdef DEBUG
-static bool
-pmap_tlb_check_entry(void *ctx, vaddr_t va, uint32_t asid, uint32_t pte)
-{
-	pmap_t pm = ctx;
-	struct pmap_asid_info * const pai = PMAP_PAI(pm, curcpu()->ci_tlb_info);
-
-	if (asid != pai->pai_asid)
-		return true;
-
-	const pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
-	KASSERT(ptep != NULL);
-	pt_entry_t xpte = *ptep;
-	xpte &= ~((xpte & (PTE_UNSYNCED|PTE_UNMODIFIED)) << 1);
-	xpte ^= xpte & (PTE_UNSYNCED|PTE_UNMODIFIED|PTE_WIRED);
-
-	KASSERTMSG(pte == xpte,
-	    ("pm=%p va=%#"PRIxVADDR" asid=%u: TLB pte (%#x) != real pte (%#x/%#x)",
-	     pm, va, asid, pte, xpte, *ptep));
-
-	return true;
-}
+#ifdef DEBUG
 void
-pmap_tlb_check(pmap_t pm)
+pmap_tlb_check(pmap_t pm, bool (*func)(void *, vaddr_t, tlb_asid_t, pt_entry_t))
 {
 	struct pmap_tlb_info * const ti = curcpu()->ci_tlb_info;
 	struct pmap_asid_info * const pai = PMAP_PAI(pm, ti);
 
 	TLBINFO_LOCK(ti);
 	if (pm == pmap_kernel() || pai->pai_asid > KERNEL_PID)
-		tlb_walk(pm, pmap_tlb_check_entry);
+		tlb_walk(pm, func);
 	TLBINFO_UNLOCK(ti);
 }
 #endif /* DEBUG */
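For illustration only (not part of the commit): the new pmap_tlb_info_init()
code replaces the old bit-widening loop with a single expression,
~0U >> __builtin_clz(ti->ti_asid_max), which yields the smallest mask of the
form 2^n - 1 that covers ti_asid_max.  Below is a minimal userland sketch of
that computation; the helper name asid_mask_for() and the test harness are
made up for this example and do not exist in the tree.

#include <assert.h>
#include <stdio.h>

/*
 * Sketch of the mask computation used by pmap_tlb_info_init():
 * round asid_max up to an all-ones mask (2^n - 1) in one step,
 * instead of widening the mask one bit per loop iteration.
 */
static unsigned int
asid_mask_for(unsigned int asid_max)
{
	assert(asid_max > 0);
	/* __builtin_clz() counts leading zero bits of an unsigned int. */
	unsigned int mask = ~0U >> __builtin_clz(asid_max);

	/* The same invariants the commit checks with KASSERT. */
	assert(asid_max <= mask);
	assert(((mask + 1) & mask) == 0);	/* mask + 1 is a power of two */
	return mask;
}

int
main(void)
{
	printf("%#x\n", asid_mask_for(255));	/* 0xff, e.g. PMAP_TLB_NUM_PIDS == 256 */
	printf("%#x\n", asid_mask_for(100));	/* 0x7f */
	return 0;
}

The three KASSERTs added in pmap_tlb_info_init() state the same properties:
the maximum ASID fits in ti_asid_bitmap, the mask covers ti_asid_max, and
mask + 1 is a power of two.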