Module Name:	src
Committed By:	matt
Date:		Mon Jun 20 20:24:29 UTC 2011
Modified Files:
	src/sys/arch/powerpc/booke: booke_pmap.c trap.c
	src/sys/arch/powerpc/include: intr.h pmap.h vmparam.h
	src/sys/arch/powerpc/include/booke: pmap.h pte.h vmparam.h
	src/sys/arch/powerpc/include/ibm4xx: pmap.h vmparam.h
	src/sys/arch/powerpc/include/oea: pmap.h vmparam.h
	src/sys/common/pmap/tlb: pmap.c pmap.h

Log Message:
PowerPC now exports a common view of cpu.h, vmparam.h, and pmap.h when
building a MODULAR kernel or compiling _MODULE.  It should be noted that
MODULAR and _MODULE export a view of the kernel as being MULTIPROCESSOR
(even if it isn't).  The shared pmap TLB code now takes a struct
vm_page_md * (mdpg) in places where it used to take a struct vm_page *
(pg), to avoid deadly-embrace header-inclusion problems.

To generate a diff of this commit:
cvs rdiff -u -r1.5 -r1.6 src/sys/arch/powerpc/booke/booke_pmap.c
cvs rdiff -u -r1.10 -r1.11 src/sys/arch/powerpc/booke/trap.c
cvs rdiff -u -r1.8 -r1.9 src/sys/arch/powerpc/include/intr.h
cvs rdiff -u -r1.36 -r1.37 src/sys/arch/powerpc/include/pmap.h
cvs rdiff -u -r1.14 -r1.15 src/sys/arch/powerpc/include/vmparam.h
cvs rdiff -u -r1.5 -r1.6 src/sys/arch/powerpc/include/booke/pmap.h
cvs rdiff -u -r1.2 -r1.3 src/sys/arch/powerpc/include/booke/pte.h
cvs rdiff -u -r1.4 -r1.5 src/sys/arch/powerpc/include/booke/vmparam.h
cvs rdiff -u -r1.15 -r1.16 src/sys/arch/powerpc/include/ibm4xx/pmap.h
cvs rdiff -u -r1.8 -r1.9 src/sys/arch/powerpc/include/ibm4xx/vmparam.h
cvs rdiff -u -r1.23 -r1.24 src/sys/arch/powerpc/include/oea/pmap.h
cvs rdiff -u -r1.16 -r1.17 src/sys/arch/powerpc/include/oea/vmparam.h
cvs rdiff -u -r1.5 -r1.6 src/sys/common/pmap/tlb/pmap.c
cvs rdiff -u -r1.4 -r1.5 src/sys/common/pmap/tlb/pmap.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
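As an illustrative sketch (not part of this commit), the calling
convention the log message describes looks like the following.  All
identifiers are taken from the diff below; only the wrapper function is
invented for illustration:

	static void
	example_mark_modified(struct vm_page *pg)
	{
		/* before: pmap_page_set_attributes(pg, VM_PAGE_MD_MODIFIED) */
		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

		if (!VM_PAGEMD_MODIFIED_P(mdpg))
			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED);
	}

Because pmap_page_set_attributes() and friends now take the vm_page_md
directly, the common/pmap/tlb code no longer needs the full struct
vm_page definition, which is what breaks the circular header inclusion.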
Modified files:

Index: src/sys/arch/powerpc/booke/booke_pmap.c
diff -u src/sys/arch/powerpc/booke/booke_pmap.c:1.5 src/sys/arch/powerpc/booke/booke_pmap.c:1.6
--- src/sys/arch/powerpc/booke/booke_pmap.c:1.5	Sun Jun 12 05:32:38 2011
+++ src/sys/arch/powerpc/booke/booke_pmap.c	Mon Jun 20 20:24:28 2011
@@ -37,13 +37,13 @@
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.5 2011/06/12 05:32:38 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: booke_pmap.c,v 1.6 2011/06/20 20:24:28 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/kcore.h>
 #include <sys/buf.h>
 
-#include <uvm/uvm_extern.h>
+#include <uvm/uvm.h>
 
 #include <machine/pmap.h>
 
@@ -265,7 +265,7 @@
 {
 	dcache_zero_page(pa);
 
-	KASSERT(!VM_PAGE_MD_EXECPAGE_P(PHYS_TO_VM_PAGE(pa)));
+	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(pa))));
 }
 
 void
@@ -291,7 +291,7 @@
 		}
 	}
 
-	KASSERT(!VM_PAGE_MD_EXECPAGE_P(PHYS_TO_VM_PAGE(dst - PAGE_SIZE)));
+	KASSERT(!VM_PAGEMD_EXECPAGE_P(VM_PAGE_TO_MD(PHYS_TO_VM_PAGE(dst - PAGE_SIZE))));
 }
 
 void

Index: src/sys/arch/powerpc/booke/trap.c
diff -u src/sys/arch/powerpc/booke/trap.c:1.10 src/sys/arch/powerpc/booke/trap.c:1.11
--- src/sys/arch/powerpc/booke/trap.c:1.10	Tue Jun 14 05:50:24 2011
+++ src/sys/arch/powerpc/booke/trap.c	Mon Jun 20 20:24:28 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: trap.c,v 1.10 2011/06/14 05:50:24 matt Exp $	*/
+/*	$NetBSD: trap.c,v 1.11 2011/06/20 20:24:28 matt Exp $	*/
 
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -39,7 +39,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.10 2011/06/14 05:50:24 matt Exp $");
+__KERNEL_RCSID(1, "$NetBSD: trap.c,v 1.11 2011/06/20 20:24:28 matt Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -233,9 +233,10 @@
 		const paddr_t pa = pte_to_paddr(pte);
 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
 		KASSERT(pg);
+		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
 
-		if (!VM_PAGE_MD_MODIFIED_P(pg)) {
-			pmap_page_set_attributes(pg, VM_PAGE_MD_MODIFIED);
+		if (!VM_PAGEMD_MODIFIED_P(mdpg)) {
+			pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED);
 		}
 		pte &= ~PTE_UNMODIFIED;
 		*ptep = pte;
@@ -294,19 +295,20 @@
 		const paddr_t pa = pte_to_paddr(pte);
 		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
 		KASSERT(pg);
+		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
 
 		UVMHIST_LOG(pmapexechist,
 		    "srr0=%#x pg=%p (pa %#"PRIxPADDR"): %s",
 		    tf->tf_srr0, pg, pa,
-		    (VM_PAGE_MD_EXECPAGE_P(pg)
+		    (VM_PAGEMD_EXECPAGE_P(mdpg) ?
"no syncicache (already execpage)" : "performed syncicache (now execpage)")); - if (!VM_PAGE_MD_EXECPAGE_P(pg)) { + if (!VM_PAGEMD_EXECPAGE_P(mdpg)) { ci->ci_softc->cpu_ev_exec_trap_sync.ev_count++; dcache_wb_page(pa); icache_inv_page(pa); - pmap_page_set_attributes(pg, VM_PAGE_MD_EXECPAGE); + pmap_page_set_attributes(mdpg, VM_PAGEMD_EXECPAGE); } pte &= ~PTE_UNSYNCED; pte |= PTE_xX; Index: src/sys/arch/powerpc/include/intr.h diff -u src/sys/arch/powerpc/include/intr.h:1.8 src/sys/arch/powerpc/include/intr.h:1.9 --- src/sys/arch/powerpc/include/intr.h:1.8 Fri Jun 17 23:36:17 2011 +++ src/sys/arch/powerpc/include/intr.h Mon Jun 20 20:24:28 2011 @@ -1,4 +1,4 @@ -/* $NetBSD: intr.h,v 1.8 2011/06/17 23:36:17 matt Exp $ */ +/* $NetBSD: intr.h,v 1.9 2011/06/20 20:24:28 matt Exp $ */ /*- * Copyright (c) 2007 Michael Lorenz @@ -28,7 +28,7 @@ #ifndef _LOCORE #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: intr.h,v 1.8 2011/06/17 23:36:17 matt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: intr.h,v 1.9 2011/06/20 20:24:28 matt Exp $"); #endif #ifndef POWERPC_INTR_MACHDEP_H @@ -36,12 +36,6 @@ #define __HAVE_FAST_SOFTINTS 1 -#ifndef _LOCORE -void *intr_establish(int, int, int, int (*)(void *), void *); -void intr_disestablish(void *); -const char *intr_typename(int); -void genppc_cpu_configure(void); -#endif /* Interrupt priority `levels'. */ #define IPL_NONE 0 /* nothing */ @@ -60,7 +54,20 @@ #define IST_EDGE 2 /* edge-triggered */ #define IST_LEVEL 3 /* level-triggered */ -#ifndef _LOCORE +#if !defined(_LOCORE) +void * intr_establish(int, int, int, int (*)(void *), void *); +void intr_disestablish(void *); +const char * + intr_typename(int); + +int splraise(int); +int spllower(int); +void splx(int); + +#if !defined(_MODULE) + +void genppc_cpu_configure(void); + /* * Interrupt handler chains. intr_establish() inserts a handler into * the list. The handler is called with its (single) argument. 
@@ -73,13 +80,9 @@
 	int ih_virq;
 };
 
-int splraise(int);
-int spllower(int);
-void splx(int);
-
 void softint_fast_dispatch(struct lwp *, int);
 
-#define softint_init_md powerpc_softint_init_md
+#define softint_init_md		powerpc_softint_init_md
 #define softint_trigger		powerpc_softint_trigger
 
 #ifdef __IMASK_T
@@ -101,6 +104,8 @@
 #define PIC_VIRQ_TO_MASK(v)	__BIT(HWIRQ_MAX - (v))
 #define PIC_VIRQ_MS_PENDING(p)	__builtin_clz(p)
 
+#endif /* !_MODULE */
+
 #define spl0()		spllower(0)
 
 typedef int ipl_t;

Index: src/sys/arch/powerpc/include/pmap.h
diff -u src/sys/arch/powerpc/include/pmap.h:1.36 src/sys/arch/powerpc/include/pmap.h:1.37
--- src/sys/arch/powerpc/include/pmap.h:1.36	Mon Jun 20 08:07:03 2011
+++ src/sys/arch/powerpc/include/pmap.h	Mon Jun 20 20:24:28 2011
@@ -1,15 +1,43 @@
-/*	$NetBSD: pmap.h,v 1.36 2011/06/20 08:07:03 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.37 2011/06/20 20:24:28 matt Exp $	*/
+
+#ifndef _POWERPC_PMAP_H_
+#define _POWERPC_PMAP_H_
 
 #ifdef _KERNEL_OPT
 #include "opt_ppcarch.h"
+#include "opt_modular.h"
 #endif
 
-#ifdef PPC_IBM4XX
-#include <powerpc/ibm4xx/pmap.h>
-#elif defined(PPC_BOOKE)
+#if !defined(_MODULE)
+
+#if defined(PPC_BOOKE)
 #include <powerpc/booke/pmap.h>
+#elif defined(PPC_IBM4XX)
+#include <powerpc/ibm4xx/pmap.h>
 #elif defined(PPC_OEA) || defined (PPC_OEA64) || defined (PPC_OEA64_BRIDGE)
 #include <powerpc/oea/pmap.h>
 #else
 #error unknown PPC variant
 #endif
+
+#endif /* !_MODULE */
+
+#if !defined(_LOCORE) && (defined(MODULAR) || defined(_MODULE))
+/*
+ * Both BOOKE and OEA use __HAVE_VM_PAGE_MD but IBM4XX doesn't, so define
+ * a compatible vm_page_md so that struct vm_page is the same size for all
+ * PPC variants.
+ */
+#ifndef __HAVE_VM_PAGE_MD
+#define __HAVE_VM_PAGE_MD
+
+struct vm_page_md {
+	uintptr_t mdpg_dummy[5];
+};
+#endif /* !__HAVE_VM_PAGE_MD */
+
+__CTASSERT(sizeof(struct vm_page_md) == sizeof(uintptr_t)*5);
+
+#endif /* !_LOCORE && (MODULAR || _MODULE) */
+
+#endif /* !_POWERPC_PMAP_H_ */

Index: src/sys/arch/powerpc/include/vmparam.h
diff -u src/sys/arch/powerpc/include/vmparam.h:1.14 src/sys/arch/powerpc/include/vmparam.h:1.15
--- src/sys/arch/powerpc/include/vmparam.h:1.14	Mon Jun 20 08:01:14 2011
+++ src/sys/arch/powerpc/include/vmparam.h	Mon Jun 20 20:24:28 2011
@@ -1,9 +1,52 @@
-/*	$NetBSD: vmparam.h,v 1.14 2011/06/20 08:01:14 matt Exp $	*/
+/*	$NetBSD: vmparam.h,v 1.15 2011/06/20 20:24:28 matt Exp $	*/
+
+#ifndef _POWERPC_VMPARAM_H_
+#define _POWERPC_VMPARAM_H_
 
 #ifdef _KERNEL_OPT
+#include "opt_modular.h"
 #include "opt_ppcarch.h"
+#include "opt_uvm.h"
 #endif
 
+/*
+ * These are common for BOOKE, IBM4XX, and OEA
+ */
+#define VM_FREELIST_DEFAULT	0
+#define VM_FREELIST_FIRST256	1
+#define VM_FREELIST_FIRST16	2
+#define VM_NFREELIST		3
+
+#define VM_PHYSSEG_MAX		16
+
+/*
+ * The address to which unspecified mapping requests default.
+ * Put the stack in its own segment and start mmapping at the
+ * top of the next lower segment.
+ */
+#define __USE_TOPDOWN_VM
+#define VM_DEFAULT_ADDRESS(da, sz) \
+	((VM_MAXUSER_ADDRESS - MAXSSIZ) - round_page(sz))
+
+#if defined(_MODULE)
+/*
+ * If we are a module, then we need to cope with variable page sizes,
+ * since BOOKE and OEA use 4KB pages while IBM4XX uses 16KB pages.
+ */
+#define MIN_PAGE_SIZE	4096	/* BOOKE/OEA */
+#define MAX_PAGE_SIZE	16384	/* IBM4XX */
+
+/*
+ * Some modules need some of these constants, but they vary between the
+ * variants, so they are exported as linker symbols: they don't take up
+ * any space, and also avoid an extra load to get the value into a register.
+ */
+extern const char __USRSTACK;	/* let the linker resolve it */
+
+#define USRSTACK	((vaddr_t)(uintptr_t)&__USRSTACK)
+
+#else /* !_MODULE */
+
 #if defined(PPC_BOOKE)
 #include <powerpc/booke/vmparam.h>
 #elif defined(PPC_IBM4XX)
@@ -13,3 +56,23 @@
 #else
 #error unknown PPC variant
 #endif
+
+#endif /* !_MODULE */
+
+#if defined(MODULAR) || defined(_MODULE)
+/*
+ * If we are a module or support modules, we need to define a compatible
+ * pmap_physseg since IBM4XX uses one.  This wastes a tiny bit of space
+ * but is needed for compatibility.
+ */
+#ifndef __HAVE_PMAP_PHYSSEG
+#define __HAVE_PMAP_PHYSSEG
+struct pmap_physseg {
+	uintptr_t pmseg_dummy[2];
+};
+#endif
+
+__CTASSERT(sizeof(struct pmap_physseg) == sizeof(uintptr_t) * 2);
+#endif /* MODULAR || _MODULE */
+
+#endif /* !_POWERPC_VMPARAM_H_ */

Index: src/sys/arch/powerpc/include/booke/pmap.h
diff -u src/sys/arch/powerpc/include/booke/pmap.h:1.5 src/sys/arch/powerpc/include/booke/pmap.h:1.6
--- src/sys/arch/powerpc/include/booke/pmap.h:1.5	Sun Jun 5 16:52:25 2011
+++ src/sys/arch/powerpc/include/booke/pmap.h	Mon Jun 20 20:24:28 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.5 2011/06/05 16:52:25 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.6 2011/06/20 20:24:28 matt Exp $	*/
 
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -40,6 +40,10 @@
 #error use assym.h instead
 #endif
 
+#if defined(_MODULE)
+#error this file should not be included by loadable kernel modules
+#endif
+
 #include <sys/cpu.h>
 #include <sys/kcore.h>
 #include <uvm/uvm_page.h>
@@ -51,6 +55,8 @@
 #define PMAP_MD_NOCACHE	0x01000000
 #define PMAP_NEED_PROCWR
 
+#include <common/pmap/tlb/vmpagemd.h>
+
 #include <powerpc/booke/pte.h>
 
 #define NBSEG	(NBPG*NPTEPG)
@@ -64,7 +70,6 @@
 #define PMAP_TLB_NUM_PIDS	256
 #define PMAP_INVALID_SEGTAB_ADDRESS	((struct pmap_segtab *)0xfeeddead)
 
-#ifndef _LOCORE
 #define pmap_phys_address(x)	(x)
 
 void	pmap_procwr(struct proc *, vaddr_t, size_t);
@@ -120,6 +125,5 @@
 #define POOL_PHYSTOV(pa)	((vaddr_t)(paddr_t)(pa))
 
 #include <common/pmap/tlb/pmap.h>
-#endif /* _LOCORE */
 
 #endif /* !_POWERPC_BOOKE_PMAP_H_ */

Index: src/sys/arch/powerpc/include/booke/pte.h
diff -u src/sys/arch/powerpc/include/booke/pte.h:1.2 src/sys/arch/powerpc/include/booke/pte.h:1.3
--- src/sys/arch/powerpc/include/booke/pte.h:1.2	Tue Jan 18 01:02:54 2011
+++ src/sys/arch/powerpc/include/booke/pte.h	Mon Jun 20 20:24:28 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pte.h,v 1.2 2011/01/18 01:02:54 matt Exp $	*/
+/*	$NetBSD: pte.h,v 1.3 2011/06/20 20:24:28 matt Exp $	*/
 
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -180,17 +180,17 @@
 }
 
 static inline pt_entry_t
-pte_prot_bits(struct vm_page *pg, vm_prot_t prot)
+pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot)
 {
 	KASSERT(prot & VM_PROT_READ);
 	pt_entry_t pt_entry = PTE_xR;
 	if (prot & VM_PROT_EXECUTE) {
 #if 0
 		pt_entry |= PTE_xX;
-		if (pg != NULL && !VM_PAGE_MD_EXECPAGE_P(pg))
+		if (mdpg != NULL && !VM_PAGEMD_EXECPAGE_P(mdpg))
 			pt_entry |= PTE_UNSYNCED;
 #elif 1
-		if (pg != NULL && !VM_PAGE_MD_EXECPAGE_P(pg))
+		if (mdpg != NULL && !VM_PAGEMD_EXECPAGE_P(mdpg))
 			pt_entry |= PTE_UNSYNCED;
 		else
 			pt_entry |= PTE_xX;
@@ -200,23 +200,23 @@
 	}
 	if (prot & VM_PROT_WRITE) {
 		pt_entry |= PTE_xW;
-		if (pg != NULL && !VM_PAGE_MD_MODIFIED_P(pg))
+		if (mdpg != NULL && !VM_PAGEMD_MODIFIED_P(mdpg))
 			pt_entry |= PTE_UNMODIFIED;
 	}
 	return pt_entry;
 }
 
 static inline pt_entry_t
-pte_flag_bits(struct vm_page *pg, int flags)
+pte_flag_bits(struct vm_page_md *mdpg, int flags)
 {
 	if (__predict_false(flags & PMAP_MD_NOCACHE)) {
-		if (__predict_true(pg != NULL)) {
+		if (__predict_true(mdpg != NULL)) {
 			return pte_nocached_bits();
 		} else {
 			return pte_ionocached_bits();
 		}
 	} else {
-		if (__predict_false(pg != NULL)) {
+		if (__predict_false(mdpg != NULL)) {
 			return pte_cached_bits();
 		} else {
 			return pte_iocached_bits();
@@ -225,24 +225,24 @@
 }
 
 static inline pt_entry_t
-pte_make_enter(paddr_t pa, struct vm_page *pg, vm_prot_t prot,
+pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
 	int flags, bool kernel)
 {
 	pt_entry_t pt_entry = (pt_entry_t) pa & PTE_RPN_MASK;
 
-	pt_entry |= pte_flag_bits(pg, flags);
-	pt_entry |= pte_prot_bits(pg, prot);
+	pt_entry |= pte_flag_bits(mdpg, flags);
+	pt_entry |= pte_prot_bits(mdpg, prot);
 
 	return pt_entry;
 }
 
 static inline pt_entry_t
-pte_make_kenter_pa(paddr_t pa, struct vm_page *pg, vm_prot_t prot,
+pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
 	int flags)
 {
 	pt_entry_t pt_entry = (pt_entry_t) pa & PTE_RPN_MASK;
 
-	pt_entry |= pte_flag_bits(pg, flags);
+	pt_entry |= pte_flag_bits(mdpg, flags);
 	pt_entry |= pte_prot_bits(NULL, prot); /* pretend unmanaged */
 
 	return pt_entry;

Index: src/sys/arch/powerpc/include/booke/vmparam.h
diff -u src/sys/arch/powerpc/include/booke/vmparam.h:1.4 src/sys/arch/powerpc/include/booke/vmparam.h:1.5
--- src/sys/arch/powerpc/include/booke/vmparam.h:1.4	Sun Jun 5 16:52:25 2011
+++ src/sys/arch/powerpc/include/booke/vmparam.h	Mon Jun 20 20:24:28 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: vmparam.h,v 1.4 2011/06/05 16:52:25 matt Exp $	*/
+/*	$NetBSD: vmparam.h,v 1.5 2011/06/20 20:24:28 matt Exp $	*/
 
 /*-
  * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
  * All rights reserved.
@@ -37,8 +37,6 @@
 #ifndef _POWERPC_BOOKE_VMPARAM_H_
 #define _POWERPC_BOOKE_VMPARAM_H_
 
-#include <sys/mutex.h>
-
 /*
  * Most of the definitions in this can be overriden by a machine-specific
  * vmparam.h if required.  Otherwise a port can just include this file
@@ -102,97 +100,12 @@
 #define VM_MIN_KERNEL_ADDRESS	((vaddr_t) 0xe4000000)
 #define VM_MAX_KERNEL_ADDRESS	((vaddr_t) 0xfefff000)
 
-/*
- * The address to which unspecified mapping requests default
- * Put the stack in it's own segment and start mmaping at the
- */
-#ifdef _KERNEL_OPT
-#include "opt_uvm.h"
-#endif
-#define __USE_TOPDOWN_VM
-#define VM_DEFAULT_ADDRESS(da, sz) \
-	((VM_MAXUSER_ADDRESS - MAXSSIZ) - round_page(sz))
-
-#ifndef VM_PHYSSEG_MAX
-#define VM_PHYSSEG_MAX		16
-#endif
 #define VM_PHYSSEG_STRAT	VM_PSTRAT_BIGFIRST
 
 #ifndef VM_PHYS_SIZE
 #define VM_PHYS_SIZE		(USRIOSIZE * PAGE_SIZE)
 #endif
 
-#define VM_NFREELIST		2	/* 16 distinct memory segments */
-#define VM_FREELIST_DEFAULT	0
-#define VM_FREELIST_FIRST16	1
-#define VM_FREELIST_MAX		2
-
-#ifndef VM_NFREELIST
-#define VM_NFREELIST		16	/* 16 distinct memory segments */
-#define VM_FREELIST_DEFAULT	0
-#define VM_FREELIST_MAX		1
-#endif
-
-#define __HAVE_VM_PAGE_MD
-#ifndef _LOCORE
-
-typedef struct pv_entry {
-	struct pv_entry *pv_next;
-	struct pmap *pv_pmap;
-	vaddr_t pv_va;
-} *pv_entry_t;
-
-#define VM_PAGE_MD_REFERENCED	0x0001	/* page has been recently referenced */
-#define VM_PAGE_MD_MODIFIED	0x0002	/* page has been modified */
-#define VM_PAGE_MD_POOLPAGE	0x0004	/* page is used as a poolpage */
-#define VM_PAGE_MD_EXECPAGE	0x0008	/* page is exec mapped */
-#if 0
-#define VM_PAGE_MD_UNCACHED	0x0010	/* page is mapped uncached */
-#endif
-
-#ifdef VM_PAGE_MD_UNCACHED
-#define VM_PAGE_MD_CACHED_P(pg)	(((pg)->mdpage.mdpg_attrs & VM_PAGE_MD_UNCACHED) == 0)
-#define VM_PAGE_MD_UNCACHED_P(pg)	(((pg)->mdpage.mdpg_attrs & VM_PAGE_MD_UNCACHED) != 0)
-#endif
-#define VM_PAGE_MD_MODIFIED_P(pg)	(((pg)->mdpage.mdpg_attrs & VM_PAGE_MD_MODIFIED) != 0)
-#define VM_PAGE_MD_REFERENCED_P(pg)	(((pg)->mdpage.mdpg_attrs & VM_PAGE_MD_REFERENCED) != 0)
-#define VM_PAGE_MD_POOLPAGE_P(pg)	(((pg)->mdpage.mdpg_attrs & VM_PAGE_MD_POOLPAGE) != 0)
-#define VM_PAGE_MD_EXECPAGE_P(pg)	(((pg)->mdpage.mdpg_attrs & VM_PAGE_MD_EXECPAGE) != 0)
-
-struct vm_page_md {
-	struct pv_entry mdpg_first;	/* pv_entry first */
-#ifdef MULTIPROCESSOR
-	volatile u_int mdpg_attrs;	/* page attributes */
-	kmutex_t *mdpg_lock;		/* pv list lock */
-#define VM_PAGE_PVLIST_LOCK_INIT(pg) \
-	(pg)->mdpage.mdpg_lock = NULL
-#define VM_PAGE_PVLIST_LOCKED_P(pg) \
-	(mutex_owner((pg)->mdpage.mdpg_lock) != 0)
-#define VM_PAGE_PVLIST_LOCK(pg, list_change) \
-	pmap_pvlist_lock(pg, list_change)
-#define VM_PAGE_PVLIST_UNLOCK(pg) \
-	mutex_spin_exit((pg)->mdpage.mdpg_lock);
-#define VM_PAGE_PVLIST_GEN(pg)	((uint16_t)(pg->mdpage.mdpg_attrs >> 16))
-#else
-	u_int mdpg_attrs;		/* page attributes */
-#define VM_PAGE_PVLIST_LOCK_INIT(pg)	do { } while (/*CONSTCOND*/ 0)
-#define VM_PAGE_PVLIST_LOCKED_P(pg)	true
-#define VM_PAGE_PVLIST_LOCK(pg, lc)	(mutex_spin_enter(&pmap_pvlist_mutex), 0)
-#define VM_PAGE_PVLIST_UNLOCK(pg)	mutex_spin_exit(&pmap_pvlist_mutex)
-#define VM_PAGE_PVLIST_GEN(pg)		(0)
-#endif
-};
-
-#define VM_MDPAGE_INIT(pg) \
-do { \
-	(pg)->mdpage.mdpg_first.pv_next = NULL; \
-	(pg)->mdpage.mdpg_first.pv_pmap = NULL; \
-	(pg)->mdpage.mdpg_first.pv_va = (pg)->phys_addr; \
-	VM_PAGE_PVLIST_LOCK_INIT(pg); \
-	(pg)->mdpage.mdpg_attrs = 0; \
-} while (/* CONSTCOND */ 0)
-
-#endif /* _LOCORE */
+#include <common/pmap/tlb/vmpagemd.h>
 
 #endif /* _POWERPC_BOOKE_VMPARAM_H_ */

Index: src/sys/arch/powerpc/include/ibm4xx/pmap.h
diff -u src/sys/arch/powerpc/include/ibm4xx/pmap.h:1.15 src/sys/arch/powerpc/include/ibm4xx/pmap.h:1.16
--- src/sys/arch/powerpc/include/ibm4xx/pmap.h:1.15	Tue Jan 18 01:02:54 2011
+++ src/sys/arch/powerpc/include/ibm4xx/pmap.h	Mon Jun 20 20:24:28 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.15 2011/01/18 01:02:54 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.16 2011/06/20 20:24:28 matt Exp $	*/
 
 /*
  * Copyright 2001 Wasabi Systems, Inc.
@@ -69,6 +69,14 @@
 #ifndef _IBM4XX_PMAP_H_
 #define _IBM4XX_PMAP_H_
 
+#ifdef _LOCORE
+#error use assym.h instead
+#endif
+
+#if defined(_MODULE)
+#error this file should not be included by loadable kernel modules
+#endif
+
 #include <powerpc/ibm4xx/tlb.h>
 
 #define KERNEL_PID	1	/* TLB PID to use for kernel translation */
@@ -132,8 +140,6 @@
 #define PME_WRITETHROUG	0x2000000
 #define PMAP_MD_NOCACHE	PME_NOCACHE	/* XXX: OEA pmap compat. for bus_dma */
 
-#ifndef _LOCORE
-
 /*
  * Pmap stuff
  */
@@ -203,5 +209,4 @@
 	return va;
 }
 #endif	/* _KERNEL */
-#endif	/* _LOCORE */
 #endif	/* _IBM4XX_PMAP_H_ */

Index: src/sys/arch/powerpc/include/ibm4xx/vmparam.h
diff -u src/sys/arch/powerpc/include/ibm4xx/vmparam.h:1.8 src/sys/arch/powerpc/include/ibm4xx/vmparam.h:1.9
--- src/sys/arch/powerpc/include/ibm4xx/vmparam.h:1.8	Sat Nov 6 16:36:27 2010
+++ src/sys/arch/powerpc/include/ibm4xx/vmparam.h	Mon Jun 20 20:24:28 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: vmparam.h,v 1.8 2010/11/06 16:36:27 uebayasi Exp $	*/
+/*	$NetBSD: vmparam.h,v 1.9 2011/06/20 20:24:28 matt Exp $	*/
 
 /*-
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -109,10 +109,6 @@
 	char *attrs;
 };
 
-#define VM_PHYSSEG_MAX		16	/* 1? */
 #define VM_PHYSSEG_STRAT	VM_PSTRAT_BSEARCH
 
-#define VM_NFREELIST		1
-#define VM_FREELIST_DEFAULT	0
-
 #endif	/* _MACHINE_VMPARAM_H_ */

Index: src/sys/arch/powerpc/include/oea/pmap.h
diff -u src/sys/arch/powerpc/include/oea/pmap.h:1.23 src/sys/arch/powerpc/include/oea/pmap.h:1.24
--- src/sys/arch/powerpc/include/oea/pmap.h:1.23	Mon Jun 20 08:07:03 2011
+++ src/sys/arch/powerpc/include/oea/pmap.h	Mon Jun 20 20:24:29 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.23 2011/06/20 08:07:03 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.24 2011/06/20 20:24:29 matt Exp $	*/
 
 /*-
  * Copyright (C) 1995, 1996 Wolfgang Solfrank.
@@ -34,12 +34,19 @@
 #ifndef _POWERPC_OEA_PMAP_H_
 #define _POWERPC_OEA_PMAP_H_
 
+#ifdef _LOCORE
+#error use assym.h instead
+#endif
+
+#if defined(_LKM) || defined(_MODULE)
+#error this file should not be included by loadable kernel modules
+#endif
+
 #ifdef _KERNEL_OPT
 #include "opt_ppcarch.h"
 #endif
 #include <powerpc/oea/pte.h>
 
-#ifndef _LOCORE
 /*
  * Pmap stuff
  */
@@ -224,17 +231,19 @@
 #define __HAVE_VM_PAGE_MD
 
 struct vm_page_md {
-	struct pvo_head mdpg_pvoh;
 	unsigned int mdpg_attrs;
+	struct pvo_head mdpg_pvoh;
+#ifdef MODULAR
+	uintptr_t mdpg_dummy[3];
+#endif
 };
 
 #define VM_MDPAGE_INIT(pg) do {				\
-	LIST_INIT(&(pg)->mdpage.mdpg_pvoh);		\
 	(pg)->mdpage.mdpg_attrs = 0;			\
+	LIST_INIT(&(pg)->mdpage.mdpg_pvoh);		\
 } while (/*CONSTCOND*/0)
 
 __END_DECLS
 
 #endif	/* _KERNEL */
-#endif	/* _LOCORE */
 #endif	/* _POWERPC_OEA_PMAP_H_ */

Index: src/sys/arch/powerpc/include/oea/vmparam.h
diff -u src/sys/arch/powerpc/include/oea/vmparam.h:1.16 src/sys/arch/powerpc/include/oea/vmparam.h:1.17
--- src/sys/arch/powerpc/include/oea/vmparam.h:1.16	Sun Nov 14 13:33:22 2010
+++ src/sys/arch/powerpc/include/oea/vmparam.h	Mon Jun 20 20:24:29 2011
@@ -163,21 +163,6 @@
 #define VM_MIN_KERNEL_ADDRESS	((vaddr_t) (KERNEL_SR << ADDR_SR_SHFT))
 #define VM_MAX_KERNEL_ADDRESS	(VM_MIN_KERNEL_ADDRESS + 2*SEGMENT_LENGTH)
 
-/*
- * The address to which unspecified mapping requests default
- * Put the stack in it's own segment and start mmaping at the
- */
-#ifdef _KERNEL_OPT
-#include "opt_uvm.h"
-#endif
-#define __USE_TOPDOWN_VM
-#define VM_DEFAULT_ADDRESS(da, sz) \
-	(((VM_MAXUSER_ADDRESS - MAXSSIZ) & SEGMENT_MASK) - round_page(sz))
-
-#ifndef VM_PHYSSEG_MAX
-#define VM_PHYSSEG_MAX	16
-#endif
 #define VM_PHYSSEG_STRAT	VM_PSTRAT_BIGFIRST
 
 #ifndef VM_PHYS_SIZE
@@ -188,10 +173,4 @@
 #define VM_MAX_KERNEL_BUF	(SEGMENT_LENGTH * 3 / 4)
 #endif
 
-#define VM_NFREELIST		16	/* 16 distinct memory segments */
-#define VM_FREELIST_DEFAULT	0
-#define VM_FREELIST_FIRST256	1
-#define VM_FREELIST_FIRST16	2
-#define VM_FREELIST_MAX		3
-
 #endif /* _POWERPC_OEA_VMPARAM_H_ */

Index: src/sys/common/pmap/tlb/pmap.c
diff -u src/sys/common/pmap/tlb/pmap.c:1.5 src/sys/common/pmap/tlb/pmap.c:1.6
--- src/sys/common/pmap/tlb/pmap.c:1.5	Sun Jun 12 05:32:38 2011
+++ src/sys/common/pmap/tlb/pmap.c	Mon Jun 20 20:24:29 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.5 2011/06/12 05:32:38 matt Exp $	*/
+/*	$NetBSD: pmap.c,v 1.6 2011/06/20 20:24:29 matt Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.5 2011/06/12 05:32:38 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.6 2011/06/20 20:24:29 matt Exp $");
 
 /*
  * Manages physical address maps.
@@ -95,8 +95,9 @@
  * and to when physical maps must be made correct.
  */
 
-#include "opt_sysv.h"
+#include "opt_modular.h"
 #include "opt_multiprocessor.h"
+#include "opt_sysv.h"
 
 #define __PMAP_PRIVATE
 
@@ -278,9 +279,9 @@
  */
 bool
-pmap_page_clear_attributes(struct vm_page *pg, u_int clear_attributes)
+pmap_page_clear_attributes(struct vm_page_md *mdpg, u_int clear_attributes)
 {
-	volatile u_int * const attrp = &VM_PAGE_TO_MD(pg)->mdpg_attrs;
+	volatile u_int * const attrp = &mdpg->mdpg_attrs;
 #ifdef MULTIPROCESSOR
 	for (;;) {
 		u_int old_attr = *attrp;
@@ -300,12 +301,12 @@
 }
 
 void
-pmap_page_set_attributes(struct vm_page *pg, u_int set_attributes)
+pmap_page_set_attributes(struct vm_page_md *mdpg, u_int set_attributes)
 {
 #ifdef MULTIPROCESSOR
-	atomic_or_uint(&VM_PAGE_TO_MD(pg)->mdpg_attrs, set_attributes);
+	atomic_or_uint(&mdpg->mdpg_attrs, set_attributes);
 #else
-	VM_PAGE_TO_MD(pg)->mdpg_attrs |= set_attributes;
+	mdpg->mdpg_attrs |= set_attributes;
 #endif
 }
 
@@ -315,9 +316,10 @@
 #ifndef MULTIPROCESSOR
 	struct pmap * const curpmap = curcpu()->ci_curpm;
 #endif
-	pv_entry_t pv = &VM_PAGE_TO_MD(pg)->mdpg_first;
+	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
+	pv_entry_t pv = &mdpg->mdpg_first;
 	__cpuset_t onproc = CPUSET_NULLSET;
-	(void)VM_PAGE_PVLIST_LOCK(pg, false);
+	(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
 	if (pv->pv_pmap != NULL) {
 		for (; pv != NULL; pv = pv->pv_next) {
#ifdef MULTIPROCESSOR
@@ -333,7 +335,7 @@
 #endif
 		}
 	}
-	VM_PAGE_PVLIST_UNLOCK(pg);
+	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 	kpreempt_disable();
 	pmap_md_page_syncicache(pg, onproc);
 	kpreempt_enable();
@@ -695,6 +697,7 @@
 void
 pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
 {
+	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv;
 	vaddr_t va;
 
@@ -711,28 +714,28 @@
 	/* copy_on_write */
 	case VM_PROT_READ:
 	case VM_PROT_READ|VM_PROT_EXECUTE:
-		(void)VM_PAGE_PVLIST_LOCK(pg, false);
-		pv = &VM_PAGE_TO_MD(pg)->mdpg_first;
+		(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
+		pv = &mdpg->mdpg_first;
 		/*
 		 * Loop over all current mappings setting/clearing as apropos.
 		 */
 		if (pv->pv_pmap != NULL) {
 			while (pv != NULL) {
 				const pmap_t pmap = pv->pv_pmap;
-				const uint16_t gen = VM_PAGE_PVLIST_GEN(pg);
+				const uint16_t gen = VM_PAGEMD_PVLIST_GEN(mdpg);
 				va = pv->pv_va;
-				VM_PAGE_PVLIST_UNLOCK(pg);
+				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 				pmap_protect(pmap, va, va + PAGE_SIZE, prot);
 				KASSERT(pv->pv_pmap == pmap);
 				pmap_update(pmap);
-				if (gen != VM_PAGE_PVLIST_LOCK(pg, false)) {
-					pv = &VM_PAGE_TO_MD(pg)->mdpg_first;
+				if (gen != VM_PAGEMD_PVLIST_LOCK(mdpg, false)) {
+					pv = &mdpg->mdpg_first;
 				} else {
 					pv = pv->pv_next;
 				}
 			}
 		}
-		VM_PAGE_PVLIST_UNLOCK(pg);
+		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 		break;
 
 	/* remove_all */
 	default:
 		/*
 		 * Do this first so that for each unmapping, pmap_remove_pv
 		 * won't try to sync the icache.
 		 */
-		if (pmap_page_clear_attributes(pg, VM_PAGE_MD_EXECPAGE)) {
+		if (pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE)) {
 			UVMHIST_LOG(pmapexechist, "pg %p (pa %#"PRIxPADDR
 			    "): execpage cleared", pg, VM_PAGE_TO_PHYS(pg),0,0);
 			PMAP_COUNT(exec_uncached_page_protect);
 		}
-		(void)VM_PAGE_PVLIST_LOCK(pg, false);
-		pv = &VM_PAGE_TO_MD(pg)->mdpg_first;
+		(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
+		pv = &mdpg->mdpg_first;
 		while (pv->pv_pmap != NULL) {
 			const pmap_t pmap = pv->pv_pmap;
 			va = pv->pv_va;
-			VM_PAGE_PVLIST_UNLOCK(pg);
+			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 			pmap_remove(pmap, va, va + PAGE_SIZE);
 			pmap_update(pmap);
-			(void)VM_PAGE_PVLIST_LOCK(pg, false);
+			(void)VM_PAGEMD_PVLIST_LOCK(mdpg, false);
 		}
-		VM_PAGE_PVLIST_UNLOCK(pg);
+		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 	}
 
 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
@@ -785,9 +788,10 @@
 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pte_to_paddr(pt_entry));
 	if (pg != NULL && pte_modified_p(pt_entry)) {
+		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
 		pmap_md_vca_clean(pg, sva, PMAP_WBINV);
-		if (VM_PAGE_MD_EXECPAGE_P(pg)) {
-			KASSERT(VM_PAGE_TO_MD(pg)->mdpg_first.pv_pmap != NULL);
+		if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
+			KASSERT(mdpg->mdpg_first.pv_pmap != NULL);
 			UVMHIST_LOG(pmapexechist,
 			    "pg %p (pa %#"PRIxPADDR"): %s",
 			    pg, VM_PAGE_TO_PHYS(pg),
@@ -855,7 +859,7 @@
 	UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0);
 }
 
-#if defined(VM_PAGE_MD_CACHED)
+#if defined(__PMAP_VIRTUAL_CACHE_ALIASES)
 /*
  * pmap_page_cache:
 *
@@ -864,22 +868,23 @@
 static void
 pmap_page_cache(struct vm_page *pg, bool cached)
 {
+	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
 	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR") cached=%s)",
 	    pg, VM_PAGE_TO_PHYS(pg), cached ?
"true" : "false", 0); KASSERT(kpreempt_disabled()); if (cached) { - pmap_page_clear_attributes(pg, VM_PAGE_MD_UNCACHED); + pmap_page_clear_attributes(mdpg, VM_PAGEMD_UNCACHED); PMAP_COUNT(page_cache_restorations); } else { - pmap_page_set_attributes(pg, VM_PAGE_MD_UNCACHED); + pmap_page_set_attributes(mdpg, VM_PAGEMD_UNCACHED); PMAP_COUNT(page_cache_evictions); } - KASSERT(VM_PAGE_PVLIST_LOCKED_P(pg)); + KASSERT(VM_PAGEMD_PVLIST_LOCKED_P(mdpg)); KASSERT(kpreempt_disabled()); - for (pv_entry_t pv = &VM_PAGE_TO_MD(pg)->mdpg_first; + for (pv_entry_t pv = &mdpg->mdpg_first; pv != NULL; pv = pv->pv_next) { pmap_t pmap = pv->pv_pmap; @@ -900,7 +905,7 @@ } UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0); } -#endif /* VM_PAGE_MD_CACHED */ +#endif /* __PMAP_VIRTUAL_CACHE_ALIASES */ /* * Insert the given physical page (p) at @@ -954,16 +959,18 @@ ("%s: no READ (%#x) in prot %#x", __func__, VM_PROT_READ, prot)); struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); + struct vm_page_md *mdpg; if (pg) { + mdpg = VM_PAGE_TO_MD(pg); /* Set page referenced/modified status based on flags */ if (flags & VM_PROT_WRITE) - pmap_page_set_attributes(pg, VM_PAGE_MD_MODIFIED|VM_PAGE_MD_REFERENCED); + pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED); else if (flags & VM_PROT_ALL) - pmap_page_set_attributes(pg, VM_PAGE_MD_REFERENCED); + pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED); -#ifdef VM_PAGE_MD_CACHED - if (!VM_PAGE_MD_CACHED(pg)) +#ifdef __PMAP_VIRTUAL_CACHE_ALIASES + if (!VM_PAGEMD_CACHED(pg)) flags |= PMAP_NOCACHE; #endif @@ -973,11 +980,12 @@ * Assumption: if it is not part of our managed memory * then it must be device memory which may be volatile. */ + mdpg = NULL; flags |= PMAP_NOCACHE; PMAP_COUNT(unmanaged_mappings); } - npte = pte_make_enter(pa, pg, prot, flags, is_kernel_pmap_p); + npte = pte_make_enter(pa, mdpg, prot, flags, is_kernel_pmap_p); kpreempt_disable(); pt_entry_t * const ptep = pmap_pte_reserve(pmap, va, flags); @@ -1021,15 +1029,16 @@ kpreempt_enable(); if (pg != NULL && (prot == (VM_PROT_READ | VM_PROT_EXECUTE))) { + KASSERT(mdpg != NULL); PMAP_COUNT(exec_mappings); - if (!VM_PAGE_MD_EXECPAGE_P(pg) && pte_cached_p(npte)) { + if (!VM_PAGEMD_EXECPAGE_P(mdpg) && pte_cached_p(npte)) { if (!pte_deferred_exec_p(npte)) { UVMHIST_LOG(*histp, "va=%#"PRIxVADDR" pg %p: %s syncicache%s", va, pg, "immediate", ""); pmap_page_syncicache(pg); - pmap_page_set_attributes(pg, - VM_PAGE_MD_EXECPAGE); + pmap_page_set_attributes(mdpg, + VM_PAGEMD_EXECPAGE); PMAP_COUNT(exec_synced_mappings); } else { UVMHIST_LOG(*histp, "va=%#"PRIxVADDR @@ -1045,10 +1054,11 @@ : " (uncached)")); } } else if (pg != NULL && (prot & VM_PROT_EXECUTE)) { + KASSERT(mdpg != NULL); KASSERT(prot & VM_PROT_WRITE); PMAP_COUNT(exec_mappings); pmap_page_syncicache(pg); - pmap_page_clear_attributes(pg, VM_PAGE_MD_EXECPAGE); + pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE); UVMHIST_LOG(pmapexechist, "va=%#"PRIxVADDR" pg %p: %s syncicache%s", va, pg, "immediate", " (writeable)"); @@ -1066,21 +1076,25 @@ pmap_kenter_pa(vaddr_t va, paddr_t pa, vm_prot_t prot, u_int flags) { struct vm_page * const pg = PHYS_TO_VM_PAGE(pa); + struct vm_page_md *mdpg; UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist); UVMHIST_LOG(pmaphist, "(va=%#"PRIxVADDR" pa=%#"PRIxPADDR ", prot=%#x, flags=%#x)", va, pa, prot, flags); PMAP_COUNT(kenter_pa); - if (!PMAP_PAGE_COLOROK_P(pa, va) && pg != NULL) - PMAP_COUNT(kenter_pa_bad); - if (pg == NULL) { + mdpg = NULL; PMAP_COUNT(kenter_pa_unmanaged); flags |= PMAP_NOCACHE; + } else { + mdpg = 
+		    VM_PAGE_TO_MD(pg);
 	}
 
-	const pt_entry_t npte = pte_make_kenter_pa(pa, pg, prot, flags)
+	if ((flags & PMAP_NOCACHE) == 0 && !PMAP_PAGE_COLOROK_P(pa, va))
+		PMAP_COUNT(kenter_pa_bad);
+
+	const pt_entry_t npte = pte_make_kenter_pa(pa, mdpg, prot, flags)
 	    | pte_wired_entry();
 	kpreempt_disable();
 	pt_entry_t * const ptep = pmap_pte_reserve(pmap_kernel(), va, 0);
@@ -1285,11 +1299,13 @@
 bool
 pmap_clear_reference(struct vm_page *pg)
 {
+	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
+
 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);
 	UVMHIST_LOG(pmaphist, "(pg=%p (pa %#"PRIxPADDR"))",
 	    pg, VM_PAGE_TO_PHYS(pg), 0,0);
 
-	bool rv = pmap_page_clear_attributes(pg, VM_PAGE_MD_REFERENCED);
+	bool rv = pmap_page_clear_attributes(mdpg, VM_PAGEMD_REFERENCED);
 
 	UVMHIST_LOG(pmaphist, "<- %s", rv ? "true" : "false", 0,0,0);
 
@@ -1306,7 +1322,7 @@
 pmap_is_referenced(struct vm_page *pg)
 {
 
-	return VM_PAGE_MD_REFERENCED_P(pg);
+	return VM_PAGEMD_REFERENCED_P(VM_PAGE_TO_MD(pg));
 }
 
 /*
@@ -1315,7 +1331,8 @@
 bool
 pmap_clear_modify(struct vm_page *pg)
 {
-	pv_entry_t pv = &VM_PAGE_TO_MD(pg)->mdpg_first;
+	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
+	pv_entry_t pv = &mdpg->mdpg_first;
 	pv_entry_t pv_next;
 	uint16_t gen;
 
@@ -1324,12 +1341,12 @@
 	    pg, VM_PAGE_TO_PHYS(pg), 0,0);
 	PMAP_COUNT(clear_modify);
 
-	if (VM_PAGE_MD_EXECPAGE_P(pg)) {
+	if (VM_PAGEMD_EXECPAGE_P(mdpg)) {
 		if (pv->pv_pmap == NULL) {
 			UVMHIST_LOG(pmapexechist,
 			    "pg %p (pa %#"PRIxPADDR"): %s",
 			    pg, VM_PAGE_TO_PHYS(pg), "execpage cleared", 0);
-			pmap_page_clear_attributes(pg, VM_PAGE_MD_EXECPAGE);
+			pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE);
 			PMAP_COUNT(exec_uncached_clear_modify);
 		} else {
 			UVMHIST_LOG(pmapexechist,
@@ -1339,7 +1356,7 @@
 			PMAP_COUNT(exec_synced_clear_modify);
 		}
 	}
-	if (!pmap_page_clear_attributes(pg, VM_PAGE_MD_MODIFIED)) {
+	if (!pmap_page_clear_attributes(mdpg, VM_PAGEMD_MODIFIED)) {
 		UVMHIST_LOG(pmaphist, "<- false", 0,0,0,0);
 		return false;
 	}
@@ -1354,7 +1371,7 @@
 	 * flush the VAC first if there is one.
 	 */
 	kpreempt_disable();
-	gen = VM_PAGE_PVLIST_LOCK(pg, false);
+	gen = VM_PAGEMD_PVLIST_LOCK(mdpg, false);
 	for (; pv != NULL; pv = pv_next) {
 		pmap_t pmap = pv->pv_pmap;
 		vaddr_t va = pv->pv_va;
@@ -1367,17 +1384,17 @@
 		}
 		pmap_md_vca_clean(pg, va, PMAP_WBINV);
 		*ptep = pt_entry;
-		VM_PAGE_PVLIST_UNLOCK(pg);
+		VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 		pmap_tlb_invalidate_addr(pmap, va);
 		pmap_update(pmap);
-		if (__predict_false(gen != VM_PAGE_PVLIST_LOCK(pg, false))) {
+		if (__predict_false(gen != VM_PAGEMD_PVLIST_LOCK(mdpg, false))) {
 			/*
 			 * The list changed!  So restart from the beginning.
 			 */
-			pv_next = &VM_PAGE_TO_MD(pg)->mdpg_first;
+			pv_next = &mdpg->mdpg_first;
 		}
 	}
-	VM_PAGE_PVLIST_UNLOCK(pg);
+	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 	kpreempt_enable();
 
 	UVMHIST_LOG(pmaphist, "<- true (mappings changed)", 0,0,0,0);
@@ -1394,7 +1411,7 @@
 pmap_is_modified(struct vm_page *pg)
 {
 
-	return VM_PAGE_MD_MODIFIED_P(pg);
+	return VM_PAGEMD_MODIFIED_P(VM_PAGE_TO_MD(pg));
 }
 
 /*
@@ -1405,8 +1422,9 @@
 void
 pmap_set_modified(paddr_t pa)
 {
-	struct vm_page *pg = PHYS_TO_VM_PAGE(pa);
-	pmap_page_set_attributes(pg, VM_PAGE_MD_MODIFIED | VM_PAGE_MD_REFERENCED);
+	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
+	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
+	pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED|VM_PAGEMD_REFERENCED);
 }
 
 /******************** pv_entry management ********************/
@@ -1415,12 +1433,12 @@
 pmap_check_pvlist(struct vm_page *pg)
 {
 #ifdef PARANOIADIAG
-	pt_entry_t pv = &VM_PAGE_TO_MD(pg)->mdpg_first;
+	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
+	pt_entry_t pv = &mdpg->mdpg_first;
 	if (pv->pv_pmap != NULL) {
 		for (; pv != NULL; pv = pv->pv_next) {
 			KASSERT(!pmap_md_direct_mapped_vaddr_p(pv->pv_va));
 		}
-		pv = &VM_PAGE_TO_MD(pg)->mdpg_first;
 	}
 #endif /* PARANOIADIAG */
 }
@@ -1432,6 +1450,7 @@
 void
 pmap_enter_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, u_int *npte)
 {
+	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv, npv, apv;
 	int16_t gen;
 	bool first = false;
@@ -1446,8 +1465,8 @@
 	KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va));
 
 	apv = NULL;
-	pv = &VM_PAGE_TO_MD(pg)->mdpg_first;
-	gen = VM_PAGE_PVLIST_LOCK(pg, true);
+	pv = &mdpg->mdpg_first;
+	gen = VM_PAGEMD_PVLIST_LOCK(mdpg, true);
 	pmap_check_pvlist(pg);
 again:
 	if (pv->pv_pmap == NULL) {
@@ -1458,8 +1477,8 @@
 		PMAP_COUNT(primary_mappings);
 		PMAP_COUNT(mappings);
 		first = true;
-#ifdef VM_PAGE_MD_UNCACHED
-		pmap_page_clear_attributes(pg, VM_PAGE_MD_UNCACHED);
+#ifdef __PMAP_VIRTUAL_CACHE_ALIASES
+		pmap_page_clear_attributes(pg, VM_PAGEMD_UNCACHED);
 #endif
 		pv->pv_pmap = pmap;
 		pv->pv_va = va;
@@ -1490,35 +1509,33 @@
 				    va, pa, pt_entry);
 #endif
 				PMAP_COUNT(remappings);
-				VM_PAGE_PVLIST_UNLOCK(pg);
+				VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 				if (__predict_false(apv != NULL))
 					pmap_pv_free(apv);
 				return;
 			}
 		}
 		if (__predict_true(apv == NULL)) {
-#ifdef MULTIPROCESSOR
 			/*
 			 * To allocate a PV, we have to release the PVLIST lock
 			 * so get the page generation.  We allocate the PV, and
 			 * then reacquire the lock.
 			 */
-			VM_PAGE_PVLIST_UNLOCK(pg);
-#endif
+			VM_PAGEMD_PVLIST_UNLOCK(mdpg);
+
 			apv = (pv_entry_t)pmap_pv_alloc();
 			if (apv == NULL)
 				panic("pmap_enter_pv: pmap_pv_alloc() failed");
-#ifdef MULTIPROCESSOR
+
 			/*
 			 * If the generation has changed, then someone else
 			 * tinkered with this page so we should
 			 * start over.
 			 */
 			uint16_t oldgen = gen;
-			gen = VM_PAGE_PVLIST_LOCK(pg, true);
+			gen = VM_PAGEMD_PVLIST_LOCK(mdpg, true);
 			if (gen != oldgen)
 				goto again;
-#endif
 		}
 		npv = apv;
 		apv = NULL;
@@ -1529,7 +1546,7 @@
 		PMAP_COUNT(mappings);
 	}
 	pmap_check_pvlist(pg);
-	VM_PAGE_PVLIST_UNLOCK(pg);
+	VM_PAGEMD_PVLIST_UNLOCK(mdpg);
 
 	if (__predict_false(apv != NULL))
 		pmap_pv_free(apv);
@@ -1547,6 +1564,7 @@
 void
 pmap_remove_pv(pmap_t pmap, vaddr_t va, struct vm_page *pg, bool dirty)
 {
+	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
 	pv_entry_t pv, npv;
 	bool last;
 
@@ -1557,9 +1575,9 @@
 	UVMHIST_LOG(pmaphist, "dirty=%s)", dirty ?
"true" : "false", 0,0,0); KASSERT(kpreempt_disabled()); - pv = &VM_PAGE_TO_MD(pg)->mdpg_first; + pv = &mdpg->mdpg_first; - (void)VM_PAGE_PVLIST_LOCK(pg, true); + (void)VM_PAGEMD_PVLIST_LOCK(mdpg, true); pmap_check_pvlist(pg); /* @@ -1576,8 +1594,8 @@ *pv = *npv; KASSERT(pv->pv_pmap != NULL); } else { -#ifdef VM_PAGE_MD_UNCACHED - pmap_page_clear_attributes(pg, VM_PAGE_MD_UNCACHED); +#ifdef __PMAP_VIRTUAL_CACHE_ALIASES + pmap_page_clear_attributes(pg, VM_PAGEMD_UNCACHED); #endif pv->pv_pmap = NULL; last = true; /* Last mapping removed */ @@ -1596,14 +1614,14 @@ pmap_md_vca_remove(pg, va); pmap_check_pvlist(pg); - VM_PAGE_PVLIST_UNLOCK(pg); + VM_PAGEMD_PVLIST_UNLOCK(mdpg); /* * Free the pv_entry if needed. */ if (npv) pmap_pv_free(npv); - if (VM_PAGE_MD_EXECPAGE_P(pg) && dirty) { + if (VM_PAGEMD_EXECPAGE_P(mdpg) && dirty) { if (last) { /* * If this was the page's last mapping, we no longer @@ -1614,7 +1632,7 @@ pg, VM_PAGE_TO_PHYS(pg), last ? " [last mapping]" : "", "execpage cleared"); - pmap_page_clear_attributes(pg, VM_PAGE_MD_EXECPAGE); + pmap_page_clear_attributes(mdpg, VM_PAGEMD_EXECPAGE); PMAP_COUNT(exec_uncached_remove); } else { /* @@ -1633,7 +1651,7 @@ UVMHIST_LOG(pmaphist, "<- done", 0,0,0,0); } -#ifdef MULTIPROCESSOR +#if defined(MULTIPROCESSOR) struct pmap_pvlist_info { kmutex_t *pli_locks[PAGE_SIZE / 32]; volatile u_int pli_lock_refs[PAGE_SIZE / 32]; @@ -1664,10 +1682,10 @@ } uint16_t -pmap_pvlist_lock(struct vm_page *pg, bool list_change) +pmap_pvlist_lock(struct vm_page_md *mdpg, bool list_change) { struct pmap_pvlist_info * const pli = &pmap_pvlist_info; - kmutex_t *lock = VM_PAGE_TO_MD(pg)->mdpg_lock; + kmutex_t *lock = mdpg->mdpg_lock; int16_t gen; /* @@ -1682,7 +1700,7 @@ * Set the lock. If some other thread already did, just use * the one they assigned. */ - lock = atomic_cas_ptr(&VM_PAGE_TO_MD(pg)->mdpg_lock, NULL, new_lock); + lock = atomic_cas_ptr(&mdpg->mdpg_lock, NULL, new_lock); if (lock == NULL) { lock = new_lock; atomic_inc_uint(&pli->pli_lock_refs[lockid]); @@ -1698,22 +1716,42 @@ * If the locker will be changing the list, increment the high 16 bits * of attrs so we use that as a generation number. */ - gen = VM_PAGE_PVLIST_GEN(pg); /* get old value */ + gen = VM_PAGEMD_PVLIST_GEN(mdpg); /* get old value */ if (list_change) - atomic_add_int(&VM_PAGE_TO_MD(pg)->mdpg_attrs, 0x10000); + atomic_add_int(&mdpg->mdpg_attrs, 0x10000); /* * Return the generation number. */ return gen; } -#else +#else /* !MULTIPROCESSOR */ void pmap_pvlist_lock_init(size_t cache_line_size) { mutex_init(&pmap_pvlist_mutex, MUTEX_DEFAULT, IPL_VM); } -#endif /* MULTIPROCESSOR */ + +#ifdef MODULAR +uint16_t +pmap_pvlist_lock(struct vm_page_md *mdpg, bool list_change) +{ + /* + * We just use a global lock. + */ + if (__predict_false(mdpg->mdpg_lock == NULL)) { + mdpg->mdpg_lock = &pmap_pvlist_mutex; + } + + /* + * Now finally lock the pvlists. 
+	 */
+	mutex_spin_enter(mdpg->mdpg_lock);
+
+	return 0;
+}
+#endif /* MODULAR */
+#endif /* !MULTIPROCESSOR */
 
 /*
  * pmap_pv_page_alloc:
@@ -1743,8 +1781,9 @@
 	KASSERT(pmap_md_direct_mapped_vaddr_p(va));
 	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
+	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
 
 	pmap_md_vca_remove(pg, va);
-	pmap_page_clear_attributes(pg, VM_PAGE_MD_POOLPAGE);
+	pmap_page_clear_attributes(mdpg, VM_PAGEMD_POOLPAGE);
 	uvm_pagefree(pg);
 }
 
@@ -1787,7 +1826,8 @@
 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
 
 	KASSERT(pg);
-	pmap_page_set_attributes(pg, VM_PAGE_MD_POOLPAGE);
+	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
+	pmap_page_set_attributes(mdpg, VM_PAGEMD_POOLPAGE);
 
 	const vaddr_t va = pmap_md_direct_map_paddr(pa);
 	pmap_md_vca_add(pg, va, NULL);
@@ -1803,7 +1843,8 @@
 	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
 
 	KASSERT(pg);
-	pmap_page_clear_attributes(pg, VM_PAGE_MD_POOLPAGE);
+	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
+	pmap_page_clear_attributes(mdpg, VM_PAGEMD_POOLPAGE);
 	pmap_md_vca_remove(pg, va);
 
 	return pa;

Index: src/sys/common/pmap/tlb/pmap.h
diff -u src/sys/common/pmap/tlb/pmap.h:1.4 src/sys/common/pmap/tlb/pmap.h:1.5
--- src/sys/common/pmap/tlb/pmap.h:1.4	Sun Jun 12 05:32:38 2011
+++ src/sys/common/pmap/tlb/pmap.h	Mon Jun 20 20:24:29 2011
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.4 2011/06/12 05:32:38 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.5 2011/06/20 20:24:29 matt Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -221,8 +221,8 @@
 */
 void	pmap_remove_all(pmap_t);
 void	pmap_set_modified(paddr_t);
-bool	pmap_page_clear_attributes(struct vm_page *, u_int);
-void	pmap_page_set_attributes(struct vm_page *, u_int);
+bool	pmap_page_clear_attributes(struct vm_page_md *, u_int);
+void	pmap_page_set_attributes(struct vm_page_md *, u_int);
 void	pmap_pvlist_lock_init(size_t);
 
 #define PMAP_WB		0
@@ -247,7 +247,7 @@
 void	pmap_tlb_invalidate_addr(pmap_t, vaddr_t);
 void	pmap_tlb_check(pmap_t);
 
-uint16_t pmap_pvlist_lock(struct vm_page *, bool);
+uint16_t pmap_pvlist_lock(struct vm_page_md *, bool);
 
 #define PMAP_STEAL_MEMORY	/* enable pmap_steal_memory() */
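As a final illustrative note (not part of the commit): the vmparam.h
change above exports per-variant constants such as USRSTACK to modules
as linker-resolved symbols, so the symbol's *address* carries the value
and no data space or extra memory load is needed.  A minimal sketch of
the technique follows; the header lines are from the diff, while the
linker-script entry and the value shown are hypothetical:

	/* In the header (from the diff): take the address of an extern
	 * symbol and reinterpret it as the constant's value. */
	extern const char __USRSTACK;	/* let the linker resolve it */
	#define USRSTACK ((vaddr_t)(uintptr_t)&__USRSTACK)

	/* Hypothetical entry in a kernel's linker script, assigning the
	 * symbol an absolute address equal to that kernel's value: */
	__USRSTACK = 0x7ffff000;	/* example value only */

Each kernel variant links the symbol to its own value, so one module
binary can work across variants whose compile-time constants differ.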