Module Name: src Committed By: matt Date: Thu Jun 11 08:22:09 UTC 2015
Modified Files: src/sys/arch/arc/arc: bus_dma.c src/sys/arch/arc/isa: isadma_bounce.c src/sys/arch/emips/emips: bus_dma.c machdep.c src/sys/arch/ews4800mips/ews4800mips: bus_dma.c src/sys/arch/hpcmips/hpcmips: bus_dma.c src/sys/arch/hpcmips/include: kloader.h src/sys/arch/hpcmips/vr: vrdcu.c src/sys/arch/mips/include: bus_dma_defs.h pmap.h pte.h src/sys/arch/mips/mips: cpu_subr.c mips_machdep.c pmap.c pmap_segtab.c vm_machdep.c src/sys/arch/mipsco/mipsco: bus_dma.c src/sys/arch/newsmips/newsmips: bus.c src/sys/arch/pmax/pmax: bus_dma.c src/sys/arch/sgimips/ioc: if_le_oioc.c Log Message: Add struct pmap_limits and pm_{min,max}addr from uvm/pmap/map.h and use it to store avail_start, avail_end, virtual_start, and virtual_end. Remove iospace and let emips just bump pmap_limits.virtual_start to get the VA space it needs. pmap_segtab.c is almost identical to uvm/pmap/pmap_segtab.c now. It won't be long until we switch to the uvm/pmap one. To generate a diff of this commit: cvs rdiff -u -r1.33 -r1.34 src/sys/arch/arc/arc/bus_dma.c cvs rdiff -u -r1.13 -r1.14 src/sys/arch/arc/isa/isadma_bounce.c cvs rdiff -u -r1.3 -r1.4 src/sys/arch/emips/emips/bus_dma.c cvs rdiff -u -r1.10 -r1.11 src/sys/arch/emips/emips/machdep.c cvs rdiff -u -r1.13 -r1.14 src/sys/arch/ews4800mips/ews4800mips/bus_dma.c cvs rdiff -u -r1.38 -r1.39 src/sys/arch/hpcmips/hpcmips/bus_dma.c cvs rdiff -u -r1.5 -r1.6 src/sys/arch/hpcmips/include/kloader.h cvs rdiff -u -r1.7 -r1.8 src/sys/arch/hpcmips/vr/vrdcu.c cvs rdiff -u -r1.1 -r1.2 src/sys/arch/mips/include/bus_dma_defs.h cvs rdiff -u -r1.65 -r1.66 src/sys/arch/mips/include/pmap.h cvs rdiff -u -r1.20 -r1.21 src/sys/arch/mips/include/pte.h cvs rdiff -u -r1.24 -r1.25 src/sys/arch/mips/mips/cpu_subr.c cvs rdiff -u -r1.267 -r1.268 src/sys/arch/mips/mips/mips_machdep.c cvs rdiff -u -r1.215 -r1.216 src/sys/arch/mips/mips/pmap.c cvs rdiff -u -r1.8 -r1.9 src/sys/arch/mips/mips/pmap_segtab.c cvs rdiff -u -r1.144 -r1.145 src/sys/arch/mips/mips/vm_machdep.c cvs rdiff -u 
-r1.28 -r1.29 src/sys/arch/mipsco/mipsco/bus_dma.c cvs rdiff -u -r1.32 -r1.33 src/sys/arch/newsmips/newsmips/bus.c cvs rdiff -u -r1.57 -r1.58 src/sys/arch/pmax/pmax/bus_dma.c cvs rdiff -u -r1.4 -r1.5 src/sys/arch/sgimips/ioc/if_le_oioc.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/arch/arc/arc/bus_dma.c diff -u src/sys/arch/arc/arc/bus_dma.c:1.33 src/sys/arch/arc/arc/bus_dma.c:1.34 --- src/sys/arch/arc/arc/bus_dma.c:1.33 Fri Jul 1 19:28:00 2011 +++ src/sys/arch/arc/arc/bus_dma.c Thu Jun 11 08:22:08 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: bus_dma.c,v 1.33 2011/07/01 19:28:00 dyoung Exp $ */ +/* $NetBSD: bus_dma.c,v 1.34 2015/06/11 08:22:08 matt Exp $ */ /* NetBSD: bus_dma.c,v 1.20 2000/01/10 03:24:36 simonb Exp */ /*- @@ -32,7 +32,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.33 2011/07/01 19:28:00 dyoung Exp $"); +__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.34 2015/06/11 08:22:08 matt Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -554,8 +554,8 @@ _bus_dmamem_alloc(bus_dma_tag_t t, bus_s { return _bus_dmamem_alloc_range(t, size, alignment, boundary, - segs, nsegs, rsegs, flags, mips_avail_start, - trunc_page(mips_avail_end)); + segs, nsegs, rsegs, flags, pmap_limits.avail_start, + trunc_page(pmap_limits.avail_end)); } /* @@ -575,7 +575,7 @@ _bus_dmamem_alloc_range(bus_dma_tag_t t, /* Always round the size. */ size = round_page(size); - high = mips_avail_end - PAGE_SIZE; + high = pmap_limits.avail_end - PAGE_SIZE; /* * Allocate pages from the VM system. 
@@ -599,7 +599,7 @@ _bus_dmamem_alloc_range(bus_dma_tag_t t, for (; m != NULL; m = TAILQ_NEXT(m, pageq.queue)) { curaddr = VM_PAGE_TO_PHYS(m); #ifdef DIAGNOSTIC - if (curaddr < mips_avail_start || curaddr >= high) { + if (curaddr < pmap_limits.avail_start || curaddr >= high) { printf("uvm_pglistalloc returned non-sensical" " address 0x%llx\n", (long long)curaddr); panic("_bus_dmamem_alloc_range"); Index: src/sys/arch/arc/isa/isadma_bounce.c diff -u src/sys/arch/arc/isa/isadma_bounce.c:1.13 src/sys/arch/arc/isa/isadma_bounce.c:1.14 --- src/sys/arch/arc/isa/isadma_bounce.c:1.13 Fri Jul 1 19:25:41 2011 +++ src/sys/arch/arc/isa/isadma_bounce.c Thu Jun 11 08:22:08 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: isadma_bounce.c,v 1.13 2011/07/01 19:25:41 dyoung Exp $ */ +/* $NetBSD: isadma_bounce.c,v 1.14 2015/06/11 08:22:08 matt Exp $ */ /* NetBSD: isadma_bounce.c,v 1.2 2000/06/01 05:49:36 thorpej Exp */ /*- @@ -33,7 +33,7 @@ #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ -__KERNEL_RCSID(0, "$NetBSD: isadma_bounce.c,v 1.13 2011/07/01 19:25:41 dyoung Exp $"); +__KERNEL_RCSID(0, "$NetBSD: isadma_bounce.c,v 1.14 2015/06/11 08:22:08 matt Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -172,7 +172,7 @@ isadma_bounce_dmamap_create(bus_dma_tag_ * ISA DMA controller), we may have to bounce it as well. 
*/ cookieflags = 0; - if (mips_avail_end > ISA_DMA_BOUNCE_THRESHOLD || + if (pmap_limits.avail_end > ISA_DMA_BOUNCE_THRESHOLD || ((map->_dm_size / PAGE_SIZE) + 1) > map->_dm_segcnt) { cookieflags |= ID_MIGHT_NEED_BOUNCE; cookiesize += (sizeof(bus_dma_segment_t) * @@ -568,10 +568,10 @@ isadma_bounce_dmamem_alloc(bus_dma_tag_t { paddr_t high; - if (mips_avail_end > ISA_DMA_BOUNCE_THRESHOLD) + if (pmap_limits.avail_end > ISA_DMA_BOUNCE_THRESHOLD) high = trunc_page(ISA_DMA_BOUNCE_THRESHOLD); else - high = trunc_page(mips_avail_end); + high = trunc_page(pmap_limits.avail_end); return _bus_dmamem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs, flags, 0, high); Index: src/sys/arch/emips/emips/bus_dma.c diff -u src/sys/arch/emips/emips/bus_dma.c:1.3 src/sys/arch/emips/emips/bus_dma.c:1.4 --- src/sys/arch/emips/emips/bus_dma.c:1.3 Tue Oct 2 23:54:53 2012 +++ src/sys/arch/emips/emips/bus_dma.c Thu Jun 11 08:22:08 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: bus_dma.c,v 1.3 2012/10/02 23:54:53 christos Exp $ */ +/* $NetBSD: bus_dma.c,v 1.4 2015/06/11 08:22:08 matt Exp $ */ /*- * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 
@@ -31,7 +31,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.3 2012/10/02 23:54:53 christos Exp $"); +__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.4 2015/06/11 08:22:08 matt Exp $"); #include "opt_cputype.h" @@ -658,10 +658,10 @@ _bus_dmamem_alloc(bus_dma_tag_t t, bus_s bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags) { - return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary, - segs, nsegs, rsegs, flags, - mips_avail_start /*low*/, - mips_avail_end - PAGE_SIZE /*high*/)); + return _bus_dmamem_alloc_range_common(t, size, alignment, boundary, + segs, nsegs, rsegs, flags, + pmap_limits.avail_start /*low*/, + pmap_limits.avail_end - PAGE_SIZE /*high*/); } /* Index: src/sys/arch/emips/emips/machdep.c diff -u src/sys/arch/emips/emips/machdep.c:1.10 src/sys/arch/emips/emips/machdep.c:1.11 --- src/sys/arch/emips/emips/machdep.c:1.10 Mon Mar 24 20:06:31 2014 +++ src/sys/arch/emips/emips/machdep.c Thu Jun 11 08:22:08 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: machdep.c,v 1.10 2014/03/24 20:06:31 christos Exp $ */ +/* $NetBSD: machdep.c,v 1.11 2015/06/11 08:22:08 matt Exp $ */ /* * Copyright (c) 1988 University of Utah. @@ -39,7 +39,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.10 2014/03/24 20:06:31 christos Exp $"); +__KERNEL_RCSID(0, "$NetBSD: machdep.c,v 1.11 2015/06/11 08:22:08 matt Exp $"); #include "opt_ddb.h" @@ -82,8 +82,8 @@ __KERNEL_RCSID(0, "$NetBSD: machdep.c,v #include <ddb/db_extern.h> #endif -extern vaddr_t iospace; -extern vsize_t iospace_size; +vaddr_t iospace = 64 * 1024; /* BUGBUG make it an option? */ +vsize_t iospace_size; #include "ksyms.h" @@ -315,7 +315,8 @@ mach_init(int argc, char *argv[], int co /* * Initialize the virtual memory system. */ - iospace_size = 64*1024; /* BUGBUG make it an option? 
*/ + iospace = pmap_limits.virtual_start; + pmap_limits.virtual_start += iospace_size; pmap_bootstrap(); mips_init_lwp0_uarea(); Index: src/sys/arch/ews4800mips/ews4800mips/bus_dma.c diff -u src/sys/arch/ews4800mips/ews4800mips/bus_dma.c:1.13 src/sys/arch/ews4800mips/ews4800mips/bus_dma.c:1.14 --- src/sys/arch/ews4800mips/ews4800mips/bus_dma.c:1.13 Tue Oct 2 23:54:52 2012 +++ src/sys/arch/ews4800mips/ews4800mips/bus_dma.c Thu Jun 11 08:22:08 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: bus_dma.c,v 1.13 2012/10/02 23:54:52 christos Exp $ */ +/* $NetBSD: bus_dma.c,v 1.14 2015/06/11 08:22:08 matt Exp $ */ /* * Copyright (c) 1998 The NetBSD Foundation, Inc. @@ -31,7 +31,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.13 2012/10/02 23:54:52 christos Exp $"); +__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.14 2015/06/11 08:22:08 matt Exp $"); /* #define BUS_DMA_DEBUG */ #include <sys/param.h> @@ -542,11 +542,11 @@ _bus_dmamem_alloc(bus_dma_tag_t t, bus_s bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags) { - extern paddr_t mips_avail_start, mips_avail_end; - return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary, + return _bus_dmamem_alloc_range_common(t, size, alignment, boundary, segs, nsegs, rsegs, flags, - mips_avail_start /*low*/, mips_avail_end - PAGE_SIZE /*high*/)); + pmap_limits.avail_start /*low*/, + pmap_limits.avail_end - PAGE_SIZE /*high*/); } /* Index: src/sys/arch/hpcmips/hpcmips/bus_dma.c diff -u src/sys/arch/hpcmips/hpcmips/bus_dma.c:1.38 src/sys/arch/hpcmips/hpcmips/bus_dma.c:1.39 --- src/sys/arch/hpcmips/hpcmips/bus_dma.c:1.38 Tue Oct 2 23:54:52 2012 +++ src/sys/arch/hpcmips/hpcmips/bus_dma.c Thu Jun 11 08:22:09 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: bus_dma.c,v 1.38 2012/10/02 23:54:52 christos Exp $ */ +/* $NetBSD: bus_dma.c,v 1.39 2015/06/11 08:22:09 matt Exp $ */ /*- * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 
@@ -31,7 +31,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.38 2012/10/02 23:54:52 christos Exp $"); +__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.39 2015/06/11 08:22:09 matt Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -517,12 +517,10 @@ _hpcmips_bd_mem_alloc(bus_dma_tag_t t, b bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags) { - psize_t high; - - high = mips_avail_end - PAGE_SIZE; + paddr_t high = pmap_limits.avail_end - PAGE_SIZE; return (_hpcmips_bd_mem_alloc_range(t, size, alignment, boundary, - segs, nsegs, rsegs, flags, mips_avail_start, high)); + segs, nsegs, rsegs, flags, pmap_limits.avail_start, high)); } /* @@ -537,8 +535,8 @@ _hpcmips_bd_mem_alloc_range(bus_dma_tag_ { #ifdef DIAGNOSTIC - high = high<(mips_avail_end - PAGE_SIZE)? high: (mips_avail_end - PAGE_SIZE); - low = low>mips_avail_start? low: mips_avail_start; + high = high<(pmap_limits.avail_end - PAGE_SIZE)? high: (pmap_limits.avail_end - PAGE_SIZE); + low = low>pmap_limits.avail_start? low: pmap_limits.avail_start; #endif return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary, Index: src/sys/arch/hpcmips/include/kloader.h diff -u src/sys/arch/hpcmips/include/kloader.h:1.5 src/sys/arch/hpcmips/include/kloader.h:1.6 --- src/sys/arch/hpcmips/include/kloader.h:1.5 Wed Mar 16 13:08:22 2011 +++ src/sys/arch/hpcmips/include/kloader.h Thu Jun 11 08:22:09 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: kloader.h,v 1.5 2011/03/16 13:08:22 tsutsui Exp $ */ +/* $NetBSD: kloader.h,v 1.6 2015/06/11 08:22:09 matt Exp $ */ /*- * Copyright (c) 2001, 2002, 2004 The NetBSD Foundation, Inc. 
@@ -28,8 +28,12 @@ #include <dev/kloader.h> +#include <uvm/uvm_extern.h> + +#include <mips/cpuregs.h> + #define PG_VADDR(pg) MIPS_PHYS_TO_KSEG0(VM_PAGE_TO_PHYS(pg)) /* XXX: kludge: MI kloader.c assumes avail_start and avail_end are common */ -#define avail_start mips_avail_start -#define avail_end mips_avail_end +#define avail_start pmap_limits.avail_start +#define avail_end pmap_limits.avail_end Index: src/sys/arch/hpcmips/vr/vrdcu.c diff -u src/sys/arch/hpcmips/vr/vrdcu.c:1.7 src/sys/arch/hpcmips/vr/vrdcu.c:1.8 --- src/sys/arch/hpcmips/vr/vrdcu.c:1.7 Sat Oct 27 17:17:55 2012 +++ src/sys/arch/hpcmips/vr/vrdcu.c Thu Jun 11 08:22:09 2015 @@ -24,7 +24,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: vrdcu.c,v 1.7 2012/10/27 17:17:55 chs Exp $"); +__KERNEL_RCSID(0, "$NetBSD: vrdcu.c,v 1.8 2015/06/11 08:22:09 matt Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -222,12 +222,12 @@ _vrdcu_dmamem_alloc(bus_dma_tag_t t, bus DPRINTFN(1, ("_vrdcu_dmamem_alloc\n")); - high = (mips_avail_end < VRDMAAU_BOUNCE_THRESHOLD ? - mips_avail_end : VRDMAAU_BOUNCE_THRESHOLD) - PAGE_SIZE; + high = (pmap_limits.avail_end < VRDMAAU_BOUNCE_THRESHOLD ? + pmap_limits.avail_end : VRDMAAU_BOUNCE_THRESHOLD) - PAGE_SIZE; alignment = alignment > VRDMAAU_ALIGNMENT ? alignment : VRDMAAU_ALIGNMENT; return _hpcmips_bd_mem_alloc_range(t, size, alignment, boundary, segs, nsegs, rsegs, flags, - mips_avail_start, high); + pmap_limits.avail_start, high); } Index: src/sys/arch/mips/include/bus_dma_defs.h diff -u src/sys/arch/mips/include/bus_dma_defs.h:1.1 src/sys/arch/mips/include/bus_dma_defs.h:1.2 --- src/sys/arch/mips/include/bus_dma_defs.h:1.1 Fri Jul 1 17:28:55 2011 +++ src/sys/arch/mips/include/bus_dma_defs.h Thu Jun 11 08:22:09 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: bus_dma_defs.h,v 1.1 2011/07/01 17:28:55 dyoung Exp $ */ +/* $NetBSD: bus_dma_defs.h,v 1.2 2015/06/11 08:22:09 matt Exp $ */ /*- * Copyright (c) 1997, 1998, 2000, 2001 The NetBSD Foundation, Inc. 
@@ -213,7 +213,7 @@ struct mips_bus_dmamap { }; #ifdef _MIPS_BUS_DMA_PRIVATE -#define _BUS_AVAIL_END mips_avail_end +#define _BUS_AVAIL_END pmap_limits.avail_end /* * Cookie used for bounce buffers. A pointer to one of these it stashed in * the DMA map. Index: src/sys/arch/mips/include/pmap.h diff -u src/sys/arch/mips/include/pmap.h:1.65 src/sys/arch/mips/include/pmap.h:1.66 --- src/sys/arch/mips/include/pmap.h:1.65 Wed Jun 10 22:31:00 2015 +++ src/sys/arch/mips/include/pmap.h Thu Jun 11 08:22:09 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap.h,v 1.65 2015/06/10 22:31:00 matt Exp $ */ +/* $NetBSD: pmap.h,v 1.66 2015/06/11 08:22:09 matt Exp $ */ /* * Copyright (c) 1992, 1993 @@ -105,8 +105,8 @@ * dynamically allocated at boot time. */ -#define mips_trunc_seg(x) ((vaddr_t)(x) & ~SEGOFSET) -#define mips_round_seg(x) (((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET) +#define pmap_trunc_seg(x) ((vaddr_t)(x) & ~SEGOFSET) +#define pmap_round_seg(x) (((vaddr_t)(x) + SEGOFSET) & ~SEGOFSET) #ifdef _LP64 #define PMAP_SEGTABSIZE NSEGPG @@ -119,10 +119,14 @@ union pt_entry; union segtab { #ifdef _LP64 union segtab *seg_seg[PMAP_SEGTABSIZE]; +#else + union segtab *seg_seg[1]; #endif union pt_entry *seg_tab[PMAP_SEGTABSIZE]; }; +typedef union segtab pmap_segtab_t; + /* * Structure defining an tlb entry data set. */ @@ -149,7 +153,7 @@ void pmap_pte_process(struct pmap *, vad uintptr_t); void pmap_segtab_activate(struct pmap *, struct lwp *); void pmap_segtab_init(struct pmap *); -void pmap_segtab_destroy(struct pmap *); +void pmap_segtab_destroy(struct pmap *, pte_callback_t, uintptr_t); extern kmutex_t pmap_segtab_lock; #endif /* _KERNEL */ @@ -178,11 +182,13 @@ struct pmap { kcpuset_t *pm_onproc; /* pmap is active on ... 
*/ volatile u_int pm_shootdown_pending; #endif - union segtab *pm_segtab; /* pointers to pages of PTEs */ + pmap_segtab_t *pm_segtab; /* pointers to pages of PTEs */ u_int pm_count; /* pmap reference count */ u_int pm_flags; #define PMAP_DEFERRED_ACTIVATE 0x0001 struct pmap_statistics pm_stats; /* pmap statistics */ + vaddr_t pm_minaddr; + vaddr_t pm_maxaddr; struct pmap_asid_info pm_pai[1]; }; @@ -233,15 +239,20 @@ struct pmap_kernel { #endif }; +struct pmap_limits { + paddr_t avail_start; + paddr_t avail_end; + vaddr_t virtual_start; + vaddr_t virtual_end; +}; + extern struct pmap_kernel kernel_pmap_store; extern struct pmap_tlb_info pmap_tlb0_info; +extern struct pmap_limits pmap_limits; #ifdef MULTIPROCESSOR extern struct pmap_tlb_info *pmap_tlbs[MAXCPUS]; extern u_int pmap_ntlbs; #endif -extern paddr_t mips_avail_start; -extern paddr_t mips_avail_end; -extern vaddr_t mips_virtual_end; #define pmap_wired_count(pmap) ((pmap)->pm_stats.wired_count) #define pmap_resident_count(pmap) ((pmap)->pm_stats.resident_count) @@ -287,6 +298,8 @@ void pmap_prefer(vaddr_t, vaddr_t *, vsi #define PMAP_STEAL_MEMORY /* enable pmap_steal_memory() */ #define PMAP_ENABLE_PMAP_KMPAGE /* enable the PMAP_KMPAGE flag */ +bool pmap_md_direct_mapped_vaddr_p(vaddr_t); + /* * Alternate mapping hooks for pool pages. Avoids thrashing the TLB. */ @@ -304,8 +317,10 @@ struct vm_page *mips_pmap_alloc_poolpage #define POOL_VTOPHYS(va) (MIPS_KSEG0_P(va) \ ? 
MIPS_KSEG0_TO_PHYS(va) \ : MIPS_XKPHYS_TO_PHYS(va)) +#define POOL_PHYSTOV(pa) MIPS_PHYS_TO_XKPHYS_CACHED((paddr_t)(pa)) #else #define POOL_VTOPHYS(va) MIPS_KSEG0_TO_PHYS((vaddr_t)(va)) +#define POOL_PHYSTOV(pa) MIPS_PHYS_TO_KSEG0_TO_PHYS((paddr_t)(pa)) #endif /* Index: src/sys/arch/mips/include/pte.h diff -u src/sys/arch/mips/include/pte.h:1.20 src/sys/arch/mips/include/pte.h:1.21 --- src/sys/arch/mips/include/pte.h:1.20 Sun Feb 20 07:45:47 2011 +++ src/sys/arch/mips/include/pte.h Thu Jun 11 08:22:09 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: pte.h,v 1.20 2011/02/20 07:45:47 matt Exp $ */ +/* $NetBSD: pte.h,v 1.21 2015/06/11 08:22:09 matt Exp $ */ /*- * Copyright (c) 1997 The NetBSD Foundation, Inc. @@ -268,5 +268,19 @@ mips_paddr_to_tlbpfn(paddr_t pa) extern pt_entry_t *Sysmap; /* kernel pte table */ extern u_int Sysmapsize; /* number of pte's in Sysmap */ + +static inline bool +pte_zero_p(pt_entry_t pte) +{ + return pte.pt_entry == 0; +} + +#define PRIxPTE PRIx32 +static inline uint32_t +pte_value(pt_entry_t pte) +{ + return pte.pt_entry; +} + #endif /* defined(_KERNEL) && !defined(_LOCORE) */ #endif /* __MIPS_PTE_H__ */ Index: src/sys/arch/mips/mips/cpu_subr.c diff -u src/sys/arch/mips/mips/cpu_subr.c:1.24 src/sys/arch/mips/mips/cpu_subr.c:1.25 --- src/sys/arch/mips/mips/cpu_subr.c:1.24 Wed Jun 10 22:31:00 2015 +++ src/sys/arch/mips/mips/cpu_subr.c Thu Jun 11 08:22:09 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: cpu_subr.c,v 1.24 2015/06/10 22:31:00 matt Exp $ */ +/* $NetBSD: cpu_subr.c,v 1.25 2015/06/11 08:22:09 matt Exp $ */ /*- * Copyright (c) 2010 The NetBSD Foundation, Inc. @@ -30,7 +30,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.24 2015/06/10 22:31:00 matt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.25 2015/06/11 08:22:09 matt Exp $"); #include "opt_cputype.h" #include "opt_ddb.h" @@ -194,7 +194,7 @@ cpu_info_alloc(struct pmap_tlb_info *ti, * allocate enough VA so we can map pages with the right color * (to avoid cache alias problems). 
*/ - if (mips_avail_end > MIPS_KSEG1_START - MIPS_KSEG0_START) { + if (pmap_limits.avail_end > MIPS_KSEG1_START - MIPS_KSEG0_START) { ci->ci_pmap_dstbase = uvm_km_alloc(kernel_map, uvmexp.ncolors * PAGE_SIZE, 0, UVM_KMF_VAONLY); KASSERT(ci->ci_pmap_dstbase); Index: src/sys/arch/mips/mips/mips_machdep.c diff -u src/sys/arch/mips/mips/mips_machdep.c:1.267 src/sys/arch/mips/mips/mips_machdep.c:1.268 --- src/sys/arch/mips/mips/mips_machdep.c:1.267 Wed Jun 10 05:03:59 2015 +++ src/sys/arch/mips/mips/mips_machdep.c Thu Jun 11 08:22:09 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: mips_machdep.c,v 1.267 2015/06/10 05:03:59 matt Exp $ */ +/* $NetBSD: mips_machdep.c,v 1.268 2015/06/11 08:22:09 matt Exp $ */ /* * Copyright 2002 Wasabi Systems, Inc. @@ -111,7 +111,7 @@ */ #include <sys/cdefs.h> /* RCS ID & Copyright macro defns */ -__KERNEL_RCSID(0, "$NetBSD: mips_machdep.c,v 1.267 2015/06/10 05:03:59 matt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: mips_machdep.c,v 1.268 2015/06/11 08:22:09 matt Exp $"); #define __INTR_PRIVATE #include "opt_cputype.h" @@ -2395,7 +2395,7 @@ mm_md_kernacc(void *ptr, vm_prot_t prot, if (v < MIPS_XKPHYS_START) { return EFAULT; } - if (MIPS_XKPHYS_P(v) && v > MIPS_PHYS_TO_XKPHYS_CACHED(mips_avail_end + + if (MIPS_XKPHYS_P(v) && v > MIPS_PHYS_TO_XKPHYS_CACHED(pmap_limits.avail_end + mips_round_page(MSGBUFSIZE))) { return EFAULT; } @@ -2410,7 +2410,7 @@ mm_md_kernacc(void *ptr, vm_prot_t prot, if (v < MIPS_KSEG0_START) { return EFAULT; } - if (v < MIPS_PHYS_TO_KSEG0(mips_avail_end + + if (v < MIPS_PHYS_TO_KSEG0(pmap_limits.avail_end + mips_round_page(MSGBUFSIZE))) { *handled = true; return 0; Index: src/sys/arch/mips/mips/pmap.c diff -u src/sys/arch/mips/mips/pmap.c:1.215 src/sys/arch/mips/mips/pmap.c:1.216 --- src/sys/arch/mips/mips/pmap.c:1.215 Wed Jun 10 22:31:00 2015 +++ src/sys/arch/mips/mips/pmap.c Thu Jun 11 08:22:09 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap.c,v 1.215 2015/06/10 22:31:00 matt Exp $ */ +/* $NetBSD: pmap.c,v 1.216 2015/06/11 08:22:09 matt Exp $ */ 
/*- * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc. @@ -67,7 +67,7 @@ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.215 2015/06/10 22:31:00 matt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.216 2015/06/11 08:22:09 matt Exp $"); /* * Manages physical address maps. @@ -267,15 +267,15 @@ struct pmap_kernel kernel_pmap_store = { .kernel_pmap = { .pm_count = 1, .pm_segtab = (void *)(MIPS_KSEG2_START + 0x1eadbeef), + .pm_minaddr = VM_MIN_KERNEL_ADDRESS, + .pm_maxaddr = VM_MAX_KERNEL_ADDRESS, }, }; struct pmap * const kernel_pmap_ptr = &kernel_pmap_store.kernel_pmap; -paddr_t mips_avail_start; /* PA of first available physical page */ -paddr_t mips_avail_end; /* PA of last available physical page */ -vaddr_t mips_virtual_end; /* VA of last avail page (end of kernel AS) */ -vaddr_t iospace; /* VA of start of I/O space, if needed */ -vsize_t iospace_size = 0; /* Size of (initial) range of I/O addresses */ +struct pmap_limits pmap_limits = { /* VA and PA limits */ + .virtual_start = VM_MIN_KERNEL_ADDRESS, +}; pt_entry_t *Sysmap; /* kernel pte table */ unsigned int Sysmapsize; /* number of pte's in Sysmap */ @@ -513,7 +513,7 @@ pmap_bootstrap(void) buf_setvalimit(bufsz); Sysmapsize = (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) + - bufsz + 16 * NCARGS + pager_map_size + iospace_size) / NBPG + + bufsz + 16 * NCARGS + pager_map_size) / NBPG + (maxproc * UPAGES) + nkmempages; #ifdef DEBUG { @@ -551,40 +551,31 @@ pmap_bootstrap(void) * for us. Must do this before uvm_pageboot_alloc() * can be called. */ - mips_avail_start = ptoa(VM_PHYSMEM_PTR(0)->start); - mips_avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end); - mips_virtual_end = VM_MIN_KERNEL_ADDRESS + (vaddr_t)Sysmapsize * NBPG; + pmap_limits.avail_start = ptoa(VM_PHYSMEM_PTR(0)->start); + pmap_limits.avail_end = ptoa(VM_PHYSMEM_PTR(vm_nphysseg - 1)->end); + pmap_limits.virtual_end = pmap_limits.virtual_start + (vaddr_t)Sysmapsize * NBPG; #ifndef _LP64 /* Need space for I/O (not in K1SEG) ? 
*/ - if (mips_virtual_end > VM_MAX_KERNEL_ADDRESS) { - mips_virtual_end = VM_MAX_KERNEL_ADDRESS; + if (pmap_limits.virtual_end > VM_MAX_KERNEL_ADDRESS) { + pmap_limits.virtual_end = VM_MAX_KERNEL_ADDRESS; Sysmapsize = - (VM_MAX_KERNEL_ADDRESS - - (VM_MIN_KERNEL_ADDRESS + iospace_size)) / NBPG; - } - - if (iospace_size) { - iospace = mips_virtual_end - iospace_size; -#ifdef DEBUG - printf("io: %#"PRIxVADDR".%#"PRIxVADDR" %#"PRIxVADDR"\n", - iospace, iospace_size, mips_virtual_end); -#endif + (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / NBPG; } #endif pmap_pvlist_lock_init(); /* * Now actually allocate the kernel PTE array (must be done - * after mips_virtual_end is initialized). + * after pmap_limits.virtual_end is initialized). */ Sysmap = (pt_entry_t *) uvm_pageboot_alloc(sizeof(pt_entry_t) * Sysmapsize); #ifdef PMAP_POOLPAGE_DEBUG - mips_virtual_end -= poolpage.size; - poolpage.base = mips_virtual_end; + pmap_limits.virtual_end -= poolpage.size; + poolpage.base = pmap_limits.virtual_end; poolpage.sysmap = Sysmap + atop(poolpage.size); #endif /* @@ -622,8 +613,8 @@ void pmap_virtual_space(vaddr_t *vstartp, vaddr_t *vendp) { - *vstartp = VM_MIN_KERNEL_ADDRESS; /* kernel is in K0SEG */ - *vendp = trunc_page(mips_virtual_end); /* XXX need pmap_growkernel() */ + *vstartp = trunc_page(pmap_limits.virtual_start); + *vendp = trunc_page(pmap_limits.virtual_end); } /* @@ -763,7 +754,7 @@ pmap_init(void) * allocate enough VA so we can map pages with the right color * (to avoid cache alias problems). 
*/ - if (mips_avail_end > MIPS_KSEG1_START - MIPS_KSEG0_START) { + if (pmap_limits.avail_end > MIPS_KSEG1_START - MIPS_KSEG0_START) { curcpu()->ci_pmap_dstbase = uvm_km_alloc(kernel_map, uvmexp.ncolors * PAGE_SIZE, 0, UVM_KMF_VAONLY); KASSERT(curcpu()->ci_pmap_dstbase); @@ -822,6 +813,11 @@ pmap_create(void) memset(pmap, 0, PMAP_SIZE); pmap->pm_count = 1; + pmap->pm_minaddr = VM_MIN_ADDRESS; + pmap->pm_maxaddr = VM_MAXUSER_ADDRESS; + + pmap_segtab_init(pmap); + #ifdef MULTIPROCESSOR kcpuset_create(&pmap->pm_onproc, true); kcpuset_create(&pmap->pm_active, true); @@ -829,8 +825,6 @@ pmap_create(void) KASSERT(pmap->pm_active != NULL); #endif - pmap_segtab_init(pmap); - return pmap; } @@ -855,7 +849,7 @@ pmap_destroy(pmap_t pmap) PMAP_COUNT(destroy); kpreempt_disable(); pmap_tlb_asid_release_all(pmap); - pmap_segtab_destroy(pmap); + pmap_segtab_destroy(pmap, NULL, 0); #ifdef MULTIPROCESSOR kcpuset_destroy(pmap->pm_onproc); @@ -1003,7 +997,7 @@ pmap_remove(pmap_t pmap, vaddr_t sva, va /* remove entries from kernel pmap */ PMAP_COUNT(remove_kernel_calls); #ifdef PARANOIADIAG - if (sva < VM_MIN_KERNEL_ADDRESS || eva >= mips_virtual_end) + if (sva < VM_MIN_KERNEL_ADDRESS || eva >= pmap_limits.virtual_end) panic("pmap_remove: kva not in range"); #endif pt_entry_t *pte = kvtopte(sva); @@ -1204,7 +1198,7 @@ pmap_protect(pmap_t pmap, vaddr_t sva, v * read-only. 
*/ #ifdef PARANOIADIAG - if (sva < VM_MIN_KERNEL_ADDRESS || eva >= mips_virtual_end) + if (sva < VM_MIN_KERNEL_ADDRESS || eva >= pmap_limits.virtual_end) panic("pmap_protect: kva not in range"); #endif pte = kvtopte(sva); @@ -1398,7 +1392,7 @@ pmap_enter(pmap_t pmap, vaddr_t va, padd if (!good_color) PMAP_COUNT(kernel_mappings_bad); #if defined(DEBUG) || defined(DIAGNOSTIC) || defined(PARANOIADIAG) - if (va < VM_MIN_KERNEL_ADDRESS || va >= mips_virtual_end) + if (va < VM_MIN_KERNEL_ADDRESS || va >= pmap_limits.virtual_end) panic("pmap_enter: kva %#"PRIxVADDR"too big", va); #endif } else { @@ -1778,7 +1772,7 @@ pmap_unwire(pmap_t pmap, vaddr_t va) if (pmap == pmap_kernel()) { /* change entries in kernel pmap */ #ifdef PARANOIADIAG - if (va < VM_MIN_KERNEL_ADDRESS || va >= mips_virtual_end) + if (va < VM_MIN_KERNEL_ADDRESS || va >= pmap_limits.virtual_end) panic("pmap_unwire"); #endif pte = kvtopte(va); @@ -1841,7 +1835,7 @@ pmap_extract(pmap_t pmap, vaddr_t va, pa if (MIPS_KSEG1_P(va)) panic("pmap_extract: kseg1 address %#"PRIxVADDR"", va); #endif - if (va >= mips_virtual_end) + if (va >= pmap_limits.virtual_end) panic("pmap_extract: illegal kernel mapped address %#"PRIxVADDR"", va); pte = kvtopte(va); kpreempt_disable(); @@ -2710,8 +2704,12 @@ mips_pmap_unmap_poolpage(vaddr_t va) return pa; } - - -/******************** page table page management ********************/ - -/* TO BE DONE */ +bool +pmap_md_direct_mapped_vaddr_p(vaddr_t va) +{ +#ifdef _LP64 + if (MIPS_XKPHYS_P(va)) + return true; +#endif + return MIPS_KSEG0_P(va); +} Index: src/sys/arch/mips/mips/pmap_segtab.c diff -u src/sys/arch/mips/mips/pmap_segtab.c:1.8 src/sys/arch/mips/mips/pmap_segtab.c:1.9 --- src/sys/arch/mips/mips/pmap_segtab.c:1.8 Sun May 11 07:53:28 2014 +++ src/sys/arch/mips/mips/pmap_segtab.c Thu Jun 11 08:22:09 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: pmap_segtab.c,v 1.8 2014/05/11 07:53:28 skrll Exp $ */ +/* $NetBSD: pmap_segtab.c,v 1.9 2015/06/11 08:22:09 matt Exp $ */ /*- * Copyright (c) 
1998, 2001 The NetBSD Foundation, Inc. @@ -67,7 +67,7 @@ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.8 2014/05/11 07:53:28 skrll Exp $"); +__KERNEL_RCSID(0, "$NetBSD: pmap_segtab.c,v 1.9 2015/06/11 08:22:09 matt Exp $"); /* * Manages physical address maps. @@ -95,25 +95,8 @@ __KERNEL_RCSID(0, "$NetBSD: pmap_segtab. * and to when physical maps must be made correct. */ -/* XXX simonb 2002/02/26 - * - * MIPS3_PLUS is used to conditionally compile the r4k MMU support. - * This is bogus - for example, some IDT MIPS-II CPUs have r4k style - * MMUs (and 32-bit ones at that). - * - * On the other hand, it's not likely that we'll ever support the R6000 - * (is it?), so maybe that can be an "if MIPS2 or greater" check. - * - * Also along these lines are using totally separate functions for - * r3k-style and r4k-style MMUs and removing all the MIPS_HAS_R4K_MMU - * checks in the current functions. - * - * These warnings probably applies to other files under sys/arch/mips. - */ +#define __PMAP_PRIVATE -#include "opt_sysv.h" -#include "opt_cputype.h" -#include "opt_mips_cache.h" #include "opt_multiprocessor.h" #include <sys/param.h> @@ -124,15 +107,12 @@ __KERNEL_RCSID(0, "$NetBSD: pmap_segtab. #include <uvm/uvm.h> -#include <mips/cache.h> -#include <mips/cpuregs.h> -#include <mips/locore.h> #include <mips/pte.h> -CTASSERT(NBPG >= sizeof(union segtab)); +CTASSERT(NBPG >= sizeof(pmap_segtab_t)); struct pmap_segtab_info { - union segtab *free_segtab; /* free list kept locally */ + pmap_segtab_t *free_segtab; /* free list kept locally */ #ifdef DEBUG uint32_t nget_segtab; uint32_t nput_segtab; @@ -141,29 +121,44 @@ struct pmap_segtab_info { #else #define SEGTAB_ADD(n, v) ((void) 0) #endif -} pmap_segtab_info; +#ifdef PMAP_PTP_CACHE + struct pgflist ptp_pgflist; /* Keep a list of idle page tables. 
*/ +#endif +} pmap_segtab_info = { +#ifdef PMAP_PTP_CACHE + .ptp_pgflist = LIST_HEAD_INITIALIZER(pmap_segtab_info.ptp_pgflist), +#endif +}; kmutex_t pmap_segtab_lock __cacheline_aligned; static inline struct vm_page * pmap_pte_pagealloc(void) { - return mips_pmap_alloc_poolpage(UVM_PGA_ZERO|UVM_PGA_USERESERVE); + struct vm_page *pg; + + pg = PMAP_ALLOC_POOLPAGE(UVM_PGA_ZERO|UVM_PGA_USERESERVE); + if (pg) { +#ifdef UVM_PAGE_TRKOWN + pg->owner_tag = NULL; +#endif + UVM_PAGE_OWN(pg, "pmap-ptp"); + } + + return pg; } -static inline pt_entry_t * +static inline pt_entry_t * pmap_segmap(struct pmap *pmap, vaddr_t va) { - union segtab *stp = pmap->pm_segtab; - KASSERT(!MIPS_KSEG0_P(va)); - KASSERT(!MIPS_KSEG1_P(va)); + pmap_segtab_t *stp = pmap->pm_segtab; + KASSERT(pmap != pmap_kernel() || !pmap_md_direct_mapped_vaddr_p(va)); #ifdef _LP64 - KASSERT(!MIPS_XKPHYS_P(va)); stp = stp->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)]; if (stp == NULL) return NULL; #endif - + return stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)]; } @@ -178,56 +173,74 @@ pmap_pte_lookup(pmap_t pmap, vaddr_t va) } static void -pmap_segtab_free(union segtab *stp) +pmap_segtab_free(pmap_segtab_t *stp) { /* * Insert the segtab into the segtab freelist. 
*/ mutex_spin_enter(&pmap_segtab_lock); - stp->seg_tab[0] = (void *) pmap_segtab_info.free_segtab; + stp->seg_seg[0] = pmap_segtab_info.free_segtab; pmap_segtab_info.free_segtab = stp; SEGTAB_ADD(nput, 1); mutex_spin_exit(&pmap_segtab_lock); } static void -pmap_segtab_release(union segtab *stp, u_int level) +pmap_segtab_release(pmap_t pmap, pmap_segtab_t **stp_p, bool free_stp, + pte_callback_t callback, uintptr_t flags, + vaddr_t va, vsize_t vinc) { + pmap_segtab_t *stp = *stp_p; - for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) { - paddr_t pa; + for (size_t i = va / vinc; i < PMAP_SEGTABSIZE; i++, va += vinc) { #ifdef _LP64 - if (level > 0) { + if (vinc > NBSEG) { if (stp->seg_seg[i] != NULL) { - pmap_segtab_release(stp->seg_seg[i], level - 1); - stp->seg_seg[i] = NULL; + pmap_segtab_release(pmap, &stp->seg_seg[i], + true, callback, flags, va, vinc / NSEGPG); + KASSERT(stp->seg_seg[i] == NULL); } continue; } #endif + KASSERT(vinc == NBSEG); /* get pointer to segment map */ pt_entry_t *pte = stp->seg_tab[i]; if (pte == NULL) continue; -#ifdef PARANOIADIAG + + /* + * If our caller wants a callback, do so. 
+ */ + if (callback != NULL) { + (*callback)(pmap, va, va + vinc, pte, flags); + } +#ifdef DEBUG for (size_t j = 0; j < NPTEPG; j++) { - if ((pte + j)->pt_entry) - panic("pmap_destroy: segmap not empty"); + if (!pte_zero_p(pte[j])) + panic("%s: pte entry %p not 0 (%#"PRIxPTE")", + __func__, &pte[j], pte_value(pte[j])); } #endif - - /* No need to flush page here as unmap poolpage does it */ -#ifdef _LP64 - KASSERT(MIPS_XKPHYS_P(pte)); + // PMAP_UNMAP_POOLPAGE should handle any VCA issues itself + paddr_t pa = PMAP_UNMAP_POOLPAGE((vaddr_t)pte); + struct vm_page *pg = PHYS_TO_VM_PAGE(pa); +#ifdef PMAP_PTP_CACHE + mutex_spin_enter(&pmap_segtab_lock); + LIST_INSERT_HEAD(&pmap_segtab_info.ptp_pgflist, pg, listq.list); + mutex_spin_exit(&pmap_segtab_lock); +#else + uvm_pagefree(pg); #endif - pa = mips_pmap_unmap_poolpage((vaddr_t)pte); - uvm_pagefree(PHYS_TO_VM_PAGE(pa)); stp->seg_tab[i] = NULL; } - pmap_segtab_free(stp); + if (free_stp) { + pmap_segtab_free(stp); + *stp_p = NULL; + } } /* @@ -242,17 +255,16 @@ pmap_segtab_release(union segtab *stp, u * the map will be used in software only, and * is bounded by that size. 
*/ -static union segtab * +static pmap_segtab_t * pmap_segtab_alloc(void) { - union segtab *stp; + pmap_segtab_t *stp; again: mutex_spin_enter(&pmap_segtab_lock); if (__predict_true((stp = pmap_segtab_info.free_segtab) != NULL)) { - pmap_segtab_info.free_segtab = - (union segtab *)stp->seg_tab[0]; - stp->seg_tab[0] = NULL; + pmap_segtab_info.free_segtab = stp->seg_seg[0]; + stp->seg_seg[0] = NULL; SEGTAB_ADD(nget, 1); } mutex_spin_exit(&pmap_segtab_lock); @@ -270,30 +282,27 @@ pmap_segtab_alloc(void) SEGTAB_ADD(npage, 1); const paddr_t stp_pa = VM_PAGE_TO_PHYS(stp_pg); -#ifdef _LP64 - KASSERT(mips_options.mips3_xkphys_cached); -#endif - stp = (union segtab *)mips_pmap_map_poolpage(stp_pa); + stp = (pmap_segtab_t *)PMAP_MAP_POOLPAGE(stp_pa); const size_t n = NBPG / sizeof(*stp); if (n > 1) { /* * link all the segtabs in this page together */ for (size_t i = 1; i < n - 1; i++) { - stp[i].seg_tab[0] = (void *)&stp[i+1]; + stp[i].seg_seg[0] = &stp[i+1]; } /* * Now link the new segtabs into the free segtab list. */ mutex_spin_enter(&pmap_segtab_lock); - stp[n-1].seg_tab[0] = (void *)pmap_segtab_info.free_segtab; + stp[n-1].seg_seg[0] = pmap_segtab_info.free_segtab; pmap_segtab_info.free_segtab = stp + 1; SEGTAB_ADD(nput, n - 1); mutex_spin_exit(&pmap_segtab_lock); } } -#ifdef PARANOIADIAG +#ifdef DEBUG for (size_t i = 0; i < PMAP_SEGTABSIZE; i++) { if (stp->seg_tab[i] != 0) panic("%s: pm_segtab.seg_tab[%zu] != 0", __func__, i); @@ -318,18 +327,18 @@ pmap_segtab_init(pmap_t pmap) * no valid mappings. 
*/ void -pmap_segtab_destroy(pmap_t pmap) +pmap_segtab_destroy(pmap_t pmap, pte_callback_t func, uintptr_t flags) { - union segtab *stp = pmap->pm_segtab; - - if (stp == NULL) + if (pmap->pm_segtab == NULL) return; #ifdef _LP64 - pmap_segtab_release(stp, 1); + const vsize_t vinc = NBXSEG; #else - pmap_segtab_release(stp, 0); + const vsize_t vinc = NBSEG; #endif + pmap_segtab_release(pmap, &pmap->pm_segtab, + func == NULL, func, flags, pmap->pm_minaddr, vinc); } /* @@ -368,9 +377,8 @@ pmap_pte_process(pmap_t pmap, vaddr_t sv __func__, pmap, sva, eva, callback, flags); #endif while (sva < eva) { - vaddr_t lastseg_va = mips_trunc_seg(sva) + NBSEG; - KASSERT(lastseg_va != 0); - if (lastseg_va > eva) + vaddr_t lastseg_va = pmap_trunc_seg(sva) + NBSEG; + if (lastseg_va == 0 || lastseg_va > eva) lastseg_va = eva; /* @@ -399,18 +407,18 @@ pmap_pte_process(pmap_t pmap, vaddr_t sv pt_entry_t * pmap_pte_reserve(pmap_t pmap, vaddr_t va, int flags) { - union segtab *stp = pmap->pm_segtab; + pmap_segtab_t *stp = pmap->pm_segtab; pt_entry_t *pte; pte = pmap_pte_lookup(pmap, va); if (__predict_false(pte == NULL)) { #ifdef _LP64 - union segtab ** const stp_p = + pmap_segtab_t ** const stp_p = &stp->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)]; if (__predict_false((stp = *stp_p) == NULL)) { - union segtab *nstp = pmap_segtab_alloc(); + pmap_segtab_t *nstp = pmap_segtab_alloc(); #ifdef MULTIPROCESSOR - union segtab *ostp = atomic_cas_ptr(stp_p, NULL, nstp); + pmap_segtab_t *ostp = atomic_cas_ptr(stp_p, NULL, nstp); if (__predict_false(ostp != NULL)) { pmap_segtab_free(nstp); nstp = ostp; @@ -422,7 +430,17 @@ pmap_pte_reserve(pmap_t pmap, vaddr_t va } KASSERT(stp == pmap->pm_segtab->seg_seg[(va >> XSEGSHIFT) & (NSEGPG - 1)]); #endif /* _LP64 */ - struct vm_page * const pg = pmap_pte_pagealloc(); + struct vm_page *pg = NULL; +#ifdef PMAP_PTP_CACHE + mutex_spin_enter(&pmap_segtab_lock); + if ((pg = LIST_FIRST(&pmap_segtab_info.ptp_pgflist)) != NULL) { + LIST_REMOVE(pg, listq.list); + 
KASSERT(LIST_FIRST(&pmap_segtab_info.ptp_pgflist) != pg); + } + mutex_spin_exit(&pmap_segtab_lock); +#endif + if (pg == NULL) + pg = pmap_pte_pagealloc(); if (pg == NULL) { if (flags & PMAP_CANFAIL) return NULL; @@ -431,10 +449,7 @@ pmap_pte_reserve(pmap_t pmap, vaddr_t va } const paddr_t pa = VM_PAGE_TO_PHYS(pg); -#ifdef _LP64 - KASSERT(mips_options.mips3_xkphys_cached); -#endif - pte = (pt_entry_t *)mips_pmap_map_poolpage(pa); + pte = (pt_entry_t *)PMAP_MAP_POOLPAGE(pa); pt_entry_t ** const pte_p = &stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)]; #ifdef MULTIPROCESSOR @@ -444,8 +459,15 @@ pmap_pte_reserve(pmap_t pmap, vaddr_t va * free the page we just allocated. */ if (__predict_false(opte != NULL)) { - mips_pmap_unmap_poolpage(pa); +#ifdef PMAP_PTP_CACHE + mutex_spin_enter(&pmap_segtab_lock); + LIST_INSERT_HEAD(&pmap_segtab_info.ptp_pgflist, + pg, listq.list); + mutex_spin_exit(&pmap_segtab_lock); +#else + PMAP_UNMAP_POOLPAGE((vaddr_t)pte); uvm_pagefree(pg); +#endif pte = opte; } #else @@ -453,10 +475,11 @@ pmap_pte_reserve(pmap_t pmap, vaddr_t va #endif KASSERT(pte == stp->seg_tab[(va >> SEGSHIFT) & (PMAP_SEGTABSIZE - 1)]); -#ifdef PARANOIADIAG +#ifdef DEBUG for (size_t i = 0; i < NPTEPG; i++) { - if ((pte+i)->pt_entry) - panic("pmap_enter: new segmap not empty"); + if (!pte_zero_p(pte[i])) + panic("%s: new segmap %p not empty @ %zu", + __func__, pte, i); } #endif pte += (va >> PGSHIFT) & (NPTEPG - 1); Index: src/sys/arch/mips/mips/vm_machdep.c diff -u src/sys/arch/mips/mips/vm_machdep.c:1.144 src/sys/arch/mips/mips/vm_machdep.c:1.145 --- src/sys/arch/mips/mips/vm_machdep.c:1.144 Sat Jun 6 21:46:17 2015 +++ src/sys/arch/mips/mips/vm_machdep.c Thu Jun 11 08:22:09 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: vm_machdep.c,v 1.144 2015/06/06 21:46:17 matt Exp $ */ +/* $NetBSD: vm_machdep.c,v 1.145 2015/06/11 08:22:09 matt Exp $ */ /* * Copyright (c) 1988 University of Utah. 
@@ -39,7 +39,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.144 2015/06/06 21:46:17 matt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: vm_machdep.c,v 1.145 2015/06/11 08:22:09 matt Exp $"); #include "opt_ddb.h" #include "opt_coredump.h" @@ -165,7 +165,7 @@ cpu_uarea_alloc(bool system) { struct pglist pglist; #ifdef _LP64 - const paddr_t high = mips_avail_end; + const paddr_t high = pmap_limits.avail_end; #else const paddr_t high = MIPS_KSEG1_START - MIPS_KSEG0_START; /* @@ -173,7 +173,7 @@ cpu_uarea_alloc(bool system) * system lwp and we have memory that can't be mapped via KSEG0. * If */ - if (!system && high > mips_avail_end) + if (!system && high > pmap_limits.avail_end) return NULL; #endif int error; @@ -182,7 +182,7 @@ cpu_uarea_alloc(bool system) * Allocate a new physically contiguous uarea which can be * direct-mapped. */ - error = uvm_pglistalloc(USPACE, mips_avail_start, high, + error = uvm_pglistalloc(USPACE, pmap_limits.avail_start, high, USPACE_ALIGN, 0, &pglist, 1, 1); if (error) { #ifdef _LP64 @@ -198,12 +198,12 @@ cpu_uarea_alloc(bool system) const struct vm_page * const pg = TAILQ_FIRST(&pglist); KASSERT(pg != NULL); const paddr_t pa = VM_PAGE_TO_PHYS(pg); - KASSERTMSG(pa >= mips_avail_start, - "pa (%#"PRIxPADDR") < mips_avail_start (%#"PRIxPADDR")", - pa, mips_avail_start); - KASSERTMSG(pa < mips_avail_end, - "pa (%#"PRIxPADDR") >= mips_avail_end (%#"PRIxPADDR")", - pa, mips_avail_end); + KASSERTMSG(pa >= pmap_limits.avail_start, + "pa (%#"PRIxPADDR") < pmap_limits.avail_start (%#"PRIxPADDR")", + pa, pmap_limits.avail_start); + KASSERTMSG(pa < pmap_limits.avail_end, + "pa (%#"PRIxPADDR") >= pmap_limits.avail_end (%#"PRIxPADDR")", + pa, pmap_limits.avail_end); /* * we need to return a direct-mapped VA for the pa. 
Index: src/sys/arch/mipsco/mipsco/bus_dma.c diff -u src/sys/arch/mipsco/mipsco/bus_dma.c:1.28 src/sys/arch/mipsco/mipsco/bus_dma.c:1.29 --- src/sys/arch/mipsco/mipsco/bus_dma.c:1.28 Tue Oct 2 23:54:53 2012 +++ src/sys/arch/mipsco/mipsco/bus_dma.c Thu Jun 11 08:22:09 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: bus_dma.c,v 1.28 2012/10/02 23:54:53 christos Exp $ */ +/* $NetBSD: bus_dma.c,v 1.29 2015/06/11 08:22:09 matt Exp $ */ /*- * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. @@ -31,7 +31,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.28 2012/10/02 23:54:53 christos Exp $"); +__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.29 2015/06/11 08:22:09 matt Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -490,8 +490,9 @@ int _bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags) { - return (_bus_dmamem_alloc_range(t, size, alignment, boundary, - segs, nsegs, rsegs, flags, mips_avail_start, trunc_page(mips_avail_end))); + return _bus_dmamem_alloc_range(t, size, alignment, boundary, + segs, nsegs, rsegs, flags, pmap_limits.avail_start, + trunc_page(pmap_limits.avail_end)); } /* Index: src/sys/arch/newsmips/newsmips/bus.c diff -u src/sys/arch/newsmips/newsmips/bus.c:1.32 src/sys/arch/newsmips/newsmips/bus.c:1.33 --- src/sys/arch/newsmips/newsmips/bus.c:1.32 Tue Oct 2 23:54:53 2012 +++ src/sys/arch/newsmips/newsmips/bus.c Thu Jun 11 08:22:09 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: bus.c,v 1.32 2012/10/02 23:54:53 christos Exp $ */ +/* $NetBSD: bus.c,v 1.33 2015/06/11 08:22:09 matt Exp $ */ /* * Copyright (c) 1998 The NetBSD Foundation, Inc. 
@@ -31,7 +31,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: bus.c,v 1.32 2012/10/02 23:54:53 christos Exp $"); +__KERNEL_RCSID(0, "$NetBSD: bus.c,v 1.33 2015/06/11 08:22:09 matt Exp $"); #include <sys/param.h> #include <sys/systm.h> @@ -705,7 +705,8 @@ _bus_dmamem_alloc(bus_dma_tag_t t, bus_s { return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary, segs, nsegs, rsegs, flags, - mips_avail_start /*low*/, mips_avail_end - PAGE_SIZE /*high*/)); + pmap_limits.avail_start /*low*/, + pmap_limits.avail_end - PAGE_SIZE /*high*/)); } /* Index: src/sys/arch/pmax/pmax/bus_dma.c diff -u src/sys/arch/pmax/pmax/bus_dma.c:1.57 src/sys/arch/pmax/pmax/bus_dma.c:1.58 --- src/sys/arch/pmax/pmax/bus_dma.c:1.57 Tue Oct 2 23:54:54 2012 +++ src/sys/arch/pmax/pmax/bus_dma.c Thu Jun 11 08:22:09 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: bus_dma.c,v 1.57 2012/10/02 23:54:54 christos Exp $ */ +/* $NetBSD: bus_dma.c,v 1.58 2015/06/11 08:22:09 matt Exp $ */ /*- * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc. 
@@ -31,7 +31,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.57 2012/10/02 23:54:54 christos Exp $"); +__KERNEL_RCSID(0, "$NetBSD: bus_dma.c,v 1.58 2015/06/11 08:22:09 matt Exp $"); #include "opt_cputype.h" @@ -660,7 +660,8 @@ _bus_dmamem_alloc(bus_dma_tag_t t, bus_s { return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary, segs, nsegs, rsegs, flags, - mips_avail_start /*low*/, mips_avail_end - PAGE_SIZE /*high*/)); + pmap_limits.avail_start /*low*/, + pmap_limits.avail_end - PAGE_SIZE /*high*/)); } /* Index: src/sys/arch/sgimips/ioc/if_le_oioc.c diff -u src/sys/arch/sgimips/ioc/if_le_oioc.c:1.4 src/sys/arch/sgimips/ioc/if_le_oioc.c:1.5 --- src/sys/arch/sgimips/ioc/if_le_oioc.c:1.4 Sun Feb 20 07:59:50 2011 +++ src/sys/arch/sgimips/ioc/if_le_oioc.c Thu Jun 11 08:22:09 2015 @@ -1,4 +1,4 @@ -/* $NetBSD: if_le_oioc.c,v 1.4 2011/02/20 07:59:50 matt Exp $ */ +/* $NetBSD: if_le_oioc.c,v 1.5 2015/06/11 08:22:09 matt Exp $ */ /* * Copyright (c) 2009 Stephen M. Rumble @@ -25,7 +25,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: if_le_oioc.c,v 1.4 2011/02/20 07:59:50 matt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: if_le_oioc.c,v 1.5 2015/06/11 08:22:09 matt Exp $"); #include "opt_inet.h" @@ -188,7 +188,8 @@ le_attach(device_t parent, device_t self /* Allocate a contiguous chunk of physical memory for the le buffer. */ error = uvm_pglistalloc(OIOC_LANCE_NPAGES * PAGE_SIZE, - mips_avail_start, mips_avail_end, PAGE_SIZE, 0, &mlist, 1, 0); + pmap_limits.avail_start, pmap_limits.avail_end, PAGE_SIZE, 0, + &mlist, 1, 0); if (error) { aprint_error(": failed to allocate ioc<->lance buffer space, " "error = %d\n", error);