Module Name: src Committed By: para Date: Wed Feb 1 23:43:49 UTC 2012
Modified Files: src/sys/kern: subr_vmem.c vfs_bio.c src/sys/uvm: uvm_glue.c uvm_km.c uvm_pdaemon.c Log Message: allocate uareas and buffers from kernel_map again; add code to drain pools if kmem_arena runs out of space To generate a diff of this commit: cvs rdiff -u -r1.70 -r1.71 src/sys/kern/subr_vmem.c cvs rdiff -u -r1.235 -r1.236 src/sys/kern/vfs_bio.c cvs rdiff -u -r1.153 -r1.154 src/sys/uvm/uvm_glue.c cvs rdiff -u -r1.115 -r1.116 src/sys/uvm/uvm_km.c cvs rdiff -u -r1.104 -r1.105 src/sys/uvm/uvm_pdaemon.c Please note that diffs are not public domain; they are subject to the copyright notices on the relevant files.
Modified files: Index: src/sys/kern/subr_vmem.c diff -u src/sys/kern/subr_vmem.c:1.70 src/sys/kern/subr_vmem.c:1.71 --- src/sys/kern/subr_vmem.c:1.70 Mon Jan 30 17:35:18 2012 +++ src/sys/kern/subr_vmem.c Wed Feb 1 23:43:49 2012 @@ -1,4 +1,4 @@ -/* $NetBSD: subr_vmem.c,v 1.70 2012/01/30 17:35:18 para Exp $ */ +/* $NetBSD: subr_vmem.c,v 1.71 2012/02/01 23:43:49 para Exp $ */ /*- * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi, @@ -34,7 +34,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.70 2012/01/30 17:35:18 para Exp $"); +__KERNEL_RCSID(0, "$NetBSD: subr_vmem.c,v 1.71 2012/02/01 23:43:49 para Exp $"); #if defined(_KERNEL) #include "opt_ddb.h" @@ -1245,6 +1245,11 @@ retry: /* XXX */ if ((flags & VM_SLEEP) != 0) { +#if defined(_KERNEL) && !defined(_RUMPKERNEL) + mutex_spin_enter(&uvm_fpageqlock); + uvm_kick_pdaemon(); + mutex_spin_exit(&uvm_fpageqlock); +#endif VMEM_LOCK(vm); VMEM_CONDVAR_WAIT(vm); VMEM_UNLOCK(vm); Index: src/sys/kern/vfs_bio.c diff -u src/sys/kern/vfs_bio.c:1.235 src/sys/kern/vfs_bio.c:1.236 --- src/sys/kern/vfs_bio.c:1.235 Sat Jan 28 00:00:06 2012 +++ src/sys/kern/vfs_bio.c Wed Feb 1 23:43:49 2012 @@ -1,4 +1,4 @@ -/* $NetBSD: vfs_bio.c,v 1.235 2012/01/28 00:00:06 rmind Exp $ */ +/* $NetBSD: vfs_bio.c,v 1.236 2012/02/01 23:43:49 para Exp $ */ /*- * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc. @@ -123,7 +123,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.235 2012/01/28 00:00:06 rmind Exp $"); +__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.236 2012/02/01 23:43:49 para Exp $"); #include "opt_bufcache.h" @@ -231,21 +231,18 @@ static struct vm_map *buf_map; static void * bufpool_page_alloc(struct pool *pp, int flags) { - const vm_flag_t vflags = (flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP; - vmem_addr_t va; - int ret; - ret = uvm_km_kmem_alloc(kmem_va_arena, MAXBSIZE, - vflags | VM_INSTANTFIT, &va); - - return ret ? 
NULL : (void *)va; + return (void *)uvm_km_alloc(buf_map, + MAXBSIZE, MAXBSIZE, + ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT|UVM_KMF_TRYLOCK) + | UVM_KMF_WIRED); } static void bufpool_page_free(struct pool *pp, void *v) { - uvm_km_kmem_free(kmem_va_arena, (vaddr_t)v, MAXBSIZE); + uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED); } static struct pool_allocator bufmempool_allocator = { @@ -491,7 +488,7 @@ bufinit(void) pa = (size <= PAGE_SIZE && use_std) ? &pool_allocator_nointr : &bufmempool_allocator; - pool_init(pp, size, 0, 0, PR_NOALIGN, name, pa, IPL_NONE); + pool_init(pp, size, 0, 0, 0, name, pa, IPL_NONE); pool_setlowat(pp, 1); pool_sethiwat(pp, 1); } Index: src/sys/uvm/uvm_glue.c diff -u src/sys/uvm/uvm_glue.c:1.153 src/sys/uvm/uvm_glue.c:1.154 --- src/sys/uvm/uvm_glue.c:1.153 Fri Jan 27 19:48:41 2012 +++ src/sys/uvm/uvm_glue.c Wed Feb 1 23:43:49 2012 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_glue.c,v 1.153 2012/01/27 19:48:41 para Exp $ */ +/* $NetBSD: uvm_glue.c,v 1.154 2012/02/01 23:43:49 para Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. @@ -62,7 +62,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.153 2012/01/27 19:48:41 para Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.154 2012/02/01 23:43:49 para Exp $"); #include "opt_kgdb.h" #include "opt_kstack.h" @@ -240,15 +240,26 @@ static pool_cache_t uvm_uarea_system_cac static void * uarea_poolpage_alloc(struct pool *pp, int flags) { - if (USPACE_ALIGN == 0) { - int rc; - vmem_addr_t va; - - rc = uvm_km_kmem_alloc(kmem_va_arena, USPACE, - ((flags & PR_WAITOK) ? VM_SLEEP: VM_NOSLEEP) | - VM_INSTANTFIT, &va); - return (rc != 0) ? NULL : (void *)va; +#if defined(PMAP_MAP_POOLPAGE) + if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) { + struct vm_page *pg; + vaddr_t va; + +#if defined(PMAP_ALLOC_POOLPAGE) + pg = PMAP_ALLOC_POOLPAGE( + ((flags & PR_WAITOK) == 0 ? 
UVM_KMF_NOWAIT : 0)); +#else + pg = uvm_pagealloc(NULL, 0, NULL, + ((flags & PR_WAITOK) == 0 ? UVM_KMF_NOWAIT : 0)); +#endif + if (pg == NULL) + return NULL; + va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg)); + if (va == 0) + uvm_pagefree(pg); + return (void *)va; } +#endif #if defined(__HAVE_CPU_UAREA_ROUTINES) void *va = cpu_uarea_alloc(false); if (va) @@ -263,10 +274,16 @@ uarea_poolpage_alloc(struct pool *pp, in static void uarea_poolpage_free(struct pool *pp, void *addr) { - if (USPACE_ALIGN == 0) { - uvm_km_kmem_free(kmem_va_arena, (vmem_addr_t)addr, USPACE); +#if defined(PMAP_MAP_POOLPAGE) + if (USPACE == PAGE_SIZE && USPACE_ALIGN == 0) { + paddr_t pa; + + pa = PMAP_UNMAP_POOLPAGE((vaddr_t) addr); + KASSERT(pa != 0); + uvm_pagefree(PHYS_TO_VM_PAGE(pa)); return; } +#endif #if defined(__HAVE_CPU_UAREA_ROUTINES) if (cpu_uarea_free(addr)) return; Index: src/sys/uvm/uvm_km.c diff -u src/sys/uvm/uvm_km.c:1.115 src/sys/uvm/uvm_km.c:1.116 --- src/sys/uvm/uvm_km.c:1.115 Wed Feb 1 02:22:27 2012 +++ src/sys/uvm/uvm_km.c Wed Feb 1 23:43:49 2012 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_km.c,v 1.115 2012/02/01 02:22:27 matt Exp $ */ +/* $NetBSD: uvm_km.c,v 1.116 2012/02/01 23:43:49 para Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. 
@@ -120,7 +120,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.115 2012/02/01 02:22:27 matt Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.116 2012/02/01 23:43:49 para Exp $"); #include "opt_uvmhist.h" @@ -167,8 +167,8 @@ uvm_km_bootstrap(vaddr_t start, vaddr_t { vaddr_t base = VM_MIN_KERNEL_ADDRESS; - kmemsize = MIN(((((vsize_t)(end - start)) / 3) * 2), - ((((vsize_t)uvmexp.npages) * PAGE_SIZE))); + kmemsize = MIN((((vsize_t)(end - start)) / 3), + ((((vsize_t)uvmexp.npages) * PAGE_SIZE) / 2)); kmemsize = round_page(kmemsize); /* Index: src/sys/uvm/uvm_pdaemon.c diff -u src/sys/uvm/uvm_pdaemon.c:1.104 src/sys/uvm/uvm_pdaemon.c:1.105 --- src/sys/uvm/uvm_pdaemon.c:1.104 Fri Jan 27 19:48:42 2012 +++ src/sys/uvm/uvm_pdaemon.c Wed Feb 1 23:43:49 2012 @@ -1,4 +1,4 @@ -/* $NetBSD: uvm_pdaemon.c,v 1.104 2012/01/27 19:48:42 para Exp $ */ +/* $NetBSD: uvm_pdaemon.c,v 1.105 2012/02/01 23:43:49 para Exp $ */ /* * Copyright (c) 1997 Charles D. Cranor and Washington University. 
@@ -66,7 +66,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.104 2012/01/27 19:48:42 para Exp $"); +__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.105 2012/02/01 23:43:49 para Exp $"); #include "opt_uvmhist.h" #include "opt_readahead.h" @@ -173,7 +173,8 @@ uvm_kick_pdaemon(void) if (uvmexp.free + uvmexp.paging < uvmexp.freemin || (uvmexp.free + uvmexp.paging < uvmexp.freetarg && - uvmpdpol_needsscan_p())) { + uvmpdpol_needsscan_p()) || + uvm_km_va_starved_p()) { wakeup(&uvm.pagedaemon); } } @@ -248,10 +249,13 @@ uvm_pageout(void *arg) */ for (;;) { - bool needsscan, needsfree; + bool needsscan, needsfree, kmem_va_starved; + + kmem_va_starved = uvm_km_va_starved_p(); mutex_spin_enter(&uvm_fpageqlock); - if (uvm_pagedaemon_waiters == 0 || uvmexp.paging > 0) { + if ((uvm_pagedaemon_waiters == 0 || uvmexp.paging > 0) && + !kmem_va_starved) { UVMHIST_LOG(pdhist," <<SLEEPING>>",0,0,0,0); UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon, &uvm_fpageqlock, false, "pgdaemon", 0); @@ -320,7 +324,7 @@ uvm_pageout(void *arg) * if we don't need free memory, we're done. */ - if (!needsfree && !uvm_km_va_starved_p()) + if (!needsfree && !kmem_va_starved) continue; /*