Module Name:	src
Committed By:	rmind
Date:		Sun Feb 19 00:05:57 UTC 2012
Modified Files:
	src/sys/uvm: uvm_fault.c uvm_fault_i.h uvm_km.c uvm_loan.c uvm_map.c
	    uvm_map.h

Log Message:
Remove VM_MAP_INTRSAFE and related code.  Not used since the "kmem changes".

To generate a diff of this commit:
cvs rdiff -u -r1.193 -r1.194 src/sys/uvm/uvm_fault.c
cvs rdiff -u -r1.27 -r1.28 src/sys/uvm/uvm_fault_i.h
cvs rdiff -u -r1.120 -r1.121 src/sys/uvm/uvm_km.c
cvs rdiff -u -r1.81 -r1.82 src/sys/uvm/uvm_loan.c
cvs rdiff -u -r1.313 -r1.314 src/sys/uvm/uvm_map.c
cvs rdiff -u -r1.70 -r1.71 src/sys/uvm/uvm_map.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
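Background for the diffs below: a map flagged VM_MAP_INTRSAFE was one that
could be used from interrupt context, so vm_map_lock() and friends bypassed
the adaptive reader/writer lock and took the spin mutex map->mutex at IPL_VM
instead.  With no such maps left after the kmem rework, the locking
primitives reduce to the rwlock path alone.  As a rough illustration, here
is a user-space pthreads model of the vm_map_lock() loop that survives this
commit; toy_map and toy_map_lock are made-up stand-ins, not kernel API, and
this is a sketch of the protocol rather than the kernel code itself:

/*
 * User-space model of the retained vm_map_lock() path, for illustration
 * only.  All names here are stand-ins, not the kernel's definitions.
 */
#include <pthread.h>
#include <stdbool.h>

struct toy_map {
	pthread_rwlock_t lock;		/* models krwlock_t map->lock */
	pthread_t busy;			/* models map->busy */
	bool busy_set;			/* models map->busy != NULL */
	pthread_mutex_t misc_lock;	/* models map->misc_lock */
	pthread_cond_t cv;		/* models map->cv */
	unsigned timestamp;		/* models map->timestamp */
};

/* Mirrors the for (;;) loop that vm_map_lock() keeps after this commit. */
static void
toy_map_lock(struct toy_map *map)
{

	for (;;) {
		pthread_rwlock_wrlock(&map->lock);
		if (!map->busy_set ||
		    pthread_equal(map->busy, pthread_self())) {
			break;
		}
		/*
		 * Another thread holds the map busy: drop the write lock
		 * and sleep until the busy holder broadcasts on the cv.
		 */
		pthread_mutex_lock(&map->misc_lock);
		pthread_rwlock_unlock(&map->lock);
		if (map->busy_set)
			pthread_cond_wait(&map->cv, &map->misc_lock);
		pthread_mutex_unlock(&map->misc_lock);
	}
	map->timestamp++;
}

The retry loop exists because the write lock must be dropped before sleeping
on the condition variable; the "busy == curlwp" escape lets the LWP that has
the map marked busy re-take the write lock, which is what the shared ->
exclusive upgrade mentioned in the vm_map_lock() comment relies on.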
Modified files:

Index: src/sys/uvm/uvm_fault.c
diff -u src/sys/uvm/uvm_fault.c:1.193 src/sys/uvm/uvm_fault.c:1.194
--- src/sys/uvm/uvm_fault.c:1.193	Thu Feb  2 19:43:08 2012
+++ src/sys/uvm/uvm_fault.c	Sun Feb 19 00:05:55 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_fault.c,v 1.193 2012/02/02 19:43:08 tls Exp $	*/
+/*	$NetBSD: uvm_fault.c,v 1.194 2012/02/19 00:05:55 rmind Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.193 2012/02/02 19:43:08 tls Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.194 2012/02/19 00:05:55 rmind Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -2400,8 +2400,6 @@ uvm_fault_unwire_locked(struct vm_map *m
 	paddr_t pa;
 	struct vm_page *pg;
 
-	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
-
 	/*
 	 * we assume that the area we are unwiring has actually been wired
 	 * in the first place.  this means that we should be able to extract

Index: src/sys/uvm/uvm_fault_i.h
diff -u src/sys/uvm/uvm_fault_i.h:1.27 src/sys/uvm/uvm_fault_i.h:1.28
--- src/sys/uvm/uvm_fault_i.h:1.27	Sun Jun 12 03:36:03 2011
+++ src/sys/uvm/uvm_fault_i.h	Sun Feb 19 00:05:56 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_fault_i.h,v 1.27 2011/06/12 03:36:03 rmind Exp $	*/
+/*	$NetBSD: uvm_fault_i.h,v 1.28 2012/02/19 00:05:56 rmind Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -107,16 +107,7 @@ uvmfault_lookup(struct uvm_faultinfo *uf
 	 * only be two levels so we won't loop very long.
 	 */
 
-	/*CONSTCOND*/
-	while (1) {
-		/*
-		 * Make sure this is not an "interrupt safe" map.
-		 * Such maps are never supposed to be involved in
-		 * a fault.
-		 */
-		if (ufi->map->flags & VM_MAP_INTRSAFE)
-			return (false);
-
+	for (;;) {
 		/*
 		 * lock map
 		 */

Index: src/sys/uvm/uvm_km.c
diff -u src/sys/uvm/uvm_km.c:1.120 src/sys/uvm/uvm_km.c:1.121
--- src/sys/uvm/uvm_km.c:1.120	Fri Feb 10 17:35:47 2012
+++ src/sys/uvm/uvm_km.c	Sun Feb 19 00:05:56 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_km.c,v 1.120 2012/02/10 17:35:47 para Exp $	*/
+/*	$NetBSD: uvm_km.c,v 1.121 2012/02/19 00:05:56 rmind Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -120,7 +120,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.120 2012/02/10 17:35:47 para Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.121 2012/02/19 00:05:56 rmind Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -499,15 +499,13 @@ uvm_km_check_empty(struct vm_map *map, v
 			panic("uvm_km_check_empty: va %p has pa 0x%llx",
 			    (void *)va, (long long)pa);
 		}
-		if ((map->flags & VM_MAP_INTRSAFE) == 0) {
-			mutex_enter(uvm_kernel_object->vmobjlock);
-			pg = uvm_pagelookup(uvm_kernel_object,
-			    va - vm_map_min(kernel_map));
-			mutex_exit(uvm_kernel_object->vmobjlock);
-			if (pg) {
-				panic("uvm_km_check_empty: "
-				    "has page hashed at %p", (const void *)va);
-			}
+		mutex_enter(uvm_kernel_object->vmobjlock);
+		pg = uvm_pagelookup(uvm_kernel_object,
+		    va - vm_map_min(kernel_map));
+		mutex_exit(uvm_kernel_object->vmobjlock);
+		if (pg) {
+			panic("uvm_km_check_empty: "
+			    "has page hashed at %p", (const void *)va);
 		}
 	}
 }

Index: src/sys/uvm/uvm_loan.c
diff -u src/sys/uvm/uvm_loan.c:1.81 src/sys/uvm/uvm_loan.c:1.82
--- src/sys/uvm/uvm_loan.c:1.81	Sat Aug  6 17:25:03 2011
+++ src/sys/uvm/uvm_loan.c	Sun Feb 19 00:05:56 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_loan.c,v 1.81 2011/08/06 17:25:03 rmind Exp $	*/
+/*	$NetBSD: uvm_loan.c,v 1.82 2012/02/19 00:05:56 rmind Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.81 2011/08/06 17:25:03 rmind Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_loan.c,v 1.82 2012/02/19 00:05:56 rmind Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -245,7 +245,6 @@ uvm_loan(struct vm_map *map, vaddr_t sta
 
 	KASSERT(((flags & UVM_LOAN_TOANON) == 0) ^
 		((flags & UVM_LOAN_TOPAGE) == 0));
-	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
 
 	/*
 	 * "output" is a pointer to the current place to put the loaned page.

Index: src/sys/uvm/uvm_map.c
diff -u src/sys/uvm/uvm_map.c:1.313 src/sys/uvm/uvm_map.c:1.314
--- src/sys/uvm/uvm_map.c:1.313	Sun Feb 12 20:28:14 2012
+++ src/sys/uvm/uvm_map.c	Sun Feb 19 00:05:56 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_map.c,v 1.313 2012/02/12 20:28:14 martin Exp $	*/
+/*	$NetBSD: uvm_map.c,v 1.314 2012/02/19 00:05:56 rmind Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.313 2012/02/12 20:28:14 martin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.314 2012/02/19 00:05:56 rmind Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvmhist.h"
@@ -607,8 +607,6 @@ _uvm_tree_sanity(struct vm_map *map)
 /*
  * vm_map_lock: acquire an exclusive (write) lock on a map.
  *
- * => Note that "intrsafe" maps use only exclusive, spin locks.
- *
  * => The locking protocol provides for guaranteed upgrade from shared ->
  *    exclusive by whichever thread currently has the map marked busy.
  *    See "LOCKING PROTOCOL NOTES" in uvm_map.h.  This is horrible; among
@@ -620,24 +618,18 @@ void
 vm_map_lock(struct vm_map *map)
 {
 
-	if ((map->flags & VM_MAP_INTRSAFE) != 0) {
-		mutex_spin_enter(&map->mutex);
-		return;
-	}
-
 	for (;;) {
 		rw_enter(&map->lock, RW_WRITER);
-		if (map->busy == NULL)
-			break;
-		if (map->busy == curlwp)
+		if (map->busy == NULL || map->busy == curlwp) {
 			break;
+		}
 		mutex_enter(&map->misc_lock);
 		rw_exit(&map->lock);
-		if (map->busy != NULL)
+		if (map->busy != NULL) {
 			cv_wait(&map->cv, &map->misc_lock);
+		}
 		mutex_exit(&map->misc_lock);
 	}
-
 	map->timestamp++;
 }
 
@@ -649,15 +641,13 @@ bool
 vm_map_lock_try(struct vm_map *map)
 {
 
-	if ((map->flags & VM_MAP_INTRSAFE) != 0)
-		return mutex_tryenter(&map->mutex);
-	if (!rw_tryenter(&map->lock, RW_WRITER))
+	if (!rw_tryenter(&map->lock, RW_WRITER)) {
 		return false;
+	}
 	if (map->busy != NULL) {
 		rw_exit(&map->lock);
 		return false;
 	}
-
 	map->timestamp++;
 	return true;
 }
@@ -670,13 +660,9 @@ void
 vm_map_unlock(struct vm_map *map)
 {
 
-	if ((map->flags & VM_MAP_INTRSAFE) != 0)
-		mutex_spin_exit(&map->mutex);
-	else {
-		KASSERT(rw_write_held(&map->lock));
-		KASSERT(map->busy == NULL || map->busy == curlwp);
-		rw_exit(&map->lock);
-	}
+	KASSERT(rw_write_held(&map->lock));
+	KASSERT(map->busy == NULL || map->busy == curlwp);
+	rw_exit(&map->lock);
 }
 
 /*
@@ -711,21 +697,17 @@ void
 vm_map_lock_read(struct vm_map *map)
 {
 
-	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
-
 	rw_enter(&map->lock, RW_READER);
 }
 
 /*
  * vm_map_unlock_read: release a shared lock on a map.
  */
-
+ 
 void
vm_map_unlock_read(struct vm_map *map)
 {
 
-	KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
-
 	rw_exit(&map->lock);
 }
 
@@ -756,11 +738,7 @@ bool
 vm_map_locked_p(struct vm_map *map)
 {
 
-	if ((map->flags & VM_MAP_INTRSAFE) != 0) {
-		return mutex_owned(&map->mutex);
-	} else {
-		return rw_write_held(&map->lock);
-	}
+	return rw_write_held(&map->lock);
 }
 
 /*
@@ -775,14 +753,14 @@ uvm_mapent_alloc(struct vm_map *map, int
 
 	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
 
 	me = pool_cache_get(&uvm_map_entry_cache, pflags);
-	if (__predict_false(me == NULL))
+	if (__predict_false(me == NULL)) {
 		return NULL;
+	}
 	me->flags = 0;
-
 	UVMHIST_LOG(maphist, "<- new entry=0x%x [kentry=%d]", me,
-	    ((map->flags & VM_MAP_INTRSAFE) != 0 || map == kernel_map), 0, 0);
-	return (me);
+	    (map == kernel_map), 0, 0);
+	return me;
 }
 
 /*
@@ -1131,8 +1109,7 @@ uvm_map_prepare(struct vm_map *map, vadd
 	 * detect a popular device driver bug.
 	 */
 
-	KASSERT(doing_shutdown || curlwp != NULL ||
-	    (map->flags & VM_MAP_INTRSAFE));
+	KASSERT(doing_shutdown || curlwp != NULL);
 
 	/*
 	 * zero-sized mapping doesn't make any sense.
@@ -1156,11 +1133,9 @@ uvm_map_prepare(struct vm_map *map, vadd
 	/*
	 * figure out where to put new VM range
 	 */
-
retry:
 	if (vm_map_lock_try(map) == false) {
-		if ((flags & UVM_FLAG_TRYLOCK) != 0 &&
-		    (map->flags & VM_MAP_INTRSAFE) == 0) {
+		if ((flags & UVM_FLAG_TRYLOCK) != 0) {
 			return EAGAIN;
 		}
 		vm_map_lock(map); /* could sleep here */
@@ -4180,6 +4155,7 @@ uvmspace_free(struct vmspace *vm)
 	if (vm->vm_shm != NULL)
 		shmexit(vm);
 #endif
+
 	if (map->nentries) {
 		uvm_unmap_remove(map, vm_map_min(map), vm_map_max(map),
 		    &dead_entries, 0);
@@ -4188,8 +4164,8 @@ uvmspace_free(struct vmspace *vm)
 	}
 	KASSERT(map->nentries == 0);
 	KASSERT(map->size == 0);
+
 	mutex_destroy(&map->misc_lock);
-	mutex_destroy(&map->mutex);
 	rw_destroy(&map->lock);
 	cv_destroy(&map->cv);
 	pmap_destroy(map->pmap);
@@ -4557,7 +4533,6 @@ uvm_mapent_trymerge(struct vm_map *map, 
 void
 uvm_map_setup(struct vm_map *map, vaddr_t vmin, vaddr_t vmax, int flags)
 {
-	int ipl;
 
 	rb_tree_init(&map->rb_tree, &uvm_map_tree_ops);
 	map->header.next = map->header.prev = &map->header;
@@ -4572,19 +4547,11 @@ uvm_map_setup(struct vm_map *map, vaddr_
 	map->timestamp = 0;
 	map->busy = NULL;
 
-	if ((flags & VM_MAP_INTRSAFE) != 0) {
-		ipl = IPL_VM;
-	} else {
-		ipl = IPL_NONE;
-	}
-
 	rw_init(&map->lock);
 	cv_init(&map->cv, "vm_map");
-	mutex_init(&map->misc_lock, MUTEX_DRIVER, ipl);
-	mutex_init(&map->mutex, MUTEX_DRIVER, ipl);
+	mutex_init(&map->misc_lock, MUTEX_DRIVER, IPL_NONE);
 }
 
-
 /*
  * U N M A P  -  m a i n   e n t r y   p o i n t
 */

Index: src/sys/uvm/uvm_map.h
diff -u src/sys/uvm/uvm_map.h:1.70 src/sys/uvm/uvm_map.h:1.71
--- src/sys/uvm/uvm_map.h:1.70	Fri Jan 27 19:48:41 2012
+++ src/sys/uvm/uvm_map.h	Sun Feb 19 00:05:56 2012
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_map.h,v 1.70 2012/01/27 19:48:41 para Exp $	*/
+/*	$NetBSD: uvm_map.h,v 1.71 2012/02/19 00:05:56 rmind Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -193,8 +193,6 @@ struct vm_map_entry {
  *
 *	VM_MAP_PAGEABLE		r/o static flag; no locking required
 *
- *	VM_MAP_INTRSAFE		r/o static flag; no locking required
- *
 *	VM_MAP_WIREFUTURE	r/w; may only be set or cleared when
 *				map is write-locked.  may be tested
 *				without asserting `flags_lock'.
@@ -212,7 +210,6 @@ struct vm_map {
 	struct pmap *		pmap;		/* Physical map */
 	krwlock_t		lock;		/* Non-intrsafe lock */
 	struct lwp *		busy;		/* LWP holding map busy */
-	kmutex_t		mutex;		/* INTRSAFE lock */
 	kmutex_t		misc_lock;	/* Lock for ref_count, cv */
 	kcondvar_t		cv;		/* For signalling */
 	int			flags;		/* flags */
@@ -236,7 +233,6 @@ struct vm_map {
 
 /* vm_map flags */
 #define	VM_MAP_PAGEABLE		0x01		/* ro: entries are pageable */
-#define	VM_MAP_INTRSAFE		0x02		/* ro: interrupt safe map */
 #define	VM_MAP_WIREFUTURE	0x04		/* rw: wire future mappings */
 #define	VM_MAP_DYING		0x20		/* rw: map is being destroyed */
 #define	VM_MAP_TOPDOWN		0x40		/* ro: arrange map top-down */
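A closing note on the busy field that this cleanup keeps: the comment block
retained in uvm_map.c above promises a guaranteed shared -> exclusive
upgrade for whichever LWP has the map marked busy (see "LOCKING PROTOCOL
NOTES" in uvm_map.h).  Continuing the user-space model from above, with the
same made-up names; the kernel's own helpers for this are
vm_map_busy()/vm_map_unbusy(), whose bodies are not part of this diff, so
the details below are an assumption sketched from the vm_map_lock() hunk:

/*
 * Continuation of the toy model: mark the map busy while holding its
 * lock, so that only this thread escapes toy_map_lock()'s wait loop.
 */
static void
toy_map_busy(struct toy_map *map)
{

	/* Caller holds map->lock; busy must be clear. */
	map->busy = pthread_self();
	map->busy_set = true;
}

static void
toy_map_unbusy(struct toy_map *map)
{

	pthread_mutex_lock(&map->misc_lock);
	map->busy_set = false;
	/* Wake every writer parked in toy_map_lock(). */
	pthread_cond_broadcast(&map->cv);
	pthread_mutex_unlock(&map->misc_lock);
}

/*
 * The upgrade pattern the locking notes describe, in terms of the model:
 *
 *	pthread_rwlock_rdlock(&map->lock);
 *	toy_map_busy(map);
 *	pthread_rwlock_unlock(&map->lock);
 *	toy_map_lock(map);		-- succeeds: busy == self
 *	... modify the map ...
 *	toy_map_unbusy(map);
 *	pthread_rwlock_unlock(&map->lock);
 *
 * Every other writer sees busy_set and parks in the cv_wait loop until
 * toy_map_unbusy() broadcasts, which is the guarantee the kernel comment
 * refers to.
 */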