Re: vm_map_assert_{un,}locked()
On 08/11/19(Fri) 17:28, Martin Pieuchot wrote: > Current UVM code uses comments to explain when a `map' should be locked > or unlocked. > > The diff below introduces functions to check those assertions and start > to use them in two safe places. This also has the advantage of self- > documenting the code, so fewer comments. > > While here move the definition of uvm_map_is_stack_remappable() to its > C file. Now with the correct diff, ok? Index: uvm/uvm_map.c === RCS file: /cvs/src/sys/uvm/uvm_map.c,v retrieving revision 1.250 diff -u -p -r1.250 uvm_map.c --- uvm/uvm_map.c 2 Nov 2019 16:41:57 -0000 1.250 +++ uvm/uvm_map.c 8 Nov 2019 16:42:05 -0000 @@ -157,6 +157,8 @@ int uvm_map_findspace(struct vm_map*, vaddr_t); vsize_t uvm_map_addr_augment_get(struct vm_map_entry*); void uvm_map_addr_augment(struct vm_map_entry*); +boolean_t uvm_map_is_stack_remappable(vm_map_t, vaddr_t, + vsize_t); int uvm_map_inentry_recheck(u_long, vaddr_t, struct p_inentry *); @@ -1124,7 +1119,6 @@ out: * uvm_map: establish a valid mapping in map * * => *addr and sz must be a multiple of PAGE_SIZE. - * => map must be unlocked. * => value meanings (4 cases): * [1] == uoffset is a hint for PMAP_PREFER * [2] == don't PMAP_PREFER @@ -1157,6 +1151,7 @@ uvm_map(struct vm_map *map, vaddr_t *add splassert(IPL_NONE); else splassert(IPL_VM); + vm_map_assert_unlocked(map); /* * We use pmap_align and pmap_offset as alignment and offset variables. @@ -1889,8 +1884,6 @@ uvm_map_inentry(struct proc *p, struct p /* * Check whether the given address range can be converted to a MAP_STACK * mapping. - * - * Must be called with map locked. 
*/ boolean_t uvm_map_is_stack_remappable(struct vm_map *map, vaddr_t addr, vaddr_t sz) @@ -1898,6 +1891,8 @@ uvm_map_is_stack_remappable(struct vm_ma vaddr_t end = addr + sz; struct vm_map_entry *first, *iter, *prev = NULL; + vm_map_assert_locked(map); + if (!uvm_map_lookup_entry(map, addr, &first)) { printf("map stack 0x%lx-0x%lx of map %p failed: no mapping\n", addr, end, map); @@ -5442,6 +5437,30 @@ vm_map_unbusy_ln(struct vm_map *map, cha mtx_leave(&map->flags_lock); if (oflags & VM_MAP_WANTLOCK) wakeup(&map->flags); +} + +void +vm_map_assert_locked_ln(struct vm_map *map, char *file, int line) +{ + if ((map->flags & VM_MAP_INTRSAFE) == 0) + rw_assert_anylock(&map->lock); + else + MUTEX_ASSERT_LOCKED(&map->mtx); + LPRINTF(("map locked: %p (at %s %d)\n", map, file, line)); + uvm_tree_sanity(map, file, line); + uvm_tree_size_chk(map, file, line); +} + +void +vm_map_assert_unlocked_ln(struct vm_map *map, char *file, int line) +{ + if ((map->flags & VM_MAP_INTRSAFE) == 0) + rw_assert_unlocked(&map->lock); + else + MUTEX_ASSERT_UNLOCKED(&map->mtx); + LPRINTF(("map unlckd: %p (at %s %d)\n", map, file, line)); + uvm_tree_sanity(map, file, line); + uvm_tree_size_chk(map, file, line); } #ifndef SMALL_KERNEL Index: uvm/uvm_map.h === RCS file: /cvs/src/sys/uvm/uvm_map.h,v retrieving revision 1.64 diff -u -p -r1.64 uvm_map.h --- uvm/uvm_map.h 2 Nov 2019 09:36:08 -0000 1.64 +++ uvm/uvm_map.h 8 Nov 2019 16:42:05 -0000 @@ -399,7 +399,6 @@ int uvm_map_inherit(vm_map_t, vaddr_t, int uvm_map_advice(vm_map_t, vaddr_t, vaddr_t, int); void uvm_map_init(void); boolean_t uvm_map_lookup_entry(vm_map_t, vaddr_t, vm_map_entry_t *); -boolean_t uvm_map_is_stack_remappable(vm_map_t, vaddr_t, vsize_t); int uvm_map_remap_as_stack(struct proc *, vaddr_t, vsize_t); int uvm_map_replace(vm_map_t, vaddr_t, vaddr_t, vm_map_entry_t, int); @@ -473,6 +472,8 @@ void vm_map_downgrade_ln(struct vm_map* void vm_map_upgrade_ln(struct vm_map*, char*, int); void vm_map_busy_ln(struct vm_map*, char*, int); void vm_map_unbusy_ln(struct vm_map*, char*, int); 
+void vm_map_assert_locked_ln(struct vm_map*, char*, int); +void vm_map_assert_unlocked_ln(struct vm_map*, char*, int); #ifdef DIAGNOSTIC #define vm_map_lock_try(map) vm_map_lock_try_ln(map, __FILE__, __LINE__) @@ -484,6 +485,10 @@ void vm_map_unbusy_ln(struct vm_map*, c #define vm_map_upgrade(map) vm_map_upgrade_ln(map, __FILE__, __LINE__) #define vm_map_busy(map) vm_map_busy_ln(map, __FILE__, __LINE__) #define vm_map_unbusy(map) vm_map_unbusy_ln(map, __FILE__, __LINE__) +#define
vm_map_assert_{un,}locked()
Current UVM code uses comments to explain when a `map' should be locked or unlocked. The diff below introduces functions to check those assertions and start to use them in two safe places. This also has the advantage of self- documenting the code, so fewer comments. While here move the definition of uvm_map_is_stack_remappable() to its C file. Ok? Index: uvm/uvm_map.c === RCS file: /cvs/src/sys/uvm/uvm_map.c,v retrieving revision 1.250 diff -u -p -r1.250 uvm_map.c --- uvm/uvm_map.c 2 Nov 2019 16:41:57 -0000 1.250 +++ uvm/uvm_map.c 8 Nov 2019 16:10:16 -0000 @@ -157,6 +157,8 @@ int uvm_map_findspace(struct vm_map*, vaddr_t); vsize_t uvm_map_addr_augment_get(struct vm_map_entry*); void uvm_map_addr_augment(struct vm_map_entry*); +boolean_t uvm_map_is_stack_remappable(vm_map_t, vaddr_t, + vsize_t); int uvm_map_inentry_recheck(u_long, vaddr_t, struct p_inentry *); @@ -1124,7 +1126,6 @@ out: * uvm_map: establish a valid mapping in map * * => *addr and sz must be a multiple of PAGE_SIZE. - * => map must be unlocked. * => value meanings (4 cases): * [1] == uoffset is a hint for PMAP_PREFER * [2] == don't PMAP_PREFER @@ -1157,6 +1158,7 @@ uvm_map(struct vm_map *map, vaddr_t *add splassert(IPL_NONE); else splassert(IPL_VM); + vm_map_assert_unlocked(map); /* * We use pmap_align and pmap_offset as alignment and offset variables. @@ -1889,8 +1891,6 @@ uvm_map_inentry(struct proc *p, struct p /* * Check whether the given address range can be converted to a MAP_STACK * mapping. - * - * Must be called with map locked. 
*/ boolean_t uvm_map_is_stack_remappable(struct vm_map *map, vaddr_t addr, vaddr_t sz) @@ -1898,6 +1898,8 @@ uvm_map_is_stack_remappable(struct vm_ma vaddr_t end = addr + sz; struct vm_map_entry *first, *iter, *prev = NULL; + vm_map_assert_locked(map); + if (!uvm_map_lookup_entry(map, addr, &first)) { printf("map stack 0x%lx-0x%lx of map %p failed: no mapping\n", addr, end, map); @@ -5442,6 +5444,30 @@ vm_map_unbusy_ln(struct vm_map *map, cha mtx_leave(&map->flags_lock); if (oflags & VM_MAP_WANTLOCK) wakeup(&map->flags); +} + +void +vm_map_assert_locked_ln(struct vm_map *map, char *file, int line) +{ + if ((map->flags & VM_MAP_INTRSAFE) == 0) + rw_assert_anylock(&map->lock); + else + MUTEX_ASSERT_LOCKED(&map->mtx); + LPRINTF(("map locked: %p (at %s %d)\n", map, file, line)); + uvm_tree_sanity(map, file, line); + uvm_tree_size_chk(map, file, line); +} + +void +vm_map_assert_unlocked_ln(struct vm_map *map, char *file, int line) +{ + if ((map->flags & VM_MAP_INTRSAFE) == 0) + rw_assert_unlocked(&map->lock); + else + MUTEX_ASSERT_UNLOCKED(&map->mtx); + LPRINTF(("map unlckd: %p (at %s %d)\n", map, file, line)); + uvm_tree_sanity(map, file, line); + uvm_tree_size_chk(map, file, line); } #ifndef SMALL_KERNEL Index: uvm/uvm_map.h === RCS file: /cvs/src/sys/uvm/uvm_map.h,v retrieving revision 1.64 diff -u -p -r1.64 uvm_map.h --- uvm/uvm_map.h 2 Nov 2019 09:36:08 -0000 1.64 +++ uvm/uvm_map.h 8 Nov 2019 16:08:55 -0000 @@ -399,7 +399,6 @@ int uvm_map_inherit(vm_map_t, vaddr_t, int uvm_map_advice(vm_map_t, vaddr_t, vaddr_t, int); void uvm_map_init(void); boolean_t uvm_map_lookup_entry(vm_map_t, vaddr_t, vm_map_entry_t *); -boolean_t uvm_map_is_stack_remappable(vm_map_t, vaddr_t, vsize_t); int uvm_map_remap_as_stack(struct proc *, vaddr_t, vsize_t); int uvm_map_replace(vm_map_t, vaddr_t, vaddr_t, vm_map_entry_t, int); @@ -473,6 +472,8 @@ void vm_map_downgrade_ln(struct vm_map* void vm_map_upgrade_ln(struct vm_map*, char*, int); void vm_map_busy_ln(struct vm_map*, char*, int); void vm_map_unbusy_ln(struct vm_map*, char*, int); 
+void vm_map_assert_locked(struct vm_map*, char*, int); +void vm_map_assert_unlocked(struct vm_map*, char*, int); #ifdef DIAGNOSTIC #define vm_map_lock_try(map) vm_map_lock_try_ln(map, __FILE__, __LINE__) @@ -484,6 +485,10 @@ void vm_map_unbusy_ln(struct vm_map*, c #define vm_map_upgrade(map) vm_map_upgrade_ln(map, __FILE__, __LINE__) #define vm_map_busy(map) vm_map_busy_ln(map, __FILE__, __LINE__) #define vm_map_unbusy(map) vm_map_unbusy_ln(map, __FILE__, __LINE__) +#define vm_map_assert_locked(map) \ + vm_map_assert_locked_ln(map, __FILE__, __LINE__)