On 08/11/19(Fri) 17:28, Martin Pieuchot wrote:
> Current UVM code uses comments to explain when a `map' should be locked
> or unlocked.
>
> The diff below introduces functions to check those assertions and start
> to use them in two safe places. This also has the advantage of self-
> documenting the code, so fewer comments.
>
> While here move the definition of uvm_map_is_stack_remappable() to its
> C file.
Now with the correct diff, ok?
Index: uvm/uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
retrieving revision 1.250
diff -u -p -r1.250 uvm_map.c
--- uvm/uvm_map.c 2 Nov 2019 16:41:57 -0000 1.250
+++ uvm/uvm_map.c 8 Nov 2019 16:42:05 -0000
@@ -157,6 +157,8 @@ int uvm_map_findspace(struct vm_map*,
vaddr_t);
vsize_t uvm_map_addr_augment_get(struct vm_map_entry*);
void uvm_map_addr_augment(struct vm_map_entry*);
+boolean_t uvm_map_is_stack_remappable(vm_map_t, vaddr_t,
+ vsize_t);
int uvm_map_inentry_recheck(u_long, vaddr_t,
struct p_inentry *);
@@ -1124,7 +1119,6 @@ out:
* uvm_map: establish a valid mapping in map
*
* => *addr and sz must be a multiple of PAGE_SIZE.
- * => map must be unlocked.
* => <uobj,uoffset> value meanings (4 cases):
* [1] <NULL,uoffset> == uoffset is a hint for PMAP_PREFER
* [2] <NULL,UVM_UNKNOWN_OFFSET> == don't PMAP_PREFER
@@ -1157,6 +1151,7 @@ uvm_map(struct vm_map *map, vaddr_t *add
splassert(IPL_NONE);
else
splassert(IPL_VM);
+ vm_map_assert_unlocked(map);
/*
* We use pmap_align and pmap_offset as alignment and offset variables.
@@ -1889,8 +1884,6 @@ uvm_map_inentry(struct proc *p, struct p
/*
* Check whether the given address range can be converted to a MAP_STACK
* mapping.
- *
- * Must be called with map locked.
*/
boolean_t
uvm_map_is_stack_remappable(struct vm_map *map, vaddr_t addr, vaddr_t sz)
@@ -1898,6 +1891,8 @@ uvm_map_is_stack_remappable(struct vm_ma
vaddr_t end = addr + sz;
struct vm_map_entry *first, *iter, *prev = NULL;
+ vm_map_assert_locked(map);
+
if (!uvm_map_lookup_entry(map, addr, &first)) {
printf("map stack 0x%lx-0x%lx of map %p failed: no mapping\n",
addr, end, map);
@@ -5442,6 +5437,30 @@ vm_map_unbusy_ln(struct vm_map *map, cha
mtx_leave(&map->flags_lock);
if (oflags & VM_MAP_WANTLOCK)
wakeup(&map->flags);
+}
+
+void
+vm_map_assert_locked_ln(struct vm_map *map, char *file, int line)
+{
+ if ((map->flags & VM_MAP_INTRSAFE) == 0)
+ rw_assert_anylock(&map->lock);
+ else
+ MUTEX_ASSERT_LOCKED(&map->mtx);
+ LPRINTF(("map locked: %p (at %s %d)\n", map, file, line));
+ uvm_tree_sanity(map, file, line);
+ uvm_tree_size_chk(map, file, line);
+}
+
+void
+vm_map_assert_unlocked_ln(struct vm_map *map, char *file, int line)
+{
+ if ((map->flags & VM_MAP_INTRSAFE) == 0)
+ rw_assert_unlocked(&map->lock);
+ else
+ MUTEX_ASSERT_UNLOCKED(&map->mtx);
+ LPRINTF(("map unlckd: %p (at %s %d)\n", map, file, line));
+ uvm_tree_sanity(map, file, line);
+ uvm_tree_size_chk(map, file, line);
}
#ifndef SMALL_KERNEL
Index: uvm/uvm_map.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.h,v
retrieving revision 1.64
diff -u -p -r1.64 uvm_map.h
--- uvm/uvm_map.h 2 Nov 2019 09:36:08 -0000 1.64
+++ uvm/uvm_map.h 8 Nov 2019 16:42:05 -0000
@@ -399,7 +399,6 @@ int uvm_map_inherit(vm_map_t, vaddr_t,
int uvm_map_advice(vm_map_t, vaddr_t, vaddr_t, int);
void uvm_map_init(void);
boolean_t uvm_map_lookup_entry(vm_map_t, vaddr_t, vm_map_entry_t *);
-boolean_t uvm_map_is_stack_remappable(vm_map_t, vaddr_t, vsize_t);
int uvm_map_remap_as_stack(struct proc *, vaddr_t, vsize_t);
int uvm_map_replace(vm_map_t, vaddr_t, vaddr_t,
vm_map_entry_t, int);
@@ -473,6 +472,8 @@ void vm_map_downgrade_ln(struct vm_map*
void vm_map_upgrade_ln(struct vm_map*, char*, int);
void vm_map_busy_ln(struct vm_map*, char*, int);
void vm_map_unbusy_ln(struct vm_map*, char*, int);
+void vm_map_assert_locked_ln(struct vm_map*, char*, int);
+void vm_map_assert_unlocked_ln(struct vm_map*, char*, int);
#ifdef DIAGNOSTIC
#define vm_map_lock_try(map) vm_map_lock_try_ln(map, __FILE__, __LINE__)
@@ -484,6 +485,10 @@ void vm_map_unbusy_ln(struct vm_map*, c
#define vm_map_upgrade(map) vm_map_upgrade_ln(map, __FILE__, __LINE__)
#define vm_map_busy(map) vm_map_busy_ln(map, __FILE__, __LINE__)
#define vm_map_unbusy(map) vm_map_unbusy_ln(map, __FILE__, __LINE__)
+#define vm_map_assert_locked(map) \
+ vm_map_assert_locked_ln(map, __FILE__, __LINE__)
+#define vm_map_assert_unlocked(map) \
+ vm_map_assert_unlocked_ln(map, __FILE__, __LINE__)
#else
#define vm_map_lock_try(map) vm_map_lock_try_ln(map, NULL, 0)
#define vm_map_lock(map) vm_map_lock_ln(map, NULL, 0)
@@ -494,6 +499,10 @@ void vm_map_unbusy_ln(struct vm_map*, c
#define vm_map_upgrade(map) vm_map_upgrade_ln(map, NULL, 0)
#define vm_map_busy(map) vm_map_busy_ln(map, NULL, 0)
#define vm_map_unbusy(map) vm_map_unbusy_ln(map, NULL, 0)
+#define vm_map_assert_locked(map) \
+ vm_map_assert_locked_ln(map, NULL, 0)
+#define vm_map_assert_unlocked(map) \
+ vm_map_assert_unlocked_ln(map, NULL, 0)
#endif
#endif /* _KERNEL */