On Mon, Jan 24, 2022 at 12:08:33PM -0300, Martin Pieuchot wrote:
> On 24/01/22(Mon) 12:06, Klemens Nanni wrote:
> > On Sun, Jan 16, 2022 at 09:22:50AM -0300, Martin Pieuchot wrote:
> > > IMHO this approach of let's try if it works now and revert if it isn't
> > > doesn't help us make progress.  I'd be more confident seeing diffs that
> > > assert for the right lock in the functions called by uvm_mapanon() and
> > > documentation about which lock is protecting which field of the data
> > > structures.
> > 
> > I picked vm_map's `size' as the first underdocumented member.
> > All accesses to it are protected by vm_map_lock(), either through the
> > function itself or implicitly by already requiring the calling function
> > to lock the map.
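
For illustration, the write side of `size' looks roughly like this
(sketch only, not lifted verbatim from the tree):

	vm_map_lock(map);
	/* insert or remove entries in the address tree */
	map->size += sz;	/* or map->size -= sz on unmap */
	vm_map_unlock(map);

Readers of `size' either take the lock themselves or document that
their caller must already hold it.
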
> 
> Could we use a vm_map_assert_locked() or something similar that reflects
> the exclusive or shared (read) lock comments?  I don't trust comments.
> It's too easy to miss a lock in a code path.

This survives desktop usage, running regress and building kernels on
amd64.

I'll throw it at sparc64 soon.

> 
> > So annotate functions using `size' wrt. the required lock.
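
To make the intent concrete, a (hypothetical) caller that forgets the
lock now trips the assert; a correct caller looks like:

	struct vm_map_entry *entry;
	vsize_t len = 0;

	vm_map_lock(map);	/* without this, vm_map_assert_locked() fires */
	if (uvm_map_lookup_entry(map, addr, &entry))
		len = entry->end - entry->start;
	vm_map_unlock(map);

(If I read sys/rwlock.h right, the underlying rw_assert_*() checks only
do something on DIAGNOSTIC kernels.)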

Index: uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
retrieving revision 1.282
diff -u -p -r1.282 uvm_map.c
--- uvm_map.c   21 Dec 2021 22:21:32 -0000      1.282
+++ uvm_map.c   25 Jan 2022 07:37:32 -0000
@@ -805,6 +805,8 @@ uvm_map_sel_limits(vaddr_t *min, vaddr_t
  * Fills in *start_ptr and *end_ptr to be the first and last entry describing
  * the space.
  * If called with prefilled *start_ptr and *end_ptr, they are to be correct.
+ *
+ * map must be at least read-locked.
  */
 int
 uvm_map_isavail(struct vm_map *map, struct uvm_addr_state *uaddr,
@@ -815,6 +817,8 @@ uvm_map_isavail(struct vm_map *map, stru
        struct uvm_map_addr *atree;
        struct vm_map_entry *i, *i_end;
 
+       vm_map_assert_locked(map);
+
        if (addr + sz < addr)
                return 0;
 
@@ -986,6 +990,8 @@ uvm_mapanon(struct vm_map *map, vaddr_t 
        vaddr_t                  pmap_align, pmap_offset;
        vaddr_t                  hint;
 
+       vm_map_assert_unlocked(map);
+
        KASSERT((map->flags & VM_MAP_ISVMSPACE) == VM_MAP_ISVMSPACE);
        KASSERT(map != kernel_map);
        KASSERT((map->flags & UVM_FLAG_HOLE) == 0);
@@ -1189,6 +1195,8 @@ uvm_map(struct vm_map *map, vaddr_t *add
        vaddr_t                  pmap_align, pmap_offset;
        vaddr_t                  hint;
 
+       vm_map_assert_unlocked(map);
+
        if ((map->flags & VM_MAP_INTRSAFE) == 0)
                splassert(IPL_NONE);
        else
@@ -1821,6 +1829,8 @@ boolean_t
 uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
     struct vm_map_entry **entry)
 {
+       vm_map_assert_locked(map);
+
        *entry = uvm_map_entrybyaddr(&map->addr, address);
        return *entry != NULL && !UVM_ET_ISHOLE(*entry) &&
            (*entry)->start <= address && (*entry)->end > address;
@@ -2206,6 +2216,8 @@ uvm_unmap_kill_entry(struct vm_map *map,
  * If remove_holes, then remove ET_HOLE entries as well.
  * If markfree, entry will be properly marked free, otherwise, no replacement
  * entry will be put in the tree (corrupting the tree).
+ *
+ * map must be locked.
  */
 void
 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
@@ -2214,6 +2226,8 @@ uvm_unmap_remove(struct vm_map *map, vad
 {
        struct vm_map_entry *prev_hint, *next, *entry;
 
+       vm_map_assert_locked(map);
+
        start = MAX(start, map->min_offset);
        end = MIN(end, map->max_offset);
        if (start >= end)
@@ -2976,13 +2990,17 @@ uvm_tree_sanity(struct vm_map *map, char
        UVM_ASSERT(map, addr == vm_map_max(map), file, line);
 }
 
+/*
+ * map must be at least read-locked.
+ */
 void
 uvm_tree_size_chk(struct vm_map *map, char *file, int line)
 {
        struct vm_map_entry *iter;
-       vsize_t size;
+       vsize_t size = 0;
+
+       vm_map_assert_locked(map);
 
-       size = 0;
        RBT_FOREACH(iter, uvm_map_addr, &map->addr) {
                if (!UVM_ET_ISHOLE(iter))
                        size += iter->end - iter->start;
@@ -3320,6 +3338,8 @@ uvm_map_protect(struct vm_map *map, vadd
        vsize_t dused;
        int error;
 
+       vm_map_assert_unlocked(map);
+
        if (start > end)
                return EINVAL;
        start = MAX(start, map->min_offset);
@@ -4096,6 +4116,7 @@ uvmspace_fork(struct process *pr)
        struct vm_map_entry *old_entry, *new_entry;
        struct uvm_map_deadq dead;
 
+       vm_map_assert_unlocked(old_map);
        vm_map_lock(old_map);
 
        vm2 = uvmspace_alloc(old_map->min_offset, old_map->max_offset,
@@ -4236,6 +4257,8 @@ uvm_map_submap(struct vm_map *map, vaddr
        struct vm_map_entry *entry;
        int result;
 
+       vm_map_assert_unlocked(map);
+
        if (start > map->max_offset || end > map->max_offset ||
            start < map->min_offset || end < map->min_offset)
                return EINVAL;
@@ -4358,6 +4381,8 @@ uvm_map_inherit(struct vm_map *map, vadd
 {
        struct vm_map_entry *entry;
 
+       vm_map_assert_unlocked(map);
+
        switch (new_inheritance) {
        case MAP_INHERIT_NONE:
        case MAP_INHERIT_COPY:
@@ -4403,6 +4428,8 @@ uvm_map_syscall(struct vm_map *map, vadd
 {
        struct vm_map_entry *entry;
 
+       vm_map_assert_unlocked(map);
+
        if (start > end)
                return EINVAL;
        start = MAX(start, map->min_offset);
@@ -4442,6 +4469,8 @@ uvm_map_advice(struct vm_map *map, vaddr
 {
        struct vm_map_entry *entry;
 
+       vm_map_assert_unlocked(map);
+
        switch (new_advice) {
        case MADV_NORMAL:
        case MADV_RANDOM:
@@ -5557,6 +5589,26 @@ vm_map_unbusy_ln(struct vm_map *map, cha
        mtx_leave(&map->flags_lock);
        if (oflags & VM_MAP_WANTLOCK)
                wakeup(&map->flags);
+}
+
+void
+vm_map_assert_locked_ln(struct vm_map *map, char *file, int line)
+{
+       LPRINTF(("map assert locked: %p (at %s %d)\n", map, file, line));
+       if ((map->flags & VM_MAP_INTRSAFE) == 0)
+               rw_assert_anylock(&map->lock);
+       else
+               MUTEX_ASSERT_LOCKED(&map->mtx);
+}
+
+void
+vm_map_assert_unlocked_ln(struct vm_map *map, char *file, int line)
+{
+       LPRINTF(("map assert unlocked: %p (at %s %d)\n", map, file, line));
+       if ((map->flags & VM_MAP_INTRSAFE) == 0)
+               rw_assert_unlocked(&map->lock);
+       else
+               MUTEX_ASSERT_UNLOCKED(&map->mtx);
 }
 
 #ifndef SMALL_KERNEL
Index: uvm_map.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.h,v
retrieving revision 1.71
diff -u -p -r1.71 uvm_map.h
--- uvm_map.h   15 Dec 2021 12:53:53 -0000      1.71
+++ uvm_map.h   25 Jan 2022 06:22:41 -0000
@@ -262,6 +262,7 @@ RBT_PROTOTYPE(uvm_map_addr, vm_map_entry
  *     a       atomic operations
  *     I       immutable after creation or exec(2)
  *     v       `vm_map_lock' (this map `lock' or `mtx')
+ *     f       `flags_lock'
  */
 struct vm_map {
        struct pmap             *pmap;          /* [I] Physical map */
@@ -272,9 +273,9 @@ struct vm_map {
 
        struct uvm_map_addr     addr;           /* [v] Entry tree, by addr */
 
-       vsize_t                 size;           /* virtual size */
+       vsize_t                 size;           /* [v] virtual size */
        int                     ref_count;      /* [a] Reference count */
-       int                     flags;          /* flags */
+       int                     flags;          /* [f] flags */
        struct mutex            flags_lock;     /* flags lock */
        unsigned int            timestamp;      /* Version number */
 
@@ -419,6 +420,8 @@ void                vm_map_downgrade_ln(struct vm_map*
 void           vm_map_upgrade_ln(struct vm_map*, char*, int);
 void           vm_map_busy_ln(struct vm_map*, char*, int);
 void           vm_map_unbusy_ln(struct vm_map*, char*, int);
+void           vm_map_assert_locked_ln(struct vm_map*, char*, int);
+void           vm_map_assert_unlocked_ln(struct vm_map*, char*, int);
 
 #ifdef DIAGNOSTIC
 #define vm_map_lock_try(map)   vm_map_lock_try_ln(map, __FILE__, __LINE__)
@@ -430,6 +433,8 @@ void                vm_map_unbusy_ln(struct vm_map*, c
 #define vm_map_upgrade(map)    vm_map_upgrade_ln(map, __FILE__, __LINE__)
 #define vm_map_busy(map)       vm_map_busy_ln(map, __FILE__, __LINE__)
 #define vm_map_unbusy(map)     vm_map_unbusy_ln(map, __FILE__, __LINE__)
+#define vm_map_assert_locked(map)      vm_map_assert_locked_ln(map, __FILE__, __LINE__)
+#define vm_map_assert_unlocked(map)    vm_map_assert_unlocked_ln(map, __FILE__, __LINE__)
 #else
 #define vm_map_lock_try(map)   vm_map_lock_try_ln(map, NULL, 0)
 #define vm_map_lock(map)       vm_map_lock_ln(map, NULL, 0)
@@ -440,6 +445,8 @@ void                vm_map_unbusy_ln(struct vm_map*, c
 #define vm_map_upgrade(map)    vm_map_upgrade_ln(map, NULL, 0)
 #define vm_map_busy(map)       vm_map_busy_ln(map, NULL, 0)
 #define vm_map_unbusy(map)     vm_map_unbusy_ln(map, NULL, 0)
+#define vm_map_assert_locked(map)      vm_map_assert_locked_ln(map, NULL, 0)
+#define vm_map_assert_unlocked(map)    vm_map_assert_unlocked_ln(map, NULL, 0)
 #endif
 
 void           uvm_map_lock_entry(struct vm_map_entry *);
