On Mon, Feb 07, 2022 at 12:41:27PM +0100, Mark Kettenis wrote:
> > Date: Mon,  7 Feb 2022 11:12:50 +0000
> > From: Klemens Nanni <k...@openbsd.org>
> > 
> > On Thu, Jan 27, 2022 at 11:11:48AM +0000, Klemens Nanni wrote:
> > > On Tue, Jan 25, 2022 at 07:54:52AM +0000, Klemens Nanni wrote:
> > > > > Could we use a vm_map_assert_locked() or something similar that
> > > > > reflects the exclusive or shared (read) lock comments?  I don't
> > > > > trust comments.  It's too easy to miss a lock in a code path.
> > > > 
> > > > This survives desktop usage, running regress and building kernels on
> > > > amd64.
> > > > 
> > > > I'll throw it at sparc64 soon.
> > > > 
> > > > > 
> > > > > > So annotate functions using `size' wrt. the required lock.
> > > 
> > > Here's a better diff that asserts for read, write or any type of vm_map
> > > lock.
> > > 
> > > vm_map_lock() and vm_map_lock_read() are used for exclusive and shared
> > > access respectively;  unless I missed something, no code path is purely
> > > protected by vm_map_lock_read() alone, i.e. functions called with a read
> > > lock held by the caller are also called with a write lock elsewhere.
> > > 
> > > That means my new vm_map_assert_locked_read() is currently unused, but
> > > vm_map_assert_locked_any() and vm_map_assert_locked_write() are now used
> > > to validate existing function comments as well as a few new places.
> > > 
> > > amd64 and sparc64 are happy with this, both daily drivers as well as
> > > build/regress machines.
> > 
> > amd64 package bulk (thanks naddy), amd64 regress (thanks bluhm) and
> > sparc64 base builds/regress are happy with the diff below, the
> > "uvm_unmap_kill_entry(): unwire with map lock held" locking diff on
> > tech@, and all three of mmap, munmap and mprotect unlocked.
> > 
> > > 
> > > I'd appreciate more tests and reports about running with this diff and
> > > mmap(2) unlocked.
> > 
> > Here's that tested diff, which a) follows rwlock(9)'s naming pattern for
> > the assert functions and b) syncs lock-related comments with NetBSD.
> > 
> > It only adds asserts and updates comments; no locking or logic changes.
> > 
> > I'd like to get these asserts committed soon (after the required
> > uvm_unmap_kill_entry() vm_map lock diff) before moving forward with
> > other (un)locking diffs.
> 
> So there is the existing UVM_MAP_REQ_WRITE().  Compared to
> vm_map_assert_wrlock() it checks the reference count of the map.  I
> think that's questionable, so these should probably be replaced by
> vm_map_assert_wrlock().

Yes, I'd replace the old macro with the new functions in a separate diff.
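
For reference, UVM_MAP_REQ_WRITE() only asserts while the map is still
referenced, i.e. a map whose reference count has dropped to zero
sidesteps the check entirely; roughly like this (paraphrased sketch,
not the exact tree code):

#define UVM_MAP_REQ_WRITE(_map)						\
	do {								\
		if ((_map)->ref_count > 0) {				\
			if (((_map)->flags & VM_MAP_INTRSAFE) == 0)	\
				rw_assert_wrlock(&(_map)->lock);	\
			else						\
				MUTEX_ASSERT_LOCKED(&(_map)->mtx);	\
		}							\
	} while (0)

vm_map_assert_wrlock() below always asserts regardless of the
reference count, which is the stricter and simpler behaviour.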

> Also...
> 
> > Index: uvm_map.c
> > ===================================================================
> > RCS file: /cvs/src/sys/uvm/uvm_map.c,v
> > retrieving revision 1.282
> > diff -u -p -r1.282 uvm_map.c
> > --- uvm_map.c       21 Dec 2021 22:21:32 -0000      1.282
> > +++ uvm_map.c       3 Feb 2022 15:46:44 -0000
> > @@ -498,6 +498,7 @@ uvm_mapent_addr_remove(struct vm_map *ma
> >  void
> >  uvm_map_reference(struct vm_map *map)
> >  {
> > +   vm_map_assert_unlocked(map);
> 
> I don't understand this one.  Grabbing a reference should be safe at
> any time as long as you already hold a reference.

That's a left-over from when I added asserts rather mechanically based
on function comments.

There's no problem with holding a lock while grabbing a reference, so
I've removed this hunk, thanks.
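
To illustrate the point with a sketch (assuming the reference count is
maintained safely regardless of lock state, as the comment above
implies): taking a reference is valid whether or not the map lock is
held, so asserting anything about the lock there was overly strict:

	vm_map_lock(map);		/* map is write-locked...     */
	uvm_map_reference(map);		/* ...yet this is still fine, */
	vm_map_unlock(map);		/* it only bumps ref_count    */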


Index: uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
retrieving revision 1.282
diff -u -p -r1.282 uvm_map.c
--- uvm_map.c   21 Dec 2021 22:21:32 -0000      1.282
+++ uvm_map.c   7 Feb 2022 12:08:41 -0000
@@ -805,6 +805,8 @@ uvm_map_sel_limits(vaddr_t *min, vaddr_t
  * Fills in *start_ptr and *end_ptr to be the first and last entry describing
  * the space.
  * If called with prefilled *start_ptr and *end_ptr, they are to be correct.
+ *
+ * map must be at least read-locked.
  */
 int
 uvm_map_isavail(struct vm_map *map, struct uvm_addr_state *uaddr,
@@ -815,6 +817,8 @@ uvm_map_isavail(struct vm_map *map, stru
        struct uvm_map_addr *atree;
        struct vm_map_entry *i, *i_end;
 
+       vm_map_assert_anylock(map);
+
        if (addr + sz < addr)
                return 0;
 
@@ -892,6 +896,8 @@ uvm_map_isavail(struct vm_map *map, stru
 /*
  * Invoke each address selector until an address is found.
  * Will not invoke uaddr_exe.
+ *
+ * => caller must at least have read-locked map
  */
 int
 uvm_map_findspace(struct vm_map *map, struct vm_map_entry**first,
@@ -901,6 +907,8 @@ uvm_map_findspace(struct vm_map *map, st
        struct uvm_addr_state *uaddr;
        int i;
 
+       vm_map_assert_anylock(map);
+
        /*
         * Allocation for sz bytes at any address,
         * using the addr selectors in order.
@@ -1160,7 +1168,7 @@ unlock:
  * uvm_map: establish a valid mapping in map
  *
  * => *addr and sz must be a multiple of PAGE_SIZE.
- * => map must be unlocked.
+ * => map must be unlocked (we will lock it)
  * => <uobj,uoffset> value meanings (4 cases):
  *     [1] <NULL,uoffset>              == uoffset is a hint for PMAP_PREFER
  *     [2] <NULL,UVM_UNKNOWN_OFFSET>   == don't PMAP_PREFER
@@ -1821,6 +1829,8 @@ boolean_t
 uvm_map_lookup_entry(struct vm_map *map, vaddr_t address,
     struct vm_map_entry **entry)
 {
+       vm_map_assert_anylock(map);
+
        *entry = uvm_map_entrybyaddr(&map->addr, address);
        return *entry != NULL && !UVM_ET_ISHOLE(*entry) &&
            (*entry)->start <= address && (*entry)->end > address;
@@ -2206,6 +2216,8 @@ uvm_unmap_kill_entry(struct vm_map *map,
  * If remove_holes, then remove ET_HOLE entries as well.
  * If markfree, entry will be properly marked free, otherwise, no replacement
  * entry will be put in the tree (corrupting the tree).
+ *
+ * => map must be locked by caller
  */
 void
 uvm_unmap_remove(struct vm_map *map, vaddr_t start, vaddr_t end,
@@ -2214,6 +2226,8 @@ uvm_unmap_remove(struct vm_map *map, vad
 {
        struct vm_map_entry *prev_hint, *next, *entry;
 
+       vm_map_assert_wrlock(map);
+
        start = MAX(start, map->min_offset);
        end = MIN(end, map->max_offset);
        if (start >= end)
@@ -2304,6 +2318,8 @@ uvm_map_pageable_pgon(struct vm_map *map
 {
        struct vm_map_entry *iter;
 
+       vm_map_assert_wrlock(map);
+
        for (iter = first; iter != end;
            iter = RBT_NEXT(uvm_map_addr, iter)) {
                KDASSERT(iter->start >= start_addr && iter->end <= end_addr);
@@ -2457,9 +2473,9 @@ uvm_map_pageable_wire(struct vm_map *map
 /*
  * uvm_map_pageable: set pageability of a range in a map.
  *
- * Flags:
- * UVM_LK_ENTER: map is already locked by caller
- * UVM_LK_EXIT:  don't unlock map on exit
+ * => map must never be read-locked
+ * => if lockflags has UVM_LK_ENTER set, map is already write-locked
+ * => if lockflags has UVM_LK_EXIT  set, don't unlock map on exit
  *
  * The full range must be in use (entries may not have fspace != 0).
  * UVM_ET_HOLE counts as unmapped.
@@ -2586,8 +2602,8 @@ out:
  * uvm_map_pageable_all: special case of uvm_map_pageable - affects
  * all mapped regions.
  *
- * Map must not be locked.
- * If no flags are specified, all ragions are unwired.
+ * => map must not be locked.
+ * => if no flags are specified, all regions are unwired.
  */
 int
 uvm_map_pageable_all(struct vm_map *map, int flags, vsize_t limit)
@@ -2734,6 +2750,7 @@ uvm_map_teardown(struct vm_map *map)
        KERNEL_ASSERT_UNLOCKED();
 
        KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
+       vm_map_lock(map);
 
        /* Remove address selectors. */
        uvm_addr_destroy(map->uaddr_exe);
@@ -2976,12 +2993,17 @@ uvm_tree_sanity(struct vm_map *map, char
        UVM_ASSERT(map, addr == vm_map_max(map), file, line);
 }
 
+/*
+ * map must be at least read-locked.
+ */
 void
 uvm_tree_size_chk(struct vm_map *map, char *file, int line)
 {
        struct vm_map_entry *iter;
        vsize_t size;
 
+       vm_map_assert_anylock(map);
+
        size = 0;
        RBT_FOREACH(iter, uvm_map_addr, &map->addr) {
                if (!UVM_ET_ISHOLE(iter))
@@ -4268,7 +4290,7 @@ uvm_map_submap(struct vm_map *map, vaddr
  * uvm_map_checkprot: check protection in map
  *
  * => must allow specific protection in a fully allocated region.
- * => map mut be read or write locked by caller.
+ * => map must be read or write locked by caller.
  */
 boolean_t
 uvm_map_checkprot(struct vm_map *map, vaddr_t start, vaddr_t end,
@@ -4276,6 +4298,8 @@ uvm_map_checkprot(struct vm_map *map, va
 {
        struct vm_map_entry *entry;
 
+       vm_map_assert_anylock(map);
+
        if (start < map->min_offset || end > map->max_offset || start > end)
                return FALSE;
        if (start == end)
@@ -4802,12 +4826,18 @@ flush_object:
 
 /*
  * UVM_MAP_CLIP_END implementation
+ *
+ * => caller should use UVM_MAP_CLIP_END macro rather than calling
+ *    this directly
+ * => map must be locked by caller
  */
 void
 uvm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, vaddr_t addr)
 {
        struct vm_map_entry *tmp;
 
+       vm_map_assert_wrlock(map);
+
        KASSERT(entry->start < addr && VMMAP_FREE_END(entry) > addr);
        tmp = uvm_mapent_alloc(map, 0);
 
@@ -4823,6 +4853,10 @@ uvm_map_clip_end(struct vm_map *map, str
  * Since uvm_map_splitentry turns the original entry into the lowest
  * entry (address wise) we do a swap between the new entry and the original
  * entry, prior to calling uvm_map_splitentry.
+ *
+ * => caller should use UVM_MAP_CLIP_START macro rather than calling
+ *    this directly
+ * => map must be locked by caller
  */
 void
 uvm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry, vaddr_t addr)
@@ -4830,6 +4864,8 @@ uvm_map_clip_start(struct vm_map *map, s
        struct vm_map_entry *tmp;
        struct uvm_addr_state *free;
 
+       vm_map_assert_wrlock(map);
+
        /* Unlink original. */
        free = uvm_map_uaddr_e(map, entry);
        uvm_mapent_free_remove(map, free, entry);
@@ -5557,6 +5593,46 @@ vm_map_unbusy_ln(struct vm_map *map, cha
        mtx_leave(&map->flags_lock);
        if (oflags & VM_MAP_WANTLOCK)
                wakeup(&map->flags);
+}
+
+void
+vm_map_assert_anylock_ln(struct vm_map *map, char *file, int line)
+{
+       LPRINTF(("map assert read or write locked: %p (at %s %d)\n", map, file, 
line));
+       if ((map->flags & VM_MAP_INTRSAFE) == 0)
+               rw_assert_anylock(&map->lock);
+       else
+               MUTEX_ASSERT_LOCKED(&map->mtx);
+}
+
+void
+vm_map_assert_rdlock_ln(struct vm_map *map, char *file, int line)
+{
+       LPRINTF(("map assert read locked: %p (at %s %d)\n", map, file, line));
+       if ((map->flags & VM_MAP_INTRSAFE) == 0)
+               rw_assert_rdlock(&map->lock);
+       else
+               MUTEX_ASSERT_LOCKED(&map->mtx);
+}
+
+void
+vm_map_assert_wrlock_ln(struct vm_map *map, char *file, int line)
+{
+       LPRINTF(("map assert write locked: %p (at %s %d)\n", map, file, line));
+       if ((map->flags & VM_MAP_INTRSAFE) == 0)
+               rw_assert_wrlock(&map->lock);
+       else
+               MUTEX_ASSERT_LOCKED(&map->mtx);
+}
+
+void
+vm_map_assert_unlocked_ln(struct vm_map *map, char *file, int line)
+{
+       LPRINTF(("map assert unlocked: %p (at %s %d)\n", map, file, line));
+       if ((map->flags & VM_MAP_INTRSAFE) == 0)
+               rw_assert_unlocked(&map->lock);
+       else
+               MUTEX_ASSERT_UNLOCKED(&map->mtx);
 }
 
 #ifndef SMALL_KERNEL
Index: uvm_map.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.h,v
retrieving revision 1.71
diff -u -p -r1.71 uvm_map.h
--- uvm_map.h   15 Dec 2021 12:53:53 -0000      1.71
+++ uvm_map.h   3 Feb 2022 11:38:22 -0000
@@ -419,6 +420,10 @@ void               vm_map_downgrade_ln(struct vm_map*
 void           vm_map_upgrade_ln(struct vm_map*, char*, int);
 void           vm_map_busy_ln(struct vm_map*, char*, int);
 void           vm_map_unbusy_ln(struct vm_map*, char*, int);
+void           vm_map_assert_anylock_ln(struct vm_map*, char*, int);
+void           vm_map_assert_rdlock_ln(struct vm_map*, char*, int);
+void           vm_map_assert_wrlock_ln(struct vm_map*, char*, int);
+void           vm_map_assert_unlocked_ln(struct vm_map*, char*, int);
 
 #ifdef DIAGNOSTIC
 #define vm_map_lock_try(map)   vm_map_lock_try_ln(map, __FILE__, __LINE__)
@@ -430,6 +435,10 @@ void               vm_map_unbusy_ln(struct vm_map*, c
 #define vm_map_upgrade(map)    vm_map_upgrade_ln(map, __FILE__, __LINE__)
 #define vm_map_busy(map)       vm_map_busy_ln(map, __FILE__, __LINE__)
 #define vm_map_unbusy(map)     vm_map_unbusy_ln(map, __FILE__, __LINE__)
+#define vm_map_assert_anylock(map)     vm_map_assert_anylock_ln(map, __FILE__, __LINE__)
+#define vm_map_assert_rdlock(map)      vm_map_assert_rdlock_ln(map, __FILE__, __LINE__)
+#define vm_map_assert_wrlock(map)      vm_map_assert_wrlock_ln(map, __FILE__, __LINE__)
+#define vm_map_assert_unlocked(map)    vm_map_assert_unlocked_ln(map, __FILE__, __LINE__)
 #else
 #define vm_map_lock_try(map)   vm_map_lock_try_ln(map, NULL, 0)
 #define vm_map_lock(map)       vm_map_lock_ln(map, NULL, 0)
@@ -440,6 +449,10 @@ void               vm_map_unbusy_ln(struct vm_map*, c
 #define vm_map_upgrade(map)    vm_map_upgrade_ln(map, NULL, 0)
 #define vm_map_busy(map)       vm_map_busy_ln(map, NULL, 0)
 #define vm_map_unbusy(map)     vm_map_unbusy_ln(map, NULL, 0)
+#define vm_map_assert_anylock(map)     vm_map_assert_anylock_ln(map, NULL, 0)
+#define vm_map_assert_rdlock(map)      vm_map_assert_rdlock_ln(map, NULL, 0)
+#define vm_map_assert_wrlock(map)      vm_map_assert_wrlock_ln(map, NULL, 0)
+#define vm_map_assert_unlocked(map)    vm_map_assert_unlocked_ln(map, NULL, 0)
 #endif
 
 void           uvm_map_lock_entry(struct vm_map_entry *);
Index: uvm_fault.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_fault.c,v
retrieving revision 1.126
diff -u -p -r1.126 uvm_fault.c
--- uvm_fault.c 3 Feb 2022 19:57:11 -0000       1.126
+++ uvm_fault.c 5 Feb 2022 13:27:54 -0000
@@ -1616,6 +1616,7 @@ uvm_fault_unwire_locked(vm_map_t map, va
        struct vm_page *pg;
 
        KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
+       vm_map_assert_anylock(map);
 
        /*
         * we assume that the area we are unwiring has actually been wired

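To illustrate the intended usage (hypothetical helpers, not part of the
diff): functions that so far relied on a "map must be locked" comment
now enforce their contract at run time, while lookups accept either
lock type:

/* hypothetical example only */
void
example_modify_map(struct vm_map *map)
{
	vm_map_assert_wrlock(map);	/* catches callers missing vm_map_lock() */
	/* ... modify entries ... */
}

boolean_t
example_lookup(struct vm_map *map, vaddr_t addr)
{
	struct vm_map_entry *entry;

	vm_map_assert_anylock(map);	/* shared or exclusive both suffice */
	return uvm_map_lookup_entry(map, addr, &entry);
}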