Author: bdrewery
Date: Fri Oct  9 23:49:42 2020
New Revision: 366594
URL: https://svnweb.freebsd.org/changeset/base/366594

Log:
  Use unlocked page lookup for inmem() to avoid object lock contention
  
  Reviewed By:  kib, markj
  Submitted by: mlaier
  Sponsored by: Dell EMC
  Differential Revision:        https://reviews.freebsd.org/D26653

Modified:
  head/sys/kern/vfs_bio.c
  head/sys/sys/buf.h
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h

Modified: head/sys/kern/vfs_bio.c
==============================================================================
--- head/sys/kern/vfs_bio.c     Fri Oct  9 23:02:09 2020        (r366593)
+++ head/sys/kern/vfs_bio.c     Fri Oct  9 23:49:42 2020        (r366594)
@@ -154,7 +154,6 @@ caddr_t __read_mostly unmapped_buf;
 /* Used below and for softdep flushing threads in ufs/ffs/ffs_softdep.c */
 struct proc *bufdaemonproc;
 
-static int inmem(struct vnode *vp, daddr_t blkno);
 static void vm_hold_free_pages(struct buf *bp, int newbsize);
 static void vm_hold_load_pages(struct buf *bp, vm_offset_t from,
                vm_offset_t to);
@@ -3585,48 +3584,54 @@ incore(struct bufobj *bo, daddr_t blkno)
  * associated VM object.  This is like incore except
  * it also hunts around in the VM system for the data.
  */
-
-static int
+bool
 inmem(struct vnode * vp, daddr_t blkno)
 {
        vm_object_t obj;
        vm_offset_t toff, tinc, size;
-       vm_page_t m;
+       vm_page_t m, n;
        vm_ooffset_t off;
+       int valid;
 
        ASSERT_VOP_LOCKED(vp, "inmem");
 
        if (incore(&vp->v_bufobj, blkno))
-               return 1;
+               return (true);
        if (vp->v_mount == NULL)
-               return 0;
+               return (false);
        obj = vp->v_object;
        if (obj == NULL)
-               return (0);
+               return (false);
 
        size = PAGE_SIZE;
        if (size > vp->v_mount->mnt_stat.f_iosize)
                size = vp->v_mount->mnt_stat.f_iosize;
        off = (vm_ooffset_t)blkno * (vm_ooffset_t)vp->v_mount->mnt_stat.f_iosize;
 
-       VM_OBJECT_RLOCK(obj);
        for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {
-               m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
-               if (!m)
-                       goto notinmem;
+               m = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
+recheck:
+               if (m == NULL)
+                       return (false);
+
                tinc = size;
                if (tinc > PAGE_SIZE - ((toff + off) & PAGE_MASK))
                        tinc = PAGE_SIZE - ((toff + off) & PAGE_MASK);
-               if (vm_page_is_valid(m,
-                   (vm_offset_t) ((toff + off) & PAGE_MASK), tinc) == 0)
-                       goto notinmem;
+               /*
+                * Consider page validity only if page mapping didn't change
+                * during the check.
+                */
+               valid = vm_page_is_valid(m,
+                   (vm_offset_t)((toff + off) & PAGE_MASK), tinc);
+               n = vm_page_lookup_unlocked(obj, OFF_TO_IDX(off + toff));
+               if (m != n) {
+                       m = n;
+                       goto recheck;
+               }
+               if (!valid)
+                       return (false);
        }
-       VM_OBJECT_RUNLOCK(obj);
-       return 1;
-
-notinmem:
-       VM_OBJECT_RUNLOCK(obj);
-       return (0);
+       return (true);
 }
 
 /*

Modified: head/sys/sys/buf.h
==============================================================================
--- head/sys/sys/buf.h  Fri Oct  9 23:02:09 2020        (r366593)
+++ head/sys/sys/buf.h  Fri Oct  9 23:49:42 2020        (r366594)
@@ -549,6 +549,7 @@ int vfs_bio_awrite(struct buf *);
 void   vfs_busy_pages_acquire(struct buf *bp);
 void   vfs_busy_pages_release(struct buf *bp);
 struct buf *incore(struct bufobj *, daddr_t);
+bool   inmem(struct vnode *, daddr_t);
 struct buf *gbincore(struct bufobj *, daddr_t);
 struct buf *gbincore_unlocked(struct bufobj *, daddr_t);
 struct buf *getblk(struct vnode *, daddr_t, int, int, int, int);

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c       Fri Oct  9 23:02:09 2020        (r366593)
+++ head/sys/vm/vm_page.c       Fri Oct  9 23:49:42 2020        (r366594)
@@ -1698,6 +1698,21 @@ vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
 }
 
 /*
+ *     vm_page_lookup_unlocked:
+ *
+ *     Returns the page associated with the object/offset pair specified;
+ *     if none is found, NULL is returned.  The page may no longer be
+ *     present in the object at the time that this function returns.  Only
+ *     useful for opportunistic checks such as inmem().
+ */
+vm_page_t
+vm_page_lookup_unlocked(vm_object_t object, vm_pindex_t pindex)
+{
+
+       return (vm_radix_lookup_unlocked(&object->rtree, pindex));
+}
+
+/*
  *     vm_page_relookup:
  *
  *     Returns a page that must already have been busied by

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h       Fri Oct  9 23:02:09 2020        (r366593)
+++ head/sys/vm/vm_page.h       Fri Oct  9 23:49:42 2020        (r366594)
@@ -700,6 +700,7 @@ int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_
 void vm_page_invalid(vm_page_t m);
 void vm_page_launder(vm_page_t m);
 vm_page_t vm_page_lookup(vm_object_t, vm_pindex_t);
+vm_page_t vm_page_lookup_unlocked(vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
 void vm_page_pqbatch_drain(void);
 void vm_page_pqbatch_submit(vm_page_t m, uint8_t queue);
_______________________________________________
svn-src-all@freebsd.org mailing list
https://lists.freebsd.org/mailman/listinfo/svn-src-all
To unsubscribe, send any mail to "svn-src-all-unsubscr...@freebsd.org"

Reply via email to