Author: mjg
Date: Tue Jan 24 19:45:33 2017
New Revision: 312718
URL: https://svnweb.freebsd.org/changeset/base/312718

Log:
  MFC r310766,r310767,r310774,r310779:
  
      cache: drop the NULL check from VP2VNODELOCK
  
      Now that negative entries are annotated with a dedicated flag, NULL vnodes
      are no longer passed.
  
  ==
  
      cache: depessimize hashing macros/inlines
  
      All hash sizes are power-of-2, but the compiler does not know that for sure
      and 'foo % size' forces doing a division.
  
      Store the size - 1 and use 'foo & hash' instead, which needs only a bitwise AND.
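
      (A minimal userspace sketch of the trick, with illustrative names that are
      not the kernel's: when the table size is a power of two, 'key % size' and
      'key & (size - 1)' select the same slot, but the latter compiles to a
      single AND instruction instead of a division.)

      #include <assert.h>
      #include <stdint.h>
      #include <stdio.h>

      int
      main(void)
      {
              uint32_t size = 64;             /* must be a power of two */
              uint32_t mask = size - 1;       /* stored once, like the nc*hash masks */
              uintptr_t key = (uintptr_t)0xdeadbeefUL;

              /* The two expressions agree only because size is a power of two. */
              assert((key % size) == (key & mask));
              printf("slot %ju\n", (uintmax_t)(key & mask));
              return (0);
      }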
  
  ==
  
      cache: move shrink lock init to nchinit
  
      This gets rid of unnecessary sysinit usage.
  
      While here also rename the lock to be consistent with the rest.
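
      (Schematic restatement of what the diff below does, not verbatim source:
      MTX_SYSINIT() registers a separate SYSINIT whose only job is to run
      mtx_init() at boot; since nchinit() already runs at boot and initializes
      the other name cache locks, the lock can simply be set up there instead.)

      /* Before: a dedicated SYSINIT just for this lock. */
      static struct mtx ncneg_shrink_lock;
      MTX_SYSINIT(vfscache_shrink_neg, &ncneg_shrink_lock,
          "Name Cache shrink neg", MTX_DEF);

      /* After: initialized alongside the other locks in nchinit(). */
      mtx_init(&ncneg_shrink_lock, "ncnegs", NULL, MTX_DEF);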
  
  ==
  
      cache: sprinkle __predict_false
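
      (__predict_false() is the <sys/cdefs.h> wrapper around the compiler's
      __builtin_expect(), marking a condition as almost never true so the
      rarely-taken branch is laid out off the hot path.  A minimal userspace
      sketch of the idea, assuming GCC or Clang; 'lookup' and its flag are
      made up for illustration.)

      #include <stdio.h>

      /* In the kernel this macro comes from <sys/cdefs.h>. */
      #define __predict_false(exp)    __builtin_expect((exp), 0)

      static int doingcache = 1;      /* stand-in for the real toggle */

      static int
      lookup(const char *name)
      {
              /* Hint to the compiler: this branch is expected not to be taken. */
              if (__predict_false(!doingcache)) {
                      fprintf(stderr, "cache disabled\n");
                      return (0);
              }
              printf("would consult the cache for %s\n", name);
              return (1);
      }

      int
      main(void)
      {
              return (lookup("example") ? 0 : 1);
      }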

Modified:
  stable/11/sys/kern/vfs_cache.c
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/kern/vfs_cache.c
==============================================================================
--- stable/11/sys/kern/vfs_cache.c      Tue Jan 24 19:42:24 2017        (r312717)
+++ stable/11/sys/kern/vfs_cache.c      Tue Jan 24 19:45:33 2017        (r312718)
@@ -229,8 +229,6 @@ SYSCTL_UINT(_vfs, OID_AUTO, ncneghitsreq
 struct nchstats        nchstats;               /* cache effectiveness statistics */
 
 static struct mtx       ncneg_shrink_lock;
-MTX_SYSINIT(vfscache_shrink_neg, &ncneg_shrink_lock, "Name Cache shrink neg",
-    MTX_DEF);
 
 struct neglist {
        struct mtx              nl_lock;
@@ -242,30 +240,29 @@ static struct neglist ncneg_hot;
 
 static int     shrink_list_turn;
 
-static u_int   numneglists;
+#define        numneglists (ncneghash + 1)
+static u_int   ncneghash;
 static inline struct neglist *
 NCP2NEGLIST(struct namecache *ncp)
 {
 
-       return (&neglists[(((uintptr_t)(ncp) >> 8) % numneglists)]);
+       return (&neglists[(((uintptr_t)(ncp) >> 8) & ncneghash)]);
 }
 
-static u_int   numbucketlocks;
+#define        numbucketlocks (ncbuckethash + 1)
+static u_int   ncbuckethash;
 static struct rwlock_padalign  *bucketlocks;
 #define        HASH2BUCKETLOCK(hash) \
-       ((struct rwlock *)(&bucketlocks[((hash) % numbucketlocks)]))
+       ((struct rwlock *)(&bucketlocks[((hash) & ncbuckethash)]))
 
-static u_int   numvnodelocks;
+#define        numvnodelocks (ncvnodehash + 1)
+static u_int   ncvnodehash;
 static struct mtx *vnodelocks;
 static inline struct mtx *
 VP2VNODELOCK(struct vnode *vp)
 {
-       struct mtx *vlp;
 
-       if (vp == NULL)
-               return (NULL);
-       vlp = &vnodelocks[(((uintptr_t)(vp) >> 8) % numvnodelocks)];
-       return (vlp);
+       return (&vnodelocks[(((uintptr_t)(vp) >> 8) & ncvnodehash)]);
 }
 
 /*
@@ -1107,7 +1104,7 @@ cache_lookup(struct vnode *dvp, struct v
        uint32_t hash;
        int error, ltype;
 
-       if (!doingcache) {
+       if (__predict_false(!doingcache)) {
                cnp->cn_flags &= ~MAKEENTRY;
                return (0);
        }
@@ -1374,8 +1371,8 @@ cache_lock_vnodes_cel_3(struct celocksta
        cache_assert_vlp_locked(cel->vlp[1]);
        MPASS(cel->vlp[2] == NULL);
 
+       MPASS(vp != NULL);
        vlp = VP2VNODELOCK(vp);
-       MPASS(vlp != NULL);
 
        ret = true;
        if (vlp >= cel->vlp[1]) {
@@ -1547,13 +1544,13 @@ cache_enter_time(struct vnode *dvp, stru
        VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp,
            ("cache_enter: Doomed vnode used as src"));
 
-       if (!doingcache)
+       if (__predict_false(!doingcache))
                return;
 
        /*
         * Avoid blowout in namecache entries.
         */
-       if (numcache >= desiredvnodes * ncsizefactor)
+       if (__predict_false(numcache >= desiredvnodes * ncsizefactor))
                return;
 
        cache_celockstate_init(&cel);
@@ -1779,21 +1776,21 @@ nchinit(void *dummy __unused)
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
 
        nchashtbl = hashinit(desiredvnodes * 2, M_VFSCACHE, &nchash);
-       numbucketlocks = cache_roundup_2(mp_ncpus * 64);
-       if (numbucketlocks > nchash + 1)
-               numbucketlocks = nchash + 1;
+       ncbuckethash = cache_roundup_2(mp_ncpus * 64) - 1;
+       if (ncbuckethash > nchash)
+               ncbuckethash = nchash;
        bucketlocks = malloc(sizeof(*bucketlocks) * numbucketlocks, M_VFSCACHE,
            M_WAITOK | M_ZERO);
        for (i = 0; i < numbucketlocks; i++)
                rw_init_flags(&bucketlocks[i], "ncbuc", RW_DUPOK | RW_RECURSE);
-       numvnodelocks = cache_roundup_2(mp_ncpus * 64);
+       ncvnodehash = cache_roundup_2(mp_ncpus * 64) - 1;
        vnodelocks = malloc(sizeof(*vnodelocks) * numvnodelocks, M_VFSCACHE,
            M_WAITOK | M_ZERO);
        for (i = 0; i < numvnodelocks; i++)
                mtx_init(&vnodelocks[i], "ncvn", NULL, MTX_DUPOK | MTX_RECURSE);
        ncpurgeminvnodes = numbucketlocks;
 
-       numneglists = 4;
+       ncneghash = 3;
        neglists = malloc(sizeof(*neglists) * numneglists, M_VFSCACHE,
            M_WAITOK | M_ZERO);
        for (i = 0; i < numneglists; i++) {
@@ -1803,6 +1800,8 @@ nchinit(void *dummy __unused)
        mtx_init(&ncneg_hot.nl_lock, "ncneglh", NULL, MTX_DEF);
        TAILQ_INIT(&ncneg_hot.nl_list);
 
+       mtx_init(&ncneg_shrink_lock, "ncnegs", NULL, MTX_DEF);
+
        numcalls = counter_u64_alloc(M_WAITOK);
        dothits = counter_u64_alloc(M_WAITOK);
        dotdothits = counter_u64_alloc(M_WAITOK);
@@ -2055,9 +2054,9 @@ kern___getcwd(struct thread *td, char *b
        struct vnode *cdir, *rdir;
        int error;
 
-       if (disablecwd)
+       if (__predict_false(disablecwd))
                return (ENODEV);
-       if (buflen < 2)
+       if (__predict_false(buflen < 2))
                return (EINVAL);
        if (buflen > path_max)
                buflen = path_max;
@@ -2108,9 +2107,9 @@ vn_fullpath(struct thread *td, struct vn
        struct vnode *rdir;
        int error;
 
-       if (disablefullpath)
+       if (__predict_false(disablefullpath))
                return (ENODEV);
-       if (vn == NULL)
+       if (__predict_false(vn == NULL))
                return (EINVAL);
 
        buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
@@ -2142,9 +2141,9 @@ vn_fullpath_global(struct thread *td, st
        char *buf;
        int error;
 
-       if (disablefullpath)
+       if (__predict_false(disablefullpath))
                return (ENODEV);
-       if (vn == NULL)
+       if (__predict_false(vn == NULL))
                return (EINVAL);
        buf = malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
        error = vn_fullpath1(td, vn, rootvnode, buf, retbuf, MAXPATHLEN);
@@ -2408,7 +2407,7 @@ vn_path_to_global_path(struct thread *td
        ASSERT_VOP_ELOCKED(vp, __func__);
 
        /* Return ENODEV if sysctl debug.disablefullpath==1 */
-       if (disablefullpath)
+       if (__predict_false(disablefullpath))
                return (ENODEV);
 
        /* Construct global filesystem path from vp. */