Author: mjg
Date: Mon Nov  9 23:02:13 2020
New Revision: 367535
URL: https://svnweb.freebsd.org/changeset/base/367535

Log:
  vfs: group mount per-cpu vars into one struct
  
  While here, move frequently read fields into the same cacheline.
  
  This shrinks struct mount by 64 bytes.
  
  Tested by:    pho
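
  In a nutshell, the fast path goes from one per-CPU allocation and one
  pointer chase per counter to a single per-CPU base holding all four
  counters on the same cacheline.  A condensed sketch (both fragments
  paraphrase the hunks below):

	/* before: four pcpu_zone_4 allocations, one pointer each */
	zpcpu_add_protected(mp->mnt_ref_pcpu, 1);

	/* after: one pcpu_zone_16 allocation, one shared base */
	struct mount_pcpu *mpcpu = vfs_mount_pcpu(mp);
	mpcpu->mntp_ref += 1;

  The 64-byte shrink presumably comes from replacing four pointers with
  one and from reorganizing the __aligned(CACHE_LINE_SIZE) region so the
  structure ends one cacheline earlier.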

Modified:
  head/sys/kern/vfs_cache.c
  head/sys/kern/vfs_default.c
  head/sys/kern/vfs_mount.c
  head/sys/kern/vfs_subr.c
  head/sys/kern/vfs_vnops.c
  head/sys/sys/mount.h

Modified: head/sys/kern/vfs_cache.c
==============================================================================
--- head/sys/kern/vfs_cache.c   Mon Nov  9 23:00:29 2020        (r367534)
+++ head/sys/kern/vfs_cache.c   Mon Nov  9 23:02:13 2020        (r367535)
@@ -4249,6 +4249,7 @@ static int __noinline
 cache_fplookup_climb_mount(struct cache_fpl *fpl)
 {
        struct mount *mp, *prev_mp;
+       struct mount_pcpu *mpcpu, *prev_mpcpu;
        struct vnode *vp;
        seqc_t vp_seqc;
 
@@ -4262,38 +4263,39 @@ cache_fplookup_climb_mount(struct cache_fpl *fpl)
 
        prev_mp = NULL;
        for (;;) {
-               if (!vfs_op_thread_enter_crit(mp)) {
+               if (!vfs_op_thread_enter_crit(mp, mpcpu)) {
                        if (prev_mp != NULL)
-                               vfs_op_thread_exit_crit(prev_mp);
+                               vfs_op_thread_exit_crit(prev_mp, prev_mpcpu);
                        return (cache_fpl_partial(fpl));
                }
                if (prev_mp != NULL)
-                       vfs_op_thread_exit_crit(prev_mp);
+                       vfs_op_thread_exit_crit(prev_mp, prev_mpcpu);
                if (!vn_seqc_consistent(vp, vp_seqc)) {
-                       vfs_op_thread_exit_crit(mp);
+                       vfs_op_thread_exit_crit(mp, mpcpu);
                        return (cache_fpl_partial(fpl));
                }
                if (!cache_fplookup_mp_supported(mp)) {
-                       vfs_op_thread_exit_crit(mp);
+                       vfs_op_thread_exit_crit(mp, mpcpu);
                        return (cache_fpl_partial(fpl));
                }
                vp = atomic_load_ptr(&mp->mnt_rootvnode);
                if (vp == NULL || VN_IS_DOOMED(vp)) {
-                       vfs_op_thread_exit_crit(mp);
+                       vfs_op_thread_exit_crit(mp, mpcpu);
                        return (cache_fpl_partial(fpl));
                }
                vp_seqc = vn_seqc_read_any(vp);
                if (seqc_in_modify(vp_seqc)) {
-                       vfs_op_thread_exit_crit(mp);
+                       vfs_op_thread_exit_crit(mp, mpcpu);
                        return (cache_fpl_partial(fpl));
                }
                prev_mp = mp;
+               prev_mpcpu = mpcpu;
                mp = atomic_load_ptr(&vp->v_mountedhere);
                if (mp == NULL)
                        break;
        }
 
-       vfs_op_thread_exit_crit(prev_mp);
+       vfs_op_thread_exit_crit(prev_mp, prev_mpcpu);
        fpl->tvp = vp;
        fpl->tvp_seqc = vp_seqc;
        return (0);
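
The loop above is hand-over-hand: the previous mount's per-CPU section is
held until the next mount's is entered, so the chain stays pinned at every
step, which is why both mpcpu and prev_mpcpu now have to be carried.  The
speculative reads are validated with the vnode sequence counter idiom; a
condensed sketch (vn_seqc_read_any(), seqc_in_modify() and
vn_seqc_consistent() are the existing primitives):

	vp_seqc = vn_seqc_read_any(vp);	/* snapshot; flags in-flight modify */
	if (seqc_in_modify(vp_seqc))
		return (cache_fpl_partial(fpl));	/* take the slow path */
	/* ... unlocked, speculative reads of vp ... */
	if (!vn_seqc_consistent(vp, vp_seqc))
		return (cache_fpl_partial(fpl));	/* vp changed; slow path */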

Modified: head/sys/kern/vfs_default.c
==============================================================================
--- head/sys/kern/vfs_default.c Mon Nov  9 23:00:29 2020        (r367534)
+++ head/sys/kern/vfs_default.c Mon Nov  9 23:02:13 2020        (r367535)
@@ -663,6 +663,7 @@ vop_stdgetwritemount(ap)
        } */ *ap;
 {
        struct mount *mp;
+       struct mount_pcpu *mpcpu;
        struct vnode *vp;
 
        /*
@@ -680,12 +681,12 @@ vop_stdgetwritemount(ap)
                *(ap->a_mpp) = NULL;
                return (0);
        }
-       if (vfs_op_thread_enter(mp)) {
+       if (vfs_op_thread_enter(mp, mpcpu)) {
                if (mp == vp->v_mount) {
-                       vfs_mp_count_add_pcpu(mp, ref, 1);
-                       vfs_op_thread_exit(mp);
+                       vfs_mp_count_add_pcpu(mpcpu, ref, 1);
+                       vfs_op_thread_exit(mp, mpcpu);
                } else {
-                       vfs_op_thread_exit(mp);
+                       vfs_op_thread_exit(mp, mpcpu);
                        mp = NULL;
                }
        } else {
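
The mp == vp->v_mount re-check above is the usual validate-after-publish
step: the unlocked snapshot is only trusted once this thread's presence in
the section is visible to any concurrent vfs_op_enter().  A condensed
sketch of the idiom (comments are interpretation, not committed text):

	mp = vp->v_mount;			/* unlocked snapshot */
	if (vfs_op_thread_enter(mp, mpcpu)) {
		if (mp == vp->v_mount) {	/* snapshot still current */
			vfs_mp_count_add_pcpu(mpcpu, ref, 1);
			vfs_op_thread_exit(mp, mpcpu);
		} else {
			vfs_op_thread_exit(mp, mpcpu);
			mp = NULL;	/* raced with unmount; report none */
		}
	}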

Modified: head/sys/kern/vfs_mount.c
==============================================================================
--- head/sys/kern/vfs_mount.c   Mon Nov  9 23:00:29 2020        (r367534)
+++ head/sys/kern/vfs_mount.c   Mon Nov  9 23:02:13 2020        (r367535)
@@ -127,14 +127,7 @@ mount_init(void *mem, int size, int flags)
        mtx_init(&mp->mnt_mtx, "struct mount mtx", NULL, MTX_DEF);
        mtx_init(&mp->mnt_listmtx, "struct mount vlist mtx", NULL, MTX_DEF);
        lockinit(&mp->mnt_explock, PVFS, "explock", 0, 0);
-       mp->mnt_thread_in_ops_pcpu = uma_zalloc_pcpu(pcpu_zone_4,
-           M_WAITOK | M_ZERO);
-       mp->mnt_ref_pcpu = uma_zalloc_pcpu(pcpu_zone_4,
-           M_WAITOK | M_ZERO);
-       mp->mnt_lockref_pcpu = uma_zalloc_pcpu(pcpu_zone_4,
-           M_WAITOK | M_ZERO);
-       mp->mnt_writeopcount_pcpu = uma_zalloc_pcpu(pcpu_zone_4,
-           M_WAITOK | M_ZERO);
+       mp->mnt_pcpu = uma_zalloc_pcpu(pcpu_zone_16, M_WAITOK | M_ZERO);
        mp->mnt_ref = 0;
        mp->mnt_vfs_ops = 1;
        mp->mnt_rootvnode = NULL;
@@ -147,10 +140,7 @@ mount_fini(void *mem, int size)
        struct mount *mp;
 
        mp = (struct mount *)mem;
-       uma_zfree_pcpu(pcpu_zone_4, mp->mnt_writeopcount_pcpu);
-       uma_zfree_pcpu(pcpu_zone_4, mp->mnt_lockref_pcpu);
-       uma_zfree_pcpu(pcpu_zone_4, mp->mnt_ref_pcpu);
-       uma_zfree_pcpu(pcpu_zone_4, mp->mnt_thread_in_ops_pcpu);
+       uma_zfree_pcpu(pcpu_zone_16, mp->mnt_pcpu);
        lockdestroy(&mp->mnt_explock);
        mtx_destroy(&mp->mnt_listmtx);
        mtx_destroy(&mp->mnt_mtx);
@@ -462,11 +452,12 @@ sys_nmount(struct thread *td, struct nmount_args *uap)
 void
 vfs_ref(struct mount *mp)
 {
+       struct mount_pcpu *mpcpu;
 
        CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
-       if (vfs_op_thread_enter(mp)) {
-               vfs_mp_count_add_pcpu(mp, ref, 1);
-               vfs_op_thread_exit(mp);
+       if (vfs_op_thread_enter(mp, mpcpu)) {
+               vfs_mp_count_add_pcpu(mpcpu, ref, 1);
+               vfs_op_thread_exit(mp, mpcpu);
                return;
        }
 
@@ -478,11 +469,12 @@ vfs_ref(struct mount *mp)
 void
 vfs_rel(struct mount *mp)
 {
+       struct mount_pcpu *mpcpu;
 
        CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
-       if (vfs_op_thread_enter(mp)) {
-               vfs_mp_count_sub_pcpu(mp, ref, 1);
-               vfs_op_thread_exit(mp);
+       if (vfs_op_thread_enter(mp, mpcpu)) {
+               vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
+               vfs_op_thread_exit(mp, mpcpu);
                return;
        }
 
@@ -503,6 +495,12 @@ vfs_mount_alloc(struct vnode *vp, struct vfsconf *vfsp
        mp = uma_zalloc(mount_zone, M_WAITOK);
        bzero(&mp->mnt_startzero,
            __rangeof(struct mount, mnt_startzero, mnt_endzero));
+       mp->mnt_kern_flag = 0;
+       mp->mnt_flag = 0;
+       mp->mnt_rootvnode = NULL;
+       mp->mnt_vnodecovered = NULL;
+       mp->mnt_op = NULL;
+       mp->mnt_vfc = NULL;
        TAILQ_INIT(&mp->mnt_nvnodelist);
        mp->mnt_nvnodelistsize = 0;
        TAILQ_INIT(&mp->mnt_lazyvnodelist);
@@ -1513,6 +1511,7 @@ dounmount_cleanup(struct mount *mp, struct vnode *cove
 void
 vfs_op_enter(struct mount *mp)
 {
+       struct mount_pcpu *mpcpu;
        int cpu;
 
        MNT_ILOCK(mp);
@@ -1523,12 +1522,16 @@ vfs_op_enter(struct mount *mp)
        }
        vfs_op_barrier_wait(mp);
        CPU_FOREACH(cpu) {
-               mp->mnt_ref +=
-                   zpcpu_replace_cpu(mp->mnt_ref_pcpu, 0, cpu);
-               mp->mnt_lockref +=
-                   zpcpu_replace_cpu(mp->mnt_lockref_pcpu, 0, cpu);
-               mp->mnt_writeopcount +=
-                   zpcpu_replace_cpu(mp->mnt_writeopcount_pcpu, 0, cpu);
+               mpcpu = vfs_mount_pcpu_remote(mp, cpu);
+
+               mp->mnt_ref += mpcpu->mntp_ref;
+               mpcpu->mntp_ref = 0;
+
+               mp->mnt_lockref += mpcpu->mntp_lockref;
+               mpcpu->mntp_lockref = 0;
+
+               mp->mnt_writeopcount += mpcpu->mntp_writeopcount;
+               mpcpu->mntp_writeopcount = 0;
        }
        if (mp->mnt_ref <= 0 || mp->mnt_lockref < 0 || mp->mnt_writeopcount < 0)
                panic("%s: invalid count(s) on mp %p: ref %d lockref %d writeopcount %d\n",
@@ -1581,13 +1584,13 @@ vfs_op_wait_func(void *arg, int cpu)
 {
        struct vfs_op_barrier_ipi *vfsopipi;
        struct mount *mp;
-       int *in_op;
+       struct mount_pcpu *mpcpu;
 
        vfsopipi = __containerof(arg, struct vfs_op_barrier_ipi, srcra);
        mp = vfsopipi->mp;
 
-       in_op = zpcpu_get_cpu(mp->mnt_thread_in_ops_pcpu, cpu);
-       while (atomic_load_int(in_op))
+       mpcpu = vfs_mount_pcpu_remote(mp, cpu);
+       while (atomic_load_int(&mpcpu->mntp_thread_in_ops))
                cpu_spinwait();
 }
 
@@ -1610,15 +1613,17 @@ vfs_op_barrier_wait(struct mount *mp)
 void
 vfs_assert_mount_counters(struct mount *mp)
 {
+       struct mount_pcpu *mpcpu;
        int cpu;
 
        if (mp->mnt_vfs_ops == 0)
                return;
 
        CPU_FOREACH(cpu) {
-               if (*zpcpu_get_cpu(mp->mnt_ref_pcpu, cpu) != 0 ||
-                   *zpcpu_get_cpu(mp->mnt_lockref_pcpu, cpu) != 0 ||
-                   *zpcpu_get_cpu(mp->mnt_writeopcount_pcpu, cpu) != 0)
+               mpcpu = vfs_mount_pcpu_remote(mp, cpu);
+               if (mpcpu->mntp_ref != 0 ||
+                   mpcpu->mntp_lockref != 0 ||
+                   mpcpu->mntp_writeopcount != 0)
                        vfs_dump_mount_counters(mp);
        }
 }
@@ -1626,33 +1631,34 @@ vfs_assert_mount_counters(struct mount *mp)
 void
 vfs_dump_mount_counters(struct mount *mp)
 {
-       int cpu, *count;
+       struct mount_pcpu *mpcpu;
        int ref, lockref, writeopcount;
+       int cpu;
 
        printf("%s: mp %p vfs_ops %d\n", __func__, mp, mp->mnt_vfs_ops);
 
        printf("        ref : ");
        ref = mp->mnt_ref;
        CPU_FOREACH(cpu) {
-               count = zpcpu_get_cpu(mp->mnt_ref_pcpu, cpu);
-               printf("%d ", *count);
-               ref += *count;
+               mpcpu = vfs_mount_pcpu_remote(mp, cpu);
+               printf("%d ", mpcpu->mntp_ref);
+               ref += mpcpu->mntp_ref;
        }
        printf("\n");
        printf("    lockref : ");
        lockref = mp->mnt_lockref;
        CPU_FOREACH(cpu) {
-               count = zpcpu_get_cpu(mp->mnt_lockref_pcpu, cpu);
-               printf("%d ", *count);
-               lockref += *count;
+               mpcpu = vfs_mount_pcpu_remote(mp, cpu);
+               printf("%d ", mpcpu->mntp_lockref);
+               lockref += mpcpu->mntp_lockref;
        }
        printf("\n");
        printf("writeopcount: ");
        writeopcount = mp->mnt_writeopcount;
        CPU_FOREACH(cpu) {
-               count = zpcpu_get_cpu(mp->mnt_writeopcount_pcpu, cpu);
-               printf("%d ", *count);
-               writeopcount += *count;
+               mpcpu = vfs_mount_pcpu_remote(mp, cpu);
+               printf("%d ", mpcpu->mntp_writeopcount);
+               writeopcount += mpcpu->mntp_writeopcount;
        }
        printf("\n");
 
@@ -1668,27 +1674,34 @@ vfs_dump_mount_counters(struct mount *mp)
 int
 vfs_mount_fetch_counter(struct mount *mp, enum mount_counter which)
 {
-       int *base, *pcpu;
+       struct mount_pcpu *mpcpu;
        int cpu, sum;
 
        switch (which) {
        case MNT_COUNT_REF:
-               base = &mp->mnt_ref;
-               pcpu = mp->mnt_ref_pcpu;
+               sum = mp->mnt_ref;
                break;
        case MNT_COUNT_LOCKREF:
-               base = &mp->mnt_lockref;
-               pcpu = mp->mnt_lockref_pcpu;
+               sum = mp->mnt_lockref;
                break;
        case MNT_COUNT_WRITEOPCOUNT:
-               base = &mp->mnt_writeopcount;
-               pcpu = mp->mnt_writeopcount_pcpu;
+               sum = mp->mnt_writeopcount;
                break;
        }
 
-       sum = *base;
        CPU_FOREACH(cpu) {
-               sum += *zpcpu_get_cpu(pcpu, cpu);
+               mpcpu = vfs_mount_pcpu_remote(mp, cpu);
+               switch (which) {
+               case MNT_COUNT_REF:
+                       sum += mpcpu->mntp_ref;
+                       break;
+               case MNT_COUNT_LOCKREF:
+                       sum += mpcpu->mntp_lockref;
+                       break;
+               case MNT_COUNT_WRITEOPCOUNT:
+                       sum += mpcpu->mntp_writeopcount;
+                       break;
+               }
        }
        return (sum);
 }
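
A note on the folding in vfs_op_enter() and the summing in
vfs_mount_fetch_counter(): the per-CPU fields are signed deltas and only
their sum is meaningful, because a thread may take a count on one CPU and
drop it on another.  A hypothetical trace:

	/* thread T on CPU 0:  vfs_ref(mp)  ->  CPU 0 mntp_ref == +1      */
	/* T migrates; on CPU 1:  vfs_rel(mp)  ->  CPU 1 mntp_ref == -1   */
	/* vfs_mount_fetch_counter(mp, MNT_COUNT_REF) == mnt_ref + 1 - 1  */

This is also why vfs_assert_mount_counters() bails when mnt_vfs_ops == 0:
only after vfs_op_enter() has folded the deltas into the central counters
must every per-CPU field read zero.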

Modified: head/sys/kern/vfs_subr.c
==============================================================================
--- head/sys/kern/vfs_subr.c    Mon Nov  9 23:00:29 2020        (r367534)
+++ head/sys/kern/vfs_subr.c    Mon Nov  9 23:02:13 2020        (r367535)
@@ -734,17 +734,18 @@ SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_FIRST, vntblinit, NU
 int
 vfs_busy(struct mount *mp, int flags)
 {
+       struct mount_pcpu *mpcpu;
 
        MPASS((flags & ~MBF_MASK) == 0);
        CTR3(KTR_VFS, "%s: mp %p with flags %d", __func__, mp, flags);
 
-       if (vfs_op_thread_enter(mp)) {
+       if (vfs_op_thread_enter(mp, mpcpu)) {
                MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
                MPASS((mp->mnt_kern_flag & MNTK_UNMOUNT) == 0);
                MPASS((mp->mnt_kern_flag & MNTK_REFEXPIRE) == 0);
-               vfs_mp_count_add_pcpu(mp, ref, 1);
-               vfs_mp_count_add_pcpu(mp, lockref, 1);
-               vfs_op_thread_exit(mp);
+               vfs_mp_count_add_pcpu(mpcpu, ref, 1);
+               vfs_mp_count_add_pcpu(mpcpu, lockref, 1);
+               vfs_op_thread_exit(mp, mpcpu);
                if (flags & MBF_MNTLSTLOCK)
                        mtx_unlock(&mountlist_mtx);
                return (0);
@@ -794,15 +795,16 @@ vfs_busy(struct mount *mp, int flags)
 void
 vfs_unbusy(struct mount *mp)
 {
+       struct mount_pcpu *mpcpu;
        int c;
 
        CTR2(KTR_VFS, "%s: mp %p", __func__, mp);
 
-       if (vfs_op_thread_enter(mp)) {
+       if (vfs_op_thread_enter(mp, mpcpu)) {
                MPASS((mp->mnt_kern_flag & MNTK_DRAINING) == 0);
-               vfs_mp_count_sub_pcpu(mp, lockref, 1);
-               vfs_mp_count_sub_pcpu(mp, ref, 1);
-               vfs_op_thread_exit(mp);
+               vfs_mp_count_sub_pcpu(mpcpu, lockref, 1);
+               vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
+               vfs_op_thread_exit(mp, mpcpu);
                return;
        }
 
@@ -6399,18 +6401,19 @@ restart:
 int
 vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp)
 {
+       struct mount_pcpu *mpcpu;
        struct vnode *vp;
        int error;
 
-       if (!vfs_op_thread_enter(mp))
+       if (!vfs_op_thread_enter(mp, mpcpu))
                return (vfs_cache_root_fallback(mp, flags, vpp));
        vp = atomic_load_ptr(&mp->mnt_rootvnode);
        if (vp == NULL || VN_IS_DOOMED(vp)) {
-               vfs_op_thread_exit(mp);
+               vfs_op_thread_exit(mp, mpcpu);
                return (vfs_cache_root_fallback(mp, flags, vpp));
        }
        vrefact(vp);
-       vfs_op_thread_exit(mp);
+       vfs_op_thread_exit(mp, mpcpu);
        error = vn_lock(vp, flags);
        if (error != 0) {
                vrele(vp);
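
Why the vrefact() in the vfs_cache_root() fast path is safe (a hedged
summary of the invariant, not new code):

	/*
	 * writer: vfs_op_enter(mp); clear mnt_rootvnode; vrele() later
	 * reader: vfs_op_thread_enter() -> load mnt_rootvnode -> vrefact()
	 *
	 * The stored root vnode is kept referenced, and the clearing side
	 * must first pass vfs_op_enter(), whose IPI barrier waits out every
	 * thread in this section.  A reader past vfs_op_thread_enter() thus
	 * sees either NULL or a vnode whose reference cannot disappear
	 * underneath it.
	 */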

Modified: head/sys/kern/vfs_vnops.c
==============================================================================
--- head/sys/kern/vfs_vnops.c   Mon Nov  9 23:00:29 2020        (r367534)
+++ head/sys/kern/vfs_vnops.c   Mon Nov  9 23:02:13 2020        (r367535)
@@ -1761,13 +1761,14 @@ vn_closefile(struct file *fp, struct thread *td)
 static int
 vn_start_write_refed(struct mount *mp, int flags, bool mplocked)
 {
+       struct mount_pcpu *mpcpu;
        int error, mflags;
 
        if (__predict_true(!mplocked) && (flags & V_XSLEEP) == 0 &&
-           vfs_op_thread_enter(mp)) {
+           vfs_op_thread_enter(mp, mpcpu)) {
                MPASS((mp->mnt_kern_flag & MNTK_SUSPEND) == 0);
-               vfs_mp_count_add_pcpu(mp, writeopcount, 1);
-               vfs_op_thread_exit(mp);
+               vfs_mp_count_add_pcpu(mpcpu, writeopcount, 1);
+               vfs_op_thread_exit(mp, mpcpu);
                return (0);
        }
 
@@ -1917,15 +1918,16 @@ vn_start_secondary_write(struct vnode *vp, struct moun
 void
 vn_finished_write(struct mount *mp)
 {
+       struct mount_pcpu *mpcpu;
        int c;
 
        if (mp == NULL)
                return;
 
-       if (vfs_op_thread_enter(mp)) {
-               vfs_mp_count_sub_pcpu(mp, writeopcount, 1);
-               vfs_mp_count_sub_pcpu(mp, ref, 1);
-               vfs_op_thread_exit(mp);
+       if (vfs_op_thread_enter(mp, mpcpu)) {
+               vfs_mp_count_sub_pcpu(mpcpu, writeopcount, 1);
+               vfs_mp_count_sub_pcpu(mpcpu, ref, 1);
+               vfs_op_thread_exit(mp, mpcpu);
                return;
        }
 

Modified: head/sys/sys/mount.h
==============================================================================
--- head/sys/sys/mount.h        Mon Nov  9 23:00:29 2020        (r367534)
+++ head/sys/sys/mount.h        Mon Nov  9 23:02:13 2020        (r367535)
@@ -177,6 +177,16 @@ struct vfsopt {
        int     seen;
 };
 
+struct mount_pcpu {
+       int             mntp_thread_in_ops;
+       int             mntp_ref;
+       int             mntp_lockref;
+       int             mntp_writeopcount;
+};
+
+_Static_assert(sizeof(struct mount_pcpu) == 16,
+    "the struct is allocated from pcpu 16 zone");
+
 /*
  * Structure per mounted filesystem.  Each mounted filesystem has an
  * array of operations and an instance record.  The filesystems are
@@ -192,20 +202,23 @@ struct vfsopt {
  *
  */
 struct mount {
-       struct mtx      mnt_mtx;                /* mount structure interlock */
+       int             mnt_vfs_ops;            /* (i) pending vfs ops */
+       int             mnt_kern_flag;          /* (i) kernel only flags */
+       uint64_t        mnt_flag;               /* (i) flags shared with user */
+       struct mount_pcpu *mnt_pcpu;            /* per-CPU data */
+       struct vnode    *mnt_rootvnode;
+       struct vnode    *mnt_vnodecovered;      /* vnode we mounted on */
+       struct vfsops   *mnt_op;                /* operations on fs */
+       struct vfsconf  *mnt_vfc;               /* configuration info */
+       struct mtx __aligned(CACHE_LINE_SIZE)   mnt_mtx; /* mount structure interlock */
        int             mnt_gen;                /* struct mount generation */
 #define        mnt_startzero   mnt_list
        TAILQ_ENTRY(mount) mnt_list;            /* (m) mount list */
-       struct vfsops   *mnt_op;                /* operations on fs */
-       struct vfsconf  *mnt_vfc;               /* configuration info */
-       struct vnode    *mnt_vnodecovered;      /* vnode we mounted on */
        struct vnode    *mnt_syncer;            /* syncer vnode */
        int             mnt_ref;                /* (i) Reference count */
        struct vnodelst mnt_nvnodelist;         /* (i) list of vnodes */
        int             mnt_nvnodelistsize;     /* (i) # of vnodes */
        int             mnt_writeopcount;       /* (i) write syscalls pending */
-       int             mnt_kern_flag;          /* (i) kernel only flags */
-       uint64_t        mnt_flag;               /* (i) flags shared with user */
        struct vfsoptlist *mnt_opt;             /* current mount options */
        struct vfsoptlist *mnt_optnew;          /* new options passed to fs */
        int             mnt_maxsymlinklen;      /* max size of short symlink */
@@ -229,12 +242,6 @@ struct mount {
        struct lock     mnt_explock;            /* vfs_export walkers lock */
        TAILQ_ENTRY(mount) mnt_upper_link;      /* (m) we in the all uppers */
        TAILQ_HEAD(, mount) mnt_uppers;         /* (m) upper mounts over us*/
-       int __aligned(CACHE_LINE_SIZE)  mnt_vfs_ops;/* (i) pending vfs ops */
-       int             *mnt_thread_in_ops_pcpu;
-       int             *mnt_ref_pcpu;
-       int             *mnt_lockref_pcpu;
-       int             *mnt_writeopcount_pcpu;
-       struct vnode    *mnt_rootvnode;
 };
 
 /*
@@ -1054,7 +1061,7 @@ void resume_all_fs(void);
 
 /*
  * Code transitioning mnt_vfs_ops to > 0 issues IPIs until it observes
- * all CPUs not executing code enclosed by mnt_thread_in_ops_pcpu.
+ * all CPUs not executing code enclosed by the per-CPU mntp_thread_in_ops marker.
  *
  * This provides an invariant that by the time the last CPU is observed not
  * executing, everyone else entering will see the counter > 0 and exit.
@@ -1064,52 +1071,58 @@ void resume_all_fs(void);
  * before making any changes or only make changes safe while the section is
  * executed.
  */
+#define        vfs_mount_pcpu(mp)              zpcpu_get(mp->mnt_pcpu)
+#define        vfs_mount_pcpu_remote(mp, cpu)  zpcpu_get_cpu(mp->mnt_pcpu, cpu)
+
 #define vfs_op_thread_entered(mp) ({                           \
        MPASS(curthread->td_critnest > 0);                      \
-       *zpcpu_get(mp->mnt_thread_in_ops_pcpu) == 1;            \
+       struct mount_pcpu *_mpcpu = vfs_mount_pcpu(mp);         \
+       _mpcpu->mntp_thread_in_ops == 1;                        \
 })
 
-#define vfs_op_thread_enter_crit(mp) ({                                \
+#define vfs_op_thread_enter_crit(mp, _mpcpu) ({                        \
        bool _retval_crit = true;                               \
        MPASS(curthread->td_critnest > 0);                      \
-       MPASS(!vfs_op_thread_entered(mp));                      \
-       zpcpu_set_protected(mp->mnt_thread_in_ops_pcpu, 1);     \
+       _mpcpu = vfs_mount_pcpu(mp);                            \
+       MPASS(_mpcpu->mntp_thread_in_ops == 0);                 \
+       _mpcpu->mntp_thread_in_ops = 1;                         \
        __compiler_membar();                                    \
        if (__predict_false(mp->mnt_vfs_ops > 0)) {             \
-               vfs_op_thread_exit_crit(mp);                    \
+               vfs_op_thread_exit_crit(mp, _mpcpu);            \
                _retval_crit = false;                           \
        }                                                       \
        _retval_crit;                                           \
 })
 
-#define vfs_op_thread_enter(mp) ({                             \
+#define vfs_op_thread_enter(mp, _mpcpu) ({                     \
        bool _retval;                                           \
        critical_enter();                                       \
-       _retval = vfs_op_thread_enter_crit(mp);                 \
+       _retval = vfs_op_thread_enter_crit(mp, _mpcpu);         \
        if (__predict_false(!_retval))                          \
                critical_exit();                                \
        _retval;                                                \
 })
 
-#define vfs_op_thread_exit_crit(mp) do {                       \
-       MPASS(vfs_op_thread_entered(mp));                       \
+#define vfs_op_thread_exit_crit(mp, _mpcpu) do {               \
+       MPASS(_mpcpu == vfs_mount_pcpu(mp));                    \
+       MPASS(_mpcpu->mntp_thread_in_ops == 1);                 \
        __compiler_membar();                                    \
-       zpcpu_set_protected(mp->mnt_thread_in_ops_pcpu, 0);     \
+       _mpcpu->mntp_thread_in_ops = 0;                         \
 } while (0)
 
-#define vfs_op_thread_exit(mp) do {                            \
-       vfs_op_thread_exit_crit(mp);                            \
+#define vfs_op_thread_exit(mp, _mpcpu) do {                    \
+       vfs_op_thread_exit_crit(mp, _mpcpu);                    \
        critical_exit();                                        \
 } while (0)
 
-#define vfs_mp_count_add_pcpu(mp, count, val) do {             \
-       MPASS(vfs_op_thread_entered(mp));                       \
-       zpcpu_add_protected(mp->mnt_##count##_pcpu, val);       \
+#define vfs_mp_count_add_pcpu(_mpcpu, count, val) do {         \
+       MPASS(_mpcpu->mntp_thread_in_ops == 1);                 \
+       _mpcpu->mntp_##count += val;                            \
 } while (0)
 
-#define vfs_mp_count_sub_pcpu(mp, count, val) do {             \
-       MPASS(vfs_op_thread_entered(mp));                       \
-       zpcpu_sub_protected(mp->mnt_##count##_pcpu, val);       \
+#define vfs_mp_count_sub_pcpu(_mpcpu, count, val) do {         \
+       MPASS(_mpcpu->mntp_thread_in_ops == 1);                 \
+       _mpcpu->mntp_##count -= val;                            \
 } while (0)
 
 #else /* !_KERNEL */
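
Putting the reworked API together, a minimal consumer mirrors vfs_ref()
from the vfs_mount.c hunk; example_ref() is a hypothetical name and the
slow path is the stock interlock pattern.  Passing _mpcpu through
enter/exit means the per-CPU base is computed once per section rather
than once per macro invocation:

	void
	example_ref(struct mount *mp)
	{
		struct mount_pcpu *mpcpu;

		if (vfs_op_thread_enter(mp, mpcpu)) {
			/* fast path: plain per-CPU add, no atomics */
			vfs_mp_count_add_pcpu(mpcpu, ref, 1);
			vfs_op_thread_exit(mp, mpcpu);
			return;
		}
		/* slow path: vfs_op_enter() is active; use the interlock */
		MNT_ILOCK(mp);
		MNT_REF(mp);
		MNT_IUNLOCK(mp);
	}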