Author: mjg
Date: Thu Mar 16 01:32:56 2017
New Revision: 315341
URL: https://svnweb.freebsd.org/changeset/base/315341

Log:
  MFC r311172,r311194,r311226,r312389,r312390:
  
  mtx: reduce lock accesses
  
  Instead of spuriously re-reading the lock value, read it once.
  
  This change also has the side effect of fixing a performance bug:
  after a failed _mtx_obtain_lock, the re-read could find the lock
  unowned, yet the primitive would still make a trip through the
  turnstile code.
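
  As a minimal sketch of the fixed loop shape (C11 atomics, with
  hypothetical names such as lock_word and UNOWNED; not the kernel
  code): the lock word is read once before the loop, the cached copy v
  is reused, and it is refreshed only after a failed acquire or a state
  change, so a lock observed as unowned is retried directly instead of
  falling through to the turnstile path.

	#include <stdatomic.h>
	#include <stdint.h>

	#define UNOWNED	((uintptr_t)0)

	static void
	lock_hard(_Atomic uintptr_t *lock_word, uintptr_t tid)
	{
		uintptr_t v, expected;

		/* Single read up front; v is a cached snapshot. */
		v = atomic_load_explicit(lock_word, memory_order_relaxed);
		for (;;) {
			if (v == UNOWNED) {
				expected = UNOWNED;
				if (atomic_compare_exchange_strong_explicit(
				    lock_word, &expected, tid,
				    memory_order_acquire,
				    memory_order_relaxed))
					break;		/* acquired */
				v = expected;	/* lost the race; retry */
				continue;
			}
			/* Owned: spin or block here, then refresh v. */
			v = atomic_load_explicit(lock_word,
			    memory_order_relaxed);
		}
	}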
  
  This is a diff reduction against a variant which uses atomic_fcmpset.
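
  For illustration only: C11's compare_exchange has the same contract
  as atomic_fcmpset, writing the observed value back into its expected
  argument on failure, which removes even the failure-path re-read from
  the sketch above (same hypothetical declarations as before):

	v = atomic_load_explicit(lock_word, memory_order_relaxed);
	for (;;) {
		if (v == UNOWNED) {
			/* On failure, v is updated in place with the
			 * observed lock word; no separate re-read. */
			if (atomic_compare_exchange_weak_explicit(
			    lock_word, &v, tid,
			    memory_order_acquire, memory_order_relaxed))
				break;		/* acquired */
			continue;
		}
		/* Owned: spin or block, then refresh v. */
		v = atomic_load_explicit(lock_word, memory_order_relaxed);
	}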
  
  ==
  
  Reduce lock accesses in thread lock similarly to r311172
  
  ==
  
  mtx: plug open-coded mtx_lock access missed in r311172
  
  ==
  
  rwlock: reduce lock accesses similarly to r311172
  
  ==
  
  sx: reduce lock accesses similarly to r311172

Modified:
  stable/11/sys/kern/kern_mutex.c
  stable/11/sys/kern/kern_rwlock.c
  stable/11/sys/kern/kern_sx.c
  stable/11/sys/sys/mutex.h
  stable/11/sys/sys/rwlock.h
  stable/11/sys/sys/sx.h
Directory Properties:
  stable/11/   (props changed)

Modified: stable/11/sys/kern/kern_mutex.c
==============================================================================
--- stable/11/sys/kern/kern_mutex.c     Thu Mar 16 01:04:30 2017        (r315340)
+++ stable/11/sys/kern/kern_mutex.c     Thu Mar 16 01:32:56 2017        (r315341)
@@ -94,8 +94,6 @@ PMC_SOFT_DEFINE( , , lock, failed);
 
 #define        mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)
 
-#define        mtx_owner(m)    ((struct thread *)((m)->mtx_lock & ~MTX_FLAGMASK))
-
 static void    assert_mtx(const struct lock_object *lock, int what);
 #ifdef DDB
 static void    db_show_mtx(const struct lock_object *lock);
@@ -452,8 +450,9 @@ __mtx_lock_sleep(volatile uintptr_t *c, 
        lock_delay_arg_init(&lda, NULL);
 #endif
        m = mtxlock2mtx(c);
+       v = MTX_READ_VALUE(m);
 
-       if (mtx_owned(m)) {
+       if (__predict_false(lv_mtx_owner(v) == (struct thread *)tid)) {
                KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
                    (opts & MTX_RECURSE) != 0,
            ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
@@ -481,8 +480,12 @@ __mtx_lock_sleep(volatile uintptr_t *c, 
 #endif
 
        for (;;) {
-               if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
-                       break;
+               if (v == MTX_UNOWNED) {
+                       if (_mtx_obtain_lock(m, tid))
+                               break;
+                       v = MTX_READ_VALUE(m);
+                       continue;
+               }
 #ifdef KDTRACE_HOOKS
                lda.spin_cnt++;
 #endif
@@ -491,31 +494,30 @@ __mtx_lock_sleep(volatile uintptr_t *c, 
                 * If the owner is running on another CPU, spin until the
                 * owner stops running or the state of the lock changes.
                 */
-               v = m->mtx_lock;
-               if (v != MTX_UNOWNED) {
-                       owner = (struct thread *)(v & ~MTX_FLAGMASK);
-                       if (TD_IS_RUNNING(owner)) {
-                               if (LOCK_LOG_TEST(&m->lock_object, 0))
-                                       CTR3(KTR_LOCK,
-                                           "%s: spinning on %p held by %p",
-                                           __func__, m, owner);
-                               KTR_STATE1(KTR_SCHED, "thread",
-                                   sched_tdname((struct thread *)tid),
-                                   "spinning", "lockname:\"%s\"",
-                                   m->lock_object.lo_name);
-                               while (mtx_owner(m) == owner &&
-                                   TD_IS_RUNNING(owner))
-                                       lock_delay(&lda);
-                               KTR_STATE0(KTR_SCHED, "thread",
-                                   sched_tdname((struct thread *)tid),
-                                   "running");
-                               continue;
-                       }
+               owner = lv_mtx_owner(v);
+               if (TD_IS_RUNNING(owner)) {
+                       if (LOCK_LOG_TEST(&m->lock_object, 0))
+                               CTR3(KTR_LOCK,
+                                   "%s: spinning on %p held by %p",
+                                   __func__, m, owner);
+                       KTR_STATE1(KTR_SCHED, "thread",
+                           sched_tdname((struct thread *)tid),
+                           "spinning", "lockname:\"%s\"",
+                           m->lock_object.lo_name);
+                       do {
+                               lock_delay(&lda);
+                               v = MTX_READ_VALUE(m);
+                               owner = lv_mtx_owner(v);
+                       } while (v != MTX_UNOWNED && TD_IS_RUNNING(owner));
+                       KTR_STATE0(KTR_SCHED, "thread",
+                           sched_tdname((struct thread *)tid),
+                           "running");
+                       continue;
                }
 #endif
 
                ts = turnstile_trywait(&m->lock_object);
-               v = m->mtx_lock;
+               v = MTX_READ_VALUE(m);
 
                /*
                 * Check if the lock has been released while spinning for
@@ -534,7 +536,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, 
                 * chain lock.  If so, drop the turnstile lock and try
                 * again.
                 */
-               owner = (struct thread *)(v & ~MTX_FLAGMASK);
+               owner = lv_mtx_owner(v);
                if (TD_IS_RUNNING(owner)) {
                        turnstile_cancel(ts);
                        continue;
@@ -549,6 +551,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, 
                if ((v & MTX_CONTESTED) == 0 &&
                    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
                        turnstile_cancel(ts);
+                       v = MTX_READ_VALUE(m);
                        continue;
                }
 
@@ -579,6 +582,7 @@ __mtx_lock_sleep(volatile uintptr_t *c, 
                sleep_time += lockstat_nsecs(&m->lock_object);
                sleep_cnt++;
 #endif
+               v = MTX_READ_VALUE(m);
        }
 #ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&m->lock_object);
@@ -636,6 +640,7 @@ _mtx_lock_spin_cookie(volatile uintptr_t
 {
        struct mtx *m;
        struct lock_delay_arg lda;
+       uintptr_t v;
 #ifdef LOCK_PROFILING
        int contested = 0;
        uint64_t waittime = 0;
@@ -662,24 +667,30 @@ _mtx_lock_spin_cookie(volatile uintptr_t
 #ifdef KDTRACE_HOOKS
        spin_time -= lockstat_nsecs(&m->lock_object);
 #endif
+       v = MTX_READ_VALUE(m);
        for (;;) {
-               if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
-                       break;
+               if (v == MTX_UNOWNED) {
+                       if (_mtx_obtain_lock(m, tid))
+                               break;
+                       v = MTX_READ_VALUE(m);
+                       continue;
+               }
                /* Give interrupts a chance while we spin. */
                spinlock_exit();
-               while (m->mtx_lock != MTX_UNOWNED) {
+               do {
                        if (lda.spin_cnt < 10000000) {
                                lock_delay(&lda);
-                               continue;
+                       } else {
+                               lda.spin_cnt++;
+                               if (lda.spin_cnt < 60000000 || kdb_active ||
+                                   panicstr != NULL)
+                                       DELAY(1);
+                               else
+                                       _mtx_lock_spin_failed(m);
+                               cpu_spinwait();
                        }
-                       lda.spin_cnt++;
-                       if (lda.spin_cnt < 60000000 || kdb_active ||
-                           panicstr != NULL)
-                               DELAY(1);
-                       else
-                               _mtx_lock_spin_failed(m);
-                       cpu_spinwait();
-               }
+                       v = MTX_READ_VALUE(m);
+               } while (v != MTX_UNOWNED);
                spinlock_enter();
        }
 #ifdef KDTRACE_HOOKS
@@ -704,7 +715,7 @@ void
 thread_lock_flags_(struct thread *td, int opts, const char *file, int line)
 {
        struct mtx *m;
-       uintptr_t tid;
+       uintptr_t tid, v;
        struct lock_delay_arg lda;
 #ifdef LOCK_PROFILING
        int contested = 0;
@@ -746,10 +757,15 @@ retry:
                            m->lock_object.lo_name, file, line));
                WITNESS_CHECKORDER(&m->lock_object,
                    opts | LOP_NEWORDER | LOP_EXCLUSIVE, file, line, NULL);
+               v = MTX_READ_VALUE(m);
                for (;;) {
-                       if (m->mtx_lock == MTX_UNOWNED && _mtx_obtain_lock(m, tid))
-                               break;
-                       if (m->mtx_lock == tid) {
+                       if (v == MTX_UNOWNED) {
+                               if (_mtx_obtain_lock(m, tid))
+                                       break;
+                               v = MTX_READ_VALUE(m);
+                               continue;
+                       }
+                       if (v == tid) {
                                m->mtx_recurse++;
                                break;
                        }
@@ -760,7 +776,7 @@ retry:
                            &contested, &waittime);
                        /* Give interrupts a chance while we spin. */
                        spinlock_exit();
-                       while (m->mtx_lock != MTX_UNOWNED) {
+                       do {
                                if (lda.spin_cnt < 10000000) {
                                        lock_delay(&lda);
                                } else {
@@ -774,7 +790,8 @@ retry:
                                }
                                if (m != td->td_lock)
                                        goto retry;
-                       }
+                               v = MTX_READ_VALUE(m);
+                       } while (v != MTX_UNOWNED);
                        spinlock_enter();
                }
                if (m == td->td_lock)

Modified: stable/11/sys/kern/kern_rwlock.c
==============================================================================
--- stable/11/sys/kern/kern_rwlock.c    Thu Mar 16 01:04:30 2017        (r315340)
+++ stable/11/sys/kern/kern_rwlock.c    Thu Mar 16 01:32:56 2017        (r315341)
@@ -114,9 +114,12 @@ LOCK_DELAY_SYSINIT_DEFAULT(rw_delay);
  * Return a pointer to the owning thread if the lock is write-locked or
  * NULL if the lock is unlocked or read-locked.
  */
-#define        rw_wowner(rw)                                                   \
-       ((rw)->rw_lock & RW_LOCK_READ ? NULL :                          \
-           (struct thread *)RW_OWNER((rw)->rw_lock))
+
+#define        lv_rw_wowner(v)                                                 \
+       ((v) & RW_LOCK_READ ? NULL :                                    \
+        (struct thread *)RW_OWNER((v)))
+
+#define        rw_wowner(rw)   lv_rw_wowner(RW_READ_VALUE(rw))
 
 /*
  * Returns if a write owner is recursed.  Write ownership is not assured
@@ -397,7 +400,10 @@ __rw_rlock(volatile uintptr_t *c, const 
 
 #ifdef KDTRACE_HOOKS
        all_time -= lockstat_nsecs(&rw->lock_object);
-       state = rw->rw_lock;
+#endif
+       v = RW_READ_VALUE(rw);
+#ifdef KDTRACE_HOOKS
+       state = v;
 #endif
        for (;;) {
                /*
@@ -410,7 +416,6 @@ __rw_rlock(volatile uintptr_t *c, const 
                 * completely unlocked rwlock since such a lock is encoded
                 * as a read lock with no waiters.
                 */
-               v = rw->rw_lock;
                if (RW_CAN_READ(v)) {
                        /*
                         * The RW_LOCK_READ_WAITERS flag should only be set
@@ -426,6 +431,7 @@ __rw_rlock(volatile uintptr_t *c, const 
                                            (void *)(v + RW_ONE_READER));
                                break;
                        }
+                       v = RW_READ_VALUE(rw);
                        continue;
                }
 #ifdef KDTRACE_HOOKS
@@ -453,9 +459,11 @@ __rw_rlock(volatile uintptr_t *c, const 
                                KTR_STATE1(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "spinning",
                                    "lockname:\"%s\"", rw->lock_object.lo_name);
-                               while ((struct thread*)RW_OWNER(rw->rw_lock) ==
-                                   owner && TD_IS_RUNNING(owner))
+                               do {
                                        lock_delay(&lda);
+                                       v = RW_READ_VALUE(rw);
+                                       owner = lv_rw_wowner(v);
+                               } while (owner != NULL && TD_IS_RUNNING(owner));
                                KTR_STATE0(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "running");
                                continue;
@@ -466,11 +474,12 @@ __rw_rlock(volatile uintptr_t *c, const 
                            "spinning", "lockname:\"%s\"",
                            rw->lock_object.lo_name);
                        for (i = 0; i < rowner_loops; i++) {
-                               v = rw->rw_lock;
+                               v = RW_READ_VALUE(rw);
                                if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
                                        break;
                                cpu_spinwait();
                        }
+                       v = RW_READ_VALUE(rw);
 #ifdef KDTRACE_HOOKS
                        lda.spin_cnt += rowner_loops - i;
 #endif
@@ -493,7 +502,7 @@ __rw_rlock(volatile uintptr_t *c, const 
                 * The lock might have been released while we spun, so
                 * recheck its state and restart the loop if needed.
                 */
-               v = rw->rw_lock;
+               v = RW_READ_VALUE(rw);
                if (RW_CAN_READ(v)) {
                        turnstile_cancel(ts);
                        continue;
@@ -531,6 +540,7 @@ __rw_rlock(volatile uintptr_t *c, const 
                        if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                            v | RW_LOCK_READ_WAITERS)) {
                                turnstile_cancel(ts);
+                               v = RW_READ_VALUE(rw);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -556,6 +566,7 @@ __rw_rlock(volatile uintptr_t *c, const 
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from turnstile",
                            __func__, rw);
+               v = RW_READ_VALUE(rw);
        }
 #ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&rw->lock_object);
@@ -639,13 +650,12 @@ _rw_runlock_cookie(volatile uintptr_t *c
        LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);
 
        /* TODO: drop "owner of record" here. */
-
+       x = RW_READ_VALUE(rw);
        for (;;) {
                /*
                 * See if there is more than one read lock held.  If so,
                 * just drop one and return.
                 */
-               x = rw->rw_lock;
                if (RW_READERS(x) > 1) {
                        if (atomic_cmpset_rel_ptr(&rw->rw_lock, x,
                            x - RW_ONE_READER)) {
@@ -656,6 +666,7 @@ _rw_runlock_cookie(volatile uintptr_t *c
                                            (void *)(x - RW_ONE_READER));
                                break;
                        }
+                       x = RW_READ_VALUE(rw);
                        continue;
                }
                /*
@@ -672,6 +683,7 @@ _rw_runlock_cookie(volatile uintptr_t *c
                                            __func__, rw);
                                break;
                        }
+                       x = RW_READ_VALUE(rw);
                        continue;
                }
                /*
@@ -707,6 +719,7 @@ _rw_runlock_cookie(volatile uintptr_t *c
                if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
                    x)) {
                        turnstile_chain_unlock(&rw->lock_object);
+                       x = RW_READ_VALUE(rw);
                        continue;
                }
                if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -772,8 +785,9 @@ __rw_wlock_hard(volatile uintptr_t *c, u
        lock_delay_arg_init(&lda, NULL);
 #endif
        rw = rwlock2rw(c);
+       v = RW_READ_VALUE(rw);
 
-       if (rw_wlocked(rw)) {
+       if (__predict_false(lv_rw_wowner(v) == (struct thread *)tid)) {
                KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
                    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
                    __func__, rw->lock_object.lo_name, file, line));
@@ -789,11 +803,15 @@ __rw_wlock_hard(volatile uintptr_t *c, u
 
 #ifdef KDTRACE_HOOKS
        all_time -= lockstat_nsecs(&rw->lock_object);
-       state = rw->rw_lock;
+       state = v;
 #endif
        for (;;) {
-               if (rw->rw_lock == RW_UNLOCKED && _rw_write_lock(rw, tid))
-                       break;
+               if (v == RW_UNLOCKED) {
+                       if (_rw_write_lock(rw, tid))
+                               break;
+                       v = RW_READ_VALUE(rw);
+                       continue;
+               }
 #ifdef KDTRACE_HOOKS
                lda.spin_cnt++;
 #endif
@@ -808,8 +826,7 @@ __rw_wlock_hard(volatile uintptr_t *c, u
                 * running on another CPU, spin until the owner stops
                 * running or the state of the lock changes.
                 */
-               v = rw->rw_lock;
-               owner = (struct thread *)RW_OWNER(v);
+               owner = lv_rw_wowner(v);
                if (!(v & RW_LOCK_READ) && TD_IS_RUNNING(owner)) {
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
@@ -817,9 +834,11 @@ __rw_wlock_hard(volatile uintptr_t *c, u
                        KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
                            "spinning", "lockname:\"%s\"",
                            rw->lock_object.lo_name);
-                       while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
-                           TD_IS_RUNNING(owner))
+                       do {
                                lock_delay(&lda);
+                               v = RW_READ_VALUE(rw);
+                               owner = lv_rw_wowner(v);
+                       } while (owner != NULL && TD_IS_RUNNING(owner));
                        KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
                            "running");
                        continue;
@@ -829,6 +848,7 @@ __rw_wlock_hard(volatile uintptr_t *c, u
                        if (!(v & RW_LOCK_WRITE_SPINNER)) {
                                if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                                    v | RW_LOCK_WRITE_SPINNER)) {
+                                       v = RW_READ_VALUE(rw);
                                        continue;
                                }
                        }
@@ -843,6 +863,7 @@ __rw_wlock_hard(volatile uintptr_t *c, u
                        }
                        KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
                            "running");
+                       v = RW_READ_VALUE(rw);
 #ifdef KDTRACE_HOOKS
                        lda.spin_cnt += rowner_loops - i;
 #endif
@@ -851,7 +872,7 @@ __rw_wlock_hard(volatile uintptr_t *c, u
                }
 #endif
                ts = turnstile_trywait(&rw->lock_object);
-               v = rw->rw_lock;
+               v = RW_READ_VALUE(rw);
 
 #ifdef ADAPTIVE_RWLOCKS
                /*
@@ -887,6 +908,7 @@ __rw_wlock_hard(volatile uintptr_t *c, u
                                break;
                        }
                        turnstile_cancel(ts);
+                       v = RW_READ_VALUE(rw);
                        continue;
                }
                /*
@@ -898,6 +920,7 @@ __rw_wlock_hard(volatile uintptr_t *c, u
                        if (!atomic_cmpset_ptr(&rw->rw_lock, v,
                            v | RW_LOCK_WRITE_WAITERS)) {
                                turnstile_cancel(ts);
+                               v = RW_READ_VALUE(rw);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -925,6 +948,7 @@ __rw_wlock_hard(volatile uintptr_t *c, u
 #ifdef ADAPTIVE_RWLOCKS
                spintries = 0;
 #endif
+               v = RW_READ_VALUE(rw);
        }
 #ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&rw->lock_object);

Modified: stable/11/sys/kern/kern_sx.c
==============================================================================
--- stable/11/sys/kern/kern_sx.c        Thu Mar 16 01:04:30 2017        (r315340)
+++ stable/11/sys/kern/kern_sx.c        Thu Mar 16 01:32:56 2017        (r315341)
@@ -543,8 +543,10 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
        lock_delay_arg_init(&lda, NULL);
 #endif
 
+       x = SX_READ_VALUE(sx);
+
        /* If we already hold an exclusive lock, then recurse. */
-       if (sx_xlocked(sx)) {
+       if (__predict_false(lv_sx_owner(x) == (struct thread *)tid)) {
                KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
            ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
                    sx->lock_object.lo_name, file, line));
@@ -561,12 +563,15 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
 
 #ifdef KDTRACE_HOOKS
        all_time -= lockstat_nsecs(&sx->lock_object);
-       state = sx->sx_lock;
+       state = x;
 #endif
        for (;;) {
-               if (sx->sx_lock == SX_LOCK_UNLOCKED &&
-                   atomic_cmpset_acq_ptr(&sx->sx_lock, SX_LOCK_UNLOCKED, tid))
-                       break;
+               if (x == SX_LOCK_UNLOCKED) {
+                       if (atomic_cmpset_acq_ptr(&sx->sx_lock, x, tid))
+                               break;
+                       x = SX_READ_VALUE(sx);
+                       continue;
+               }
 #ifdef KDTRACE_HOOKS
                lda.spin_cnt++;
 #endif
@@ -581,11 +586,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                 * running on another CPU, spin until the owner stops
                 * running or the state of the lock changes.
                 */
-               x = sx->sx_lock;
                if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
                        if ((x & SX_LOCK_SHARED) == 0) {
-                               x = SX_OWNER(x);
-                               owner = (struct thread *)x;
+                               owner = lv_sx_owner(x);
                                if (TD_IS_RUNNING(owner)) {
                                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                                CTR3(KTR_LOCK,
@@ -596,9 +599,12 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                                            "lockname:\"%s\"",
                                            sx->lock_object.lo_name);
                                        GIANT_SAVE();
-                                       while (SX_OWNER(sx->sx_lock) == x &&
-                                           TD_IS_RUNNING(owner))
+                                       do {
                                                lock_delay(&lda);
+                                               x = SX_READ_VALUE(sx);
+                                               owner = lv_sx_owner(x);
+                                       } while (owner != NULL &&
+                                                   TD_IS_RUNNING(owner));
                                        KTR_STATE0(KTR_SCHED, "thread",
                                            sched_tdname(curthread), "running");
                                        continue;
@@ -625,6 +631,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                                }
                                KTR_STATE0(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "running");
+                               x = SX_READ_VALUE(sx);
                                if (i != asx_loops)
                                        continue;
                        }
@@ -632,7 +639,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
 #endif
 
                sleepq_lock(&sx->lock_object);
-               x = sx->sx_lock;
+               x = SX_READ_VALUE(sx);
 
                /*
                 * If the lock was released while spinning on the
@@ -681,6 +688,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                                break;
                        }
                        sleepq_release(&sx->lock_object);
+                       x = SX_READ_VALUE(sx);
                        continue;
                }
 
@@ -692,6 +700,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                        if (!atomic_cmpset_ptr(&sx->sx_lock, x,
                            x | SX_LOCK_EXCLUSIVE_WAITERS)) {
                                sleepq_release(&sx->lock_object);
+                               x = SX_READ_VALUE(sx);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
@@ -733,6 +742,7 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
+               x = SX_READ_VALUE(sx);
        }
 #ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&sx->lock_object);
@@ -852,20 +862,18 @@ _sx_slock_hard(struct sx *sx, int opts, 
        lock_delay_arg_init(&lda, NULL);
 #endif
 #ifdef KDTRACE_HOOKS
-       state = sx->sx_lock;
        all_time -= lockstat_nsecs(&sx->lock_object);
 #endif
+       x = SX_READ_VALUE(sx);
+#ifdef KDTRACE_HOOKS
+       state = x;
+#endif
 
        /*
         * As with rwlocks, we don't make any attempt to try to block
         * shared locks once there is an exclusive waiter.
         */
        for (;;) {
-#ifdef KDTRACE_HOOKS
-               lda.spin_cnt++;
-#endif
-               x = sx->sx_lock;
-
                /*
                 * If no other thread has an exclusive lock then try to bump up
                 * the count of sharers.  Since we have to preserve the state
@@ -883,8 +891,13 @@ _sx_slock_hard(struct sx *sx, int opts, 
                                            (void *)(x + SX_ONE_SHARER));
                                break;
                        }
+                       x = SX_READ_VALUE(sx);
                        continue;
                }
+#ifdef KDTRACE_HOOKS
+               lda.spin_cnt++;
+#endif
+
 #ifdef HWPMC_HOOKS
                PMC_SOFT_CALL( , , lock, failed);
 #endif
@@ -898,8 +911,7 @@ _sx_slock_hard(struct sx *sx, int opts, 
                 * changes.
                 */
                if ((sx->lock_object.lo_flags & SX_NOADAPTIVE) == 0) {
-                       x = SX_OWNER(x);
-                       owner = (struct thread *)x;
+                       owner = lv_sx_owner(x);
                        if (TD_IS_RUNNING(owner)) {
                                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                                        CTR3(KTR_LOCK,
@@ -909,9 +921,11 @@ _sx_slock_hard(struct sx *sx, int opts, 
                                    sched_tdname(curthread), "spinning",
                                    "lockname:\"%s\"", sx->lock_object.lo_name);
                                GIANT_SAVE();
-                               while (SX_OWNER(sx->sx_lock) == x &&
-                                   TD_IS_RUNNING(owner))
+                               do {
                                        lock_delay(&lda);
+                                       x = SX_READ_VALUE(sx);
+                                       owner = lv_sx_owner(x);
+                               } while (owner != NULL && TD_IS_RUNNING(owner));
                                KTR_STATE0(KTR_SCHED, "thread",
                                    sched_tdname(curthread), "running");
                                continue;
@@ -924,7 +938,7 @@ _sx_slock_hard(struct sx *sx, int opts, 
                 * start the process of blocking.
                 */
                sleepq_lock(&sx->lock_object);
-               x = sx->sx_lock;
+               x = SX_READ_VALUE(sx);
 
                /*
                 * The lock could have been released while we spun.
@@ -946,6 +960,7 @@ _sx_slock_hard(struct sx *sx, int opts, 
                        owner = (struct thread *)SX_OWNER(x);
                        if (TD_IS_RUNNING(owner)) {
                                sleepq_release(&sx->lock_object);
+                               x = SX_READ_VALUE(sx);
                                continue;
                        }
                }
@@ -960,6 +975,7 @@ _sx_slock_hard(struct sx *sx, int opts, 
                        if (!atomic_cmpset_ptr(&sx->sx_lock, x,
                            x | SX_LOCK_SHARED_WAITERS)) {
                                sleepq_release(&sx->lock_object);
+                               x = SX_READ_VALUE(sx);
                                continue;
                        }
                        if (LOCK_LOG_TEST(&sx->lock_object, 0))
@@ -1000,6 +1016,7 @@ _sx_slock_hard(struct sx *sx, int opts, 
                if (LOCK_LOG_TEST(&sx->lock_object, 0))
                        CTR2(KTR_LOCK, "%s: %p resuming from sleep queue",
                            __func__, sx);
+               x = SX_READ_VALUE(sx);
        }
 #ifdef KDTRACE_HOOKS
        all_time += lockstat_nsecs(&sx->lock_object);
@@ -1034,9 +1051,8 @@ _sx_sunlock_hard(struct sx *sx, const ch
        if (SCHEDULER_STOPPED())
                return;
 
+       x = SX_READ_VALUE(sx);
        for (;;) {
-               x = sx->sx_lock;
-
                /*
                 * We should never have sharers while at least one thread
                 * holds a shared lock.
@@ -1058,6 +1074,8 @@ _sx_sunlock_hard(struct sx *sx, const ch
                                            (void *)(x - SX_ONE_SHARER));
                                break;
                        }
+
+                       x = SX_READ_VALUE(sx);
                        continue;
                }
 
@@ -1074,6 +1092,7 @@ _sx_sunlock_hard(struct sx *sx, const ch
                                            __func__, sx);
                                break;
                        }
+                       x = SX_READ_VALUE(sx);
                        continue;
                }
 
@@ -1095,6 +1114,7 @@ _sx_sunlock_hard(struct sx *sx, const ch
                    SX_SHARERS_LOCK(1) | SX_LOCK_EXCLUSIVE_WAITERS,
                    SX_LOCK_UNLOCKED)) {
                        sleepq_release(&sx->lock_object);
+                       x = SX_READ_VALUE(sx);
                        continue;
                }
                if (LOCK_LOG_TEST(&sx->lock_object, 0))

Modified: stable/11/sys/sys/mutex.h
==============================================================================
--- stable/11/sys/sys/mutex.h   Thu Mar 16 01:04:30 2017        (r315340)
+++ stable/11/sys/sys/mutex.h   Thu Mar 16 01:32:56 2017        (r315341)
@@ -420,9 +420,15 @@ extern struct mtx_pool *mtxpool_sleep;
        _sleep((chan), &(mtx)->lock_object, (pri), (wmesg),             \
            tick_sbt * (timo), 0, C_HARDCLOCK)
 
+#define        MTX_READ_VALUE(m)       ((m)->mtx_lock)
+
 #define        mtx_initialized(m)      lock_initialized(&(m)->lock_object)
 
-#define mtx_owned(m)   (((m)->mtx_lock & ~MTX_FLAGMASK) == (uintptr_t)curthread)
+#define lv_mtx_owner(v)        ((struct thread *)((v) & ~MTX_FLAGMASK))
+
+#define mtx_owner(m)   lv_mtx_owner(MTX_READ_VALUE(m))
+
+#define mtx_owned(m)   (mtx_owner(m) == curthread)
 
 #define mtx_recursed(m)        ((m)->mtx_recurse != 0)
 

Modified: stable/11/sys/sys/rwlock.h
==============================================================================
--- stable/11/sys/sys/rwlock.h  Thu Mar 16 01:04:30 2017        (r315340)
+++ stable/11/sys/sys/rwlock.h  Thu Mar 16 01:32:56 2017        (r315341)
@@ -76,6 +76,8 @@
 
 #define        rw_recurse      lock_object.lo_data
 
+#define        RW_READ_VALUE(x)        ((x)->rw_lock)
+
 /* Very simple operations on rw_lock. */
 
 /* Try to obtain a write lock once. */

Modified: stable/11/sys/sys/sx.h
==============================================================================
--- stable/11/sys/sys/sx.h      Thu Mar 16 01:04:30 2017        (r315340)
+++ stable/11/sys/sys/sx.h      Thu Mar 16 01:32:56 2017        (r315341)
@@ -88,6 +88,11 @@
 
 #define        sx_recurse      lock_object.lo_data
 
+#define        SX_READ_VALUE(sx)       ((sx)->sx_lock)
+
+#define        lv_sx_owner(v) \
+       ((v & SX_LOCK_SHARED) ? NULL : (struct thread *)SX_OWNER(v))
+
 /*
  * Function prototipes.  Routines that start with an underscore are not part
  * of the public interface and are wrappered with a macro.
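
The three header diffs above share one layering, sketched here with a
hypothetical lock type and names rather than the kernel's: a
*_READ_VALUE() macro performs the single read of the lock word, an
lv_*() variant decodes an already-fetched value, and the old
convenience macros become thin wrappers over the two, so hot paths can
decode a cached snapshot without touching the lock word again.

	#define LOCK_READ_VALUE(l)	((l)->lk_word)	/* the one read */
	#define lv_lock_owner(v)	((struct thread *)((v) & ~LK_FLAGMASK))
	#define lock_owner(l)		lv_lock_owner(LOCK_READ_VALUE(l))
	#define lock_owned(l)		(lock_owner(l) == curthread)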