Author: attilio
Date: Tue Feb  9 14:56:10 2010
New Revision: 203704
URL: http://svn.freebsd.org/changeset/base/203704

Log:
  MFC r202889, r202940:
  - Fix a race in sched_switch() of sched_4bsd.
    Block the td_lock when explicitly acquiring sched_lock, in order to
    prevent races with other td_lock contenders.
  - Merge ULE's internal function thread_block_switch() into the global
    thread_lock_block() and make the former's semantics the default for
    thread_lock_block().
  - Split out an invariant in order to have better checks.
  
  Tested by:    Giovanni Trematerra
                <giovanni dot trematerra at gmail dot com>
  Approved by:  re (kib)
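
In rough terms, the sched_4bsd fix keeps the outgoing thread's old td_lock
held but marked as blocked across the switch, so other td_lock contenders
spin on blocked_lock instead of racing in while sched_lock is being taken.
A condensed sketch of the resulting switch-out path (hand-written from the
sched_4bsd.c hunk below, not an excerpt; the function name is made up, the
other identifiers are the kernel's own):

    static void
    switch_out_sketch(struct thread *td, struct thread *newtd)
    {
            struct mtx *tmtx = NULL;

            if (td->td_lock != &sched_lock) {
                    /*
                     * Take sched_lock first, then mark td_lock as blocked
                     * instead of dropping it; thread_lock_block() returns
                     * the old lock pointer.
                     */
                    mtx_lock_spin(&sched_lock);
                    tmtx = thread_lock_block(td);
            }

            /* ... load accounting and runqueue manipulation elided ... */

            /*
             * Hand the still-blocked old lock (or td_lock itself) down to
             * cpu_switch(), which gives it back to td once the context
             * switch has completed, letting contenders that were spinning
             * on blocked_lock proceed safely.
             */
            cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
    }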

Modified:
  stable/7/sys/kern/kern_mutex.c
  stable/7/sys/kern/sched_4bsd.c
  stable/7/sys/kern/sched_ule.c
Directory Properties:
  stable/7/sys/   (props changed)
  stable/7/sys/cddl/contrib/opensolaris/   (props changed)
  stable/7/sys/contrib/dev/acpica/   (props changed)
  stable/7/sys/contrib/pf/   (props changed)

Modified: stable/7/sys/kern/kern_mutex.c
==============================================================================
--- stable/7/sys/kern/kern_mutex.c      Tue Feb  9 14:51:39 2010        (r203703)
+++ stable/7/sys/kern/kern_mutex.c      Tue Feb  9 14:56:10 2010        (r203704)
@@ -557,7 +557,6 @@ thread_lock_block(struct thread *td)
 {
        struct mtx *lock;
 
-       spinlock_enter();
        THREAD_LOCK_ASSERT(td, MA_OWNED);
        lock = td->td_lock;
        td->td_lock = &blocked_lock;
@@ -572,7 +571,6 @@ thread_lock_unblock(struct thread *td, s
        mtx_assert(new, MA_OWNED);
        MPASS(td->td_lock == &blocked_lock);
        atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
-       spinlock_exit();
 }
 
 void

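With spinlock_enter()/spinlock_exit() removed from thread_lock_block() and
thread_lock_unblock(), callers that still need the spin count held across
the blocked window now bump it themselves.  The sched_setcpu() hunk further
down shows the pattern; roughly (comments added here for illustration only):

    spinlock_enter();                   /* keep the spin count up while blocked */
    thread_lock_block(td);              /* td_lock now points at blocked_lock   */
    TDQ_LOCK(tdq);                      /* ordered against other CPUs' locks    */
    thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
    spinlock_exit();
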
Modified: stable/7/sys/kern/sched_4bsd.c
==============================================================================
--- stable/7/sys/kern/sched_4bsd.c      Tue Feb  9 14:51:39 2010        (r203703)
+++ stable/7/sys/kern/sched_4bsd.c      Tue Feb  9 14:56:10 2010        (r203704)
@@ -824,9 +824,11 @@ sched_sleep(struct thread *td)
 void
 sched_switch(struct thread *td, struct thread *newtd, int flags)
 {
+       struct mtx *tmtx;
        struct td_sched *ts;
        struct proc *p;
 
+       tmtx = NULL;
        ts = td->td_sched;
        p = td->td_proc;
 
@@ -835,17 +837,20 @@ sched_switch(struct thread *td, struct t
        /*
         * Switch to the sched lock to fix things up and pick
         * a new thread.
+        * Block the td_lock in order to avoid breaking the critical path.
         */
        if (td->td_lock != &sched_lock) {
                mtx_lock_spin(&sched_lock);
-               thread_unlock(td);
+               tmtx = thread_lock_block(td);
        }
 
        if ((p->p_flag & P_NOLOAD) == 0)
                sched_load_rem();
 
-       if (newtd)
+       if (newtd) {
+               MPASS(newtd->td_lock == &sched_lock);
                newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);
+       }
 
        td->td_lastcpu = td->td_oncpu;
        td->td_flags &= ~TDF_NEEDRESCHED;
@@ -888,8 +893,8 @@ sched_switch(struct thread *td, struct t
                        sched_load_add();
        } else {
                newtd = choosethread();
+               MPASS(newtd->td_lock == &sched_lock);
        }
-       MPASS(newtd->td_lock == &sched_lock);
 
        if (td != newtd) {
 #ifdef HWPMC_HOOKS
@@ -907,7 +912,7 @@ sched_switch(struct thread *td, struct t
                        (*dtrace_vtime_switch_func)(newtd);
 #endif
                 /* I feel sleepy */
-               cpu_switch(td, newtd, td->td_lock);
+               cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
                /*
                 * Where am I?  What year is it?
                 * We are in the same thread that went to sleep above,

Modified: stable/7/sys/kern/sched_ule.c
==============================================================================
--- stable/7/sys/kern/sched_ule.c       Tue Feb  9 14:51:39 2010        (r203703)
+++ stable/7/sys/kern/sched_ule.c       Tue Feb  9 14:56:10 2010        (r203704)
@@ -318,7 +318,6 @@ static void sched_balance_groups(void);
 static void sched_balance_group(struct tdq_group *);
 static void sched_balance_pair(struct tdq *, struct tdq *);
 static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
-static inline struct mtx *thread_block_switch(struct thread *);
 static inline void thread_unblock_switch(struct thread *, struct mtx *);
 static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
 #endif
@@ -989,9 +988,11 @@ sched_setcpu(struct td_sched *ts, int cp
         * The hard case, migration, we need to block the thread first to
         * prevent order reversals with other cpus locks.
         */
+       spinlock_enter();
        thread_lock_block(td);
        TDQ_LOCK(tdq);
        thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
+       spinlock_exit();
        return (tdq);
 }
 
@@ -1789,23 +1790,6 @@ sched_switchin(struct tdq *tdq, struct t
 }
 
 /*
- * Block a thread for switching.  Similar to thread_block() but does not
- * bump the spin count.
- */
-static inline struct mtx *
-thread_block_switch(struct thread *td)
-{
-       struct mtx *lock;
-
-       THREAD_LOCK_ASSERT(td, MA_OWNED);
-       lock = td->td_lock;
-       td->td_lock = &blocked_lock;
-       mtx_unlock_spin(lock);
-
-       return (lock);
-}
-
-/*
  * Handle migration from sched_switch().  This happens only for
  * cpu binding.
  */
@@ -1822,7 +1806,7 @@ sched_switch_migrate(struct tdq *tdq, st
         * not holding either run-queue lock.
         */
        spinlock_enter();
-       thread_block_switch(td);        /* This releases the lock on tdq. */
+       thread_lock_block(td);  /* This releases the lock on tdq. */
 
        /*
         * Acquire both run-queue locks before placing the thread on the new
@@ -1848,7 +1832,8 @@ sched_switch_migrate(struct tdq *tdq, st
 }
 
 /*
- * Release a thread that was blocked with thread_block_switch().
+ * Variadic version of thread_lock_unblock() that does not assume td_lock
+ * is blocked.
  */
 static inline void
 thread_unblock_switch(struct thread *td, struct mtx *mtx)
@@ -1907,7 +1892,7 @@ sched_switch(struct thread *td, struct t
        } else {
                /* This thread must be going to sleep. */
                TDQ_LOCK(tdq);
-               mtx = thread_block_switch(td);
+               mtx = thread_lock_block(td);
                tdq_load_rem(tdq, ts);
        }
        /*