Author: sbruno
Date: Fri Feb 13 18:45:44 2015
New Revision: 278693
URL: https://svnweb.freebsd.org/changeset/base/278693

Log:
  Revert r278650.  Definite layer 8 bug.
  
  Submitted by: dhw and Thomas Mueller <tmuel...@sysgo.com>
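
  For reference, r278650 had bracketed the adaptive-spin loops in the lock
  primitives (and the callout dispatch path) with KTR_SCHED state events so
  that schedgraph.py could draw "spinning" intervals.  A minimal sketch of
  that pattern, pieced together from the hunks below (a hypothetical
  fragment, not a verbatim excerpt of any one file, assuming <sys/ktr.h>
  and <sys/sched.h> are included):

      /* Mark this thread as "spinning" on the lock for KTR_SCHED/schedgraph. */
      KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread), "spinning",
          "lockname:\"%s\"", lk->lock_object.lo_name);
      /* Adaptively spin while the owner keeps running on another CPU. */
      while (LK_HOLDER(lk->lk_lock) == (uintptr_t)owner && TD_IS_RUNNING(owner))
          cpu_spinwait();
      /* Owner left the CPU (or dropped the lock); mark us "running" again. */
      KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread), "running");

  This revert removes those events again, along with the matching
  "spinning" color entry in schedgraph.py.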

Modified:
  stable/10/sys/kern/kern_lock.c
  stable/10/sys/kern/kern_mutex.c
  stable/10/sys/kern/kern_rwlock.c
  stable/10/sys/kern/kern_sx.c
  stable/10/sys/kern/kern_timeout.c
  stable/10/tools/sched/schedgraph.py
  stable/10/usr.bin/man/man.sh
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/kern/kern_lock.c
==============================================================================
--- stable/10/sys/kern/kern_lock.c      Fri Feb 13 18:37:22 2015        (r278692)
+++ stable/10/sys/kern/kern_lock.c      Fri Feb 13 18:45:44 2015        (r278693)
@@ -583,9 +583,6 @@ __lockmgr_args(struct lock *lk, u_int fl
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, lk, owner);
-                               KTR_STATE1(KTR_SCHED, "thread",
-                                   sched_tdname(td), "spinning",
-                                   "lockname:\"%s\"", lk->lock_object.lo_name);
 
                                /*
                                 * If we are holding also an interlock drop it
@@ -601,16 +598,11 @@ __lockmgr_args(struct lock *lk, u_int fl
                                while (LK_HOLDER(lk->lk_lock) ==
                                    (uintptr_t)owner && TD_IS_RUNNING(owner))
                                        cpu_spinwait();
-                               KTR_STATE0(KTR_SCHED, "thread",
-                                   sched_tdname(td), "running");
                                GIANT_RESTORE();
                                continue;
                        } else if (LK_CAN_ADAPT(lk, flags) &&
                            (x & LK_SHARE) != 0 && LK_SHARERS(x) &&
                            spintries < alk_retries) {
-                               KTR_STATE1(KTR_SCHED, "thread",
-                                   sched_tdname(td), "spinning",
-                                   "lockname:\"%s\"", lk->lock_object.lo_name);
                                if (flags & LK_INTERLOCK) {
                                        class->lc_unlock(ilk);
                                        flags &= ~LK_INTERLOCK;
@@ -628,8 +620,6 @@ __lockmgr_args(struct lock *lk, u_int fl
                                                break;
                                        cpu_spinwait();
                                }
-                               KTR_STATE0(KTR_SCHED, "thread",
-                                   sched_tdname(td), "running");
                                GIANT_RESTORE();
                                if (i != alk_loops)
                                        continue;
@@ -825,9 +815,6 @@ __lockmgr_args(struct lock *lk, u_int fl
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, lk, owner);
-                               KTR_STATE1(KTR_SCHED, "thread",
-                                   sched_tdname(td), "spinning",
-                                   "lockname:\"%s\"", lk->lock_object.lo_name);
 
                                /*
                                 * If we are holding also an interlock drop it
@@ -843,8 +830,6 @@ __lockmgr_args(struct lock *lk, u_int fl
                                while (LK_HOLDER(lk->lk_lock) ==
                                    (uintptr_t)owner && TD_IS_RUNNING(owner))
                                        cpu_spinwait();
-                               KTR_STATE0(KTR_SCHED, "thread",
-                                   sched_tdname(td), "running");
                                GIANT_RESTORE();
                                continue;
                        } else if (LK_CAN_ADAPT(lk, flags) &&
@@ -854,9 +839,6 @@ __lockmgr_args(struct lock *lk, u_int fl
                                    !atomic_cmpset_ptr(&lk->lk_lock, x,
                                    x | LK_EXCLUSIVE_SPINNERS))
                                        continue;
-                               KTR_STATE1(KTR_SCHED, "thread",
-                                   sched_tdname(td), "spinning",
-                                   "lockname:\"%s\"", lk->lock_object.lo_name);
                                if (flags & LK_INTERLOCK) {
                                        class->lc_unlock(ilk);
                                        flags &= ~LK_INTERLOCK;
@@ -873,8 +855,6 @@ __lockmgr_args(struct lock *lk, u_int fl
                                                break;
                                        cpu_spinwait();
                                }
-                               KTR_STATE0(KTR_SCHED, "thread",
-                                   sched_tdname(td), "running");
                                GIANT_RESTORE();
                                if (i != alk_loops)
                                        continue;

Modified: stable/10/sys/kern/kern_mutex.c
==============================================================================
--- stable/10/sys/kern/kern_mutex.c     Fri Feb 13 18:37:22 2015        (r278692)
+++ stable/10/sys/kern/kern_mutex.c     Fri Feb 13 18:45:44 2015        (r278693)
@@ -436,10 +436,6 @@ __mtx_lock_sleep(volatile uintptr_t *c, 
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, m, owner);
-                               KTR_STATE1(KTR_SCHED, "thread",
-                                   sched_tdname((struct thread *)tid),
-                                   "spinning", "lockname:\"%s\"",
-                                   m->lock_object.lo_name);
                                while (mtx_owner(m) == owner &&
                                    TD_IS_RUNNING(owner)) {
                                        cpu_spinwait();
@@ -447,9 +443,6 @@ __mtx_lock_sleep(volatile uintptr_t *c, 
                                        spin_cnt++;
 #endif
                                }
-                               KTR_STATE0(KTR_SCHED, "thread",
-                                   sched_tdname((struct thread *)tid),
-                                   "running");
                                continue;
                        }
                }
@@ -586,8 +579,6 @@ _mtx_lock_spin_cookie(volatile uintptr_t
 
        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
-       KTR_STATE1(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
-           "spinning", "lockname:\"%s\"", m->lock_object.lo_name);
 
 #ifdef HWPMC_HOOKS
        PMC_SOFT_CALL( , , lock, failed);
@@ -613,8 +604,6 @@ _mtx_lock_spin_cookie(volatile uintptr_t
 
        if (LOCK_LOG_TEST(&m->lock_object, opts))
                CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
-       KTR_STATE0(KTR_SCHED, "thread", sched_tdname((struct thread *)tid),
-           "running");
 
        LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
            contested, waittime, (file), (line));

Modified: stable/10/sys/kern/kern_rwlock.c
==============================================================================
--- stable/10/sys/kern/kern_rwlock.c    Fri Feb 13 18:37:22 2015        (r278692)
+++ stable/10/sys/kern/kern_rwlock.c    Fri Feb 13 18:45:44 2015        (r278693)
@@ -44,7 +44,6 @@ __FBSDID("$FreeBSD$");
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/rwlock.h>
-#include <sys/sched.h>
 #include <sys/sysctl.h>
 #include <sys/systm.h>
 #include <sys/turnstile.h>
@@ -424,9 +423,6 @@ __rw_rlock(volatile uintptr_t *c, const 
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, rw, owner);
-                               KTR_STATE1(KTR_SCHED, "thread",
-                                   sched_tdname(curthread), "spinning",
-                                   "lockname:\"%s\"", rw->lock_object.lo_name);
                                while ((struct thread*)RW_OWNER(rw->rw_lock) ==
                                    owner && TD_IS_RUNNING(owner)) {
                                        cpu_spinwait();
@@ -434,23 +430,16 @@ __rw_rlock(volatile uintptr_t *c, const 
                                        spin_cnt++;
 #endif
                                }
-                               KTR_STATE0(KTR_SCHED, "thread",
-                                   sched_tdname(curthread), "running");
                                continue;
                        }
                } else if (spintries < rowner_retries) {
                        spintries++;
-                       KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
-                           "spinning", "lockname:\"%s\"",
-                           rw->lock_object.lo_name);
                        for (i = 0; i < rowner_loops; i++) {
                                v = rw->rw_lock;
                                if ((v & RW_LOCK_READ) == 0 || RW_CAN_READ(v))
                                        break;
                                cpu_spinwait();
                        }
-                       KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
-                           "running");
                        if (i != rowner_loops)
                                continue;
                }
@@ -770,9 +759,6 @@ __rw_wlock_hard(volatile uintptr_t *c, u
                        if (LOCK_LOG_TEST(&rw->lock_object, 0))
                                CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
                                    __func__, rw, owner);
-                       KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
-                           "spinning", "lockname:\"%s\"",
-                           rw->lock_object.lo_name);
                        while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
                            TD_IS_RUNNING(owner)) {
                                cpu_spinwait();
@@ -780,8 +766,6 @@ __rw_wlock_hard(volatile uintptr_t *c, u
                                spin_cnt++;
 #endif
                        }
-                       KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
-                           "running");
                        continue;
                }
                if ((v & RW_LOCK_READ) && RW_READERS(v) &&
@@ -793,16 +777,11 @@ __rw_wlock_hard(volatile uintptr_t *c, u
                                }
                        }
                        spintries++;
-                       KTR_STATE1(KTR_SCHED, "thread", sched_tdname(curthread),
-                           "spinning", "lockname:\"%s\"",
-                           rw->lock_object.lo_name);
                        for (i = 0; i < rowner_loops; i++) {
                                if ((rw->rw_lock & RW_LOCK_WRITE_SPINNER) == 0)
                                        break;
                                cpu_spinwait();
                        }
-                       KTR_STATE0(KTR_SCHED, "thread", sched_tdname(curthread),
-                           "running");
 #ifdef KDTRACE_HOOKS
                        spin_cnt += rowner_loops - i;
 #endif

Modified: stable/10/sys/kern/kern_sx.c
==============================================================================
--- stable/10/sys/kern/kern_sx.c        Fri Feb 13 18:37:22 2015        (r278692)
+++ stable/10/sys/kern/kern_sx.c        Fri Feb 13 18:45:44 2015        (r278693)
@@ -51,7 +51,6 @@ __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
-#include <sys/sched.h>
 #include <sys/sleepqueue.h>
 #include <sys/sx.h>
 #include <sys/sysctl.h>
@@ -561,10 +560,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                                                CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                                    __func__, sx, owner);
-                                       KTR_STATE1(KTR_SCHED, "thread",
-                                           sched_tdname(curthread), "spinning",
-                                           "lockname:\"%s\"",
-                                           sx->lock_object.lo_name);
                                        GIANT_SAVE();
                                        while (SX_OWNER(sx->sx_lock) == x &&
                                            TD_IS_RUNNING(owner)) {
@@ -573,14 +568,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                                                spin_cnt++;
 #endif
                                        }
-                                       KTR_STATE0(KTR_SCHED, "thread",
-                                           sched_tdname(curthread), "running");
                                        continue;
                                }
                        } else if (SX_SHARERS(x) && spintries < asx_retries) {
-                               KTR_STATE1(KTR_SCHED, "thread",
-                                   sched_tdname(curthread), "spinning",
-                                   "lockname:\"%s\"", sx->lock_object.lo_name);
                                GIANT_SAVE();
                                spintries++;
                                for (i = 0; i < asx_loops; i++) {
@@ -597,8 +587,6 @@ _sx_xlock_hard(struct sx *sx, uintptr_t 
                                        spin_cnt++;
 #endif
                                }
-                               KTR_STATE0(KTR_SCHED, "thread",
-                                   sched_tdname(curthread), "running");
                                if (i != asx_loops)
                                        continue;
                        }
@@ -861,9 +849,6 @@ _sx_slock_hard(struct sx *sx, int opts, 
                                        CTR3(KTR_LOCK,
                                            "%s: spinning on %p held by %p",
                                            __func__, sx, owner);
-                               KTR_STATE1(KTR_SCHED, "thread",
-                                   sched_tdname(curthread), "spinning",
-                                   "lockname:\"%s\"", sx->lock_object.lo_name);
                                GIANT_SAVE();
                                while (SX_OWNER(sx->sx_lock) == x &&
                                    TD_IS_RUNNING(owner)) {
@@ -872,8 +857,6 @@ _sx_slock_hard(struct sx *sx, int opts, 
 #endif
                                        cpu_spinwait();
                                }
-                               KTR_STATE0(KTR_SCHED, "thread",
-                                   sched_tdname(curthread), "running");
                                continue;
                        }
                }

Modified: stable/10/sys/kern/kern_timeout.c
==============================================================================
--- stable/10/sys/kern/kern_timeout.c   Fri Feb 13 18:37:22 2015        (r278692)
+++ stable/10/sys/kern/kern_timeout.c   Fri Feb 13 18:45:44 2015        (r278693)
@@ -150,7 +150,6 @@ struct callout_cpu {
        sbintime_t              cc_lastscan;
        void                    *cc_cookie;
        u_int                   cc_bucket;
-       char                    cc_ktr_event_name[20];
 };
 
 #define        cc_exec_curr            cc_exec_entity[0].cc_curr
@@ -189,7 +188,7 @@ struct callout_cpu cc_cpu;
 
 static int timeout_cpu;
 
-static void    callout_cpu_init(struct callout_cpu *cc, int cpu);
+static void    callout_cpu_init(struct callout_cpu *cc);
 static void    softclock_call_cc(struct callout *c, struct callout_cpu *cc,
 #ifdef CALLOUT_PROFILING
                    int *mpcalls, int *lockcalls, int *gcalls,
@@ -284,7 +283,7 @@ callout_callwheel_init(void *dummy)
        cc = CC_CPU(timeout_cpu);
        cc->cc_callout = malloc(ncallout * sizeof(struct callout),
            M_CALLOUT, M_WAITOK);
-       callout_cpu_init(cc, timeout_cpu);
+       callout_cpu_init(cc);
 }
 SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL);
 
@@ -292,7 +291,7 @@ SYSINIT(callwheel_init, SI_SUB_CPU, SI_O
  * Initialize the per-cpu callout structures.
  */
 static void
-callout_cpu_init(struct callout_cpu *cc, int cpu)
+callout_cpu_init(struct callout_cpu *cc)
 {
        struct callout *c;
        int i;
@@ -307,8 +306,6 @@ callout_cpu_init(struct callout_cpu *cc,
        cc->cc_firstevent = INT64_MAX;
        for (i = 0; i < 2; i++)
                cc_cce_cleanup(cc, i);
-       snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name),
-           "callwheel cpu %d", cpu);
        if (cc->cc_callout == NULL)     /* Only cpu0 handles timeout(9) */
                return;
        for (i = 0; i < ncallout; i++) {
@@ -370,7 +367,7 @@ start_softclock(void *dummy)
                        continue;
                cc = CC_CPU(cpu);
                cc->cc_callout = NULL;  /* Only cpu0 handles timeout(9). */
-               callout_cpu_init(cc, cpu);
+               callout_cpu_init(cc);
                if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
                    INTR_MPSAFE, &cc->cc_cookie))
                        panic("died while creating standard software ithreads");
@@ -677,8 +674,6 @@ softclock_call_cc(struct callout *c, str
                CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
                    c, c_func, c_arg);
        }
-       KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running",
-           "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct);
 #if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING)
        sbt1 = sbinuptime();
 #endif
@@ -701,7 +696,6 @@ softclock_call_cc(struct callout *c, str
                lastfunc = c_func;
        }
 #endif
-       KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle");
        CTR1(KTR_CALLOUT, "callout %p finished", c);
        if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
                class->lc_unlock(c_lock);

Modified: stable/10/tools/sched/schedgraph.py
==============================================================================
--- stable/10/tools/sched/schedgraph.py Fri Feb 13 18:37:22 2015        (r278692)
+++ stable/10/tools/sched/schedgraph.py Fri Feb 13 18:45:44 2015        (r278693)
@@ -70,7 +70,6 @@ eventcolors = [
        ("count",       "red"),
        ("running",     "green"),
        ("idle",        "grey"),
-       ("spinning",    "red"),
        ("yielding",    "yellow"),
        ("swapped",     "violet"),
        ("suspended",   "purple"),
@@ -81,6 +80,8 @@ eventcolors = [
        ("runq rem",    "yellow"),
        ("thread exit", "grey"),
        ("proc exit",   "grey"),
+       ("callwheel idle", "grey"),
+       ("callout running", "green"),
        ("lock acquire", "blue"),
        ("lock contest", "purple"),
        ("failed lock try", "red"),
@@ -855,7 +856,7 @@ class EventSource:
                return (Y_EVENTSOURCE)
 
        def eventat(self, i):
-               if (i >= len(self.events) or i < 0):
+               if (i >= len(self.events)):
                        return (None)
                event = self.events[i]
                return (event)
@@ -902,6 +903,7 @@ class KTRFile:
                self.timestamp_f = None
                self.timestamp_l = None
                self.locks = {}
+               self.callwheels = {}
                self.ticks = {}
                self.load = {}
                self.crit = {}

Modified: stable/10/usr.bin/man/man.sh
==============================================================================
--- stable/10/usr.bin/man/man.sh        Fri Feb 13 18:37:22 2015        (r278692)
+++ stable/10/usr.bin/man/man.sh        Fri Feb 13 18:45:44 2015        (r278693)
@@ -276,8 +276,11 @@ man_check_for_so() {
        return 0
 }
 
+# Usage: man_display_page
+# Display either the manpage or catpage depending on the use_cat variable
 man_display_page() {
-       local IFS pipeline preconv_enc testline
+       local EQN NROFF PIC TBL TROFF REFER VGRIND
+       local IFS l nroff_dev pipeline preproc_arg tool
 
        # We are called with IFS set to colon. This causes really weird
        # things to happen for the variables that have spaces in them.
@@ -309,49 +312,6 @@ man_display_page() {
                return
        fi
 
-       case "${manpage}" in
-       *.${man_charset}/*)
-               case "$man_charset" in
-               ISO8859-1) preconv_enc="latin-1" ;;
-               ISO8859-15) preconv_enc="latin-1" ;;
-               UTF-8) preconv_enc="utf-8" ;;
-               esac
-               ;;
-       esac
-
-       if [ -n "$preconv_enc" ]; then
-               pipeline="preconv -e $preconv_enc |"
-       fi
-       testline="$pipeline mandoc -Tlint -Werror 2>/dev/null"
-       pipeline="$pipeline mandoc -Tlocale | $MANPAGER"
-
-       if ! eval "$cattool $manpage | $testline" ;then
-               if which -s groff2; then
-                       man_display_page_groff
-               else
-                       echo "This manpage needs groff(1) to be rendered" >&2
-                       echo "First install groff(1): " >&2
-                       echo "pkg install groff " >&2
-                       ret=1
-               fi
-               return
-       fi
-
-       if [ $debug -gt 0 ]; then
-               decho "Command: $cattool $manpage | $pipeline"
-               ret=0
-       else
-               eval "$cattool $manpage | $pipeline"
-               ret=$?
-       fi
-}
-
-# Usage: man_display_page
-# Display either the manpage or catpage depending on the use_cat variable
-man_display_page_groff() {
-       local EQN NROFF PIC TBL TROFF REFER VGRIND
-       local IFS l nroff_dev pipeline preproc_arg tool
-
        # So, we really do need to parse the manpage. First, figure out the
        # device flag (-T) we have to pass to eqn(1) and groff(1). Then,
        # setup the pipeline of commands based on the user's request.