Author: mmacy
Date: Tue Jun  5 04:26:40 2018
New Revision: 334647
URL: https://svnweb.freebsd.org/changeset/base/334647

Log:
  hwpmc: log name->pid, name->tid mappings
  
  By logging all threads and processes, 'pmc filter' can now filter
  on process or thread name, relieving the user of the burden of
  determining which tid or pid was which when the sample was taken.
  
  % pmc filter -T if_io_tqg -P nginx pmc.log pmc-iflib.log
  
  % pmc filter -x -T idle pmc.log pmc-noidle.log
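
As a rough illustration only (not part of this commit), the new PROC_CREATE
and THR_CREATE records let any pmclog(3) consumer rebuild the pid->name and
tid->name mappings for itself, along the lines of the sketch below.  The
helper name dump_name_records is invented for the example; it assumes the
struct pmclog_ev fields added in this revision, omits error handling, and
would be linked against libpmc.

    #include <fcntl.h>
    #include <stdio.h>

    #include <pmc.h>
    #include <pmclog.h>

    /* Print every process/thread name record found in a pmc(8) log file. */
    static void
    dump_name_records(const char *path)
    {
            struct pmclog_ev ev;
            void *ps;
            int fd;

            fd = open(path, O_RDONLY);
            ps = pmclog_open(fd);
            while (pmclog_read(ps, &ev) == 0) {
                    if (ev.pl_type == PMCLOG_TYPE_PROC_CREATE)
                            printf("pid %d -> %s\n",
                                (int)ev.pl_u.pl_pc.pl_pid,
                                ev.pl_u.pl_pc.pl_pcomm);
                    else if (ev.pl_type == PMCLOG_TYPE_THR_CREATE)
                            printf("tid %d (pid %d) -> %s\n",
                                (int)ev.pl_u.pl_tc.pl_tid,
                                (int)ev.pl_u.pl_tc.pl_pid,
                                ev.pl_u.pl_tc.pl_tdname);
            }
            pmclog_close(ps);
    }

'pmc filter' itself consumes the same records (see cmd_pmc_filter.cc below)
to translate the -P/-T name lists into pid/tid matches.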

Added:
  head/usr.sbin/pmc/cmd_pmc_filter.cc
     - copied, changed from r334645, head/usr.sbin/pmc/cmd_pmc_filter.c
Deleted:
  head/usr.sbin/pmc/cmd_pmc_filter.c
Modified:
  head/lib/libpmc/pmclog.c
  head/lib/libpmc/pmclog.h
  head/sys/dev/hwpmc/hwpmc_logging.c
  head/sys/dev/hwpmc/hwpmc_mod.c
  head/sys/kern/kern_kthread.c
  head/sys/kern/kern_thr.c
  head/sys/kern/kern_thread.c
  head/sys/sys/pmc.h
  head/sys/sys/pmckern.h
  head/sys/sys/pmclog.h
  head/usr.sbin/Makefile
  head/usr.sbin/pmc/Makefile   (contents, props changed)
  head/usr.sbin/pmc/cmd_pmc.h   (contents, props changed)

Modified: head/lib/libpmc/pmclog.c
==============================================================================
--- head/lib/libpmc/pmclog.c    Tue Jun  5 01:05:58 2018        (r334646)
+++ head/lib/libpmc/pmclog.c    Tue Jun  5 04:26:40 2018        (r334647)
@@ -404,6 +404,19 @@ pmclog_get_event(void *cookie, char **data, ssize_t *l
        case PMCLOG_TYPE_USERDATA:
                PMCLOG_READ32(le,ev->pl_u.pl_u.pl_userdata);
                break;
+       case PMCLOG_TYPE_THR_CREATE:
+               PMCLOG_READ32(le,ev->pl_u.pl_tc.pl_tid);
+               PMCLOG_READ32(le,ev->pl_u.pl_tc.pl_pid);
+               PMCLOG_READ32(le,noop);
+               memcpy(ev->pl_u.pl_tc.pl_tdname, le, MAXCOMLEN+1);
+               break;
+       case PMCLOG_TYPE_THR_EXIT:
+               PMCLOG_READ32(le,ev->pl_u.pl_te.pl_tid);
+               break;
+       case PMCLOG_TYPE_PROC_CREATE:
+               PMCLOG_READ32(le,ev->pl_u.pl_pc.pl_pid);
+               memcpy(ev->pl_u.pl_pc.pl_pcomm, le, MAXCOMLEN+1);
+               break;
        default:        /* unknown record type */
                ps->ps_state = PL_STATE_ERROR;
                ev->pl_state = PMCLOG_ERROR;

Modified: head/lib/libpmc/pmclog.h
==============================================================================
--- head/lib/libpmc/pmclog.h    Tue Jun  5 01:05:58 2018        (r334646)
+++ head/lib/libpmc/pmclog.h    Tue Jun  5 04:26:40 2018        (r334647)
@@ -120,6 +120,11 @@ struct pmclog_ev_proccsw {
        pmc_value_t     pl_value;
 };
 
+struct pmclog_ev_proccreate {
+       pid_t           pl_pid;
+       char            pl_pcomm[MAXCOMLEN+1];
+};
+
 struct pmclog_ev_procexec {
        pid_t           pl_pid;
        pmc_id_t        pl_pmcid;
@@ -142,6 +147,16 @@ struct pmclog_ev_sysexit {
        pid_t           pl_pid;
 };
 
+struct pmclog_ev_threadcreate {
+       pid_t           pl_tid;
+       pid_t           pl_pid;
+       char            pl_tdname[MAXCOMLEN+1];
+};
+
+struct pmclog_ev_threadexit {
+       pid_t           pl_tid;
+};
+
 struct pmclog_ev_userdata {
        uint32_t        pl_userdata;
 };
@@ -166,10 +181,13 @@ struct pmclog_ev {
                struct pmclog_ev_pmcattach      pl_t;
                struct pmclog_ev_pmcdetach      pl_d;
                struct pmclog_ev_proccsw        pl_c;
+               struct pmclog_ev_proccreate     pl_pc;
                struct pmclog_ev_procexec       pl_x;
                struct pmclog_ev_procexit       pl_e;
                struct pmclog_ev_procfork       pl_f;
                struct pmclog_ev_sysexit        pl_se;
+               struct pmclog_ev_threadcreate   pl_tc;
+               struct pmclog_ev_threadexit     pl_te;
                struct pmclog_ev_userdata       pl_u;
        } pl_u;
 };

Modified: head/sys/dev/hwpmc/hwpmc_logging.c
==============================================================================
--- head/sys/dev/hwpmc/hwpmc_logging.c  Tue Jun  5 01:05:58 2018        (r334646)
+++ head/sys/dev/hwpmc/hwpmc_logging.c  Tue Jun  5 04:26:40 2018        (r334647)
@@ -229,7 +229,7 @@ static void pmclog_loop(void *arg);
 static void pmclog_release(struct pmc_owner *po);
 static uint32_t *pmclog_reserve(struct pmc_owner *po, int length);
 static void pmclog_schedule_io(struct pmc_owner *po, int wakeup);
-static void pmclog_schedule_all(struct pmc_owner *po);
+static void pmclog_schedule_all(struct pmc_owner *po, int force);
 static void pmclog_stop_kthread(struct pmc_owner *po);
 
 /*
@@ -808,7 +808,7 @@ pmclog_deconfigure_log(struct pmc_owner *po)
  */
 
 int
-pmclog_flush(struct pmc_owner *po)
+pmclog_flush(struct pmc_owner *po, int force)
 {
        int error;
 
@@ -832,7 +832,7 @@ pmclog_flush(struct pmc_owner *po)
                goto error;
        }
 
-       pmclog_schedule_all(po);
+       pmclog_schedule_all(po, force);
  error:
        mtx_unlock(&pmc_kthread_mtx);
 
@@ -840,9 +840,8 @@ pmclog_flush(struct pmc_owner *po)
 }
 
 static void
-pmclog_schedule_one_cond(void *arg)
+pmclog_schedule_one_cond(struct pmc_owner *po, int force)
 {
-       struct pmc_owner *po = arg;
        struct pmclog_buffer *plb;
        int cpu;
 
@@ -851,7 +850,8 @@ pmclog_schedule_one_cond(void *arg)
        /* tell hardclock not to run again */
        if (PMC_CPU_HAS_SAMPLES(cpu))
                PMC_CALL_HOOK_UNLOCKED(curthread, PMC_FN_DO_SAMPLES, NULL);
-       pmc_flush_samples(cpu);
+       if (force)
+               pmc_flush_samples(cpu);
        plb = po->po_curbuf[cpu];
        if (plb && plb->plb_ptr != plb->plb_base)
                pmclog_schedule_io(po, 1);
@@ -859,7 +859,7 @@ pmclog_schedule_one_cond(void *arg)
 }
 
 static void
-pmclog_schedule_all(struct pmc_owner *po)
+pmclog_schedule_all(struct pmc_owner *po, int force)
 {
        /*
         * Schedule the current buffer if any and not empty.
@@ -868,7 +868,7 @@ pmclog_schedule_all(struct pmc_owner *po)
                thread_lock(curthread);
                sched_bind(curthread, i);
                thread_unlock(curthread);
-               pmclog_schedule_one_cond(po);
+               pmclog_schedule_one_cond(po, force);
        }
        thread_lock(curthread);
        sched_unbind(curthread);
@@ -895,7 +895,7 @@ pmclog_close(struct pmc_owner *po)
        /*
         * Schedule the current buffer.
         */
-       pmclog_schedule_all(po);
+       pmclog_schedule_all(po, 0);
        wakeup_one(po);
 
        mtx_unlock(&pmc_kthread_mtx);
@@ -1044,6 +1044,22 @@ pmclog_process_pmcdetach(struct pmc *pm, pid_t pid)
        PMCLOG_DESPATCH_SYNC(po);
 }
 
+void
+pmclog_process_proccreate(struct pmc_owner *po, struct proc *p, int sync)
+{
+       if (sync) {
+               PMCLOG_RESERVE(po, PROC_CREATE, sizeof(struct pmclog_proccreate));
+               PMCLOG_EMIT32(p->p_pid);
+               PMCLOG_EMITSTRING(p->p_comm, MAXCOMLEN+1);
+               PMCLOG_DESPATCH_SYNC(po);
+       } else {
+               PMCLOG_RESERVE(po, PROC_CREATE, sizeof(struct pmclog_proccreate));
+               PMCLOG_EMIT32(p->p_pid);
+               PMCLOG_EMITSTRING(p->p_comm, MAXCOMLEN+1);
+               PMCLOG_DESPATCH(po);
+       }
+}
+
 /*
  * Log a context switch event to the log file.
  */
@@ -1079,14 +1095,13 @@ pmclog_process_procexec(struct pmc_owner *po, pmc_id_t
 
        pathlen   = strlen(path) + 1;   /* #bytes for the path */
        recordlen = offsetof(struct pmclog_procexec, pl_pathname) + pathlen;
-
        PMCLOG_RESERVE(po, PROCEXEC, recordlen);
        PMCLOG_EMIT32(pid);
        PMCLOG_EMIT32(pmid);
        PMCLOG_EMIT32(0);
        PMCLOG_EMITADDR(startaddr);
        PMCLOG_EMITSTRING(path,pathlen);
-       PMCLOG_DESPATCH(po);
+       PMCLOG_DESPATCH_SYNC(po);
 }
 
 /*
@@ -1135,6 +1150,38 @@ pmclog_process_sysexit(struct pmc_owner *po, pid_t pid
 {
        PMCLOG_RESERVE(po, SYSEXIT, sizeof(struct pmclog_sysexit));
        PMCLOG_EMIT32(pid);
+       PMCLOG_DESPATCH(po);
+}
+
+void
+pmclog_process_threadcreate(struct pmc_owner *po, struct thread *td, int sync)
+{
+       struct proc *p;
+
+       p = td->td_proc;
+       if (sync) {
+               PMCLOG_RESERVE(po, THR_CREATE, sizeof(struct pmclog_threadcreate));
+               PMCLOG_EMIT32(td->td_tid);
+               PMCLOG_EMIT32(p->p_pid);
+               PMCLOG_EMIT32(0);
+               PMCLOG_EMITSTRING(td->td_name, MAXCOMLEN+1);
+               PMCLOG_DESPATCH_SYNC(po);
+       } else {
+               PMCLOG_RESERVE(po, THR_CREATE, sizeof(struct pmclog_threadcreate));
+               PMCLOG_EMIT32(td->td_tid);
+               PMCLOG_EMIT32(p->p_pid);
+               PMCLOG_EMIT32(0);
+               PMCLOG_EMITSTRING(td->td_name, MAXCOMLEN+1);
+               PMCLOG_DESPATCH(po);
+       }
+}
+
+void
+pmclog_process_threadexit(struct pmc_owner *po, struct thread *td)
+{
+
+       PMCLOG_RESERVE(po, THR_EXIT, sizeof(struct pmclog_threadexit));
+       PMCLOG_EMIT32(td->td_tid);
        PMCLOG_DESPATCH(po);
 }
 

Modified: head/sys/dev/hwpmc/hwpmc_mod.c
==============================================================================
--- head/sys/dev/hwpmc/hwpmc_mod.c      Tue Jun  5 01:05:58 2018        (r334646)
+++ head/sys/dev/hwpmc/hwpmc_mod.c      Tue Jun  5 04:26:40 2018        (r334647)
@@ -269,6 +269,11 @@ static int generic_switch_out(struct pmc_cpu *pc, stru
 static struct pmc_mdep *pmc_generic_cpu_initialize(void);
 static void pmc_generic_cpu_finalize(struct pmc_mdep *md);
 static void pmc_post_callchain_callback(void);
+static void pmc_process_threadcreate(struct thread *td);
+static void pmc_process_threadexit(struct thread *td);
+static void pmc_process_proccreate(struct proc *p);
+static void pmc_process_allproc(struct pmc *pm);
+
 /*
  * Kernel tunables and sysctl(8) interface.
  */
@@ -2049,6 +2054,9 @@ const char *pmc_hooknames[] = {
        "THR-CREATE",
        "THR-EXIT",
        "THR-USERRET",
+       "THR-CREATE-LOG",
+       "THR-EXIT-LOG",
+       "PROC-CREATE-LOG"
 };
 #endif
 
@@ -2225,6 +2233,10 @@ pmc_hook_handler(struct thread *td, int function, void
                pmc_process_munmap(td, (struct pmckern_map_out *) arg);
                break;
 
+       case PMC_FN_PROC_CREATE_LOG:
+               pmc_process_proccreate((struct proc *)arg);
+               break;
+
        case PMC_FN_USER_CALLCHAIN:
                /*
                 * Record a call chain.
@@ -2270,14 +2282,22 @@ pmc_hook_handler(struct thread *td, int function, void
 
        case PMC_FN_THR_CREATE:
                pmc_process_thread_add(td);
+               pmc_process_threadcreate(td);
                break;
 
+       case PMC_FN_THR_CREATE_LOG:
+               pmc_process_threadcreate(td);
+               break;
+
        case PMC_FN_THR_EXIT:
                KASSERT(td == curthread, ("[pmc,%d] td != curthread",
                    __LINE__));
                pmc_process_thread_delete(td);
+               pmc_process_threadexit(td);
                break;
-
+       case PMC_FN_THR_EXIT_LOG:
+               pmc_process_threadexit(td);
+               break;
        case PMC_FN_THR_USERRET:
                KASSERT(td == curthread, ("[pmc,%d] td != curthread",
                    __LINE__));
@@ -2697,9 +2717,9 @@ pmc_wait_for_pmc_idle(struct pmc *pm)
         * Loop (with a forced context switch) till the PMC's runcount
         * comes down to zero.
         */
-       pmclog_flush(pm->pm_owner);
+       pmclog_flush(pm->pm_owner, 1);
        while (counter_u64_fetch(pm->pm_runcount) > 0) {
-               pmclog_flush(pm->pm_owner);
+               pmclog_flush(pm->pm_owner, 1);
 #ifdef HWPMC_DEBUG
                maxloop--;
                KASSERT(maxloop > 0,
@@ -3439,7 +3459,7 @@ pmc_syscall_handler(struct thread *td, void *syscall_a
                        break;
                }
 
-               error = pmclog_flush(po);
+               error = pmclog_flush(po, 0);
        }
        break;
 
@@ -4015,6 +4035,8 @@ pmc_syscall_handler(struct thread *td, void *syscall_a
                        pmc = NULL;
                        break;
                }
+               if (mode == PMC_MODE_SS)
+                       pmc_process_allproc(pmc);
 
                /*
                 * Return the allocated index.
@@ -5216,8 +5238,10 @@ pmc_process_fork(void *arg __unused, struct proc *p1, 
         */
        epoch_enter_preempt(global_epoch_preempt);
        CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
-           if (po->po_flags & PMC_PO_OWNS_LOGFILE)
+           if (po->po_flags & PMC_PO_OWNS_LOGFILE) {
                    pmclog_process_procfork(po, p1->p_pid, newproc->p_pid);
+                       pmclog_process_proccreate(po, newproc, 1);
+               }
        epoch_exit_preempt(global_epoch_preempt);
 
        if (!is_using_hwpmcs)
@@ -5277,6 +5301,64 @@ pmc_process_fork(void *arg __unused, struct proc *p1, 
 
  done:
        sx_xunlock(&pmc_sx);
+}
+
+static void
+pmc_process_threadcreate(struct thread *td)
+{
+       struct pmc_owner *po;
+
+       epoch_enter_preempt(global_epoch_preempt);
+       CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
+           if (po->po_flags & PMC_PO_OWNS_LOGFILE)
+                       pmclog_process_threadcreate(po, td, 1);
+       epoch_exit_preempt(global_epoch_preempt);
+}
+
+static void
+pmc_process_threadexit(struct thread *td)
+{
+       struct pmc_owner *po;
+
+       epoch_enter_preempt(global_epoch_preempt);
+       CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
+           if (po->po_flags & PMC_PO_OWNS_LOGFILE)
+                       pmclog_process_threadexit(po, td);
+       epoch_exit_preempt(global_epoch_preempt);
+}
+
+static void
+pmc_process_proccreate(struct proc *p)
+{
+       struct pmc_owner *po;
+
+       epoch_enter_preempt(global_epoch_preempt);
+       CK_LIST_FOREACH(po, &pmc_ss_owners, po_ssnext)
+           if (po->po_flags & PMC_PO_OWNS_LOGFILE)
+                       pmclog_process_proccreate(po, p, 1 /* sync */);
+       epoch_exit_preempt(global_epoch_preempt);
+}
+
+static void
+pmc_process_allproc(struct pmc *pm)
+{
+       struct pmc_owner *po;
+       struct thread *td;
+       struct proc *p;
+
+       po = pm->pm_owner;
+       if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
+               return;
+       sx_slock(&allproc_lock);
+       FOREACH_PROC_IN_SYSTEM(p) {
+               pmclog_process_proccreate(po, p, 0 /* sync */);
+               PROC_LOCK(p);
+               FOREACH_THREAD_IN_PROC(p, td)
+                       pmclog_process_threadcreate(po, td, 0 /* sync */);
+               PROC_UNLOCK(p);
+       }
+       sx_sunlock(&allproc_lock);
+       pmclog_flush(po, 0);
 }
 
 static void

Modified: head/sys/kern/kern_kthread.c
==============================================================================
--- head/sys/kern/kern_kthread.c        Tue Jun  5 01:05:58 2018        (r334646)
+++ head/sys/kern/kern_kthread.c        Tue Jun  5 04:26:40 2018        (r334647)
@@ -126,6 +126,12 @@ kproc_create(void (*func)(void *), void *arg,
        sched_clear_tdname(td);
 #endif
        TSTHREAD(td, td->td_name);
+#ifdef HWPMC_HOOKS
+       if (PMC_SYSTEM_SAMPLING_ACTIVE()) {
+               PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_PROC_CREATE_LOG, p2);
+               PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_CREATE_LOG, NULL);
+       }
+#endif
 
        /* call the processes' main()... */
        cpu_fork_kthread_handler(td, func, arg);
@@ -310,7 +316,10 @@ kthread_add(void (*func)(void *), void *arg, struct pr
 
        /* Avoid inheriting affinity from a random parent. */
        cpuset_kernthread(newtd);
-
+#ifdef HWPMC_HOOKS
+       if (PMC_SYSTEM_SAMPLING_ACTIVE())
+               PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_CREATE_LOG, NULL);
+#endif
        /* Delay putting it on the run queue until now. */
        if (!(flags & RFSTOPPED)) {
                thread_lock(newtd);
@@ -331,6 +340,10 @@ kthread_exit(void)
        td = curthread;
        p = td->td_proc;
 
+#ifdef HWPMC_HOOKS
+       if (PMC_SYSTEM_SAMPLING_ACTIVE())
+               PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
+#endif
        /* A module may be waiting for us to exit. */
        wakeup(td);
 

Modified: head/sys/kern/kern_thr.c
==============================================================================
--- head/sys/kern/kern_thr.c    Tue Jun  5 01:05:58 2018        (r334646)
+++ head/sys/kern/kern_thr.c    Tue Jun  5 04:26:40 2018        (r334647)
@@ -265,6 +265,8 @@ thread_create(struct thread *td, struct rtprio *rtp,
 #ifdef HWPMC_HOOKS
        if (PMC_PROC_IS_USING_PMCS(p))
                PMC_CALL_HOOK(newtd, PMC_FN_THR_CREATE, NULL);
+       else if (PMC_SYSTEM_SAMPLING_ACTIVE())
+               PMC_CALL_HOOK_UNLOCKED(newtd, PMC_FN_THR_CREATE_LOG, NULL);
 #endif
 
        tidhash_add(newtd);
@@ -592,6 +594,10 @@ sys_thr_set_name(struct thread *td, struct thr_set_nam
        if (ttd == NULL)
                return (ESRCH);
        strcpy(ttd->td_name, name);
+#ifdef HWPMC_HOOKS
+       if (PMC_PROC_IS_USING_PMCS(p) || PMC_SYSTEM_SAMPLING_ACTIVE())
+               PMC_CALL_HOOK_UNLOCKED(ttd, PMC_FN_THR_CREATE_LOG, NULL);
+#endif
 #ifdef KTR
        sched_clear_tdname(ttd);
 #endif

Modified: head/sys/kern/kern_thread.c
==============================================================================
--- head/sys/kern/kern_thread.c Tue Jun  5 01:05:58 2018        (r334646)
+++ head/sys/kern/kern_thread.c Tue Jun  5 04:26:40 2018        (r334647)
@@ -589,7 +589,8 @@ thread_exit(void)
        if (PMC_PROC_IS_USING_PMCS(td->td_proc)) {
                PMC_SWITCH_CONTEXT(td, PMC_FN_CSW_OUT);
                PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT, NULL);
-       }
+       } else if (PMC_SYSTEM_SAMPLING_ACTIVE())
+               PMC_CALL_HOOK_UNLOCKED(td, PMC_FN_THR_EXIT_LOG, NULL);
 #endif
        PROC_UNLOCK(p);
        PROC_STATLOCK(p);

Modified: head/sys/sys/pmc.h
==============================================================================
--- head/sys/sys/pmc.h  Tue Jun  5 01:05:58 2018        (r334646)
+++ head/sys/sys/pmc.h  Tue Jun  5 04:26:40 2018        (r334647)
@@ -62,7 +62,7 @@
  * The patch version is incremented for every bug fix.
  */
 #define        PMC_VERSION_MAJOR       0x06
-#define        PMC_VERSION_MINOR       0x01
+#define        PMC_VERSION_MINOR       0x02
 #define        PMC_VERSION_PATCH       0x0000
 
 #define        PMC_VERSION             (PMC_VERSION_MAJOR << 24 |              \

Modified: head/sys/sys/pmckern.h
==============================================================================
--- head/sys/sys/pmckern.h      Tue Jun  5 01:05:58 2018        (r334646)
+++ head/sys/sys/pmckern.h      Tue Jun  5 04:26:40 2018        (r334647)
@@ -63,6 +63,9 @@
 #define        PMC_FN_THR_CREATE               12
 #define        PMC_FN_THR_EXIT                 13
 #define        PMC_FN_THR_USERRET              14
+#define        PMC_FN_THR_CREATE_LOG           15
+#define        PMC_FN_THR_EXIT_LOG             16
+#define        PMC_FN_PROC_CREATE_LOG          17
 
 #define        PMC_HR  0       /* Hardware ring buffer */
 #define        PMC_SR  1       /* Software ring buffer */

Modified: head/sys/sys/pmclog.h
==============================================================================
--- head/sys/sys/pmclog.h       Tue Jun  5 01:05:58 2018        (r334646)
+++ head/sys/sys/pmclog.h       Tue Jun  5 04:26:40 2018        (r334647)
@@ -67,7 +67,13 @@ enum pmclog_type {
         *
         * New variant of PMCLOG_TYPE_PMCALLOCATE for dynamic event.
         */
-       PMCLOG_TYPE_PMCALLOCATEDYN = 17
+       PMCLOG_TYPE_PMCALLOCATEDYN = 17,
+       /*
+        * V6 ABI
+        */
+       PMCLOG_TYPE_THR_CREATE = 18,
+       PMCLOG_TYPE_THR_EXIT = 19,
+       PMCLOG_TYPE_PROC_CREATE = 20
 };
 
 /*
@@ -181,6 +187,12 @@ struct pmclog_proccsw {
        uint32_t                pl_tid;
 } __packed;
 
+struct pmclog_proccreate {
+       PMCLOG_ENTRY_HEADER
+       uint32_t                pl_pid;
+       uint64_t                pl_pcomm[MAXCOMLEN+1];  /* keep 8 byte aligned */
+} __packed;
+
 struct pmclog_procexec {
        PMCLOG_ENTRY_HEADER
        uint32_t                pl_pid;
@@ -210,6 +222,19 @@ struct pmclog_sysexit {
        uint32_t                pl_pid;
 } __packed;
 
+struct pmclog_threadcreate {
+       PMCLOG_ENTRY_HEADER
+       uint32_t                pl_tid;
+       uint32_t                pl_pid;
+       uint32_t                pl_pad;
+       uint64_t                pl_tdname[MAXCOMLEN+1]; /* keep 8 byte aligned */
+} __packed;
+
+struct pmclog_threadexit {
+       PMCLOG_ENTRY_HEADER
+       uint32_t                pl_tid;
+} __packed;
+
 struct pmclog_userdata {
        PMCLOG_ENTRY_HEADER
        uint32_t                pl_userdata;
@@ -261,7 +286,7 @@ union pmclog_entry {                /* only used to size scratch are
 int    pmclog_configure_log(struct pmc_mdep *_md, struct pmc_owner *_po,
     int _logfd);
 int    pmclog_deconfigure_log(struct pmc_owner *_po);
-int    pmclog_flush(struct pmc_owner *_po);
+int    pmclog_flush(struct pmc_owner *_po, int force);
 int    pmclog_close(struct pmc_owner *_po);
 void   pmclog_initialize(void);
 int    pmclog_proc_create(struct thread *td, void **handlep);
@@ -283,6 +308,9 @@ void        pmclog_process_procexec(struct pmc_owner *_po, pm
 void   pmclog_process_procexit(struct pmc *_pm, struct pmc_process *_pp);
 void   pmclog_process_procfork(struct pmc_owner *_po, pid_t _oldpid, pid_t _newpid);
 void   pmclog_process_sysexit(struct pmc_owner *_po, pid_t _pid);
+void   pmclog_process_threadcreate(struct pmc_owner *_po, struct thread *td, int sync);
+void   pmclog_process_threadexit(struct pmc_owner *_po, struct thread *td);
+void   pmclog_process_proccreate(struct pmc_owner *_po, struct proc *p, int sync);
 int    pmclog_process_userlog(struct pmc_owner *_po,
     struct pmc_op_writelog *_wl);
 void   pmclog_shutdown(void);

Modified: head/usr.sbin/Makefile
==============================================================================
--- head/usr.sbin/Makefile      Tue Jun  5 01:05:58 2018        (r334646)
+++ head/usr.sbin/Makefile      Tue Jun  5 04:26:40 2018        (r334647)
@@ -179,7 +179,9 @@ SUBDIR.${MK_OPENSSL}+=      keyserv
 SUBDIR.${MK_PC_SYSINSTALL}+=   pc-sysinstall
 SUBDIR.${MK_PF}+=      ftp-proxy
 SUBDIR.${MK_PKGBOOTSTRAP}+=    pkg
+.if (${COMPILER_TYPE} == "clang" || (${COMPILER_TYPE} == "gcc" && ${COMPILER_VERSION} >= 60100))
 SUBDIR.${MK_PMC}+=     pmc
+.endif
 SUBDIR.${MK_PMC}+=     pmcannotate
 SUBDIR.${MK_PMC}+=     pmccontrol
 SUBDIR.${MK_PMC}+=     pmcstat

Modified: head/usr.sbin/pmc/Makefile
==============================================================================
--- head/usr.sbin/pmc/Makefile  Tue Jun  5 01:05:58 2018        (r334646)
+++ head/usr.sbin/pmc/Makefile  Tue Jun  5 04:26:40 2018        (r334647)
@@ -2,12 +2,14 @@
 # $FreeBSD$
 #
 
-PROG=  pmc
+.include <src.opts.mk>
+PROG_CXX=      pmc
 MAN=   
+CXXFLAGS+= -O0
 
 LIBADD=        kvm pmc m ncursesw pmcstat elf
 
 SRCS=  pmc.c pmc_util.c cmd_pmc_stat.c \
-       cmd_pmc_list.c cmd_pmc_filter.c
+       cmd_pmc_list.c cmd_pmc_filter.cc
 
 .include <bsd.prog.mk>

Modified: head/usr.sbin/pmc/cmd_pmc.h
==============================================================================
--- head/usr.sbin/pmc/cmd_pmc.h Tue Jun  5 01:05:58 2018        (r334646)
+++ head/usr.sbin/pmc/cmd_pmc.h Tue Jun  5 04:26:40 2018        (r334647)
@@ -40,11 +40,16 @@ extern struct pmcstat_args pmc_args;
 
 typedef int (*cmd_disp_t)(int, char **);
 
-int    cmd_pmc_stat(int, char **);
-int    cmd_pmc_filter(int, char **);
-int    cmd_pmc_stat_system(int, char **);
-int    cmd_pmc_list_events(int, char **);
-
+#if defined(__cplusplus)
+extern "C" {
+#endif
+       int     cmd_pmc_stat(int, char **);
+       int     cmd_pmc_filter(int, char **);
+       int     cmd_pmc_stat_system(int, char **);
+       int     cmd_pmc_list_events(int, char **);
+#if defined(__cplusplus)
+};
+#endif
 int    pmc_util_get_pid(struct pmcstat_args *);
 void   pmc_util_start_pmcs(struct pmcstat_args *);
 void   pmc_util_cleanup(struct pmcstat_args *);

Copied and modified: head/usr.sbin/pmc/cmd_pmc_filter.cc (from r334645, head/usr.sbin/pmc/cmd_pmc_filter.c)
==============================================================================
--- head/usr.sbin/pmc/cmd_pmc_filter.c  Mon Jun  4 23:17:18 2018        (r334645, copy source)
+++ head/usr.sbin/pmc/cmd_pmc_filter.cc Tue Jun  5 04:26:40 2018        (r334647)
@@ -68,11 +68,22 @@ __FBSDID("$FreeBSD$");
 #include <libpmcstat.h>
 #include "cmd_pmc.h"
 
+#include <iostream>
+#include <string>
+#if _LIBCPP_STD_VER >= 11
+#include <unordered_map>
+using  std::unordered_map;
+#else
+#include <tr1/unordered_map>
+using  std::tr1::unordered_map;
+#endif
 #define LIST_MAX 64
 static struct option longopts[] = {
-       {"threads", no_argument, NULL, 't'},
-       {"pids", no_argument, NULL, 'p'},
-       {"events", no_argument, NULL, 'e'},
+       {"lwps", required_argument, NULL, 't'},
+       {"pids", required_argument, NULL, 'p'},
+       {"threads", required_argument, NULL, 'T'},
+       {"processes", required_argument, NULL, 'P'},
+       {"events", required_argument, NULL, 'e'},
        {NULL, 0, NULL, 0}
 };
 
@@ -81,15 +92,18 @@ usage(void)
 {
        errx(EX_USAGE,
            "\t filter log file\n"
-           "\t -t <lwps>, --threads <lwps> -- comma-delimited list of lwps to filter on\n"
-           "\t -p <pids>, --pids <pids> -- comma-delimited list of pids to filter on\n"
            "\t -e <events>, --events <events> -- comma-delimited list of events to filter on\n"
+           "\t -p <pids>, --pids <pids> -- comma-delimited list of pids to filter on\n"
+           "\t -P <processes>, --processes <processes> -- comma-delimited list of process names to filter on\n"
+           "\t -t <lwps>, --lwps <lwps> -- comma-delimited list of lwps to filter on\n"
+           "\t -T <threads>, --threads <threads> -- comma-delimited list of thread names to filter on\n"
+           "\t -x -- toggle inclusive filtering\n"
            );
 }
 
 
 static void
-parse_intlist(char *strlist, int *intlist, int *pcount, int (*fn) (const char *))
+parse_intlist(char *strlist, uint32_t *intlist, int *pcount, int (*fn) (const char *))
 {
        char *token;
        int count, tokenval;
@@ -105,7 +119,7 @@ parse_intlist(char *strlist, int *intlist, int *pcount
 }
 
 static void
-parse_events(char *strlist, int *intlist, int *pcount, char *cpuid)
+parse_events(char *strlist, uint32_t intlist[LIST_MAX], int *pcount, char *cpuid)
 {
        char *token;
        int count, tokenval;
@@ -120,6 +134,21 @@ parse_events(char *strlist, int *intlist, int *pcount,
        *pcount = count;
 }
 
+static void
+parse_names(char *strlist, char *namelist[LIST_MAX], int *pcount)
+{
+       char *token;
+       int count;
+
+       count = 0;
+       while ((token = strsep(&strlist, ",")) != NULL &&
+           count < LIST_MAX) {
+               namelist[count++] = token;
+       }
+       *pcount = count;
+}
+
+
 struct pmcid_ent {
        uint32_t pe_pmcid;
        uint32_t pe_idx;
@@ -129,23 +158,52 @@ struct pmcid_ent {
         (PMCLOG_TYPE_ ## T << 16)   |                                  \
         ((L) & 0xFFFF))
 
+
+typedef unordered_map < int ,std::string > idmap;
+typedef std::pair < int ,std::string > identry;
+
+static bool
+pmc_find_name(idmap & map, uint32_t id, char *list[LIST_MAX], int count)
+{
+       int i;
+
+       auto kvpair = map.find(id);
+       if (kvpair == map.end()) {
+               printf("unknown id: %d\n", id);
+               return (false);
+       }
+       auto p = list;
+       for (i = 0; i < count; i++, p++) {
+               if (strstr(kvpair->second.c_str(), *p) != NULL)
+                       return (true);
+       }
+       return (false);
+}
+
 static void
 pmc_filter_handler(uint32_t *lwplist, int lwpcount, uint32_t *pidlist, int pidcount,
-    char *events, int infd, int outfd)
+    char *events, char *processes, char *threads, bool exclusive, int infd,
+    int outfd)
 {
        struct pmclog_ev ev;
        struct pmclog_parse_state *ps;
        struct pmcid_ent *pe;
        uint32_t eventlist[LIST_MAX];
        char cpuid[PMC_CPUID_LEN];
-       int i, pmccount, copies, eventcount;
-       uint32_t idx, h;
-       off_t dstoff;
+       char *proclist[LIST_MAX];
+       char *threadlist[LIST_MAX];
+       int i, pmccount, copies, eventcount, proccount, threadcount;
+       uint32_t idx;
+       idmap pidmap, tidmap;
 
-       if ((ps = pmclog_open(infd)) == NULL)
+       if ((ps = static_cast < struct pmclog_parse_state *>(pmclog_open(infd)))== NULL)
                errx(EX_OSERR, "ERROR: Cannot allocate pmclog parse state: %s\n", strerror(errno));
 
-       eventcount = pmccount = 0;
+       proccount = eventcount = pmccount = 0;
+       if (processes)
+               parse_names(processes, proclist, &proccount);
+       if (threads)
+               parse_names(threads, threadlist, &threadcount);
        while (pmclog_read(ps, &ev) == 0) {
                if (ev.pl_type == PMCLOG_TYPE_INITIALIZE)
                        memcpy(cpuid, ev.pl_u.pl_i.pl_cpuid, PMC_CPUID_LEN);
@@ -157,9 +215,9 @@ pmc_filter_handler(uint32_t *lwplist, int lwpcount, ui
 
        lseek(infd, 0, SEEK_SET);
        pmclog_close(ps);
-       if ((ps = pmclog_open(infd)) == NULL)
+       if ((ps = static_cast < struct pmclog_parse_state *>(pmclog_open(infd)))== NULL)
                errx(EX_OSERR, "ERROR: Cannot allocate pmclog parse state: %s\n", strerror(errno));
-       if ((pe = malloc(sizeof(*pe) * pmccount)) == NULL)
+       if ((pe = (typeof(pe)) malloc(sizeof(*pe) * pmccount)) == NULL)
                errx(EX_OSERR, "ERROR: failed to allocate pmcid map");
        i = 0;
        while (pmclog_read(ps, &ev) == 0 && i < pmccount) {
@@ -171,12 +229,14 @@ pmc_filter_handler(uint32_t *lwplist, int lwpcount, ui
        }
        lseek(infd, 0, SEEK_SET);
        pmclog_close(ps);
-       if ((ps = pmclog_open(infd)) == NULL)
+       if ((ps = static_cast < struct pmclog_parse_state *>(pmclog_open(infd)))== NULL)
                errx(EX_OSERR, "ERROR: Cannot allocate pmclog parse state: %s\n", strerror(errno));
-       dstoff = copies = 0;
+       copies = 0;
        while (pmclog_read(ps, &ev) == 0) {
-               dstoff += ev.pl_len;
-               h = *(uint32_t *)ev.pl_data;
+               if (ev.pl_type == PMCLOG_TYPE_THR_CREATE)
+                       tidmap.insert(identry(ev.pl_u.pl_tc.pl_tid, ev.pl_u.pl_tc.pl_tdname));
+               if (ev.pl_type == PMCLOG_TYPE_PROC_CREATE)
+                       pidmap.insert(identry(ev.pl_u.pl_pc.pl_pid, ev.pl_u.pl_pc.pl_pcomm));
                if (ev.pl_type != PMCLOG_TYPE_CALLCHAIN) {
                        if (write(outfd, ev.pl_data, ev.pl_len) != (ssize_t)ev.pl_len)
                                errx(EX_OSERR, "ERROR: failed output write");
@@ -186,14 +246,14 @@ pmc_filter_handler(uint32_t *lwplist, int lwpcount, ui
                        for (i = 0; i < pidcount; i++)
                                if (pidlist[i] == ev.pl_u.pl_cc.pl_pid)
                                        break;
-                       if (i == pidcount)
+                       if ((i == pidcount) == exclusive)
                                continue;
                }
                if (lwpcount) {
                        for (i = 0; i < lwpcount; i++)
                                if (lwplist[i] == ev.pl_u.pl_cc.pl_tid)
                                        break;
-                       if (i == lwpcount)
+                       if ((i == lwpcount) == exclusive)
                                continue;
                }
                if (eventcount) {
@@ -210,9 +270,15 @@ pmc_filter_handler(uint32_t *lwplist, int lwpcount, ui
                                if (idx == eventlist[i])
                                        break;
                        }
-                       if (i == eventcount)
+                       if ((i == eventcount) == exclusive)
                                continue;
                }
+               if (proccount &&
+                   pmc_find_name(pidmap, ev.pl_u.pl_cc.pl_pid, proclist, proccount) == exclusive)
+                       continue;
+               if (threadcount &&
+                   pmc_find_name(tidmap, ev.pl_u.pl_cc.pl_tid, threadlist, threadcount) == exclusive)
+                       continue;
                if (write(outfd, ev.pl_data, ev.pl_len) != (ssize_t)ev.pl_len)
                        errx(EX_OSERR, "ERROR: failed output write");
        }
@@ -221,25 +287,36 @@ pmc_filter_handler(uint32_t *lwplist, int lwpcount, ui
 int
 cmd_pmc_filter(int argc, char **argv)
 {
-       char *lwps, *pids, *events;
+       char *lwps, *pids, *events, *processes, *threads;
        uint32_t lwplist[LIST_MAX];
        uint32_t pidlist[LIST_MAX];
        int option, lwpcount, pidcount;
        int prelogfd, postlogfd;
+       bool exclusive;
 
-       lwps = pids = events = NULL;
+       threads = processes = lwps = pids = events = NULL;
        lwpcount = pidcount = 0;
-       while ((option = getopt_long(argc, argv, "t:p:e:", longopts, NULL)) != -1) {
+       exclusive = false;
+       while ((option = getopt_long(argc, argv, "e:p:t:xP:T:", longopts, NULL)) != -1) {
                switch (option) {
-               case 't':
-                       lwps = strdup(optarg);
+               case 'e':
+                       events = strdup(optarg);
                        break;
                case 'p':
                        pids = strdup(optarg);
                        break;
-               case 'e':
-                       events = strdup(optarg);
+               case 'P':
+                       processes = strdup(optarg);
                        break;
+               case 't':
+                       lwps = strdup(optarg);
+                       break;
+               case 'T':
+                       threads = strdup(optarg);
+                       break;
+               case 'x':
+                       exclusive = !exclusive;
+                       break;
                case '?':
                default:
                        usage();
@@ -264,6 +341,6 @@ cmd_pmc_filter(int argc, char **argv)
                    strerror(errno));
 
        pmc_filter_handler(lwplist, lwpcount, pidlist, pidcount, events,
-           prelogfd, postlogfd);
+           processes, threads, exclusive, prelogfd, postlogfd);
        return (0);
 }