Add a check for the tasks we monitor via the -p/-t options,
and finish 'perf stat' once there is no longer any task to monitor.
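
The liveness test boils down to stat()'ing /proc/<pid>, which exists only
while the task does. A minimal standalone sketch of that check (not part of
the patch; the pid used below is just an example):

	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <sys/stat.h>
	#include <sys/types.h>

	/* Sketch of the /proc based liveness check, reduced to one pid. */
	static bool pid_is_alive(pid_t pid)
	{
		char path[PATH_MAX];
		struct stat st;

		/* /proc/<pid> disappears as soon as the task is gone. */
		snprintf(path, sizeof(path), "/proc/%d", (int)pid);
		return stat(path, &st) == 0;
	}

	int main(void)
	{
		pid_t pid = 1;	/* example pid; pid 1 is always present */

		printf("pid %d alive: %d\n", (int)pid, pid_is_alive(pid));
		return 0;
	}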

Cc: Jin Yao <yao....@linux.intel.com>
Requested-by: Stephane Eranian <eran...@google.com>
Link: http://lkml.kernel.org/n/tip-dfqvnvz0oqu5zg149aquz...@git.kernel.org
Signed-off-by: Jiri Olsa <jo...@kernel.org>
---
 tools/perf/builtin-stat.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index b86aba1c8028..d1028d7755bb 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -409,6 +409,28 @@ static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
        return leader;
 }
 
+static bool is_target_alive(struct target *_target,
+                           struct thread_map *threads)
+{
+       struct stat st;
+       int i;
+
+       if (!target__has_task(_target))
+               return true;
+
+       for (i = 0; i < threads->nr; i++) {
+               char path[PATH_MAX];
+
+               scnprintf(path, PATH_MAX, "%s/%d", procfs__mountpoint(),
+                         threads->map[i].pid);
+
+               if (!stat(path, &st))
+                       return true;
+       }
+
+       return false;
+}
+
 static int __run_perf_stat(int argc, const char **argv, int run_idx)
 {
        int interval = stat_config.interval;
@@ -579,6 +601,8 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
                enable_counters();
                while (!done) {
                        nanosleep(&ts, NULL);
+                       if (!is_target_alive(&target, evsel_list->threads))
+                               break;
                        if (timeout)
                                break;
                        if (interval) {
-- 
2.17.2
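
For illustration only, a self-contained sketch of the polling pattern the
second hunk adds to __run_perf_stat(): wake up once per second and stop as
soon as none of the monitored pids exist anymore. The pid array here is a
stand-in for perf's thread_map, and the pids are made up:

	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <sys/stat.h>
	#include <sys/types.h>
	#include <time.h>

	/* Return true while at least one of the monitored pids is alive. */
	static bool any_pid_alive(const pid_t *pids, int nr)
	{
		struct stat st;
		char path[PATH_MAX];
		int i;

		for (i = 0; i < nr; i++) {
			snprintf(path, sizeof(path), "/proc/%d", (int)pids[i]);
			if (stat(path, &st) == 0)
				return true;
		}
		return false;
	}

	int main(void)
	{
		pid_t pids[] = { 1234, 5678 };	/* example, as with -p 1234,5678 */
		struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

		while (any_pid_alive(pids, 2)) {
			nanosleep(&ts, NULL);
			/* ... read and print counters here, as perf stat does ... */
		}
		printf("no monitored task left, stopping\n");
		return 0;
	}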
