On Mon, Nov 16, 2020 at 03:19:41PM +0300, Alexey Budankov wrote:
> 
> Initialize data files located at mmap buffer objects so trace data
> can be written into several data files located in the data directory.
> 
> Signed-off-by: Alexey Budankov <alexey.budan...@linux.intel.com>
> ---
>  tools/perf/builtin-record.c | 41 ++++++++++++++++++++++++++++++-------
>  tools/perf/util/record.h    |  1 +
>  2 files changed, 35 insertions(+), 7 deletions(-)
> 
> diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
> index 779676531edf..f5e5175da6a1 100644
> --- a/tools/perf/builtin-record.c
> +++ b/tools/perf/builtin-record.c
> @@ -158,6 +158,11 @@ static const char *affinity_tags[PERF_AFFINITY_MAX] = {
>       "SYS", "NODE", "CPU"
>  };
>  
> +static int record__threads_enabled(struct record *rec)
> +{
> +     return rec->opts.threads_spec;
> +}
> +
>  static bool switch_output_signal(struct record *rec)
>  {
>       return rec->switch_output.signal &&
> @@ -1060,7 +1065,7 @@ static int record__free_thread_data(struct record *rec)
>  static int record__mmap_evlist(struct record *rec,
>                              struct evlist *evlist)
>  {
> -     int ret;
> +     int i, ret;
>       struct record_opts *opts = &rec->opts;
>       bool auxtrace_overwrite = opts->auxtrace_snapshot_mode ||
>                                 opts->auxtrace_sample_mode;
> @@ -1099,6 +1104,18 @@ static int record__mmap_evlist(struct record *rec,
>       if (ret)
>               return ret;
>  
> +     if (record__threads_enabled(rec)) {
> +             ret = perf_data__create_dir(&rec->data, evlist->core.nr_mmaps);
> +             if (ret)
> +                     return ret;
> +             for (i = 0; i < evlist->core.nr_mmaps; i++) {
> +                     if (evlist->mmap)
> +                             evlist->mmap[i].file = &rec->data.dir.files[i];
> +                     if (evlist->overwrite_mmap)
> +                             evlist->overwrite_mmap[i].file = 
> &rec->data.dir.files[i];
> +             }
> +     }
> +
>       return 0;
>  }
>  
> @@ -1400,8 +1417,12 @@ static int record__mmap_read_evlist(struct record 
> *rec, struct evlist *evlist,
>       /*
>        * Mark the round finished in case we wrote
>        * at least one event.
> +      *
> +      * No need for round events in directory mode,
> +      * because per-cpu maps and files have data
> +      * sorted by kernel.

But it's not just for a single cpu, since a task can migrate, so we need
to look at other cpus' data too.  Thus we use the ordered events queue,
and round events help to determine when to flush the data.  Without
the round events, it'd consume a huge amount of memory during report.

If we separate tracking records and process them first, we should be
able to process samples immediately without sorting them in the
ordered event queue.  This will save both cpu cycles and memory
footprint significantly IMHO.

Thanks,
Namhyung


>        */
> -     if (bytes_written != rec->bytes_written)
> +     if (!record__threads_enabled(rec) && bytes_written != 
> rec->bytes_written)
>               rc = record__write(rec, NULL, &finished_round_event, 
> sizeof(finished_round_event));
>  
>       if (overwrite)
> @@ -1514,7 +1535,9 @@ static void record__init_features(struct record *rec)
>       if (!rec->opts.use_clockid)
>               perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA);
>  
> -     perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
> +     if (!record__threads_enabled(rec))
> +             perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
> +
>       if (!record__comp_enabled(rec))
>               perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
>  
> @@ -1525,15 +1548,21 @@ static void
>  record__finish_output(struct record *rec)
>  {
>       struct perf_data *data = &rec->data;
> -     int fd = perf_data__fd(data);
> +     int i, fd = perf_data__fd(data);
>  
>       if (data->is_pipe)
>               return;
>  
>       rec->session->header.data_size += rec->bytes_written;
>       data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
> +     if (record__threads_enabled(rec)) {
> +             for (i = 0; i < data->dir.nr; i++)
> +                     data->dir.files[i].size = lseek(data->dir.files[i].fd, 
> 0, SEEK_CUR);
> +     }
>  
>       if (!rec->no_buildid) {
> +             /* this will be recalculated during process_buildids() */
> +             rec->samples = 0;
>               process_buildids(rec);
>  
>               if (rec->buildid_all)
> @@ -2438,8 +2467,6 @@ static int __cmd_record(struct record *rec, int argc, 
> const char **argv)
>               status = err;
>  
>       record__synthesize(rec, true);
> -     /* this will be recalculated during process_buildids() */
> -     rec->samples = 0;
>  
>       if (!err) {
>               if (!rec->timestamp_filename) {
> @@ -3179,7 +3206,7 @@ int cmd_record(int argc, const char **argv)
>  
>       }
>  
> -     if (rec->opts.kcore)
> +     if (rec->opts.kcore || record__threads_enabled(rec))
>               rec->data.is_dir = true;
>  
>       if (rec->opts.comp_level != 0) {
> diff --git a/tools/perf/util/record.h b/tools/perf/util/record.h
> index 266760ac9143..9c13a39cc58f 100644
> --- a/tools/perf/util/record.h
> +++ b/tools/perf/util/record.h
> @@ -74,6 +74,7 @@ struct record_opts {
>       int           ctl_fd;
>       int           ctl_fd_ack;
>       bool          ctl_fd_close;
> +     int           threads_spec;
>  };
>  
>  extern const char * const *record_usage;
> -- 
> 2.24.1
> 

Reply via email to