On 11.10.2018 16:46, Jiri Olsa wrote:
> On Tue, Oct 09, 2018 at 11:58:53AM +0300, Alexey Budankov wrote:
> 
> SNIP
> 
>> +#ifdef HAVE_AIO_SUPPORT
>> +int perf_mmap__aio_push(struct perf_mmap *md, void *to,
>> +                    int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
>> +                    off_t *off)
>> +{
>> +    u64 head = perf_mmap__read_head(md);
>> +    unsigned char *data = md->base + page_size;
>> +    unsigned long size, size0 = 0;
>> +    void *buf;
>> +    int rc = 0;
>> +
>> +    rc = perf_mmap__read_init(md);
>> +    if (rc < 0)
>> +            return (rc == -EAGAIN) ? 0 : -1;
>> +
>> +    /*
>> +     * md->base data is copied into the md->aio.data buffer to
>> +     * release space in the kernel buffer as fast as possible,
>> +     * through perf_mmap__consume() below.
>> +     *
>> +     * That lets the kernel proceed with storing more profiling
>> +     * data into the kernel buffer earlier than other per-cpu
>> +     * kernel buffers are handled.
>> +     *
>> +     * Copying is done in two steps in case the chunk of profiling
>> +     * data crosses the upper bound of the kernel buffer. In this
>> +     * case we first move the part of the data from md->start up to
>> +     * the upper bound and then the remainder from the beginning
>> +     * of the kernel buffer up to the end of the data chunk.
>> +     */
>> +
>> +    size = md->end - md->start;
>> +
>> +    if ((md->start & md->mask) + size != (md->end & md->mask)) {
>> +            buf = &data[md->start & md->mask];
>> +            size = md->mask + 1 - (md->start & md->mask);
>> +            md->start += size;
>> +            memcpy(md->aio.data, buf, size);
>> +            size0 = size;
>> +    }
>> +
>> +    buf = &data[md->start & md->mask];
>> +    size = md->end - md->start;
>> +    md->start += size;
>> +    memcpy(md->aio.data + size0, buf, size);
>> +
>> +    /*
>> +     * Increment md->refcount to guard the md->aio.data buffer
>> +     * from premature deallocation, because the md object can be
>> +     * released before the aio write request started on
>> +     * md->aio.data completes.
>> +     *
>> +     * perf_mmap__put() is done at record__aio_complete()
>> +     * after the started request completes.
>> +     */
>> +    perf_mmap__get(md);
>> +
>> +    md->prev = head;
>> +    perf_mmap__consume(md);
>> +
>> +    rc = push(to, &(md->aio.cblock), md->aio.data, size0 + size, *off);
>> +    if (!rc) {
>> +            *off += size0 + size;
>> +    } else {
>> +            /*
>> +             * Decrement md->refcount back if aio write
>> +             * operation failed to start.
>> +             */
>> +            perf_mmap__put(md);
>> +    }
>> +
>> +    return rc;
>> +}
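For reference, the two-step wrap-around copy above boils down to the minimal
standalone sketch below; ring_copy(), RING_SIZE and the demo values are
hypothetical and only mirror the md->mask/md->start/md->end handling in
perf_mmap__aio_push(), they are not part of the patch:

#include <stdio.h>
#include <string.h>

#define RING_SIZE	8			/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1)

/* Copy the [start, end) region of a power-of-two ring buffer into dst. */
static void ring_copy(const unsigned char *ring, unsigned long start,
		      unsigned long end, unsigned char *dst)
{
	unsigned long size = end - start, size0 = 0;

	/* The chunk crosses the upper bound: copy the tail of the ring first. */
	if ((start & RING_MASK) + size != (end & RING_MASK)) {
		size0 = RING_SIZE - (start & RING_MASK);
		memcpy(dst, &ring[start & RING_MASK], size0);
		start += size0;
	}

	/* Copy the remainder from the beginning of the ring. */
	memcpy(dst + size0, &ring[start & RING_MASK], end - start);
}

int main(void)
{
	unsigned char ring[RING_SIZE] = { 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' };
	unsigned char out[RING_SIZE + 1] = { 0 };

	ring_copy(ring, 6, 10, out);	/* wraps: copies "GH", then "AB" */
	printf("%s\n", (char *)out);	/* prints GHAB */
	return 0;
}

The wrap test and the size0 bookkeeping are the same as in the hunk above; the
real code additionally hands the linearized buffer and an aiocb to the push()
callback, which starts the aio write.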
>> +#else
>> +int perf_mmap__aio_push(struct perf_mmap *md __maybe_unused, void *to __maybe_unused,
>> +    int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off) __maybe_unused,
>> +    off_t *off __maybe_unused)
>> +{
>> +    return 0;
>> +}
> 
> I think you need to put this one in the header as static inline
> otherwise it'd still appear in the NO_AIO=1 build, like:
>   [jolsa@krava perf]$ make NO_AIO=1
>   ...
>   [jolsa@krava perf]$ nm -D perf | grep perf_mmap__aio_push
>   00000000004c2be0 T perf_mmap__aio_push
> 
> change below makes it disappear completely
> 
> jirka
> 
> 
> ---
> diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
> index b176d88b3fcb..8c7516696891 100644
> --- a/tools/perf/util/mmap.c
> +++ b/tools/perf/util/mmap.c
> @@ -445,13 +445,6 @@ int perf_mmap__aio_push(struct perf_mmap *md, void *to,
>  
>       return rc;
>  }
> -#else
> -int perf_mmap__aio_push(struct perf_mmap *md __maybe_unused, void *to __maybe_unused,
> -     int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off) __maybe_unused,
> -     off_t *off __maybe_unused)
> -{
> -     return 0;
> -}
>  #endif
>  
>  /*
> diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
> index 04ff4d2ffdbe..3ccf8c925002 100644
> --- a/tools/perf/util/mmap.h
> +++ b/tools/perf/util/mmap.h
> @@ -105,10 +105,21 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map);
>  
>  int perf_mmap__push(struct perf_mmap *md, void *to,
>                   int push(struct perf_mmap *map, void *to, void *buf, size_t size));
> +
> +#ifdef HAVE_AIO_SUPPORT
>  int perf_mmap__aio_push(struct perf_mmap *md, void *to,
>                       int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off),
>                       off_t *off);
>  
> +#else
> +static inline int perf_mmap__aio_push(struct perf_mmap *md __maybe_unused, void *to __maybe_unused,
> +     int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off) __maybe_unused,
> +     off_t *off __maybe_unused)
> +{
> +     return 0;
> +}
> +#endif
> +
>  size_t perf_mmap__mmap_len(struct perf_mmap *map);
>  
>  int perf_mmap__read_init(struct perf_mmap *md);
> 

Accepted.

Thanks,
Alexey
