On Fri, Jul 3, 2020 at 7:47 AM Alan Maguire <alan.magu...@oracle.com> wrote:
>
> The bpf helper bpf_trace_printk() uses trace_printk() under the hood.
> This leads to an alarming warning message originating from trace
> buffer allocation which occurs the first time a program using
> bpf_trace_printk() is loaded.
>
> We can instead create a trace event for bpf_trace_printk() and enable
> it in-kernel when/if we encounter a program using the
> bpf_trace_printk() helper. With this approach, trace_printk()
> is not used directly and no warning message appears.
>
> This work was started by Steven (see Link) and finished by Alan; added
> Steven's Signed-off-by with his permission.
>
> Link: https://lore.kernel.org/r/20200628194334.6238b...@oasis.local.home
> Signed-off-by: Steven Rostedt (VMware) <rost...@goodmis.org>
> Signed-off-by: Alan Maguire <alan.magu...@oracle.com>
> ---
>  kernel/trace/Makefile    |  2 ++
>  kernel/trace/bpf_trace.c | 41 +++++++++++++++++++++++++++++++++++++----
>  kernel/trace/bpf_trace.h | 34 ++++++++++++++++++++++++++++++++++
>  3 files changed, 73 insertions(+), 4 deletions(-)
>  create mode 100644 kernel/trace/bpf_trace.h
>
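For context on the approach: the new kernel/trace/bpf_trace.h (not quoted in
the hunks below) has to declare a bpf_trace/bpf_trace_printk tracepoint that
records the already-formatted string. Since the header isn't shown here, the
following is only my rough sketch of the general shape such a TRACE_EVENT
definition would take, with a placeholder field name of my own choosing:

	TRACE_EVENT(bpf_trace_printk,

		TP_PROTO(const char *bpf_string),

		TP_ARGS(bpf_string),

		/* store the formatted string as a variable-length field */
		TP_STRUCT__entry(
			__string(bpf_string, bpf_string)
		),

		TP_fast_assign(
			__assign_str(bpf_string, bpf_string);
		),

		TP_printk("%s", __get_str(bpf_string))
	);

Once enabled, the formatted output would then be read from the
bpf_trace/bpf_trace_printk event in tracefs rather than going through
trace_printk().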
[...]

> +static DEFINE_SPINLOCK(trace_printk_lock);
> +
> +#define BPF_TRACE_PRINTK_SIZE 1024
> +
> +static inline int bpf_do_trace_printk(const char *fmt, ...)
> +{
> +	static char buf[BPF_TRACE_PRINTK_SIZE];
> +	unsigned long flags;
> +	va_list ap;
> +	int ret;
> +
> +	spin_lock_irqsave(&trace_printk_lock, flags);
> +	va_start(ap, fmt);
> +	ret = vsnprintf(buf, BPF_TRACE_PRINTK_SIZE, fmt, ap);
> +	va_end(ap);
> +	if (ret > 0)
> +		trace_bpf_trace_printk(buf);

Is there any reason to artificially limit the case of printing an empty
string? It's an awkward use case, for sure, but having the guarantee that
every bpf_trace_printk() invocation triggers the tracepoint is a nice
property, no?

> +	spin_unlock_irqrestore(&trace_printk_lock, flags);
> +
> +	return ret;
> +}
> +
>  /*
>   * Only limited trace_printk() conversion specifiers allowed:
>   * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
> @@ -483,8 +510,7 @@ static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
>   */
>  #define __BPF_TP_EMIT()	__BPF_ARG3_TP()
>  #define __BPF_TP(...)						\
> -	__trace_printk(0 /* Fake ip */,				\
> -		       fmt, ##__VA_ARGS__)
> +	bpf_do_trace_printk(fmt, ##__VA_ARGS__)
>
>  #define __BPF_ARG1_TP(...)					\
>  	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
> @@ -518,13 +544,20 @@ static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
>  	.arg2_type	= ARG_CONST_SIZE,
>  };
>
> +int bpf_trace_printk_enabled;

static?

> +
>  const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
>  {
>  	/*
>  	 * this program might be calling bpf_trace_printk,
> -	 * so allocate per-cpu printk buffers
> +	 * so enable the associated bpf_trace/bpf_trace_printk event.
>  	 */
> -	trace_printk_init_buffers();
> +	if (!bpf_trace_printk_enabled) {
> +		if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))

just to double check, it's ok to simultaneously enable the same event in
parallel, right?

> +			pr_warn_ratelimited("could not enable bpf_trace_printk events");
> +		else
> +			bpf_trace_printk_enabled = 1;
> +	}
>
>  	return &bpf_trace_printk_proto;
>  }

[...]
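To make the empty-string point above concrete, something along these lines is
what I had in mind (untested sketch of the same locked section; in-kernel
vsnprintf() does not return negative values and always NUL-terminates the
buffer for a non-zero size, so the event can simply be emitted
unconditionally):

	spin_lock_irqsave(&trace_printk_lock, flags);
	va_start(ap, fmt);
	ret = vsnprintf(buf, BPF_TRACE_PRINTK_SIZE, fmt, ap);
	va_end(ap);
	/* fire the event even for a zero-length result so that every
	 * bpf_trace_printk() invocation is visible in the trace buffer
	 */
	trace_bpf_trace_printk(buf);
	spin_unlock_irqrestore(&trace_printk_lock, flags);

	return ret;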