| author | Alexei Starovoitov <ast@kernel.org> | 2020-07-13 16:55:49 -0700 |
|---|---|---|
| committer | Alexei Starovoitov <ast@kernel.org> | 2020-07-13 16:58:50 -0700 |
| commit | 207a573c04755d5ef83e89ee1b3e4941f000acbd (patch) | |
| tree | 9add8fdea634ba08ed2a57cae8da34e497eed9e5 /kernel/trace/bpf_trace.c | |
| parent | 93776cb9ee91aeed43ba53dcec97ffed4ae6f1f7 (diff) | |
| parent | 59e8b60bf068180fcadb0ae06ce8f6f835132ce6 (diff) | |
Merge branch 'trace_printk-banner-remove'
Alan Maguire says:
====================
Steven suggested a way to eliminate the warning banner that appears
when trace_printk() is used from BPF [1].
Applying the patch and testing shows that everything works as expected:
we can call bpf_trace_printk() and see the trace messages in
/sys/kernel/debug/tracing/trace_pipe, and no banner message appears.
Also add a test prog to verify basic bpf_trace_printk() helper behaviour.
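As a rough illustration, a minimal sketch of a program using the helper
might look like this; the section, program name and message are
assumptions for illustration, not the actual selftest:

```c
// SPDX-License-Identifier: GPL-2.0
/* Minimal sketch of a bpf_trace_printk() caller; the program name and
 * message are illustrative assumptions, not the actual selftest.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tp/raw_syscalls/sys_enter")
int sys_enter_prog(void *ctx)
{
	/* BPF requires the format string to live in BPF memory and its
	 * size to be passed explicitly */
	static const char fmt[] = "hello from BPF: %d\n";

	bpf_trace_printk(fmt, sizeof(fmt), 42);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
```

Once such a program is loaded, its messages appear in trace_pipe
without the warning banner.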
Changes since v2:
- fix stray newline in bpf_trace_printk(); use sizeof(buf)
  rather than the #defined value in vsnprintf() (Daniel, patch 1)
- Daniel also pointed out that vsnprintf() returns 0 on error rather
  than a negative value; it also turns out that a null byte is not
  appended when the length of the written string is zero, so for
  cases where the string to be traced is zero length we set the
  null byte explicitly (Daniel, patch 1)
- switch to using getline() for retrieving lines from the trace buffer,
  ensuring we don't read only a portion of the search message in one
  read() operation and then fail to find it (Andrii, patch 2); a sketch
  of this pattern follows this changelog block
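A minimal userspace sketch of that reading pattern (the expected
message is an assumption matching the sketch above; this is not the
actual selftest code):

```c
/* Minimal sketch (not the actual selftest): trigger the attached BPF
 * program with usleep(1), then read trace_pipe line by line with
 * getline() so the expected message is never split across reads.
 * "hello from BPF" is an assumed message for illustration.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define TRACE_PIPE "/sys/kernel/debug/tracing/trace_pipe"

int main(void)
{
	FILE *fp = fopen(TRACE_PIPE, "r");
	char *line = NULL;
	size_t len = 0;
	int found = 0;

	if (!fp)
		return 1;

	usleep(1);	/* any syscall fires tp/raw_syscalls/sys_enter */

	/* trace_pipe blocks until data is available; getline() returns
	 * one complete newline-terminated trace record per call */
	while (!found && getline(&line, &len, fp) != -1)
		found = strstr(line, "hello from BPF") != NULL;

	free(line);
	fclose(fp);
	return found ? 0 : 1;
}
```

Because getline() only ever returns complete newline-terminated lines,
the search string cannot be split across two reads the way it could be
with a fixed-size read().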
Changes since v1:
- reorder header inclusion in bpf_trace.c (Steven, patch 1)
- trace zero-length messages also (Andrii, patch 1)
- use a raw spinlock to ensure there are no issues on PREEMPT_RT
  kernels when bpf_trace_printk() is used within other raw spinlocks
  (Steven, patch 1)
- always enable the bpf_trace_printk tracepoint when loading programs
  that use bpf_trace_printk(); this ensures that a user who has
  disabled the tracepoint cannot prevent tracing output from being
  logged (Steven, patch 1); toggling the event from userspace is
  sketched after this list
- use "tp/raw_syscalls/sys_enter" and a usleep(1) to trigger events
in the selftest ensuring test runs faster (Andrii, patch 2)
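For reference, the event the kernel now enables can also be toggled
from userspace via tracefs; a hedged sketch, assuming the common
debugfs mount point (tracefs may instead be mounted at
/sys/kernel/tracing):

```c
/* Sketch: toggle the bpf_trace/bpf_trace_printk event from userspace.
 * The tracefs path below assumes the debugfs mount; adjust if tracefs
 * is mounted at /sys/kernel/tracing instead.
 */
#include <fcntl.h>
#include <unistd.h>

static int set_bpf_trace_printk_event(int enable)
{
	const char *path = "/sys/kernel/debug/tracing/events/"
			   "bpf_trace/bpf_trace_printk/enable";
	int fd = open(path, O_WRONLY);
	int ret = 0;

	if (fd < 0)
		return -1;
	/* write "1" to enable the event, "0" to disable it */
	if (write(fd, enable ? "1" : "0", 1) != 1)
		ret = -1;
	close(fd);
	return ret;
}
```

With this series, loading a program that calls bpf_trace_printk()
re-enables the event regardless, since loading such a program
expresses the intent to see its output.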
[1] https://lore.kernel.org/r/20200628194334.6238b933@oasis.local.home
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel/trace/bpf_trace.c')
| -rw-r--r-- | kernel/trace/bpf_trace.c | 42 |
1 file changed, 37 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index e178e8e32b33..3cc0dcb60ca2 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -11,6 +11,7 @@
 #include <linux/uaccess.h>
 #include <linux/ctype.h>
 #include <linux/kprobes.h>
+#include <linux/spinlock.h>
 #include <linux/syscalls.h>
 #include <linux/error-injection.h>
 #include <linux/btf_ids.h>
@@ -20,6 +21,9 @@
 #include "trace_probe.h"
 #include "trace.h"
 
+#define CREATE_TRACE_POINTS
+#include "bpf_trace.h"
+
 #define bpf_event_rcu_dereference(p) \
 	rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))
 
@@ -375,6 +379,30 @@ static void bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype,
 	}
 }
 
+static DEFINE_RAW_SPINLOCK(trace_printk_lock);
+
+#define BPF_TRACE_PRINTK_SIZE	1024
+
+static inline __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
+{
+	static char buf[BPF_TRACE_PRINTK_SIZE];
+	unsigned long flags;
+	va_list ap;
+	int ret;
+
+	raw_spin_lock_irqsave(&trace_printk_lock, flags);
+	va_start(ap, fmt);
+	ret = vsnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+	/* vsnprintf() will not append null for zero-length strings */
+	if (ret == 0)
+		buf[0] = '\0';
+	trace_bpf_trace_printk(buf);
+	raw_spin_unlock_irqrestore(&trace_printk_lock, flags);
+
+	return ret;
+}
+
 /*
  * Only limited trace_printk() conversion specifiers allowed:
  * %d %i %u %x %ld %li %lu %lx %lld %lli %llu %llx %p %pB %pks %pus %s
@@ -484,8 +512,7 @@ fmt_next:
  */
 #define __BPF_TP_EMIT()	__BPF_ARG3_TP()
 #define __BPF_TP(...)						\
-	__trace_printk(0 /* Fake ip */,				\
-		       fmt, ##__VA_ARGS__)
+	bpf_do_trace_printk(fmt, ##__VA_ARGS__)
 
 #define __BPF_ARG1_TP(...)					\
 	((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64))	\
@@ -522,10 +549,15 @@ static const struct bpf_func_proto bpf_trace_printk_proto = {
 
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
 {
 	/*
-	 * this program might be calling bpf_trace_printk,
-	 * so allocate per-cpu printk buffers
+	 * This program might be calling bpf_trace_printk,
+	 * so enable the associated bpf_trace/bpf_trace_printk event.
+	 * Repeat this each time as it is possible a user has
+	 * disabled bpf_trace_printk events. By loading a program
+	 * calling bpf_trace_printk() however the user has expressed
+	 * the intent to see such events.
 	 */
-	trace_printk_init_buffers();
+	if (trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1))
+		pr_warn_ratelimited("could not enable bpf_trace_printk events");
 
 	return &bpf_trace_printk_proto;
 }