perf/bpf: Always use perf callchains if exist
If the perf_event has PERF_SAMPLE_CALLCHAIN, BPF can use it for the stack trace. The problematic cases like PEBS and IBS are already handled in the PMU drivers, which fill in the callchain info in the sample data. For the others, we can call perf_callchain() before invoking the BPF handler.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220908214104.3851807-2-namhyung@kernel.org
committed by Peter Zijlstra
parent 3749d33e51
commit 16817ad7e8
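For context, here is a minimal userspace sketch (not part of the patch) of the scenario this change targets: a sampling event opened with PERF_SAMPLE_CALLCHAIN and an already loaded BPF program attached to it via PERF_EVENT_IOC_SET_BPF. The function name, event choice, sample period, and the origin of prog_fd are assumptions for illustration only.

/*
 * Sketch only: open a sampling event that requests PERF_SAMPLE_CALLCHAIN
 * and attach a previously loaded BPF program (prog_fd is assumed to come
 * from a prior bpf(BPF_PROG_LOAD)).  With this patch, the overflow path
 * can hand the perf-collected callchain to that program.
 */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int attach_bpf_to_sampling_event(int prog_fd)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_CALLCHAIN;

	/* pid = 0 (this task), cpu = -1 (any cpu), no group, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return -1;

	/* hand the BPF program to the event's overflow handler */
	if (ioctl(fd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}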
@@ -10000,8 +10000,16 @@ static void bpf_overflow_handler(struct perf_event *event,
 		goto out;
 	rcu_read_lock();
 	prog = READ_ONCE(event->prog);
-	if (prog)
+	if (prog) {
+		if (prog->call_get_stack &&
+		    (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) &&
+		    !(data->sample_flags & PERF_SAMPLE_CALLCHAIN)) {
+			data->callchain = perf_callchain(event, regs);
+			data->sample_flags |= PERF_SAMPLE_CALLCHAIN;
+		}
+
 		ret = bpf_prog_run(prog, &ctx);
+	}
 	rcu_read_unlock();
 out:
 	__this_cpu_dec(bpf_prog_active);
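The new branch only precomputes a callchain when the attached program actually walks the stack (prog->call_get_stack is set by the verifier when the program calls bpf_get_stack() or bpf_get_stackid()), the event asked for PERF_SAMPLE_CALLCHAIN, and the PMU driver has not already filled one in. A minimal sketch of such a program follows; the section, map, and function names are illustrative and not part of the patch.

/*
 * Sketch of a perf_event BPF program of the kind prog->call_get_stack
 * refers to: it calls bpf_get_stackid(), so with PERF_SAMPLE_CALLCHAIN
 * set on the event it can now reuse the callchain perf already collected
 * instead of unwinding again in the helper.
 */
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
#include <bpf/bpf_helpers.h>

#define MAX_STACK_DEPTH 127	/* mirrors PERF_MAX_STACK_DEPTH */

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 1024);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, MAX_STACK_DEPTH * sizeof(__u64));
} stacks SEC(".maps");

SEC("perf_event")
int sample_stack(struct bpf_perf_event_data *ctx)
{
	/* Record the user-space stack of the sampled task in the map. */
	bpf_get_stackid(ctx, &stacks, BPF_F_USER_STACK);
	return 0;
}

char _license[] SEC("license") = "GPL";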
@@ -10027,7 +10035,7 @@ static int perf_event_set_bpf_handler(struct perf_event *event,
 
 	if (event->attr.precise_ip &&
 	    prog->call_get_stack &&
-	    (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY) ||
+	    (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) ||
 	     event->attr.exclude_callchain_kernel ||
 	     event->attr.exclude_callchain_user)) {
 		/*
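With this change, attaching a stack-walking BPF program to a precise_ip (PEBS/IBS-style) event requires the plain PERF_SAMPLE_CALLCHAIN bit rather than the internal __PERF_SAMPLE_CALLCHAIN_EARLY flag, and neither the kernel nor the user callchain may be excluded; otherwise the attach is rejected. A hedged sketch of an attr setup that satisfies the updated check (the helper name and field values are illustrative):

/*
 * Sketch only: attr settings for a precise_ip event that keep a
 * bpf_get_stack()/bpf_get_stackid()-using program attachable under
 * the updated check.
 */
#include <linux/perf_event.h>
#include <string.h>

static void init_precise_attr(struct perf_event_attr *attr)
{
	memset(attr, 0, sizeof(*attr));
	attr->size = sizeof(*attr);
	attr->type = PERF_TYPE_HARDWARE;
	attr->config = PERF_COUNT_HW_CPU_CYCLES;
	attr->precise_ip = 2;				/* PEBS/IBS-style sampling */
	attr->sample_period = 100000;
	attr->sample_type = PERF_SAMPLE_CALLCHAIN;	/* required by the new check */
	attr->exclude_callchain_kernel = 0;		/* must not be excluded */
	attr->exclude_callchain_user = 0;		/* must not be excluded */
}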