Merge branch 'bpf: Add missed stats for kprobes'

Jiri Olsa says:

====================
hi,
at the moment we can't retrieve the number of missed kprobe
executions and, consequently, of missed executions of the attached BPF programs.

This patchset adds:
  - counting of missed executions at the attach layer for:
    . kprobes attached through perf link (kprobe/ftrace)
    . kprobes attached through kprobe.multi link (fprobe)
  - counting of recursion_misses for BPF kprobe programs
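
A minimal userspace sketch of reading recursion_misses (illustrative only, not
part of this set; assumes libbpf's bpf_prog_get_info_by_fd() and an
already-obtained program fd, mirroring the selftest further down):

#include <bpf/bpf.h>

static __u64 prog_recursion_misses(int prog_fd)
{
        struct bpf_prog_info info = {};
        __u32 len = sizeof(info);

        /* recursion_misses counts program runs skipped by the
         * bpf_prog_active recursion check */
        if (bpf_prog_get_info_by_fd(prog_fd, &info, &len))
                return 0;
        return info.recursion_misses;
}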

It's still technically possible to create a kprobe without a perf link (using
the SET_BPF perf ioctl), in which case we don't have a way to retrieve the
kprobe's 'missed' count. However, both the libbpf and cilium/ebpf libraries
use a perf link when it's available, and for old kernels without perf link
support we can use a BPF program to retrieve the kprobe missed count.
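
With this set applied, the per-link 'missed' count can be read from
bpf_link_info. A minimal sketch (illustrative only, error handling elided;
assumes libbpf's bpf_link_get_info_by_fd() and the fd of a kprobe perf link
or kprobe.multi link, mirroring the selftests below):

#include <bpf/bpf.h>

static __u64 link_kprobe_missed(int link_fd)
{
        struct bpf_link_info info = {};
        __u32 len = sizeof(info);

        if (bpf_link_get_info_by_fd(link_fd, &info, &len))
                return 0;
        /* kprobe/kretprobe attached through a perf link */
        if (info.type == BPF_LINK_TYPE_PERF_EVENT &&
            (info.perf_event.type == BPF_PERF_EVENT_KPROBE ||
             info.perf_event.type == BPF_PERF_EVENT_KRETPROBE))
                return info.perf_event.kprobe.missed;
        /* kprobes attached through a kprobe.multi link (fprobe) */
        if (info.type == BPF_LINK_TYPE_KPROBE_MULTI)
                return info.kprobe_multi.missed;
        return 0;
}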

v3 changes:
  - added acks [Song]
  - made test_missed not serial [Andrii]

Also available at:
  https://git.kernel.org/pub/scm/linux/kernel/git/jolsa/perf.git
  bpf/missed_stats

thanks,
jirka
====================

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
commit 0e73ef1d8c
Andrii Nakryiko, 2023-09-25 16:37:45 -07:00
15 changed files with 322 additions and 13 deletions


@@ -2922,6 +2922,22 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
static __always_inline void
bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
{
const struct bpf_prog_array_item *item;
struct bpf_prog *prog;
if (unlikely(!array))
return;
item = &array->items[0];
while ((prog = READ_ONCE(item->prog))) {
bpf_prog_inc_misses_counter(prog);
item++;
}
}
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,


@@ -761,7 +761,8 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
u64 *probe_offset, u64 *probe_addr);
u64 *probe_offset, u64 *probe_addr,
unsigned long *missed);
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
#else
@@ -801,7 +802,7 @@ static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
static inline int bpf_get_perf_event_info(const struct perf_event *event,
u32 *prog_id, u32 *fd_type,
const char **buf, u64 *probe_offset,
u64 *probe_addr)
u64 *probe_addr, unsigned long *missed)
{
return -EOPNOTSUPP;
}
@@ -877,6 +878,7 @@ extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
u32 *fd_type, const char **symbol,
u64 *probe_offset, u64 *probe_addr,
unsigned long *missed,
bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS


@@ -6532,6 +6532,7 @@ struct bpf_link_info {
__aligned_u64 addrs;
__u32 count; /* in/out: kprobe_multi function count */
__u32 flags;
__u64 missed;
} kprobe_multi;
struct {
__u32 type; /* enum bpf_perf_event_type */
@@ -6547,6 +6548,7 @@ struct bpf_link_info {
__u32 name_len;
__u32 offset; /* offset from func_name */
__u64 addr;
__u64 missed;
} kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
struct {
__aligned_u64 tp_name; /* in/out */


@@ -3374,7 +3374,7 @@ static void bpf_perf_link_dealloc(struct bpf_link *link)
static int bpf_perf_link_fill_common(const struct perf_event *event,
char __user *uname, u32 ulen,
u64 *probe_offset, u64 *probe_addr,
u32 *fd_type)
u32 *fd_type, unsigned long *missed)
{
const char *buf;
u32 prog_id;
@@ -3385,7 +3385,7 @@ static int bpf_perf_link_fill_common(const struct perf_event *event,
return -EINVAL;
err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
probe_offset, probe_addr);
probe_offset, probe_addr, missed);
if (err)
return err;
if (!uname)
@@ -3408,6 +3408,7 @@ static int bpf_perf_link_fill_common(const struct perf_event *event,
static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
struct bpf_link_info *info)
{
unsigned long missed;
char __user *uname;
u64 addr, offset;
u32 ulen, type;
@@ -3416,7 +3417,7 @@ static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
ulen = info->perf_event.kprobe.name_len;
err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
&type);
&type, &missed);
if (err)
return err;
if (type == BPF_FD_TYPE_KRETPROBE)
@@ -3425,6 +3426,7 @@ static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
info->perf_event.type = BPF_PERF_EVENT_KPROBE;
info->perf_event.kprobe.offset = offset;
info->perf_event.kprobe.missed = missed;
if (!kallsyms_show_value(current_cred()))
addr = 0;
info->perf_event.kprobe.addr = addr;
@@ -3444,7 +3446,7 @@ static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
ulen = info->perf_event.uprobe.name_len;
err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
&type);
&type, NULL);
if (err)
return err;
@@ -3480,7 +3482,7 @@ static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
ulen = info->perf_event.tracepoint.name_len;
info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL);
return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL, NULL);
}
static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
@@ -4813,7 +4815,7 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
&buf, &probe_offset,
&probe_addr);
&probe_addr, NULL);
if (!err)
err = bpf_task_fd_query_copy(attr, uattr, prog_id,
fd_type, buf,


@@ -117,6 +117,9 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
* and don't send kprobe event into ring-buffer,
* so return zero here
*/
rcu_read_lock();
bpf_prog_inc_misses_counters(rcu_dereference(call->prog_array));
rcu_read_unlock();
ret = 0;
goto out;
}
@@ -2384,7 +2387,8 @@ int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
u64 *probe_offset, u64 *probe_addr)
u64 *probe_offset, u64 *probe_addr,
unsigned long *missed)
{
bool is_tracepoint, is_syscall_tp;
struct bpf_prog *prog;
@@ -2419,7 +2423,7 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
#ifdef CONFIG_KPROBE_EVENTS
if (flags & TRACE_EVENT_FL_KPROBE)
err = bpf_get_kprobe_info(event, fd_type, buf,
probe_offset, probe_addr,
probe_offset, probe_addr, missed,
event->attr.type == PERF_TYPE_TRACEPOINT);
#endif
#ifdef CONFIG_UPROBE_EVENTS
@@ -2614,6 +2618,7 @@ static int bpf_kprobe_multi_link_fill_link_info(const struct bpf_link *link,
kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
info->kprobe_multi.count = kmulti_link->cnt;
info->kprobe_multi.flags = kmulti_link->flags;
info->kprobe_multi.missed = kmulti_link->fp.nmissed;
if (!uaddrs)
return 0;
@@ -2710,6 +2715,7 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
int err;
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
bpf_prog_inc_misses_counter(link->link.prog);
err = 0;
goto out;
}


@@ -1189,6 +1189,12 @@ static const struct file_operations kprobe_events_ops = {
.write = probes_write,
};
static unsigned long trace_kprobe_missed(struct trace_kprobe *tk)
{
return trace_kprobe_is_return(tk) ?
tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
}
/* Probes profiling interfaces */
static int probes_profile_seq_show(struct seq_file *m, void *v)
{
@@ -1200,8 +1206,7 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
return 0;
tk = to_trace_kprobe(ev);
nmissed = trace_kprobe_is_return(tk) ?
tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
nmissed = trace_kprobe_missed(tk);
seq_printf(m, " %-44s %15lu %15lu\n",
trace_probe_name(&tk->tp),
trace_kprobe_nhit(tk),
@@ -1547,7 +1552,8 @@ NOKPROBE_SYMBOL(kretprobe_perf_func);
int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
const char **symbol, u64 *probe_offset,
u64 *probe_addr, bool perf_type_tracepoint)
u64 *probe_addr, unsigned long *missed,
bool perf_type_tracepoint)
{
const char *pevent = trace_event_name(event->tp_event);
const char *group = event->tp_event->class->system;
@@ -1566,6 +1572,8 @@ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
*probe_addr = kallsyms_show_value(current_cred()) ?
(unsigned long)tk->rp.kp.addr : 0;
*symbol = tk->symbol;
if (missed)
*missed = trace_kprobe_missed(tk);
return 0;
}
#endif /* CONFIG_PERF_EVENTS */


@@ -265,6 +265,7 @@ show_kprobe_multi_json(struct bpf_link_info *info, json_writer_t *wtr)
jsonw_bool_field(json_wtr, "retprobe",
info->kprobe_multi.flags & BPF_F_KPROBE_MULTI_RETURN);
jsonw_uint_field(json_wtr, "func_cnt", info->kprobe_multi.count);
jsonw_uint_field(json_wtr, "missed", info->kprobe_multi.missed);
jsonw_name(json_wtr, "funcs");
jsonw_start_array(json_wtr);
addrs = u64_to_ptr(info->kprobe_multi.addrs);
@@ -301,6 +302,7 @@ show_perf_event_kprobe_json(struct bpf_link_info *info, json_writer_t *wtr)
jsonw_string_field(wtr, "func",
u64_to_ptr(info->perf_event.kprobe.func_name));
jsonw_uint_field(wtr, "offset", info->perf_event.kprobe.offset);
jsonw_uint_field(wtr, "missed", info->perf_event.kprobe.missed);
}
static void
@@ -641,6 +643,8 @@ static void show_kprobe_multi_plain(struct bpf_link_info *info)
else
printf("\n\tkprobe.multi ");
printf("func_cnt %u ", info->kprobe_multi.count);
if (info->kprobe_multi.missed)
printf("missed %llu ", info->kprobe_multi.missed);
addrs = (__u64 *)u64_to_ptr(info->kprobe_multi.addrs);
qsort(addrs, info->kprobe_multi.count, sizeof(__u64), cmp_u64);
@@ -683,6 +687,8 @@ static void show_perf_event_kprobe_plain(struct bpf_link_info *info)
printf("%s", buf);
if (info->perf_event.kprobe.offset)
printf("+%#x", info->perf_event.kprobe.offset);
if (info->perf_event.kprobe.missed)
printf(" missed %llu", info->perf_event.kprobe.missed);
printf(" ");
}


@@ -6532,6 +6532,7 @@ struct bpf_link_info {
__aligned_u64 addrs;
__u32 count; /* in/out: kprobe_multi function count */
__u32 flags;
__u64 missed;
} kprobe_multi;
struct {
__u32 type; /* enum bpf_perf_event_type */
@@ -6547,6 +6548,7 @@ struct bpf_link_info {
__u32 name_len;
__u32 offset; /* offset from func_name */
__u64 addr;
__u64 missed;
} kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
struct {
__aligned_u64 tp_name; /* in/out */


@@ -10,3 +10,4 @@ fexit_test/fexit_many_args # fexit_many_args:FAIL:fexit_ma
fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95
fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95
missed/kprobe_recursion # missed_kprobe_recursion__attach unexpected error: -95 (errno 95)


@@ -138,6 +138,10 @@ __bpf_kfunc void bpf_iter_testmod_seq_destroy(struct bpf_iter_testmod_seq *it)
it->cnt = 0;
}
__bpf_kfunc void bpf_kfunc_common_test(void)
{
}
struct bpf_testmod_btf_type_tag_1 {
int a;
};
@@ -343,6 +347,7 @@ BTF_SET8_START(bpf_testmod_common_kfunc_ids)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY)
BTF_ID_FLAGS(func, bpf_kfunc_common_test)
BTF_SET8_END(bpf_testmod_common_kfunc_ids)
static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = {


@@ -104,4 +104,6 @@ void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p);
void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p);
void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p);
void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len);
void bpf_kfunc_common_test(void) __ksym;
#endif /* _BPF_TESTMOD_KFUNC_H */


@@ -0,0 +1,138 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "missed_kprobe.skel.h"
#include "missed_kprobe_recursion.skel.h"
#include "missed_tp_recursion.skel.h"
/*
* Putting a kprobe on bpf_fentry_test1, which calls the bpf_kfunc_common_test
* kfunc that also has a kprobe on it. The latter won't get triggered due to
* the kprobe recursion check, and the kprobe missed counter is incremented.
*/
static void test_missed_perf_kprobe(void)
{
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct bpf_link_info info = {};
struct missed_kprobe *skel;
__u32 len = sizeof(info);
int err, prog_fd;
skel = missed_kprobe__open_and_load();
if (!ASSERT_OK_PTR(skel, "missed_kprobe__open_and_load"))
goto cleanup;
err = missed_kprobe__attach(skel);
if (!ASSERT_OK(err, "missed_kprobe__attach"))
goto cleanup;
prog_fd = bpf_program__fd(skel->progs.trigger);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
err = bpf_link_get_info_by_fd(bpf_link__fd(skel->links.test2), &info, &len);
if (!ASSERT_OK(err, "bpf_link_get_info_by_fd"))
goto cleanup;
ASSERT_EQ(info.type, BPF_LINK_TYPE_PERF_EVENT, "info.type");
ASSERT_EQ(info.perf_event.type, BPF_PERF_EVENT_KPROBE, "info.perf_event.type");
ASSERT_EQ(info.perf_event.kprobe.missed, 1, "info.perf_event.kprobe.missed");
cleanup:
missed_kprobe__destroy(skel);
}
static __u64 get_missed_count(int fd)
{
struct bpf_prog_info info = {};
__u32 len = sizeof(info);
int err;
err = bpf_prog_get_info_by_fd(fd, &info, &len);
if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd"))
return (__u64) -1;
return info.recursion_misses;
}
/*
* Putting a kprobe.multi on bpf_fentry_test1, which calls the
* bpf_kfunc_common_test kfunc that has 3 perf event kprobes and 1 kprobe.multi
* attached.
*
* Because fprobe (the kprobe.multi attach layer) does not have a strict
* recursion check, the kprobe's bpf_prog_active check is hit for test2-5.
*/
static void test_missed_kprobe_recursion(void)
{
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct missed_kprobe_recursion *skel;
int err, prog_fd;
skel = missed_kprobe_recursion__open_and_load();
if (!ASSERT_OK_PTR(skel, "missed_kprobe_recursion__open_and_load"))
goto cleanup;
err = missed_kprobe_recursion__attach(skel);
if (!ASSERT_OK(err, "missed_kprobe_recursion__attach"))
goto cleanup;
prog_fd = bpf_program__fd(skel->progs.trigger);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test1)), 0, "test1_recursion_misses");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test2)), 1, "test2_recursion_misses");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test3)), 1, "test3_recursion_misses");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test4)), 1, "test4_recursion_misses");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test5)), 1, "test5_recursion_misses");
cleanup:
missed_kprobe_recursion__destroy(skel);
}
/*
* Putting kprobe on bpf_fentry_test1 that calls bpf_printk and invokes
* bpf_trace_printk tracepoint. The bpf_trace_printk tracepoint has test[234]
* programs attached to it.
*
* Because kprobe execution goes through bpf_prog_active check, programs
* attached to the tracepoint will fail the recursion check and increment
* the recursion_misses stats.
*/
static void test_missed_tp_recursion(void)
{
LIBBPF_OPTS(bpf_test_run_opts, topts);
struct missed_tp_recursion *skel;
int err, prog_fd;
skel = missed_tp_recursion__open_and_load();
if (!ASSERT_OK_PTR(skel, "missed_tp_recursion__open_and_load"))
goto cleanup;
err = missed_tp_recursion__attach(skel);
if (!ASSERT_OK(err, "missed_tp_recursion__attach"))
goto cleanup;
prog_fd = bpf_program__fd(skel->progs.trigger);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
ASSERT_EQ(topts.retval, 0, "test_run");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test1)), 0, "test1_recursion_misses");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test2)), 1, "test2_recursion_misses");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test3)), 1, "test3_recursion_misses");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test4)), 1, "test4_recursion_misses");
cleanup:
missed_tp_recursion__destroy(skel);
}
void test_missed(void)
{
if (test__start_subtest("perf_kprobe"))
test_missed_perf_kprobe();
if (test__start_subtest("kprobe_recursion"))
test_missed_kprobe_recursion();
if (test__start_subtest("tp_recursion"))
test_missed_tp_recursion();
}


@@ -0,0 +1,30 @@
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
/*
* No tests in here, just to trigger 'bpf_fentry_test*'
* through tracing test_run
*/
SEC("fentry/bpf_modify_return_test")
int BPF_PROG(trigger)
{
return 0;
}
SEC("kprobe/bpf_fentry_test1")
int test1(struct pt_regs *ctx)
{
bpf_kfunc_common_test();
return 0;
}
SEC("kprobe/bpf_kfunc_common_test")
int test2(struct pt_regs *ctx)
{
return 0;
}


@@ -0,0 +1,48 @@
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"
char _license[] SEC("license") = "GPL";
/*
* No tests in here, just to trigger 'bpf_fentry_test*'
* through tracing test_run
*/
SEC("fentry/bpf_modify_return_test")
int BPF_PROG(trigger)
{
return 0;
}
SEC("kprobe.multi/bpf_fentry_test1")
int test1(struct pt_regs *ctx)
{
bpf_kfunc_common_test();
return 0;
}
SEC("kprobe/bpf_kfunc_common_test")
int test2(struct pt_regs *ctx)
{
return 0;
}
SEC("kprobe/bpf_kfunc_common_test")
int test3(struct pt_regs *ctx)
{
return 0;
}
SEC("kprobe/bpf_kfunc_common_test")
int test4(struct pt_regs *ctx)
{
return 0;
}
SEC("kprobe.multi/bpf_kfunc_common_test")
int test5(struct pt_regs *ctx)
{
return 0;
}


@@ -0,0 +1,41 @@
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
/*
* No tests in here, just to trigger 'bpf_fentry_test*'
* through tracing test_run
*/
SEC("fentry/bpf_modify_return_test")
int BPF_PROG(trigger)
{
return 0;
}
SEC("kprobe/bpf_fentry_test1")
int test1(struct pt_regs *ctx)
{
bpf_printk("test");
return 0;
}
SEC("tp/bpf_trace/bpf_trace_printk")
int test2(struct pt_regs *ctx)
{
return 0;
}
SEC("tp/bpf_trace/bpf_trace_printk")
int test3(struct pt_regs *ctx)
{
return 0;
}
SEC("tp/bpf_trace/bpf_trace_printk")
int test4(struct pt_regs *ctx)
{
return 0;
}