selftests/bpf: lazy-load trigger bench BPF programs

Instead of front-loading all possible benchmarking BPF programs for
trigger benchmarks, explicitly specify which BPF program each benchmark
uses and load only that one.

This allows more flexibility in supporting older kernels, where some
program types may not be loadable (e.g., those that rely on a newly
added kfunc).

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20240326162151.3981687-5-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit b4ccf9158f (parent 208c439120)
Author: Andrii Nakryiko, 2024-03-26 09:21:49 -07:00, committed by Alexei Starovoitov
2 changed files with 42 additions and 12 deletions
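
For reference, the bench-side pattern introduced below boils down to the
following sketch (illustrative only, not verbatim selftests code; error
handling is trimmed and the skeleton/program names are taken from the diff):

	struct trigger_bench *skel;

	/* open the skeleton; nothing is loaded into the kernel yet */
	skel = trigger_bench__open();
	if (!skel)
		exit(1);

	/* all programs are declared with SEC("?...") and thus start with
	 * autoload disabled; opt in only the one this benchmark needs
	 */
	bpf_program__set_autoload(skel->progs.bench_trigger_kprobe, true);

	/* load only the opted-in program; the rest are skipped entirely */
	if (trigger_bench__load(skel))
		exit(1);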

tools/testing/selftests/bpf/benchs/bench_trigger.c

@@ -133,8 +133,6 @@ static void trigger_measure(struct bench_res *res)
 
 static void setup_ctx(void)
 {
-	int err;
-
 	setup_libbpf();
 
 	ctx.skel = trigger_bench__open();
@@ -143,7 +141,15 @@ static void setup_ctx(void)
 		exit(1);
 	}
 
+	/* default "driver" BPF program */
+	bpf_program__set_autoload(ctx.skel->progs.trigger_driver, true);
+
 	ctx.skel->rodata->batch_iters = args.batch_iters;
+}
+
+static void load_ctx(void)
+{
+	int err;
 
 	err = trigger_bench__load(ctx.skel);
 	if (err) {
@@ -172,6 +178,9 @@ static void trigger_syscall_count_setup(void)
 static void trigger_kernel_count_setup(void)
 {
 	setup_ctx();
+	bpf_program__set_autoload(ctx.skel->progs.trigger_driver, false);
+	bpf_program__set_autoload(ctx.skel->progs.trigger_count, true);
+	load_ctx();
 	/* override driver program */
 	ctx.driver_prog_fd = bpf_program__fd(ctx.skel->progs.trigger_count);
 }
@@ -179,36 +188,48 @@ static void trigger_kernel_count_setup(void)
 static void trigger_kprobe_setup(void)
 {
 	setup_ctx();
+	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kprobe, true);
+	load_ctx();
 	attach_bpf(ctx.skel->progs.bench_trigger_kprobe);
 }
 
 static void trigger_kretprobe_setup(void)
 {
 	setup_ctx();
+	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kretprobe, true);
+	load_ctx();
 	attach_bpf(ctx.skel->progs.bench_trigger_kretprobe);
 }
 
 static void trigger_kprobe_multi_setup(void)
 {
 	setup_ctx();
+	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kprobe_multi, true);
+	load_ctx();
 	attach_bpf(ctx.skel->progs.bench_trigger_kprobe_multi);
 }
 
 static void trigger_kretprobe_multi_setup(void)
 {
 	setup_ctx();
+	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_kretprobe_multi, true);
+	load_ctx();
 	attach_bpf(ctx.skel->progs.bench_trigger_kretprobe_multi);
 }
 
 static void trigger_fentry_setup(void)
 {
 	setup_ctx();
+	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_fentry, true);
+	load_ctx();
 	attach_bpf(ctx.skel->progs.bench_trigger_fentry);
 }
 
 static void trigger_fexit_setup(void)
 {
 	setup_ctx();
+	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_fexit, true);
+	load_ctx();
 	attach_bpf(ctx.skel->progs.bench_trigger_fexit);
 }
 
@@ -279,15 +300,24 @@ static void usetup(bool use_retprobe, void *target_addr)
 {
 	size_t uprobe_offset;
 	struct bpf_link *link;
+	int err;
 
 	setup_libbpf();
 
-	ctx.skel = trigger_bench__open_and_load();
+	ctx.skel = trigger_bench__open();
 	if (!ctx.skel) {
 		fprintf(stderr, "failed to open skeleton\n");
 		exit(1);
 	}
 
+	bpf_program__set_autoload(ctx.skel->progs.bench_trigger_uprobe, true);
+
+	err = trigger_bench__load(ctx.skel);
+	if (err) {
+		fprintf(stderr, "failed to load skeleton\n");
+		exit(1);
+	}
+
 	uprobe_offset = get_uprobe_offset(target_addr);
 	link = bpf_program__attach_uprobe(ctx.skel->progs.bench_trigger_uprobe,
 					  use_retprobe,
tools/testing/selftests/bpf/progs/trigger_bench.c

@@ -25,7 +25,7 @@ static __always_inline void inc_counter(void)
 	__sync_add_and_fetch(&hits[cpu & CPU_MASK].value, 1);
 }
 
-SEC("uprobe")
+SEC("?uprobe")
 int bench_trigger_uprobe(void *ctx)
 {
 	inc_counter();
@@ -34,7 +34,7 @@ int bench_trigger_uprobe(void *ctx)
 
 const volatile int batch_iters = 0;
 
-SEC("raw_tp")
+SEC("?raw_tp")
 int trigger_count(void *ctx)
 {
 	int i;
@@ -45,7 +45,7 @@ int trigger_count(void *ctx)
 	return 0;
 }
 
-SEC("raw_tp")
+SEC("?raw_tp")
 int trigger_driver(void *ctx)
 {
 	int i;
@@ -56,42 +56,42 @@ int trigger_driver(void *ctx)
 	return 0;
 }
 
-SEC("kprobe/bpf_get_numa_node_id")
+SEC("?kprobe/bpf_get_numa_node_id")
 int bench_trigger_kprobe(void *ctx)
 {
 	inc_counter();
 	return 0;
 }
 
-SEC("kretprobe/bpf_get_numa_node_id")
+SEC("?kretprobe/bpf_get_numa_node_id")
 int bench_trigger_kretprobe(void *ctx)
 {
 	inc_counter();
 	return 0;
 }
 
-SEC("kprobe.multi/bpf_get_numa_node_id")
+SEC("?kprobe.multi/bpf_get_numa_node_id")
 int bench_trigger_kprobe_multi(void *ctx)
 {
 	inc_counter();
 	return 0;
 }
 
-SEC("kretprobe.multi/bpf_get_numa_node_id")
+SEC("?kretprobe.multi/bpf_get_numa_node_id")
 int bench_trigger_kretprobe_multi(void *ctx)
 {
 	inc_counter();
 	return 0;
 }
 
-SEC("fentry/bpf_get_numa_node_id")
+SEC("?fentry/bpf_get_numa_node_id")
 int bench_trigger_fentry(void *ctx)
 {
 	inc_counter();
 	return 0;
 }
 
-SEC("fexit/bpf_get_numa_node_id")
+SEC("?fexit/bpf_get_numa_node_id")
 int bench_trigger_fexit(void *ctx)
 {
 	inc_counter();