selftests/bpf: Verify calling core kfuncs from BPF_PROG_TYPE_SYSCALL

Now that we can call some kfuncs from BPF_PROG_TYPE_SYSCALL progs, let's
add some selftests that verify as much. As a bonus, let's also verify
that we can't call the progs from raw tracepoints. To do this, we add a
new selftest suite called verifier_kfunc_prog_types.

Signed-off-by: David Vernet <void@manifault.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/bpf/20240405143041.632519-3-void@manifault.com
This commit is contained in:
David Vernet 2024-04-05 09:30:41 -05:00 committed by Andrii Nakryiko
parent a8e03b6bbb
commit 1bc724af00
4 changed files with 135 additions and 2 deletions

View File

@ -0,0 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include "verifier_kfunc_prog_types.skel.h"
/*
 * Entry point for the verifier_kfunc_prog_types suite: loads the BPF
 * skeleton included above and hands it to RUN_TESTS(), which presumably
 * validates each program against its __success / __failure + __msg
 * annotations (macro defined in test_progs.h — semantics not visible here).
 */
void test_verifier_kfunc_prog_types(void)
{
RUN_TESTS(verifier_kfunc_prog_types);
}

View File

@ -13,7 +13,7 @@ struct __cgrps_kfunc_map_value {
struct cgroup __kptr * cgrp;
};
struct hash_map {
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, int);
__type(value, struct __cgrps_kfunc_map_value);

View File

@ -13,7 +13,7 @@ struct __tasks_kfunc_map_value {
struct task_struct __kptr * task;
};
struct hash_map {
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, int);
__type(value, struct __tasks_kfunc_map_value);

View File

@ -0,0 +1,122 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "cgrp_kfunc_common.h"
#include "cpumask_common.h"
#include "task_kfunc_common.h"
char _license[] SEC("license") = "GPL";
/***************
* Task kfuncs *
***************/
/*
 * Exercise the core task kfuncs (bpf_get_current_task_btf(),
 * bpf_task_from_pid(), bpf_task_acquire()/bpf_task_release()) so that any
 * program calling this helper only loads if those kfuncs are allowed for
 * its program type. Every acquired reference is released on all paths to
 * keep the verifier's reference tracking balanced.
 */
static void task_kfunc_load_test(void)
{
struct task_struct *current, *ref_1, *ref_2;
current = bpf_get_current_task_btf();
/* bpf_task_from_pid() may return NULL; nothing to release in that case. */
ref_1 = bpf_task_from_pid(current->pid);
if (!ref_1)
return;
/* Take (and immediately drop) a second reference on the same task. */
ref_2 = bpf_task_acquire(ref_1);
if (ref_2)
bpf_task_release(ref_2);
bpf_task_release(ref_1);
}
/*
 * Raw-tracepoint program: expected to be REJECTED at load time with the
 * verifier message "calling kernel function", proving the task kfuncs are
 * not callable from BPF_PROG_TYPE_RAW_TRACEPOINT.
 */
SEC("raw_tp")
__failure __msg("calling kernel function")
int BPF_PROG(task_kfunc_raw_tp)
{
task_kfunc_load_test();
return 0;
}
/*
 * Syscall program: expected to load successfully, proving the task kfuncs
 * ARE callable from BPF_PROG_TYPE_SYSCALL.
 */
SEC("syscall")
__success
int BPF_PROG(task_kfunc_syscall)
{
task_kfunc_load_test();
return 0;
}
/*****************
* cgroup kfuncs *
*****************/
/*
 * Exercise the core cgroup kfuncs (bpf_cgroup_from_id(),
 * bpf_cgroup_acquire()/bpf_cgroup_release()) so that any program calling
 * this helper only loads if those kfuncs are allowed for its program type.
 * Every acquired reference is released on all paths.
 */
static void cgrp_kfunc_load_test(void)
{
struct cgroup *cgrp, *ref;
/* Look up the root cgroup (id 0); may return NULL. */
cgrp = bpf_cgroup_from_id(0);
if (!cgrp)
return;
ref = bpf_cgroup_acquire(cgrp);
if (!ref) {
/* Second reference failed; still must drop the first. */
bpf_cgroup_release(cgrp);
return;
}
bpf_cgroup_release(ref);
bpf_cgroup_release(cgrp);
}
/*
 * Raw-tracepoint program: expected to be REJECTED at load time with the
 * verifier message "calling kernel function", proving the cgroup kfuncs
 * are not callable from BPF_PROG_TYPE_RAW_TRACEPOINT.
 */
SEC("raw_tp")
__failure __msg("calling kernel function")
int BPF_PROG(cgrp_kfunc_raw_tp)
{
cgrp_kfunc_load_test();
return 0;
}
/*
 * Syscall program: expected to load successfully, proving the cgroup
 * kfuncs ARE callable from BPF_PROG_TYPE_SYSCALL.
 */
SEC("syscall")
__success
int BPF_PROG(cgrp_kfunc_syscall)
{
cgrp_kfunc_load_test();
return 0;
}
/******************
* cpumask kfuncs *
******************/
/*
 * Exercise the core cpumask kfuncs (bpf_cpumask_create(),
 * bpf_cpumask_acquire()/bpf_cpumask_release(), set/test of CPU 0) so that
 * any program calling this helper only loads if those kfuncs are allowed
 * for its program type. Both references are released before returning.
 */
static void cpumask_kfunc_load_test(void)
{
struct bpf_cpumask *alloc, *ref;
alloc = bpf_cpumask_create();
if (!alloc)
return;
/* NOTE(review): ref is not NULL-checked before use, unlike the task/cgrp
 * helpers above — presumably bpf_cpumask_acquire() cannot fail on a valid
 * mask; confirm against the kfunc's contract.
 */
ref = bpf_cpumask_acquire(alloc);
bpf_cpumask_set_cpu(0, alloc);
bpf_cpumask_test_cpu(0, (const struct cpumask *)ref);
bpf_cpumask_release(ref);
bpf_cpumask_release(alloc);
}
/*
 * Raw-tracepoint program: expected to be REJECTED at load time with the
 * verifier message "calling kernel function", proving the cpumask kfuncs
 * are not callable from BPF_PROG_TYPE_RAW_TRACEPOINT.
 */
SEC("raw_tp")
__failure __msg("calling kernel function")
int BPF_PROG(cpumask_kfunc_raw_tp)
{
cpumask_kfunc_load_test();
return 0;
}
/*
 * Syscall program: expected to load successfully, proving the cpumask
 * kfuncs ARE callable from BPF_PROG_TYPE_SYSCALL.
 */
SEC("syscall")
__success
int BPF_PROG(cpumask_kfunc_syscall)
{
cpumask_kfunc_load_test();
return 0;
}