mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-15 16:24:13 +08:00
selftests/bpf: Test the inlining of bpf_kptr_xchg()
The test uses bpf_prog_get_info_by_fd() to obtain the xlated instructions of the program first. Since these instructions have already been rewritten by the verifier, the test then checks whether the rewritten instructions are as expected. And to ensure LLVM generates code exactly as expected, use inline assembly and a naked function.

Suggested-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Hou Tao <houtao1@huawei.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20240105104819.3916743-4-houtao@huaweicloud.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent
b4b7a4099b
commit
17bda53e43
51
tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c
Normal file
51
tools/testing/selftests/bpf/prog_tests/kptr_xchg_inline.c
Normal file
@ -0,0 +1,51 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
|
||||
#include <test_progs.h>
|
||||
|
||||
#include "linux/filter.h"
|
||||
#include "kptr_xchg_inline.skel.h"
|
||||
|
||||
void test_kptr_xchg_inline(void)
|
||||
{
|
||||
struct kptr_xchg_inline *skel;
|
||||
struct bpf_insn *insn = NULL;
|
||||
struct bpf_insn exp;
|
||||
unsigned int cnt;
|
||||
int err;
|
||||
|
||||
#if !defined(__x86_64__)
|
||||
test__skip();
|
||||
return;
|
||||
#endif
|
||||
|
||||
skel = kptr_xchg_inline__open_and_load();
|
||||
if (!ASSERT_OK_PTR(skel, "open_load"))
|
||||
return;
|
||||
|
||||
err = get_xlated_program(bpf_program__fd(skel->progs.kptr_xchg_inline), &insn, &cnt);
|
||||
if (!ASSERT_OK(err, "prog insn"))
|
||||
goto out;
|
||||
|
||||
/* The original instructions are:
|
||||
* r1 = map[id:xxx][0]+0
|
||||
* r2 = 0
|
||||
* call bpf_kptr_xchg#yyy
|
||||
*
|
||||
* call bpf_kptr_xchg#yyy will be inlined as:
|
||||
* r0 = r2
|
||||
* r0 = atomic64_xchg((u64 *)(r1 +0), r0)
|
||||
*/
|
||||
if (!ASSERT_GT(cnt, 5, "insn cnt"))
|
||||
goto out;
|
||||
|
||||
exp = BPF_MOV64_REG(BPF_REG_0, BPF_REG_2);
|
||||
if (!ASSERT_OK(memcmp(&insn[3], &exp, sizeof(exp)), "mov"))
|
||||
goto out;
|
||||
|
||||
exp = BPF_ATOMIC_OP(BPF_DW, BPF_XCHG, BPF_REG_1, BPF_REG_0, 0);
|
||||
if (!ASSERT_OK(memcmp(&insn[4], &exp, sizeof(exp)), "xchg"))
|
||||
goto out;
|
||||
out:
|
||||
free(insn);
|
||||
kptr_xchg_inline__destroy(skel);
|
||||
}
|
48
tools/testing/selftests/bpf/progs/kptr_xchg_inline.c
Normal file
48
tools/testing/selftests/bpf/progs/kptr_xchg_inline.c
Normal file
@ -0,0 +1,48 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/* Copyright (C) 2023. Huawei Technologies Co., Ltd */
|
||||
#include <linux/types.h>
|
||||
#include <bpf/bpf_helpers.h>
|
||||
|
||||
#include "bpf_experimental.h"
|
||||
#include "bpf_misc.h"
|
||||
|
||||
char _license[] SEC("license") = "GPL";

/* Opaque allocated object stored behind the kptr below; only its
 * identity matters to the test, the blob contents are never read.
 */
struct bin_data {
	char blob[32];
};

/* Place the kptr in its own hidden .bss.<name> section, 8-byte aligned. */
#define private(name) SEC(".bss." #name) __hidden __attribute__((aligned(8)))
private(kptr) struct bin_data __kptr * ptr;
|
||||
|
||||
SEC("tc")
__naked int kptr_xchg_inline(void)
{
	/* Naked function with hand-written assembly so that the emitted
	 * instruction sequence is exactly known: the prog_tests counterpart
	 * checks the verifier-rewritten (inlined) bpf_kptr_xchg() call at
	 * fixed instruction offsets.
	 */
	asm volatile (
	/* Exchange NULL into the map-value kptr; r0 holds the old value. */
	"r1 = %[ptr] ll;"
	"r2 = 0;"
	"call %[bpf_kptr_xchg];"
	/* Drop the old object, if any, so it is not leaked. */
	"if r0 == 0 goto 1f;"
	"r1 = r0;"
	"r2 = 0;"
	"call %[bpf_obj_drop_impl];"
	"1:"
	"r0 = 0;"
	"exit;"
	:
	: __imm_addr(ptr),
	  __imm(bpf_kptr_xchg),
	  __imm(bpf_obj_drop_impl)
	: __clobber_all
	);
}
|
||||
|
||||
/* BTF FUNC records are not generated for kfuncs referenced
 * from inline assembly. These records are necessary for
 * libbpf to link the program. The function below is a hack
 * to ensure that BTF FUNC records are generated.
 */
void __btf_root(void)
{
	/* Never meant to run; the C-level reference alone makes the
	 * compiler emit a BTF FUNC record for bpf_obj_drop().
	 */
	bpf_obj_drop(NULL);
}
|
Loading…
Reference in New Issue
Block a user