selftests/bpf: Add test for bpf_get_branch_snapshot
This test uses bpf_get_branch_snapshot from a fexit program. The test attaches to a target function (bpf_testmod_loop_test) and compares the recorded branch entries against kallsyms. If there are not enough records matching kallsyms, the test fails.

Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20210910183352.3151445-4-songliubraving@fb.com
This commit is contained in:
parent 856c02dbce
commit 025bd7c753
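To exercise just this test after the usual selftests build (an assumed setup, not part of the commit itself), something like the following should work on a machine whose kernel and CPU expose LBR; otherwise the test skips:

    cd tools/testing/selftests/bpf
    make
    sudo ./test_progs -t get_branch_snapshot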
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c

@@ -13,6 +13,18 @@
 DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
 
+noinline int bpf_testmod_loop_test(int n)
+{
+        int i, sum = 0;
+
+        /* the primary goal of this test is to test LBR. Create a lot of
+         * branches in the function, so we can catch it easily.
+         */
+        for (i = 0; i < n; i++)
+                sum += i;
+        return sum;
+}
+
 noinline ssize_t
 bpf_testmod_test_read(struct file *file, struct kobject *kobj,
                       struct bin_attribute *bin_attr,
                       char *buf, loff_t off, size_t len)
@@ -24,7 +36,11 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
                 .len = len,
         };
 
-        trace_bpf_testmod_test_read(current, &ctx);
+        /* This is always true. Use the check to make sure the compiler
+         * doesn't remove bpf_testmod_loop_test.
+         */
+        if (bpf_testmod_loop_test(101) > 100)
+                trace_bpf_testmod_test_read(current, &ctx);
 
         return -EIO; /* always fail */
 }
@@ -71,4 +87,3 @@ module_exit(bpf_testmod_exit);
 MODULE_AUTHOR("Andrii Nakryiko");
 MODULE_DESCRIPTION("BPF selftests module");
 MODULE_LICENSE("Dual BSD/GPL");
-
tools/testing/selftests/bpf/prog_tests/core_reloc.c

@@ -30,7 +30,7 @@ static int duration = 0;
         .output_len = sizeof(struct core_reloc_module_output),         \
         .prog_sec_name = sec_name,                                     \
         .raw_tp_name = tp_name,                                        \
-        .trigger = trigger_module_test_read,                           \
+        .trigger = __trigger_module_test_read,                         \
         .needs_testmod = true,                                         \
 }
@@ -475,19 +475,11 @@ static int setup_type_id_case_failure(struct core_reloc_test_case *test)
         return 0;
 }
 
-static int trigger_module_test_read(const struct core_reloc_test_case *test)
+static int __trigger_module_test_read(const struct core_reloc_test_case *test)
 {
         struct core_reloc_module_output *exp = (void *)test->output;
-        int fd, err;
-
-        fd = open("/sys/kernel/bpf_testmod", O_RDONLY);
-        err = -errno;
-        if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err))
-                return err;
-
-        read(fd, NULL, exp->len);       /* request expected number of bytes */
-        close(fd);
 
+        trigger_module_test_read(exp->len);
         return 0;
 }
 
tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c (new file, 100 lines)

@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include "get_branch_snapshot.skel.h"
+
+static int *pfd_array;
+static int cpu_cnt;
+
+static int create_perf_events(void)
+{
+        struct perf_event_attr attr = {0};
+        int cpu;
+
+        /* create perf event */
+        attr.size = sizeof(attr);
+        attr.type = PERF_TYPE_RAW;
+        attr.config = 0x1b00;
+        attr.sample_type = PERF_SAMPLE_BRANCH_STACK;
+        attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL |
+                PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY;
+
+        cpu_cnt = libbpf_num_possible_cpus();
+        pfd_array = malloc(sizeof(int) * cpu_cnt);
+        if (!pfd_array) {
+                cpu_cnt = 0;
+                return 1;
+        }
+
+        for (cpu = 0; cpu < cpu_cnt; cpu++) {
+                pfd_array[cpu] = syscall(__NR_perf_event_open, &attr,
+                                         -1, cpu, -1, PERF_FLAG_FD_CLOEXEC);
+                if (pfd_array[cpu] < 0)
+                        break;
+        }
+
+        return cpu == 0;
+}
+
+static void close_perf_events(void)
+{
+        int cpu = 0;
+        int fd;
+
+        while (cpu++ < cpu_cnt) {
+                fd = pfd_array[cpu];
+                if (fd < 0)
+                        break;
+                close(fd);
+        }
+        free(pfd_array);
+}
+
+void test_get_branch_snapshot(void)
+{
+        struct get_branch_snapshot *skel = NULL;
+        int err;
+
+        if (create_perf_events()) {
+                test__skip(); /* system doesn't support LBR */
+                goto cleanup;
+        }
+
+        skel = get_branch_snapshot__open_and_load();
+        if (!ASSERT_OK_PTR(skel, "get_branch_snapshot__open_and_load"))
+                goto cleanup;
+
+        err = kallsyms_find("bpf_testmod_loop_test", &skel->bss->address_low);
+        if (!ASSERT_OK(err, "kallsyms_find"))
+                goto cleanup;
+
+        err = kallsyms_find_next("bpf_testmod_loop_test", &skel->bss->address_high);
+        if (!ASSERT_OK(err, "kallsyms_find_next"))
+                goto cleanup;
+
+        err = get_branch_snapshot__attach(skel);
+        if (!ASSERT_OK(err, "get_branch_snapshot__attach"))
+                goto cleanup;
+
+        trigger_module_test_read(100);
+
+        if (skel->bss->total_entries < 16) {
+                /* too few entries for the hit/waste test */
+                test__skip();
+                goto cleanup;
+        }
+
+        ASSERT_GT(skel->bss->test1_hits, 6, "find_looptest_in_lbr");
+
+        /* Given we stop LBR in software, we will waste a few entries.
+         * But we should try to waste as few as possible entries. We are at
+         * about 7 on x86_64 systems.
+         * Add a check for < 10 so that we get heads-up when something
+         * changes and wastes too many entries.
+         */
+        ASSERT_LT(skel->bss->wasted_entries, 10, "check_wasted_entries");
+
+cleanup:
+        get_branch_snapshot__destroy(skel);
+        close_perf_events();
+}
tools/testing/selftests/bpf/prog_tests/module_attach.c

@@ -6,45 +6,6 @@
 
 static int duration;
 
-static int trigger_module_test_read(int read_sz)
-{
-        int fd, err;
-
-        fd = open("/sys/kernel/bpf_testmod", O_RDONLY);
-        err = -errno;
-        if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err))
-                return err;
-
-        read(fd, NULL, read_sz);
-        close(fd);
-
-        return 0;
-}
-
-static int trigger_module_test_write(int write_sz)
-{
-        int fd, err;
-        char *buf = malloc(write_sz);
-
-        if (!buf)
-                return -ENOMEM;
-
-        memset(buf, 'a', write_sz);
-        buf[write_sz-1] = '\0';
-
-        fd = open("/sys/kernel/bpf_testmod", O_WRONLY);
-        err = -errno;
-        if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err)) {
-                free(buf);
-                return err;
-        }
-
-        write(fd, buf, write_sz);
-        close(fd);
-        free(buf);
-        return 0;
-}
-
 static int delete_module(const char *name, int flags)
 {
         return syscall(__NR_delete_module, name, flags);
tools/testing/selftests/bpf/progs/get_branch_snapshot.c (new file, 40 lines)

@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u64 test1_hits = 0;
+__u64 address_low = 0;
+__u64 address_high = 0;
+int wasted_entries = 0;
+long total_entries = 0;
+
+#define ENTRY_CNT 32
+struct perf_branch_entry entries[ENTRY_CNT] = {};
+
+static inline bool in_range(__u64 val)
+{
+        return (val >= address_low) && (val < address_high);
+}
+
+SEC("fexit/bpf_testmod_loop_test")
+int BPF_PROG(test1, int n, int ret)
+{
+        long i;
+
+        total_entries = bpf_get_branch_snapshot(entries, sizeof(entries), 0);
+        total_entries /= sizeof(struct perf_branch_entry);
+
+        for (i = 0; i < ENTRY_CNT; i++) {
+                if (i >= total_entries)
+                        break;
+                if (in_range(entries[i].from) && in_range(entries[i].to))
+                        test1_hits++;
+                else if (!test1_hits)
+                        wasted_entries++;
+        }
+        return 0;
+}
tools/testing/selftests/bpf/test_progs.c

@@ -743,6 +743,45 @@ int cd_flavor_subdir(const char *exec_name)
         return chdir(flavor);
 }
 
+int trigger_module_test_read(int read_sz)
+{
+        int fd, err;
+
+        fd = open("/sys/kernel/bpf_testmod", O_RDONLY);
+        err = -errno;
+        if (!ASSERT_GE(fd, 0, "testmod_file_open"))
+                return err;
+
+        read(fd, NULL, read_sz);
+        close(fd);
+
+        return 0;
+}
+
+int trigger_module_test_write(int write_sz)
+{
+        int fd, err;
+        char *buf = malloc(write_sz);
+
+        if (!buf)
+                return -ENOMEM;
+
+        memset(buf, 'a', write_sz);
+        buf[write_sz-1] = '\0';
+
+        fd = open("/sys/kernel/bpf_testmod", O_WRONLY);
+        err = -errno;
+        if (!ASSERT_GE(fd, 0, "testmod_file_open")) {
+                free(buf);
+                return err;
+        }
+
+        write(fd, buf, write_sz);
+        close(fd);
+        free(buf);
+        return 0;
+}
+
 #define MAX_BACKTRACE_SZ 128
 void crash_handler(int signum)
 {
tools/testing/selftests/bpf/test_progs.h

@@ -291,6 +291,8 @@ int compare_map_keys(int map1_fd, int map2_fd);
 int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
 int extract_build_id(char *build_id, size_t size);
 int kern_sync_rcu(void);
+int trigger_module_test_read(int read_sz);
+int trigger_module_test_write(int write_sz);
 
 #ifdef __x86_64__
 #define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep"
tools/testing/selftests/bpf/trace_helpers.c

@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <ctype.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -117,6 +118,42 @@ out:
         return err;
 }
 
+/* find the address of the next symbol of the same type, this can be used
+ * to determine the end of a function.
+ */
+int kallsyms_find_next(const char *sym, unsigned long long *addr)
+{
+        char type, found_type, name[500];
+        unsigned long long value;
+        bool found = false;
+        int err = 0;
+        FILE *f;
+
+        f = fopen("/proc/kallsyms", "r");
+        if (!f)
+                return -EINVAL;
+
+        while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) {
+                /* Different types of symbols in kernel modules are mixed
+                 * in /proc/kallsyms. Only return the next matching type.
+                 * Use tolower() for type so that 'T' matches 't'.
+                 */
+                if (found && found_type == tolower(type)) {
+                        *addr = value;
+                        goto out;
+                }
+                if (strcmp(name, sym) == 0) {
+                        found = true;
+                        found_type = tolower(type);
+                }
+        }
+        err = -ENOENT;
+
+out:
+        fclose(f);
+        return err;
+}
+
 void read_trace_pipe(void)
 {
         int trace_fd;
tools/testing/selftests/bpf/trace_helpers.h

@@ -16,6 +16,11 @@ long ksym_get_addr(const char *name);
 /* open kallsyms and find addresses on the fly, faster than load + search. */
 int kallsyms_find(const char *sym, unsigned long long *addr);
 
+/* find the address of the next symbol, this can be used to determine the
+ * end of a function
+ */
+int kallsyms_find_next(const char *sym, unsigned long long *addr);
+
 void read_trace_pipe(void);
 
 ssize_t get_uprobe_offset(const void *addr, ssize_t base);
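For illustration only (not part of this commit): a minimal stand-alone sketch of the new helper pair, assuming it is built inside tools/testing/selftests/bpf so trace_helpers.h and trace_helpers.c are available, and that bpf_testmod is loaded so the symbol exists.

#include <stdio.h>
#include "trace_helpers.h"

int main(void)
{
        unsigned long long lo = 0, hi = 0;

        /* kallsyms_find() returns the symbol's start address;
         * kallsyms_find_next() returns the start of the next symbol of the
         * same type, which approximates the end of the function.
         */
        if (kallsyms_find("bpf_testmod_loop_test", &lo) ||
            kallsyms_find_next("bpf_testmod_loop_test", &hi)) {
                fprintf(stderr, "symbol not found (is bpf_testmod loaded?)\n");
                return 1;
        }

        printf("bpf_testmod_loop_test: [0x%llx, 0x%llx)\n", lo, hi);
        return 0;
}

This [address_low, address_high) bracket is exactly what the test feeds into the BPF program's in_range() check above.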