bpf/selftests: coverage for bpf_map_ops errors

These tests expose the issue of being unable to properly check for errors
returned from inlined bpf map helpers that make calls to the bpf_map_ops
functions. At best, a check for zero or non-zero can be done but these
tests show it is not possible to check for a negative value or for a
specific error value.

Signed-off-by: JP Kobryn <inwardvessel@gmail.com>
Tested-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230322194754.185781-2-inwardvessel@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Author: JP Kobryn, 2023-03-22 12:47:53 -07:00 (committed by Alexei Starovoitov)
parent d9d93f3b61
commit 830154cdc5
2 changed files with 300 additions and 0 deletions
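
For context, a minimal sketch (not part of this diff) of the kind of check the commit message refers to: using the sign of an inlined map helper's return value, or the exact errno it stores, rather than only a zero/non-zero test. The names demo_map, demo_err, and demo_prog are illustrative only; the real programs under test are in the second file below.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
} demo_map SEC(".maps");

long demo_err;

SEC("tp/syscalls/sys_enter_getpid")
int demo_prog(void *ctx)
{
        const int key = 0;
        const int val = 1;
        long ret;

        /* On a second run the single entry already exists, so with
         * BPF_NOEXIST this should return -EEXIST.
         */
        ret = bpf_map_update_elem(&demo_map, &key, &val, BPF_NOEXIST);

        /* A bare zero/non-zero test on ret always worked; relying on the
         * sign of ret (or, from user space, on the exact -EEXIST value
         * stored in demo_err) is what the selftests below exercise.
         */
        if (ret < 0)
                demo_err = ret;
        return 0;
}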


@@ -0,0 +1,162 @@ (new file: userspace test using the test_map_ops skeleton)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <errno.h>
#include <sys/syscall.h>
#include <unistd.h>

#include "test_map_ops.skel.h"
#include "test_progs.h"

/* Each helper below issues a distinct syscall so that only the matching
 * tracepoint program in the BPF object fires and updates the err global.
 */
static void map_update(void)
{
        (void)syscall(__NR_getpid);
}

static void map_delete(void)
{
        (void)syscall(__NR_getppid);
}

static void map_push(void)
{
        (void)syscall(__NR_getuid);
}

static void map_pop(void)
{
        (void)syscall(__NR_geteuid);
}

static void map_peek(void)
{
        (void)syscall(__NR_getgid);
}

static void map_for_each_pass(void)
{
        (void)syscall(__NR_gettid);
}

static void map_for_each_fail(void)
{
        (void)syscall(__NR_getpgid);
}
static int setup(struct test_map_ops **skel)
{
        int err = 0;

        if (!skel)
                return -1;

        *skel = test_map_ops__open();
        if (!ASSERT_OK_PTR(*skel, "test_map_ops__open"))
                return -1;

        (*skel)->rodata->pid = getpid();

        err = test_map_ops__load(*skel);
        if (!ASSERT_OK(err, "test_map_ops__load"))
                return err;

        err = test_map_ops__attach(*skel);
        if (!ASSERT_OK(err, "test_map_ops__attach"))
                return err;

        return err;
}

static void teardown(struct test_map_ops **skel)
{
        if (skel && *skel)
                test_map_ops__destroy(*skel);
}
static void map_ops_update_delete_subtest(void)
{
        struct test_map_ops *skel;

        if (setup(&skel))
                goto teardown;

        map_update();
        ASSERT_OK(skel->bss->err, "map_update_initial");

        map_update();
        ASSERT_LT(skel->bss->err, 0, "map_update_existing");
        ASSERT_EQ(skel->bss->err, -EEXIST, "map_update_existing");

        map_delete();
        ASSERT_OK(skel->bss->err, "map_delete_existing");

        map_delete();
        ASSERT_LT(skel->bss->err, 0, "map_delete_non_existing");
        ASSERT_EQ(skel->bss->err, -ENOENT, "map_delete_non_existing");

teardown:
        teardown(&skel);
}

static void map_ops_push_peek_pop_subtest(void)
{
        struct test_map_ops *skel;

        if (setup(&skel))
                goto teardown;

        map_push();
        ASSERT_OK(skel->bss->err, "map_push_initial");

        map_push();
        ASSERT_LT(skel->bss->err, 0, "map_push_when_full");
        ASSERT_EQ(skel->bss->err, -E2BIG, "map_push_when_full");

        map_peek();
        ASSERT_OK(skel->bss->err, "map_peek");

        map_pop();
        ASSERT_OK(skel->bss->err, "map_pop");

        map_peek();
        ASSERT_LT(skel->bss->err, 0, "map_peek_when_empty");
        ASSERT_EQ(skel->bss->err, -ENOENT, "map_peek_when_empty");

        map_pop();
        ASSERT_LT(skel->bss->err, 0, "map_pop_when_empty");
        ASSERT_EQ(skel->bss->err, -ENOENT, "map_pop_when_empty");

teardown:
        teardown(&skel);
}

static void map_ops_for_each_subtest(void)
{
        struct test_map_ops *skel;

        if (setup(&skel))
                goto teardown;

        map_for_each_pass();
        /* expect to iterate over 1 element */
        ASSERT_EQ(skel->bss->err, 1, "map_for_each_no_flags");

        map_for_each_fail();
        ASSERT_LT(skel->bss->err, 0, "map_for_each_with_flags");
        ASSERT_EQ(skel->bss->err, -EINVAL, "map_for_each_with_flags");

teardown:
        teardown(&skel);
}

void test_map_ops(void)
{
        if (test__start_subtest("map_ops_update_delete"))
                map_ops_update_delete_subtest();
        if (test__start_subtest("map_ops_push_peek_pop"))
                map_ops_push_peek_pop_subtest();
        if (test__start_subtest("map_ops_for_each"))
                map_ops_for_each_subtest();
}


@@ -0,0 +1,138 @@ (new file: BPF program side, test_map_ops)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
} hash_map SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_STACK);
        __uint(max_entries, 1);
        __type(value, int);
} stack_map SEC(".maps");

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, int);
        __type(value, int);
} array_map SEC(".maps");

const volatile pid_t pid;
long err = 0;

/* Callback for bpf_for_each_map_elem(); returning 0 continues iterating
 * over all elements.
 */
static u64 callback(u64 map, u64 key, u64 val, u64 ctx, u64 flags)
{
        return 0;
}
SEC("tp/syscalls/sys_enter_getpid")
int map_update(void *ctx)
{
const int key = 0;
const int val = 1;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
err = bpf_map_update_elem(&hash_map, &key, &val, BPF_NOEXIST);
return 0;
}
SEC("tp/syscalls/sys_enter_getppid")
int map_delete(void *ctx)
{
const int key = 0;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
err = bpf_map_delete_elem(&hash_map, &key);
return 0;
}
SEC("tp/syscalls/sys_enter_getuid")
int map_push(void *ctx)
{
const int val = 1;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
err = bpf_map_push_elem(&stack_map, &val, 0);
return 0;
}
SEC("tp/syscalls/sys_enter_geteuid")
int map_pop(void *ctx)
{
int val;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
err = bpf_map_pop_elem(&stack_map, &val);
return 0;
}
SEC("tp/syscalls/sys_enter_getgid")
int map_peek(void *ctx)
{
int val;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
err = bpf_map_peek_elem(&stack_map, &val);
return 0;
}
SEC("tp/syscalls/sys_enter_gettid")
int map_for_each_pass(void *ctx)
{
const int key = 0;
const int val = 1;
const u64 flags = 0;
int callback_ctx;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
bpf_map_update_elem(&array_map, &key, &val, flags);
err = bpf_for_each_map_elem(&array_map, callback, &callback_ctx, flags);
return 0;
}
SEC("tp/syscalls/sys_enter_getpgid")
int map_for_each_fail(void *ctx)
{
const int key = 0;
const int val = 1;
const u64 flags = BPF_NOEXIST;
int callback_ctx;
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
bpf_map_update_elem(&array_map, &key, &val, flags);
/* calling for_each with non-zero flags will return error */
err = bpf_for_each_map_elem(&array_map, callback, &callback_ctx, flags);
return 0;
}
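
For trying this locally, the new subtests should be runnable with the usual selftests harness, e.g. ./test_progs -t map_ops from tools/testing/selftests/bpf (assuming the standard -t name filter picks up the test_map_ops() entry point).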