selftests/bpf: Add bpf_arena_list test.

bpf_arena_alloc.h - implements a page_frag allocator as a bpf program.
bpf_arena_list.h - implements a doubly linked list as a bpf program.

Compiled as a bpf program and as native C code.

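Together the two headers give a prog a small malloc-like API over the
arena; a minimal usage sketch (struct elem mirrors the selftest):

  struct elem {
          struct arena_list_node node;
          __u64 value;
  };

  static void add_one(arena_list_head_t *h, __u64 v)
  {
          struct elem __arena *n = bpf_alloc(sizeof(*n));

          if (n) {
                  n->value = v;
                  list_add_head(&n->node, h);
          }
  }
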
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20240308010812.89848-14-alexei.starovoitov@gmail.com

diff --git a/tools/testing/selftests/bpf/bpf_arena_alloc.h b/tools/testing/selftests/bpf/bpf_arena_alloc.h
new file mode 100644
@@ -0,0 +1,67 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#pragma once
#include "bpf_arena_common.h"

#ifndef __round_mask
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#endif
#ifndef round_up
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#endif

#ifdef __BPF__
#define NR_CPUS (sizeof(struct cpumask) * 8)

static void __arena * __arena page_frag_cur_page[NR_CPUS];
static int __arena page_frag_cur_offset[NR_CPUS];

/* Simple page_frag allocator: allocations are carved out of a per-cpu page
 * from the top down; the last 8 bytes of each page hold the count of live
 * objects, so a fully freed page can be returned to the arena.
 */
static inline void __arena* bpf_alloc(unsigned int size)
{
	__u64 __arena *obj_cnt;
	__u32 cpu = bpf_get_smp_processor_id();
	void __arena *page = page_frag_cur_page[cpu];
	int __arena *cur_offset = &page_frag_cur_offset[cpu];
	int offset;

	size = round_up(size, 8);
	if (size >= PAGE_SIZE - 8)
		return NULL;
	if (!page) {
refill:
		page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0);
		if (!page)
			return NULL;
		cast_kern(page);
		page_frag_cur_page[cpu] = page;
		*cur_offset = PAGE_SIZE - 8;
		obj_cnt = page + PAGE_SIZE - 8;
		*obj_cnt = 0;
	} else {
		cast_kern(page);
		obj_cnt = page + PAGE_SIZE - 8;
	}

	offset = *cur_offset - size;
	if (offset < 0)
		goto refill;

	(*obj_cnt)++;
	*cur_offset = offset;
	return page + offset;
}

static inline void bpf_free(void __arena *addr)
{
	__u64 __arena *obj_cnt;

	/* round down to the page start to find the object count */
	addr = (void __arena *)(((long)addr) & ~(PAGE_SIZE - 1));
	obj_cnt = addr + PAGE_SIZE - 8;
	if (--(*obj_cnt) == 0)
		bpf_arena_free_pages(&arena, addr, 1);
}
#else /* !__BPF__: no-op stubs for the native C build */
static inline void __arena* bpf_alloc(unsigned int size) { return NULL; }
static inline void bpf_free(void __arena *addr) {}
#endif

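The allocator's size math is easy to sanity-check in plain C (a standalone
sketch, not part of the patch; PAGE_SIZE assumed to be 4096 as in the test):

#include <assert.h>

#define __round_mask(x, y) ((__typeof__(x))((y)-1))
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
#define PAGE_SIZE 4096

int main(void)
{
	/* sizes are rounded up to a multiple of 8, as in bpf_alloc() */
	assert(round_up(1, 8) == 8);
	assert(round_up(9, 8) == 16);
	/* the top 8 bytes hold the object count, and bpf_alloc() rejects
	 * any rounded size >= PAGE_SIZE - 8, so the largest allocation
	 * that fits is PAGE_SIZE - 16 == 4080 bytes
	 */
	assert(round_up(4080, 8) == 4080);
	assert(!(round_up(4080, 8) >= PAGE_SIZE - 8));
	return 0;
}
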
diff --git a/tools/testing/selftests/bpf/bpf_arena_list.h b/tools/testing/selftests/bpf/bpf_arena_list.h
new file mode 100644
@@ -0,0 +1,92 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#pragma once
#include "bpf_arena_common.h"

struct arena_list_node;

typedef struct arena_list_node __arena arena_list_node_t;

struct arena_list_node {
	arena_list_node_t *next;
	arena_list_node_t * __arena *pprev;
};

struct arena_list_head {
	struct arena_list_node __arena *first;
};
typedef struct arena_list_head __arena arena_list_head_t;

#define list_entry(ptr, type, member) arena_container_of(ptr, type, member)

#define list_entry_safe(ptr, type, member) \
	({ typeof(*ptr) * ___ptr = (ptr); \
	   ___ptr ? ({ cast_kern(___ptr); list_entry(___ptr, type, member); }) : NULL; \
	})

#ifndef __BPF__
static inline void *bpf_iter_num_new(struct bpf_iter_num *it, int i, int j) { return NULL; }
static inline void bpf_iter_num_destroy(struct bpf_iter_num *it) {}
static inline bool bpf_iter_num_next(struct bpf_iter_num *it) { return true; }
#define cond_break ({})
#endif

/* Safely walk linked list elements. Deletion of elements is allowed. */
#define list_for_each_entry(pos, head, member) \
	for (void * ___tmp = (pos = list_entry_safe((head)->first, \
						    typeof(*(pos)), member), \
			      (void *)0); \
	     pos && ({ ___tmp = (void *)pos->member.next; 1; }); \
	     cond_break, \
	     pos = list_entry_safe((void __arena *)___tmp, typeof(*(pos)), member))

/* Pointers stored in the list are user space addresses: the helpers
 * cast_user() a pointer before storing it and cast_kern() it before
 * dereferencing, so bpf progs and native C can walk the same list.
 */
static inline void list_add_head(arena_list_node_t *n, arena_list_head_t *h)
{
	arena_list_node_t *first = h->first, * __arena *tmp;

	cast_user(first);
	cast_kern(n);
	WRITE_ONCE(n->next, first);
	cast_kern(first);
	if (first) {
		tmp = &n->next;
		cast_user(tmp);
		WRITE_ONCE(first->pprev, tmp);
	}
	cast_user(n);
	WRITE_ONCE(h->first, n);

	tmp = &h->first;
	cast_user(tmp);
	cast_kern(n);
	WRITE_ONCE(n->pprev, tmp);
}

static inline void __list_del(arena_list_node_t *n)
{
	arena_list_node_t *next = n->next, *tmp;
	arena_list_node_t * __arena *pprev = n->pprev;

	cast_user(next);
	cast_kern(pprev);
	tmp = *pprev;
	cast_kern(tmp);
	WRITE_ONCE(tmp, next);
	if (next) {
		cast_user(pprev);
		cast_kern(next);
		WRITE_ONCE(next->pprev, pprev);
	}
}

#define POISON_POINTER_DELTA 0

#define LIST_POISON1 ((void __arena *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void __arena *) 0x122 + POISON_POINTER_DELTA)

static inline void list_del(arena_list_node_t *n)
{
	__list_del(n);
	n->next = LIST_POISON1;
	n->pprev = LIST_POISON2;
}

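Because the header also builds as native C (via the non-BPF stubs above),
the list can be exercised in an ordinary user-space program. A minimal
sketch, assuming the native-C side of bpf_arena_common.h defines __arena as
empty and makes cast_kern()/cast_user()/WRITE_ONCE() plain accesses:

#include <stdio.h>
#include <stdlib.h>
#include "bpf_arena_list.h"

struct elem {
	struct arena_list_node node;
	int value;
};

int main(void)
{
	struct arena_list_head head = {};
	struct elem *e, *pos;
	int sum = 0;

	for (int i = 1; i <= 3; i++) {
		e = calloc(1, sizeof(*e));	/* stands in for bpf_alloc() */
		e->value = i;
		list_add_head(&e->node, &head);
	}
	list_for_each_entry(pos, &head, node)
		sum += pos->value;
	printf("sum=%d\n", sum);	/* 1 + 2 + 3 = 6 */
	return 0;
}
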
diff --git a/tools/testing/selftests/bpf/prog_tests/arena_list.c b/tools/testing/selftests/bpf/prog_tests/arena_list.c
new file mode 100644
@@ -0,0 +1,68 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <test_progs.h>
#include <sys/mman.h>
#include <network_helpers.h>

#define PAGE_SIZE 4096

#include "bpf_arena_list.h"
#include "arena_list.skel.h"

struct elem {
	struct arena_list_node node;
	__u64 value;
};

static int list_sum(struct arena_list_head *head)
{
	struct elem __arena *n;
	int sum = 0;

	list_for_each_entry(n, head, node)
		sum += n->value;
	return sum;
}

static void test_arena_list_add_del(int cnt)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts);
	struct arena_list *skel;
	int expected_sum = (u64)cnt * (cnt - 1) / 2;
	int ret, sum;

	skel = arena_list__open_and_load();
	if (!ASSERT_OK_PTR(skel, "arena_list__open_and_load"))
		return;

	skel->bss->cnt = cnt;
	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_list_add), &opts);
	ASSERT_OK(ret, "ret_add");
	ASSERT_OK(opts.retval, "retval");
	if (skel->bss->skip) {
		printf("%s:SKIP:compiler doesn't support arena_cast\n", __func__);
		test__skip();
		goto out;
	}
	sum = list_sum(skel->bss->list_head);
	ASSERT_EQ(sum, expected_sum, "sum of elems");
	ASSERT_EQ(skel->arena->arena_sum, expected_sum, "__arena sum of elems");
	ASSERT_EQ(skel->arena->test_val, cnt + 1, "num of elems");

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.arena_list_del), &opts);
	ASSERT_OK(ret, "ret_del");
	sum = list_sum(skel->bss->list_head);
	ASSERT_EQ(sum, 0, "sum of list elems after del");
	ASSERT_EQ(skel->bss->list_sum, expected_sum, "sum of list elems computed by prog");
	ASSERT_EQ(skel->arena->arena_sum, expected_sum, "__arena sum of elems");
out:
	arena_list__destroy(skel);
}

void test_arena_list(void)
{
	if (test__start_subtest("arena_list_1"))
		test_arena_list_add_del(1);
	if (test__start_subtest("arena_list_1000"))
		test_arena_list_add_del(1000);
}

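A note on the arithmetic in the test above: arena_list_add (below) stores
the values 0 through cnt-1, so expected_sum is the arithmetic series
cnt * (cnt - 1) / 2, e.g. 1000 * 999 / 2 = 499500 for the arena_list_1000
subtest; the (u64) cast keeps the intermediate product from overflowing
32 bits at larger counts. test_val starts at 1 and is incremented once per
insertion, hence the cnt + 1 assertion.
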
diff --git a/tools/testing/selftests/bpf/progs/arena_list.c b/tools/testing/selftests/bpf/progs/arena_list.c
new file mode 100644
@@ -0,0 +1,87 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "bpf_experimental.h"

struct {
	__uint(type, BPF_MAP_TYPE_ARENA);
	__uint(map_flags, BPF_F_MMAPABLE);
	__uint(max_entries, 100); /* number of pages */
#ifdef __TARGET_ARCH_arm64
	__ulong(map_extra, 0x1ull << 32); /* start of mmap() region */
#else
	__ulong(map_extra, 0x1ull << 44); /* start of mmap() region */
#endif
} arena SEC(".maps");

#include "bpf_arena_alloc.h"
#include "bpf_arena_list.h"

struct elem {
	struct arena_list_node node;
	__u64 value;
};

struct arena_list_head __arena *list_head;
int list_sum;
int cnt;
bool skip = false;

#ifdef __BPF_FEATURE_ARENA_CAST
long __arena arena_sum;
int __arena test_val = 1;
struct arena_list_head __arena global_head;
#else
long arena_sum SEC(".arena.1");
int test_val SEC(".arena.1");
#endif
int zero;

SEC("syscall")
int arena_list_add(void *ctx)
{
#ifdef __BPF_FEATURE_ARENA_CAST
	__u64 i;

	list_head = &global_head;

	for (i = zero; i < cnt; cond_break, i++) {
		struct elem __arena *n = bpf_alloc(sizeof(*n));

		test_val++;
		n->value = i;
		arena_sum += i;
		list_add_head(&n->node, list_head);
	}
#else
	skip = true;
#endif
	return 0;
}

SEC("syscall")
int arena_list_del(void *ctx)
{
#ifdef __BPF_FEATURE_ARENA_CAST
	struct elem __arena *n;
	int sum = 0;

	arena_sum = 0;
	list_for_each_entry(n, list_head, node) {
		sum += n->value;
		arena_sum += n->value;
		list_del(&n->node);
		bpf_free(n);
	}
	list_sum = sum;
#else
	skip = true;
#endif
	return 0;
}

char _license[] SEC("license") = "GPL";
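
One detail the test relies on: globals placed in the arena (via __arena or
SEC(".arena.1")) are visible to user space directly through the skeleton's
mmap'ed arena region, with no map-lookup round trip. A minimal sketch of
that access path, assuming the generated arena_list.skel.h:

#include "arena_list.skel.h"

int main(void)
{
	struct arena_list *skel = arena_list__open_and_load();
	long sum;

	if (!skel)
		return 1;
	/* same memory the prog sees as arena_sum/test_val */
	sum = skel->arena->arena_sum;
	skel->arena->test_val = 1;
	arena_list__destroy(skel);
	return (int)sum;
}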