
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2020-08-08

The following pull-request contains BPF updates for your *net* tree.

We've added 11 non-merge commits during the last 2 day(s) which contain
a total of 24 files changed, 216 insertions(+), 135 deletions(-).

The main changes are:

1) Fix UAPI for BPF map iterator before it gets frozen to allow for more
   extensions/customization in the future, from Yonghong Song.

2) Fix selftests build to undo verbose build output, from Andrii Nakryiko.

3) Fix inlining compilation error on bpf_do_trace_printk() due to variable
   argument lists, from Stanislav Fomichev.

4) Fix an uninitialized pointer warning at btf__parse_raw() in libbpf,
   from Daniel T. Lee.

5) Fix several compilation warnings in selftests with regards to ignoring
   return value, from Jianlin Lv.

6) Fix interruptions by switching off timeout for BPF tests, from Jiri Benc.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2020-08-07 17:33:08 -07:00
commit 64cae2fb48
24 changed files with 216 additions and 135 deletions
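
The diffs below rework how a BPF iterator program is bound to a map (change 1 in the summary above): instead of overloading link_create.target_fd plus a BPF_ITER_LINK_MAP_FD flag, userspace now passes a union bpf_iter_link_info through the new link_create.iter_info/iter_info_len fields. A minimal sketch of the resulting libbpf attach flow, mirroring the selftest and bpftool changes further down; the helper name attach_map_iter and the assumption that prog and map_fd come from an already-loaded object are illustrative, not part of this series:

#include <string.h>
#include <linux/bpf.h>
#include <bpf/libbpf.h>

/* Sketch: attach a map-element iterator to one specific map using the
 * new bpf_iter_link_info UAPI instead of the old target_fd/flags pair.
 */
static struct bpf_link *attach_map_iter(struct bpf_program *prog, int map_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;		/* which map to iterate */

	opts.link_info = &linfo;		/* fields added by this series */
	opts.link_info_len = sizeof(linfo);

	/* returns an ERR_PTR-encoded error on failure */
	return bpf_program__attach_iter(prog, &opts);
}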

View File

@ -246,17 +246,6 @@ program is loaded the kernel will print warning message, so
this helper is only useful for experiments and prototypes.
Tracing BPF programs are root only.
Q: bpf_trace_printk() helper warning
------------------------------------
Q: When bpf_trace_printk() helper is used the kernel prints nasty
warning message. Why is that?
A: This is done to nudge program authors into better interfaces when
programs need to pass data to user space. Like bpf_perf_event_output()
can be used to efficiently stream data via perf ring buffer.
BPF maps can be used for asynchronous data sharing between kernel
and user space. bpf_trace_printk() should only be used for debugging.
Q: New functionality via kernel modules?
----------------------------------------
Q: Can BPF functionality such as new program or map types, new

View File

@ -1214,15 +1214,17 @@ struct bpf_iter_aux_info {
struct bpf_map *map;
};
typedef int (*bpf_iter_check_target_t)(struct bpf_prog *prog,
struct bpf_iter_aux_info *aux);
typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog,
union bpf_iter_link_info *linfo,
struct bpf_iter_aux_info *aux);
typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux);
#define BPF_ITER_CTX_ARG_MAX 2
struct bpf_iter_reg {
const char *target;
bpf_iter_check_target_t check_target;
bpf_iter_attach_target_t attach_target;
bpf_iter_detach_target_t detach_target;
u32 ctx_arg_info_size;
enum bpf_iter_link_info req_linfo;
struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX];
const struct bpf_iter_seq_info *seq_info;
};

View File

@ -81,6 +81,12 @@ struct bpf_cgroup_storage_key {
__u32 attach_type; /* program attach type */
};
union bpf_iter_link_info {
struct {
__u32 map_fd;
} map;
};
/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
BPF_MAP_CREATE,
@ -249,13 +255,6 @@ enum bpf_link_type {
MAX_BPF_LINK_TYPE,
};
enum bpf_iter_link_info {
BPF_ITER_LINK_UNSPEC = 0,
BPF_ITER_LINK_MAP_FD = 1,
MAX_BPF_ITER_LINK_INFO,
};
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
*
* NONE(default): No further bpf programs allowed in the subtree.
@ -623,6 +622,8 @@ union bpf_attr {
};
__u32 attach_type; /* attach type */
__u32 flags; /* extra flags */
__aligned_u64 iter_info; /* extra bpf_iter_link_info */
__u32 iter_info_len; /* iter_info length */
} link_create;
struct { /* struct used by BPF_LINK_UPDATE command */

View File

@ -338,8 +338,8 @@ static void bpf_iter_link_release(struct bpf_link *link)
struct bpf_iter_link *iter_link =
container_of(link, struct bpf_iter_link, link);
if (iter_link->aux.map)
bpf_map_put_with_uref(iter_link->aux.map);
if (iter_link->tinfo->reg_info->detach_target)
iter_link->tinfo->reg_info->detach_target(&iter_link->aux);
}
static void bpf_iter_link_dealloc(struct bpf_link *link)
@ -390,15 +390,35 @@ bool bpf_link_is_iter(struct bpf_link *link)
int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
union bpf_iter_link_info __user *ulinfo;
struct bpf_link_primer link_primer;
struct bpf_iter_target_info *tinfo;
struct bpf_iter_aux_info aux = {};
union bpf_iter_link_info linfo;
struct bpf_iter_link *link;
u32 prog_btf_id, target_fd;
u32 prog_btf_id, linfo_len;
bool existed = false;
struct bpf_map *map;
int err;
if (attr->link_create.target_fd || attr->link_create.flags)
return -EINVAL;
memset(&linfo, 0, sizeof(union bpf_iter_link_info));
ulinfo = u64_to_user_ptr(attr->link_create.iter_info);
linfo_len = attr->link_create.iter_info_len;
if (!ulinfo ^ !linfo_len)
return -EINVAL;
if (ulinfo) {
err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo),
linfo_len);
if (err)
return err;
linfo_len = min_t(u32, linfo_len, sizeof(linfo));
if (copy_from_user(&linfo, ulinfo, linfo_len))
return -EFAULT;
}
prog_btf_id = prog->aux->attach_btf_id;
mutex_lock(&targets_mutex);
list_for_each_entry(tinfo, &targets, list) {
@ -411,13 +431,6 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
if (!existed)
return -ENOENT;
/* Make sure user supplied flags are target expected. */
target_fd = attr->link_create.target_fd;
if (attr->link_create.flags != tinfo->reg_info->req_linfo)
return -EINVAL;
if (!attr->link_create.flags && target_fd)
return -EINVAL;
link = kzalloc(sizeof(*link), GFP_USER | __GFP_NOWARN);
if (!link)
return -ENOMEM;
@ -431,28 +444,15 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
return err;
}
if (tinfo->reg_info->req_linfo == BPF_ITER_LINK_MAP_FD) {
map = bpf_map_get_with_uref(target_fd);
if (IS_ERR(map)) {
err = PTR_ERR(map);
goto cleanup_link;
}
aux.map = map;
err = tinfo->reg_info->check_target(prog, &aux);
if (tinfo->reg_info->attach_target) {
err = tinfo->reg_info->attach_target(prog, &linfo, &link->aux);
if (err) {
bpf_map_put_with_uref(map);
goto cleanup_link;
bpf_link_cleanup(&link_primer);
return err;
}
link->aux.map = map;
}
return bpf_link_settle(&link_primer);
cleanup_link:
bpf_link_cleanup(&link_primer);
return err;
}
static void init_seq_meta(struct bpf_iter_priv_data *priv_data,

View File

@ -1966,7 +1966,7 @@ void bpf_prog_array_delete_safe(struct bpf_prog_array *array,
* @index: the index of the program to replace
*
* Skips over dummy programs, by not counting them, when calculating
* the the position of the program to replace.
* the position of the program to replace.
*
* Return:
* * 0 - Success

View File

@ -98,12 +98,21 @@ static struct bpf_iter_reg bpf_map_reg_info = {
.seq_info = &bpf_map_seq_info,
};
static int bpf_iter_check_map(struct bpf_prog *prog,
struct bpf_iter_aux_info *aux)
static int bpf_iter_attach_map(struct bpf_prog *prog,
union bpf_iter_link_info *linfo,
struct bpf_iter_aux_info *aux)
{
u32 key_acc_size, value_acc_size, key_size, value_size;
struct bpf_map *map = aux->map;
struct bpf_map *map;
bool is_percpu = false;
int err = -EINVAL;
if (!linfo->map.map_fd)
return -EBADF;
map = bpf_map_get_with_uref(linfo->map.map_fd);
if (IS_ERR(map))
return PTR_ERR(map);
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
@ -112,7 +121,7 @@ static int bpf_iter_check_map(struct bpf_prog *prog,
else if (map->map_type != BPF_MAP_TYPE_HASH &&
map->map_type != BPF_MAP_TYPE_LRU_HASH &&
map->map_type != BPF_MAP_TYPE_ARRAY)
return -EINVAL;
goto put_map;
key_acc_size = prog->aux->max_rdonly_access;
value_acc_size = prog->aux->max_rdwr_access;
@ -122,10 +131,22 @@ static int bpf_iter_check_map(struct bpf_prog *prog,
else
value_size = round_up(map->value_size, 8) * num_possible_cpus();
if (key_acc_size > key_size || value_acc_size > value_size)
return -EACCES;
if (key_acc_size > key_size || value_acc_size > value_size) {
err = -EACCES;
goto put_map;
}
aux->map = map;
return 0;
put_map:
bpf_map_put_with_uref(map);
return err;
}
static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
bpf_map_put_with_uref(aux->map);
}
DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta,
@ -133,8 +154,8 @@ DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta,
static const struct bpf_iter_reg bpf_map_elem_reg_info = {
.target = "bpf_map_elem",
.check_target = bpf_iter_check_map,
.req_linfo = BPF_ITER_LINK_MAP_FD,
.attach_target = bpf_iter_attach_map,
.detach_target = bpf_iter_detach_map,
.ctx_arg_info_size = 2,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__bpf_map_elem, key),

View File

@ -3883,7 +3883,7 @@ static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *
return -EINVAL;
}
#define BPF_LINK_CREATE_LAST_FIELD link_create.flags
#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
static int link_create(union bpf_attr *attr)
{
enum bpf_prog_type ptype;

View File

@ -8294,7 +8294,7 @@ static bool stacksafe(struct bpf_func_state *old,
if (old->stack[spi].slot_type[i % BPF_REG_SIZE] !=
cur->stack[spi].slot_type[i % BPF_REG_SIZE])
/* Ex: old explored (safe) state has STACK_SPILL in
* this stack slot, but current has has STACK_MISC ->
* this stack slot, but current has STACK_MISC ->
* this verifier states are not equivalent,
* return false to continue verification of this path
*/

View File

@ -383,7 +383,7 @@ static DEFINE_RAW_SPINLOCK(trace_printk_lock);
#define BPF_TRACE_PRINTK_SIZE 1024
static inline __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
static __printf(1, 0) int bpf_do_trace_printk(const char *fmt, ...)
{
static char buf[BPF_TRACE_PRINTK_SIZE];
unsigned long flags;

View File

@ -1384,18 +1384,39 @@ static int bpf_iter_init_sk_storage_map(void *priv_data,
return 0;
}
static int bpf_iter_check_map(struct bpf_prog *prog,
struct bpf_iter_aux_info *aux)
static int bpf_iter_attach_map(struct bpf_prog *prog,
union bpf_iter_link_info *linfo,
struct bpf_iter_aux_info *aux)
{
struct bpf_map *map = aux->map;
struct bpf_map *map;
int err = -EINVAL;
if (!linfo->map.map_fd)
return -EBADF;
map = bpf_map_get_with_uref(linfo->map.map_fd);
if (IS_ERR(map))
return PTR_ERR(map);
if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
return -EINVAL;
goto put_map;
if (prog->aux->max_rdonly_access > map->value_size)
return -EACCES;
if (prog->aux->max_rdonly_access > map->value_size) {
err = -EACCES;
goto put_map;
}
aux->map = map;
return 0;
put_map:
bpf_map_put_with_uref(map);
return err;
}
static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
bpf_map_put_with_uref(aux->map);
}
static const struct seq_operations bpf_sk_storage_map_seq_ops = {
@ -1414,8 +1435,8 @@ static const struct bpf_iter_seq_info iter_seq_info = {
static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
.target = "bpf_sk_storage_map",
.check_target = bpf_iter_check_map,
.req_linfo = BPF_ITER_LINK_MAP_FD,
.attach_target = bpf_iter_attach_map,
.detach_target = bpf_iter_detach_map,
.ctx_arg_info_size = 2,
.ctx_arg_info = {
{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),

View File

@ -11,6 +11,7 @@
static int do_pin(int argc, char **argv)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, iter_opts);
union bpf_iter_link_info linfo;
const char *objfile, *path;
struct bpf_program *prog;
struct bpf_object *obj;
@ -36,6 +37,11 @@ static int do_pin(int argc, char **argv)
map_fd = map_parse_fd(&argc, &argv);
if (map_fd < 0)
return -1;
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = map_fd;
iter_opts.link_info = &linfo;
iter_opts.link_info_len = sizeof(linfo);
}
}
@ -57,9 +63,6 @@ static int do_pin(int argc, char **argv)
goto close_obj;
}
if (map_fd >= 0)
iter_opts.map_fd = map_fd;
link = bpf_program__attach_iter(prog, &iter_opts);
if (IS_ERR(link)) {
err = PTR_ERR(link);

View File

@ -566,6 +566,7 @@ static int sets_patch(struct object *obj)
next = rb_next(next);
}
return 0;
}
static int symbols_patch(struct object *obj)

View File

@ -81,6 +81,12 @@ struct bpf_cgroup_storage_key {
__u32 attach_type; /* program attach type */
};
union bpf_iter_link_info {
struct {
__u32 map_fd;
} map;
};
/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
BPF_MAP_CREATE,
@ -249,13 +255,6 @@ enum bpf_link_type {
MAX_BPF_LINK_TYPE,
};
enum bpf_iter_link_info {
BPF_ITER_LINK_UNSPEC = 0,
BPF_ITER_LINK_MAP_FD = 1,
MAX_BPF_ITER_LINK_INFO,
};
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
*
* NONE(default): No further bpf programs allowed in the subtree.
@ -623,6 +622,8 @@ union bpf_attr {
};
__u32 attach_type; /* attach type */
__u32 flags; /* extra flags */
__aligned_u64 iter_info; /* extra bpf_iter_link_info */
__u32 iter_info_len; /* iter_info length */
} link_create;
struct { /* struct used by BPF_LINK_UPDATE command */

View File

@ -599,6 +599,9 @@ int bpf_link_create(int prog_fd, int target_fd,
attr.link_create.target_fd = target_fd;
attr.link_create.attach_type = attach_type;
attr.link_create.flags = OPTS_GET(opts, flags, 0);
attr.link_create.iter_info =
ptr_to_u64(OPTS_GET(opts, iter_info, (void *)0));
attr.link_create.iter_info_len = OPTS_GET(opts, iter_info_len, 0);
return sys_bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
}

View File

@ -168,11 +168,14 @@ LIBBPF_API int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);
LIBBPF_API int bpf_prog_detach2(int prog_fd, int attachable_fd,
enum bpf_attach_type type);
union bpf_iter_link_info; /* defined in up-to-date linux/bpf.h */
struct bpf_link_create_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
__u32 flags;
union bpf_iter_link_info *iter_info;
__u32 iter_info_len;
};
#define bpf_link_create_opts__last_field flags
#define bpf_link_create_opts__last_field iter_info_len
LIBBPF_API int bpf_link_create(int prog_fd, int target_fd,
enum bpf_attach_type attach_type,
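
For callers that drive the syscall wrapper directly, the same information goes through the iter_info/iter_info_len opts added above. A minimal sketch, assuming prog_fd is the fd of an already-loaded iterator program (attached with BPF_TRACE_ITER) and map_fd a compatible map fd, both obtained elsewhere:

#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Sketch: create a bpf_iter link over a specific map via the low-level
 * bpf_link_create() wrapper; returns a link fd or a negative error.
 */
static int create_map_iter_link(int prog_fd, int map_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;

	opts.iter_info = &linfo;
	opts.iter_info_len = sizeof(linfo);

	/* target_fd must be 0 for iterators after this change */
	return bpf_link_create(prog_fd, 0, BPF_TRACE_ITER, &opts);
}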

View File

@ -564,8 +564,8 @@ done:
struct btf *btf__parse_raw(const char *path)
{
struct btf *btf = NULL;
void *data = NULL;
struct btf *btf;
FILE *f = NULL;
__u16 magic;
int err = 0;

View File

@ -8306,10 +8306,8 @@ bpf_program__attach_iter(struct bpf_program *prog,
if (!OPTS_VALID(opts, bpf_iter_attach_opts))
return ERR_PTR(-EINVAL);
if (OPTS_HAS(opts, map_fd)) {
target_fd = opts->map_fd;
link_create_opts.flags = BPF_ITER_LINK_MAP_FD;
}
link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
prog_fd = bpf_program__fd(prog);
if (prog_fd < 0) {

View File

@ -267,9 +267,10 @@ LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map);
struct bpf_iter_attach_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
__u32 map_fd;
union bpf_iter_link_info *link_info;
__u32 link_info_len;
};
#define bpf_iter_attach_opts__last_field map_fd
#define bpf_iter_attach_opts__last_field link_info_len
LIBBPF_API struct bpf_link *
bpf_program__attach_iter(struct bpf_program *prog,

View File

@ -102,7 +102,7 @@ endif
OVERRIDE_TARGETS := 1
override define CLEAN
$(call msg,CLEAN)
$(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
$(Q)$(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
endef
include ../lib.mk
@ -123,17 +123,21 @@ $(notdir $(TEST_GEN_PROGS) \
$(TEST_GEN_PROGS_EXTENDED) \
$(TEST_CUSTOM_PROGS)): %: $(OUTPUT)/% ;
$(OUTPUT)/%.o: %.c
$(call msg,CC,,$@)
$(Q)$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@
$(OUTPUT)/%:%.c
$(call msg,BINARY,,$@)
$(LINK.c) $^ $(LDLIBS) -o $@
$(Q)$(LINK.c) $^ $(LDLIBS) -o $@
$(OUTPUT)/urandom_read: urandom_read.c
$(call msg,BINARY,,$@)
$(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id
$(Q)$(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id
$(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ)
$(call msg,CC,,$@)
$(CC) -c $(CFLAGS) -o $@ $<
$(Q)$(CC) -c $(CFLAGS) -o $@ $<
VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
$(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
@ -142,7 +146,9 @@ VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \
/boot/vmlinux-$(shell uname -r)
VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
$(OUTPUT)/runqslower: $(BPFOBJ)
DEFAULT_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool
$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL)
$(Q)$(MAKE) $(submake_extras) -C $(TOOLSDIR)/bpf/runqslower \
OUTPUT=$(SCRATCH_DIR)/ VMLINUX_BTF=$(VMLINUX_BTF) \
BPFOBJ=$(BPFOBJ) BPF_INCLUDE=$(INCLUDE_DIR) && \
@ -164,7 +170,6 @@ $(OUTPUT)/test_netcnt: cgroup_helpers.c
$(OUTPUT)/test_sock_fields: cgroup_helpers.c
$(OUTPUT)/test_sysctl: cgroup_helpers.c
DEFAULT_BPFTOOL := $(SCRATCH_DIR)/sbin/bpftool
BPFTOOL ?= $(DEFAULT_BPFTOOL)
$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
$(BPFOBJ) | $(BUILD_DIR)/bpftool
@ -180,15 +185,15 @@ $(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
$(BUILD_DIR)/libbpf $(BUILD_DIR)/bpftool $(BUILD_DIR)/resolve_btfids $(INCLUDE_DIR):
$(call msg,MKDIR,,$@)
mkdir -p $@
$(Q)mkdir -p $@
$(INCLUDE_DIR)/vmlinux.h: $(VMLINUX_BTF) | $(BPFTOOL) $(INCLUDE_DIR)
ifeq ($(VMLINUX_H),)
$(call msg,GEN,,$@)
$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
$(Q)$(BPFTOOL) btf dump file $(VMLINUX_BTF) format c > $@
else
$(call msg,CP,,$@)
cp "$(VMLINUX_H)" $@
$(Q)cp "$(VMLINUX_H)" $@
endif
$(RESOLVE_BTFIDS): $(BPFOBJ) | $(BUILD_DIR)/resolve_btfids \
@ -237,28 +242,28 @@ $(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
# $4 - LDFLAGS
define CLANG_BPF_BUILD_RULE
$(call msg,CLNG-LLC,$(TRUNNER_BINARY),$2)
($(CLANG) $3 -O2 -target bpf -emit-llvm \
$(Q)($(CLANG) $3 -O2 -target bpf -emit-llvm \
-c $1 -o - || echo "BPF obj compilation failed") | \
$(LLC) -mattr=dwarfris -march=bpf -mcpu=v3 $4 -filetype=obj -o $2
endef
# Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
define CLANG_NOALU32_BPF_BUILD_RULE
$(call msg,CLNG-LLC,$(TRUNNER_BINARY),$2)
($(CLANG) $3 -O2 -target bpf -emit-llvm \
$(Q)($(CLANG) $3 -O2 -target bpf -emit-llvm \
-c $1 -o - || echo "BPF obj compilation failed") | \
$(LLC) -march=bpf -mcpu=v2 $4 -filetype=obj -o $2
endef
# Similar to CLANG_BPF_BUILD_RULE, but using native Clang and bpf LLC
define CLANG_NATIVE_BPF_BUILD_RULE
$(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
($(CLANG) $3 -O2 -emit-llvm \
$(Q)($(CLANG) $3 -O2 -emit-llvm \
-c $1 -o - || echo "BPF obj compilation failed") | \
$(LLC) -march=bpf -mcpu=v3 $4 -filetype=obj -o $2
endef
# Build BPF object using GCC
define GCC_BPF_BUILD_RULE
$(call msg,GCC-BPF,$(TRUNNER_BINARY),$2)
$(BPF_GCC) $3 $4 -O2 -c $1 -o $2
$(Q)$(BPF_GCC) $3 $4 -O2 -c $1 -o $2
endef
SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
@ -300,7 +305,7 @@ ifeq ($($(TRUNNER_OUTPUT)-dir),)
$(TRUNNER_OUTPUT)-dir := y
$(TRUNNER_OUTPUT):
$$(call msg,MKDIR,,$$@)
mkdir -p $$@
$(Q)mkdir -p $$@
endif
# ensure we set up BPF objects generation rule just once for a given
@ -320,7 +325,7 @@ $(TRUNNER_BPF_SKELS): $(TRUNNER_OUTPUT)/%.skel.h: \
$(TRUNNER_OUTPUT)/%.o \
| $(BPFTOOL) $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
$$(BPFTOOL) gen skeleton $$< > $$@
$(Q)$$(BPFTOOL) gen skeleton $$< > $$@
endif
# ensure we set up tests.h header generation rule just once
@ -344,7 +349,7 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
$(TRUNNER_BPF_SKELS) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@)
cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
$(Q)cd $$(@D) && $$(CC) -I. $$(CFLAGS) -c $(CURDIR)/$$< $$(LDLIBS) -o $$(@F)
$(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \
%.c \
@ -352,13 +357,13 @@ $(TRUNNER_EXTRA_OBJS): $(TRUNNER_OUTPUT)/%.o: \
$(TRUNNER_TESTS_HDR) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$$(call msg,EXT-OBJ,$(TRUNNER_BINARY),$$@)
$$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@
$(Q)$$(CC) $$(CFLAGS) -c $$< $$(LDLIBS) -o $$@
# only copy extra resources if in flavored build
$(TRUNNER_BINARY)-extras: $(TRUNNER_EXTRA_FILES) | $(TRUNNER_OUTPUT)
ifneq ($2,)
$$(call msg,EXT-COPY,$(TRUNNER_BINARY),$(TRUNNER_EXTRA_FILES))
cp -a $$^ $(TRUNNER_OUTPUT)/
$(Q)cp -a $$^ $(TRUNNER_OUTPUT)/
endif
$(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
@ -366,8 +371,8 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
$(RESOLVE_BTFIDS) \
| $(TRUNNER_BINARY)-extras
$$(call msg,BINARY,,$$@)
$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
$(RESOLVE_BTFIDS) --no-fail --btf btf_data.o $$@
$(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
$(Q)$(RESOLVE_BTFIDS) --no-fail --btf btf_data.o $$@
endef
@ -420,17 +425,17 @@ verifier/tests.h: verifier/*.c
) > verifier/tests.h)
$(OUTPUT)/test_verifier: test_verifier.c verifier/tests.h $(BPFOBJ) | $(OUTPUT)
$(call msg,BINARY,,$@)
$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
$(Q)$(CC) $(CFLAGS) $(filter %.a %.o %.c,$^) $(LDLIBS) -o $@
# Make sure we are able to include and link libbpf against c++.
$(OUTPUT)/test_cpp: test_cpp.cpp $(OUTPUT)/test_core_extern.skel.h $(BPFOBJ)
$(call msg,CXX,,$@)
$(CXX) $(CFLAGS) $^ $(LDLIBS) -o $@
$(Q)$(CXX) $(CFLAGS) $^ $(LDLIBS) -o $@
# Benchmark runner
$(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h
$(call msg,CC,,$@)
$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@
$(Q)$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@
$(OUTPUT)/bench_rename.o: $(OUTPUT)/test_overhead.skel.h
$(OUTPUT)/bench_trigger.o: $(OUTPUT)/trigger_bench.skel.h
$(OUTPUT)/bench_ringbufs.o: $(OUTPUT)/ringbuf_bench.skel.h \
@ -443,7 +448,7 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \
$(OUTPUT)/bench_trigger.o \
$(OUTPUT)/bench_ringbufs.o
$(call msg,BINARY,,$@)
$(CC) $(LDFLAGS) -o $@ $(filter %.a %.o,$^) $(LDLIBS)
$(Q)$(CC) $(LDFLAGS) -o $@ $(filter %.a %.o,$^) $(LDLIBS)
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \

View File

@ -468,6 +468,7 @@ static void test_bpf_hash_map(void)
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_bpf_hash_map *skel;
int err, i, len, map_fd, iter_fd;
union bpf_iter_link_info linfo;
__u64 val, expected_val = 0;
struct bpf_link *link;
struct key_t {
@ -490,13 +491,16 @@ static void test_bpf_hash_map(void)
goto out;
/* iterator with hashmap2 and hashmap3 should fail */
opts.map_fd = bpf_map__fd(skel->maps.hashmap2);
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (CHECK(!IS_ERR(link), "attach_iter",
"attach_iter for hashmap2 unexpected succeeded\n"))
goto out;
opts.map_fd = bpf_map__fd(skel->maps.hashmap3);
linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (CHECK(!IS_ERR(link), "attach_iter",
"attach_iter for hashmap3 unexpected succeeded\n"))
@ -519,7 +523,7 @@ static void test_bpf_hash_map(void)
goto out;
}
opts.map_fd = map_fd;
linfo.map.map_fd = map_fd;
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
goto out;
@ -562,6 +566,7 @@ static void test_bpf_percpu_hash_map(void)
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_bpf_percpu_hash_map *skel;
int err, i, j, len, map_fd, iter_fd;
union bpf_iter_link_info linfo;
__u32 expected_val = 0;
struct bpf_link *link;
struct key_t {
@ -606,7 +611,10 @@ static void test_bpf_percpu_hash_map(void)
goto out;
}
opts.map_fd = map_fd;
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = map_fd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
goto out;
@ -649,6 +657,7 @@ static void test_bpf_array_map(void)
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
__u32 expected_key = 0, res_first_key;
struct bpf_iter_bpf_array_map *skel;
union bpf_iter_link_info linfo;
int err, i, map_fd, iter_fd;
struct bpf_link *link;
char buf[64] = {};
@ -673,7 +682,10 @@ static void test_bpf_array_map(void)
goto out;
}
opts.map_fd = map_fd;
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = map_fd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
goto out;
@ -730,6 +742,7 @@ static void test_bpf_percpu_array_map(void)
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_bpf_percpu_array_map *skel;
__u32 expected_key = 0, expected_val = 0;
union bpf_iter_link_info linfo;
int err, i, j, map_fd, iter_fd;
struct bpf_link *link;
char buf[64];
@ -765,7 +778,10 @@ static void test_bpf_percpu_array_map(void)
goto out;
}
opts.map_fd = map_fd;
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = map_fd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
goto out;
@ -803,6 +819,7 @@ static void test_bpf_sk_storage_map(void)
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
int err, i, len, map_fd, iter_fd, num_sockets;
struct bpf_iter_bpf_sk_storage_map *skel;
union bpf_iter_link_info linfo;
int sock_fd[3] = {-1, -1, -1};
__u32 val, expected_val = 0;
struct bpf_link *link;
@ -829,7 +846,10 @@ static void test_bpf_sk_storage_map(void)
goto out;
}
opts.map_fd = map_fd;
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = map_fd;
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_sk_storage_map, &opts);
if (CHECK(IS_ERR(link), "attach_iter", "attach_iter failed\n"))
goto out;
@ -871,6 +891,7 @@ static void test_rdonly_buf_out_of_bound(void)
{
DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
struct bpf_iter_test_kern5 *skel;
union bpf_iter_link_info linfo;
struct bpf_link *link;
skel = bpf_iter_test_kern5__open_and_load();
@ -878,7 +899,10 @@ static void test_rdonly_buf_out_of_bound(void)
"skeleton open_and_load failed\n"))
return;
opts.map_fd = bpf_map__fd(skel->maps.hashmap1);
memset(&linfo, 0, sizeof(linfo));
linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
opts.link_info = &linfo;
opts.link_info_len = sizeof(linfo);
link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
if (CHECK(!IS_ERR(link), "attach_iter", "unexpected success\n"))
bpf_link__destroy(link);

View File

@ -48,21 +48,19 @@ static void test_send_signal_common(struct perf_event_attr *attr,
close(pipe_p2c[1]); /* close write */
/* notify parent signal handler is installed */
write(pipe_c2p[1], buf, 1);
CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
/* make sure parent enabled bpf program to send_signal */
read(pipe_p2c[0], buf, 1);
CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
/* wait a little for signal handler */
sleep(1);
if (sigusr1_received)
write(pipe_c2p[1], "2", 1);
else
write(pipe_c2p[1], "0", 1);
buf[0] = sigusr1_received ? '2' : '0';
CHECK(write(pipe_c2p[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
/* wait for parent notification and exit */
read(pipe_p2c[0], buf, 1);
CHECK(read(pipe_p2c[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
close(pipe_c2p[1]);
close(pipe_p2c[0]);
@ -99,7 +97,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
}
/* wait until child signal handler installed */
read(pipe_c2p[0], buf, 1);
CHECK(read(pipe_c2p[0], buf, 1) != 1, "pipe_read", "err %d\n", -errno);
/* trigger the bpf send_signal */
skel->bss->pid = pid;
@ -107,7 +105,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
skel->bss->signal_thread = signal_thread;
/* notify child that bpf program can send_signal now */
write(pipe_p2c[1], buf, 1);
CHECK(write(pipe_p2c[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
/* wait for result */
err = read(pipe_c2p[0], buf, 1);
@ -121,7 +119,7 @@ static void test_send_signal_common(struct perf_event_attr *attr,
CHECK(buf[0] != '2', test_name, "incorrect result\n");
/* notify child safe to exit */
write(pipe_p2c[1], buf, 1);
CHECK(write(pipe_p2c[1], buf, 1) != 1, "pipe_write", "err %d\n", -errno);
disable_pmu:
close(pmu_fd);

View File

@ -6,11 +6,13 @@ static __u64 read_perf_max_sample_freq(void)
{
__u64 sample_freq = 5000; /* fallback to 5000 on error */
FILE *f;
__u32 duration = 0;
f = fopen("/proc/sys/kernel/perf_event_max_sample_rate", "r");
if (f == NULL)
return sample_freq;
fscanf(f, "%llu", &sample_freq);
CHECK(fscanf(f, "%llu", &sample_freq) != 1, "Get max sample rate",
"return default value: 5000,err %d\n", -errno);
fclose(f);
return sample_freq;
}

View File

@ -0,0 +1 @@
timeout=0

View File

@ -124,17 +124,24 @@ int main(int argc, char **argv)
sprintf(test_script,
"iptables -A INPUT -p tcp --dport %d -j DROP",
TESTPORT);
system(test_script);
if (system(test_script)) {
printf("FAILED: execute command: %s, err %d\n", test_script, -errno);
goto err;
}
sprintf(test_script,
"nc 127.0.0.1 %d < /etc/passwd > /dev/null 2>&1 ",
TESTPORT);
system(test_script);
if (system(test_script))
printf("execute command: %s, err %d\n", test_script, -errno);
sprintf(test_script,
"iptables -D INPUT -p tcp --dport %d -j DROP",
TESTPORT);
system(test_script);
if (system(test_script)) {
printf("FAILED: execute command: %s, err %d\n", test_script, -errno);
goto err;
}
rv = bpf_map_lookup_elem(bpf_map__fd(global_map), &key, &g);
if (rv != 0) {