Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2020-04-15

The following pull-request contains BPF updates for your *net* tree.

We've added 10 non-merge commits during the last 3 day(s) which contain
a total of 11 files changed, 238 insertions(+), 95 deletions(-).

The main changes are:

1) Fix offset overflow for BPF_MEM BPF_DW insn mapping on arm32 JIT,
   from Luke Nelson and Xi Wang.

2) Prevent mprotect() from making a frozen & mmap()'ed BPF map writable
   again, from Andrii Nakryiko and Jann Horn.

3) Fix type of old_fd in bpf_xdp_set_link_opts to int in libbpf and add
   selftests, from Toke Høiland-Jørgensen.

4) Fix AF_XDP to check that headroom cannot be larger than the available
   space in the chunk, from Magnus Karlsson.

5) Fix reset of XDP prog when expected_fd is set, from David Ahern.

6) Fix a segfault in bpftool's struct_ops command when BTF is not
   available, from Daniel T. Lee.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in commit 78b877113f.
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1000,21 +1000,35 @@ static inline void emit_a32_mul_r64(const s8 dst[], const s8 src[],
         arm_bpf_put_reg32(dst_hi, rd[0], ctx);
 }
 
+static bool is_ldst_imm(s16 off, const u8 size)
+{
+        s16 off_max = 0;
+
+        switch (size) {
+        case BPF_B:
+        case BPF_W:
+                off_max = 0xfff;
+                break;
+        case BPF_H:
+                off_max = 0xff;
+                break;
+        case BPF_DW:
+                /* Need to make sure off+4 does not overflow. */
+                off_max = 0xfff - 4;
+                break;
+        }
+        return -off_max <= off && off <= off_max;
+}
+
 /* *(size *)(dst + off) = src */
 static inline void emit_str_r(const s8 dst, const s8 src[],
-                              s32 off, struct jit_ctx *ctx, const u8 sz){
+                              s16 off, struct jit_ctx *ctx, const u8 sz){
         const s8 *tmp = bpf2a32[TMP_REG_1];
-        s32 off_max;
         s8 rd;
 
         rd = arm_bpf_get_reg32(dst, tmp[1], ctx);
 
-        if (sz == BPF_H)
-                off_max = 0xff;
-        else
-                off_max = 0xfff;
-
-        if (off < 0 || off > off_max) {
+        if (!is_ldst_imm(off, sz)) {
                 emit_a32_mov_i(tmp[0], off, ctx);
                 emit(ARM_ADD_R(tmp[0], tmp[0], rd), ctx);
                 rd = tmp[0];
@@ -1043,18 +1057,12 @@ static inline void emit_str_r(const s8 dst, const s8 src[],
 
 /* dst = *(size*)(src + off) */
 static inline void emit_ldx_r(const s8 dst[], const s8 src,
-                              s32 off, struct jit_ctx *ctx, const u8 sz){
+                              s16 off, struct jit_ctx *ctx, const u8 sz){
         const s8 *tmp = bpf2a32[TMP_REG_1];
         const s8 *rd = is_stacked(dst_lo) ? tmp : dst;
         s8 rm = src;
-        s32 off_max;
 
-        if (sz == BPF_H)
-                off_max = 0xff;
-        else
-                off_max = 0xfff;
-
-        if (off < 0 || off > off_max) {
+        if (!is_ldst_imm(off, sz)) {
                 emit_a32_mov_i(tmp[0], off, ctx);
                 emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
                 rm = tmp[0];
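Why BPF_DW gets the tighter bound: ARM's LDR/STR immediate form encodes a 12-bit offset (LDRH/STRH only 8 bits), and the arm32 JIT lowers a 64-bit BPF access into two 32-bit accesses at off and off+4, so off+4 must also fit the encoding. A stand-alone sketch of the check, assuming simplified enum stand-ins for the size constants (the real BPF_B/BPF_H/BPF_W/BPF_DW are insn-encoding values, not 0..3):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum { BPF_B, BPF_H, BPF_W, BPF_DW };   /* stand-ins, see note above */

    static bool is_ldst_imm(int16_t off, int size)
    {
        int16_t off_max = 0;

        switch (size) {
        case BPF_B:
        case BPF_W:
            off_max = 0xfff;        /* LDR/STR: 12-bit immediate */
            break;
        case BPF_H:
            off_max = 0xff;         /* LDRH/STRH: 8-bit immediate */
            break;
        case BPF_DW:
            off_max = 0xfff - 4;    /* second word is accessed at off + 4 */
            break;
        }
        return -off_max <= off && off <= off_max;
    }

    int main(void)
    {
        /* The old check allowed off = 0xffc for BPF_DW; the second
         * 32-bit access then needed offset 0x1000, which no longer
         * fits in the 12-bit immediate field.
         */
        printf("DW @ 0xffc: %d\n", is_ldst_imm(0xffc, BPF_DW)); /* 0 */
        printf("DW @ 0xff8: %d\n", is_ldst_imm(0xff8, BPF_DW)); /* 1 */
        return 0;
    }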
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -586,9 +586,7 @@ static void bpf_map_mmap_open(struct vm_area_struct *vma)
 {
         struct bpf_map *map = vma->vm_file->private_data;
 
-        bpf_map_inc_with_uref(map);
-
-        if (vma->vm_flags & VM_WRITE) {
+        if (vma->vm_flags & VM_MAYWRITE) {
                 mutex_lock(&map->freeze_mutex);
                 map->writecnt++;
                 mutex_unlock(&map->freeze_mutex);
@@ -600,13 +598,11 @@ static void bpf_map_mmap_close(struct vm_area_struct *vma)
 {
         struct bpf_map *map = vma->vm_file->private_data;
 
-        if (vma->vm_flags & VM_WRITE) {
+        if (vma->vm_flags & VM_MAYWRITE) {
                 mutex_lock(&map->freeze_mutex);
                 map->writecnt--;
                 mutex_unlock(&map->freeze_mutex);
         }
-
-        bpf_map_put_with_uref(map);
 }
 
 static const struct vm_operations_struct bpf_map_default_vmops = {
@@ -635,14 +631,16 @@ static int bpf_map_mmap(struct file *filp, struct vm_area_struct *vma)
         /* set default open/close callbacks */
         vma->vm_ops = &bpf_map_default_vmops;
         vma->vm_private_data = map;
+        vma->vm_flags &= ~VM_MAYEXEC;
+        if (!(vma->vm_flags & VM_WRITE))
+                /* disallow re-mapping with PROT_WRITE */
+                vma->vm_flags &= ~VM_MAYWRITE;
 
         err = map->ops->map_mmap(map, vma);
         if (err)
                 goto out;
 
-        bpf_map_inc_with_uref(map);
-
-        if (vma->vm_flags & VM_WRITE)
+        bpf_map_inc_with_uref(map);
+        if (vma->vm_flags & VM_MAYWRITE)
                 map->writecnt++;
 out:
         mutex_unlock(&map->freeze_mutex);
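The user-visible contract after this change, sketched below under some assumptions (a kernel with BPF_F_MMAPABLE array maps, the libbpf API of that era, a 4096-byte page, error handling trimmed): once a map is frozen and then mapped read-only, the kernel clears VM_MAYWRITE on the VMA, so a later mprotect(PROT_WRITE) fails instead of reopening a write path to frozen data.

    #include <errno.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <bpf/bpf.h>
    #include <linux/bpf.h>

    int main(void)
    {
        void *p;
        int fd;

        /* one 8-byte value, mmap-able array map (assumed available) */
        fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "frozen_demo",
                                 sizeof(int), 8, 1, BPF_F_MMAPABLE);
        if (fd < 0 || bpf_map_freeze(fd))
            return 1;

        /* a read-only mapping of a frozen map is fine */
        p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED)
            return 1;

        /* upgrading it must now fail: VM_MAYWRITE was cleared */
        if (mprotect(p, 4096, PROT_WRITE))
            printf("mprotect(PROT_WRITE) rejected, errno=%d\n", errno);

        return 0;
    }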
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1255,8 +1255,7 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env,
         reg->type = SCALAR_VALUE;
         reg->var_off = tnum_unknown;
         reg->frameno = 0;
-        reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
-                       true : false;
+        reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks;
         __mark_reg_unbounded(reg);
 }
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -8667,8 +8667,8 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
         const struct net_device_ops *ops = dev->netdev_ops;
         enum bpf_netdev_command query;
         u32 prog_id, expected_id = 0;
-        struct bpf_prog *prog = NULL;
         bpf_op_t bpf_op, bpf_chk;
+        struct bpf_prog *prog;
         bool offload;
         int err;
 
@@ -8734,6 +8734,7 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
         } else {
                 if (!prog_id)
                         return 0;
+                prog = NULL;
         }
 
         err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
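What the one-line `prog = NULL` restores, in a hedged stand-alone model of the flow (names mirror the kernel's but this is a simplification, not kernel code): once the declaration lost its `= NULL` initializer, the detach path with an expected program attached fell through to dev_xdp_install() with an uninitialized pointer; NULL is what tells the driver to remove its program.

    #include <stdio.h>

    struct bpf_prog { int id; };

    /* stand-in for the driver callback: NULL prog means "detach" */
    static int dev_xdp_install(struct bpf_prog *prog)
    {
        if (!prog) {
            puts("detach: driver removes its XDP program");
            return 0;
        }
        printf("attach: prog id %d\n", prog->id);
        return 0;
    }

    static struct bpf_prog demo = { .id = 42 };

    static int change_xdp_fd(int fd, int attached_prog_id)
    {
        struct bpf_prog *prog;  /* no initializer after the refactor */

        if (fd >= 0) {
            prog = &demo;       /* kernel: bpf_prog_get_type_dev(fd, ...) */
        } else {
            if (!attached_prog_id)
                return 0;       /* nothing attached, nothing to do */
            prog = NULL;        /* the fix: without this line,
                                 * dev_xdp_install() reads garbage */
        }
        return dev_xdp_install(prog);
    }

    int main(void)
    {
        change_xdp_fd(3, 0);    /* attach */
        change_xdp_fd(-1, 42);  /* detach path that needed the fix */
        return 0;
    }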
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -343,7 +343,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
         u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
         unsigned int chunks, chunks_per_page;
         u64 addr = mr->addr, size = mr->len;
-        int size_chk, err;
+        int err;
 
         if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
                 /* Strictly speaking we could support this, if:
@@ -382,8 +382,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
                 return -EINVAL;
         }
 
-        size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
-        if (size_chk < 0)
+        if (headroom >= chunk_size - XDP_PACKET_HEADROOM)
                 return -EINVAL;
 
         umem->address = (unsigned long)addr;
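The arithmetic behind the new check, as a runnable sketch: headroom arrives from user space as a u32, so the old signed intermediate `chunk_size - headroom - XDP_PACKET_HEADROOM` can wrap back to a positive value before the `< 0` test ever runs, while a pure unsigned comparison cannot. (XDP_PACKET_HEADROOM is 256 in the kernel headers; chunk_size is already capped at PAGE_SIZE by the earlier check.)

    #include <stdint.h>
    #include <stdio.h>

    #define XDP_PACKET_HEADROOM 256u    /* kernel value as of this fix */

    /* old check: signed intermediate, wraps for large headroom */
    static int old_check(uint32_t chunk_size, uint32_t headroom)
    {
        int size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
        return size_chk < 0 ? -1 : 0;
    }

    /* new check: unsigned comparison, no overflow possible */
    static int new_check(uint32_t chunk_size, uint32_t headroom)
    {
        return headroom >= chunk_size - XDP_PACKET_HEADROOM ? -1 : 0;
    }

    int main(void)
    {
        /* headroom chosen so the subtraction wraps back positive:
         * 4096 - 0xfffff000 - 256 == 7936 (mod 2^32)
         */
        uint32_t chunk_size = 4096, headroom = 0xfffff000u;

        printf("old: %d (bogusly accepts)\n", old_check(chunk_size, headroom));
        printf("new: %d (rejects)\n", new_check(chunk_size, headroom));
        return 0;
    }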
|
@ -591,6 +591,8 @@ int do_struct_ops(int argc, char **argv)
|
||||
|
||||
err = cmd_select(cmds, argc, argv, do_help);
|
||||
|
||||
btf__free(btf_vmlinux);
|
||||
if (!IS_ERR(btf_vmlinux))
|
||||
btf__free(btf_vmlinux);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
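The crash pattern being fixed: when the kernel has no BTF, the btf_vmlinux pointer bpftool gets back is an ERR_PTR-style encoded error (libbpf of this era returned those rather than NULL), and handing it to btf__free() dereferences a near-invalid address. Outside the tools tree, where the IS_ERR() macro isn't available, the equivalent screen looks roughly like this (a sketch, not bpftool's code):

    #include <stdio.h>
    #include <bpf/btf.h>
    #include <bpf/libbpf.h>

    int main(void)
    {
        struct btf *btf = libbpf_find_kernel_btf();

        /* failure comes back encoded in the pointer, not as NULL */
        if (libbpf_get_error(btf)) {
            fprintf(stderr, "no vmlinux BTF available\n");
            btf = NULL;     /* never pass the error pointer around */
        }

        /* ... use btf here if non-NULL ... */

        if (btf)            /* bpftool's !IS_ERR() guard, in effect */
            btf__free(btf);
        return 0;
    }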
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -178,6 +178,8 @@ struct bpf_capabilities {
         __u32 array_mmap:1;
         /* BTF_FUNC_GLOBAL is supported */
         __u32 btf_func_global:1;
+        /* kernel support for expected_attach_type in BPF_PROG_LOAD */
+        __u32 exp_attach_type:1;
 };
 
 enum reloc_type {
@@ -194,6 +196,22 @@ struct reloc_desc {
         int sym_off;
 };
 
+struct bpf_sec_def;
+
+typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
+                                        struct bpf_program *prog);
+
+struct bpf_sec_def {
+        const char *sec;
+        size_t len;
+        enum bpf_prog_type prog_type;
+        enum bpf_attach_type expected_attach_type;
+        bool is_exp_attach_type_optional;
+        bool is_attachable;
+        bool is_attach_btf;
+        attach_fn_t attach_fn;
+};
+
 /*
  * bpf_prog should be a better name but it has been used in
  * linux/filter.h.
@@ -204,6 +222,7 @@ struct bpf_program {
         char *name;
         int prog_ifindex;
         char *section_name;
+        const struct bpf_sec_def *sec_def;
         /* section_name with / replaced by _; makes recursive pinning
          * in bpf_object__pin_programs easier
          */
@@ -3315,6 +3334,37 @@ static int bpf_object__probe_array_mmap(struct bpf_object *obj)
         return 0;
 }
 
+static int
+bpf_object__probe_exp_attach_type(struct bpf_object *obj)
+{
+        struct bpf_load_program_attr attr;
+        struct bpf_insn insns[] = {
+                BPF_MOV64_IMM(BPF_REG_0, 0),
+                BPF_EXIT_INSN(),
+        };
+        int fd;
+
+        memset(&attr, 0, sizeof(attr));
+        /* use any valid combination of program type and (optional)
+         * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
+         * to see if kernel supports expected_attach_type field for
+         * BPF_PROG_LOAD command
+         */
+        attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
+        attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
+        attr.insns = insns;
+        attr.insns_cnt = ARRAY_SIZE(insns);
+        attr.license = "GPL";
+
+        fd = bpf_load_program_xattr(&attr, NULL, 0);
+        if (fd >= 0) {
+                obj->caps.exp_attach_type = 1;
+                close(fd);
+                return 1;
+        }
+        return 0;
+}
+
 static int
 bpf_object__probe_caps(struct bpf_object *obj)
 {
@@ -3325,6 +3375,7 @@ bpf_object__probe_caps(struct bpf_object *obj)
                 bpf_object__probe_btf_func_global,
                 bpf_object__probe_btf_datasec,
                 bpf_object__probe_array_mmap,
+                bpf_object__probe_exp_attach_type,
         };
         int i, ret;
 
@@ -4861,7 +4912,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 
         memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
         load_attr.prog_type = prog->type;
-        load_attr.expected_attach_type = prog->expected_attach_type;
+        /* old kernels might not support specifying expected_attach_type */
+        if (!prog->caps->exp_attach_type && prog->sec_def &&
+            prog->sec_def->is_exp_attach_type_optional)
+                load_attr.expected_attach_type = 0;
+        else
+                load_attr.expected_attach_type = prog->expected_attach_type;
         if (prog->caps->name)
                 load_attr.name = prog->name;
         load_attr.insns = insns;
@@ -5062,6 +5118,8 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
         return 0;
 }
 
+static const struct bpf_sec_def *find_sec_def(const char *sec_name);
+
 static struct bpf_object *
 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
                    const struct bpf_object_open_opts *opts)
@@ -5117,24 +5175,17 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
         bpf_object__elf_finish(obj);
 
         bpf_object__for_each_program(prog, obj) {
-                enum bpf_prog_type prog_type;
-                enum bpf_attach_type attach_type;
-
                 if (prog->type != BPF_PROG_TYPE_UNSPEC)
                         continue;
 
-                err = libbpf_prog_type_by_name(prog->section_name, &prog_type,
-                                               &attach_type);
-                if (err == -ESRCH)
+                prog->sec_def = find_sec_def(prog->section_name);
+                if (!prog->sec_def)
                         /* couldn't guess, but user might manually specify */
                         continue;
-                if (err)
-                        goto out;
 
-                bpf_program__set_type(prog, prog_type);
-                bpf_program__set_expected_attach_type(prog, attach_type);
-                if (prog_type == BPF_PROG_TYPE_TRACING ||
-                    prog_type == BPF_PROG_TYPE_EXT)
+                bpf_program__set_type(prog, prog->sec_def->prog_type);
+                bpf_program__set_expected_attach_type(prog,
+                                prog->sec_def->expected_attach_type);
+
+                if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
+                    prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
                         prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
         }
 
@@ -6223,23 +6274,32 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
         prog->expected_attach_type = type;
 }
 
-#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, btf, atype) \
-        { string, sizeof(string) - 1, ptype, eatype, is_attachable, btf, atype }
+#define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional,           \
+                          attachable, attach_btf)                           \
+        {                                                                   \
+                .sec = string,                                              \
+                .len = sizeof(string) - 1,                                  \
+                .prog_type = ptype,                                         \
+                .expected_attach_type = eatype,                             \
+                .is_exp_attach_type_optional = eatype_optional,             \
+                .is_attachable = attachable,                                \
+                .is_attach_btf = attach_btf,                                \
+        }
 
 /* Programs that can NOT be attached. */
 #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
 
 /* Programs that can be attached. */
 #define BPF_APROG_SEC(string, ptype, atype) \
-        BPF_PROG_SEC_IMPL(string, ptype, 0, 1, 0, atype)
+        BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
 
 /* Programs that must specify expected attach type at load time. */
 #define BPF_EAPROG_SEC(string, ptype, eatype) \
-        BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype)
+        BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
 
 /* Programs that use BTF to identify attach point */
 #define BPF_PROG_BTF(string, ptype, eatype) \
-        BPF_PROG_SEC_IMPL(string, ptype, eatype, 0, 1, 0)
+        BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
 
 /* Programs that can be attached but attach type can't be identified by section
  * name. Kept for backward compatibility.
@@ -6253,11 +6313,6 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
                 __VA_ARGS__                                                 \
         }
 
-struct bpf_sec_def;
-
-typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
-                                        struct bpf_program *prog);
-
 static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
                                       struct bpf_program *prog);
 static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
@@ -6269,17 +6324,6 @@ static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
 static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
                                    struct bpf_program *prog);
 
-struct bpf_sec_def {
-        const char *sec;
-        size_t len;
-        enum bpf_prog_type prog_type;
-        enum bpf_attach_type expected_attach_type;
-        bool is_attachable;
-        bool is_attach_btf;
-        enum bpf_attach_type attach_type;
-        attach_fn_t attach_fn;
-};
-
 static const struct bpf_sec_def section_defs[] = {
         BPF_PROG_SEC("socket",                  BPF_PROG_TYPE_SOCKET_FILTER),
         BPF_PROG_SEC("sk_reuseport",            BPF_PROG_TYPE_SK_REUSEPORT),
@@ -6713,7 +6757,7 @@ int libbpf_attach_type_by_name(const char *name,
                         continue;
                 if (!section_defs[i].is_attachable)
                         return -EINVAL;
-                *attach_type = section_defs[i].attach_type;
+                *attach_type = section_defs[i].expected_attach_type;
                 return 0;
         }
         pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
@@ -7542,7 +7586,6 @@ static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
 struct bpf_link *
 bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
 {
-        const struct bpf_sec_def *sec_def;
         enum bpf_attach_type attach_type;
         char errmsg[STRERR_BUFSIZE];
         struct bpf_link *link;
@@ -7561,11 +7604,6 @@ bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
         link->detach = &bpf_link__detach_fd;
 
         attach_type = bpf_program__get_expected_attach_type(prog);
-        if (!attach_type) {
-                sec_def = find_sec_def(bpf_program__title(prog, false));
-                if (sec_def)
-                        attach_type = sec_def->attach_type;
-        }
         link_fd = bpf_link_create(prog_fd, cgroup_fd, attach_type, NULL);
         if (link_fd < 0) {
                 link_fd = -errno;
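Taken together, the libbpf changes mean a section name now fully determines both the program type and an expected attach type that may be optional. Per the updated BPF_APROG_SEC macro above and the section-name test expectations below, a program like this hedged sketch (a hypothetical example, not part of the patch) gets BPF_PROG_TYPE_CGROUP_SOCK plus expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE at load time, and load_program() zeroes the field again when the feature probe detected a kernel that predates expected_attach_type in BPF_PROG_LOAD:

    /* progs/sock_create_demo.c (hypothetical file name) */
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* "cgroup/sock" matches a section_defs[] entry whose expected attach
     * type is marked optional (is_exp_attach_type_optional), so old
     * kernels can still load this program.
     */
    SEC("cgroup/sock")
    int sock_create_demo(struct bpf_sock *ctx)
    {
        return 1;   /* allow the socket */
    }

    char _license[] SEC("license") = "GPL";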
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -458,7 +458,7 @@ struct xdp_link_info {
 
 struct bpf_xdp_set_link_opts {
         size_t sz;
-        __u32 old_fd;
+        int old_fd;
 };
 #define bpf_xdp_set_link_opts__last_field old_fd
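With old_fd signed, -1 becomes cleanly expressible and means "expect no program attached". A minimal usage sketch, matching the selftest that follows (assumes a kernel and libbpf with XDP_FLAGS_REPLACE support; run against loopback):

    #include <stdio.h>
    #include <net/if.h>
    #include <linux/if_link.h>  /* XDP_FLAGS_REPLACE */
    #include <bpf/libbpf.h>

    int main(void)
    {
        int ifindex = if_nametoindex("lo");
        int err;

        /* old_fd = -1: only succeed if nothing is currently attached */
        DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts,
                            .old_fd = -1);

        /* new fd of -1 means detach; with REPLACE this is a no-op that
         * fails if some program is unexpectedly attached
         */
        err = bpf_set_link_xdp_fd_opts(ifindex, -1, XDP_FLAGS_REPLACE,
                                       &opts);
        printf("conditional detach: %d\n", err);
        return 0;
    }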
--- a/tools/testing/selftests/bpf/prog_tests/mmap.c
+++ b/tools/testing/selftests/bpf/prog_tests/mmap.c
@@ -19,15 +19,16 @@ void test_mmap(void)
         const size_t map_sz = roundup_page(sizeof(struct map_data));
         const int zero = 0, one = 1, two = 2, far = 1500;
         const long page_size = sysconf(_SC_PAGE_SIZE);
-        int err, duration = 0, i, data_map_fd;
+        int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd;
         struct bpf_map *data_map, *bss_map;
         void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2;
         struct test_mmap__bss *bss_data;
+        struct bpf_map_info map_info;
+        __u32 map_info_sz = sizeof(map_info);
         struct map_data *map_data;
         struct test_mmap *skel;
         __u64 val = 0;
 
-
         skel = test_mmap__open_and_load();
         if (CHECK(!skel, "skel_open_and_load", "skeleton open/load failed\n"))
                 return;
@@ -36,6 +37,14 @@ void test_mmap(void)
         data_map = skel->maps.data_map;
         data_map_fd = bpf_map__fd(data_map);
 
+        /* get map's ID */
+        memset(&map_info, 0, map_info_sz);
+        err = bpf_obj_get_info_by_fd(data_map_fd, &map_info, &map_info_sz);
+        if (CHECK(err, "map_get_info", "failed %d\n", errno))
+                goto cleanup;
+        data_map_id = map_info.id;
+
         /* mmap BSS map */
         bss_mmaped = mmap(NULL, bss_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
                           bpf_map__fd(bss_map), 0);
         if (CHECK(bss_mmaped == MAP_FAILED, "bss_mmap",
@@ -98,6 +107,10 @@ void test_mmap(void)
                   "data_map freeze succeeded: err=%d, errno=%d\n", err, errno))
                 goto cleanup;
 
+        err = mprotect(map_mmaped, map_sz, PROT_READ);
+        if (CHECK(err, "mprotect_ro", "mprotect to r/o failed %d\n", errno))
+                goto cleanup;
+
         /* unmap R/W mapping */
         err = munmap(map_mmaped, map_sz);
         map_mmaped = NULL;
@@ -111,6 +124,12 @@ void test_mmap(void)
                 map_mmaped = NULL;
                 goto cleanup;
         }
+        err = mprotect(map_mmaped, map_sz, PROT_WRITE);
+        if (CHECK(!err, "mprotect_wr", "mprotect() succeeded unexpectedly!\n"))
+                goto cleanup;
+        err = mprotect(map_mmaped, map_sz, PROT_EXEC);
+        if (CHECK(!err, "mprotect_ex", "mprotect() succeeded unexpectedly!\n"))
+                goto cleanup;
         map_data = map_mmaped;
 
         /* map/unmap in a loop to test ref counting */
@@ -197,6 +216,45 @@ void test_mmap(void)
         CHECK_FAIL(map_data->val[far] != 3 * 321);
 
         munmap(tmp2, 4 * page_size);
+
+        tmp1 = mmap(NULL, map_sz, PROT_READ, MAP_SHARED, data_map_fd, 0);
+        if (CHECK(tmp1 == MAP_FAILED, "last_mmap", "failed %d\n", errno))
+                goto cleanup;
+
+        test_mmap__destroy(skel);
+        skel = NULL;
+        CHECK_FAIL(munmap(bss_mmaped, bss_sz));
+        bss_mmaped = NULL;
+        CHECK_FAIL(munmap(map_mmaped, map_sz));
+        map_mmaped = NULL;
+
+        /* map should be still held by active mmap */
+        tmp_fd = bpf_map_get_fd_by_id(data_map_id);
+        if (CHECK(tmp_fd < 0, "get_map_by_id", "failed %d\n", errno)) {
+                munmap(tmp1, map_sz);
+                goto cleanup;
+        }
+        close(tmp_fd);
+
+        /* this should release data map finally */
+        munmap(tmp1, map_sz);
+
+        /* we need to wait for RCU grace period */
+        for (i = 0; i < 10000; i++) {
+                __u32 id = data_map_id - 1;
+
+                if (bpf_map_get_next_id(id, &id) || id > data_map_id)
+                        break;
+                usleep(1);
+        }
+
+        /* should fail to get map FD by non-existing ID */
+        tmp_fd = bpf_map_get_fd_by_id(data_map_id);
+        if (CHECK(tmp_fd >= 0, "get_map_by_id_after",
+                  "unexpectedly succeeded %d\n", tmp_fd)) {
+                close(tmp_fd);
+                goto cleanup;
+        }
 
 cleanup:
         if (bss_mmaped)
                 CHECK_FAIL(munmap(bss_mmaped, bss_sz));
--- a/tools/testing/selftests/bpf/test_section_names.c
+++ b/tools/testing/selftests/bpf/test_section_names.c
@@ -43,18 +43,18 @@ static struct sec_name_test tests[] = {
         {"lwt_seg6local", {0, BPF_PROG_TYPE_LWT_SEG6LOCAL, 0}, {-EINVAL, 0} },
         {
                 "cgroup_skb/ingress",
-                {0, BPF_PROG_TYPE_CGROUP_SKB, 0},
+                {0, BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_INGRESS},
                 {0, BPF_CGROUP_INET_INGRESS},
         },
         {
                 "cgroup_skb/egress",
-                {0, BPF_PROG_TYPE_CGROUP_SKB, 0},
+                {0, BPF_PROG_TYPE_CGROUP_SKB, BPF_CGROUP_INET_EGRESS},
                 {0, BPF_CGROUP_INET_EGRESS},
         },
         {"cgroup/skb", {0, BPF_PROG_TYPE_CGROUP_SKB, 0}, {-EINVAL, 0} },
         {
                 "cgroup/sock",
-                {0, BPF_PROG_TYPE_CGROUP_SOCK, 0},
+                {0, BPF_PROG_TYPE_CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE},
                 {0, BPF_CGROUP_INET_SOCK_CREATE},
         },
         {
@@ -69,26 +69,38 @@ static struct sec_name_test tests[] = {
         },
         {
                 "cgroup/dev",
-                {0, BPF_PROG_TYPE_CGROUP_DEVICE, 0},
+                {0, BPF_PROG_TYPE_CGROUP_DEVICE, BPF_CGROUP_DEVICE},
                 {0, BPF_CGROUP_DEVICE},
         },
-        {"sockops", {0, BPF_PROG_TYPE_SOCK_OPS, 0}, {0, BPF_CGROUP_SOCK_OPS} },
+        {
+                "sockops",
+                {0, BPF_PROG_TYPE_SOCK_OPS, BPF_CGROUP_SOCK_OPS},
+                {0, BPF_CGROUP_SOCK_OPS},
+        },
         {
                 "sk_skb/stream_parser",
-                {0, BPF_PROG_TYPE_SK_SKB, 0},
+                {0, BPF_PROG_TYPE_SK_SKB, BPF_SK_SKB_STREAM_PARSER},
                 {0, BPF_SK_SKB_STREAM_PARSER},
         },
         {
                 "sk_skb/stream_verdict",
-                {0, BPF_PROG_TYPE_SK_SKB, 0},
+                {0, BPF_PROG_TYPE_SK_SKB, BPF_SK_SKB_STREAM_VERDICT},
                 {0, BPF_SK_SKB_STREAM_VERDICT},
         },
         {"sk_skb", {0, BPF_PROG_TYPE_SK_SKB, 0}, {-EINVAL, 0} },
-        {"sk_msg", {0, BPF_PROG_TYPE_SK_MSG, 0}, {0, BPF_SK_MSG_VERDICT} },
-        {"lirc_mode2", {0, BPF_PROG_TYPE_LIRC_MODE2, 0}, {0, BPF_LIRC_MODE2} },
+        {
+                "sk_msg",
+                {0, BPF_PROG_TYPE_SK_MSG, BPF_SK_MSG_VERDICT},
+                {0, BPF_SK_MSG_VERDICT},
+        },
+        {
+                "lirc_mode2",
+                {0, BPF_PROG_TYPE_LIRC_MODE2, BPF_LIRC_MODE2},
+                {0, BPF_LIRC_MODE2},
+        },
         {
                 "flow_dissector",
-                {0, BPF_PROG_TYPE_FLOW_DISSECTOR, 0},
+                {0, BPF_PROG_TYPE_FLOW_DISSECTOR, BPF_FLOW_DISSECTOR},
                 {0, BPF_FLOW_DISSECTOR},
         },
         {
@@ -158,17 +170,17 @@ static void test_prog_type_by_name(const struct sec_name_test *test)
                               &expected_attach_type);
 
         CHECK(rc != test->expected_load.rc, "check_code",
-              "prog: unexpected rc=%d for %s", rc, test->sec_name);
+              "prog: unexpected rc=%d for %s\n", rc, test->sec_name);
 
         if (rc)
                 return;
 
         CHECK(prog_type != test->expected_load.prog_type, "check_prog_type",
-              "prog: unexpected prog_type=%d for %s",
+              "prog: unexpected prog_type=%d for %s\n",
               prog_type, test->sec_name);
 
         CHECK(expected_attach_type != test->expected_load.expected_attach_type,
-              "check_attach_type", "prog: unexpected expected_attach_type=%d for %s",
+              "check_attach_type", "prog: unexpected expected_attach_type=%d for %s\n",
               expected_attach_type, test->sec_name);
 }
 
@@ -180,13 +192,13 @@ static void test_attach_type_by_name(const struct sec_name_test *test)
         rc = libbpf_attach_type_by_name(test->sec_name, &attach_type);
 
         CHECK(rc != test->expected_attach.rc, "check_ret",
-              "attach: unexpected rc=%d for %s", rc, test->sec_name);
+              "attach: unexpected rc=%d for %s\n", rc, test->sec_name);
 
         if (rc)
                 return;
 
         CHECK(attach_type != test->expected_attach.attach_type,
-              "check_attach_type", "attach: unexpected attach_type=%d for %s",
+              "check_attach_type", "attach: unexpected attach_type=%d for %s\n",
               attach_type, test->sec_name);
 }
--- a/tools/testing/selftests/bpf/prog_tests/xdp_attach.c
+++ b/tools/testing/selftests/bpf/prog_tests/xdp_attach.c
@@ -6,19 +6,34 @@
 
 void test_xdp_attach(void)
 {
+        __u32 duration = 0, id1, id2, id0 = 0, len;
         struct bpf_object *obj1, *obj2, *obj3;
         const char *file = "./test_xdp.o";
+        struct bpf_prog_info info = {};
         int err, fd1, fd2, fd3;
-        __u32 duration = 0;
         DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts,
                             .old_fd = -1);
 
+        len = sizeof(info);
+
         err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj1, &fd1);
         if (CHECK_FAIL(err))
                 return;
+        err = bpf_obj_get_info_by_fd(fd1, &info, &len);
+        if (CHECK_FAIL(err))
+                goto out_1;
+        id1 = info.id;
 
         err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj2, &fd2);
         if (CHECK_FAIL(err))
                 goto out_1;
+
+        memset(&info, 0, sizeof(info));
+        err = bpf_obj_get_info_by_fd(fd2, &info, &len);
+        if (CHECK_FAIL(err))
+                goto out_2;
+        id2 = info.id;
 
         err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj3, &fd3);
         if (CHECK_FAIL(err))
                 goto out_2;
@@ -28,6 +43,11 @@ void test_xdp_attach(void)
         if (CHECK(err, "load_ok", "initial load failed"))
                 goto out_close;
 
+        err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
+        if (CHECK(err || id0 != id1, "id1_check",
+                  "loaded prog id %u != id1 %u, err %d", id0, id1, err))
+                goto out_close;
+
         err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, XDP_FLAGS_REPLACE,
                                        &opts);
         if (CHECK(!err, "load_fail", "load with expected id didn't fail"))
@@ -37,6 +57,10 @@ void test_xdp_attach(void)
         err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd2, 0, &opts);
         if (CHECK(err, "replace_ok", "replace valid old_fd failed"))
                 goto out;
+        err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
+        if (CHECK(err || id0 != id2, "id2_check",
+                  "loaded prog id %u != id2 %u, err %d", id0, id2, err))
+                goto out_close;
 
         err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, fd3, 0, &opts);
         if (CHECK(!err, "replace_fail", "replace invalid old_fd didn't fail"))
@@ -51,6 +75,10 @@ void test_xdp_attach(void)
         if (CHECK(err, "remove_ok", "remove valid old_fd failed"))
                 goto out;
 
+        err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
+        if (CHECK(err || id0 != 0, "unload_check",
+                  "loaded prog id %u != 0, err %d", id0, err))
+                goto out_close;
+
 out:
         bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0);
 out_close: