bpf-next-for-netdev

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYIAB0WIQTFp0I1jqZrAX+hPRXbK58LschIgwUCZpGVmAAKCRDbK58LschI
 gxB4AQCgquQis63yqTI36j4iXBT+TuxHEBNoQBSLyzYdrLS1dgD/S5DRJDA+3LD+
 394hn/VtB1qvX5vaqjsov4UIwSMyxA0=
 =OhSn
 -----END PGP SIGNATURE-----

Merge tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2024-07-12

We've added 23 non-merge commits during the last 3 day(s) which contain
a total of 18 files changed, 234 insertions(+), 243 deletions(-).

The main changes are:

1) Improve BPF verifier by utilizing overflow.h helpers to check
   for overflows, from Shung-Hsi Yu. (These helpers are sketched
   below this list.)

2) Fix NULL pointer dereference in resolve_prog_type() for BPF_PROG_TYPE_EXT
   when attr->attach_prog_fd was not specified, from Tengda Wu.

3) Fix arm64 BPF JIT when generating code for BPF trampolines with
   BPF_TRAMP_F_CALL_ORIG which corrupted upper address bits,
   from Puranjay Mohan.

4) Remove test_run callback from lwt_seg6local_prog_ops which never worked
   in the first place and caused syzbot reports,
   from Sebastian Andrzej Siewior.

5) Relax BPF verifier to accept non-zero offset on KF_TRUSTED_ARGS/
   KF_RCU-typed BPF kfuncs, from Matt Bobrowski.

6) Fix a long-standing bug in libbpf regarding the handling of BPF
   skeleton forward and backward compatibility, from Andrii Nakryiko.
   (The underlying versioning pattern is sketched below this list.)

7) Annotate btf_{seq,snprintf}_show functions with __printf,
   from Alan Maguire. (A __printf sketch follows below this list.)

8) BPF selftest improvements to reuse common network helpers in sk_lookup
   test and dropping the open-coded inetaddr_len() and make_socket() ones,
   from Geliang Tang.
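
For context on 1): check_add_overflow() and check_sub_overflow() from
include/linux/overflow.h write the (wrapped) result through their third
argument and return true when the operation overflowed. A minimal
userspace sketch of the same pattern, built directly on the GCC/Clang
builtin that the kernel macros wrap (the helper name here is
illustrative, not a kernel API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* check_add_overflow()-style helper: stores the wrapped sum in
     * *res and returns true if signed overflow occurred.
     */
    static bool add_overflows_s64(int64_t a, int64_t b, int64_t *res)
    {
            return __builtin_add_overflow(a, b, res);
    }

    int main(void)
    {
            int64_t res;

            if (add_overflows_s64(INT64_MAX, 1, &res))
                    puts("overflow; caller clamps the bounds");
            else
                    printf("sum = %lld\n", (long long)res);
            return 0;
    }

This is the shape used in the verifier changes below: on overflow the
register bounds are clamped to S64_MIN/S64_MAX (or 0/U64_MAX) instead
of being recomputed by hand.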
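
The skeleton fix in 6) relies on a size-field versioning pattern: the
bpftool-generated code records the struct size it was built with
(map_skel_sz), and libbpf walks the array with that stride, touching
only fields the producer knew about. A generic, hedged sketch of the
idea (names are illustrative, not the libbpf API):

    #include <stddef.h>
    #include <stdio.h>

    struct rec {
            const char *name;   /* present since v1 */
            void *link;         /* field added in v2 */
    };

    /* Walk an array whose element size 'rec_sz' was fixed by the
     * producer; never dereference fields beyond that size.
     */
    static void walk(void *recs, size_t cnt, size_t rec_sz)
    {
            size_t i;

            for (i = 0; i < cnt; i++) {
                    struct rec *r = (void *)((char *)recs + i * rec_sz);

                    printf("%s", r->name);
                    /* only touch 'link' if the producer's struct had it */
                    if (rec_sz >= offsetof(struct rec, link) + sizeof(r->link))
                            printf(" link=%p", r->link);
                    printf("\n");
            }
    }

    int main(void)
    {
            struct rec recs[2] = { { "a", NULL }, { "b", NULL } };

            walk(recs, 2, sizeof(recs[0]));
            return 0;
    }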
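
The __printf() annotations in 7) matter because btf_seq_show() and
btf_snprintf_show() take a va_list: a format(printf, n, 0) attribute
tells the compiler to type-check the format string even though the
variadic arguments themselves cannot be checked. A standalone sketch
(the __printf macro is spelled out here; in the kernel it is provided
by the compiler attribute headers):

    #include <stdarg.h>
    #include <stdio.h>

    #define __printf(a, b) __attribute__((format(printf, a, b)))

    struct show {
            FILE *out;
    };

    /* '0' marks a vprintf-style function: check fmt, not the args */
    static __printf(2, 0) void show_vprintf(struct show *s, const char *fmt,
                                            va_list args)
    {
            vfprintf(s->out, fmt, args);
    }

    static __printf(2, 3) void show_printf(struct show *s, const char *fmt, ...)
    {
            va_list args;

            va_start(args, fmt);
            show_vprintf(s, fmt, args);
            va_end(args);
    }

    int main(void)
    {
            struct show s = { .out = stdout };

            show_printf(&s, "value: %d\n", 42);
            return 0;
    }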

* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (23 commits)
  selftests/bpf: Test for null-pointer-deref bugfix in resolve_prog_type()
  bpf: Fix null pointer dereference in resolve_prog_type() for BPF_PROG_TYPE_EXT
  selftests/bpf: DENYLIST.aarch64: Skip fexit_sleep again
  bpf: use check_sub_overflow() to check for subtraction overflows
  bpf: use check_add_overflow() to check for addition overflows
  bpf: fix overflow check in adjust_jmp_off()
  bpf: Eliminate remaining "make W=1" warnings in kernel/bpf/btf.o
  bpf: annotate BTF show functions with __printf
  bpf, arm64: Fix trampoline for BPF_TRAMP_F_CALL_ORIG
  selftests/bpf: Close obj in error path in xdp_adjust_tail
  selftests/bpf: Null checks for links in bpf_tcp_ca
  selftests/bpf: Use connect_fd_to_fd in sk_lookup
  selftests/bpf: Use start_server_addr in sk_lookup
  selftests/bpf: Use start_server_str in sk_lookup
  selftests/bpf: Close fd in error path in drop_on_reuseport
  selftests/bpf: Add ASSERT_OK_FD macro
  selftests/bpf: Add backlog for network_helper_opts
  selftests/bpf: fix compilation failure when CONFIG_NF_FLOW_TABLE=m
  bpf: Remove tst_run from lwt_seg6local_prog_ops.
  bpf: relax zero fixed offset constraint on KF_TRUSTED_ARGS/KF_RCU
  ...
====================

Link: https://patch.msgid.link/20240712212448.5378-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

arch/arm64/net/bpf_jit_comp.c

@@ -2147,7 +2147,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
 	emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx);
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
-		emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
+		emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
 		emit_call((const u64)__bpf_tramp_enter, ctx);
 	}
@@ -2191,7 +2191,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
 	if (flags & BPF_TRAMP_F_CALL_ORIG) {
 		im->ip_epilogue = ctx->ro_image + ctx->idx;
-		emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
+		emit_a64_mov_i64(A64_R(0), (const u64)im, ctx);
 		emit_call((const u64)__bpf_tramp_exit, ctx);
 	}

include/linux/bpf_verifier.h

@@ -856,7 +856,7 @@ static inline u32 type_flag(u32 type)
 /* only use after check_attach_btf_id() */
 static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
 {
-	return prog->type == BPF_PROG_TYPE_EXT ?
+	return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->dst_prog) ?
 		prog->aux->dst_prog->type : prog->type;
 }

kernel/bpf/btf.c

@@ -415,7 +415,7 @@ const char *btf_type_str(const struct btf_type *t)
 struct btf_show {
 	u64 flags;
 	void *target;	/* target of show operation (seq file, buffer) */
-	void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
+	__printf(2, 0) void (*showfn)(struct btf_show *show, const char *fmt, va_list args);
 	const struct btf *btf;
 	/* below are used during iteration */
 	struct {
@@ -7538,8 +7538,8 @@ static void btf_type_show(const struct btf *btf, u32 type_id, void *obj,
 	btf_type_ops(t)->show(btf, t, type_id, obj, 0, show);
 }
 
-static void btf_seq_show(struct btf_show *show, const char *fmt,
-			 va_list args)
+__printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt,
+					va_list args)
 {
 	seq_vprintf((struct seq_file *)show->target, fmt, args);
 }
@@ -7572,8 +7572,8 @@ struct btf_show_snprintf {
 	int len;		/* length we would have written */
 };
 
-static void btf_snprintf_show(struct btf_show *show, const char *fmt,
-			      va_list args)
+__printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt,
+					     va_list args)
 {
 	struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show;
 	int len;

kernel/bpf/verifier.c

@@ -11335,7 +11335,9 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env,
 	    btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id))
 		strict_type_match = true;
 
-	WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off);
+	WARN_ON_ONCE(is_kfunc_release(meta) &&
+		     (reg->off || !tnum_is_const(reg->var_off) ||
+		      reg->var_off.value));
 
 	reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id);
 	reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off);
@@ -11917,12 +11919,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
 					return -EINVAL;
 				}
 			}
 			fallthrough;
 		case KF_ARG_PTR_TO_CTX:
-			/* Trusted arguments have the same offset checks as release arguments */
-			arg_type |= OBJ_RELEASE;
-			break;
 		case KF_ARG_PTR_TO_DYNPTR:
 		case KF_ARG_PTR_TO_ITER:
 		case KF_ARG_PTR_TO_LIST_HEAD:
@@ -11935,7 +11933,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_
 		case KF_ARG_PTR_TO_REFCOUNTED_KPTR:
 		case KF_ARG_PTR_TO_CONST_STR:
 		case KF_ARG_PTR_TO_WORKQUEUE:
-			/* Trusted by default */
 			break;
 		default:
 			WARN_ON_ONCE(1);
@@ -12729,56 +12726,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	return 0;
 }
 
-static bool signed_add_overflows(s64 a, s64 b)
-{
-	/* Do the add in u64, where overflow is well-defined */
-	s64 res = (s64)((u64)a + (u64)b);
-
-	if (b < 0)
-		return res > a;
-	return res < a;
-}
-
-static bool signed_add32_overflows(s32 a, s32 b)
-{
-	/* Do the add in u32, where overflow is well-defined */
-	s32 res = (s32)((u32)a + (u32)b);
-
-	if (b < 0)
-		return res > a;
-	return res < a;
-}
-
-static bool signed_add16_overflows(s16 a, s16 b)
-{
-	/* Do the add in u16, where overflow is well-defined */
-	s16 res = (s16)((u16)a + (u16)b);
-
-	if (b < 0)
-		return res > a;
-	return res < a;
-}
-
-static bool signed_sub_overflows(s64 a, s64 b)
-{
-	/* Do the sub in u64, where overflow is well-defined */
-	s64 res = (s64)((u64)a - (u64)b);
-
-	if (b < 0)
-		return res < a;
-	return res > a;
-}
-
-static bool signed_sub32_overflows(s32 a, s32 b)
-{
-	/* Do the sub in u32, where overflow is well-defined */
-	s32 res = (s32)((u32)a - (u32)b);
-
-	if (b < 0)
-		return res < a;
-	return res > a;
-}
-
 static bool check_reg_sane_offset(struct bpf_verifier_env *env,
 				  const struct bpf_reg_state *reg,
 				  enum bpf_reg_type type)
@@ -13260,21 +13207,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		 * added into the variable offset, and we copy the fixed offset
 		 * from ptr_reg.
 		 */
-		if (signed_add_overflows(smin_ptr, smin_val) ||
-		    signed_add_overflows(smax_ptr, smax_val)) {
+		if (check_add_overflow(smin_ptr, smin_val, &dst_reg->smin_value) ||
+		    check_add_overflow(smax_ptr, smax_val, &dst_reg->smax_value)) {
 			dst_reg->smin_value = S64_MIN;
 			dst_reg->smax_value = S64_MAX;
-		} else {
-			dst_reg->smin_value = smin_ptr + smin_val;
-			dst_reg->smax_value = smax_ptr + smax_val;
 		}
-		if (umin_ptr + umin_val < umin_ptr ||
-		    umax_ptr + umax_val < umax_ptr) {
+		if (check_add_overflow(umin_ptr, umin_val, &dst_reg->umin_value) ||
+		    check_add_overflow(umax_ptr, umax_val, &dst_reg->umax_value)) {
 			dst_reg->umin_value = 0;
 			dst_reg->umax_value = U64_MAX;
-		} else {
-			dst_reg->umin_value = umin_ptr + umin_val;
-			dst_reg->umax_value = umax_ptr + umax_val;
 		}
 		dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off);
 		dst_reg->off = ptr_reg->off;
@@ -13317,14 +13258,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		/* A new variable offset is created. If the subtrahend is known
 		 * nonnegative, then any reg->range we had before is still good.
 		 */
-		if (signed_sub_overflows(smin_ptr, smax_val) ||
-		    signed_sub_overflows(smax_ptr, smin_val)) {
+		if (check_sub_overflow(smin_ptr, smax_val, &dst_reg->smin_value) ||
+		    check_sub_overflow(smax_ptr, smin_val, &dst_reg->smax_value)) {
 			/* Overflow possible, we know nothing */
 			dst_reg->smin_value = S64_MIN;
 			dst_reg->smax_value = S64_MAX;
-		} else {
-			dst_reg->smin_value = smin_ptr - smax_val;
-			dst_reg->smax_value = smax_ptr - smin_val;
 		}
 		if (umin_ptr < umax_val) {
 			/* Overflow possible, we know nothing */
@@ -13377,71 +13315,56 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
 				 struct bpf_reg_state *src_reg)
 {
-	s32 smin_val = src_reg->s32_min_value;
-	s32 smax_val = src_reg->s32_max_value;
-	u32 umin_val = src_reg->u32_min_value;
-	u32 umax_val = src_reg->u32_max_value;
+	s32 *dst_smin = &dst_reg->s32_min_value;
+	s32 *dst_smax = &dst_reg->s32_max_value;
+	u32 *dst_umin = &dst_reg->u32_min_value;
+	u32 *dst_umax = &dst_reg->u32_max_value;
 
-	if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) ||
-	    signed_add32_overflows(dst_reg->s32_max_value, smax_val)) {
-		dst_reg->s32_min_value = S32_MIN;
-		dst_reg->s32_max_value = S32_MAX;
-	} else {
-		dst_reg->s32_min_value += smin_val;
-		dst_reg->s32_max_value += smax_val;
+	if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) ||
+	    check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) {
+		*dst_smin = S32_MIN;
+		*dst_smax = S32_MAX;
 	}
-	if (dst_reg->u32_min_value + umin_val < umin_val ||
-	    dst_reg->u32_max_value + umax_val < umax_val) {
-		dst_reg->u32_min_value = 0;
-		dst_reg->u32_max_value = U32_MAX;
-	} else {
-		dst_reg->u32_min_value += umin_val;
-		dst_reg->u32_max_value += umax_val;
+	if (check_add_overflow(*dst_umin, src_reg->u32_min_value, dst_umin) ||
+	    check_add_overflow(*dst_umax, src_reg->u32_max_value, dst_umax)) {
+		*dst_umin = 0;
+		*dst_umax = U32_MAX;
 	}
 }
 
 static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
 			       struct bpf_reg_state *src_reg)
 {
-	s64 smin_val = src_reg->smin_value;
-	s64 smax_val = src_reg->smax_value;
-	u64 umin_val = src_reg->umin_value;
-	u64 umax_val = src_reg->umax_value;
+	s64 *dst_smin = &dst_reg->smin_value;
+	s64 *dst_smax = &dst_reg->smax_value;
+	u64 *dst_umin = &dst_reg->umin_value;
+	u64 *dst_umax = &dst_reg->umax_value;
 
-	if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
-	    signed_add_overflows(dst_reg->smax_value, smax_val)) {
-		dst_reg->smin_value = S64_MIN;
-		dst_reg->smax_value = S64_MAX;
-	} else {
-		dst_reg->smin_value += smin_val;
-		dst_reg->smax_value += smax_val;
+	if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) ||
+	    check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) {
+		*dst_smin = S64_MIN;
+		*dst_smax = S64_MAX;
 	}
-	if (dst_reg->umin_value + umin_val < umin_val ||
-	    dst_reg->umax_value + umax_val < umax_val) {
-		dst_reg->umin_value = 0;
-		dst_reg->umax_value = U64_MAX;
-	} else {
-		dst_reg->umin_value += umin_val;
-		dst_reg->umax_value += umax_val;
+	if (check_add_overflow(*dst_umin, src_reg->umin_value, dst_umin) ||
+	    check_add_overflow(*dst_umax, src_reg->umax_value, dst_umax)) {
+		*dst_umin = 0;
+		*dst_umax = U64_MAX;
 	}
 }
 
 static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
 				 struct bpf_reg_state *src_reg)
 {
-	s32 smin_val = src_reg->s32_min_value;
-	s32 smax_val = src_reg->s32_max_value;
+	s32 *dst_smin = &dst_reg->s32_min_value;
+	s32 *dst_smax = &dst_reg->s32_max_value;
 	u32 umin_val = src_reg->u32_min_value;
 	u32 umax_val = src_reg->u32_max_value;
 
-	if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) ||
-	    signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) {
+	if (check_sub_overflow(*dst_smin, src_reg->s32_max_value, dst_smin) ||
+	    check_sub_overflow(*dst_smax, src_reg->s32_min_value, dst_smax)) {
 		/* Overflow possible, we know nothing */
-		dst_reg->s32_min_value = S32_MIN;
-		dst_reg->s32_max_value = S32_MAX;
-	} else {
-		dst_reg->s32_min_value -= smax_val;
-		dst_reg->s32_max_value -= smin_val;
+		*dst_smin = S32_MIN;
+		*dst_smax = S32_MAX;
 	}
 	if (dst_reg->u32_min_value < umax_val) {
 		/* Overflow possible, we know nothing */
@@ -13457,19 +13380,16 @@ static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
 static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
 			       struct bpf_reg_state *src_reg)
 {
-	s64 smin_val = src_reg->smin_value;
-	s64 smax_val = src_reg->smax_value;
+	s64 *dst_smin = &dst_reg->smin_value;
+	s64 *dst_smax = &dst_reg->smax_value;
 	u64 umin_val = src_reg->umin_value;
 	u64 umax_val = src_reg->umax_value;
 
-	if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
-	    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
+	if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) ||
+	    check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) {
 		/* Overflow possible, we know nothing */
-		dst_reg->smin_value = S64_MIN;
-		dst_reg->smax_value = S64_MAX;
-	} else {
-		dst_reg->smin_value -= smax_val;
-		dst_reg->smax_value -= smin_val;
+		*dst_smin = S64_MIN;
+		*dst_smax = S64_MAX;
 	}
 	if (dst_reg->umin_value < umax_val) {
 		/* Overflow possible, we know nothing */
@@ -18838,6 +18758,8 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
 {
 	struct bpf_insn *insn = prog->insnsi;
 	u32 insn_cnt = prog->len, i;
+	s32 imm;
+	s16 off;
 
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		u8 code = insn->code;
@@ -18849,15 +18771,15 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta)
 		if (insn->code == (BPF_JMP32 | BPF_JA)) {
 			if (i + 1 + insn->imm != tgt_idx)
 				continue;
-			if (signed_add32_overflows(insn->imm, delta))
+			if (check_add_overflow(insn->imm, delta, &imm))
 				return -ERANGE;
-			insn->imm += delta;
+			insn->imm = imm;
 		} else {
 			if (i + 1 + insn->off != tgt_idx)
 				continue;
-			if (signed_add16_overflows(insn->imm, delta))
+			if (check_add_overflow(insn->off, delta, &off))
 				return -ERANGE;
-			insn->off += delta;
+			insn->off = off;
 		}
 	}
 	return 0;

net/core/filter.c

@@ -11053,7 +11053,6 @@ const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
 };
 
 const struct bpf_prog_ops lwt_seg6local_prog_ops = {
-	.test_run		= bpf_prog_test_run_skb,
 };
 
 const struct bpf_verifier_ops cg_sock_verifier_ops = {

tools/bpf/bpftool/gen.c

@@ -852,24 +852,41 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool
 {
 	struct bpf_map *map;
 	char ident[256];
-	size_t i;
+	size_t i, map_sz;
 
 	if (!map_cnt)
 		return;
 
+	/* for backward compatibility with old libbpf versions that don't
+	 * handle new BPF skeleton with new struct bpf_map_skeleton definition
+	 * that includes link field, avoid specifying new increased size,
+	 * unless we absolutely have to (i.e., if there are struct_ops maps
+	 * present)
+	 */
+	map_sz = offsetof(struct bpf_map_skeleton, link);
+	if (populate_links) {
+		bpf_object__for_each_map(map, obj) {
+			if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
+				map_sz = sizeof(struct bpf_map_skeleton);
+				break;
+			}
+		}
+	}
+
 	codegen("\
 		\n\
 							    \n\
 		/* maps */				    \n\
 		s->map_cnt = %zu;			    \n\
-		s->map_skel_sz = sizeof(*s->maps);	    \n\
-		s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\
+		s->map_skel_sz = %zu;			    \n\
+		s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt,\n\
+				sizeof(*s->maps) > %zu ? sizeof(*s->maps) : %zu);\n\
 		if (!s->maps) {				    \n\
 			err = -ENOMEM;			    \n\
 			goto err;			    \n\
 		}					    \n\
 	",
-		map_cnt
+		map_cnt, map_sz, map_sz, map_sz
 	);
 
 	i = 0;
 	bpf_object__for_each_map(map, obj) {
@@ -878,23 +895,22 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool
 		codegen("\
 			\n\
 			\n\
-			s->maps[%zu].name = \"%s\";	    \n\
-			s->maps[%zu].map = &obj->maps.%s;   \n\
+			map = (struct bpf_map_skeleton *)((char *)s->maps + %zu * s->map_skel_sz);\n\
+			map->name = \"%s\";		    \n\
+			map->map = &obj->maps.%s;	    \n\
 		",
-			i, bpf_map__name(map), i, ident);
+			i, bpf_map__name(map), ident);
 
 		/* memory-mapped internal maps */
 		if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) {
-			printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n",
-			       i, ident);
+			printf("\tmap->mmaped = (void **)&obj->%s;\n", ident);
 		}
 
 		if (populate_links && bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) {
 			codegen("\
 				\n\
-				s->maps[%zu].link = &obj->links.%s;\n\
-			",
-				i, ident);
+				map->link = &obj->links.%s;\n\
+			", ident);
 		}
 		i++;
 	}
@@ -1463,6 +1479,7 @@ static int do_skeleton(int argc, char **argv)
 		%1$s__create_skeleton(struct %1$s *obj)			    \n\
 		{							    \n\
 			struct bpf_object_skeleton *s;			    \n\
+			struct bpf_map_skeleton *map __attribute__((unused));\n\
 			int err;					    \n\
 									    \n\
 			s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\
@@ -1753,6 +1770,7 @@ static int do_subskeleton(int argc, char **argv)
 		{							    \n\
 			struct %1$s *obj;				    \n\
 			struct bpf_object_subskeleton *s;		    \n\
+			struct bpf_map_skeleton *map __attribute__((unused));\n\
 			int err;					    \n\
 									    \n\
 			obj = (struct %1$s *)calloc(1, sizeof(*obj));	    \n\

tools/lib/bpf/libbpf.c

@@ -13712,14 +13712,15 @@ int libbpf_num_possible_cpus(void)
 static int populate_skeleton_maps(const struct bpf_object *obj,
 				  struct bpf_map_skeleton *maps,
-				  size_t map_cnt)
+				  size_t map_cnt, size_t map_skel_sz)
 {
 	int i;
 
 	for (i = 0; i < map_cnt; i++) {
-		struct bpf_map **map = maps[i].map;
-		const char *name = maps[i].name;
-		void **mmaped = maps[i].mmaped;
+		struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz;
+		struct bpf_map **map = map_skel->map;
+		const char *name = map_skel->name;
+		void **mmaped = map_skel->mmaped;
 
 		*map = bpf_object__find_map_by_name(obj, name);
 		if (!*map) {
@@ -13736,13 +13737,14 @@ static int populate_skeleton_maps(const struct bpf_object *obj,
 static int populate_skeleton_progs(const struct bpf_object *obj,
 				   struct bpf_prog_skeleton *progs,
-				   size_t prog_cnt)
+				   size_t prog_cnt, size_t prog_skel_sz)
 {
 	int i;
 
 	for (i = 0; i < prog_cnt; i++) {
-		struct bpf_program **prog = progs[i].prog;
-		const char *name = progs[i].name;
+		struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz;
+		struct bpf_program **prog = prog_skel->prog;
+		const char *name = prog_skel->name;
 
 		*prog = bpf_object__find_program_by_name(obj, name);
 		if (!*prog) {
@@ -13783,13 +13785,13 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
 	}
 
 	*s->obj = obj;
-	err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
+	err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz);
 	if (err) {
 		pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
 		return libbpf_err(err);
 	}
 
-	err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
+	err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz);
 	if (err) {
 		pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
 		return libbpf_err(err);
@@ -13819,20 +13821,20 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
 		return libbpf_err(-errno);
 	}
 
-	err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
+	err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz);
 	if (err) {
 		pr_warn("failed to populate subskeleton maps: %d\n", err);
 		return libbpf_err(err);
 	}
 
-	err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
+	err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz);
 	if (err) {
 		pr_warn("failed to populate subskeleton maps: %d\n", err);
 		return libbpf_err(err);
 	}
 
 	for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
-		var_skel = &s->vars[var_idx];
+		var_skel = (void *)s->vars + var_idx * s->var_skel_sz;
 		map = *var_skel->map;
 		map_type_id = bpf_map__btf_value_type_id(map);
 		map_type = btf__type_by_id(btf, map_type_id);
@@ -13879,10 +13881,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
 	}
 
 	for (i = 0; i < s->map_cnt; i++) {
-		struct bpf_map *map = *s->maps[i].map;
+		struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
+		struct bpf_map *map = *map_skel->map;
 		size_t mmap_sz = bpf_map_mmap_sz(map);
 		int prot, map_fd = map->fd;
-		void **mmaped = s->maps[i].mmaped;
+		void **mmaped = map_skel->mmaped;
 
 		if (!mmaped)
 			continue;
@@ -13930,8 +13933,9 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
 	int i, err;
 
 	for (i = 0; i < s->prog_cnt; i++) {
-		struct bpf_program *prog = *s->progs[i].prog;
-		struct bpf_link **link = s->progs[i].link;
+		struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
+		struct bpf_program *prog = *prog_skel->prog;
+		struct bpf_link **link = prog_skel->link;
 
 		if (!prog->autoload || !prog->autoattach)
 			continue;
@@ -13963,31 +13967,34 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
 		 */
 	}
 
-	/* Skeleton is created with earlier version of bpftool
-	 * which does not support auto-attachment
-	 */
-	if (s->map_skel_sz < sizeof(struct bpf_map_skeleton))
-		return 0;
-
 	for (i = 0; i < s->map_cnt; i++) {
-		struct bpf_map *map = *s->maps[i].map;
-		struct bpf_link **link = s->maps[i].link;
+		struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
+		struct bpf_map *map = *map_skel->map;
+		struct bpf_link **link;
 
 		if (!map->autocreate || !map->autoattach)
 			continue;
 
-		if (*link)
-			continue;
-
 		/* only struct_ops maps can be attached */
 		if (!bpf_map__is_struct_ops(map))
 			continue;
-		*link = bpf_map__attach_struct_ops(map);
 
+		/* skeleton is created with earlier version of bpftool, notify user */
+		if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) {
+			pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n",
+				bpf_map__name(map));
+			continue;
+		}
+
+		link = map_skel->link;
+		if (*link)
+			continue;
+
+		*link = bpf_map__attach_struct_ops(map);
 		if (!*link) {
 			err = -errno;
-			pr_warn("map '%s': failed to auto-attach: %d\n",
-				bpf_map__name(map), err);
+			pr_warn("map '%s': failed to auto-attach: %d\n", bpf_map__name(map), err);
 			return libbpf_err(err);
 		}
 	}
@@ -14000,7 +14007,8 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
 	int i;
 
 	for (i = 0; i < s->prog_cnt; i++) {
-		struct bpf_link **link = s->progs[i].link;
+		struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz;
+		struct bpf_link **link = prog_skel->link;
 
 		bpf_link__destroy(*link);
 		*link = NULL;
@@ -14010,7 +14018,8 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
 		return;
 
 	for (i = 0; i < s->map_cnt; i++) {
-		struct bpf_link **link = s->maps[i].link;
+		struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz;
+		struct bpf_link **link = map_skel->link;
 
 		if (link) {
 			bpf_link__destroy(*link);

tools/testing/selftests/bpf/DENYLIST.aarch64

@@ -1,5 +1,6 @@
 bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
 bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
+fexit_sleep # The test never returns. The remaining tests cannot start.
 kprobe_multi_bench_attach # needs CONFIG_FPROBE
 kprobe_multi_test # needs CONFIG_FPROBE
 module_attach # prog 'kprobe_multi': failed to auto-attach: -95

tools/testing/selftests/bpf/network_helpers.c

@@ -106,7 +106,7 @@ static int __start_server(int type, const struct sockaddr *addr, socklen_t addrl
 	}
 
 	if (type == SOCK_STREAM) {
-		if (listen(fd, 1) < 0) {
+		if (listen(fd, opts->backlog ? MAX(opts->backlog, 0) : 1) < 0) {
 			log_err("Failed to listed on socket");
 			goto error_close;
 		}

tools/testing/selftests/bpf/network_helpers.h

@@ -25,6 +25,16 @@ struct network_helper_opts {
 	int timeout_ms;
 	bool must_fail;
 	int proto;
+	/* +ve: Passed to listen() as-is.
+	 *   0: Default when the test does not set
+	 *      a particular value during the struct init.
+	 *      It is changed to 1 before passing to listen().
+	 *      Most tests only have one on-going connection.
+	 * -ve: It is changed to 0 before passing to listen().
+	 *      It is useful to force syncookie without
+	 *      changing the "tcp_syncookies" sysctl from 1 to 2.
+	 */
+	int backlog;
 	int (*post_socket_cb)(int fd, void *opts);
 	void *cb_opts;
 };

tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c

@@ -411,7 +411,8 @@ static void test_update_ca(void)
 		return;
 
 	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
-	ASSERT_OK_PTR(link, "attach_struct_ops");
+	if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+		goto out;
 
 	do_test(&opts);
 	saved_ca1_cnt = skel->bss->ca1_cnt;
@@ -425,6 +426,7 @@ static void test_update_ca(void)
 	ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt");
 
 	bpf_link__destroy(link);
+out:
 	tcp_ca_update__destroy(skel);
 }
@@ -447,7 +449,8 @@ static void test_update_wrong(void)
 		return;
 
 	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
-	ASSERT_OK_PTR(link, "attach_struct_ops");
+	if (!ASSERT_OK_PTR(link, "attach_struct_ops"))
+		goto out;
 
 	do_test(&opts);
 	saved_ca1_cnt = skel->bss->ca1_cnt;
@@ -460,6 +463,7 @@ static void test_update_wrong(void)
 	ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt");
 
 	bpf_link__destroy(link);
+out:
 	tcp_ca_update__destroy(skel);
 }
@@ -481,7 +485,8 @@ static void test_mixed_links(void)
 		return;
 
 	link_nl = bpf_map__attach_struct_ops(skel->maps.ca_no_link);
-	ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl");
+	if (!ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl"))
+		goto out;
 
 	link = bpf_map__attach_struct_ops(skel->maps.ca_update_1);
 	ASSERT_OK_PTR(link, "attach_struct_ops");
@@ -494,6 +499,7 @@ static void test_mixed_links(void)
 	bpf_link__destroy(link);
 	bpf_link__destroy(link_nl);
+out:
 	tcp_ca_update__destroy(skel);
 }
@@ -536,7 +542,8 @@ static void test_link_replace(void)
 	bpf_link__destroy(link);
 
 	link = bpf_map__attach_struct_ops(skel->maps.ca_update_2);
-	ASSERT_OK_PTR(link, "attach_struct_ops_2nd");
+	if (!ASSERT_OK_PTR(link, "attach_struct_ops_2nd"))
+		goto out;
 
 	/* BPF_F_REPLACE with a wrong old map Fd. It should fail!
 	 *
@@ -559,6 +566,7 @@ static void test_link_replace(void)
 	bpf_link__destroy(link);
 
+out:
 	tcp_ca_update__destroy(skel);
 }

tools/testing/selftests/bpf/prog_tests/sk_lookup.c

@@ -77,6 +77,12 @@ struct test {
 	bool reuseport_has_conns;	/* Add a connected socket to reuseport group */
 };
 
+struct cb_opts {
+	int family;
+	int sotype;
+	bool reuseport;
+};
+
 static __u32 duration;		/* for CHECK macro */
 
 static bool is_ipv6(const char *ip)
@@ -142,19 +148,14 @@ static int make_socket(int sotype, const char *ip, int port,
 	return fd;
 }
 
-static int make_server(int sotype, const char *ip, int port,
-		       struct bpf_program *reuseport_prog)
+static int setsockopts(int fd, void *opts)
 {
-	struct sockaddr_storage addr = {0};
+	struct cb_opts *co = (struct cb_opts *)opts;
 	const int one = 1;
-	int err, fd = -1;
-
-	fd = make_socket(sotype, ip, port, &addr);
-	if (fd < 0)
-		return -1;
+	int err = 0;
 
 	/* Enabled for UDPv6 sockets for IPv4-mapped IPv6 to work. */
-	if (sotype == SOCK_DGRAM) {
+	if (co->sotype == SOCK_DGRAM) {
 		err = setsockopt(fd, SOL_IP, IP_RECVORIGDSTADDR, &one,
 				 sizeof(one));
 		if (CHECK(err, "setsockopt(IP_RECVORIGDSTADDR)", "failed\n")) {
@@ -163,7 +164,7 @@ static int make_server(int sotype, const char *ip, int port,
 		}
 	}
 
-	if (sotype == SOCK_DGRAM && addr.ss_family == AF_INET6) {
+	if (co->sotype == SOCK_DGRAM && co->family == AF_INET6) {
 		err = setsockopt(fd, SOL_IPV6, IPV6_RECVORIGDSTADDR, &one,
 				 sizeof(one));
 		if (CHECK(err, "setsockopt(IPV6_RECVORIGDSTADDR)", "failed\n")) {
@@ -172,7 +173,7 @@ static int make_server(int sotype, const char *ip, int port,
 		}
 	}
 
-	if (sotype == SOCK_STREAM) {
+	if (co->sotype == SOCK_STREAM) {
 		err = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one,
 				 sizeof(one));
 		if (CHECK(err, "setsockopt(SO_REUSEADDR)", "failed\n")) {
@@ -181,7 +182,7 @@ static int make_server(int sotype, const char *ip, int port,
 		}
 	}
 
-	if (reuseport_prog) {
+	if (co->reuseport) {
 		err = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one,
 				 sizeof(one));
 		if (CHECK(err, "setsockopt(SO_REUSEPORT)", "failed\n")) {
@@ -190,19 +191,28 @@ static int make_server(int sotype, const char *ip, int port,
 		}
 	}
 
-	err = bind(fd, (void *)&addr, inetaddr_len(&addr));
-	if (CHECK(err, "bind", "failed\n")) {
-		log_err("failed to bind listen socket");
-		goto fail;
-	}
-
-	if (sotype == SOCK_STREAM) {
-		err = listen(fd, SOMAXCONN);
-		if (CHECK(err, "make_server", "listen")) {
-			log_err("failed to listen on port %d", port);
-			goto fail;
-		}
-	}
+fail:
+	return err;
+}
+
+static int make_server(int sotype, const char *ip, int port,
+		       struct bpf_program *reuseport_prog)
+{
+	struct cb_opts cb_opts = {
+		.family = is_ipv6(ip) ? AF_INET6 : AF_INET,
+		.sotype = sotype,
+		.reuseport = reuseport_prog,
+	};
+	struct network_helper_opts opts = {
+		.backlog = SOMAXCONN,
+		.post_socket_cb = setsockopts,
+		.cb_opts = &cb_opts,
+	};
+	int err, fd;
+
+	fd = start_server_str(cb_opts.family, sotype, ip, port, &opts);
+	if (!ASSERT_OK_FD(fd, "start_server_str"))
+		return -1;
 
 	/* Late attach reuseport prog so we can have one init path */
 	if (reuseport_prog) {
@@ -406,18 +416,12 @@ static int udp_recv_send(int server_fd)
 	}
 
 	/* Reply from original destination address. */
-	fd = socket(dst_addr->ss_family, SOCK_DGRAM, 0);
-	if (CHECK(fd < 0, "socket", "failed\n")) {
+	fd = start_server_addr(SOCK_DGRAM, dst_addr, sizeof(*dst_addr), NULL);
+	if (!ASSERT_OK_FD(fd, "start_server_addr")) {
 		log_err("failed to create tx socket");
 		return -1;
 	}
 
-	ret = bind(fd, (struct sockaddr *)dst_addr, sizeof(*dst_addr));
-	if (CHECK(ret, "bind", "failed\n")) {
-		log_err("failed to bind tx socket");
-		goto out;
-	}
-
 	msg.msg_control = NULL;
 	msg.msg_controllen = 0;
 	n = sendmsg(fd, &msg, 0);
@@ -629,9 +633,6 @@ static void run_lookup_prog(const struct test *t)
 	 * BPF socket lookup.
 	 */
 	if (t->reuseport_has_conns) {
-		struct sockaddr_storage addr = {};
-		socklen_t len = sizeof(addr);
-
 		/* Add an extra socket to reuseport group */
 		reuse_conn_fd = make_server(t->sotype, t->listen_at.ip,
 					    t->listen_at.port,
@@ -639,12 +640,9 @@ static void run_lookup_prog(const struct test *t)
 		if (reuse_conn_fd < 0)
 			goto close;
 
-		/* Connect the extra socket to itself */
-		err = getsockname(reuse_conn_fd, (void *)&addr, &len);
-		if (CHECK(err, "getsockname", "errno %d\n", errno))
-			goto close;
-
-		err = connect(reuse_conn_fd, (void *)&addr, len);
-		if (CHECK(err, "connect", "errno %d\n", errno))
+		/* Connect the extra socket to itself */
+		err = connect_fd_to_fd(reuse_conn_fd, reuse_conn_fd, 0);
+		if (!ASSERT_OK(err, "connect_fd_to_fd"))
 			goto close;
 	}
@@ -994,7 +992,7 @@ static void drop_on_reuseport(const struct test *t)
 	err = update_lookup_map(t->sock_map, SERVER_A, server1);
 	if (err)
-		goto detach;
+		goto close_srv1;
 
 	/* second server on destination address we should never reach */
 	server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port,

tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c

@@ -222,7 +222,7 @@ static void test_xdp_adjust_frags_tail_grow(void)
 	prog = bpf_object__next_program(obj, NULL);
 	if (bpf_object__load(obj))
-		return;
+		goto out;
 
 	prog_fd = bpf_program__fd(prog);

tools/testing/selftests/bpf/progs/nested_trust_failure.c

@@ -31,14 +31,6 @@ int BPF_PROG(test_invalid_nested_user_cpus, struct task_struct *task, u64 clone_
 	return 0;
 }
 
-SEC("tp_btf/task_newtask")
-__failure __msg("R1 must have zero offset when passed to release func or trusted arg to kfunc")
-int BPF_PROG(test_invalid_nested_offset, struct task_struct *task, u64 clone_flags)
-{
-	bpf_cpumask_first_zero(&task->cpus_mask);
-	return 0;
-}
-
 /* Although R2 is of type sk_buff but sock_common is expected, we will hit untrusted ptr first. */
 SEC("tp_btf/tcp_probe")
 __failure __msg("R2 type=untrusted_ptr_ expected=ptr_, trusted_ptr_, rcu_ptr_")

tools/testing/selftests/bpf/progs/nested_trust_success.c

@@ -32,3 +32,11 @@ int BPF_PROG(test_skb_field, struct sock *sk, struct sk_buff *skb)
 	bpf_sk_storage_get(&sk_storage_map, skb->sk, 0, 0);
 	return 0;
 }
+
+SEC("tp_btf/task_newtask")
+__success
+int BPF_PROG(test_nested_offset, struct task_struct *task, u64 clone_flags)
+{
+	bpf_cpumask_first_zero(&task->cpus_mask);
+	return 0;
+}

tools/testing/selftests/bpf/progs/xdp_flowtable.c

@@ -58,6 +58,10 @@ static bool xdp_flowtable_offload_check_tcp_state(void *ports, void *data_end,
 	return true;
 }
 
+struct flow_ports___local {
+	__be16 source, dest;
+} __attribute__((preserve_access_index));
+
 SEC("xdp.frags")
 int xdp_flowtable_do_lookup(struct xdp_md *ctx)
 {
@@ -69,7 +73,7 @@ int xdp_flowtable_do_lookup(struct xdp_md *ctx)
 	};
 	void *data = (void *)(long)ctx->data;
 	struct ethhdr *eth = data;
-	struct flow_ports *ports;
+	struct flow_ports___local *ports;
 	__u32 *val, key = 0;
 
 	if (eth + 1 > data_end)
@@ -79,7 +83,7 @@ int xdp_flowtable_do_lookup(struct xdp_md *ctx)
 	case bpf_htons(ETH_P_IP): {
 		struct iphdr *iph = data + sizeof(*eth);
 
-		ports = (struct flow_ports *)(iph + 1);
+		ports = (struct flow_ports___local *)(iph + 1);
 		if (ports + 1 > data_end)
 			return XDP_PASS;
 
@@ -106,7 +110,7 @@ int xdp_flowtable_do_lookup(struct xdp_md *ctx)
 		struct in6_addr *dst = (struct in6_addr *)tuple.ipv6_dst;
 		struct ipv6hdr *ip6h = data + sizeof(*eth);
 
-		ports = (struct flow_ports *)(ip6h + 1);
+		ports = (struct flow_ports___local *)(ip6h + 1);
 		if (ports + 1 > data_end)
 			return XDP_PASS;

tools/testing/selftests/bpf/test_progs.h

@@ -377,6 +377,15 @@ int test__join_cgroup(const char *path);
 	___ok;								\
 })
 
+#define ASSERT_OK_FD(fd, name) ({					\
+	static int duration = 0;					\
+	int ___fd = (fd);						\
+	bool ___ok = ___fd >= 0;					\
+	CHECK(!___ok, (name), "unexpected fd: %d (errno %d)\n",		\
+	      ___fd, errno);						\
+	___ok;								\
+})
+
 #define SYS(goto_label, fmt, ...)					\
 	({								\
 		char cmd[1024];						\

tools/testing/selftests/bpf/verifier/calls.c

@@ -76,7 +76,7 @@
 	},
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	.result = REJECT,
-	.errstr = "R1 must have zero offset when passed to release func or trusted arg to kfunc",
+	.errstr = "arg#0 expected pointer to ctx, but got PTR",
 	.fixup_kfunc_btf_id = {
 		{ "bpf_kfunc_call_test_pass_ctx", 2 },
 	},
@@ -275,6 +275,19 @@
 	.result_unpriv = REJECT,
 	.result = ACCEPT,
 },
+{
+	"calls: invalid kfunc call: must provide (attach_prog_fd, btf_id) pair when freplace",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.prog_type = BPF_PROG_TYPE_EXT,
+	.result = REJECT,
+	.errstr = "Tracing programs must provide btf_id",
+	.fixup_kfunc_btf_id = {
+		{ "bpf_dynptr_from_skb", 0 },
+	},
+},
 {
 	"calls: basic sanity",
 	.insns = {