Daniel Borkmann says:

====================
pull-request: bpf 2022-01-19

We've added 12 non-merge commits during the last 8 day(s) which contain
a total of 12 files changed, 262 insertions(+), 64 deletions(-).

The main changes are:

1) Various verifier fixes mainly around register offset handling when
   passed to helper functions, from Daniel Borkmann.

2) Fix XDP BPF link handling to assert program type,
   from Toke Høiland-Jørgensen.

3) Fix regression in mount parameter handling for BPF fs,
   from Yafang Shao.

4) Fix incorrect integer literal when marking scratched stack slots
   in verifier, from Christy Lee.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf, selftests: Add ringbuf memory type confusion test
  bpf, selftests: Add various ringbuf tests with invalid offset
  bpf: Fix ringbuf memory type confusion when passing to helpers
  bpf: Fix out of bounds access for ringbuf helpers
  bpf: Generally fix helper register offset check
  bpf: Mark PTR_TO_FUNC register initially with zero offset
  bpf: Generalize check_ctx_reg for reuse with other types
  bpf: Fix incorrect integer literal used for marking scratched stack.
  bpf/selftests: Add check for updating XDP bpf_link with wrong program type
  bpf/selftests: convert xdp_link test to ASSERT_* macros
  xdp: check prog type before updating BPF link
  bpf: Fix mount source show for bpffs
====================

Link: https://lore.kernel.org/r/20220119011825.9082-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2022-01-18 19:28:28 -08:00
commit 99845220d3
12 changed files with 262 additions and 64 deletions
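The ringbuf-related verifier fixes summarized in item 1 are easiest to see from the program pattern they reject. Below is a minimal, hand-written sketch (not one of the selftests added in this series) of a program that hands a modified ringbuf reservation pointer to a helper; with these fixes the verifier refuses it with a "dereference of modified alloc_mem ptr" error, matching the new verifier test near the end of this page.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
    __uint(type, BPF_MAP_TYPE_RINGBUF);
    __uint(max_entries, 1 << 12);
} rb SEC(".maps");

SEC("xdp")
int ringbuf_bad_offset(struct xdp_md *ctx)
{
    char *p = bpf_ringbuf_reserve(&rb, 8, 0);

    if (!p)
        return XDP_PASS;

    /* Submitting p + 1 instead of p: the reservation pointer now carries
     * a non-zero offset, which the fixed verifier rejects instead of
     * letting the helper operate out of bounds.
     */
    bpf_ringbuf_submit(p + 1, 0);
    return XDP_PASS;
}

char _license[] SEC("license") = "GPL";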

View File

@ -316,7 +316,12 @@ enum bpf_type_flag {
*/
MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS),
__BPF_TYPE_LAST_FLAG = MEM_RDONLY,
/* MEM was "allocated" from a different helper, and cannot be mixed
* with regular non-MEM_ALLOC'ed MEM types.
*/
MEM_ALLOC = BIT(2 + BPF_BASE_TYPE_BITS),
__BPF_TYPE_LAST_FLAG = MEM_ALLOC,
};
/* Max number of base types. */
@ -400,7 +405,7 @@ enum bpf_return_type {
RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_ALLOC_MEM,
RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM,
RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
/* This must be the last entry. Its purpose is to ensure the enum is

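MEM_ALLOC is a modifier bit OR-ed on top of a base register type rather than a new base type, so composed values such as PTR_TO_MEM | MEM_ALLOC still reduce to PTR_TO_MEM wherever only the base type matters. A small self-contained sketch of that encoding (the constants and macros below are illustrative stand-ins, not the kernel's actual definitions):

#include <stdio.h>

/* Illustrative stand-ins for the kernel's enum bpf_reg_type / bpf_type_flag. */
enum { PTR_TO_MEM = 1 };
#define BASE_TYPE_BITS  8
#define BASE_TYPE_MASK  ((1u << BASE_TYPE_BITS) - 1)
#define MEM_RDONLY      (1u << (BASE_TYPE_BITS + 1))
#define MEM_ALLOC       (1u << (BASE_TYPE_BITS + 2))
#define base_type(t)    ((t) & BASE_TYPE_MASK)

int main(void)
{
    unsigned int t = PTR_TO_MEM | MEM_ALLOC;

    /* The base type survives the flag; the flag can still be tested. */
    printf("base=%u rdonly=%d alloc=%d\n",
           base_type(t), !!(t & MEM_RDONLY), !!(t & MEM_ALLOC));
    return 0;
}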
View File

@ -519,8 +519,8 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
int check_ctx_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno);
int check_ptr_off_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
u32 regno, u32 mem_size);

View File

@ -5686,7 +5686,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
i, btf_type_str(t));
return -EINVAL;
}
if (check_ctx_reg(env, reg, regno))
if (check_ptr_off_reg(env, reg, regno))
return -EINVAL;
} else if (is_kfunc && (reg->type == PTR_TO_BTF_ID || reg2btf_ids[reg->type])) {
const struct btf_type *reg_ref_t;

View File

@ -648,12 +648,22 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
int opt;
opt = fs_parse(fc, bpf_fs_parameters, param, &result);
if (opt < 0)
if (opt < 0) {
/* We might like to report bad mount options here, but
* traditionally we've ignored all mount options, so we'd
* better continue to ignore non-existing options for bpf.
*/
return opt == -ENOPARAM ? 0 : opt;
if (opt == -ENOPARAM) {
opt = vfs_parse_fs_param_source(fc, param);
if (opt != -ENOPARAM)
return opt;
return 0;
}
if (opt < 0)
return opt;
}
switch (opt) {
case OPT_MODE:

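The user-visible effect of this bpffs fix is that the source string given at mount time is handed to vfs_parse_fs_param_source() instead of being dropped, so it shows up in /proc/mounts again. A rough userspace sketch (needs root; the mount point is the conventional /sys/fs/bpf and is assumed not to be mounted yet):

#include <stdio.h>
#include <stdlib.h>
#include <sys/mount.h>

int main(void)
{
    /* "bpffs" is the source string; with the fix it is reported in
     * /proc/mounts rather than being discarded.
     */
    if (mount("bpffs", "/sys/fs/bpf", "bpf", 0, NULL)) {
        perror("mount");
        return 1;
    }
    /* Show what the kernel now reports for the new mount. */
    return system("grep bpf /proc/mounts") != 0;
}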
View File

@ -570,6 +570,8 @@ static const char *reg_type_str(struct bpf_verifier_env *env,
if (type & MEM_RDONLY)
strncpy(prefix, "rdonly_", 16);
if (type & MEM_ALLOC)
strncpy(prefix, "alloc_", 16);
snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
prefix, str[base_type(type)], postfix);
@ -616,7 +618,7 @@ static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
env->scratched_stack_slots |= 1UL << spi;
env->scratched_stack_slots |= 1ULL << spi;
}
static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
@ -637,14 +639,14 @@ static bool verifier_state_scratched(const struct bpf_verifier_env *env)
static void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
env->scratched_regs = 0U;
env->scratched_stack_slots = 0UL;
env->scratched_stack_slots = 0ULL;
}
/* Used for printing the entire verifier state. */
static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
env->scratched_regs = ~0U;
env->scratched_stack_slots = ~0UL;
env->scratched_stack_slots = ~0ULL;
}
/* The reg state of a pointer or a bounded scalar was saved when
@ -3969,16 +3971,17 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env,
}
#endif
int check_ctx_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno)
static int __check_ptr_off_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno,
bool fixed_off_ok)
{
/* Access to ctx or passing it to a helper is only allowed in
* its original, unmodified form.
/* Access to this pointer-typed register or passing it to a helper
* is only allowed in its original, unmodified form.
*/
if (reg->off) {
verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
regno, reg->off);
if (!fixed_off_ok && reg->off) {
verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
reg_type_str(env, reg->type), regno, reg->off);
return -EACCES;
}
@ -3986,13 +3989,20 @@ int check_ctx_reg(struct bpf_verifier_env *env,
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
verbose(env, "variable %s access var_off=%s disallowed\n",
reg_type_str(env, reg->type), tn_buf);
return -EACCES;
}
return 0;
}
int check_ptr_off_reg(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg, int regno)
{
return __check_ptr_off_reg(env, reg, regno, false);
}
static int __check_buffer_access(struct bpf_verifier_env *env,
const char *buf_info,
const struct bpf_reg_state *reg,
@ -4437,7 +4447,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
return -EACCES;
}
err = check_ctx_reg(env, reg, regno);
err = check_ptr_off_reg(env, reg, regno);
if (err < 0)
return err;
@ -5127,6 +5137,7 @@ static const struct bpf_reg_types mem_types = {
PTR_TO_MAP_KEY,
PTR_TO_MAP_VALUE,
PTR_TO_MEM,
PTR_TO_MEM | MEM_ALLOC,
PTR_TO_BUF,
},
};
@ -5144,7 +5155,7 @@ static const struct bpf_reg_types int_ptr_types = {
static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM } };
static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM | MEM_ALLOC } };
static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
@ -5244,12 +5255,6 @@ found:
kernel_type_name(btf_vmlinux, *arg_btf_id));
return -EACCES;
}
if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
regno);
return -EACCES;
}
}
return 0;
@ -5304,10 +5309,33 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
if (err)
return err;
if (type == PTR_TO_CTX) {
err = check_ctx_reg(env, reg, regno);
switch ((u32)type) {
case SCALAR_VALUE:
/* Pointer types where reg offset is explicitly allowed: */
case PTR_TO_PACKET:
case PTR_TO_PACKET_META:
case PTR_TO_MAP_KEY:
case PTR_TO_MAP_VALUE:
case PTR_TO_MEM:
case PTR_TO_MEM | MEM_RDONLY:
case PTR_TO_MEM | MEM_ALLOC:
case PTR_TO_BUF:
case PTR_TO_BUF | MEM_RDONLY:
case PTR_TO_STACK:
/* Some of the argument types nevertheless require a
* zero register offset.
*/
if (arg_type == ARG_PTR_TO_ALLOC_MEM)
goto force_off_check;
break;
/* All the rest must be rejected: */
default:
force_off_check:
err = __check_ptr_off_reg(env, reg, regno,
type == PTR_TO_BTF_ID);
if (err < 0)
return err;
break;
}
skip_type_check:
@ -9507,9 +9535,13 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
return 0;
}
if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
mark_reg_known_zero(env, regs, insn->dst_reg);
/* All special src_reg cases are listed below. From this point onwards
* we either succeed and assign a corresponding dst_reg->type after
* zeroing the offset, or fail and reject the program.
*/
mark_reg_known_zero(env, regs, insn->dst_reg);
if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
dst_reg->type = aux->btf_var.reg_type;
switch (base_type(dst_reg->type)) {
case PTR_TO_MEM:
@ -9547,7 +9579,6 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
}
map = env->used_maps[aux->map_index];
mark_reg_known_zero(env, regs, insn->dst_reg);
dst_reg->map_ptr = map;
if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
@ -9651,7 +9682,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return err;
}
err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
if (err < 0)
return err;

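Of the verifier changes above, the scratched-stack-slot one is the smallest but easy to misread: env->scratched_stack_slots is a u64, so building the bit mask with a 1UL literal only works where long is 64 bits. A standalone sketch of the difference (the 1UL line is only actually wrong when compiled for a 32-bit target, where shifting by a stack-slot index above 31 is undefined):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t slots_ull = 0, slots_ul = 0;
    unsigned int spi = 40;     /* a stack slot index above bit 31 */

    slots_ull |= 1ULL << spi;  /* always a 64-bit shift */
    slots_ul  |= 1UL << spi;   /* on ILP32, long is 32 bits: undefined */

    printf("1ULL<<%u = %#llx, 1UL<<%u = %#llx\n",
           spi, (unsigned long long)slots_ull,
           spi, (unsigned long long)slots_ul);
    return 0;
}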
View File

@ -8981,6 +8981,12 @@ static int bpf_xdp_link_update(struct bpf_link *link, struct bpf_prog *new_prog,
goto out_unlock;
}
old_prog = link->prog;
if (old_prog->type != new_prog->type ||
old_prog->expected_attach_type != new_prog->expected_attach_type) {
err = -EINVAL;
goto out_unlock;
}
if (old_prog == new_prog) {
/* no-op, don't disturb drivers */
bpf_prog_put(new_prog);

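From userspace, the new check in bpf_xdp_link_update() means bpf_link__update_program() can no longer swap a program of a different type into an existing XDP link, which is exactly what the updated selftest below exercises. A condensed, hypothetical sketch of the same flow outside the selftest harness (the object file name and program names are placeholders modeled on test_xdp_link.c):

#include <bpf/libbpf.h>
#include <net/if.h>
#include <stdio.h>

int main(void)
{
    struct bpf_object *obj = bpf_object__open_file("xdp_and_tc.bpf.o", NULL);
    struct bpf_program *xdp, *tc;
    struct bpf_link *link;
    int ifindex = if_nametoindex("lo");
    int err;

    if (!obj || bpf_object__load(obj))
        return 1;
    xdp = bpf_object__find_program_by_name(obj, "xdp_handler");
    tc = bpf_object__find_program_by_name(obj, "tc_handler");

    link = bpf_program__attach_xdp(xdp, ifindex);
    if (!link)
        return 1;

    /* With the fix, replacing the XDP program behind the link with a
     * SEC("tc") program is rejected (-EINVAL) instead of being installed.
     */
    err = bpf_link__update_program(link, tc);
    printf("update with wrong prog type: %d (expected a negative error)\n", err);

    bpf_link__destroy(link);
    bpf_object__close(obj);
    return 0;
}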
View File

@ -10,6 +10,7 @@
#include "test_d_path.skel.h"
#include "test_d_path_check_rdonly_mem.skel.h"
#include "test_d_path_check_types.skel.h"
static int duration;
@ -167,6 +168,16 @@ static void test_d_path_check_rdonly_mem(void)
test_d_path_check_rdonly_mem__destroy(skel);
}
static void test_d_path_check_types(void)
{
struct test_d_path_check_types *skel;
skel = test_d_path_check_types__open_and_load();
ASSERT_ERR_PTR(skel, "unexpected_load_passing_wrong_type");
test_d_path_check_types__destroy(skel);
}
void test_d_path(void)
{
if (test__start_subtest("basic"))
@ -174,4 +185,7 @@ void test_d_path(void)
if (test__start_subtest("check_rdonly_mem"))
test_d_path_check_rdonly_mem();
if (test__start_subtest("check_alloc_mem"))
test_d_path_check_types();
}

View File

@ -8,46 +8,47 @@
void serial_test_xdp_link(void)
{
__u32 duration = 0, id1, id2, id0 = 0, prog_fd1, prog_fd2, err;
DECLARE_LIBBPF_OPTS(bpf_xdp_set_link_opts, opts, .old_fd = -1);
struct test_xdp_link *skel1 = NULL, *skel2 = NULL;
__u32 id1, id2, id0 = 0, prog_fd1, prog_fd2;
struct bpf_link_info link_info;
struct bpf_prog_info prog_info;
struct bpf_link *link;
int err;
__u32 link_info_len = sizeof(link_info);
__u32 prog_info_len = sizeof(prog_info);
skel1 = test_xdp_link__open_and_load();
if (CHECK(!skel1, "skel_load", "skeleton open and load failed\n"))
if (!ASSERT_OK_PTR(skel1, "skel_load"))
goto cleanup;
prog_fd1 = bpf_program__fd(skel1->progs.xdp_handler);
skel2 = test_xdp_link__open_and_load();
if (CHECK(!skel2, "skel_load", "skeleton open and load failed\n"))
if (!ASSERT_OK_PTR(skel2, "skel_load"))
goto cleanup;
prog_fd2 = bpf_program__fd(skel2->progs.xdp_handler);
memset(&prog_info, 0, sizeof(prog_info));
err = bpf_obj_get_info_by_fd(prog_fd1, &prog_info, &prog_info_len);
if (CHECK(err, "fd_info1", "failed %d\n", -errno))
if (!ASSERT_OK(err, "fd_info1"))
goto cleanup;
id1 = prog_info.id;
memset(&prog_info, 0, sizeof(prog_info));
err = bpf_obj_get_info_by_fd(prog_fd2, &prog_info, &prog_info_len);
if (CHECK(err, "fd_info2", "failed %d\n", -errno))
if (!ASSERT_OK(err, "fd_info2"))
goto cleanup;
id2 = prog_info.id;
/* set initial prog attachment */
err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, prog_fd1, XDP_FLAGS_REPLACE, &opts);
if (CHECK(err, "fd_attach", "initial prog attach failed: %d\n", err))
if (!ASSERT_OK(err, "fd_attach"))
goto cleanup;
/* validate prog ID */
err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
CHECK(err || id0 != id1, "id1_check",
"loaded prog id %u != id1 %u, err %d", id0, id1, err);
if (!ASSERT_OK(err, "id1_check_err") || !ASSERT_EQ(id0, id1, "id1_check_val"))
goto cleanup;
/* BPF link is not allowed to replace prog attachment */
link = bpf_program__attach_xdp(skel1->progs.xdp_handler, IFINDEX_LO);
@ -62,7 +63,7 @@ void serial_test_xdp_link(void)
/* detach BPF program */
opts.old_fd = prog_fd1;
err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, -1, XDP_FLAGS_REPLACE, &opts);
if (CHECK(err, "prog_detach", "failed %d\n", err))
if (!ASSERT_OK(err, "prog_detach"))
goto cleanup;
/* now BPF link should attach successfully */
@ -73,24 +74,23 @@ void serial_test_xdp_link(void)
/* validate prog ID */
err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
if (CHECK(err || id0 != id1, "id1_check",
"loaded prog id %u != id1 %u, err %d", id0, id1, err))
if (!ASSERT_OK(err, "id1_check_err") || !ASSERT_EQ(id0, id1, "id1_check_val"))
goto cleanup;
/* BPF prog attach is not allowed to replace BPF link */
opts.old_fd = prog_fd1;
err = bpf_set_link_xdp_fd_opts(IFINDEX_LO, prog_fd2, XDP_FLAGS_REPLACE, &opts);
if (CHECK(!err, "prog_attach_fail", "unexpected success\n"))
if (!ASSERT_ERR(err, "prog_attach_fail"))
goto cleanup;
/* Can't force-update when BPF link is active */
err = bpf_set_link_xdp_fd(IFINDEX_LO, prog_fd2, 0);
if (CHECK(!err, "prog_update_fail", "unexpected success\n"))
if (!ASSERT_ERR(err, "prog_update_fail"))
goto cleanup;
/* Can't force-detach when BPF link is active */
err = bpf_set_link_xdp_fd(IFINDEX_LO, -1, 0);
if (CHECK(!err, "prog_detach_fail", "unexpected success\n"))
if (!ASSERT_ERR(err, "prog_detach_fail"))
goto cleanup;
/* BPF link is not allowed to replace another BPF link */
@ -110,40 +110,39 @@ void serial_test_xdp_link(void)
skel2->links.xdp_handler = link;
err = bpf_get_link_xdp_id(IFINDEX_LO, &id0, 0);
if (CHECK(err || id0 != id2, "id2_check",
"loaded prog id %u != id2 %u, err %d", id0, id1, err))
if (!ASSERT_OK(err, "id2_check_err") || !ASSERT_EQ(id0, id2, "id2_check_val"))
goto cleanup;
/* updating program under active BPF link works as expected */
err = bpf_link__update_program(link, skel1->progs.xdp_handler);
if (CHECK(err, "link_upd", "failed: %d\n", err))
if (!ASSERT_OK(err, "link_upd"))
goto cleanup;
memset(&link_info, 0, sizeof(link_info));
err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len);
if (CHECK(err, "link_info", "failed: %d\n", err))
if (!ASSERT_OK(err, "link_info"))
goto cleanup;
CHECK(link_info.type != BPF_LINK_TYPE_XDP, "link_type",
"got %u != exp %u\n", link_info.type, BPF_LINK_TYPE_XDP);
CHECK(link_info.prog_id != id1, "link_prog_id",
"got %u != exp %u\n", link_info.prog_id, id1);
CHECK(link_info.xdp.ifindex != IFINDEX_LO, "link_ifindex",
"got %u != exp %u\n", link_info.xdp.ifindex, IFINDEX_LO);
ASSERT_EQ(link_info.type, BPF_LINK_TYPE_XDP, "link_type");
ASSERT_EQ(link_info.prog_id, id1, "link_prog_id");
ASSERT_EQ(link_info.xdp.ifindex, IFINDEX_LO, "link_ifindex");
/* updating program under active BPF link with different type fails */
err = bpf_link__update_program(link, skel1->progs.tc_handler);
if (!ASSERT_ERR(err, "link_upd_invalid"))
goto cleanup;
err = bpf_link__detach(link);
if (CHECK(err, "link_detach", "failed %d\n", err))
if (!ASSERT_OK(err, "link_detach"))
goto cleanup;
memset(&link_info, 0, sizeof(link_info));
err = bpf_obj_get_info_by_fd(bpf_link__fd(link), &link_info, &link_info_len);
if (CHECK(err, "link_info", "failed: %d\n", err))
goto cleanup;
CHECK(link_info.prog_id != id1, "link_prog_id",
"got %u != exp %u\n", link_info.prog_id, id1);
ASSERT_OK(err, "link_info");
ASSERT_EQ(link_info.prog_id, id1, "link_prog_id");
/* ifindex should be zeroed out */
CHECK(link_info.xdp.ifindex != 0, "link_ifindex",
"got %u != exp %u\n", link_info.xdp.ifindex, 0);
ASSERT_EQ(link_info.xdp.ifindex, 0, "link_ifindex");
cleanup:
test_xdp_link__destroy(skel1);

View File

@ -0,0 +1,32 @@
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
extern const int bpf_prog_active __ksym;
struct {
__uint(type, BPF_MAP_TYPE_RINGBUF);
__uint(max_entries, 1 << 12);
} ringbuf SEC(".maps");
SEC("fentry/security_inode_getattr")
int BPF_PROG(d_path_check_rdonly_mem, struct path *path, struct kstat *stat,
__u32 request_mask, unsigned int query_flags)
{
void *active;
u32 cpu;
cpu = bpf_get_smp_processor_id();
active = (void *)bpf_per_cpu_ptr(&bpf_prog_active, cpu);
if (active) {
/* FAIL here! 'active' points to 'regular' memory. It
* cannot be submitted to ring buffer.
*/
bpf_ringbuf_submit(active, 0);
}
return 0;
}
char _license[] SEC("license") = "GPL";

View File

@ -10,3 +10,9 @@ int xdp_handler(struct xdp_md *xdp)
{
return 0;
}
SEC("tc")
int tc_handler(struct __sk_buff *skb)
{
return 0;
}

View File

@ -0,0 +1,95 @@
{
"ringbuf: invalid reservation offset 1",
.insns = {
/* reserve 8 byte ringbuf memory */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
/* store a pointer to the reserved memory in R6 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
/* check whether the reservation was successful */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
/* spill R6(mem) into the stack */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
/* fill it back in R7 */
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
/* should be able to access *(R7) = 0 */
BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
/* submit the reserved ringbuf memory */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
/* add invalid offset to reserved ringbuf memory */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xcafe),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_ringbuf = { 1 },
.result = REJECT,
.errstr = "dereference of modified alloc_mem ptr R1",
},
{
"ringbuf: invalid reservation offset 2",
.insns = {
/* reserve 8 byte ringbuf memory */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
/* store a pointer to the reserved memory in R6 */
BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
/* check whether the reservation was successful */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
/* spill R6(mem) into the stack */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_6, -8),
/* fill it back in R7 */
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_10, -8),
/* add invalid offset to reserved ringbuf memory */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 0xcafe),
/* should be able to access *(R7) = 0 */
BPF_ST_MEM(BPF_DW, BPF_REG_7, 0, 0),
/* submit the reserved ringbuf memory */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_ringbuf = { 1 },
.result = REJECT,
.errstr = "R7 min value is outside of the allowed memory range",
},
{
"ringbuf: check passing rb mem to helpers",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
/* reserve 8 byte ringbuf memory */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 8),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
/* check whether the reservation was successful */
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
/* pass allocated ring buffer memory to fib lookup */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3, 8),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_fib_lookup),
/* submit the ringbuf memory */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_ringbuf = { 2 },
.prog_type = BPF_PROG_TYPE_XDP,
.result = ACCEPT,
},

View File

@ -84,7 +84,7 @@
},
.fixup_map_ringbuf = { 1 },
.result = REJECT,
.errstr = "R0 pointer arithmetic on mem_or_null prohibited",
.errstr = "R0 pointer arithmetic on alloc_mem_or_null prohibited",
},
{
"check corrupted spill/fill",