mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2025-01-18 11:54:37 +08:00
Daniel Borkmann says:

====================
pull-request: bpf 2021-12-16

We've added 15 non-merge commits during the last 7 day(s) which contain
a total of 12 files changed, 434 insertions(+), 30 deletions(-).

The main changes are:

1) Fix incorrect verifier state pruning behavior for <8B register spill/fill,
   from Paul Chaignon.

2) Fix x86-64 JIT's extable handling for fentry/fexit when return pointer
   is an ERR_PTR(), from Alexei Starovoitov.

3) Fix 3 different possibilities that BPF verifier missed where unprivileged
   could leak kernel addresses, from Daniel Borkmann.

4) Fix xsk's poll behavior under need_wakeup flag, from Magnus Karlsson.

5) Fix an oob-write in test_verifier due to a missed MAX_NR_MAPS bump,
   from Kumar Kartikeya Dwivedi.

6) Fix a race in test_btf_skc_cls_ingress selftest, from Martin KaFai Lau.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf, selftests: Fix racing issue in btf_skc_cls_ingress test
  selftest/bpf: Add a test that reads various addresses.
  bpf: Fix extable address check.
  bpf: Fix extable fixup offset.
  bpf, selftests: Add test case trying to taint map value pointer
  bpf: Make 32->64 bounds propagation slightly more robust
  bpf: Fix signed bounds propagation after mov32
  bpf, selftests: Update test case for atomic cmpxchg on r0 with pointer
  bpf: Fix kernel address leakage in atomic cmpxchg's r0 aux reg
  bpf, selftests: Add test case for atomic fetch on spilled pointer
  bpf: Fix kernel address leakage in atomic fetch
  selftests/bpf: Fix OOB write in test_verifier
  xsk: Do not sleep in poll() when need_wakeup set
  selftests/bpf: Tests for state pruning with u32 spill/fill
  bpf: Fix incorrect state pruning for <8B spill/fill
====================

Link: https://lore.kernel.org/r/20211216210005.13815-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 0c3e247460
@@ -1252,19 +1252,54 @@ st:			if (is_imm8(insn->off))
 	case BPF_LDX | BPF_MEM | BPF_DW:
 	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
 		if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
-			/* test src_reg, src_reg */
-			maybe_emit_mod(&prog, src_reg, src_reg, true); /* always 1 byte */
-			EMIT2(0x85, add_2reg(0xC0, src_reg, src_reg));
-			/* jne start_of_ldx */
-			EMIT2(X86_JNE, 0);
+			/* Though the verifier prevents negative insn->off in BPF_PROBE_MEM
+			 * add abs(insn->off) to the limit to make sure that negative
+			 * offset won't be an issue.
+			 * insn->off is s16, so it won't affect valid pointers.
+			 */
+			u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
+			u8 *end_of_jmp1, *end_of_jmp2;
+
+			/* Conservatively check that src_reg + insn->off is a kernel address:
+			 * 1. src_reg + insn->off >= limit
+			 * 2. src_reg + insn->off doesn't become small positive.
+			 * Cannot do src_reg + insn->off >= limit in one branch,
+			 * since it needs two spare registers, but JIT has only one.
+			 */
+
+			/* movabsq r11, limit */
+			EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
+			EMIT((u32)limit, 4);
+			EMIT(limit >> 32, 4);
+			/* cmp src_reg, r11 */
+			maybe_emit_mod(&prog, src_reg, AUX_REG, true);
+			EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
+			/* if unsigned '<' goto end_of_jmp2 */
+			EMIT2(X86_JB, 0);
+			end_of_jmp1 = prog;
+
+			/* mov r11, src_reg */
+			emit_mov_reg(&prog, true, AUX_REG, src_reg);
+			/* add r11, insn->off */
+			maybe_emit_1mod(&prog, AUX_REG, true);
+			EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
+			/* jmp if not carry to start_of_ldx
+			 * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
+			 * that has to be rejected.
+			 */
+			EMIT2(0x73 /* JNC */, 0);
+			end_of_jmp2 = prog;
+
 			/* xor dst_reg, dst_reg */
 			emit_mov_imm32(&prog, false, dst_reg, 0);
 			/* jmp byte_after_ldx */
 			EMIT2(0xEB, 0);
 
-			/* populate jmp_offset for JNE above */
-			temp[4] = prog - temp - 5 /* sizeof(test + jne) */;
+			/* populate jmp_offset for JB above to jump to xor dst_reg */
+			end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
+			/* populate jmp_offset for JNC above to jump to start_of_ldx */
 			start_of_ldx = prog;
+			end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
 		}
 		emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
 		if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {

@@ -1305,7 +1340,7 @@ st:			if (is_imm8(insn->off))
 			 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
 			 * of 4 bytes will be ignored and rbx will be zero inited.
 			 */
-			ex->fixup = (prog - temp) | (reg2pt_regs[dst_reg] << 8);
+			ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
 		}
 		break;
 
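The emitted instruction sequence above is easier to follow as plain C. The sketch below is illustrative only: a userspace model, with a made-up LIMIT constant standing in for TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off), and insn->off assumed non-negative as the hunk's own comment notes. It is not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off). */
#define LIMIT 0x800000000000ull

/* Model of the emitted check: cmp+JB rejects anything below the limit,
 * add+JNC rejects sums that wrap past 2^64 into small positive values,
 * and only then is the real load attempted (extable-protected in the JIT).
 */
static uint64_t probe_mem_model(uint64_t src_reg, uint16_t off)
{
	uint64_t addr = src_reg + off;

	if (src_reg < LIMIT)	/* JB taken: not a kernel address, dst_reg is zeroed */
		return 0;
	if (addr < src_reg)	/* carry set: wrapped around, also rejected */
		return 0;
	return *(volatile uint64_t *)(uintptr_t)addr;	/* the guarded BPF_LDX itself */
}

int main(void)
{
	/* 0xcafe4a11 sits below the limit, so the model refuses to dereference it. */
	printf("%llu\n", (unsigned long long)probe_mem_model(0xcafe4a11, 0));
	return 0;
}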
@@ -1366,22 +1366,28 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
 	reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off);
 }
 
+static bool __reg32_bound_s64(s32 a)
+{
+	return a >= 0 && a <= S32_MAX;
+}
+
 static void __reg_assign_32_into_64(struct bpf_reg_state *reg)
 {
 	reg->umin_value = reg->u32_min_value;
 	reg->umax_value = reg->u32_max_value;
-	/* Attempt to pull 32-bit signed bounds into 64-bit bounds
-	 * but must be positive otherwise set to worse case bounds
-	 * and refine later from tnum.
+
+	/* Attempt to pull 32-bit signed bounds into 64-bit bounds but must
+	 * be positive otherwise set to worse case bounds and refine later
+	 * from tnum.
 	 */
-	if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0)
-		reg->smax_value = reg->s32_max_value;
-	else
-		reg->smax_value = U32_MAX;
-	if (reg->s32_min_value >= 0)
-		reg->smin_value = reg->s32_min_value;
-	else
-		reg->smin_value = 0;
+	if (__reg32_bound_s64(reg->s32_min_value) &&
+	    __reg32_bound_s64(reg->s32_max_value)) {
+		reg->smin_value = reg->s32_min_value;
+		reg->smax_value = reg->s32_max_value;
+	} else {
+		reg->smin_value = 0;
+		reg->smax_value = U32_MAX;
+	}
 }
 
 static void __reg_combine_32_into_64(struct bpf_reg_state *reg)
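For readers who want to experiment with this bounds logic outside the verifier, here is a standalone model of the two helpers in the hunk above. Struct and function names are simplified stand-ins; only the "take both 32-bit signed bounds or neither" decision mirrors the kernel change.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors __reg32_bound_s64(): a 32-bit signed bound may only be promoted
 * into the 64-bit signed bounds when it is provably non-negative.
 */
static int bound32_fits_s64(int32_t a)
{
	return a >= 0 && a <= INT32_MAX;
}

struct bounds64 {
	int64_t smin, smax;
	uint64_t umin, umax;
};

/* Simplified model of __reg_assign_32_into_64(): either both signed bounds
 * come from the 32-bit state, or both fall back to the conservative
 * [0, U32_MAX] range (refined from the tnum later in the real verifier).
 */
static void assign_32_into_64(struct bounds64 *b,
			      int32_t s32_min, int32_t s32_max,
			      uint32_t u32_min, uint32_t u32_max)
{
	b->umin = u32_min;
	b->umax = u32_max;
	if (bound32_fits_s64(s32_min) && bound32_fits_s64(s32_max)) {
		b->smin = s32_min;
		b->smax = s32_max;
	} else {
		b->smin = 0;
		b->smax = UINT32_MAX;
	}
}

int main(void)
{
	struct bounds64 b;

	/* s32 range [-1, 7]: must not be copied as-is, since -1 zero-extends
	 * to 0xffffffff in the 64-bit view of the register.
	 */
	assign_32_into_64(&b, -1, 7, 0, UINT32_MAX);
	printf("smin=%lld smax=%lld\n", (long long)b.smin, (long long)b.smax);
	return 0;
}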
@@ -2379,8 +2385,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
 		 */
 		if (insn->src_reg != BPF_REG_FP)
 			return 0;
-		if (BPF_SIZE(insn->code) != BPF_DW)
-			return 0;
 
 		/* dreg = *(u64 *)[fp - off] was a fill from the stack.
 		 * that [fp - off] slot contains scalar that needs to be

@@ -2403,8 +2407,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
 		/* scalars can only be spilled into stack */
 		if (insn->dst_reg != BPF_REG_FP)
 			return 0;
-		if (BPF_SIZE(insn->code) != BPF_DW)
-			return 0;
 		spi = (-insn->off - 1) / BPF_REG_SIZE;
 		if (spi >= 64) {
 			verbose(env, "BUG spi %d\n", spi);
@@ -4551,9 +4553,16 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
 
 	if (insn->imm == BPF_CMPXCHG) {
 		/* Check comparison of R0 with memory location */
-		err = check_reg_arg(env, BPF_REG_0, SRC_OP);
+		const u32 aux_reg = BPF_REG_0;
+
+		err = check_reg_arg(env, aux_reg, SRC_OP);
 		if (err)
 			return err;
+
+		if (is_pointer_value(env, aux_reg)) {
+			verbose(env, "R%d leaks addr into mem\n", aux_reg);
+			return -EACCES;
+		}
 	}
 
 	if (is_pointer_value(env, insn->src_reg)) {
@@ -4588,13 +4597,19 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
 		load_reg = -1;
 	}
 
-	/* check whether we can read the memory */
+	/* Check whether we can read the memory, with second call for fetch
+	 * case to simulate the register fill.
+	 */
 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-			       BPF_SIZE(insn->code), BPF_READ, load_reg, true);
+			       BPF_SIZE(insn->code), BPF_READ, -1, true);
+	if (!err && load_reg >= 0)
+		err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
+				       BPF_SIZE(insn->code), BPF_READ, load_reg,
+				       true);
 	if (err)
 		return err;
 
-	/* check whether we can write into the same memory */
+	/* Check whether we can write into the same memory. */
 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
 	if (err)
@@ -8308,6 +8323,10 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 						 insn->dst_reg);
 				}
 				zext_32_to_64(dst_reg);
+
+				__update_reg_bounds(dst_reg);
+				__reg_deduce_bounds(dst_reg);
+				__reg_bound_offset(dst_reg);
 			}
 		} else {
 			/* case: R = imm
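The three bounds helpers added after zext_32_to_64() re-derive the 64-bit view of the destination register for the 32-bit mov case. A minimal userspace illustration of why that is needed: zero-extending the sub-register changes which 64-bit signed bounds are still valid.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A register whose 32-bit view lies in the signed range [-1, 0]. */
	int32_t w0 = -1;

	/* What a BPF mov32 ("w1 = w0") actually stores: the zero-extended
	 * 32-bit value, so stale 64-bit signed bounds of [-1, 0] would be
	 * wrong for the destination register.
	 */
	uint64_t r1 = (uint32_t)w0;

	printf("%lld\n", (long long)r1);	/* prints 4294967295, not -1 */
	return 0;
}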
@@ -677,8 +677,6 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
 	struct xdp_sock *xs = xdp_sk(sk);
 	struct xsk_buff_pool *pool;
 
-	sock_poll_wait(file, sock, wait);
-
 	if (unlikely(!xsk_is_bound(xs)))
 		return mask;
 
@@ -690,6 +688,8 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
 		else
 			/* Poll needs to drive Tx also in copy mode */
 			__xsk_sendmsg(sk);
+	} else {
+		sock_poll_wait(file, sock, wait);
 	}
 
 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
@@ -33,6 +33,22 @@ noinline int bpf_testmod_loop_test(int n)
 	return sum;
 }
 
+__weak noinline struct file *bpf_testmod_return_ptr(int arg)
+{
+	static struct file f = {};
+
+	switch (arg) {
+	case 1: return (void *)EINVAL;		/* user addr */
+	case 2: return (void *)0xcafe4a11;	/* user addr */
+	case 3: return (void *)-EINVAL;		/* canonical, but invalid */
+	case 4: return (void *)(1ull << 60);	/* non-canonical and invalid */
+	case 5: return (void *)~(1ull << 30);	/* trigger extable */
+	case 6: return &f;			/* valid addr */
+	case 7: return (void *)((long)&f | 1);	/* kernel tricks */
+	default: return NULL;
+	}
+}
+
 noinline ssize_t
 bpf_testmod_test_read(struct file *file, struct kobject *kobj,
 		      struct bin_attribute *bin_attr,

@@ -43,6 +59,10 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
 		.off = off,
 		.len = len,
 	};
+	int i = 1;
+
+	while (bpf_testmod_return_ptr(i))
+		i++;
 
 	/* This is always true. Use the check to make sure the compiler
 	 * doesn't remove bpf_testmod_loop_test.
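A rough userspace classification of the addresses bpf_testmod_return_ptr() hands back, showing which ones the JIT check from the first hunk would refuse to dereference. Illustrative only: the limit constant below is a stand-in, not the kernel's TASK_SIZE_MAX, and the kernel-side case 6/7 values are omitted since they cannot be reproduced in userspace.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint64_t limit = 0x7ffffffff000ull + 0x1000;	/* illustrative split */
	const uint64_t vals[] = {
		22,			/* (void *)EINVAL: user addr, load gets zeroed */
		0xcafe4a11,		/* user addr, load gets zeroed */
		(uint64_t)-22,		/* (void *)-EINVAL: above the limit, extable path */
		1ull << 60,		/* non-canonical, above the limit, extable path */
		~(1ull << 30),		/* above the limit, triggers extable */
	};

	for (unsigned i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("%#llx -> %s\n", (unsigned long long)vals[i],
		       vals[i] < limit ? "rejected before the load" : "load attempted");
	return 0;
}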
@@ -90,7 +90,7 @@ static void print_err_line(void)
 
 static void test_conn(void)
 {
-	int listen_fd = -1, cli_fd = -1, err;
+	int listen_fd = -1, cli_fd = -1, srv_fd = -1, err;
 	socklen_t addrlen = sizeof(srv_sa6);
 	int srv_port;
 
@@ -112,6 +112,10 @@ static void test_conn(void)
 	if (CHECK_FAIL(cli_fd == -1))
 		goto done;
 
+	srv_fd = accept(listen_fd, NULL, NULL);
+	if (CHECK_FAIL(srv_fd == -1))
+		goto done;
+
 	if (CHECK(skel->bss->listen_tp_sport != srv_port ||
 		  skel->bss->req_sk_sport != srv_port,
 		  "Unexpected sk src port",

@@ -134,11 +138,13 @@ done:
 		close(listen_fd);
 	if (cli_fd != -1)
 		close(cli_fd);
+	if (srv_fd != -1)
+		close(srv_fd);
 }
 
 static void test_syncookie(void)
 {
-	int listen_fd = -1, cli_fd = -1, err;
+	int listen_fd = -1, cli_fd = -1, srv_fd = -1, err;
 	socklen_t addrlen = sizeof(srv_sa6);
 	int srv_port;
 
@@ -161,6 +167,10 @@ static void test_syncookie(void)
 	if (CHECK_FAIL(cli_fd == -1))
 		goto done;
 
+	srv_fd = accept(listen_fd, NULL, NULL);
+	if (CHECK_FAIL(srv_fd == -1))
+		goto done;
+
 	if (CHECK(skel->bss->listen_tp_sport != srv_port,
 		  "Unexpected tp src port",
 		  "listen_tp_sport:%u expected:%u\n",

@@ -188,6 +198,8 @@ done:
 		close(listen_fd);
 	if (cli_fd != -1)
 		close(cli_fd);
+	if (srv_fd != -1)
+		close(srv_fd);
 }
 
 struct test {
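A minimal loopback sketch of the synchronization the test now leans on: accept() only returns once the listener side holds a fully established socket, so assertions that inspect server-side state presumably can no longer race with the handshake. Error handling and the CHECK()/skeleton plumbing are omitted; the IPv4 loopback setup is illustrative, not the selftest's IPv6 code.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
	socklen_t alen = sizeof(addr);
	int listen_fd, cli_fd, srv_fd;

	listen_fd = socket(AF_INET, SOCK_STREAM, 0);
	bind(listen_fd, (struct sockaddr *)&addr, sizeof(addr));
	getsockname(listen_fd, (struct sockaddr *)&addr, &alen);	/* learn the port */
	listen(listen_fd, 1);

	cli_fd = socket(AF_INET, SOCK_STREAM, 0);
	connect(cli_fd, (struct sockaddr *)&addr, sizeof(addr));

	srv_fd = accept(listen_fd, NULL, NULL);	/* handshake fully done here */
	printf("server socket ready: %d\n", srv_fd);

	close(srv_fd);
	close(cli_fd);
	close(listen_fd);
	return 0;
}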
@@ -87,6 +87,18 @@ int BPF_PROG(handle_fexit,
 	return 0;
 }
 
+SEC("fexit/bpf_testmod_return_ptr")
+int BPF_PROG(handle_fexit_ret, int arg, struct file *ret)
+{
+	long buf = 0;
+
+	bpf_probe_read_kernel(&buf, 8, ret);
+	bpf_probe_read_kernel(&buf, 8, (char *)ret + 256);
+	*(volatile long long *)ret;
+	*(volatile int *)&ret->f_mode;
+	return 0;
+}
+
 __u32 fmod_ret_read_sz = 0;
 
 SEC("fmod_ret/bpf_testmod_test_read")

@@ -54,7 +54,7 @@
 #define MAX_INSNS	BPF_MAXINSNS
 #define MAX_TEST_INSNS	1000000
 #define MAX_FIXUPS	8
-#define MAX_NR_MAPS	21
+#define MAX_NR_MAPS	22
 #define MAX_TEST_RUNS	8
 #define POINTER_VALUE	0xcafe4all
 #define TEST_DATA_LEN	64

@@ -138,6 +138,8 @@
 	BPF_EXIT_INSN(),
 	},
 	.result = ACCEPT,
+	.result_unpriv = REJECT,
+	.errstr_unpriv = "R0 leaks addr into mem",
 },
 {
 	"Dest pointer in r0 - succeed",

@@ -156,4 +158,88 @@
 	BPF_EXIT_INSN(),
 	},
 	.result = ACCEPT,
+	.result_unpriv = REJECT,
+	.errstr_unpriv = "R0 leaks addr into mem",
 },
+{
+	"Dest pointer in r0 - succeed, check 2",
+	.insns = {
+	/* r0 = &val */
+	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+	/* val = r0; */
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+	/* r5 = &val */
+	BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+	/* r0 = atomic_cmpxchg(&val, r0, r5); */
+	BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+	/* r1 = *r0 */
+	BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+	/* exit(0); */
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.result_unpriv = REJECT,
+	.errstr_unpriv = "R0 leaks addr into mem",
+},
+{
+	"Dest pointer in r0 - succeed, check 3",
+	.insns = {
+	/* r0 = &val */
+	BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
+	/* val = r0; */
+	BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+	/* r5 = &val */
+	BPF_MOV64_REG(BPF_REG_5, BPF_REG_10),
+	/* r0 = atomic_cmpxchg(&val, r0, r5); */
+	BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+	/* exit(0); */
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.errstr = "invalid size of register fill",
+	.errstr_unpriv = "R0 leaks addr into mem",
+},
+{
+	"Dest pointer in r0 - succeed, check 4",
+	.insns = {
+	/* r0 = &val */
+	BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
+	/* val = r0; */
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+	/* r5 = &val */
+	BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
+	/* r0 = atomic_cmpxchg(&val, r0, r5); */
+	BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+	/* r1 = *r10 */
+	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -8),
+	/* exit(0); */
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.result_unpriv = REJECT,
+	.errstr_unpriv = "R10 partial copy of pointer",
+},
+{
+	"Dest pointer in r0 - succeed, check 5",
+	.insns = {
+	/* r0 = &val */
+	BPF_MOV32_REG(BPF_REG_0, BPF_REG_10),
+	/* val = r0; */
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+	/* r5 = &val */
+	BPF_MOV32_REG(BPF_REG_5, BPF_REG_10),
+	/* r0 = atomic_cmpxchg(&val, r0, r5); */
+	BPF_ATOMIC_OP(BPF_W, BPF_CMPXCHG, BPF_REG_10, BPF_REG_5, -8),
+	/* r1 = *r0 */
+	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -8),
+	/* exit(0); */
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.errstr = "R0 invalid mem access",
+	.errstr_unpriv = "R10 partial copy of pointer",
+},

@@ -1,3 +1,97 @@
+{
+	"atomic dw/fetch and address leakage of (map ptr & -1) via stack slot",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_1, -1),
+	BPF_LD_MAP_FD(BPF_REG_8, 0),
+	BPF_LD_MAP_FD(BPF_REG_9, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+	BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+	BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
+	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_array_48b = { 2, 4 },
+	.result = ACCEPT,
+	.result_unpriv = REJECT,
+	.errstr_unpriv = "leaking pointer from stack off -8",
+},
+{
+	"atomic dw/fetch and address leakage of (map ptr & -1) via returned value",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_1, -1),
+	BPF_LD_MAP_FD(BPF_REG_8, 0),
+	BPF_LD_MAP_FD(BPF_REG_9, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+	BPF_ATOMIC_OP(BPF_DW, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+	BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
+	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_array_48b = { 2, 4 },
+	.result = ACCEPT,
+	.result_unpriv = REJECT,
+	.errstr_unpriv = "leaking pointer from stack off -8",
+},
+{
+	"atomic w/fetch and address leakage of (map ptr & -1) via stack slot",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_1, -1),
+	BPF_LD_MAP_FD(BPF_REG_8, 0),
+	BPF_LD_MAP_FD(BPF_REG_9, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+	BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+	BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_2, 0),
+	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_array_48b = { 2, 4 },
+	.result = REJECT,
+	.errstr = "invalid size of register fill",
+},
+{
+	"atomic w/fetch and address leakage of (map ptr & -1) via returned value",
+	.insns = {
+	BPF_LD_IMM64(BPF_REG_1, -1),
+	BPF_LD_MAP_FD(BPF_REG_8, 0),
+	BPF_LD_MAP_FD(BPF_REG_9, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_9, 0),
+	BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_2, BPF_REG_1, 0),
+	BPF_MOV64_REG(BPF_REG_9, BPF_REG_1),
+	BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_8),
+	BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_9, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_array_48b = { 2, 4 },
+	.result = REJECT,
+	.errstr = "invalid size of register fill",
+},
 #define __ATOMIC_FETCH_OP_TEST(src_reg, dst_reg, operand1, op, operand2, expect) \
 { \
 	"atomic fetch " #op ", src=" #dst_reg " dst=" #dst_reg, \

@@ -132,6 +132,77 @@
 	.result = REJECT,
 	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
 },
+{
+	"precision tracking for u32 spill/fill",
+	.insns = {
+	BPF_MOV64_REG(BPF_REG_7, BPF_REG_1),
+	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+	BPF_MOV32_IMM(BPF_REG_6, 32),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+	BPF_MOV32_IMM(BPF_REG_6, 4),
+	/* Additional insns to introduce a pruning point. */
+	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_IMM(BPF_REG_3, 0),
+	BPF_MOV64_IMM(BPF_REG_3, 0),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+	BPF_MOV64_IMM(BPF_REG_3, 0),
+	/* u32 spill/fill */
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -8),
+	BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_10, -8),
+	/* out-of-bound map value access for r6=32 */
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
+	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+	BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_hash_8b = { 15 },
+	.result = REJECT,
+	.errstr = "R0 min value is outside of the allowed memory range",
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
+{
+	"precision tracking for u32 spills, u64 fill",
+	.insns = {
+	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_MOV32_IMM(BPF_REG_7, 0xffffffff),
+	/* Additional insns to introduce a pruning point. */
+	BPF_MOV64_IMM(BPF_REG_3, 1),
+	BPF_MOV64_IMM(BPF_REG_3, 1),
+	BPF_MOV64_IMM(BPF_REG_3, 1),
+	BPF_MOV64_IMM(BPF_REG_3, 1),
+	BPF_EMIT_CALL(BPF_FUNC_get_prandom_u32),
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+	BPF_MOV64_IMM(BPF_REG_3, 1),
+	BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0),
+	/* u32 spills, u64 fill */
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
+	BPF_LDX_MEM(BPF_DW, BPF_REG_8, BPF_REG_10, -8),
+	/* if r8 != X goto pc+1  r8 known in fallthrough branch */
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_8, 0xffffffff, 1),
+	BPF_MOV64_IMM(BPF_REG_3, 1),
+	/* if r8 == X goto pc+1  condition always true on first
+	 * traversal, so starts backtracking to mark r8 as requiring
+	 * precision. r7 marked as needing precision. r6 not marked
+	 * since it's not tracked.
+	 */
+	BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 0xffffffff, 1),
+	/* fails if r8 correctly marked unknown after fill. */
+	BPF_ALU32_IMM(BPF_DIV, BPF_REG_3, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.errstr = "div by zero",
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+},
 {
 	"allocated_stack",
 	.insns = {

@@ -175,6 +175,38 @@
 	.errstr = "invalid access to packet",
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 },
+{
+	"Spill u32 const scalars. Refill as u64. Offset to skb->data",
+	.insns = {
+	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+		    offsetof(struct __sk_buff, data)),
+	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+		    offsetof(struct __sk_buff, data_end)),
+	/* r6 = 0 */
+	BPF_MOV32_IMM(BPF_REG_6, 0),
+	/* r7 = 20 */
+	BPF_MOV32_IMM(BPF_REG_7, 20),
+	/* *(u32 *)(r10 -4) = r6 */
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_6, -4),
+	/* *(u32 *)(r10 -8) = r7 */
+	BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, -8),
+	/* r4 = *(u64 *)(r10 -8) */
+	BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
+	/* r0 = r2 */
+	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
+	BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
+	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
+	BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
+	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.result = REJECT,
+	.errstr = "invalid access to packet",
+	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+},
 {
 	"Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data",
 	.insns = {

@@ -1077,6 +1077,29 @@
 	.errstr = "R0 invalid mem access 'inv'",
 	.errstr_unpriv = "R0 pointer -= pointer prohibited",
 },
+{
+	"map access: trying to leak tained dst reg",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+	BPF_LD_MAP_FD(BPF_REG_1, 0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+	BPF_EXIT_INSN(),
+	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+	BPF_MOV32_IMM(BPF_REG_1, 0xFFFFFFFF),
+	BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
+	BPF_ALU64_REG(BPF_SUB, BPF_REG_2, BPF_REG_1),
+	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 0),
+	BPF_EXIT_INSN(),
+	},
+	.fixup_map_array_48b = { 4 },
+	.result = REJECT,
+	.errstr = "math between map_value pointer and 4294967295 is not allowed",
+},
 {
 	"32bit pkt_ptr -= scalar",
 	.insns = {