Merge branch 'bpf-log-improvements'

Andrii Nakryiko says:

====================
This patch set fixes ambiguity in the BPF verifier log output of SCALAR
registers in the parts that emit umin/umax, smin/smax, etc. ranges. See patch
#4 for details.
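
As a concrete illustration (taken from the updated selftest expectations
below), a fully-known unsigned range that used to be logged as

  R2=scalar(umin=4096,umax=8192,var_off=(0x0; 0x3fff))

is now logged with all coinciding bounds folded into one group per value:

  R2=scalar(smin=umin=smin32=umin32=4096,smax=umax=smax32=umax32=8192,var_off=(0x0; 0x3fff))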

Also, patch #5 fixes an issue where the verifier log was missing instruction
context (state) output for conditionals that trigger precision marking. See
details in the patch.
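
With that fix, the conditional jump that triggers precision marking is
followed by a register state line in the usual format, roughly of this shape
(which registers appear depends on the program):

  6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0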

The first two patches are just improvements to two selftests that are very
flaky locally when run in parallel mode.

Patch #3 changes the 'align' selftest to be less strict about exact verifier
log output (which patch #4 changes, breaking lots of align tests as written).
The test now does more targeted register substate checks, mostly around
expected var_off values. The 'align' selftest is one of the more brittle ones
and requires constant adjustment whenever verifier log output changes, without
really catching any new issues, so hopefully these changes will minimize
future support efforts for this specific set of tests.
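
For example, instead of matching one exact string such as
"R3_w=scalar(umax=510,var_off=(0x0; 0x1fe))", a check is now written as a
(line, register, substring) triple:

  {7, "R3_w", "var_off=(0x0; 0x1fe)"},

so only the named register's var_off has to match, and unrelated changes to
the printed range format no longer break the test.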
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
commit 99c9991f4e
Daniel Borkmann, 2023-10-16 13:49:18 +02:00

8 changed files with 200 additions and 157 deletions


@@ -1342,6 +1342,50 @@ static void scrub_spilled_slot(u8 *stype)
*stype = STACK_MISC;
}
static void print_scalar_ranges(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
const char **sep)
{
struct {
const char *name;
u64 val;
bool omit;
} minmaxs[] = {
{"smin", reg->smin_value, reg->smin_value == S64_MIN},
{"smax", reg->smax_value, reg->smax_value == S64_MAX},
{"umin", reg->umin_value, reg->umin_value == 0},
{"umax", reg->umax_value, reg->umax_value == U64_MAX},
{"smin32", (s64)reg->s32_min_value, reg->s32_min_value == S32_MIN},
{"smax32", (s64)reg->s32_max_value, reg->s32_max_value == S32_MAX},
{"umin32", reg->u32_min_value, reg->u32_min_value == 0},
{"umax32", reg->u32_max_value, reg->u32_max_value == U32_MAX},
}, *m1, *m2, *mend = &minmaxs[ARRAY_SIZE(minmaxs)];
bool neg1, neg2;
for (m1 = &minmaxs[0]; m1 < mend; m1++) {
if (m1->omit)
continue;
neg1 = m1->name[0] == 's' && (s64)m1->val < 0;
verbose(env, "%s%s=", *sep, m1->name);
*sep = ",";
for (m2 = m1 + 2; m2 < mend; m2 += 2) {
if (m2->omit || m2->val != m1->val)
continue;
/* don't mix negatives with positives */
neg2 = m2->name[0] == 's' && (s64)m2->val < 0;
if (neg2 != neg1)
continue;
m2->omit = true;
verbose(env, "%s=", m2->name);
}
verbose(env, m1->name[0] == 's' ? "%lld" : "%llu", m1->val);
}
}
static void print_verifier_state(struct bpf_verifier_env *env,
const struct bpf_func_state *state,
bool print_all)
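
A note on the folding logic in print_scalar_ranges() above: the inner loop
starts at m1 + 2 and steps by 2, so a "min" entry can only fold with later
"min" entries (and "max" only with later "max" entries) across the
s64/u64/s32/u32 domains. The following is a minimal userspace sketch of the
same idea, for illustration only; it hardcodes one sample range and drops the
kernel version's negative/positive mixing check:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

int main(void)
{
	/* same layout idea as minmaxs[]: alternating min/max entries */
	struct { const char *name; uint64_t val; bool omit; } mm[] = {
		{"smin", 4096, false}, {"smax", 8192, false},
		{"umin", 4096, false}, {"umax", 8192, false},
	};
	int n = sizeof(mm) / sizeof(mm[0]);
	const char *sep = "";

	for (int i = 0; i < n; i++) {
		if (mm[i].omit)
			continue;
		printf("%s%s=", sep, mm[i].name);
		sep = ",";
		/* step by 2: min folds only with later mins, max with maxes */
		for (int j = i + 2; j < n; j += 2) {
			if (mm[j].omit || mm[j].val != mm[i].val)
				continue;
			mm[j].omit = true;
			printf("%s=", mm[j].name);
		}
		printf("%llu", (unsigned long long)mm[i].val);
	}
	printf("\n");	/* prints: smin=umin=4096,smax=umax=8192 */
	return 0;
}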
@@ -1405,34 +1449,13 @@ static void print_verifier_state(struct bpf_verifier_env *env,
*/
verbose_a("imm=%llx", reg->var_off.value);
} else {
if (reg->smin_value != reg->umin_value &&
reg->smin_value != S64_MIN)
verbose_a("smin=%lld", (long long)reg->smin_value);
if (reg->smax_value != reg->umax_value &&
reg->smax_value != S64_MAX)
verbose_a("smax=%lld", (long long)reg->smax_value);
if (reg->umin_value != 0)
verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
if (reg->umax_value != U64_MAX)
verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
print_scalar_ranges(env, reg, &sep);
if (!tnum_is_unknown(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose_a("var_off=%s", tn_buf);
}
if (reg->s32_min_value != reg->smin_value &&
reg->s32_min_value != S32_MIN)
verbose_a("s32_min=%d", (int)(reg->s32_min_value));
if (reg->s32_max_value != reg->smax_value &&
reg->s32_max_value != S32_MAX)
verbose_a("s32_max=%d", (int)(reg->s32_max_value));
if (reg->u32_min_value != reg->umin_value &&
reg->u32_min_value != U32_MIN)
verbose_a("u32_min=%d", (int)(reg->u32_min_value));
if (reg->u32_max_value != reg->umax_value &&
reg->u32_max_value != U32_MAX)
verbose_a("u32_max=%d", (int)(reg->u32_max_value));
}
#undef verbose_a
@@ -1516,7 +1539,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
if (state->in_async_callback_fn)
verbose(env, " async_cb");
verbose(env, "\n");
mark_verifier_state_clean(env);
if (!print_all)
mark_verifier_state_clean(env);
}
static inline u32 vlog_alignment(u32 pos)
@@ -14385,6 +14409,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
!sanitize_speculative_path(env, insn, *insn_idx + 1,
*insn_idx))
return -EFAULT;
if (env->log.level & BPF_LOG_LEVEL)
print_insn_state(env, this_branch->frame[this_branch->curframe]);
*insn_idx += insn->off;
return 0;
} else if (pred == 0) {
@@ -14397,6 +14423,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
*insn_idx + insn->off + 1,
*insn_idx))
return -EFAULT;
if (env->log.level & BPF_LOG_LEVEL)
print_insn_state(env, this_branch->frame[this_branch->curframe]);
return 0;
}
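
For context: BPF_LOG_LEVEL is the mask of BPF_LOG_LEVEL1 and BPF_LOG_LEVEL2,
so the new state dump is emitted whenever verifier logging is enabled at
either level.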


@@ -6,6 +6,7 @@
struct bpf_reg_match {
unsigned int line;
const char *reg;
const char *match;
};
@@ -39,13 +40,13 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
{0, "R3_w=2"},
{1, "R3_w=4"},
{2, "R3_w=8"},
{3, "R3_w=16"},
{4, "R3_w=32"},
{0, "R1", "ctx(off=0,imm=0)"},
{0, "R10", "fp0"},
{0, "R3_w", "2"},
{1, "R3_w", "4"},
{2, "R3_w", "8"},
{3, "R3_w", "16"},
{4, "R3_w", "32"},
},
},
{
@@ -67,19 +68,19 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
{0, "R3_w=1"},
{1, "R3_w=2"},
{2, "R3_w=4"},
{3, "R3_w=8"},
{4, "R3_w=16"},
{5, "R3_w=1"},
{6, "R4_w=32"},
{7, "R4_w=16"},
{8, "R4_w=8"},
{9, "R4_w=4"},
{10, "R4_w=2"},
{0, "R1", "ctx(off=0,imm=0)"},
{0, "R10", "fp0"},
{0, "R3_w", "1"},
{1, "R3_w", "2"},
{2, "R3_w", "4"},
{3, "R3_w", "8"},
{4, "R3_w", "16"},
{5, "R3_w", "1"},
{6, "R4_w", "32"},
{7, "R4_w", "16"},
{8, "R4_w", "8"},
{9, "R4_w", "4"},
{10, "R4_w", "2"},
},
},
{
@@ -96,14 +97,14 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
{0, "R3_w=4"},
{1, "R3_w=8"},
{2, "R3_w=10"},
{3, "R4_w=8"},
{4, "R4_w=12"},
{5, "R4_w=14"},
{0, "R1", "ctx(off=0,imm=0)"},
{0, "R10", "fp0"},
{0, "R3_w", "4"},
{1, "R3_w", "8"},
{2, "R3_w", "10"},
{3, "R4_w", "8"},
{4, "R4_w", "12"},
{5, "R4_w", "14"},
},
},
{
@@ -118,12 +119,12 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
{0, "R3_w=7"},
{1, "R3_w=7"},
{2, "R3_w=14"},
{3, "R3_w=56"},
{0, "R1", "ctx(off=0,imm=0)"},
{0, "R10", "fp0"},
{0, "R3_w", "7"},
{1, "R3_w", "7"},
{2, "R3_w", "14"},
{3, "R3_w", "56"},
},
},
@@ -161,19 +162,19 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{6, "R0_w=pkt(off=8,r=8,imm=0)"},
{6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{7, "R3_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
{8, "R3_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
{9, "R3_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
{10, "R3_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
{12, "R3_w=pkt_end(off=0,imm=0)"},
{17, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{18, "R4_w=scalar(umax=8160,var_off=(0x0; 0x1fe0))"},
{19, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
{20, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
{21, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
{22, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
{6, "R0_w", "pkt(off=8,r=8,imm=0)"},
{6, "R3_w", "var_off=(0x0; 0xff)"},
{7, "R3_w", "var_off=(0x0; 0x1fe)"},
{8, "R3_w", "var_off=(0x0; 0x3fc)"},
{9, "R3_w", "var_off=(0x0; 0x7f8)"},
{10, "R3_w", "var_off=(0x0; 0xff0)"},
{12, "R3_w", "pkt_end(off=0,imm=0)"},
{17, "R4_w", "var_off=(0x0; 0xff)"},
{18, "R4_w", "var_off=(0x0; 0x1fe0)"},
{19, "R4_w", "var_off=(0x0; 0xff0)"},
{20, "R4_w", "var_off=(0x0; 0x7f8)"},
{21, "R4_w", "var_off=(0x0; 0x3fc)"},
{22, "R4_w", "var_off=(0x0; 0x1fe)"},
},
},
{
@@ -194,16 +195,16 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{7, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
{8, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{9, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
{10, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
{11, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
{12, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
{13, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
{14, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
{15, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
{6, "R3_w", "var_off=(0x0; 0xff)"},
{7, "R4_w", "var_off=(0x0; 0xff)"},
{8, "R4_w", "var_off=(0x0; 0xff)"},
{9, "R4_w", "var_off=(0x0; 0xff)"},
{10, "R4_w", "var_off=(0x0; 0x1fe)"},
{11, "R4_w", "var_off=(0x0; 0xff)"},
{12, "R4_w", "var_off=(0x0; 0x3fc)"},
{13, "R4_w", "var_off=(0x0; 0xff)"},
{14, "R4_w", "var_off=(0x0; 0x7f8)"},
{15, "R4_w", "var_off=(0x0; 0xff0)"},
},
},
{
@@ -234,14 +235,14 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{2, "R5_w=pkt(off=0,r=0,imm=0)"},
{4, "R5_w=pkt(off=14,r=0,imm=0)"},
{5, "R4_w=pkt(off=14,r=0,imm=0)"},
{9, "R2=pkt(off=0,r=18,imm=0)"},
{10, "R5=pkt(off=14,r=18,imm=0)"},
{10, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{13, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},
{14, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},
{2, "R5_w", "pkt(off=0,r=0,imm=0)"},
{4, "R5_w", "pkt(off=14,r=0,imm=0)"},
{5, "R4_w", "pkt(off=14,r=0,imm=0)"},
{9, "R2", "pkt(off=0,r=18,imm=0)"},
{10, "R5", "pkt(off=14,r=18,imm=0)"},
{10, "R4_w", "var_off=(0x0; 0xff)"},
{13, "R4_w", "var_off=(0x0; 0xffff)"},
{14, "R4_w", "var_off=(0x0; 0xffff)"},
},
},
{
@@ -298,20 +299,20 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w=pkt(off=0,r=8,imm=0)"},
{7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
{6, "R2_w", "pkt(off=0,r=8,imm=0)"},
{7, "R6_w", "var_off=(0x0; 0x3fc)"},
/* Offset is added to packet pointer R5, resulting in
* known fixed offset, and variable offset from R6.
*/
{11, "R5_w=pkt(id=1,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
{11, "R5_w", "pkt(id=1,off=14,"},
/* At the time the word size load is performed from R5,
* its total offset is NET_IP_ALIGN + reg->off (0) +
* reg->aux_off (14) which is 16. Then the variable
* offset is considered using reg->aux_off_align which
* is 4 and meets the load's requirements.
*/
{15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
{15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
{15, "R4", "var_off=(0x0; 0x3fc)"},
{15, "R5", "var_off=(0x0; 0x3fc)"},
/* Variable offset is added to R5 packet pointer,
* resulting in auxiliary alignment of 4. To avoid BPF
* verifier's precision backtracking logging
@@ -319,46 +320,46 @@ static struct bpf_align_test tests[] = {
* instruction to validate R5 state. We also check
* that R4 is what it should be in such case.
*/
{18, "R4_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
{18, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
{18, "R4_w", "var_off=(0x0; 0x3fc)"},
{18, "R5_w", "var_off=(0x0; 0x3fc)"},
/* Constant offset is added to R5, resulting in
* reg->off of 14.
*/
{19, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
{19, "R5_w", "pkt(id=2,off=14,"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off
* (14) which is 16. Then the variable offset is 4-byte
* aligned, so the total offset is 4-byte aligned and
* meets the load's requirements.
*/
{24, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
{24, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
{24, "R4", "var_off=(0x0; 0x3fc)"},
{24, "R5", "var_off=(0x0; 0x3fc)"},
/* Constant offset is added to R5 packet pointer,
* resulting in reg->off value of 14.
*/
{26, "R5_w=pkt(off=14,r=8"},
{26, "R5_w", "pkt(off=14,r=8,"},
/* Variable offset is added to R5, resulting in a
* variable offset of (4n). See comment for insn #18
* for R4 = R5 trick.
*/
{28, "R4_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
{28, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
{28, "R4_w", "var_off=(0x0; 0x3fc)"},
{28, "R5_w", "var_off=(0x0; 0x3fc)"},
/* Constant is added to R5 again, setting reg->off to 18. */
{29, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
{29, "R5_w", "pkt(id=3,off=18,"},
/* And once more we add a variable; resulting var_off
* is still (4n), fixed offset is not changed.
* Also, we create a new reg->id.
*/
{31, "R4_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
{31, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
{31, "R4_w", "var_off=(0x0; 0x7fc)"},
{31, "R5_w", "var_off=(0x0; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (18)
* which is 20. Then the variable offset is (4n), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{35, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
{35, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
{35, "R4", "var_off=(0x0; 0x7fc)"},
{35, "R5", "var_off=(0x0; 0x7fc)"},
},
},
{
@@ -396,36 +397,36 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w=pkt(off=0,r=8,imm=0)"},
{7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
{6, "R2_w", "pkt(off=0,r=8,imm=0)"},
{7, "R6_w", "var_off=(0x0; 0x3fc)"},
/* Adding 14 makes R6 be (4n+2) */
{8, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},
{8, "R6_w", "var_off=(0x2; 0x7fc)"},
/* Packet pointer has (4n+2) offset */
{11, "R5_w=pkt(id=1,off=0,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
{12, "R4=pkt(id=1,off=4,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
{11, "R5_w", "var_off=(0x2; 0x7fc)"},
{12, "R4", "var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{15, "R5=pkt(id=1,off=0,r=4,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
{15, "R5", "var_off=(0x2; 0x7fc)"},
/* Newly read value in R6 was shifted left by 2, so has
* known alignment of 4.
*/
{17, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
{17, "R6_w", "var_off=(0x0; 0x3fc)"},
/* Added (4n) to packet pointer's (4n+2) var_off, giving
* another (4n+2).
*/
{19, "R5_w=pkt(id=2,off=0,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
{20, "R4=pkt(id=2,off=4,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
{19, "R5_w", "var_off=(0x2; 0xffc)"},
{20, "R4", "var_off=(0x2; 0xffc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{23, "R5=pkt(id=2,off=0,r=4,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
{23, "R5", "var_off=(0x2; 0xffc)"},
},
},
{
@@ -458,18 +459,18 @@ static struct bpf_align_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.matches = {
{3, "R5_w=pkt_end(off=0,imm=0)"},
{3, "R5_w", "pkt_end(off=0,imm=0)"},
/* (ptr - ptr) << 2 == unknown, (4n) */
{5, "R5_w=scalar(smax=9223372036854775804,umax=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
{5, "R5_w", "var_off=(0x0; 0xfffffffffffffffc)"},
/* (4n) + 14 == (4n+2). We blow our bounds, because
* the add could overflow.
*/
{6, "R5_w=scalar(smin=-9223372036854775806,smax=9223372036854775806,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
{6, "R5_w", "var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>=0 */
{9, "R5=scalar(umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
{9, "R5", "var_off=(0x2; 0x7ffffffffffffffc)"},
/* packet pointer + nonnegative (4n+2) */
{11, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
{12, "R4_w=pkt(id=1,off=4,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
{11, "R6_w", "var_off=(0x2; 0x7ffffffffffffffc)"},
{12, "R4_w", "var_off=(0x2; 0x7ffffffffffffffc)"},
/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
* We checked the bounds, but it might have been able
* to overflow if the packet pointer started in the
@@ -477,7 +478,7 @@ static struct bpf_align_test tests[] = {
* So we did not get a 'range' on R6, and the access
* attempt will fail.
*/
{15, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
{15, "R6_w", "var_off=(0x2; 0x7ffffffffffffffc)"},
}
},
{
@@ -512,24 +513,23 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w=pkt(off=0,r=8,imm=0)"},
{8, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
{6, "R2_w", "pkt(off=0,r=8,imm=0)"},
{8, "R6_w", "var_off=(0x0; 0x3fc)"},
/* Adding 14 makes R6 be (4n+2) */
{9, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},
{9, "R6_w", "var_off=(0x2; 0x7fc)"},
/* New unknown value in R7 is (4n) */
{10, "R7_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
{10, "R7_w", "var_off=(0x0; 0x3fc)"},
/* Subtracting it from R6 blows our unsigned bounds */
{11, "R6=scalar(smin=-1006,smax=1034,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
{11, "R6", "var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>= 0 */
{14, "R6=scalar(umin=2,umax=1034,var_off=(0x2; 0x7fc))"},
{14, "R6", "var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{20, "R5=pkt(id=2,off=0,r=4,umin=2,umax=1034,var_off=(0x2; 0x7fc)"},
{20, "R5", "var_off=(0x2; 0x7fc)"},
},
},
{
@@ -566,23 +566,23 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w=pkt(off=0,r=8,imm=0)"},
{9, "R6_w=scalar(umax=60,var_off=(0x0; 0x3c))"},
{6, "R2_w", "pkt(off=0,r=8,imm=0)"},
{9, "R6_w", "var_off=(0x0; 0x3c)"},
/* Adding 14 makes R6 be (4n+2) */
{10, "R6_w=scalar(umin=14,umax=74,var_off=(0x2; 0x7c))"},
{10, "R6_w", "var_off=(0x2; 0x7c)"},
/* Subtracting from packet pointer overflows ubounds */
{13, "R5_w=pkt(id=2,off=0,r=8,umin=18446744073709551542,umax=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
{13, "R5_w", "var_off=(0xffffffffffffff82; 0x7c)"},
/* New unknown value in R7 is (4n), >= 76 */
{14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"},
{14, "R7_w", "var_off=(0x0; 0x7fc)"},
/* Adding it to packet pointer gives nice bounds again */
{16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0x7fc)"},
{16, "R5_w", "var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0x7fc)"},
{20, "R5", "var_off=(0x2; 0x7fc)"},
},
},
};
@@ -635,6 +635,7 @@ static int do_test_single(struct bpf_align_test *test)
line_ptr = strtok(bpf_vlog_copy, "\n");
for (i = 0; i < MAX_MATCHES; i++) {
struct bpf_reg_match m = test->matches[i];
const char *p;
int tmp;
if (!m.match)
@@ -649,8 +650,8 @@ static int do_test_single(struct bpf_align_test *test)
line_ptr = strtok(NULL, "\n");
}
if (!line_ptr) {
printf("Failed to find line %u for match: %s\n",
m.line, m.match);
printf("Failed to find line %u for match: %s=%s\n",
m.line, m.reg, m.match);
ret = 1;
printf("%s", bpf_vlog);
break;
@@ -667,15 +668,15 @@ static int do_test_single(struct bpf_align_test *test)
* 6: R0_w=pkt(off=8,r=8,imm=0) R1=ctx(off=0,imm=0) R2_w=pkt(off=0,r=8,imm=0) R3_w=pkt_end(off=0,imm=0) R10=fp0
* 6: (71) r3 = *(u8 *)(r2 +0) ; R2_w=pkt(off=0,r=8,imm=0) R3_w=scalar(umax=255,var_off=(0x0; 0xff))
*/
while (!strstr(line_ptr, m.match)) {
while (!(p = strstr(line_ptr, m.reg)) || !strstr(p, m.match)) {
cur_line = -1;
line_ptr = strtok(NULL, "\n");
sscanf(line_ptr ?: "", "%u: ", &cur_line);
if (!line_ptr || cur_line != m.line)
break;
}
if (cur_line != m.line || !line_ptr || !strstr(line_ptr, m.match)) {
printf("Failed to find match %u: %s\n", m.line, m.match);
if (cur_line != m.line || !line_ptr || !(p = strstr(line_ptr, m.reg)) || !strstr(p, m.match)) {
printf("Failed to find match %u: %s=%s\n", m.line, m.reg, m.match);
ret = 1;
printf("%s", bpf_vlog);
break;
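
The two-stage strstr() is the important detail in the new matching code: the
register name is located first, and the expected substring then has to occur
at or after that position, so a match on some earlier register's state cannot
produce a false positive. A self-contained sketch of the idea (the log line
below is made up for illustration):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *line = "6: R3_w=scalar(umax=255,var_off=(0x0; 0xff)) R10=fp0";
	const char *p = strstr(line, "R3_w");	/* 1) locate the register */

	if (p && strstr(p, "var_off=(0x0; 0xff)"))	/* 2) match after it */
		puts("matched");
	return 0;
}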


@@ -81,10 +81,10 @@ static void test_missed_kprobe_recursion(void)
ASSERT_EQ(topts.retval, 0, "test_run");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test1)), 0, "test1_recursion_misses");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test2)), 1, "test2_recursion_misses");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test3)), 1, "test3_recursion_misses");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test4)), 1, "test4_recursion_misses");
ASSERT_EQ(get_missed_count(bpf_program__fd(skel->progs.test5)), 1, "test5_recursion_misses");
ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test2)), 1, "test2_recursion_misses");
ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test3)), 1, "test3_recursion_misses");
ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test4)), 1, "test4_recursion_misses");
ASSERT_GE(get_missed_count(bpf_program__fd(skel->progs.test5)), 1, "test5_recursion_misses");
cleanup:
missed_kprobe_recursion__destroy(skel);
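
Presumably (this is the parallel-mode flakiness mentioned in the cover letter
above) other concurrently running tests can also hit these kprobes and push
the recursion-miss counters above one, so exact-equality assertions were
unreliable; ASSERT_GE still checks the property under test, at least one
recorded miss, while tolerating extra hits.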


@@ -19,6 +19,7 @@ static void test_array(void)
bpf_program__set_autoload(skel->progs.test_array_map_3, true);
bpf_program__set_autoload(skel->progs.test_array_map_4, true);
skel->bss->my_pid = getpid();
skel->rodata->nr_cpus = libbpf_num_possible_cpus();
err = percpu_alloc_array__load(skel);
@@ -51,6 +52,7 @@ static void test_array_sleepable(void)
bpf_program__set_autoload(skel->progs.test_array_map_10, true);
skel->bss->my_pid = getpid();
skel->rodata->nr_cpus = libbpf_num_possible_cpus();
err = percpu_alloc_array__load(skel);
@@ -85,6 +87,7 @@ static void test_cgrp_local_storage(void)
if (!ASSERT_OK_PTR(skel, "percpu_alloc_cgrp_local_storage__open"))
goto close_fd;
skel->bss->my_pid = getpid();
skel->rodata->nr_cpus = libbpf_num_possible_cpus();
err = percpu_alloc_cgrp_local_storage__load(skel);
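
One assumption worth spelling out: assigning through skel->rodata only works
if the BPF object declares a matching read-only global, which in libbpf
convention is a const volatile variable (the BPF-side declaration is not part
of this diff):

	const volatile int nr_cpus;	/* set by userspace before load */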


@@ -31,35 +31,35 @@ check_assert(s64, eq, llong_max, LLONG_MAX);
__msg(": R0_w=scalar(smax=2147483646) R10=fp0")
check_assert(s64, lt, pos, INT_MAX);
__msg(": R0_w=scalar(umin=9223372036854775808,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
__msg(": R0_w=scalar(smax=-1,umin=9223372036854775808,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, lt, zero, 0);
__msg(": R0_w=scalar(umin=9223372036854775808,umax=18446744071562067967,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
__msg(": R0_w=scalar(smax=-2147483649,umin=9223372036854775808,umax=18446744071562067967,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, lt, neg, INT_MIN);
__msg(": R0_w=scalar(smax=2147483647) R10=fp0")
check_assert(s64, le, pos, INT_MAX);
__msg(": R0_w=scalar(smax=0) R10=fp0")
check_assert(s64, le, zero, 0);
__msg(": R0_w=scalar(umin=9223372036854775808,umax=18446744071562067968,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
__msg(": R0_w=scalar(smax=-2147483648,umin=9223372036854775808,umax=18446744071562067968,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
check_assert(s64, le, neg, INT_MIN);
__msg(": R0_w=scalar(umin=2147483648,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
__msg(": R0_w=scalar(smin=umin=2147483648,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, gt, pos, INT_MAX);
__msg(": R0_w=scalar(umin=1,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
__msg(": R0_w=scalar(smin=umin=1,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, gt, zero, 0);
__msg(": R0_w=scalar(smin=-2147483647) R10=fp0")
check_assert(s64, gt, neg, INT_MIN);
__msg(": R0_w=scalar(umin=2147483647,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
__msg(": R0_w=scalar(smin=umin=2147483647,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff))")
check_assert(s64, ge, pos, INT_MAX);
__msg(": R0_w=scalar(umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff)) R10=fp0")
__msg(": R0_w=scalar(smin=0,umax=9223372036854775807,var_off=(0x0; 0x7fffffffffffffff)) R10=fp0")
check_assert(s64, ge, zero, 0);
__msg(": R0_w=scalar(smin=-2147483648) R10=fp0")
check_assert(s64, ge, neg, INT_MIN);
SEC("?tc")
__log_level(2) __failure
__msg(": R0=0 R1=ctx(off=0,imm=0) R2=scalar(smin=-2147483646,smax=2147483645) R10=fp0")
__msg(": R0=0 R1=ctx(off=0,imm=0) R2=scalar(smin=smin32=-2147483646,smax=smax32=2147483645) R10=fp0")
int check_assert_range_s64(struct __sk_buff *ctx)
{
struct bpf_sock *sk = ctx->sk;
@@ -75,7 +75,7 @@ int check_assert_range_s64(struct __sk_buff *ctx)
SEC("?tc")
__log_level(2) __failure
__msg(": R1=ctx(off=0,imm=0) R2=scalar(umin=4096,umax=8192,var_off=(0x0; 0x3fff))")
__msg(": R1=ctx(off=0,imm=0) R2=scalar(smin=umin=smin32=umin32=4096,smax=umax=smax32=umax32=8192,var_off=(0x0; 0x3fff))")
int check_assert_range_u64(struct __sk_buff *ctx)
{
u64 num = ctx->len;


@@ -71,6 +71,7 @@ int BPF_PROG(test_array_map_2)
}
int cpu0_field_d, sum_field_c;
int my_pid;
/* Summarize percpu data */
SEC("?fentry/bpf_fentry_test3")
@@ -81,6 +82,9 @@ int BPF_PROG(test_array_map_3)
struct val_t *v;
struct elem *e;
if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
return 0;
e = bpf_map_lookup_elem(&array, &index);
if (!e)
return 0;
@@ -130,6 +134,9 @@ int BPF_PROG(test_array_map_10)
struct val_t *v;
struct elem *e;
if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
return 0;
e = bpf_map_lookup_elem(&array, &index);
if (!e)
return 0;
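
The new guard works because bpf_get_current_pid_tgid() returns the thread id
in the lower 32 bits and the process id (tgid) in the upper 32 bits; shifting
right by 32 therefore yields the process id, so events triggered by unrelated
processes running in parallel are filtered out against the my_pid value set by
the test.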


@@ -70,6 +70,7 @@ int BPF_PROG(test_cgrp_local_storage_2)
}
int cpu0_field_d, sum_field_c;
int my_pid;
/* Summarize percpu data collection */
SEC("fentry/bpf_fentry_test3")
@@ -81,6 +82,9 @@ int BPF_PROG(test_cgrp_local_storage_3)
struct elem *e;
int i;
if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
return 0;
task = bpf_get_current_task_btf();
e = bpf_cgrp_storage_get(&cgrp, task->cgroups->dfl_cgrp, 0, 0);
if (!e)


@@ -64,7 +64,7 @@ __naked void ldsx_s32(void)
SEC("socket")
__description("LDSX, S8 range checking, privileged")
__log_level(2) __success __retval(1)
__msg("R1_w=scalar(smin=-128,smax=127)")
__msg("R1_w=scalar(smin=smin32=-128,smax=smax32=127)")
__naked void ldsx_s8_range_priv(void)
{
asm volatile (