3f50f132d8
It is not possible for the current verifier to track ALU32 and JMP ops correctly. This can result in the verifier aborting with errors even though the program should be verifiable. BPF code that hits this can work around it by changing int variables to 64-bit types, marking variables volatile, etc. But this is all very ugly, so it would be better to avoid these tricks.

But the main reason to address this now is that do_refine_retval_range() was assuming return values could not be negative. Once we fix this, code that was previously working will no longer work. See the do_refine_retval_range() patch for details. And we don't want to suddenly cause programs that used to work to fail.

The simplest example code snippet that illustrates the problem is likely this:

 53: w8 = w0                    // r8 <- [0, S32_MAX],
                                // w8 <- [-S32_MIN, X]
 54: w8 <s 0                    // r8 <- [0, U32_MAX]
                                // w8 <- [0, X]

The expected 64-bit and 32-bit bounds after each line are shown on the right. The current issue is that without the w* bounds we are forced to use the worst-case bound of [0, U32_MAX]. To resolve this type of case, jmp32 creating divergent 32-bit bounds from 64-bit bounds, we add explicit 32-bit register bounds s32_{min|max}_value and u32_{min|max}_value. Then, from the branch_taken logic that creates new bounds, we can track 32-bit bounds explicitly.

The next case we observed is ALU ops after the jmp32:

 53: w8 = w0                    // r8 <- [0, S32_MAX],
                                // w8 <- [-S32_MIN, X]
 54: w8 <s 0                    // r8 <- [0, U32_MAX]
                                // w8 <- [0, X]
 55: w8 += 1                    // r8 <- [0, U32_MAX+1]
                                // w8 <- [0, X+1]

In order to keep the bounds accurate at this point we also need to track ALU32 ops. To do this we add explicit ALU32 logic for each of the ALU ops: mov, add, sub, etc.

Finally there is the question of how and when to merge bounds. The cases are enumerated here:

1. MOV ALU32   - zext 32-bit -> 64-bit
2. MOV ALU64   - copy 64-bit -> 32-bit
3. op  ALU32   - zext 32-bit -> 64-bit
4. op  ALU64   - n/a
5. jmp ALU32   - 64-bit: var32_off | upper_32_bits(var64_off)
6. jmp ALU64   - 32-bit: (>> (<< var64_off))

Details for each case:

For "MOV ALU32" the BPF arch zero extends, so we simply copy the bounds from 32-bit into 64-bit, ensuring we truncate var_off and the 64-bit bounds correctly. See zext_32_to_64.

For "MOV ALU64" copy all bounds, including the 32-bit ones, into the new register. If the src register had 32-bit bounds the dst register will as well.

For "op ALU32" zero extend 32-bit into 64-bit, the same as mov; see zext_32_to_64.

For "op ALU64" calculate both 32-bit and 64-bit bounds; no merging is done here. Except we have a special case: when an RSH or ARSH is done we can't simply ignore the bits shifted from the 64-bit reg into the 32-bit subreg, so for now we just push the bounds from 64-bit into 32-bit. This will be correct in the sense that they represent a valid state of the register. However, we could lose some accuracy if an ARSH follows a jmp32 operation. We can handle this special case in a follow-up series.

For "jmp ALU32" mark the 64-bit reg unknown and recalculate the 64-bit bounds from the tnum by setting var_off to ((<<(>>var_off)) | var32_off). We special-case when the 64-bit bounds have zeroed upper 32 bits, at which point we can simply copy the 32-bit bounds into the 64-bit register. This catches a common compiler trick where the upper 32 bits are zeroed and then 32-bit ops are used, followed by a 64-bit compare or a 64-bit op on a pointer. See __reg_combine_64_into_32().

For "jmp ALU64" cast the 64-bit bounds to their 32-bit counterparts, for example s32_min_value = (s32)reg->smin_value. For the tnum, use only the lower 32 bits via (>>(<<var_off)). See __reg_combine_64_into_32().

Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/158560419880.10843.11448220440809118343.stgit@john-Precision-5820-Tower
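The tnum side of cases 5 and 6 above maps onto the subreg helpers at the bottom of the file below. The following is only a minimal sketch of that tnum arithmetic, not the verifier's actual __reg_combine_64_into_32() (which also merges the s32/u32 and s64/u64 min/max bounds); the sketch_* helper names are illustrative only.

	#include <linux/tnum.h>

	/* Case 6, "jmp ALU64": the 32-bit view keeps only the lower 32 bits
	 * of the 64-bit tnum, i.e. (>>(<<var_off)).
	 */
	static struct tnum sketch_bounds_64_into_32(struct tnum var64_off)
	{
		return tnum_subreg(var64_off);	/* tnum_cast(var64_off, 4) */
	}

	/* Case 5, "jmp ALU32": rebuild the 64-bit tnum from the upper 32 bits
	 * of the old 64-bit tnum and the 32-bit subregister tnum,
	 * i.e. ((<<(>>var_off)) | var32_off).
	 */
	static struct tnum sketch_bounds_32_into_64(struct tnum var64_off,
						    struct tnum var32_off)
	{
		return tnum_or(tnum_clear_subreg(var64_off),
			       tnum_subreg(var32_off));
	}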
// SPDX-License-Identifier: GPL-2.0-only
/* tnum: tracked (or tristate) numbers
 *
 * A tnum tracks knowledge about the bits of a value. Each bit can be either
 * known (0 or 1), or unknown (x). Arithmetic operations on tnums will
 * propagate the unknown bits such that the tnum result represents all the
 * possible results for possible values of the operands.
 */
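/* For example, a tnum with value 0x8 and mask 0x3 has bit 3 known-1, bits 0-1
 * unknown and every other bit known-0, so it stands for exactly the set
 * {8, 9, 10, 11}.  A constant has mask 0; tnum_unknown below has mask -1.
 * tnum_strn() below would print this example as (0x8; 0x3).
 */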
#include <linux/kernel.h>
#include <linux/tnum.h>

#define TNUM(_v, _m)	(struct tnum){.value = _v, .mask = _m}
/* A completely unknown value */
const struct tnum tnum_unknown = { .value = 0, .mask = -1 };

struct tnum tnum_const(u64 value)
{
	return TNUM(value, 0);
}

struct tnum tnum_range(u64 min, u64 max)
{
	u64 chi = min ^ max, delta;
	u8 bits = fls64(chi);

	/* special case, needed because 1ULL << 64 is undefined */
	if (bits > 63)
		return tnum_unknown;
	/* e.g. if chi = 4, bits = 3, delta = (1<<3) - 1 = 7.
	 * if chi = 0, bits = 0, delta = (1<<0) - 1 = 0, so we return
	 *  constant min (since min == max).
	 */
	delta = (1ULL << bits) - 1;
	return TNUM(min & ~delta, delta);
}
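
/* Worked example: tnum_range(4, 7) gives chi = 3, bits = 2, delta = 3, so the
 * result has value 0x4 and mask 0x3, i.e. exactly {4, 5, 6, 7}.  The result is
 * an over-approximation in general: tnum_range(3, 5) yields value 0x0 and
 * mask 0x7, which also admits 0, 1, 2, 6 and 7.
 */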

struct tnum tnum_lshift(struct tnum a, u8 shift)
{
	return TNUM(a.value << shift, a.mask << shift);
}

struct tnum tnum_rshift(struct tnum a, u8 shift)
{
	return TNUM(a.value >> shift, a.mask >> shift);
}

struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness)
{
	/* if a.value is negative, arithmetic shifting by minimum shift
	 * will have larger negative offset compared to more shifting.
	 * If a.value is nonnegative, arithmetic shifting by minimum shift
	 * will have larger positive offset compared to more shifting.
	 */
	if (insn_bitness == 32)
		return TNUM((u32)(((s32)a.value) >> min_shift),
			    (u32)(((s32)a.mask) >> min_shift));
	else
		return TNUM((s64)a.value >> min_shift,
			    (s64)a.mask >> min_shift);
}
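
/* Example: with a = tnum_const(-16) and insn_bitness == 64, a shift of 1
 * yields -8 while a shift of 2 yields -4, so shifting by the minimum amount
 * produces the most negative result the caller has to account for.
 */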

struct tnum tnum_add(struct tnum a, struct tnum b)
{
	u64 sm, sv, sigma, chi, mu;

	sm = a.mask + b.mask;		/* sum of the two masks */
	sv = a.value + b.value;		/* sum with all unknown bits at 0 */
	sigma = sm + sv;		/* sum with all unknown bits at 1 */
	chi = sigma ^ sv;		/* bits that differ between the extremes */
	mu = chi | a.mask | b.mask;	/* every bit the result may vary in */
	return TNUM(sv & ~mu, mu);
}

struct tnum tnum_sub(struct tnum a, struct tnum b)
{
	u64 dv, alpha, beta, chi, mu;

	dv = a.value - b.value;		/* difference of the known values */
	alpha = dv + a.mask;		/* largest result (a maximal, b minimal) */
	beta = dv - b.mask;		/* smallest result (a minimal, b maximal) */
	chi = alpha ^ beta;		/* bits that differ between the extremes */
	mu = chi | a.mask | b.mask;	/* every bit the result may vary in */
	return TNUM(dv & ~mu, mu);
}

struct tnum tnum_and(struct tnum a, struct tnum b)
{
	u64 alpha, beta, v;

	alpha = a.value | a.mask;
	beta = b.value | b.mask;
	v = a.value & b.value;
	return TNUM(v, alpha & beta & ~v);
}

struct tnum tnum_or(struct tnum a, struct tnum b)
{
	u64 v, mu;

	v = a.value | b.value;
	mu = a.mask | b.mask;
	return TNUM(v, mu & ~v);
}

struct tnum tnum_xor(struct tnum a, struct tnum b)
{
	u64 v, mu;

	v = a.value ^ b.value;
	mu = a.mask | b.mask;
	return TNUM(v & ~mu, mu);
}

/* half-multiply add: acc += (unknown * mask * value).
 * An intermediate step in the multiply algorithm.
 */
static struct tnum hma(struct tnum acc, u64 value, u64 mask)
{
	while (mask) {
		if (mask & 1)
			acc = tnum_add(acc, TNUM(0, value));
		mask >>= 1;
		value <<= 1;
	}
	return acc;
}

struct tnum tnum_mul(struct tnum a, struct tnum b)
{
	struct tnum acc;
	u64 pi;

	pi = a.value * b.value;
	acc = hma(TNUM(pi, 0), a.mask, b.mask | b.value);
	return hma(acc, b.mask, a.value);
}
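
/* Example: tnum_mul() of the constant 2 (value 0x2, mask 0x0) and a single
 * unknown low bit (value 0x0, mask 0x1): pi = 0, the first hma() call adds
 * nothing because a.mask is 0, and the second call adds the unknown 0x2, so
 * the result has value 0x0 and mask 0x2, i.e. exactly 2 * {0, 1} = {0, 2}.
 */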

/* Note that if a and b disagree - i.e. one has a 'known 1' where the other has
 * a 'known 0' - this will return a 'known 1' for that bit.
 */
struct tnum tnum_intersect(struct tnum a, struct tnum b)
{
	u64 v, mu;

	v = a.value | b.value;
	mu = a.mask & b.mask;
	return TNUM(v & ~mu, mu);
}
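
/* Example: intersecting the constant 0x13 (value 0x13, mask 0x0) with "low
 * byte unknown" (value 0x0, mask 0xff) yields value 0x13 and mask 0x0: the
 * known bits of both operands are kept, and only bits unknown in both stay
 * unknown.
 */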

struct tnum tnum_cast(struct tnum a, u8 size)
{
	a.value &= (1ULL << (size * 8)) - 1;
	a.mask &= (1ULL << (size * 8)) - 1;
	return a;
}

bool tnum_is_aligned(struct tnum a, u64 size)
{
	if (!size)
		return true;
	return !((a.value | a.mask) & (size - 1));
}

/* Returns true if @b represents a subset of @a, i.e. every value @b can
 * stand for is also a possible value of @a.
 */
bool tnum_in(struct tnum a, struct tnum b)
{
	if (b.mask & ~a.mask)
		return false;
	b.value &= ~a.mask;
	return a.value == b.value;
}

int tnum_strn(char *str, size_t size, struct tnum a)
{
	return snprintf(str, size, "(%#llx; %#llx)", a.value, a.mask);
}
EXPORT_SYMBOL_GPL(tnum_strn);

int tnum_sbin(char *str, size_t size, struct tnum a)
{
	size_t n;

	for (n = 64; n; n--) {
		if (n < size) {
			if (a.mask & 1)
				str[n - 1] = 'x';
			else if (a.value & 1)
				str[n - 1] = '1';
			else
				str[n - 1] = '0';
		}
		a.mask >>= 1;
		a.value >>= 1;
	}
	str[min(size - 1, (size_t)64)] = 0;
	return 64;
}

/* Return only the lower 32 bits of @a (the 32-bit subregister view) */
struct tnum tnum_subreg(struct tnum a)
{
	return tnum_cast(a, 4);
}

/* Return @a with its lower 32 bits cleared, keeping only the upper half */
struct tnum tnum_clear_subreg(struct tnum a)
{
	return tnum_lshift(tnum_rshift(a, 32), 32);
}

/* Return @a with its lower 32 bits replaced by the known constant @value */
struct tnum tnum_const_subreg(struct tnum a, u32 value)
{
	return tnum_or(tnum_clear_subreg(a), tnum_const(value));
}