bpf: verifier: Allocate idmap scratch in verifier env
func_states_equal makes a very short lived allocation for idmap,
probably because it's too large to fit on the stack. However the
function is called quite often, leading to a lot of alloc / free
churn. Replace the temporary allocation with dedicated scratch space
in struct bpf_verifier_env.

Signed-off-by: Lorenz Bauer <lmb@cloudflare.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Edward Cree <ecree.xilinx@gmail.com>
Link: https://lore.kernel.org/bpf/20210429134656.122225-4-lmb@cloudflare.com
commit c9e73e3d2b
parent 06ab6a5055
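Before the diff, it may help to see the pattern in isolation: a small fixed-size table that used to be kcalloc'd and kfree'd on every comparison is moved into the long-lived verifier environment and simply cleared with memset at the top of each use. Below is a minimal userspace C sketch of that idea; the names ID_MAP_SLOTS, struct analysis_env, check_id_mapping and states_roughly_equal are illustrative stand-ins, not the kernel's (the real change follows in the diff).

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Illustrative size; the kernel uses MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE. */
#define ID_MAP_SLOTS 16

struct id_pair {
	uint32_t old;
	uint32_t cur;
};

/* Long-lived per-analysis context: the scratch table is embedded here once
 * instead of being allocated and freed on every call to the hot function. */
struct analysis_env {
	struct id_pair idmap_scratch[ID_MAP_SLOTS];
	/* ... other long-lived state ... */
};

/* Require a consistent old->cur id mapping, using the preallocated
 * scratch table in env instead of a per-call allocation. */
static bool check_id_mapping(struct analysis_env *env, uint32_t old_id, uint32_t cur_id)
{
	for (size_t i = 0; i < ID_MAP_SLOTS; i++) {
		if (!env->idmap_scratch[i].old) {
			/* Empty slot: first time we see this old id, record the pair. */
			env->idmap_scratch[i].old = old_id;
			env->idmap_scratch[i].cur = cur_id;
			return true;
		}
		if (env->idmap_scratch[i].old == old_id)
			return env->idmap_scratch[i].cur == cur_id;
	}
	return false; /* table full */
}

/* Hot-path comparison.  Before the change this would calloc() and free()
 * the id table on every call; now it just clears the embedded scratch table. */
static bool states_roughly_equal(struct analysis_env *env,
				 const uint32_t *old_ids,
				 const uint32_t *cur_ids, size_t n)
{
	/* One memset per comparison replaces one allocation + free per comparison. */
	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));

	for (size_t i = 0; i < n; i++)
		/* id 0 means "no id tracked" and matches anything. */
		if (old_ids[i] && !check_id_mapping(env, old_ids[i], cur_ids[i]))
			return false;
	return true;
}

The trade-off is that the scratch table's memory is now held for the lifetime of the env rather than for a single call, which is acceptable here because there is one env per verification run and the table is small and bounded.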
include/linux/bpf_verifier.h:

@@ -215,6 +215,13 @@ struct bpf_idx_pair {
 	u32 idx;
 };
 
+struct bpf_id_pair {
+	u32 old;
+	u32 cur;
+};
+
+/* Maximum number of register states that can exist at once */
+#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
 #define MAX_CALL_FRAMES 8
 struct bpf_verifier_state {
 	/* call stack tracking */
@@ -418,6 +425,7 @@ struct bpf_verifier_env {
 	const struct bpf_line_info *prev_linfo;
 	struct bpf_verifier_log log;
 	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
+	struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
 	struct {
 		int *insn_state;
 		int *insn_stack;
kernel/bpf/verifier.c:

@@ -9748,13 +9748,6 @@ static bool range_within(struct bpf_reg_state *old,
 	       old->s32_max_value >= cur->s32_max_value;
 }
 
-/* Maximum number of register states that can exist at once */
-#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
-struct idpair {
-	u32 old;
-	u32 cur;
-};
-
 /* If in the old state two registers had the same id, then they need to have
  * the same id in the new state as well. But that id could be different from
  * the old state, so we need to track the mapping from old to new ids.
@@ -9765,11 +9758,11 @@ struct idpair {
  * So we look through our idmap to see if this old id has been seen before. If
  * so, we require the new id to match; otherwise, we add the id pair to the map.
  */
-static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
+static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
 {
 	unsigned int i;
 
-	for (i = 0; i < ID_MAP_SIZE; i++) {
+	for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
 		if (!idmap[i].old) {
 			/* Reached an empty slot; haven't seen this id before */
 			idmap[i].old = old_id;
@@ -9882,7 +9875,7 @@ next:
 
 /* Returns true if (rold safe implies rcur safe) */
 static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
-		    struct idpair *idmap)
+		    struct bpf_id_pair *idmap)
 {
 	bool equal;
 
@@ -10000,7 +9993,7 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 
 static bool stacksafe(struct bpf_func_state *old,
 		      struct bpf_func_state *cur,
-		      struct idpair *idmap)
+		      struct bpf_id_pair *idmap)
 {
 	int i, spi;
 
@@ -10097,32 +10090,23 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
  * whereas register type in current state is meaningful, it means that
  * the current state will reach 'bpf_exit' instruction safely
  */
-static bool func_states_equal(struct bpf_func_state *old,
+static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
 			      struct bpf_func_state *cur)
 {
-	struct idpair *idmap;
-	bool ret = false;
 	int i;
 
-	idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
-	/* If we failed to allocate the idmap, just say it's not safe */
-	if (!idmap)
+	memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
+	for (i = 0; i < MAX_BPF_REG; i++)
+		if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch))
+			return false;
+
+	if (!stacksafe(old, cur, env->idmap_scratch))
 		return false;
 
-	for (i = 0; i < MAX_BPF_REG; i++) {
-		if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
-			goto out_free;
-	}
-
-	if (!stacksafe(old, cur, idmap))
-		goto out_free;
-
 	if (!refsafe(old, cur))
-		goto out_free;
-	ret = true;
-out_free:
-	kfree(idmap);
-	return ret;
+		return false;
+
+	return true;
 }
 
 static bool states_equal(struct bpf_verifier_env *env,
@@ -10149,7 +10133,7 @@ static bool states_equal(struct bpf_verifier_env *env,
 	for (i = 0; i <= old->curframe; i++) {
 		if (old->frame[i]->callsite != cur->frame[i]->callsite)
 			return false;
-		if (!func_states_equal(old->frame[i], cur->frame[i]))
+		if (!func_states_equal(env, old->frame[i], cur->frame[i]))
 			return false;
 	}
 	return true;