bpf: Rename bpf_{prog,map}_is_dev_bound to is_offloaded
BPF offloading infra will be reused to implement bound-but-not-offloaded
bpf programs. Rename existing helpers for clarity. No functional changes.

Cc: John Fastabend <john.fastabend@gmail.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Martin KaFai Lau <martin.lau@linux.dev>
Cc: Willem de Bruijn <willemb@google.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Anatoly Burakov <anatoly.burakov@intel.com>
Cc: Alexander Lobakin <alexandr.lobakin@intel.com>
Cc: Magnus Karlsson <magnus.karlsson@gmail.com>
Cc: Maryam Tahhan <mtahhan@redhat.com>
Cc: xdp-hints@xdp-project.net
Cc: netdev@vger.kernel.org
Reviewed-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Stanislav Fomichev <sdf@google.com>
Link: https://lore.kernel.org/r/20230119221536.3349901-3-sdf@google.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
commit 9d03ebc71a
parent a4aeb9d656
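The patch is a pure rename of the two feature-test helpers; their bodies are untouched. For quick reference, the enabled variants read as follows after the rename (reproduced from the include/linux/bpf.h hunk below; the !CONFIG_NET stubs simply return false):

	/* Under CONFIG_NET && CONFIG_BPF_SYSCALL, after this patch: */
	static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
	{
		return aux->offload_requested;
	}

	static inline bool bpf_map_is_offloaded(struct bpf_map *map)
	{
		return unlikely(map->ops == &bpf_map_offload_ops);
	}

Callers keep the exact same semantics; only the name now distinguishes "offloaded to hardware" from the upcoming "bound to a device but not offloaded" case.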
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2481,12 +2481,12 @@ void unpriv_ebpf_notify(int new_state);
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
 
-static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux)
+static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux)
 {
 	return aux->offload_requested;
 }
 
-static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+static inline bool bpf_map_is_offloaded(struct bpf_map *map)
 {
 	return unlikely(map->ops == &bpf_map_offload_ops);
 }
@@ -2513,12 +2513,12 @@ static inline int bpf_prog_offload_init(struct bpf_prog *prog,
 	return -EOPNOTSUPP;
 }
 
-static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
+static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux)
 {
 	return false;
 }
 
-static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+static inline bool bpf_map_is_offloaded(struct bpf_map *map)
 {
 	return false;
 }
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -2182,7 +2182,7 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
 	 * valid program, which in this case would simply not
 	 * be JITed, but falls back to the interpreter.
 	 */
-	if (!bpf_prog_is_dev_bound(fp->aux)) {
+	if (!bpf_prog_is_offloaded(fp->aux)) {
 		*err = bpf_prog_alloc_jited_linfo(fp);
 		if (*err)
 			return fp;
@@ -2553,7 +2553,7 @@ static void bpf_prog_free_deferred(struct work_struct *work)
 #endif
 	bpf_free_used_maps(aux);
 	bpf_free_used_btfs(aux);
-	if (bpf_prog_is_dev_bound(aux))
+	if (bpf_prog_is_offloaded(aux))
 		bpf_prog_offload_destroy(aux->prog);
 #ifdef CONFIG_PERF_EVENTS
 	if (aux->prog->has_callchain_buf)
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -549,7 +549,7 @@ static bool __bpf_offload_dev_match(struct bpf_prog *prog,
 	struct bpf_offload_netdev *ondev1, *ondev2;
 	struct bpf_prog_offload *offload;
 
-	if (!bpf_prog_is_dev_bound(prog->aux))
+	if (!bpf_prog_is_offloaded(prog->aux))
 		return false;
 
 	offload = prog->aux->offload;
@@ -581,7 +581,7 @@ bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map)
 	struct bpf_offloaded_map *offmap;
 	bool ret;
 
-	if (!bpf_map_is_dev_bound(map))
+	if (!bpf_map_is_offloaded(map))
 		return bpf_map_offload_neutral(map);
 	offmap = map_to_offmap(map);
 
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -181,7 +181,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct file *map_file,
 	int err;
 
 	/* Need to create a kthread, thus must support schedule */
-	if (bpf_map_is_dev_bound(map)) {
+	if (bpf_map_is_offloaded(map)) {
 		return bpf_map_offload_update_elem(map, key, value, flags);
 	} else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
 		   map->map_type == BPF_MAP_TYPE_STRUCT_OPS) {
@@ -238,7 +238,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
 	void *ptr;
 	int err;
 
-	if (bpf_map_is_dev_bound(map))
+	if (bpf_map_is_offloaded(map))
 		return bpf_map_offload_lookup_elem(map, key, value);
 
 	bpf_disable_instrumentation();
@@ -1483,7 +1483,7 @@ static int map_delete_elem(union bpf_attr *attr, bpfptr_t uattr)
 		goto err_put;
 	}
 
-	if (bpf_map_is_dev_bound(map)) {
+	if (bpf_map_is_offloaded(map)) {
 		err = bpf_map_offload_delete_elem(map, key);
 		goto out;
 	} else if (IS_FD_PROG_ARRAY(map) ||
@@ -1547,7 +1547,7 @@ static int map_get_next_key(union bpf_attr *attr)
 	if (!next_key)
 		goto free_key;
 
-	if (bpf_map_is_dev_bound(map)) {
+	if (bpf_map_is_offloaded(map)) {
 		err = bpf_map_offload_get_next_key(map, key, next_key);
 		goto out;
 	}
@@ -1605,7 +1605,7 @@ int generic_map_delete_batch(struct bpf_map *map,
 				   map->key_size))
 			break;
 
-		if (bpf_map_is_dev_bound(map)) {
+		if (bpf_map_is_offloaded(map)) {
 			err = bpf_map_offload_delete_elem(map, key);
 			break;
 		}
@@ -1851,7 +1851,7 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
 		   map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 		   map->map_type == BPF_MAP_TYPE_LRU_HASH ||
 		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
-		if (!bpf_map_is_dev_bound(map)) {
+		if (!bpf_map_is_offloaded(map)) {
 			bpf_disable_instrumentation();
 			rcu_read_lock();
 			err = map->ops->map_lookup_and_delete_elem(map, key, value, attr->flags);
@@ -1944,7 +1944,7 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
 	if (!ops)
 		return -EINVAL;
 
-	if (!bpf_prog_is_dev_bound(prog->aux))
+	if (!bpf_prog_is_offloaded(prog->aux))
 		prog->aux->ops = ops;
 	else
 		prog->aux->ops = &bpf_offload_prog_ops;
@@ -2255,7 +2255,7 @@ bool bpf_prog_get_ok(struct bpf_prog *prog,
 
 	if (prog->type != *attach_type)
 		return false;
-	if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
+	if (bpf_prog_is_offloaded(prog->aux) && !attach_drv)
 		return false;
 
 	return true;
@@ -2598,7 +2598,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
 	atomic64_set(&prog->aux->refcnt, 1);
 	prog->gpl_compatible = is_gpl ? 1 : 0;
 
-	if (bpf_prog_is_dev_bound(prog->aux)) {
+	if (bpf_prog_is_offloaded(prog->aux)) {
 		err = bpf_prog_offload_init(prog, attr);
 		if (err)
 			goto free_prog_sec;
@@ -3997,7 +3997,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
 			return -EFAULT;
 	}
 
-	if (bpf_prog_is_dev_bound(prog->aux)) {
+	if (bpf_prog_is_offloaded(prog->aux)) {
 		err = bpf_prog_offload_info_fill(&info, prog);
 		if (err)
 			return err;
@@ -4225,7 +4225,7 @@ static int bpf_map_get_info_by_fd(struct file *file,
 	}
 	info.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
 
-	if (bpf_map_is_dev_bound(map)) {
+	if (bpf_map_is_offloaded(map)) {
 		err = bpf_map_offload_info_fill(&info, map);
 		if (err)
 			return err;
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -14099,7 +14099,7 @@ static int do_check(struct bpf_verifier_env *env)
 			env->prev_log_len = env->log.len_used;
 		}
 
-		if (bpf_prog_is_dev_bound(env->prog->aux)) {
+		if (bpf_prog_is_offloaded(env->prog->aux)) {
 			err = bpf_prog_offload_verify_insn(env, env->insn_idx,
 							   env->prev_insn_idx);
 			if (err)
@@ -14579,7 +14579,7 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
 		}
 	}
 
-	if ((bpf_prog_is_dev_bound(prog->aux) || bpf_map_is_dev_bound(map)) &&
+	if ((bpf_prog_is_offloaded(prog->aux) || bpf_map_is_offloaded(map)) &&
 	    !bpf_offload_prog_map_match(prog, map)) {
 		verbose(env, "offload device mismatch between prog and map\n");
 		return -EINVAL;
@@ -15060,7 +15060,7 @@ static int verifier_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt)
 	unsigned int orig_prog_len = env->prog->len;
 	int err;
 
-	if (bpf_prog_is_dev_bound(env->prog->aux))
+	if (bpf_prog_is_offloaded(env->prog->aux))
 		bpf_prog_offload_remove_insns(env, off, cnt);
 
 	err = bpf_remove_insns(env->prog, off, cnt);
@@ -15141,7 +15141,7 @@ static void opt_hard_wire_dead_code_branches(struct bpf_verifier_env *env)
 		else
 			continue;
 
-		if (bpf_prog_is_dev_bound(env->prog->aux))
+		if (bpf_prog_is_offloaded(env->prog->aux))
 			bpf_prog_offload_replace_insn(env, i, &ja);
 
 		memcpy(insn, &ja, sizeof(ja));
@@ -15328,7 +15328,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 		}
 	}
 
-	if (bpf_prog_is_dev_bound(env->prog->aux))
+	if (bpf_prog_is_offloaded(env->prog->aux))
 		return 0;
 
 	insn = env->prog->insnsi + delta;
@@ -15728,7 +15728,7 @@ static int fixup_call_args(struct bpf_verifier_env *env)
 	int err = 0;
 
 	if (env->prog->jit_requested &&
-	    !bpf_prog_is_dev_bound(env->prog->aux)) {
+	    !bpf_prog_is_offloaded(env->prog->aux)) {
 		err = jit_subprogs(env);
 		if (err == 0)
 			return 0;
@@ -17231,7 +17231,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
 	if (ret < 0)
 		goto skip_full_check;
 
-	if (bpf_prog_is_dev_bound(env->prog->aux)) {
+	if (bpf_prog_is_offloaded(env->prog->aux)) {
 		ret = bpf_prog_offload_verifier_prep(env->prog);
 		if (ret)
 			goto skip_full_check;
@@ -17244,7 +17244,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
 	ret = do_check_subprogs(env);
 	ret = ret ?: do_check_main(env);
 
-	if (ret == 0 && bpf_prog_is_dev_bound(env->prog->aux))
+	if (ret == 0 && bpf_prog_is_offloaded(env->prog->aux))
 		ret = bpf_prog_offload_finalize(env);
 
 skip_full_check:
@@ -17279,7 +17279,7 @@ skip_full_check:
 	/* do 32-bit optimization after insn patching has done so those patched
 	 * insns could be handled correctly.
 	 */
-	if (ret == 0 && !bpf_prog_is_dev_bound(env->prog->aux)) {
+	if (ret == 0 && !bpf_prog_is_offloaded(env->prog->aux)) {
 		ret = opt_subreg_zext_lo32_rnd_hi32(env, attr);
 		env->prog->aux->verifier_zext = bpf_jit_needs_zext() ? !ret
 								     : false;
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -9224,8 +9224,8 @@ static int dev_xdp_attach(struct net_device *dev, struct netlink_ext_ack *extack
 			NL_SET_ERR_MSG(extack, "Native and generic XDP can't be active at the same time");
 			return -EEXIST;
 		}
-		if (!offload && bpf_prog_is_dev_bound(new_prog->aux)) {
-			NL_SET_ERR_MSG(extack, "Using device-bound program without HW_MODE flag is not supported");
+		if (!offload && bpf_prog_is_offloaded(new_prog->aux)) {
+			NL_SET_ERR_MSG(extack, "Using offloaded program without HW_MODE flag is not supported");
 			return -EINVAL;
 		}
 		if (new_prog->expected_attach_type == BPF_XDP_DEVMAP) {
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8760,7 +8760,7 @@ static bool xdp_is_valid_access(int off, int size,
 	}
 
 	if (type == BPF_WRITE) {
-		if (bpf_prog_is_dev_bound(prog->aux)) {
+		if (bpf_prog_is_offloaded(prog->aux)) {
 			switch (off) {
 			case offsetof(struct xdp_md, rx_queue_index):
 				return __is_valid_xdp_access(off, size);