Merge branch 'Support kernel module function calls from eBPF'
Kumar Kartikeya says:

====================

This set enables kernel module function calls, and also modifies verifier logic
to permit invalid kernel function calls as long as they are pruned as part of
dead code elimination. This is done to provide better runtime portability for
BPF objects, which can conditionally disable parts of code that are pruned
later by the verifier (e.g. const volatile vars, kconfig options). libbpf
modifications are made along with kernel changes to support module function
calls. It also converts TCP congestion control objects to use the module kfunc
support instead of relying on IS_BUILTIN ifdef.

Changelog:
----------
v6 -> v7
v6: https://lore.kernel.org/bpf/20210930062948.1843919-1-memxor@gmail.com

 * Let __bpf_check_kfunc_call take kfunc_btf_id_list instead of generating
   callbacks (Andrii)
 * Rename it to bpf_check_mod_kfunc_call to reflect usage
 * Remove OOM checks (Alexei)
 * Remove resolve_btfids invocation for bpf_testmod (Andrii)
 * Move fd_array_cnt initialization near fd_array alloc (Andrii)
 * Rename helper to btf_find_by_name_kind and pass start_id (Andrii)
 * memset when data is NULL in add_data (Alexei)
 * Fix other nits

v5 -> v6
v5: https://lore.kernel.org/bpf/20210927145941.1383001-1-memxor@gmail.com

 * Rework gen_loader relocation emits
   * Only emit bpf_btf_find_by_name_kind call when required (Alexei)
   * Refactor code to emit ksym var and func relo into separate helpers, this
     will be easier to add future weak/typeless ksym support to (for my
     followup)
   * Count references for both ksym var and funcs, and avoid calling helpers
     unless required for both of them. This also means we share fds between
     ksym vars for the module BTFs. Also be careful with this when closing
     BTF fd so that we only close one instance of the fd for each ksym

v4 -> v5
v4: https://lore.kernel.org/bpf/20210920141526.3940002-1-memxor@gmail.com

 * Address comments from Alexei
   * Use reserved fd_array area in loader map instead of creating a new map
   * Drop selftest testing the 256 kfunc limit, however selftest testing reuse
     of BTF fd for same kfunc in gen_loader and libbpf is kept
 * Address comments from Andrii
   * Make --no-fail the default for resolve_btfids, i.e. only fail if we find
     BTF section and cannot process it
   * Use obj->btf_modules array to store index in the fd_array, so that we
     don't have to do any searching to reuse the index, instead only set it
     the first time a module BTF's fd is used
   * Make find_ksym_btf_id to return struct module_btf * in last parameter
   * Improve logging when index becomes bigger than INT16_MAX
   * Add btf__find_by_name_kind_own internal helper to only start searching
     for kfunc ID in module BTF, since find_ksym_btf_id already checks
     vmlinux BTF before iterating over module BTFs.
   * Fix various other nits
 * Fixes for failing selftests on BPF CI
 * Rearrange/cleanup selftests
   * Avoid testing kfunc limit (Alexei)
   * Do test gen_loader and libbpf BTF fd index dedup with 256 calls
   * Move invalid kfunc failure test to verifier selftest
   * Minimize duplication
 * Use consistent bpf_<type>_check_kfunc_call naming for module kfunc callback
 * Since we try to add fd using add_data while we can, cherry pick Alexei's
   patch from CO-RE RFC series to align gen_loader data.

v3 -> v4
v3: https://lore.kernel.org/bpf/20210915050943.679062-1-memxor@gmail.com

 * Address comments from Alexei
   * Drop MAX_BPF_STACK change, instead move map_fd and BTF fd to BPF array
     map and pass fd_array using BPF_PSEUDO_MAP_IDX_VALUE
 * Address comments from Andrii
   * Fix selftest to store to variable for observing function call instead of
     printk and polluting CI logs
 * Drop use of raw_tp for testing, instead reuse classifier based
   prog_test_run
 * Expand selftests to cover more corner cases
 * Misc cleanups

v2 -> v3
v2: https://lore.kernel.org/bpf/20210914123750.460750-1-memxor@gmail.com

 * Fix issues pointed out by Kernel Test Robot
 * Fix find_kfunc_desc to also take offset into consideration when comparing

RFC v1 -> v2
v1: https://lore.kernel.org/bpf/20210830173424.1385796-1-memxor@gmail.com

 * Address comments from Alexei
   * Reuse fd_array instead of introducing kfunc_btf_fds array
   * Take btf and module reference as needed, instead of preloading
   * Add BTF_KIND_FUNC relocation support to gen_loader infrastructure
 * Address comments from Andrii
   * Drop hashmap in libbpf for finding index of existing BTF in fd_array
   * Preserve invalid kfunc calls only when the symbol is weak
   * Adjust verifier selftests
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 32a16f6bfe
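To make the module-side API concrete before the diff: a minimal sketch (not part of this commit) of a module exposing one kfunc through the new registration helpers, modeled on the tcp_bbr/tcp_cubic/tcp_dctcp conversions below. The module and function names are hypothetical; bpf_tcp_ca_kfunc_list is one of the two lists this series defines, so a real module would register with whichever list the target program type's check_kfunc_call callback consults.

/* Hypothetical module kfunc; assumes CONFIG_DEBUG_INFO_BTF_MODULES so that
 * resolve_btfids (run by the Makefile.modfinal change below) can patch the
 * BTF_ID entries against the module's split BTF.
 */
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/init.h>
#include <linux/module.h>

noinline int my_mod_kfunc(int arg)
{
	return arg + 1;
}

BTF_SET_START(my_mod_kfunc_ids)
BTF_ID(func, my_mod_kfunc)
BTF_SET_END(my_mod_kfunc_ids)

static DEFINE_KFUNC_BTF_ID_SET(&my_mod_kfunc_ids, my_mod_kfunc_btf_set);

static int __init my_mod_init(void)
{
	/* make the set visible to the bpf_tcp_ca verifier callback */
	register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &my_mod_kfunc_btf_set);
	return 0;
}

static void __exit my_mod_exit(void)
{
	unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &my_mod_kfunc_btf_set);
}

module_init(my_mod_init);
module_exit(my_mod_exit);
MODULE_LICENSE("GPL");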
include/linux/bpf.h

@@ -513,7 +513,7 @@ struct bpf_verifier_ops {
 				 const struct btf_type *t, int off, int size,
 				 enum bpf_access_type atype,
 				 u32 *next_btf_id);
-	bool (*check_kfunc_call)(u32 kfunc_btf_id);
+	bool (*check_kfunc_call)(u32 kfunc_btf_id, struct module *owner);
 };
 
 struct bpf_prog_offload_ops {
@@ -877,6 +877,7 @@ struct bpf_prog_aux {
 	void *jit_data; /* JIT specific data. arch dependent */
 	struct bpf_jit_poke_descriptor *poke_tab;
 	struct bpf_kfunc_desc_tab *kfunc_tab;
+	struct bpf_kfunc_btf_tab *kfunc_btf_tab;
 	u32 size_poke_tab;
 	struct bpf_ksym ksym;
 	const struct bpf_prog_ops *ops;
@@ -1639,7 +1640,7 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
 int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
 				const union bpf_attr *kattr,
 				union bpf_attr __user *uattr);
-bool bpf_prog_test_check_kfunc_call(u32 kfunc_id);
+bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner);
 bool btf_ctx_access(int off, int size, enum bpf_access_type type,
 		    const struct bpf_prog *prog,
 		    struct bpf_insn_access_aux *info);
@@ -1860,7 +1861,8 @@ static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
 	return -ENOTSUPP;
 }
 
-static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
+static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id,
+						  struct module *owner)
 {
 	return false;
 }
include/linux/bpf_verifier.h

@@ -527,5 +527,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
 			    const struct bpf_prog *tgt_prog,
 			    u32 btf_id,
 			    struct bpf_attach_target_info *tgt_info);
+void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);
+
 
 #endif /* _LINUX_BPF_VERIFIER_H */
include/linux/bpfptr.h

@@ -3,6 +3,7 @@
 #ifndef _LINUX_BPFPTR_H
 #define _LINUX_BPFPTR_H
 
+#include <linux/mm.h>
 #include <linux/sockptr.h>
 
 typedef sockptr_t bpfptr_t;
include/linux/btf.h

@@ -5,6 +5,7 @@
 #define _LINUX_BTF_H 1
 
 #include <linux/types.h>
+#include <linux/bpfptr.h>
 #include <uapi/linux/btf.h>
 #include <uapi/linux/bpf.h>
 
@@ -238,4 +239,42 @@ static inline const char *btf_name_by_offset(const struct btf *btf,
 }
 #endif
 
+struct kfunc_btf_id_set {
+	struct list_head list;
+	struct btf_id_set *set;
+	struct module *owner;
+};
+
+struct kfunc_btf_id_list;
+
+#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
+			       struct kfunc_btf_id_set *s);
+void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
+				 struct kfunc_btf_id_set *s);
+bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
+			      struct module *owner);
+#else
+static inline void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
+					     struct kfunc_btf_id_set *s)
+{
+}
+static inline void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
+					       struct kfunc_btf_id_set *s)
+{
+}
+static inline bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist,
+					    u32 kfunc_id, struct module *owner)
+{
+	return false;
+}
+#endif
+
+#define DEFINE_KFUNC_BTF_ID_SET(set, name)				\
+	struct kfunc_btf_id_set name = { LIST_HEAD_INIT(name.list), (set), \
+					 THIS_MODULE }
+
+extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
+extern struct kfunc_btf_id_list prog_test_kfunc_list;
+
 #endif
kernel/bpf/btf.c

@@ -6343,3 +6343,58 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
 };
 
 BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct)
+
+/* BTF ID set registration API for modules */
+
+struct kfunc_btf_id_list {
+	struct list_head list;
+	struct mutex mutex;
+};
+
+#ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+
+void register_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
+			       struct kfunc_btf_id_set *s)
+{
+	mutex_lock(&l->mutex);
+	list_add(&s->list, &l->list);
+	mutex_unlock(&l->mutex);
+}
+EXPORT_SYMBOL_GPL(register_kfunc_btf_id_set);
+
+void unregister_kfunc_btf_id_set(struct kfunc_btf_id_list *l,
+				 struct kfunc_btf_id_set *s)
+{
+	mutex_lock(&l->mutex);
+	list_del_init(&s->list);
+	mutex_unlock(&l->mutex);
+}
+EXPORT_SYMBOL_GPL(unregister_kfunc_btf_id_set);
+
+bool bpf_check_mod_kfunc_call(struct kfunc_btf_id_list *klist, u32 kfunc_id,
+			      struct module *owner)
+{
+	struct kfunc_btf_id_set *s;
+
+	if (!owner)
+		return false;
+	mutex_lock(&klist->mutex);
+	list_for_each_entry(s, &klist->list, list) {
+		if (s->owner == owner && btf_id_set_contains(s->set, kfunc_id)) {
+			mutex_unlock(&klist->mutex);
+			return true;
+		}
+	}
+	mutex_unlock(&klist->mutex);
+	return false;
+}
+
+#endif
+
+#define DEFINE_KFUNC_BTF_ID_LIST(name)					\
+	struct kfunc_btf_id_list name = { LIST_HEAD_INIT(name.list),	\
+					  __MUTEX_INITIALIZER(name.mutex) }; \
+	EXPORT_SYMBOL_GPL(name)
+
+DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list);
+DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list);
kernel/bpf/core.c

@@ -32,6 +32,7 @@
 #include <linux/perf_event.h>
 #include <linux/extable.h>
 #include <linux/log2.h>
+#include <linux/bpf_verifier.h>
 
 #include <asm/barrier.h>
 #include <asm/unaligned.h>
@@ -2255,6 +2256,9 @@ static void bpf_prog_free_deferred(struct work_struct *work)
 	int i;
 
 	aux = container_of(work, struct bpf_prog_aux, work);
+#ifdef CONFIG_BPF_SYSCALL
+	bpf_free_kfunc_btf_tab(aux->kfunc_btf_tab);
+#endif
 	bpf_free_used_maps(aux);
 	bpf_free_used_btfs(aux);
 	if (bpf_prog_is_dev_bound(aux))
kernel/bpf/verifier.c

@@ -1640,52 +1640,173 @@ static int add_subprog(struct bpf_verifier_env *env, int off)
 	return env->subprog_cnt - 1;
 }
 
+#define MAX_KFUNC_DESCS 256
+#define MAX_KFUNC_BTFS 256
+
 struct bpf_kfunc_desc {
 	struct btf_func_model func_model;
 	u32 func_id;
 	s32 imm;
+	u16 offset;
+};
+
+struct bpf_kfunc_btf {
+	struct btf *btf;
+	struct module *module;
+	u16 offset;
 };
 
-#define MAX_KFUNC_DESCS 256
 struct bpf_kfunc_desc_tab {
 	struct bpf_kfunc_desc descs[MAX_KFUNC_DESCS];
 	u32 nr_descs;
 };
 
-static int kfunc_desc_cmp_by_id(const void *a, const void *b)
+struct bpf_kfunc_btf_tab {
+	struct bpf_kfunc_btf descs[MAX_KFUNC_BTFS];
+	u32 nr_descs;
+};
+
+static int kfunc_desc_cmp_by_id_off(const void *a, const void *b)
 {
 	const struct bpf_kfunc_desc *d0 = a;
 	const struct bpf_kfunc_desc *d1 = b;
 
 	/* func_id is not greater than BTF_MAX_TYPE */
-	return d0->func_id - d1->func_id;
+	return d0->func_id - d1->func_id ?: d0->offset - d1->offset;
+}
+
+static int kfunc_btf_cmp_by_off(const void *a, const void *b)
+{
+	const struct bpf_kfunc_btf *d0 = a;
+	const struct bpf_kfunc_btf *d1 = b;
+
+	return d0->offset - d1->offset;
 }
 
 static const struct bpf_kfunc_desc *
-find_kfunc_desc(const struct bpf_prog *prog, u32 func_id)
+find_kfunc_desc(const struct bpf_prog *prog, u32 func_id, u16 offset)
 {
 	struct bpf_kfunc_desc desc = {
 		.func_id = func_id,
+		.offset = offset,
 	};
 	struct bpf_kfunc_desc_tab *tab;
 
 	tab = prog->aux->kfunc_tab;
 	return bsearch(&desc, tab->descs, tab->nr_descs,
-		       sizeof(tab->descs[0]), kfunc_desc_cmp_by_id);
+		       sizeof(tab->descs[0]), kfunc_desc_cmp_by_id_off);
 }
 
-static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id)
+static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
+					 s16 offset, struct module **btf_modp)
+{
+	struct bpf_kfunc_btf kf_btf = { .offset = offset };
+	struct bpf_kfunc_btf_tab *tab;
+	struct bpf_kfunc_btf *b;
+	struct module *mod;
+	struct btf *btf;
+	int btf_fd;
+
+	tab = env->prog->aux->kfunc_btf_tab;
+	b = bsearch(&kf_btf, tab->descs, tab->nr_descs,
+		    sizeof(tab->descs[0]), kfunc_btf_cmp_by_off);
+	if (!b) {
+		if (tab->nr_descs == MAX_KFUNC_BTFS) {
+			verbose(env, "too many different module BTFs\n");
+			return ERR_PTR(-E2BIG);
+		}
+
+		if (bpfptr_is_null(env->fd_array)) {
+			verbose(env, "kfunc offset > 0 without fd_array is invalid\n");
+			return ERR_PTR(-EPROTO);
+		}
+
+		if (copy_from_bpfptr_offset(&btf_fd, env->fd_array,
+					    offset * sizeof(btf_fd),
+					    sizeof(btf_fd)))
+			return ERR_PTR(-EFAULT);
+
+		btf = btf_get_by_fd(btf_fd);
+		if (IS_ERR(btf))
+			return btf;
+
+		if (!btf_is_module(btf)) {
+			verbose(env, "BTF fd for kfunc is not a module BTF\n");
+			btf_put(btf);
+			return ERR_PTR(-EINVAL);
+		}
+
+		mod = btf_try_get_module(btf);
+		if (!mod) {
+			btf_put(btf);
+			return ERR_PTR(-ENXIO);
+		}
+
+		b = &tab->descs[tab->nr_descs++];
+		b->btf = btf;
+		b->module = mod;
+		b->offset = offset;
+
+		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
+		     kfunc_btf_cmp_by_off, NULL);
+	}
+	if (btf_modp)
+		*btf_modp = b->module;
+	return b->btf;
+}
+
+void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
+{
+	if (!tab)
+		return;
+
+	while (tab->nr_descs--) {
+		module_put(tab->descs[tab->nr_descs].module);
+		btf_put(tab->descs[tab->nr_descs].btf);
+	}
+	kfree(tab);
+}
+
+static struct btf *find_kfunc_desc_btf(struct bpf_verifier_env *env,
+				       u32 func_id, s16 offset,
+				       struct module **btf_modp)
+{
+	struct btf *kfunc_btf;
+
+	if (offset) {
+		if (offset < 0) {
+			/* In the future, this can be allowed to increase limit
+			 * of fd index into fd_array, interpreted as u16.
+			 */
+			verbose(env, "negative offset disallowed for kernel module function call\n");
+			return ERR_PTR(-EINVAL);
+		}
+
+		kfunc_btf = __find_kfunc_desc_btf(env, offset, btf_modp);
+		if (IS_ERR_OR_NULL(kfunc_btf)) {
+			verbose(env, "cannot find module BTF for func_id %u\n", func_id);
+			return kfunc_btf ?: ERR_PTR(-ENOENT);
+		}
+		return kfunc_btf;
+	}
+	return btf_vmlinux ?: ERR_PTR(-ENOENT);
+}
+
+static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
 {
 	const struct btf_type *func, *func_proto;
+	struct bpf_kfunc_btf_tab *btf_tab;
 	struct bpf_kfunc_desc_tab *tab;
 	struct bpf_prog_aux *prog_aux;
 	struct bpf_kfunc_desc *desc;
 	const char *func_name;
+	struct btf *desc_btf;
 	unsigned long addr;
 	int err;
 
 	prog_aux = env->prog->aux;
 	tab = prog_aux->kfunc_tab;
+	btf_tab = prog_aux->kfunc_btf_tab;
 	if (!tab) {
 		if (!btf_vmlinux) {
 			verbose(env, "calling kernel function is not supported without CONFIG_DEBUG_INFO_BTF\n");
@@ -1713,7 +1834,29 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id)
 		prog_aux->kfunc_tab = tab;
 	}
 
-	if (find_kfunc_desc(env->prog, func_id))
+	/* func_id == 0 is always invalid, but instead of returning an error, be
+	 * conservative and wait until the code elimination pass before returning
+	 * error, so that invalid calls that get pruned out can be in BPF programs
+	 * loaded from userspace. It is also required that offset be untouched
+	 * for such calls.
+	 */
+	if (!func_id && !offset)
+		return 0;
+
+	if (!btf_tab && offset) {
+		btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
+		if (!btf_tab)
+			return -ENOMEM;
+		prog_aux->kfunc_btf_tab = btf_tab;
+	}
+
+	desc_btf = find_kfunc_desc_btf(env, func_id, offset, NULL);
+	if (IS_ERR(desc_btf)) {
+		verbose(env, "failed to find BTF for kernel function\n");
+		return PTR_ERR(desc_btf);
+	}
+
+	if (find_kfunc_desc(env->prog, func_id, offset))
 		return 0;
 
 	if (tab->nr_descs == MAX_KFUNC_DESCS) {
@@ -1721,20 +1864,20 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id)
 		return -E2BIG;
 	}
 
-	func = btf_type_by_id(btf_vmlinux, func_id);
+	func = btf_type_by_id(desc_btf, func_id);
 	if (!func || !btf_type_is_func(func)) {
 		verbose(env, "kernel btf_id %u is not a function\n",
 			func_id);
 		return -EINVAL;
 	}
-	func_proto = btf_type_by_id(btf_vmlinux, func->type);
+	func_proto = btf_type_by_id(desc_btf, func->type);
 	if (!func_proto || !btf_type_is_func_proto(func_proto)) {
 		verbose(env, "kernel function btf_id %u does not have a valid func_proto\n",
 			func_id);
 		return -EINVAL;
 	}
 
-	func_name = btf_name_by_offset(btf_vmlinux, func->name_off);
+	func_name = btf_name_by_offset(desc_btf, func->name_off);
 	addr = kallsyms_lookup_name(func_name);
 	if (!addr) {
 		verbose(env, "cannot find address for kernel function %s\n",
@@ -1745,12 +1888,13 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id)
 	desc = &tab->descs[tab->nr_descs++];
 	desc->func_id = func_id;
 	desc->imm = BPF_CALL_IMM(addr);
-	err = btf_distill_func_proto(&env->log, btf_vmlinux,
+	desc->offset = offset;
+	err = btf_distill_func_proto(&env->log, desc_btf,
 				     func_proto, func_name,
 				     &desc->func_model);
 	if (!err)
 		sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
-		     kfunc_desc_cmp_by_id, NULL);
+		     kfunc_desc_cmp_by_id_off, NULL);
 	return err;
 }
 
@@ -1829,7 +1973,7 @@ static int add_subprog_and_kfunc(struct bpf_verifier_env *env)
 		} else if (bpf_pseudo_call(insn)) {
 			ret = add_subprog(env, i + insn->imm + 1);
 		} else {
-			ret = add_kfunc_call(env, insn->imm);
+			ret = add_kfunc_call(env, insn->imm, insn->off);
 		}
 
 		if (ret < 0)
@@ -2166,12 +2310,17 @@ static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
 static const char *disasm_kfunc_name(void *data, const struct bpf_insn *insn)
 {
 	const struct btf_type *func;
+	struct btf *desc_btf;
 
 	if (insn->src_reg != BPF_PSEUDO_KFUNC_CALL)
 		return NULL;
 
-	func = btf_type_by_id(btf_vmlinux, insn->imm);
-	return btf_name_by_offset(btf_vmlinux, func->name_off);
+	desc_btf = find_kfunc_desc_btf(data, insn->imm, insn->off, NULL);
+	if (IS_ERR(desc_btf))
+		return "<error>";
+
+	func = btf_type_by_id(desc_btf, insn->imm);
+	return btf_name_by_offset(desc_btf, func->name_off);
 }
 
 /* For given verifier state backtrack_insn() is called from the last insn to
@@ -6530,23 +6679,33 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	struct bpf_reg_state *regs = cur_regs(env);
 	const char *func_name, *ptr_type_name;
 	u32 i, nargs, func_id, ptr_type_id;
+	struct module *btf_mod = NULL;
 	const struct btf_param *args;
+	struct btf *desc_btf;
 	int err;
 
+	/* skip for now, but return error when we find this in fixup_kfunc_call */
+	if (!insn->imm)
+		return 0;
+
+	desc_btf = find_kfunc_desc_btf(env, insn->imm, insn->off, &btf_mod);
+	if (IS_ERR(desc_btf))
+		return PTR_ERR(desc_btf);
+
 	func_id = insn->imm;
-	func = btf_type_by_id(btf_vmlinux, func_id);
-	func_name = btf_name_by_offset(btf_vmlinux, func->name_off);
-	func_proto = btf_type_by_id(btf_vmlinux, func->type);
+	func = btf_type_by_id(desc_btf, func_id);
+	func_name = btf_name_by_offset(desc_btf, func->name_off);
+	func_proto = btf_type_by_id(desc_btf, func->type);
 
 	if (!env->ops->check_kfunc_call ||
-	    !env->ops->check_kfunc_call(func_id)) {
+	    !env->ops->check_kfunc_call(func_id, btf_mod)) {
 		verbose(env, "calling kernel function %s is not allowed\n",
 			func_name);
 		return -EACCES;
 	}
 
 	/* Check the arguments */
-	err = btf_check_kfunc_arg_match(env, btf_vmlinux, func_id, regs);
+	err = btf_check_kfunc_arg_match(env, desc_btf, func_id, regs);
 	if (err)
 		return err;
 
@@ -6554,15 +6713,15 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn)
 		mark_reg_not_init(env, regs, caller_saved[i]);
 
 	/* Check return type */
-	t = btf_type_skip_modifiers(btf_vmlinux, func_proto->type, NULL);
+	t = btf_type_skip_modifiers(desc_btf, func_proto->type, NULL);
 	if (btf_type_is_scalar(t)) {
 		mark_reg_unknown(env, regs, BPF_REG_0);
 		mark_btf_func_reg_size(env, BPF_REG_0, t->size);
 	} else if (btf_type_is_ptr(t)) {
-		ptr_type = btf_type_skip_modifiers(btf_vmlinux, t->type,
+		ptr_type = btf_type_skip_modifiers(desc_btf, t->type,
 						   &ptr_type_id);
 		if (!btf_type_is_struct(ptr_type)) {
-			ptr_type_name = btf_name_by_offset(btf_vmlinux,
+			ptr_type_name = btf_name_by_offset(desc_btf,
 							   ptr_type->name_off);
 			verbose(env, "kernel function %s returns pointer type %s %s is not supported\n",
 				func_name, btf_type_str(ptr_type),
@@ -6570,7 +6729,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn)
 			return -EINVAL;
 		}
 		mark_reg_known_zero(env, regs, BPF_REG_0);
-		regs[BPF_REG_0].btf = btf_vmlinux;
+		regs[BPF_REG_0].btf = desc_btf;
 		regs[BPF_REG_0].type = PTR_TO_BTF_ID;
 		regs[BPF_REG_0].btf_id = ptr_type_id;
 		mark_btf_func_reg_size(env, BPF_REG_0, sizeof(void *));
@@ -6581,7 +6740,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	for (i = 0; i < nargs; i++) {
 		u32 regno = i + 1;
 
-		t = btf_type_skip_modifiers(btf_vmlinux, args[i].type, NULL);
+		t = btf_type_skip_modifiers(desc_btf, args[i].type, NULL);
 		if (btf_type_is_ptr(t))
 			mark_btf_func_reg_size(env, regno, sizeof(void *));
 		else
@@ -11121,7 +11280,8 @@ static int do_check(struct bpf_verifier_env *env)
 			env->jmps_processed++;
 			if (opcode == BPF_CALL) {
 				if (BPF_SRC(insn->code) != BPF_K ||
-				    insn->off != 0 ||
+				    (insn->src_reg != BPF_PSEUDO_KFUNC_CALL
+				     && insn->off != 0) ||
 				    (insn->src_reg != BPF_REG_0 &&
 				     insn->src_reg != BPF_PSEUDO_CALL &&
 				     insn->src_reg != BPF_PSEUDO_KFUNC_CALL) ||
@@ -12477,6 +12637,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
 		func[i]->jit_requested = 1;
 		func[i]->aux->kfunc_tab = prog->aux->kfunc_tab;
+		func[i]->aux->kfunc_btf_tab = prog->aux->kfunc_btf_tab;
 		func[i]->aux->linfo = prog->aux->linfo;
 		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
 		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
@@ -12662,10 +12823,15 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env,
 {
 	const struct bpf_kfunc_desc *desc;
 
+	if (!insn->imm) {
+		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
+		return -EINVAL;
+	}
+
 	/* insn->imm has the btf func_id. Replace it with
 	 * an address (relative to __bpf_base_call).
 	 */
-	desc = find_kfunc_desc(env->prog, insn->imm);
+	desc = find_kfunc_desc(env->prog, insn->imm, insn->off);
 	if (!desc) {
 		verbose(env, "verifier internal error: kernel function descriptor not found for func_id %u\n",
 			insn->imm);
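The verifier changes above are what allow an unresolved kfunc call (insn->imm == 0 with insn->off == 0) to survive until the dead code elimination pass. A sketch of the BPF program side, assuming a hypothetical module kfunc declared as a weak __ksym extern; when the module is absent, libbpf leaves imm/off zeroed (see the libbpf.c hunk further down) and the guarded call must be pruned for the load to succeed:

/* Sketch only: bpf_mod_kfunc() is a hypothetical module kfunc. */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

extern int bpf_mod_kfunc(int arg) __ksym __weak;

/* frozen rodata flag set by userspace (e.g. from a kconfig value) before
 * load; when zero, the verifier prunes the call below and the missing
 * kfunc is never rejected
 */
const volatile int use_mod_kfunc;

SEC("classifier")
int prog(struct __sk_buff *skb)
{
	if (use_mod_kfunc)
		return bpf_mod_kfunc(skb->len);
	return 0;
}

char _license[] SEC("license") = "GPL";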
net/bpf/test_run.c

@@ -2,6 +2,7 @@
 /* Copyright (c) 2017 Facebook
  */
 #include <linux/bpf.h>
+#include <linux/btf.h>
 #include <linux/btf_ids.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -241,9 +242,11 @@ BTF_ID(func, bpf_kfunc_call_test2)
 BTF_ID(func, bpf_kfunc_call_test3)
 BTF_SET_END(test_sk_kfunc_ids)
 
-bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
+bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner)
 {
-	return btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id);
+	if (btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id))
+		return true;
+	return bpf_check_mod_kfunc_call(&prog_test_kfunc_list, kfunc_id, owner);
 }
 
 static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
net/ipv4/bpf_tcp_ca.c

@@ -223,41 +223,13 @@ BTF_ID(func, tcp_reno_cong_avoid)
 BTF_ID(func, tcp_reno_undo_cwnd)
 BTF_ID(func, tcp_slow_start)
 BTF_ID(func, tcp_cong_avoid_ai)
-#ifdef CONFIG_X86
-#ifdef CONFIG_DYNAMIC_FTRACE
-#if IS_BUILTIN(CONFIG_TCP_CONG_CUBIC)
-BTF_ID(func, cubictcp_init)
-BTF_ID(func, cubictcp_recalc_ssthresh)
-BTF_ID(func, cubictcp_cong_avoid)
-BTF_ID(func, cubictcp_state)
-BTF_ID(func, cubictcp_cwnd_event)
-BTF_ID(func, cubictcp_acked)
-#endif
-#if IS_BUILTIN(CONFIG_TCP_CONG_DCTCP)
-BTF_ID(func, dctcp_init)
-BTF_ID(func, dctcp_update_alpha)
-BTF_ID(func, dctcp_cwnd_event)
-BTF_ID(func, dctcp_ssthresh)
-BTF_ID(func, dctcp_cwnd_undo)
-BTF_ID(func, dctcp_state)
-#endif
-#if IS_BUILTIN(CONFIG_TCP_CONG_BBR)
-BTF_ID(func, bbr_init)
-BTF_ID(func, bbr_main)
-BTF_ID(func, bbr_sndbuf_expand)
-BTF_ID(func, bbr_undo_cwnd)
-BTF_ID(func, bbr_cwnd_event)
-BTF_ID(func, bbr_ssthresh)
-BTF_ID(func, bbr_min_tso_segs)
-BTF_ID(func, bbr_set_state)
-#endif
-#endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_X86 */
 BTF_SET_END(bpf_tcp_ca_kfunc_ids)
 
-static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id)
+static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id, struct module *owner)
 {
-	return btf_id_set_contains(&bpf_tcp_ca_kfunc_ids, kfunc_btf_id);
+	if (btf_id_set_contains(&bpf_tcp_ca_kfunc_ids, kfunc_btf_id))
+		return true;
+	return bpf_check_mod_kfunc_call(&bpf_tcp_ca_kfunc_list, kfunc_btf_id, owner);
 }
 
 static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
net/ipv4/tcp_bbr.c

@@ -56,6 +56,8 @@
 * otherwise TCP stack falls back to an internal pacing using one high
 * resolution timer per TCP socket and may use more resources.
 */
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
 #include <linux/module.h>
 #include <net/tcp.h>
 #include <linux/inet_diag.h>
@@ -1152,14 +1154,38 @@ static struct tcp_congestion_ops tcp_bbr_cong_ops __read_mostly = {
 	.set_state	= bbr_set_state,
 };
 
+BTF_SET_START(tcp_bbr_kfunc_ids)
+#ifdef CONFIG_X86
+#ifdef CONFIG_DYNAMIC_FTRACE
+BTF_ID(func, bbr_init)
+BTF_ID(func, bbr_main)
+BTF_ID(func, bbr_sndbuf_expand)
+BTF_ID(func, bbr_undo_cwnd)
+BTF_ID(func, bbr_cwnd_event)
+BTF_ID(func, bbr_ssthresh)
+BTF_ID(func, bbr_min_tso_segs)
+BTF_ID(func, bbr_set_state)
+#endif
+#endif
+BTF_SET_END(tcp_bbr_kfunc_ids)
+
+static DEFINE_KFUNC_BTF_ID_SET(&tcp_bbr_kfunc_ids, tcp_bbr_kfunc_btf_set);
+
 static int __init bbr_register(void)
 {
+	int ret;
+
 	BUILD_BUG_ON(sizeof(struct bbr) > ICSK_CA_PRIV_SIZE);
-	return tcp_register_congestion_control(&tcp_bbr_cong_ops);
+	ret = tcp_register_congestion_control(&tcp_bbr_cong_ops);
+	if (ret)
+		return ret;
+	register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_bbr_kfunc_btf_set);
+	return 0;
 }
 
 static void __exit bbr_unregister(void)
 {
+	unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_bbr_kfunc_btf_set);
 	tcp_unregister_congestion_control(&tcp_bbr_cong_ops);
 }
 
net/ipv4/tcp_cubic.c

@@ -25,6 +25,8 @@
 */
 
 #include <linux/mm.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
 #include <linux/module.h>
 #include <linux/math64.h>
 #include <net/tcp.h>
@@ -482,8 +484,25 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
 	.name		= "cubic",
 };
 
+BTF_SET_START(tcp_cubic_kfunc_ids)
+#ifdef CONFIG_X86
+#ifdef CONFIG_DYNAMIC_FTRACE
+BTF_ID(func, cubictcp_init)
+BTF_ID(func, cubictcp_recalc_ssthresh)
+BTF_ID(func, cubictcp_cong_avoid)
+BTF_ID(func, cubictcp_state)
+BTF_ID(func, cubictcp_cwnd_event)
+BTF_ID(func, cubictcp_acked)
+#endif
+#endif
+BTF_SET_END(tcp_cubic_kfunc_ids)
+
+static DEFINE_KFUNC_BTF_ID_SET(&tcp_cubic_kfunc_ids, tcp_cubic_kfunc_btf_set);
+
 static int __init cubictcp_register(void)
 {
+	int ret;
+
 	BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);
 
 	/* Precompute a bunch of the scaling factors that are used per-packet
@@ -514,11 +533,16 @@ static int __init cubictcp_register(void)
 	/* divide by bic_scale and by constant Srtt (100ms) */
 	do_div(cube_factor, bic_scale * 10);
 
-	return tcp_register_congestion_control(&cubictcp);
+	ret = tcp_register_congestion_control(&cubictcp);
+	if (ret)
+		return ret;
+	register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_cubic_kfunc_btf_set);
+	return 0;
 }
 
 static void __exit cubictcp_unregister(void)
 {
+	unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_cubic_kfunc_btf_set);
 	tcp_unregister_congestion_control(&cubictcp);
 }
 
net/ipv4/tcp_dctcp.c

@@ -36,6 +36,8 @@
 *	Glenn Judd <glenn.judd@morganstanley.com>
 */
 
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <net/tcp.h>
@@ -236,14 +238,36 @@ static struct tcp_congestion_ops dctcp_reno __read_mostly = {
 	.name		= "dctcp-reno",
 };
 
+BTF_SET_START(tcp_dctcp_kfunc_ids)
+#ifdef CONFIG_X86
+#ifdef CONFIG_DYNAMIC_FTRACE
+BTF_ID(func, dctcp_init)
+BTF_ID(func, dctcp_update_alpha)
+BTF_ID(func, dctcp_cwnd_event)
+BTF_ID(func, dctcp_ssthresh)
+BTF_ID(func, dctcp_cwnd_undo)
+BTF_ID(func, dctcp_state)
+#endif
+#endif
+BTF_SET_END(tcp_dctcp_kfunc_ids)
+
+static DEFINE_KFUNC_BTF_ID_SET(&tcp_dctcp_kfunc_ids, tcp_dctcp_kfunc_btf_set);
+
 static int __init dctcp_register(void)
 {
+	int ret;
+
 	BUILD_BUG_ON(sizeof(struct dctcp) > ICSK_CA_PRIV_SIZE);
-	return tcp_register_congestion_control(&dctcp);
+	ret = tcp_register_congestion_control(&dctcp);
+	if (ret)
+		return ret;
+	register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_dctcp_kfunc_btf_set);
+	return 0;
 }
 
 static void __exit dctcp_unregister(void)
 {
+	unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_dctcp_kfunc_btf_set);
 	tcp_unregister_congestion_control(&dctcp);
 }
 
scripts/Makefile.modfinal

@@ -41,6 +41,7 @@ quiet_cmd_btf_ko = BTF [M] $@
       cmd_btf_ko = 							\
	if [ -f vmlinux ]; then						\
		LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J --btf_base vmlinux $@; \
+		$(RESOLVE_BTFIDS) -b vmlinux $@; 			\
	else								\
		printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \
	fi;
tools/bpf/resolve_btfids/main.c

@@ -89,6 +89,7 @@ struct btf_id {
 struct object {
 	const char *path;
 	const char *btf;
+	const char *base_btf_path;
 
 	struct {
 		int	 fd;
@@ -477,16 +478,27 @@ static int symbols_resolve(struct object *obj)
 	int nr_structs  = obj->nr_structs;
 	int nr_unions   = obj->nr_unions;
 	int nr_funcs    = obj->nr_funcs;
+	struct btf *base_btf = NULL;
 	int err, type_id;
 	struct btf *btf;
 	__u32 nr_types;
 
-	btf = btf__parse(obj->btf ?: obj->path, NULL);
+	if (obj->base_btf_path) {
+		base_btf = btf__parse(obj->base_btf_path, NULL);
+		err = libbpf_get_error(base_btf);
+		if (err) {
+			pr_err("FAILED: load base BTF from %s: %s\n",
+			       obj->base_btf_path, strerror(-err));
+			return -1;
+		}
+	}
+
+	btf = btf__parse_split(obj->btf ?: obj->path, base_btf);
 	err = libbpf_get_error(btf);
 	if (err) {
 		pr_err("FAILED: load BTF from %s: %s\n",
			obj->btf ?: obj->path, strerror(-err));
-		return -1;
+		goto out;
 	}
 
 	err = -1;
@@ -545,6 +557,7 @@ static int symbols_resolve(struct object *obj)
 
 	err = 0;
 out:
+	btf__free(base_btf);
 	btf__free(btf);
 	return err;
 }
@@ -678,7 +691,6 @@ static const char * const resolve_btfids_usage[] = {
 
 int main(int argc, const char **argv)
 {
-	bool no_fail = false;
 	struct object obj = {
 		.efile = {
 			.idlist_shndx = -1,
@@ -695,8 +707,8 @@ int main(int argc, const char **argv)
 			   "be more verbose (show errors, etc)"),
 		OPT_STRING(0, "btf", &obj.btf, "BTF data",
 			   "BTF data"),
-		OPT_BOOLEAN(0, "no-fail", &no_fail,
-			   "do not fail if " BTF_IDS_SECTION " section is not found"),
+		OPT_STRING('b', "btf_base", &obj.base_btf_path, "file",
+			   "path of file providing base BTF"),
 		OPT_END()
 	};
 	int err = -1;
@@ -717,10 +729,8 @@ int main(int argc, const char **argv)
 	 */
 	if (obj.efile.idlist_shndx == -1 ||
 	    obj.efile.symbols_shndx == -1) {
-		if (no_fail)
-			return 0;
-		pr_err("FAILED to find needed sections\n");
-		return -1;
+		pr_debug("Cannot find .BTF_ids or symbols sections, nothing to do\n");
+		return 0;
 	}
 
 	if (symbols_collect(&obj))
tools/lib/bpf/bpf.c

@@ -264,6 +264,7 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
 	attr.line_info_rec_size = load_attr->line_info_rec_size;
 	attr.line_info_cnt = load_attr->line_info_cnt;
 	attr.line_info = ptr_to_u64(load_attr->line_info);
+	attr.fd_array = ptr_to_u64(load_attr->fd_array);
 
 	if (load_attr->name)
 		memcpy(attr.prog_name, load_attr->name,
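For reference, the raw kernel ABI that attr.fd_array feeds — a sketch under stated assumptions, not part of this commit: fd_array points at an array of file descriptors, and for a module kfunc call the verifier reads the slot indexed by insn->off, expecting a module BTF fd there. Program contents, the prog type, and slot indices below are illustrative.

#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static __u64 ptr_to_u64(const void *p)
{
	return (__u64)(uintptr_t)p;
}

/* Loads a program whose kfunc call insns use insn->off to index fd_array;
 * e.g. fd_array[off] would hold a module BTF fd from BPF_BTF_GET_FD_BY_ID. */
static int prog_load_with_fd_array(const struct bpf_insn *insns, __u32 insn_cnt,
				   int *fd_array)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SCHED_CLS;
	attr.insns = ptr_to_u64(insns);
	attr.insn_cnt = insn_cnt;
	attr.license = ptr_to_u64("GPL");
	attr.fd_array = ptr_to_u64(fd_array);
	return (int)syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}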
tools/lib/bpf/bpf_gen_internal.h

@@ -7,6 +7,15 @@ struct ksym_relo_desc {
 	const char *name;
 	int kind;
 	int insn_idx;
+	bool is_weak;
+};
+
+struct ksym_desc {
+	const char *name;
+	int ref;
+	int kind;
+	int off;
+	int insn;
 };
 
 struct bpf_gen {
@@ -24,6 +33,10 @@ struct bpf_gen {
 	int relo_cnt;
 	char attach_target[128];
 	int attach_kind;
+	struct ksym_desc *ksyms;
+	__u32 nr_ksyms;
+	int fd_array;
+	int nr_fd_array;
 };
 
 void bpf_gen__init(struct bpf_gen *gen, int log_level);
@@ -36,6 +49,7 @@ void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_a
 void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
 void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
 void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
-void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind, int insn_idx);
+void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak, int kind,
+			    int insn_idx);
 
 #endif
tools/lib/bpf/btf.c

@@ -695,15 +695,15 @@ __s32 btf__find_by_name(const struct btf *btf, const char *type_name)
 	return libbpf_err(-ENOENT);
 }
 
-__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
-			     __u32 kind)
+static __s32 btf_find_by_name_kind(const struct btf *btf, int start_id,
+				   const char *type_name, __u32 kind)
 {
 	__u32 i, nr_types = btf__get_nr_types(btf);
 
 	if (kind == BTF_KIND_UNKN || !strcmp(type_name, "void"))
 		return 0;
 
-	for (i = 1; i <= nr_types; i++) {
+	for (i = start_id; i <= nr_types; i++) {
 		const struct btf_type *t = btf__type_by_id(btf, i);
 		const char *name;
 
@@ -717,6 +717,18 @@ __s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
 	return libbpf_err(-ENOENT);
 }
 
+__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
+				 __u32 kind)
+{
+	return btf_find_by_name_kind(btf, btf->start_id, type_name, kind);
+}
+
+__s32 btf__find_by_name_kind(const struct btf *btf, const char *type_name,
+			     __u32 kind)
+{
+	return btf_find_by_name_kind(btf, 1, type_name, kind);
+}
+
 static bool btf_is_modifiable(const struct btf *btf)
 {
 	return (void *)btf->hdr != btf->raw_data;
tools/lib/bpf/gen_loader.c

@@ -14,8 +14,10 @@
 #include "bpf_gen_internal.h"
 #include "skel_internal.h"
 
-#define MAX_USED_MAPS 64
-#define MAX_USED_PROGS 32
+#define MAX_USED_MAPS	64
+#define MAX_USED_PROGS	32
+#define MAX_KFUNC_DESCS 256
+#define MAX_FD_ARRAY_SZ (MAX_USED_PROGS + MAX_KFUNC_DESCS)
 
 /* The following structure describes the stack layout of the loader program.
 * In addition R6 contains the pointer to context.
@@ -30,7 +32,6 @@
 */
 struct loader_stack {
 	__u32 btf_fd;
-	__u32 map_fd[MAX_USED_MAPS];
 	__u32 prog_fd[MAX_USED_PROGS];
 	__u32 inner_map_fd;
 };
@@ -143,13 +144,49 @@ static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
 	if (realloc_data_buf(gen, size8))
 		return 0;
 	prev = gen->data_cur;
-	memcpy(gen->data_cur, data, size);
-	gen->data_cur += size;
-	memcpy(gen->data_cur, &zero, size8 - size);
-	gen->data_cur += size8 - size;
+	if (data) {
+		memcpy(gen->data_cur, data, size);
+		memcpy(gen->data_cur + size, &zero, size8 - size);
+	} else {
+		memset(gen->data_cur, 0, size8);
+	}
+	gen->data_cur += size8;
 	return prev - gen->data_start;
 }
 
+/* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative
 * to start of fd_array. Caller can decide if it is usable or not.
 */
+static int add_map_fd(struct bpf_gen *gen)
+{
+	if (!gen->fd_array)
+		gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
+	if (gen->nr_maps == MAX_USED_MAPS) {
+		pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
+		gen->error = -E2BIG;
+		return 0;
+	}
+	return gen->nr_maps++;
+}
+
+static int add_kfunc_btf_fd(struct bpf_gen *gen)
+{
+	int cur;
+
+	if (!gen->fd_array)
+		gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
+	if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
+		cur = add_data(gen, NULL, sizeof(int));
+		return (cur - gen->fd_array) / sizeof(int);
+	}
+	return MAX_USED_MAPS + gen->nr_fd_array++;
+}
+
+static int blob_fd_array_off(struct bpf_gen *gen, int index)
+{
+	return gen->fd_array + index * sizeof(int);
+}
+
 static int insn_bytes_to_bpf_size(__u32 sz)
 {
 	switch (sz) {
@@ -171,14 +208,22 @@ static void emit_rel_store(struct bpf_gen *gen, int off, int data)
 	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
 }
 
-/* *(u64 *)(blob + off) = (u64)(void *)(%sp + stack_off) */
-static void emit_rel_store_sp(struct bpf_gen *gen, int off, int stack_off)
+static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off)
 {
-	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_10));
-	emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, stack_off));
+	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
+	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0));
 	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
-	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
+	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
+}
+
+static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off)
+{
+	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
+	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0));
+	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
 }
 
 static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
@@ -326,11 +371,11 @@ int bpf_gen__finish(struct bpf_gen *gen)
			       offsetof(struct bpf_prog_desc, prog_fd), 4,
			       stack_off(prog_fd[i]));
 	for (i = 0; i < gen->nr_maps; i++)
-		move_stack2ctx(gen,
-			       sizeof(struct bpf_loader_ctx) +
-			       sizeof(struct bpf_map_desc) * i +
-			       offsetof(struct bpf_map_desc, map_fd), 4,
-			       stack_off(map_fd[i]));
+		move_blob2ctx(gen,
+			      sizeof(struct bpf_loader_ctx) +
+			      sizeof(struct bpf_map_desc) * i +
+			      offsetof(struct bpf_map_desc, map_fd), 4,
+			      blob_fd_array_off(gen, i));
 	emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
 	emit(gen, BPF_EXIT_INSN());
 	pr_debug("gen: finish %d\n", gen->error);
@@ -390,7 +435,7 @@ void bpf_gen__map_create(struct bpf_gen *gen,
 {
 	int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id);
 	bool close_inner_map_fd = false;
-	int map_create_attr;
+	int map_create_attr, idx;
 	union bpf_attr attr;
 
 	memset(&attr, 0, attr_size);
@@ -467,9 +512,11 @@ void bpf_gen__map_create(struct bpf_gen *gen,
		gen->error = -EDOM; /* internal bug */
		return;
	} else {
-		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
-				      stack_off(map_fd[map_idx])));
-		gen->nr_maps++;
+		/* add_map_fd does gen->nr_maps++ */
+		idx = add_map_fd(gen);
+		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, blob_fd_array_off(gen, idx)));
+		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0));
	}
	if (close_inner_map_fd)
		emit_sys_close_stack(gen, stack_off(inner_map_fd));
@@ -511,8 +558,8 @@ static void emit_find_attach_target(struct bpf_gen *gen)
 	 */
 }
 
-void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind,
-			    int insn_idx)
+void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
+			    int kind, int insn_idx)
 {
 	struct ksym_relo_desc *relo;
 
@@ -524,38 +571,192 @@ void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind,
 	gen->relos = relo;
 	relo += gen->relo_cnt;
 	relo->name = name;
+	relo->is_weak = is_weak;
 	relo->kind = kind;
 	relo->insn_idx = insn_idx;
 	gen->relo_cnt++;
 }
 
-static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
+/* returns existing ksym_desc with ref incremented, or inserts a new one */
+static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
 {
-	int name, insn, len = strlen(relo->name) + 1;
+	struct ksym_desc *kdesc;
 
-	pr_debug("gen: emit_relo: %s at %d\n", relo->name, relo->insn_idx);
-	name = add_data(gen, relo->name, len);
+	for (int i = 0; i < gen->nr_ksyms; i++) {
+		if (!strcmp(gen->ksyms[i].name, relo->name)) {
+			gen->ksyms[i].ref++;
+			return &gen->ksyms[i];
+		}
+	}
+	kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
+	if (!kdesc) {
+		gen->error = -ENOMEM;
+		return NULL;
+	}
+	gen->ksyms = kdesc;
+	kdesc = &gen->ksyms[gen->nr_ksyms++];
+	kdesc->name = relo->name;
+	kdesc->kind = relo->kind;
+	kdesc->ref = 1;
+	kdesc->off = 0;
+	kdesc->insn = 0;
+	return kdesc;
+}
+
+/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 */
+static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo)
+{
+	int name_off, len = strlen(relo->name) + 1;
+
+	name_off = add_data(gen, relo->name, len);
 	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
-					 0, 0, 0, name));
+					 0, 0, 0, name_off));
 	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
 	emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
 	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
 	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
 	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
 	debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
+}
+
+/* Expects:
 * BPF_REG_8 - pointer to instruction
 *
 * We need to reuse BTF fd for same symbol otherwise each relocation takes a new
 * index, while kernel limits total kfunc BTFs to 256. For duplicate symbols,
 * this would mean a new BTF fd index for each entry. By pairing symbol name
 * with index, we get the insn->imm, insn->off pairing that kernel uses for
 * kfunc_tab, which becomes the effective limit even though all of them may
 * share same index in fd_array (such that kfunc_btf_tab has 1 element).
 */
+static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
+{
+	struct ksym_desc *kdesc;
+	int btf_fd_idx;
+
+	kdesc = get_ksym_desc(gen, relo);
+	if (!kdesc)
+		return;
+	/* try to copy from existing bpf_insn */
+	if (kdesc->ref > 1) {
+		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
+			       kdesc->insn + offsetof(struct bpf_insn, imm));
+		move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2,
+			       kdesc->insn + offsetof(struct bpf_insn, off));
+		goto log;
+	}
+	/* remember insn offset, so we can copy BTF ID and FD later */
+	kdesc->insn = insn;
+	emit_bpf_find_by_name_kind(gen, relo);
+	if (!relo->is_weak)
+		emit_check_err(gen);
+	/* get index in fd_array to store BTF FD at */
+	btf_fd_idx = add_kfunc_btf_fd(gen);
+	if (btf_fd_idx > INT16_MAX) {
+		pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n",
+			btf_fd_idx, relo->name);
+		gen->error = -E2BIG;
+		return;
+	}
+	kdesc->off = btf_fd_idx;
+	/* set a default value for imm */
+	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
+	/* skip success case store if ret < 0 */
+	emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 1));
+	/* store btf_id into insn[insn_idx].imm */
+	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
+	/* load fd_array slot pointer */
+	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+					 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
+	/* skip store of BTF fd if ret < 0 */
+	emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 3));
+	/* store BTF fd in slot */
+	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
+	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
+	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
+	/* set a default value for off */
+	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
+	/* skip insn->off store if ret < 0 */
+	emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 2));
+	/* skip if vmlinux BTF */
+	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1));
+	/* store index into insn[insn_idx].off */
+	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
+log:
+	if (!gen->log_level)
+		return;
+	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
+			      offsetof(struct bpf_insn, imm)));
+	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
+			      offsetof(struct bpf_insn, off)));
+	debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d",
+		   relo->name, kdesc->ref);
+	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
+					 0, 0, 0, blob_fd_array_off(gen, kdesc->off)));
+	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0));
+	debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd",
+		   relo->name, kdesc->ref);
+}
+
+/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
+static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
+{
+	struct ksym_desc *kdesc;
+
+	kdesc = get_ksym_desc(gen, relo);
+	if (!kdesc)
+		return;
+	/* try to copy from existing ldimm64 insn */
+	if (kdesc->ref > 1) {
+		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
+			       kdesc->insn + offsetof(struct bpf_insn, imm));
+		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
+			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
+		goto log;
+	}
+	/* remember insn offset, so we can copy BTF ID and FD later */
+	kdesc->insn = insn;
+	emit_bpf_find_by_name_kind(gen, relo);
 	emit_check_err(gen);
 	/* store btf_id into insn[insn_idx].imm */
-	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx +
-		offsetof(struct bpf_insn, imm);
-	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
-					 0, 0, 0, insn));
-	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, 0));
-	if (relo->kind == BTF_KIND_VAR) {
-		/* store btf_obj_fd into insn[insn_idx + 1].imm */
-		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
-		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
-				      sizeof(struct bpf_insn)));
-	}
+	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
+	/* store btf_obj_fd into insn[insn_idx + 1].imm */
+	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
+	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
+			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
+log:
+	if (!gen->log_level)
+		return;
+	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
+			      offsetof(struct bpf_insn, imm)));
+	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
+			      offsetof(struct bpf_insn, imm)));
+	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var (%s:count=%d): imm: %%d, fd: %%d",
+		   relo->name, kdesc->ref);
+}
+
+static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
+{
+	int insn;
+
+	pr_debug("gen: emit_relo (%d): %s at %d\n", relo->kind, relo->name, relo->insn_idx);
+	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
+	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
+	switch (relo->kind) {
+	case BTF_KIND_VAR:
+		emit_relo_ksym_btf(gen, relo, insn);
+		break;
+	case BTF_KIND_FUNC:
+		emit_relo_kfunc_btf(gen, relo, insn);
+		break;
+	default:
+		pr_warn("Unknown relocation kind '%d'\n", relo->kind);
+		gen->error = -EDOM;
+		return;
+	}
 }
 
@@ -571,14 +772,22 @@ static void cleanup_relos(struct bpf_gen *gen, int insns)
 {
 	int i, insn;
 
-	for (i = 0; i < gen->relo_cnt; i++) {
-		if (gen->relos[i].kind != BTF_KIND_VAR)
-			continue;
-		/* close fd recorded in insn[insn_idx + 1].imm */
-		insn = insns +
-			sizeof(struct bpf_insn) * (gen->relos[i].insn_idx + 1) +
-			offsetof(struct bpf_insn, imm);
-		emit_sys_close_blob(gen, insn);
+	for (i = 0; i < gen->nr_ksyms; i++) {
+		if (gen->ksyms[i].kind == BTF_KIND_VAR) {
+			/* close fd recorded in insn[insn_idx + 1].imm */
+			insn = gen->ksyms[i].insn;
+			insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
+			emit_sys_close_blob(gen, insn);
+		} else { /* BTF_KIND_FUNC */
+			emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off));
+			if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ)
+				gen->nr_fd_array--;
+		}
+	}
+	if (gen->nr_ksyms) {
+		free(gen->ksyms);
+		gen->nr_ksyms = 0;
+		gen->ksyms = NULL;
 	}
 	if (gen->relo_cnt) {
 		free(gen->relos);
@@ -637,9 +846,8 @@ void bpf_gen__prog_load(struct bpf_gen *gen,
 	/* populate union bpf_attr with a pointer to line_info */
 	emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);
 
-	/* populate union bpf_attr fd_array with a pointer to stack where map_fds are saved */
-	emit_rel_store_sp(gen, attr_field(prog_load_attr, fd_array),
-			  stack_off(map_fd[0]));
+	/* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */
+	emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array);
 
 	/* populate union bpf_attr with user provided log details */
 	move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
@@ -706,8 +914,8 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
 	emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
 
 	map_update_attr = add_data(gen, &attr, attr_size);
-	move_stack2blob(gen, attr_field(map_update_attr, map_fd), 4,
-			stack_off(map_fd[map_idx]));
+	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
+		       blob_fd_array_off(gen, map_idx));
 	emit_rel_store(gen, attr_field(map_update_attr, key), key);
 	emit_rel_store(gen, attr_field(map_update_attr, value), value);
 	/* emit MAP_UPDATE_ELEM command */
@@ -725,8 +933,8 @@ void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
 	memset(&attr, 0, attr_size);
 	pr_debug("gen: map_freeze: idx %d\n", map_idx);
 	map_freeze_attr = add_data(gen, &attr, attr_size);
-	move_stack2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
-			stack_off(map_fd[map_idx]));
+	move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
+		       blob_fd_array_off(gen, map_idx));
 	/* emit MAP_FREEZE command */
 	emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
 	debug_ret(gen, "map_freeze");
@ -443,6 +443,11 @@ struct extern_desc {
|
||||
|
||||
/* local btf_id of the ksym extern's type. */
|
||||
__u32 type_id;
|
||||
/* BTF fd index to be patched in for insn->off, this is
|
||||
* 0 for vmlinux BTF, index in obj->fd_array for module
|
||||
* BTF
|
||||
*/
|
||||
__s16 btf_fd_idx;
|
||||
} ksym;
|
||||
};
|
||||
};
|
||||
@ -454,6 +459,7 @@ struct module_btf {
|
||||
char *name;
|
||||
__u32 id;
|
||||
int fd;
|
||||
int fd_array_idx;
|
||||
};
|
||||
|
||||
struct bpf_object {
|
||||
@@ -539,6 +545,10 @@ struct bpf_object {
 	void *priv;
 	bpf_object_clear_priv_t clear_priv;
 
+	int *fd_array;
+	size_t fd_array_cap;
+	size_t fd_array_cnt;
+
 	char path[];
 };
 #define obj_elf_valid(o)	((o)->efile.elf)
@@ -3429,11 +3439,6 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
 				return -ENOTSUP;
 			}
 		} else if (strcmp(sec_name, KSYMS_SEC) == 0) {
-			if (btf_is_func(t) && ext->is_weak) {
-				pr_warn("extern weak function %s is unsupported\n",
-					ext->name);
-				return -ENOTSUP;
-			}
 			ksym_sec = sec;
 			ext->type = EXT_KSYM;
 			skip_mods_and_typedefs(obj->btf, t->type,
@@ -5406,7 +5411,13 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
 		case RELO_EXTERN_FUNC:
 			ext = &obj->externs[relo->sym_off];
 			insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
-			insn[0].imm = ext->ksym.kernel_btf_id;
+			if (ext->is_set) {
+				insn[0].imm = ext->ksym.kernel_btf_id;
+				insn[0].off = ext->ksym.btf_fd_idx;
+			} else { /* unresolved weak kfunc */
+				insn[0].imm = 0;
+				insn[0].off = 0;
+			}
 			break;
 		case RELO_SUBPROG_ADDR:
 			if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
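Note: after this relocation, a resolved module kfunc call is shaped roughly
as sketched below (illustrative, not code from the patch): imm carries the
kfunc's BTF type ID and off selects which BTF it resolves against.

	struct bpf_insn kfunc_call = {
		.code    = BPF_JMP | BPF_CALL,
		.src_reg = BPF_PSEUDO_KFUNC_CALL,
		.imm     = ext->ksym.kernel_btf_id,	/* kfunc BTF type ID */
		.off     = ext->ksym.btf_fd_idx,	/* 0: vmlinux BTF, >0: obj->fd_array index */
	};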
@@ -6236,6 +6247,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
 	}
 	load_attr.log_level = prog->log_level;
 	load_attr.prog_flags = prog->prog_flags;
+	load_attr.fd_array = prog->obj->fd_array;
 
 	/* adjust load_attr if sec_def provides custom preload callback */
 	if (prog->sec_def && prog->sec_def->preload_fn) {
@@ -6348,12 +6360,12 @@ static int bpf_program__record_externs(struct bpf_program *prog)
 					ext->name);
 				return -ENOTSUP;
 			}
-			bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_VAR,
-					       relo->insn_idx);
+			bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak,
+					       BTF_KIND_VAR, relo->insn_idx);
 			break;
 		case RELO_EXTERN_FUNC:
-			bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_FUNC,
-					       relo->insn_idx);
+			bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak,
+					       BTF_KIND_FUNC, relo->insn_idx);
 			break;
 		default:
 			continue;
@@ -6752,13 +6764,14 @@ out:
 
 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
 			    __u16 kind, struct btf **res_btf,
-			    int *res_btf_fd)
+			    struct module_btf **res_mod_btf)
 {
-	int i, id, btf_fd, err;
+	struct module_btf *mod_btf;
 	struct btf *btf;
+	int i, id, err;
 
 	btf = obj->btf_vmlinux;
-	btf_fd = 0;
+	mod_btf = NULL;
 	id = btf__find_by_name_kind(btf, ksym_name, kind);
 
 	if (id == -ENOENT) {
@@ -6767,10 +6780,10 @@ static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
 			return err;
 
 		for (i = 0; i < obj->btf_module_cnt; i++) {
-			btf = obj->btf_modules[i].btf;
-			/* we assume module BTF FD is always >0 */
-			btf_fd = obj->btf_modules[i].fd;
-			id = btf__find_by_name_kind(btf, ksym_name, kind);
+			/* we assume module_btf's BTF FD is always >0 */
+			mod_btf = &obj->btf_modules[i];
+			btf = mod_btf->btf;
+			id = btf__find_by_name_kind_own(btf, ksym_name, kind);
 			if (id != -ENOENT)
 				break;
 		}
@@ -6779,7 +6792,7 @@ static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
 		return -ESRCH;
 
 	*res_btf = btf;
-	*res_btf_fd = btf_fd;
+	*res_mod_btf = mod_btf;
 	return id;
 }
 
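Note: a minimal usage sketch of the reworked lookup (the symbol name below is
just an example): vmlinux BTF is tried first via btf__find_by_name_kind(),
then each module BTF via btf__find_by_name_kind_own() so vmlinux types are
not scanned again for every module.

	struct module_btf *mod_btf = NULL;
	struct btf *btf = NULL;
	int id;

	id = find_ksym_btf_id(obj, "bpf_testmod_test_mod_kfunc",
			      BTF_KIND_FUNC, &btf, &mod_btf);
	if (id < 0)
		return id;	/* -ESRCH: not in vmlinux or any module BTF */
	/* mod_btf == NULL here means the symbol was found in vmlinux BTF */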
@@ -6788,14 +6801,15 @@ static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
 {
 	const struct btf_type *targ_var, *targ_type;
 	__u32 targ_type_id, local_type_id;
+	struct module_btf *mod_btf = NULL;
 	const char *targ_var_name;
-	int id, btf_fd = 0, err;
 	struct btf *btf = NULL;
+	int id, err;
 
-	id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &btf_fd);
-	if (id == -ESRCH && ext->is_weak) {
-		return 0;
-	} else if (id < 0) {
+	id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
+	if (id < 0) {
+		if (id == -ESRCH && ext->is_weak)
+			return 0;
 		pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
 			ext->name);
 		return id;
@@ -6827,7 +6841,7 @@ static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
 	}
 
 	ext->is_set = true;
-	ext->ksym.kernel_btf_obj_fd = btf_fd;
+	ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
 	ext->ksym.kernel_btf_id = id;
 	pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
 		 ext->name, id, btf_kind_str(targ_var), targ_var_name);
@@ -6839,26 +6853,22 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
 					   struct extern_desc *ext)
 {
 	int local_func_proto_id, kfunc_proto_id, kfunc_id;
+	struct module_btf *mod_btf = NULL;
 	const struct btf_type *kern_func;
 	struct btf *kern_btf = NULL;
-	int ret, kern_btf_fd = 0;
+	int ret;
 
 	local_func_proto_id = ext->ksym.type_id;
 
-	kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC,
-				    &kern_btf, &kern_btf_fd);
+	kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf);
 	if (kfunc_id < 0) {
-		pr_warn("extern (func ksym) '%s': not found in kernel BTF\n",
+		if (kfunc_id == -ESRCH && ext->is_weak)
+			return 0;
+		pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
 			ext->name);
 		return kfunc_id;
 	}
 
-	if (kern_btf != obj->btf_vmlinux) {
-		pr_warn("extern (func ksym) '%s': function in kernel module is not supported\n",
-			ext->name);
-		return -ENOTSUP;
-	}
-
 	kern_func = btf__type_by_id(kern_btf, kfunc_id);
 	kfunc_proto_id = kern_func->type;
 
@@ -6870,9 +6880,30 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
 		return -EINVAL;
 	}
 
+	/* set index for module BTF fd in fd_array, if unset */
+	if (mod_btf && !mod_btf->fd_array_idx) {
+		/* insn->off is s16 */
+		if (obj->fd_array_cnt == INT16_MAX) {
+			pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
+				ext->name, mod_btf->fd_array_idx);
+			return -E2BIG;
+		}
+		/* Cannot use index 0 for module BTF fd */
+		if (!obj->fd_array_cnt)
+			obj->fd_array_cnt = 1;
+
+		ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
+					obj->fd_array_cnt + 1);
+		if (ret)
+			return ret;
+		mod_btf->fd_array_idx = obj->fd_array_cnt;
+		/* we assume module BTF FD is always >0 */
+		obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
+	}
+
 	ext->is_set = true;
-	ext->ksym.kernel_btf_obj_fd = kern_btf_fd;
 	ext->ksym.kernel_btf_id = kfunc_id;
+	ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
 	pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
 		 ext->name, kfunc_id);
 
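Note: the index reserved above is what the kernel consumes when it sees a
kfunc call with insn->off != 0. A hedged paraphrase of the verifier side
from this series' kernel patches (simplified, not the literal code):

	if (insn->off) {
		int fd = fd_array[insn->off];	/* module BTF fd stored by libbpf */

		btf = btf_get_by_fd(fd);	/* takes a reference on the module BTF */
	} else {
		btf = btf_vmlinux;		/* off == 0 always means vmlinux BTF */
	}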
@@ -7031,6 +7062,9 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
 		err = bpf_gen__finish(obj->gen_loader);
 	}
 
+	/* clean up fd_array */
+	zfree(&obj->fd_array);
+
 	/* clean up module BTFs */
 	for (i = 0; i < obj->btf_module_cnt; i++) {
 		close(obj->btf_modules[i].fd);
tools/lib/bpf/libbpf_internal.h
@@ -298,6 +298,7 @@ struct bpf_prog_load_params {
 	__u32 log_level;
 	char *log_buf;
 	size_t log_buf_sz;
+	int *fd_array;
 };
 
 int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr);
@@ -408,6 +409,8 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct
 int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx);
 int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx);
 int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx);
+__s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name,
+				 __u32 kind);
 
 extern enum libbpf_strict_mode libbpf_mode;
 
tools/testing/selftests/bpf/Makefile
@@ -315,8 +315,9 @@ LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
 		linked_vars.skel.h linked_maps.skel.h
 
 LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
-	test_ksyms_module.c test_ringbuf.c atomics.c trace_printk.c \
-	trace_vprintk.c
+	test_ringbuf.c atomics.c trace_printk.c trace_vprintk.c
+# Generate both light skeleton and libbpf skeleton for these
+LSKELS_EXTRA := test_ksyms_module.c
 SKEL_BLACKLIST += $$(LSKELS)
 
 test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o
@@ -346,7 +347,7 @@ TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS)
 TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h,	\
 				 $$(filter-out $(SKEL_BLACKLIST) $(LINKED_BPF_SRCS),\
 						       $$(TRUNNER_BPF_SRCS)))
-TRUNNER_BPF_LSKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.lskel.h, $$(LSKELS))
+TRUNNER_BPF_LSKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.lskel.h, $$(LSKELS) $$(LSKELS_EXTRA))
 TRUNNER_BPF_SKELS_LINKED := $$(addprefix $$(TRUNNER_OUTPUT)/,$(LINKED_SKELS))
 TEST_GEN_FILES += $$(TRUNNER_BPF_OBJS)
 
@@ -454,7 +455,7 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
 			 | $(TRUNNER_BINARY)-extras
 	$$(call msg,BINARY,,$$@)
 	$(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
-	$(Q)$(RESOLVE_BTFIDS) --no-fail --btf $(TRUNNER_OUTPUT)/btf_data.o $$@
+	$(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.o $$@
 
 endef
 
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2020 Facebook */
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
 #include <linux/error-injection.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -13,6 +15,12 @@
 
 DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123;
 
+noinline void
+bpf_testmod_test_mod_kfunc(int i)
+{
+	*(int *)this_cpu_ptr(&bpf_testmod_ksym_percpu) = i;
+}
+
 noinline int bpf_testmod_loop_test(int n)
 {
 	int i, sum = 0;
@@ -71,13 +79,26 @@ static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = {
 	.write = bpf_testmod_test_write,
 };
 
+BTF_SET_START(bpf_testmod_kfunc_ids)
+BTF_ID(func, bpf_testmod_test_mod_kfunc)
+BTF_SET_END(bpf_testmod_kfunc_ids)
+
+static DEFINE_KFUNC_BTF_ID_SET(&bpf_testmod_kfunc_ids, bpf_testmod_kfunc_btf_set);
+
 static int bpf_testmod_init(void)
 {
-	return sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
+	int ret;
+
+	ret = sysfs_create_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
+	if (ret)
+		return ret;
+	register_kfunc_btf_id_set(&prog_test_kfunc_list, &bpf_testmod_kfunc_btf_set);
+	return 0;
 }
 
 static void bpf_testmod_exit(void)
 {
+	unregister_kfunc_btf_id_set(&prog_test_kfunc_list, &bpf_testmod_kfunc_btf_set);
 	return sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
 }
 
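Note: bpf_testmod above shows the pattern any module would follow to expose
kfuncs; a generic sketch with illustrative names (the my_* identifiers are
not from the patch, and a real module registers against the list matching
its hook, e.g. the prog_test_kfunc_list used above):

	BTF_SET_START(my_kfunc_ids)
	BTF_ID(func, my_module_kfunc)
	BTF_SET_END(my_kfunc_ids)

	static DEFINE_KFUNC_BTF_ID_SET(&my_kfunc_ids, my_kfunc_btf_set);

	/* in module init: */
	register_kfunc_btf_id_set(&prog_test_kfunc_list, &my_kfunc_btf_set);
	/* in module exit: */
	unregister_kfunc_btf_id_set(&prog_test_kfunc_list, &my_kfunc_btf_set);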
tools/testing/selftests/bpf/prog_tests/ksyms_module.c
@@ -2,30 +2,29 @@
 /* Copyright (c) 2021 Facebook */
 
 #include <test_progs.h>
-#include <bpf/libbpf.h>
-#include <bpf/btf.h>
+#include <network_helpers.h>
 #include "test_ksyms_module.lskel.h"
 
-static int duration;
-
 void test_ksyms_module(void)
 {
-	struct test_ksyms_module* skel;
+	struct test_ksyms_module *skel;
+	int retval;
 	int err;
 
-	skel = test_ksyms_module__open_and_load();
-	if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
+	if (!env.has_testmod) {
+		test__skip();
 		return;
+	}
 
-	err = test_ksyms_module__attach(skel);
-	if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
+	skel = test_ksyms_module__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open_and_load"))
+		return;
+	err = bpf_prog_test_run(skel->progs.load.prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
+				NULL, NULL, (__u32 *)&retval, NULL);
+	if (!ASSERT_OK(err, "bpf_prog_test_run"))
 		goto cleanup;
-
-	usleep(1);
-
-	ASSERT_EQ(skel->bss->triggered, true, "triggered");
-	ASSERT_EQ(skel->bss->out_mod_ksym_global, 123, "global_ksym_val");
-
+	ASSERT_EQ(retval, 0, "retval");
+	ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym");
 cleanup:
 	test_ksyms_module__destroy(skel);
 }
tools/testing/selftests/bpf/prog_tests/ksyms_module_libbpf.c (new file)
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "test_ksyms_module.skel.h"
+
+void test_ksyms_module_libbpf(void)
+{
+	struct test_ksyms_module *skel;
+	int retval, err;
+
+	if (!env.has_testmod) {
+		test__skip();
+		return;
+	}
+
+	skel = test_ksyms_module__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "test_ksyms_module__open"))
+		return;
+	err = bpf_prog_test_run(bpf_program__fd(skel->progs.load), 1, &pkt_v4,
+				sizeof(pkt_v4), NULL, NULL, (__u32 *)&retval, NULL);
+	if (!ASSERT_OK(err, "bpf_prog_test_run"))
+		goto cleanup;
+	ASSERT_EQ(retval, 0, "retval");
+	ASSERT_EQ(skel->bss->out_bpf_testmod_ksym, 42, "bpf_testmod_ksym");
+cleanup:
+	test_ksyms_module__destroy(skel);
+}
tools/testing/selftests/bpf/progs/test_ksyms_module.c
@@ -2,24 +2,48 @@
 /* Copyright (c) 2021 Facebook */
 
 #include "vmlinux.h"
 
 #include <bpf/bpf_helpers.h>
 
+#define X_0(x)
+#define X_1(x) x X_0(x)
+#define X_2(x) x X_1(x)
+#define X_3(x) x X_2(x)
+#define X_4(x) x X_3(x)
+#define X_5(x) x X_4(x)
+#define X_6(x) x X_5(x)
+#define X_7(x) x X_6(x)
+#define X_8(x) x X_7(x)
+#define X_9(x) x X_8(x)
+#define X_10(x) x X_9(x)
+#define REPEAT_256(Y) X_2(X_10(X_10(Y))) X_5(X_10(Y)) X_6(Y)
+
 extern const int bpf_testmod_ksym_percpu __ksym;
+extern void bpf_testmod_test_mod_kfunc(int i) __ksym;
+extern void bpf_testmod_invalid_mod_kfunc(void) __ksym __weak;
 
-int out_mod_ksym_global = 0;
-bool triggered = false;
+int out_bpf_testmod_ksym = 0;
+const volatile int x = 0;
 
-SEC("raw_tp/sys_enter")
-int handler(const void *ctx)
+SEC("tc")
+int load(struct __sk_buff *skb)
 {
-	int *val;
-	__u32 cpu;
-
-	val = (int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu);
-	out_mod_ksym_global = *val;
-	triggered = true;
-
+	/* This will be kept by clang, but removed by verifier. Since it is
+	 * marked as __weak, libbpf and gen_loader don't error out if BTF ID
+	 * is not found for it, instead imm and off is set to 0 for it.
+	 */
+	if (x)
+		bpf_testmod_invalid_mod_kfunc();
+	bpf_testmod_test_mod_kfunc(42);
+	out_bpf_testmod_ksym = *(int *)bpf_this_cpu_ptr(&bpf_testmod_ksym_percpu);
+	return 0;
+}
+
+SEC("tc")
+int load_256(struct __sk_buff *skb)
+{
+	/* this will fail if kfunc doesn't reuse its own btf fd index */
+	REPEAT_256(bpf_testmod_test_mod_kfunc(42););
+	bpf_testmod_test_mod_kfunc(42);
 	return 0;
 }
 
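Note: X_n(x) pastes its argument n times, so REPEAT_256(Y), defined as
X_2(X_10(X_10(Y))) X_5(X_10(Y)) X_6(Y), expands to 2*100 + 5*10 + 6 = 256
copies of Y. For example (illustrative):

	X_2(f();)		/* expands to: f(); f(); */
	REPEAT_256(f();)	/* expands to 256 consecutive f(); calls */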
tools/testing/selftests/bpf/verifier/calls.c
@@ -1,3 +1,26 @@
+{
+	"calls: invalid kfunc call not eliminated",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+	},
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	.result = REJECT,
+	.errstr = "invalid kernel function call not eliminated in verifier pass",
+},
+{
+	"calls: invalid kfunc call unreachable",
+	.insns = {
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 0, 2),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+	BPF_MOV64_IMM(BPF_REG_0, 1),
+	BPF_EXIT_INSN(),
+	},
+	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	.result = ACCEPT,
+},
 {
 	"calls: basic sanity",
 	.insns = {
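Note: the two tests above pin down the verifier contract this series
introduces: a kfunc call whose BTF ID was left as 0 (the unresolved
weak-kfunc encoding emitted by libbpf/gen_loader) must either be pruned as
dead code or rejected at load time. A paraphrase of the check whose message
the REJECT test's errstr matches (not the literal kernel code):

	if (insn->imm == 0) {
		verbose(env, "invalid kernel function call not eliminated in verifier pass\n");
		return -EINVAL;
	}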