// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright (C) 2020 Google LLC.
 */

#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/binfmts.h>
#include <linux/lsm_hooks.h>
#include <linux/bpf_lsm.h>
#include <linux/kallsyms.h>
#include <net/bpf_sk_storage.h>
#include <linux/bpf_local_storage.h>
#include <linux/btf_ids.h>
#include <linux/ima.h>
#include <linux/bpf-cgroup.h>

/* For every LSM hook that allows attachment of BPF programs, declare a nop
 * function where a BPF program can be attached.
 */
#define LSM_HOOK(RET, DEFAULT, NAME, ...)	\
noinline RET bpf_lsm_##NAME(__VA_ARGS__)	\
{						\
	return DEFAULT;				\
}

#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK

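/*
 * As an illustration (a sketch, not quoted verbatim from lsm_hook_defs.h):
 * for a hook declared there as
 *
 *	LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
 *
 * the LSM_HOOK() definition above expands to a no-op attachment point:
 *
 *	noinline int bpf_lsm_bprm_check_security(struct linux_binprm *bprm)
 *	{
 *		return 0;
 *	}
 */
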
#define LSM_HOOK(RET, DEFAULT, NAME, ...) BTF_ID(func, bpf_lsm_##NAME)
BTF_SET_START(bpf_lsm_hooks)
#include <linux/lsm_hook_defs.h>
#undef LSM_HOOK
BTF_SET_END(bpf_lsm_hooks)

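/* LSM hooks that BPF LSM programs may not attach to; bpf_lsm_verify_prog()
 * below rejects any program whose attach_btf_id lands in this set.
 */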
BTF_SET_START(bpf_lsm_disabled_hooks)
BTF_ID(func, bpf_lsm_vm_enough_memory)
BTF_ID(func, bpf_lsm_inode_need_killpriv)
BTF_ID(func, bpf_lsm_inode_getsecurity)
BTF_ID(func, bpf_lsm_inode_listsecurity)
BTF_ID(func, bpf_lsm_inode_copy_up_xattr)
BTF_ID(func, bpf_lsm_getselfattr)
BTF_ID(func, bpf_lsm_getprocattr)
BTF_ID(func, bpf_lsm_setprocattr)
#ifdef CONFIG_KEYS
BTF_ID(func, bpf_lsm_key_getsecurity)
#endif
#ifdef CONFIG_AUDIT
BTF_ID(func, bpf_lsm_audit_rule_match)
#endif
BTF_ID(func, bpf_lsm_ismaclabel)
BTF_SET_END(bpf_lsm_disabled_hooks)

/* List of LSM hooks that should operate on 'current' cgroup regardless
 * of function signature.
 */
BTF_SET_START(bpf_lsm_current_hooks)
/* operate on freshly allocated sk without any cgroup association */
#ifdef CONFIG_SECURITY_NETWORK
BTF_ID(func, bpf_lsm_sk_alloc_security)
BTF_ID(func, bpf_lsm_sk_free_security)
#endif
BTF_SET_END(bpf_lsm_current_hooks)

/* List of LSM hooks that trigger while the socket is properly locked. */
BTF_SET_START(bpf_lsm_locked_sockopt_hooks)
#ifdef CONFIG_SECURITY_NETWORK
BTF_ID(func, bpf_lsm_sock_graft)
BTF_ID(func, bpf_lsm_inet_csk_clone)
BTF_ID(func, bpf_lsm_inet_conn_established)
#endif
BTF_SET_END(bpf_lsm_locked_sockopt_hooks)

/* List of LSM hooks that trigger while the socket is _not_ locked,
 * but it's ok to call bpf_{g,s}etsockopt because the socket is still
 * in the early init phase.
 */
BTF_SET_START(bpf_lsm_unlocked_sockopt_hooks)
#ifdef CONFIG_SECURITY_NETWORK
BTF_ID(func, bpf_lsm_socket_post_create)
BTF_ID(func, bpf_lsm_socket_socketpair)
#endif
BTF_SET_END(bpf_lsm_unlocked_sockopt_hooks)

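/*
 * Usage sketch (illustrative only; program and variable names are
 * hypothetical): a BPF_LSM_CGROUP program attached to socket_post_create,
 * one of the "unlocked" hooks above, may tune the new socket with
 * bpf_setsockopt():
 *
 *	SEC("lsm_cgroup/socket_post_create")
 *	int BPF_PROG(set_keepalive, struct socket *sock, int family,
 *		     int type, int protocol, int kern)
 *	{
 *		int one = 1;
 *
 *		if (sock->sk)
 *			bpf_setsockopt(sock->sk, SOL_SOCKET, SO_KEEPALIVE,
 *				       &one, sizeof(one));
 *		return 1;
 *	}
 *
 * where returning 1 follows the cgroup-style convention (allow).
 */
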
#ifdef CONFIG_CGROUP_BPF
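/* Pick the shim that will run a BPF_LSM_CGROUP program for this hook:
 * hooks with no arguments or listed in bpf_lsm_current_hooks resolve the
 * cgroup from 'current'; otherwise the cgroup is taken from the hook's
 * first argument when it is a struct socket or struct sock.
 */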
void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
			      bpf_func_t *bpf_func)
{
	const struct btf_param *args __maybe_unused;

	if (btf_type_vlen(prog->aux->attach_func_proto) < 1 ||
	    btf_id_set_contains(&bpf_lsm_current_hooks,
				prog->aux->attach_btf_id)) {
		*bpf_func = __cgroup_bpf_run_lsm_current;
		return;
	}

#ifdef CONFIG_NET
	args = btf_params(prog->aux->attach_func_proto);

	if (args[0].type == btf_sock_ids[BTF_SOCK_TYPE_SOCKET])
		*bpf_func = __cgroup_bpf_run_lsm_socket;
	else if (args[0].type == btf_sock_ids[BTF_SOCK_TYPE_SOCK])
		*bpf_func = __cgroup_bpf_run_lsm_sock;
	else
#endif
		*bpf_func = __cgroup_bpf_run_lsm_current;
}
#endif

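/* Extra verifier check for LSM attachment: the program must carry a
 * GPL-compatible license and its attach_btf_id must name a known,
 * non-disabled bpf_lsm_*() hook.
 */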
int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
			const struct bpf_prog *prog)
{
	u32 btf_id = prog->aux->attach_btf_id;
	const char *func_name = prog->aux->attach_func_name;

	if (!prog->gpl_compatible) {
		bpf_log(vlog,
			"LSM programs must have a GPL compatible license\n");
		return -EINVAL;
	}

	if (btf_id_set_contains(&bpf_lsm_disabled_hooks, btf_id)) {
		bpf_log(vlog, "attach_btf_id %u points to disabled hook %s\n",
			btf_id, func_name);
		return -EINVAL;
	}

	if (!btf_id_set_contains(&bpf_lsm_hooks, btf_id)) {
		bpf_log(vlog, "attach_btf_id %u points to wrong type name %s\n",
			btf_id, func_name);
		return -EINVAL;
	}

	return 0;
}

/* Mask for all the currently supported BPRM option flags */
#define BPF_F_BRPM_OPTS_MASK	BPF_F_BPRM_SECUREEXEC

BPF_CALL_2(bpf_bprm_opts_set, struct linux_binprm *, bprm, u64, flags)
{
	if (flags & ~BPF_F_BRPM_OPTS_MASK)
		return -EINVAL;

	bprm->secureexec = (flags & BPF_F_BPRM_SECUREEXEC);
	return 0;
}

BTF_ID_LIST_SINGLE(bpf_bprm_opts_set_btf_ids, struct, linux_binprm)

static const struct bpf_func_proto bpf_bprm_opts_set_proto = {
	.func		= bpf_bprm_opts_set,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_bprm_opts_set_btf_ids[0],
	.arg2_type	= ARG_ANYTHING,
};

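/*
 * Usage sketch (illustrative only; the program name is hypothetical): an
 * LSM program attached to a bprm hook can request a secure exec for the
 * binary being loaded via the bpf_bprm_opts_set() helper defined above:
 *
 *	SEC("lsm/bprm_creds_for_exec")
 *	int BPF_PROG(force_secureexec, struct linux_binprm *bprm)
 *	{
 *		bpf_bprm_opts_set(bprm, BPF_F_BPRM_SECUREEXEC);
 *		return 0;
 *	}
 */
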
BPF_CALL_3(bpf_ima_inode_hash, struct inode *, inode, void *, dst, u32, size)
{
	return ima_inode_hash(inode, dst, size);
}

static bool bpf_ima_inode_hash_allowed(const struct bpf_prog *prog)
{
	return bpf_lsm_is_sleepable_hook(prog->aux->attach_btf_id);
}

BTF_ID_LIST_SINGLE(bpf_ima_inode_hash_btf_ids, struct, inode)

static const struct bpf_func_proto bpf_ima_inode_hash_proto = {
	.func		= bpf_ima_inode_hash,
	.gpl_only	= false,
	.might_sleep	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_ima_inode_hash_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.allowed	= bpf_ima_inode_hash_allowed,
};

BPF_CALL_3(bpf_ima_file_hash, struct file *, file, void *, dst, u32, size)
{
	return ima_file_hash(file, dst, size);
}

BTF_ID_LIST_SINGLE(bpf_ima_file_hash_btf_ids, struct, file)

static const struct bpf_func_proto bpf_ima_file_hash_proto = {
	.func		= bpf_ima_file_hash,
	.gpl_only	= false,
	.might_sleep	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &bpf_ima_file_hash_btf_ids[0],
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.allowed	= bpf_ima_inode_hash_allowed,
};

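/*
 * Usage sketch (illustrative only; names and buffer size are hypothetical):
 * both IMA helpers are gated by bpf_ima_inode_hash_allowed(), so they can
 * only be called from sleepable hooks, e.g. a sleepable file_open program:
 *
 *	SEC("lsm.s/file_open")
 *	int BPF_PROG(measure_open, struct file *file)
 *	{
 *		u8 hash[64];
 *
 *		bpf_ima_file_hash(file, hash, sizeof(hash));
 *		return 0;
 *	}
 */
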
BPF_CALL_1(bpf_get_attach_cookie, void *, ctx)
{
	struct bpf_trace_run_ctx *run_ctx;

	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
	return run_ctx->bpf_cookie;
}

static const struct bpf_func_proto bpf_get_attach_cookie_proto = {
	.func		= bpf_get_attach_cookie,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

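/* Resolve which helpers an LSM program may call: cgroup-common helpers
 * first for BPF_LSM_CGROUP programs, then the LSM-specific cases below,
 * falling back to the tracing program helpers for everything else.
 */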
static const struct bpf_func_proto *
bpf_lsm_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	const struct bpf_func_proto *func_proto;

	if (prog->expected_attach_type == BPF_LSM_CGROUP) {
		func_proto = cgroup_common_func_proto(func_id, prog);
		if (func_proto)
			return func_proto;
	}

	switch (func_id) {
	case BPF_FUNC_inode_storage_get:
		return &bpf_inode_storage_get_proto;
	case BPF_FUNC_inode_storage_delete:
		return &bpf_inode_storage_delete_proto;
#ifdef CONFIG_NET
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#endif /* CONFIG_NET */
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_bprm_opts_set:
		return &bpf_bprm_opts_set_proto;
	case BPF_FUNC_ima_inode_hash:
		return &bpf_ima_inode_hash_proto;
	case BPF_FUNC_ima_file_hash:
		return &bpf_ima_file_hash_proto;
	case BPF_FUNC_get_attach_cookie:
		return bpf_prog_has_trampoline(prog) ? &bpf_get_attach_cookie_proto : NULL;
#ifdef CONFIG_NET
	case BPF_FUNC_setsockopt:
		if (prog->expected_attach_type != BPF_LSM_CGROUP)
			return NULL;
		if (btf_id_set_contains(&bpf_lsm_locked_sockopt_hooks,
					prog->aux->attach_btf_id))
			return &bpf_sk_setsockopt_proto;
		if (btf_id_set_contains(&bpf_lsm_unlocked_sockopt_hooks,
					prog->aux->attach_btf_id))
			return &bpf_unlocked_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		if (prog->expected_attach_type != BPF_LSM_CGROUP)
			return NULL;
		if (btf_id_set_contains(&bpf_lsm_locked_sockopt_hooks,
					prog->aux->attach_btf_id))
			return &bpf_sk_getsockopt_proto;
		if (btf_id_set_contains(&bpf_lsm_unlocked_sockopt_hooks,
					prog->aux->attach_btf_id))
			return &bpf_unlocked_sk_getsockopt_proto;
		return NULL;
#endif
	default:
		return tracing_prog_func_proto(func_id, prog);
	}
}

/* The set of hooks which are called with pagefaults enabled and are allowed
 * to "sleep" and thus can be used for sleepable BPF programs.
 */
BTF_SET_START(sleepable_lsm_hooks)
BTF_ID(func, bpf_lsm_bpf)
BTF_ID(func, bpf_lsm_bpf_map)
BTF_ID(func, bpf_lsm_bpf_map_create)
BTF_ID(func, bpf_lsm_bpf_map_free)
BTF_ID(func, bpf_lsm_bpf_prog)
BTF_ID(func, bpf_lsm_bpf_prog_load)
BTF_ID(func, bpf_lsm_bpf_prog_free)
BTF_ID(func, bpf_lsm_bpf_token_create)
BTF_ID(func, bpf_lsm_bpf_token_free)
BTF_ID(func, bpf_lsm_bpf_token_cmd)
BTF_ID(func, bpf_lsm_bpf_token_capable)
BTF_ID(func, bpf_lsm_bprm_check_security)
BTF_ID(func, bpf_lsm_bprm_committed_creds)
BTF_ID(func, bpf_lsm_bprm_committing_creds)
BTF_ID(func, bpf_lsm_bprm_creds_for_exec)
BTF_ID(func, bpf_lsm_bprm_creds_from_file)
BTF_ID(func, bpf_lsm_capget)
BTF_ID(func, bpf_lsm_capset)
BTF_ID(func, bpf_lsm_cred_prepare)
BTF_ID(func, bpf_lsm_file_ioctl)
BTF_ID(func, bpf_lsm_file_lock)
BTF_ID(func, bpf_lsm_file_open)
BTF_ID(func, bpf_lsm_file_post_open)
BTF_ID(func, bpf_lsm_file_receive)

BTF_ID(func, bpf_lsm_inode_create)
BTF_ID(func, bpf_lsm_inode_free_security)
BTF_ID(func, bpf_lsm_inode_getattr)
BTF_ID(func, bpf_lsm_inode_getxattr)
BTF_ID(func, bpf_lsm_inode_mknod)
BTF_ID(func, bpf_lsm_inode_need_killpriv)
BTF_ID(func, bpf_lsm_inode_post_setxattr)
BTF_ID(func, bpf_lsm_inode_readlink)
BTF_ID(func, bpf_lsm_inode_rename)
BTF_ID(func, bpf_lsm_inode_rmdir)
BTF_ID(func, bpf_lsm_inode_setattr)
BTF_ID(func, bpf_lsm_inode_setxattr)
BTF_ID(func, bpf_lsm_inode_symlink)
BTF_ID(func, bpf_lsm_inode_unlink)
BTF_ID(func, bpf_lsm_kernel_module_request)
BTF_ID(func, bpf_lsm_kernel_read_file)
BTF_ID(func, bpf_lsm_kernfs_init_security)

#ifdef CONFIG_SECURITY_PATH
BTF_ID(func, bpf_lsm_path_unlink)
BTF_ID(func, bpf_lsm_path_mkdir)
BTF_ID(func, bpf_lsm_path_rmdir)
BTF_ID(func, bpf_lsm_path_truncate)
BTF_ID(func, bpf_lsm_path_symlink)
BTF_ID(func, bpf_lsm_path_link)
BTF_ID(func, bpf_lsm_path_rename)
BTF_ID(func, bpf_lsm_path_chmod)
BTF_ID(func, bpf_lsm_path_chown)
#endif /* CONFIG_SECURITY_PATH */

BTF_ID(func, bpf_lsm_mmap_file)
BTF_ID(func, bpf_lsm_netlink_send)
BTF_ID(func, bpf_lsm_path_notify)
BTF_ID(func, bpf_lsm_release_secctx)
BTF_ID(func, bpf_lsm_sb_alloc_security)
BTF_ID(func, bpf_lsm_sb_eat_lsm_opts)
BTF_ID(func, bpf_lsm_sb_kern_mount)
BTF_ID(func, bpf_lsm_sb_mount)
BTF_ID(func, bpf_lsm_sb_remount)
BTF_ID(func, bpf_lsm_sb_set_mnt_opts)
BTF_ID(func, bpf_lsm_sb_show_options)
BTF_ID(func, bpf_lsm_sb_statfs)
BTF_ID(func, bpf_lsm_sb_umount)
BTF_ID(func, bpf_lsm_settime)

#ifdef CONFIG_SECURITY_NETWORK
BTF_ID(func, bpf_lsm_inet_conn_established)

BTF_ID(func, bpf_lsm_socket_accept)
BTF_ID(func, bpf_lsm_socket_bind)
BTF_ID(func, bpf_lsm_socket_connect)
BTF_ID(func, bpf_lsm_socket_create)
BTF_ID(func, bpf_lsm_socket_getpeername)
BTF_ID(func, bpf_lsm_socket_getpeersec_dgram)
BTF_ID(func, bpf_lsm_socket_getsockname)
BTF_ID(func, bpf_lsm_socket_getsockopt)
BTF_ID(func, bpf_lsm_socket_listen)
BTF_ID(func, bpf_lsm_socket_post_create)
BTF_ID(func, bpf_lsm_socket_recvmsg)
BTF_ID(func, bpf_lsm_socket_sendmsg)
BTF_ID(func, bpf_lsm_socket_shutdown)
BTF_ID(func, bpf_lsm_socket_socketpair)
#endif /* CONFIG_SECURITY_NETWORK */

BTF_ID(func, bpf_lsm_syslog)
BTF_ID(func, bpf_lsm_task_alloc)
BTF_ID(func, bpf_lsm_current_getsecid_subj)
BTF_ID(func, bpf_lsm_task_getsecid_obj)
BTF_ID(func, bpf_lsm_task_prctl)
BTF_ID(func, bpf_lsm_task_setscheduler)
BTF_ID(func, bpf_lsm_task_to_inode)
BTF_ID(func, bpf_lsm_userns_create)
BTF_SET_END(sleepable_lsm_hooks)

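/* Hooks whose pointer arguments must not be treated as trusted by the
 * verifier; bpf_lsm_is_trusted() below consults this set.
 */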
BTF_SET_START(untrusted_lsm_hooks)
BTF_ID(func, bpf_lsm_bpf_map_free)
BTF_ID(func, bpf_lsm_bpf_prog_free)
BTF_ID(func, bpf_lsm_file_alloc_security)
BTF_ID(func, bpf_lsm_file_free_security)
#ifdef CONFIG_SECURITY_NETWORK
BTF_ID(func, bpf_lsm_sk_alloc_security)
BTF_ID(func, bpf_lsm_sk_free_security)
#endif /* CONFIG_SECURITY_NETWORK */
BTF_ID(func, bpf_lsm_task_free)
BTF_SET_END(untrusted_lsm_hooks)

bool bpf_lsm_is_sleepable_hook(u32 btf_id)
{
	return btf_id_set_contains(&sleepable_lsm_hooks, btf_id);
}

bool bpf_lsm_is_trusted(const struct bpf_prog *prog)
{
	return !btf_id_set_contains(&untrusted_lsm_hooks, prog->aux->attach_btf_id);
}

const struct bpf_prog_ops lsm_prog_ops = {
};

const struct bpf_verifier_ops lsm_verifier_ops = {
	.get_func_proto = bpf_lsm_func_proto,
	.is_valid_access = btf_ctx_access,
};

/* hooks return 0 or 1 */
BTF_SET_START(bool_lsm_hooks)
#ifdef CONFIG_SECURITY_NETWORK_XFRM
BTF_ID(func, bpf_lsm_xfrm_state_pol_flow_match)
#endif
#ifdef CONFIG_AUDIT
BTF_ID(func, bpf_lsm_audit_rule_known)
#endif
BTF_ID(func, bpf_lsm_inode_xattr_skipcap)
BTF_SET_END(bool_lsm_hooks)

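/* Tell the verifier which return values are acceptable for an LSM program:
 * [0, 1] for the boolean hooks above, [-MAX_ERRNO, 0] for every other hook,
 * and -EINVAL when the hook returns void (no range applies).
 */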
int bpf_lsm_get_retval_range(const struct bpf_prog *prog,
			     struct bpf_retval_range *retval_range)
{
	/* no return value range for void hooks */
	if (!prog->aux->attach_func_proto->type)
		return -EINVAL;

	if (btf_id_set_contains(&bool_lsm_hooks, prog->aux->attach_btf_id)) {
		retval_range->minval = 0;
		retval_range->maxval = 1;
	} else {
		/* All other available LSM hooks, except task_prctl, return 0
		 * on success and negative error code on failure.
		 * To keep things simple, we only allow bpf progs to return 0
		 * or negative errno for task_prctl too.
		 */
		retval_range->minval = -MAX_ERRNO;
		retval_range->maxval = 0;
	}
	return 0;
}