bpf: Migrate cgroup_bpf to internal cgroup_bpf_attach_type enum
Add an enum (cgroup_bpf_attach_type) containing only valid cgroup_bpf attach types and a function to map bpf_attach_type values to the new enum. Inspired by netns_bpf_attach_type.

Then, migrate cgroup_bpf to use cgroup_bpf_attach_type wherever possible. Functionality is unchanged as attach_type_to_prog_type switches in bpf/syscall.c were preventing non-cgroup programs from making use of the invalid cgroup_bpf array slots. As a result struct cgroup_bpf uses 504 fewer bytes relative to when its arrays were sized using MAX_BPF_ATTACH_TYPE.

bpf_cgroup_storage is notably not migrated as struct bpf_cgroup_storage_key is part of uapi and contains a bpf_attach_type member which is not meant to be opaque. Similarly, bpf_cgroup_link continues to report its bpf_attach_type member to userspace via fdinfo and bpf_link_info.

To ease disambiguation, bpf_attach_type variables are renamed from 'type' to 'atype' when changed to cgroup_bpf_attach_type.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210819092420.1984861-2-davemarchevsky@fb.com
parent d359902d5c
commit 6fc88c354f
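The mapping function introduced by this patch is the step every cgroup_bpf entry point now goes through before indexing the per-attach-type arrays. The stand-alone sketch below mirrors that idea with only a handful of attach types so it can be compiled and run outside the kernel; the enum subset, the example program, and its printed labels are illustrative and not kernel code, while the names (to_cgroup_bpf_attach_type, CGROUP_ATYPE, CGROUP_BPF_ATTACH_TYPE_INVALID, MAX_CGROUP_BPF_ATTACH_TYPE) follow the patch.

/* Self-contained userspace sketch mirroring the mapping this patch adds.
 * Only a few attach types are shown; the kernel keeps the full list in
 * include/linux/bpf-cgroup.h, and the sparse uapi enum stays unchanged.
 */
#include <stdio.h>

/* Subset of the uapi enum bpf_attach_type (sparse: mixes cgroup and
 * non-cgroup program types). */
enum bpf_attach_type {
	BPF_CGROUP_INET_INGRESS,
	BPF_CGROUP_INET_EGRESS,
	BPF_SK_SKB_STREAM_PARSER,	/* not a cgroup attach type */
	BPF_CGROUP_INET_SOCK_RELEASE,
	MAX_BPF_ATTACH_TYPE
};

/* Dense internal enum: only valid cgroup_bpf attach types, so the per-type
 * arrays in struct cgroup_bpf can be sized MAX_CGROUP_BPF_ATTACH_TYPE. */
enum cgroup_bpf_attach_type {
	CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
	CGROUP_INET_INGRESS = 0,
	CGROUP_INET_EGRESS,
	CGROUP_INET_SOCK_RELEASE,
	MAX_CGROUP_BPF_ATTACH_TYPE
};

#define CGROUP_ATYPE(type) case BPF_##type: return type

static enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	CGROUP_ATYPE(CGROUP_INET_INGRESS);
	CGROUP_ATYPE(CGROUP_INET_EGRESS);
	CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
	default:
		return CGROUP_BPF_ATTACH_TYPE_INVALID;
	}
}

int main(void)
{
	/* Cgroup hooks map to dense indexes; anything else maps to -1,
	 * which is what lets callers bail out with -EINVAL early. */
	printf("INET_EGRESS      -> %d\n",
	       to_cgroup_bpf_attach_type(BPF_CGROUP_INET_EGRESS));
	printf("SK_SKB (invalid) -> %d\n",
	       to_cgroup_bpf_attach_type(BPF_SK_SKB_STREAM_PARSER));
	return 0;
}

The callers changed below follow the same pattern: convert first, return -EINVAL when the result is negative, and only then touch cgrp->bpf.progs[atype], cgrp->bpf.flags[atype] and cgroup_bpf_enabled_key[atype].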
@@ -23,9 +23,73 @@ struct ctl_table_header;
struct task_struct;

#ifdef CONFIG_CGROUP_BPF
+enum cgroup_bpf_attach_type {
+	CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
+	CGROUP_INET_INGRESS = 0,
+	CGROUP_INET_EGRESS,
+	CGROUP_INET_SOCK_CREATE,
+	CGROUP_SOCK_OPS,
+	CGROUP_DEVICE,
+	CGROUP_INET4_BIND,
+	CGROUP_INET6_BIND,
+	CGROUP_INET4_CONNECT,
+	CGROUP_INET6_CONNECT,
+	CGROUP_INET4_POST_BIND,
+	CGROUP_INET6_POST_BIND,
+	CGROUP_UDP4_SENDMSG,
+	CGROUP_UDP6_SENDMSG,
+	CGROUP_SYSCTL,
+	CGROUP_UDP4_RECVMSG,
+	CGROUP_UDP6_RECVMSG,
+	CGROUP_GETSOCKOPT,
+	CGROUP_SETSOCKOPT,
+	CGROUP_INET4_GETPEERNAME,
+	CGROUP_INET6_GETPEERNAME,
+	CGROUP_INET4_GETSOCKNAME,
+	CGROUP_INET6_GETSOCKNAME,
+	CGROUP_INET_SOCK_RELEASE,
+	MAX_CGROUP_BPF_ATTACH_TYPE
+};

-extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE];
-#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type])
+#define CGROUP_ATYPE(type) \
+	case BPF_##type: return type
+
+static inline enum cgroup_bpf_attach_type
+to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
+{
+	switch (attach_type) {
+	CGROUP_ATYPE(CGROUP_INET_INGRESS);
+	CGROUP_ATYPE(CGROUP_INET_EGRESS);
+	CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
+	CGROUP_ATYPE(CGROUP_SOCK_OPS);
+	CGROUP_ATYPE(CGROUP_DEVICE);
+	CGROUP_ATYPE(CGROUP_INET4_BIND);
+	CGROUP_ATYPE(CGROUP_INET6_BIND);
+	CGROUP_ATYPE(CGROUP_INET4_CONNECT);
+	CGROUP_ATYPE(CGROUP_INET6_CONNECT);
+	CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
+	CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
+	CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
+	CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
+	CGROUP_ATYPE(CGROUP_SYSCTL);
+	CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
+	CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
+	CGROUP_ATYPE(CGROUP_GETSOCKOPT);
+	CGROUP_ATYPE(CGROUP_SETSOCKOPT);
+	CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
+	CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
+	CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
+	CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
+	CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
+	default:
+		return CGROUP_BPF_ATTACH_TYPE_INVALID;
+	}
+}
+
+#undef CGROUP_ATYPE
+
+extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
+#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
@@ -67,15 +131,15 @@ struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
-	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];
+	struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE];

	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
-	struct list_head progs[MAX_BPF_ATTACH_TYPE];
-	u32 flags[MAX_BPF_ATTACH_TYPE];
+	struct list_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
+	u32 flags[MAX_CGROUP_BPF_ATTACH_TYPE];

	/* list of cgroup shared storages */
	struct list_head storages;
@@ -115,28 +179,28 @@ int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
-				enum bpf_attach_type type);
+				enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
-			       enum bpf_attach_type type);
+			       enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
-				      enum bpf_attach_type type,
+				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
-				     enum bpf_attach_type type);
+				     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
-				      short access, enum bpf_attach_type type);
+				      short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
-				   enum bpf_attach_type type);
+				   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
@@ -179,9 +243,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
	int __ret = 0; \
-	if (cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS)) \
+	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS)) \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
-						    BPF_CGROUP_INET_INGRESS); \
+						    CGROUP_INET_INGRESS); \
 \
	__ret; \
})
@@ -189,54 +253,54 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
	int __ret = 0; \
-	if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
+	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
		typeof(sk) __sk = sk_to_full_sk(sk); \
		if (sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
-							    BPF_CGROUP_INET_EGRESS); \
+							    CGROUP_INET_EGRESS); \
	} \
	__ret; \
})

-#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
+#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \
({ \
	int __ret = 0; \
-	if (cgroup_bpf_enabled(type)) { \
-		__ret = __cgroup_bpf_run_filter_sk(sk, type); \
+	if (cgroup_bpf_enabled(atype)) { \
+		__ret = __cgroup_bpf_run_filter_sk(sk, atype); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
-	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)
+	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
-	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)
+	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
-	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)
+	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
-	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
+	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)

-#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
({ \
	u32 __unused_flags; \
	int __ret = 0; \
-	if (cgroup_bpf_enabled(type)) \
-		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
+	if (cgroup_bpf_enabled(atype)) \
+		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
							  NULL, \
							  &__unused_flags); \
	__ret; \
})

-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
({ \
	u32 __unused_flags; \
	int __ret = 0; \
-	if (cgroup_bpf_enabled(type)) { \
+	if (cgroup_bpf_enabled(atype)) { \
		lock_sock(sk); \
-		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
+		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
							  t_ctx, \
							  &__unused_flags); \
		release_sock(sk); \
@@ -249,13 +313,13 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
 * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
-#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, bind_flags) \
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \
({ \
	u32 __flags = 0; \
	int __ret = 0; \
-	if (cgroup_bpf_enabled(type)) { \
+	if (cgroup_bpf_enabled(atype)) { \
		lock_sock(sk); \
-		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
+		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
							  NULL, &__flags); \
		release_sock(sk); \
		if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
@@ -265,33 +329,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
})

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
-	((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) || \
-	  cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) && \
+	((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \
+	  cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
	 (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
-	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)
+	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
-	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
+	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
-	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)
+	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
-	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)
+	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
-	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)
+	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
-	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)
+	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
-	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)
+	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
-	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)
+	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
@@ -311,33 +375,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
({ \
	int __ret = 0; \
-	if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS)) \
+	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \
		__ret = __cgroup_bpf_run_filter_sock_ops(sk, \
							 sock_ops, \
-							 BPF_CGROUP_SOCK_OPS); \
+							 CGROUP_SOCK_OPS); \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
	int __ret = 0; \
-	if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
+	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
		if (__sk && sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
								 sock_ops, \
-								 BPF_CGROUP_SOCK_OPS); \
+								 CGROUP_SOCK_OPS); \
	} \
	__ret; \
})

-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \
({ \
	int __ret = 0; \
-	if (cgroup_bpf_enabled(BPF_CGROUP_DEVICE)) \
-		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
+	if (cgroup_bpf_enabled(CGROUP_DEVICE)) \
+		__ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
							  access, \
-							  BPF_CGROUP_DEVICE); \
+							  CGROUP_DEVICE); \
 \
	__ret; \
})
@@ -346,10 +410,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
	int __ret = 0; \
-	if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL)) \
+	if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
						       buf, count, pos, \
-						       BPF_CGROUP_SYSCTL); \
+						       CGROUP_SYSCTL); \
	__ret; \
})

@@ -357,7 +421,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				       kernel_optval) \
({ \
	int __ret = 0; \
-	if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT)) \
+	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT)) \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
							   optname, optval, \
							   optlen, \
@@ -368,7 +432,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
	int __ret = 0; \
-	if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
+	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
		get_user(__ret, optlen); \
	__ret; \
})
@@ -377,7 +441,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				       max_optlen, retval) \
({ \
	int __ret = retval; \
-	if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
+	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
		if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					  tcp_bpf_bypass_getsockopt, \
@@ -392,7 +456,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				       optlen, retval) \
({ \
	int __ret = retval; \
-	if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \
+	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
		__ret = __cgroup_bpf_run_filter_getsockopt_kern( \
			sock, level, optname, optval, optlen, retval); \
	__ret; \
@@ -451,14 +515,14 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
	return 0;
}

-#define cgroup_bpf_enabled(type) (0)
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
+#define cgroup_bpf_enabled(atype) (0)
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, flags) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
@@ -470,7 +534,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
@@ -84,7 +84,7 @@ struct bpf_lpm_trie_key {

struct bpf_cgroup_storage_key {
	__u64 cgroup_inode_id; /* cgroup inode id */
-	__u32 attach_type; /* program attach type */
+	__u32 attach_type; /* program attach type (enum bpf_attach_type) */
};

union bpf_iter_link_info {
@@ -19,7 +19,7 @@

#include "../cgroup/cgroup-internal.h"

-DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_BPF_ATTACH_TYPE);
+DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
@@ -113,12 +113,12 @@ static void cgroup_bpf_release(struct work_struct *work)
	struct list_head *storages = &cgrp->bpf.storages;
	struct bpf_cgroup_storage *storage, *stmp;

-	unsigned int type;
+	unsigned int atype;

	mutex_lock(&cgroup_mutex);

-	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
-		struct list_head *progs = &cgrp->bpf.progs[type];
+	for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
+		struct list_head *progs = &cgrp->bpf.progs[atype];
		struct bpf_prog_list *pl, *pltmp;

		list_for_each_entry_safe(pl, pltmp, progs, node) {
@@ -128,10 +128,10 @@ static void cgroup_bpf_release(struct work_struct *work)
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			kfree(pl);
-			static_branch_dec(&cgroup_bpf_enabled_key[type]);
+			static_branch_dec(&cgroup_bpf_enabled_key[atype]);
		}
		old_array = rcu_dereference_protected(
-				cgrp->bpf.effective[type],
+				cgrp->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}
@@ -196,7 +196,7 @@ static u32 prog_list_length(struct list_head *head)
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
-				    enum bpf_attach_type type)
+				    enum cgroup_bpf_attach_type atype)
{
	struct cgroup *p;

@@ -204,12 +204,12 @@ static bool hierarchy_allows_attach(struct cgroup *cgrp,
	if (!p)
		return true;
	do {
-		u32 flags = p->bpf.flags[type];
+		u32 flags = p->bpf.flags[atype];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
-		cnt = prog_list_length(&p->bpf.progs[type]);
+		cnt = prog_list_length(&p->bpf.progs[atype]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
@@ -225,7 +225,7 @@ static bool hierarchy_allows_attach(struct cgroup *cgrp,
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
-				   enum bpf_attach_type type,
+				   enum cgroup_bpf_attach_type atype,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
@@ -236,8 +236,8 @@ static int compute_effective_progs(struct cgroup *cgrp,

	/* count number of effective programs by walking parents */
	do {
-		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
-			cnt += prog_list_length(&p->bpf.progs[type]);
+		if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
+			cnt += prog_list_length(&p->bpf.progs[atype]);
		p = cgroup_parent(p);
	} while (p);

@@ -249,10 +249,10 @@ static int compute_effective_progs(struct cgroup *cgrp,
	cnt = 0;
	p = cgrp;
	do {
-		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
+		if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			continue;

-		list_for_each_entry(pl, &p->bpf.progs[type], node) {
+		list_for_each_entry(pl, &p->bpf.progs[atype], node) {
			if (!prog_list_prog(pl))
				continue;

@@ -269,10 +269,10 @@ static int compute_effective_progs(struct cgroup *cgrp,
}

static void activate_effective_progs(struct cgroup *cgrp,
-				     enum bpf_attach_type type,
+				     enum cgroup_bpf_attach_type atype,
				     struct bpf_prog_array *old_array)
{
-	old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
+	old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
@@ -328,7 +328,7 @@ cleanup:
}

static int update_effective_progs(struct cgroup *cgrp,
-				  enum bpf_attach_type type)
+				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	int err;
@@ -340,7 +340,7 @@ static int update_effective_progs(struct cgroup *cgrp,
		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

-		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
+		err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}
@@ -357,7 +357,7 @@ static int update_effective_progs(struct cgroup *cgrp,
			continue;
		}

-		activate_effective_progs(desc, type, desc->bpf.inactive);
+		activate_effective_progs(desc, atype, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

@@ -436,11 +436,12 @@ int __cgroup_bpf_attach(struct cgroup *cgrp,
			enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
-	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
+	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_list *pl;
+	struct list_head *progs;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
@@ -454,10 +455,16 @@ int __cgroup_bpf_attach(struct cgroup *cgrp,
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

-	if (!hierarchy_allows_attach(cgrp, type))
+	atype = to_cgroup_bpf_attach_type(type);
+	if (atype < 0)
+		return -EINVAL;
+
+	progs = &cgrp->bpf.progs[atype];
+
+	if (!hierarchy_allows_attach(cgrp, atype))
		return -EPERM;

-	if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
+	if (!list_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
@@ -490,16 +497,16 @@ int __cgroup_bpf_attach(struct cgroup *cgrp,
	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
-	cgrp->bpf.flags[type] = saved_flags;
+	cgrp->bpf.flags[atype] = saved_flags;

-	err = update_effective_progs(cgrp, type);
+	err = update_effective_progs(cgrp, atype);
	if (err)
		goto cleanup;

	if (old_prog)
		bpf_prog_put(old_prog);
	else
-		static_branch_inc(&cgroup_bpf_enabled_key[type]);
+		static_branch_inc(&cgroup_bpf_enabled_key[atype]);
	bpf_cgroup_storages_link(new_storage, cgrp, type);
	return 0;

@@ -520,7 +527,7 @@ cleanup:
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
-				   enum bpf_attach_type type,
+				   enum cgroup_bpf_attach_type atype,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
@@ -539,10 +546,10 @@ static void replace_effective_prog(struct cgroup *cgrp,

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
-			if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
+			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

-			head = &cg->bpf.progs[type];
+			head = &cg->bpf.progs[atype];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
@@ -554,7 +561,7 @@ static void replace_effective_prog(struct cgroup *cgrp,
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
-				desc->bpf.effective[type],
+				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
@@ -574,11 +581,18 @@ static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
-	struct list_head *progs = &cgrp->bpf.progs[link->type];
+	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
+	struct list_head *progs;
	bool found = false;

+	atype = to_cgroup_bpf_attach_type(link->type);
+	if (atype < 0)
+		return -EINVAL;
+
+	progs = &cgrp->bpf.progs[atype];
+
	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

@@ -592,7 +606,7 @@ static int __cgroup_bpf_replace(struct cgroup *cgrp,
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
-	replace_effective_prog(cgrp, link->type, link);
+	replace_effective_prog(cgrp, atype, link);
	bpf_prog_put(old_prog);
	return 0;
}
@@ -667,12 +681,20 @@ static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
-	struct list_head *progs = &cgrp->bpf.progs[type];
-	u32 flags = cgrp->bpf.flags[type];
-	struct bpf_prog_list *pl;
+	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
+	struct bpf_prog_list *pl;
+	struct list_head *progs;
+	u32 flags;
	int err;

+	atype = to_cgroup_bpf_attach_type(type);
+	if (atype < 0)
+		return -EINVAL;
+
+	progs = &cgrp->bpf.progs[atype];
+	flags = cgrp->bpf.flags[atype];
+
	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;
@@ -686,7 +708,7 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
	pl->prog = NULL;
	pl->link = NULL;

-	err = update_effective_progs(cgrp, type);
+	err = update_effective_progs(cgrp, atype);
	if (err)
		goto cleanup;

@@ -695,10 +717,10 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
-		cgrp->bpf.flags[type] = 0;
+		cgrp->bpf.flags[atype] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
-	static_branch_dec(&cgroup_bpf_enabled_key[type]);
+	static_branch_dec(&cgroup_bpf_enabled_key[atype]);
	return 0;

cleanup:
@@ -714,13 +736,21 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
-	struct list_head *progs = &cgrp->bpf.progs[type];
-	u32 flags = cgrp->bpf.flags[type];
+	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_array *effective;
+	struct list_head *progs;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;
+	u32 flags;

-	effective = rcu_dereference_protected(cgrp->bpf.effective[type],
+	atype = to_cgroup_bpf_attach_type(type);
+	if (atype < 0)
+		return -EINVAL;
+
+	progs = &cgrp->bpf.progs[atype];
+	flags = cgrp->bpf.flags[atype];
+
+	effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
@@ -931,8 +961,8 @@ int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
		goto out_put_cgroup;
	}

-	err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
-				BPF_F_ALLOW_MULTI);
+	err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
+				link->type, BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
@@ -986,7 +1016,7 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr,
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
-				enum bpf_attach_type type)
+				enum cgroup_bpf_attach_type atype)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
@@ -1008,11 +1038,11 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

-	if (type == BPF_CGROUP_INET_EGRESS) {
+	if (atype == CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
-			cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
+			cgrp->bpf.effective[atype], skb, __bpf_prog_run_save_cb);
	} else {
-		ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[type], skb,
+		ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb,
					    __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
@@ -1038,12 +1068,12 @@ EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
-			       enum bpf_attach_type type)
+			       enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

-	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[type], sk, bpf_prog_run);
+	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk, bpf_prog_run);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
@@ -1065,7 +1095,7 @@ EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
-				      enum bpf_attach_type type,
+				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags)
{
@@ -1090,7 +1120,7 @@ int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
-	ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[type], &ctx,
+	ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx,
					  bpf_prog_run, flags);

	return ret == 1 ? 0 : -EPERM;
@@ -1115,19 +1145,19 @@ EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
-				     enum bpf_attach_type type)
+				     enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

-	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[type], sock_ops,
+	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops,
				    bpf_prog_run);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
-				      short access, enum bpf_attach_type type)
+				      short access, enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
@@ -1139,7 +1169,7 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
-	allow = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[type], &ctx,
+	allow = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
				      bpf_prog_run);
	rcu_read_unlock();

@@ -1231,7 +1261,7 @@ const struct bpf_verifier_ops cg_dev_verifier_ops = {
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
-				   enum bpf_attach_type type)
+				   enum cgroup_bpf_attach_type atype)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
@@ -1271,7 +1301,7 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
-	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[type], &ctx, bpf_prog_run);
+	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx, bpf_prog_run);
	rcu_read_unlock();

	kfree(ctx.cur_val);
@@ -1289,7 +1319,7 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,

#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
-					     enum bpf_attach_type attach_type)
+					     enum cgroup_bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;
@@ -1364,7 +1394,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
-	if (__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
+	if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
@@ -1385,7 +1415,7 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
	}

	lock_sock(sk);
-	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
+	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_SETSOCKOPT],
				    &ctx, bpf_prog_run);
	release_sock(sk);

@@ -1460,7 +1490,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
-	if (__cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
+	if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_GETSOCKOPT))
		return retval;

	ctx.optlen = max_optlen;
@@ -1495,7 +1525,7 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
	}

	lock_sock(sk);
-	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
+	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
				    &ctx, bpf_prog_run);
	release_sock(sk);

@@ -1556,7 +1586,7 @@ int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
	 * be called if that data shouldn't be "exported".
	 */

-	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
+	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
				    &ctx, bpf_prog_run);
	if (!ret)
		return -EPERM;
@@ -452,7 +452,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
	 * changes context in a wrong way it will be caught.
	 */
	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr,
-						 BPF_CGROUP_INET4_BIND, &flags);
+						 CGROUP_INET4_BIND, &flags);
	if (err)
		return err;

@@ -781,7 +781,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
		sin->sin_port = inet->inet_dport;
		sin->sin_addr.s_addr = inet->inet_daddr;
		BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin,
-					    BPF_CGROUP_INET4_GETPEERNAME,
+					    CGROUP_INET4_GETPEERNAME,
					    NULL);
	} else {
		__be32 addr = inet->inet_rcv_saddr;
@@ -790,7 +790,7 @@ int inet_getname(struct socket *sock, struct sockaddr *uaddr,
		sin->sin_port = inet->inet_sport;
		sin->sin_addr.s_addr = addr;
		BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin,
-					    BPF_CGROUP_INET4_GETSOCKNAME,
+					    CGROUP_INET4_GETSOCKNAME,
					    NULL);
	}
	memset(sin->sin_zero, 0, sizeof(sin->sin_zero));

@@ -1143,7 +1143,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
		rcu_read_unlock();
	}

-	if (cgroup_bpf_enabled(BPF_CGROUP_UDP4_SENDMSG) && !connected) {
+	if (cgroup_bpf_enabled(CGROUP_UDP4_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk,
					    (struct sockaddr *)usin, &ipc.addr);
		if (err)

@@ -455,7 +455,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
	 * changes context in a wrong way it will be caught.
	 */
	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr,
-						 BPF_CGROUP_INET6_BIND, &flags);
+						 CGROUP_INET6_BIND, &flags);
	if (err)
		return err;

@@ -532,7 +532,7 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
		if (np->sndflow)
			sin->sin6_flowinfo = np->flow_label;
		BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin,
-					    BPF_CGROUP_INET6_GETPEERNAME,
+					    CGROUP_INET6_GETPEERNAME,
					    NULL);
	} else {
		if (ipv6_addr_any(&sk->sk_v6_rcv_saddr))
@@ -541,7 +541,7 @@ int inet6_getname(struct socket *sock, struct sockaddr *uaddr,
			sin->sin6_addr = sk->sk_v6_rcv_saddr;
		sin->sin6_port = inet->inet_sport;
		BPF_CGROUP_RUN_SA_PROG_LOCK(sk, (struct sockaddr *)sin,
-					    BPF_CGROUP_INET6_GETSOCKNAME,
+					    CGROUP_INET6_GETSOCKNAME,
					    NULL);
	}
	sin->sin6_scope_id = ipv6_iface_scope_id(&sin->sin6_addr,

@@ -1475,7 +1475,7 @@ do_udp_sendmsg:
		fl6.saddr = np->saddr;
	fl6.fl6_sport = inet->inet_sport;

-	if (cgroup_bpf_enabled(BPF_CGROUP_UDP6_SENDMSG) && !connected) {
+	if (cgroup_bpf_enabled(CGROUP_UDP6_SENDMSG) && !connected) {
		err = BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk,
					   (struct sockaddr *)sin6, &fl6.saddr);
		if (err)

@@ -84,7 +84,7 @@ struct bpf_lpm_trie_key {

struct bpf_cgroup_storage_key {
	__u64 cgroup_inode_id; /* cgroup inode id */
-	__u32 attach_type; /* program attach type */
+	__u32 attach_type; /* program attach type (enum bpf_attach_type) */
};

union bpf_iter_link_info {