Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-26 22:24:09 +08:00)

Commit 2bbc078f81
Daniel Borkmann says:

====================
pull-request: bpf-next 2019-12-27

The following pull-request contains BPF updates for your *net-next* tree.

We've added 127 non-merge commits during the last 17 day(s) which contain
a total of 110 files changed, 6901 insertions(+), 2721 deletions(-).

There are three merge conflicts. Conflicts and resolution looks as follows:

1) Merge conflict in net/bpf/test_run.c:

   There was a tree-wide cleanup c593642c8b ("treewide: Use sizeof_field()
   macro") which gets in the way with b590cb5f80 ("bpf: Switch to
   offsetofend in BPF_PROG_TEST_RUN"):

     <<<<<<< HEAD
             if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) +
                                sizeof_field(struct __sk_buff, priority),
     =======
             if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
     >>>>>>> 7c8dce4b16

   There are a few occasions that look similar to this. Always take the
   chunk with offsetofend(). Note that there is one where the fields differ
   in here:

     <<<<<<< HEAD
             if (!range_is_zero(__skb, offsetof(struct __sk_buff, tstamp) +
                                sizeof_field(struct __sk_buff, tstamp),
     =======
             if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
     >>>>>>> 7c8dce4b16

   Just take the one with offsetofend() /and/ gso_segs. Latter is correct
   due to 850a88cc40 ("bpf: Expose __sk_buff wire_len/gso_segs to
   BPF_PROG_TEST_RUN"). (See the offsetofend() note after this message.)

2) Merge conflict in arch/riscv/net/bpf_jit_comp.c:

   (I'm keeping Bjorn in Cc here for a double-check in case I got it wrong.)

     <<<<<<< HEAD
             if (is_13b_check(off, insn))
                     return -1;
             emit(rv_blt(tcc, RV_REG_ZERO, off >> 1), ctx);
     =======
             emit_branch(BPF_JSLT, RV_REG_T1, RV_REG_ZERO, off, ctx);
     >>>>>>> 7c8dce4b16

   Result should look like:

             emit_branch(BPF_JSLT, tcc, RV_REG_ZERO, off, ctx);

3) Merge conflict in arch/riscv/include/asm/pgtable.h:

     <<<<<<< HEAD
     =======
     #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
     #define VMALLOC_END      (PAGE_OFFSET - 1)
     #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)

     #define BPF_JIT_REGION_SIZE    (SZ_128M)
     #define BPF_JIT_REGION_START   (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
     #define BPF_JIT_REGION_END     (VMALLOC_END)

     /*
      * Roughly size the vmemmap space to be large enough to fit enough
      * struct pages to map half the virtual address space. Then
      * position vmemmap directly below the VMALLOC region.
      */
     #define VMEMMAP_SHIFT \
             (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
     #define VMEMMAP_SIZE     BIT(VMEMMAP_SHIFT)
     #define VMEMMAP_END      (VMALLOC_START - 1)
     #define VMEMMAP_START    (VMALLOC_START - VMEMMAP_SIZE)

     #define vmemmap          ((struct page *)VMEMMAP_START)
     >>>>>>> 7c8dce4b16

   Only take the BPF_* defines from there and move them higher up in the
   same file. Remove the rest from the chunk. The VMALLOC_* etc defines got
   moved via 01f52e16b8 ("riscv: define vmemmap before pfn_to_page calls").
   Result:

     [...]
     #define __S101   PAGE_READ_EXEC
     #define __S110   PAGE_SHARED_EXEC
     #define __S111   PAGE_SHARED_EXEC

     #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
     #define VMALLOC_END      (PAGE_OFFSET - 1)
     #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)

     #define BPF_JIT_REGION_SIZE    (SZ_128M)
     #define BPF_JIT_REGION_START   (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
     #define BPF_JIT_REGION_END     (VMALLOC_END)

     /*
      * Roughly size the vmemmap space to be large enough to fit enough
      * struct pages to map half the virtual address space. Then
      * position vmemmap directly below the VMALLOC region.
      */
     #define VMEMMAP_SHIFT \
             (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
     #define VMEMMAP_SIZE     BIT(VMEMMAP_SHIFT)
     #define VMEMMAP_END      (VMALLOC_START - 1)
     #define VMEMMAP_START    (VMALLOC_START - VMEMMAP_SIZE)
     [...]

Let me know if there are any other issues. Anyway, the main changes are:

1) Extend bpftool to produce a struct (aka "skeleton") tailored and
   specific to a provided BPF object file. This provides an alternative,
   simplified API compared to standard libbpf interaction. Also, add
   libbpf extern variable resolution for .kconfig section to import
   Kconfig data, from Andrii Nakryiko. (A usage sketch follows this
   message.)

2) Add BPF dispatcher for XDP which is a mechanism to avoid indirect
   calls by generating a branch funnel as discussed back in bpfconf'19
   at LSF/MM. Also, add various BPF riscv JIT improvements, from
   Björn Töpel.

3) Extend bpftool to allow matching BPF programs and maps by name,
   from Paul Chaignon.

4) Support for replacing cgroup BPF programs attached with
   BPF_F_ALLOW_MULTI flag for allowing updates without service
   interruption, from Andrey Ignatov. (See the attach sketch before the
   file listing below.)

5) Cleanup and simplification of ring access functions for AF_XDP with
   a bonus of 0-5% performance improvement, from Magnus Karlsson.

6) Enable BPF JITs for x86-64 and arm64 by default. Also, final version
   of audit support for BPF, from Daniel Borkmann and latter with
   Jiri Olsa.

7) Move and extend test_select_reuseport into BPF program tests under
   BPF selftests, from Jakub Sitnicki.

8) Various BPF sample improvements for xdpsock for customizing parameters
   to set up and benchmark AF_XDP, from Jay Jayatheerthan.

9) Improve libbpf to provide a ulimit hint on permission denied errors.
   Also change XDP sample programs to attach in driver mode by default,
   from Toke Høiland-Jørgensen.

10) Extend BPF test infrastructure to allow changing skb mark from tc BPF
    programs, from Nikita V. Shirokov.

11) Optimize prologue code sequence in BPF arm32 JIT, from Russell King.

12) Fix xdp_redirect_cpu BPF sample to manually attach to tracepoints
    after libbpf conversion, from Jesper Dangaard Brouer.

13) Minor misc improvements from various others.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
413 lines | 14 KiB | C
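The file below is include/linux/bpf-cgroup.h at this commit. Note that
__cgroup_bpf_attach()/cgroup_bpf_attach() now take a replace_prog
argument, which is the kernel side of change 4) above. A hedged userspace
sketch of driving a replacement through the raw bpf(2) syscall follows; it
assumes the uapi added in this series (BPF_F_REPLACE, bpf_attr.replace_bpf_fd)
and valid file descriptors obtained elsewhere:

        #include <string.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/bpf.h>

        /* Swap new_prog_fd in for old_prog_fd on a multi-prog attach point,
         * without a detach/attach window. All three fds are assumed valid
         * (cgroup directory fd and two loaded program fds). */
        static int replace_cgroup_prog(int cgroup_fd, int old_prog_fd, int new_prog_fd)
        {
                union bpf_attr attr;

                memset(&attr, 0, sizeof(attr));
                attr.target_fd      = cgroup_fd;
                attr.attach_bpf_fd  = new_prog_fd;
                attr.replace_bpf_fd = old_prog_fd;
                attr.attach_type    = BPF_CGROUP_INET_EGRESS;
                attr.attach_flags   = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;

                return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
        }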
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

DECLARE_PER_CPU(struct bpf_cgroup_storage*,
                bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#define for_each_cgroup_storage_type(stype) \
        for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
        struct rcu_head rcu;
        char data[0];
};

struct bpf_cgroup_storage {
        union {
                struct bpf_storage_buffer *buf;
                void __percpu *percpu_buf;
        };
        struct bpf_cgroup_storage_map *map;
        struct bpf_cgroup_storage_key key;
        struct list_head list;
        struct rb_node node;
        struct rcu_head rcu;
};

struct bpf_prog_list {
        struct list_head node;
        struct bpf_prog *prog;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
        /* array of effective progs in this cgroup */
        struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

        /* attached progs to this cgroup and attach flags
         * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
         * have either zero or one element
         * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
         */
        struct list_head progs[MAX_BPF_ATTACH_TYPE];
        u32 flags[MAX_BPF_ATTACH_TYPE];

        /* temp storage for effective prog array used by prog_attach/detach */
        struct bpf_prog_array *inactive;

        /* reference counter used to detach bpf programs after cgroup removal */
        struct percpu_ref refcnt;

        /* cgroup_bpf is released using a work queue */
        struct work_struct release_work;
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
                        struct bpf_prog *replace_prog,
                        enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                        enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                       union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
                      struct bpf_prog *replace_prog, enum bpf_attach_type type,
                      u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                     union bpf_attr __user *uattr);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                               enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                      struct sockaddr *uaddr,
                                      enum bpf_attach_type type,
                                      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                     struct bpf_sock_ops_kern *sock_ops,
                                     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                   struct ctl_table *table, int write,
                                   void __user *buf, size_t *pcount,
                                   loff_t *ppos, void **new_buf,
                                   enum bpf_attach_type type);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
                                       int *optname, char __user *optval,
                                       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                                       int optname, char __user *optval,
                                       int __user *optlen, int max_optlen,
                                       int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
        struct bpf_map *map)
{
        if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                return BPF_CGROUP_STORAGE_PERCPU;

        return BPF_CGROUP_STORAGE_SHARED;
}

static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
                                          *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
        enum bpf_cgroup_storage_type stype;

        for_each_cgroup_storage_type(stype)
                this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}

struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
                                                    enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
                             struct cgroup *cgroup,
                             enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb, \
                                                    BPF_CGROUP_INET_INGRESS); \
\
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
                typeof(sk) __sk = sk_to_full_sk(sk); \
                if (sk_fullsock(__sk)) \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
                                                            BPF_CGROUP_INET_EGRESS); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) { \
                __ret = __cgroup_bpf_run_filter_sk(sk, type); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
                                                          NULL); \
        __ret; \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) { \
                lock_sock(sk); \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
                                                          t_ctx); \
                release_sock(sk); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
                                            sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled && (sock_ops)->sk) { \
                typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
                if (__sk && sk_fullsock(__sk)) \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
                                                                 sock_ops, \
                                                                 BPF_CGROUP_SOCK_OPS); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
                                                          access, \
                                                          BPF_CGROUP_DEVICE); \
\
        __ret; \
})


#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
                                                       buf, count, pos, nbuf, \
                                                       BPF_CGROUP_SYSCTL); \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
                                       kernel_optval) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
                                                           optname, optval, \
                                                           optlen, \
                                                           kernel_optval); \
        __ret; \
})

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled) \
                get_user(__ret, optlen); \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
                                       max_optlen, retval) \
({ \
        int __ret = retval; \
        if (cgroup_bpf_enabled) \
                __ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \
                                                           optname, optval, \
                                                           optlen, max_optlen, \
                                                           retval); \
        __ret; \
})

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr);
#else

struct bpf_prog;
struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
                                        union bpf_attr __user *uattr)
{
        return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
                                            struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux,
                                              struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
        struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
        struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
                                                 void *value) {
        return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
                                                   void *key, void *value, u64 flags) {
        return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
                                       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
                                       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */
#endif /* _BPF_CGROUP_H */
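Closing note on the BPF_CGROUP_RUN_PROG_*() wrappers above: each one is a
statement expression that evaluates to 0 when CONFIG_CGROUP_BPF is off,
when the static key is disabled, or when the attached programs allow the
operation, and to a negative errno (typically -EPERM) when a program
rejects it. A sketch of the calling pattern as it would look in kernel
context; example_ingress() is a hypothetical caller, not a call site from
this tree:

        /* Hypothetical caller: run cgroup ingress programs for sk against
         * skb and drop the packet if they reject it. */
        static int example_ingress(struct sock *sk, struct sk_buff *skb)
        {
                int err;

                err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
                if (err) {
                        kfree_skb(skb);         /* verdict was "reject" */
                        return err;
                }

                /* continue normal receive processing */
                return 0;
        }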