commit 241a94abcf

No functional change intended, aliases will be used in follow-up commits.

Note for backporters: you may need to add aliases also for modules that are
already removed in the mainline kernel but still present in your version.

Patches were generated with the help of Coccinelle scripts like:

    cat >scripts/coccinelle/misc/tcf_alias.cocci <<EOD
    virtual patch
    virtual report

    @ haskernel @
    @@

    @ tcf_has_kind depends on report && haskernel @
    identifier ops;
    constant K;
    @@

    static struct tcf_proto_ops ops = {
            .kind = K,
            ...
    };
    +char module_alias = K;
    EOD

    /usr/bin/spatch -D report --cocci-file scripts/coccinelle/misc/tcf_alias.cocci \
        --dir . \
        -I ./arch/x86/include -I ./arch/x86/include/generated -I ./include \
        -I ./arch/x86/include/uapi -I ./arch/x86/include/generated/uapi \
        -I ./include/uapi -I ./include/generated/uapi \
        --include ./include/linux/compiler-version.h --include ./include/linux/kconfig.h \
        --jobs 8 --chunksize 1 2>/dev/null | \
        sed 's/char module_alias = "\([^"]*\)";/MODULE_ALIAS_NET_CLS("\1");/'

And analogously for:

    static struct tc_action_ops ops = {
            .kind = K,

    static struct Qdisc_ops ops = {
            .id = K,

(Someone familiar would be able to fit those into one .cocci file without
sed post-processing.)

Signed-off-by: Michal Koutný <mkoutny@suse.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Link: https://lore.kernel.org/r/20240201130943.19536-3-mkoutny@suse.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
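For illustration, the analogous tc_action_ops rule mentioned above could look
roughly like this (a sketch only, not taken from the commit; the sed step would
then presumably substitute MODULE_ALIAS_NET_ACT rather than
MODULE_ALIAS_NET_CLS):

    @ tcf_has_kind depends on report && haskernel @
    identifier ops;
    constant K;
    @@

    static struct tc_action_ops ops = {
            .kind = K,
            ...
    };
    +char module_alias = K;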
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/tc_wrapper.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	unsigned int in_hw_count;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	struct rcu_work rwork;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}
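/* Run each attached BPF program against the skb in priority order. In
 * direct-action mode (exts_integrated) the program returns a TC_ACT_*
 * verdict directly and passes the classid via qdisc_skb_cb(); otherwise
 * the return value selects a classid (-1 means prog->res) and the
 * attached extension actions are executed afterwards.
 */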
TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb,
				       const struct tcf_proto *tp,
				       struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = bpf_prog_run(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = bpf_prog_run(prog->filter, skb);
		}
		if (unlikely(!skb->tstamp && skb->mono_delivery_time))
			skb->mono_delivery_time = 0;

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}
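/* Issue a hardware offload request. Which block callback is used depends
 * on the argument combination: both prog and oldprog set means replace,
 * only prog means add, only oldprog means destroy. If adding a new prog
 * fails, the call is repeated with the arguments swapped to roll back any
 * partial hardware state, and a skip_sw prog that did not make it into
 * hardware is rejected with -EINVAL.
 */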
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags, extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog && prog)
		err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count,
					  &prog->gen_flags, &prog->in_hw_count,
					  true);
	else if (prog)
		err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
				      skip_sw, &prog->gen_flags,
				      &prog->in_hw_count, true);
	else
		err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
					  skip_sw, &oldprog->gen_flags,
					  &oldprog->in_hw_count, true);

	if (prog && err) {
		cls_bpf_offload_cmd(tp, oldprog, prog, extack);
		return err;
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, TC_SETUP_CLSBPF, &cls_bpf, false, true);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(to_rcu_work(work),
						 struct cls_bpf_prog,
						 rwork);
	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp, bool rtnl_held,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}
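/* Two ways to load the filter program: cls_bpf_prog_from_ops() builds a
 * classic BPF program from the opcode array passed in TCA_BPF_OPS, while
 * cls_bpf_prog_from_efd() below takes a file descriptor referring to an
 * already loaded eBPF program (TCA_BPF_FD). cls_bpf_is_ebpf() above
 * distinguishes the two by whether bpf_ops was set.
 */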
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kmemdup(nla_data(tb[TCA_BPF_OPS]), bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}
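/* Create or update a filter: parse the netlink attributes, allocate a
 * handle in the IDR, load the program (classic or eBPF, mutually
 * exclusive), attempt hardware offload, and finally publish the new prog
 * in the RCU-protected list, replacing and retiring the old one if this
 * is an update.
 */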
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, u32 flags,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	bool is_bpf, is_ebpf, have_exts = false;
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	bool bound_to_filter = false;
	struct cls_bpf_prog *prog;
	u32 gen_flags = 0;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested_deprecated(tb, TCA_BPF_MAX, tca[TCA_OPTIONS],
					  bpf_policy, NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, net, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
		ret = -EINVAL;
		goto errout_idr;
	}

	ret = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &prog->exts,
				flags, extack);
	if (ret < 0)
		goto errout_idr;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
			ret = -EINVAL;
			goto errout_idr;
		}

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags)) {
			ret = -EINVAL;
			goto errout_idr;
		}
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		goto errout_idr;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
		bound_to_filter = true;
	}

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	if (bound_to_filter)
		tcf_unbind_filter(tp, &prog->res);
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm, bool rtnl_held)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl,
			       void *q, unsigned long base)
{
	struct cls_bpf_prog *prog = fh;

	tc_cls_bind_class(classid, cl, q, &prog->res, base);
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg,
			 bool rtnl_held)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (!tc_cls_stats_dump(tp, arg, prog))
			break;
	}
}

static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,
			     void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *prog;
	int err;

	list_for_each_entry(prog, &head->plist, link) {
		if (tc_skip_hw(prog->gen_flags))
			continue;

		tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags,
					   extack);
		cls_bpf.command = TC_CLSBPF_OFFLOAD;
		cls_bpf.exts = &prog->exts;
		cls_bpf.prog = add ? prog->filter : NULL;
		cls_bpf.oldprog = add ? NULL : prog->filter;
		cls_bpf.name = prog->bpf_name;
		cls_bpf.exts_integrated = prog->exts_integrated;

		err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,
					    &cls_bpf, cb_priv, &prog->gen_flags,
					    &prog->in_hw_count);
		if (err)
			return err;
	}

	return 0;
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.reoffload	=	cls_bpf_reoffload,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};
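/* Module alias for the "bpf" classifier kind, so the module can be
 * auto-loaded when userspace requests this classifier; this is the alias
 * added by the commit described above.
 */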
MODULE_ALIAS_NET_CLS("bpf");

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);