mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-28 15:13:55 +08:00)
commit 7ae457c1e5 ("net: filter: split 'struct sk_filter' into socket and bpf parts")

clean up names related to socket filtering and bpf in the following way:

- everything that deals with sockets keeps 'sk_*' prefix
- everything that is pure BPF is changed to 'bpf_*' prefix

split 'struct sk_filter' into

	struct sk_filter {
		atomic_t	refcnt;
		struct rcu_head	rcu;
		struct bpf_prog	*prog;
	};

and

	struct bpf_prog {
		u32			jited:1,
					len:31;
		struct sock_fprog_kern	*orig_prog;
		unsigned int		(*bpf_func)(const struct sk_buff *skb,
						    const struct bpf_insn *filter);
		union {
			struct sock_filter	insns[0];
			struct bpf_insn		insnsi[0];
			struct work_struct	work;
		};
	};

so that 'struct bpf_prog' can be used independent of sockets and cleans up
'unattached' bpf use cases.

split SK_RUN_FILTER macro into: SK_RUN_FILTER to be used with
'struct sk_filter *' and BPF_PROG_RUN to be used with 'struct bpf_prog *'.

__sk_filter_release(struct sk_filter *) gains a
__bpf_prog_release(struct bpf_prog *) helper function.

also perform related renames for the functions that work with
'struct bpf_prog *', since they're on the same lines:

	sk_filter_size               -> bpf_prog_size
	sk_filter_select_runtime     -> bpf_prog_select_runtime
	sk_filter_free               -> bpf_prog_free
	sk_unattached_filter_create  -> bpf_prog_create
	sk_unattached_filter_destroy -> bpf_prog_destroy
	sk_store_orig_filter         -> bpf_prog_store_orig_filter
	sk_release_orig_filter       -> bpf_release_orig_filter
	__sk_migrate_filter          -> bpf_migrate_filter
	__sk_prepare_filter          -> bpf_prepare_filter

API for attaching classic BPF to a socket stays the same:
sk_attach_filter(prog, struct sock *) / sk_detach_filter(struct sock *),
and SK_RUN_FILTER(struct sk_filter *, ctx) to execute a program,
which is used by sockets, tun, af_packet.

API for 'unattached' BPF programs becomes:
bpf_prog_create(struct bpf_prog **) / bpf_prog_destroy(struct bpf_prog *),
and BPF_PROG_RUN(struct bpf_prog *, ctx) to execute a program,
which is used by isdn, ppp, team, seccomp, ptp, xt_bpf, cls_bpf, test_bpf.

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
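As a rough sketch of the renamed 'unattached' API (a hypothetical caller for
illustration only, not part of this commit), a minimal accept-all classic BPF
program is now created, run and destroyed like this:

	#include <linux/filter.h>

	static int run_accept_all_bpf(struct sk_buff *skb)
	{
		struct sock_filter insns[] = {
			/* classic BPF: return non-zero, i.e. "match" */
			BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
		};
		struct sock_fprog_kern fprog = {
			.len	= ARRAY_SIZE(insns),
			.filter	= insns,
		};
		struct bpf_prog *prog;
		unsigned int res;
		int err;

		/* was sk_unattached_filter_create(); copies (and may JIT) the insns */
		err = bpf_prog_create(&prog, &fprog);
		if (err)
			return err;

		res = BPF_PROG_RUN(prog, skb);	/* was SK_RUN_FILTER() */

		bpf_prog_destroy(prog);		/* was sk_unattached_filter_destroy() */
		return res ? 0 : -1;
	}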
383 lines · 8.1 KiB · C
/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

struct cls_bpf_head {
	struct list_head plist;
	u32 hgen;			/* last auto-generated handle */
};

struct cls_bpf_prog {
	struct bpf_prog *filter;	/* converted/JITed program run on the fast path */
	struct sock_filter *bpf_ops;	/* original classic BPF insns, kept for dump() */
	struct tcf_exts exts;
	struct tcf_result res;
	struct list_head link;
	u32 handle;
	u16 bpf_len;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

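/* Sizing note (not part of the original file): BPF_MAXINSNS is 4096 and
 * sizeof(struct sock_filter) is 8 bytes (u16 code, u8 jt, u8 jf, u32 k),
 * so the policy above caps TCA_BPF_OPS at 4096 * 8 = 32 KiB of classic
 * BPF instructions per program. */
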
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;
	int ret;

	list_for_each_entry(prog, &head->plist, link) {
		int filter_res = BPF_PROG_RUN(prog->filter, skb);

		if (filter_res == 0)
			continue;

		*res = prog->res;
		if (filter_res != -1)
			res->classid = filter_res;

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		return ret;
	}

	return -1;
}

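/* Illustration (not part of the original file): the classic BPF return value
 * drives the loop above -- 0 means "no match, try the next program", -1
 * means "match, keep the classid configured via TCA_BPF_CLASSID", and any
 * other value overrides the classid directly. A hypothetical program ending
 * in
 *
 *	BPF_STMT(BPF_RET | BPF_K, 0x10001),
 *
 * would thus classify matching packets straight into class 1:1, while one
 * ending in
 *
 *	BPF_STMT(BPF_RET | BPF_K, (u32) -1),
 *
 * defers to the configured classid. */
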
static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD(&head->plist);
	tp->root = head;

	return 0;
}

static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
	tcf_unbind_filter(tp, &prog->res);
	tcf_exts_destroy(tp, &prog->exts);

	bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_ops);
	kfree(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog == todel) {
			tcf_tree_lock(tp);
			list_del(&prog->link);
			tcf_tree_unlock(tp);

			cls_bpf_delete_prog(tp, prog);
			return 0;
		}
	}

	return -ENOENT;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link) {
		list_del(&prog->link);
		cls_bpf_delete_prog(tp, prog);
	}

	kfree(head);
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;
	unsigned long ret = 0UL;

	if (head == NULL)
		return 0UL;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle) {
			ret = (unsigned long) prog;
			break;
		}
	}

	return ret;
}

static void cls_bpf_put(struct tcf_proto *tp, unsigned long f)
{
	/* programs are not individually refcounted, so nothing to release */
}

static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
				   struct cls_bpf_prog *prog,
				   unsigned long base, struct nlattr **tb,
				   struct nlattr *est, bool ovr)
{
	struct sock_filter *bpf_ops, *bpf_old;
	struct tcf_exts exts;
	struct sock_fprog_kern tmp;
	struct bpf_prog *fp, *fp_old;
	u16 bpf_size, bpf_len;
	u32 classid;
	int ret;

	if (!tb[TCA_BPF_OPS_LEN] || !tb[TCA_BPF_OPS] || !tb[TCA_BPF_CLASSID])
		return -EINVAL;

	tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
	if (ret < 0)
		return ret;

	classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
	bpf_len = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_len > BPF_MAXINSNS || bpf_len == 0) {
		ret = -EINVAL;
		goto errout;
	}

	bpf_size = bpf_len * sizeof(*bpf_ops);
	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL) {
		ret = -ENOMEM;
		goto errout;
	}

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	tmp.len = bpf_len;
	tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &tmp);
	if (ret)
		goto errout_free;

	/* swap in the new program under the tc tree lock; the old one is
	 * freed afterwards, outside the lock */
	tcf_tree_lock(tp);
	fp_old = prog->filter;
	bpf_old = prog->bpf_ops;

	prog->bpf_len = bpf_len;
	prog->bpf_ops = bpf_ops;
	prog->filter = fp;
	prog->res.classid = classid;
	tcf_tree_unlock(tp);

	tcf_bind_filter(tp, &prog->res, base);
	tcf_exts_change(tp, &prog->exts, &exts);

	if (fp_old)
		bpf_prog_destroy(fp_old);
	if (bpf_old)
		kfree(bpf_old);

	return 0;

errout_free:
	kfree(bpf_ops);
errout:
	tcf_exts_destroy(tp, &exts);
	return ret;
}

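/* Usage sketch (hypothetical, not part of the original file): the attributes
 * parsed above match what iproute2's classic BPF front end emits, so a
 * single accept-all instruction (opcode 6 = BPF_RET|BPF_K, k = 0xffffffff)
 * could be attached along the lines of
 *
 *	tc filter add dev eth0 parent 1: bpf bytecode '1,6 0 0 4294967295,' \
 *		flowid 1:1
 *
 * where the decoded bytecode fills TCA_BPF_OPS/TCA_BPF_OPS_LEN and the
 * flowid fills TCA_BPF_CLASSID. */
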
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
				   struct cls_bpf_head *head)
{
	unsigned int i = 0x80000000;

	do {
		if (++head->hgen == 0x7FFFFFFF)
			head->hgen = 1;
	} while (--i > 0 && cls_bpf_get(tp, head->hgen));
	if (i == 0) {
		pr_err("Insufficient number of handles\n");
		return 0;
	}

	/* return the verified-free handle itself, not the loop counter,
	 * so that consecutive auto-generated handles stay unique */
	return head->hgen;
}

static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  unsigned long *arg, bool ovr)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy);
	if (ret < 0)
		return ret;

	if (prog != NULL) {
		if (handle && prog->handle != handle)
			return -EINVAL;
		return cls_bpf_modify_existing(net, tp, prog, base, tb,
					       tca[TCA_RATE], ovr);
	}

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (prog == NULL)
		return -ENOBUFS;

	tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (handle == 0)
		prog->handle = cls_bpf_grab_new_handle(tp, head);
	else
		prog->handle = handle;
	if (prog->handle == 0) {
		ret = -EINVAL;
		goto errout;
	}

	ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE], ovr);
	if (ret < 0)
		goto errout;

	tcf_tree_lock(tp);
	list_add(&prog->link, &head->plist);
	tcf_tree_unlock(tp);

	*arg = (unsigned long) prog;

	return 0;
errout:
	if (*arg == 0UL && prog)
		kfree(prog);

	return ret;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
	struct nlattr *nest, *nla;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_len))
		goto nla_put_failure;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_len *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		goto nla_put_failure;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = tp->root;
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.put		=	cls_bpf_put,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);
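
/* Note (illustration, not part of the original file): "bpf" registered above
 * is the kind string userspace selects; with CONFIG_MODULES the cls_api core
 * resolves an unknown kind via request_module("cls_%s", kind), so this module
 * is pulled in on demand the first time a 'tc filter ... bpf' command is
 * issued, without an explicit modprobe. */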