
net: sched: rcu'ify cls_bpf

This patch makes the cls_bpf classifier RCU safe. The tcf_lock was
being used to protect a list of cls_bpf_prog entries; this list is now
RCU safe, and updates occur with list_replace_rcu().

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    John Fastabend, 2014-09-12 20:10:24 -07:00
Committer: David S. Miller
Parent:    b929d86d25
Commit:    1f947bf151
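For context, the pattern the patch applies is the standard RCU list scheme:
readers walk the list inside an RCU read-side critical section, while writers
(which in the kernel run under the RTNL lock) unlink or replace entries with
the _rcu list helpers and free the old entry only after a grace period. Below
is a minimal userspace sketch of that scheme. It is built on liburcu as a
stand-in for the in-kernel RCU API (an assumption, not part of the patch); the
names prog, classify() and replace_prog() are illustrative only, and where the
kernel patch defers the free with call_rcu(), the sketch uses synchronize_rcu()
for brevity.

/* rcu_list.c -- minimal sketch of the RCU-protected list pattern.
 * Assumes liburcu (userspace RCU); build roughly as:
 *   gcc -o rcu_list rcu_list.c -lurcu
 */
#include <stdio.h>
#include <stdlib.h>
#include <urcu.h>               /* rcu_read_lock(), synchronize_rcu(), ... */
#include <urcu/rculist.h>       /* cds_list_*_rcu() helpers */

struct prog {                   /* stand-in for struct cls_bpf_prog */
        struct cds_list_head link;
        unsigned int handle;
        int classid;
};

static CDS_LIST_HEAD(plist);    /* stand-in for cls_bpf_head->plist */

/* Reader side, analogous to cls_bpf_classify(): no lock is taken, only an
 * RCU read-side critical section around the list walk. */
static int classify(unsigned int handle)
{
        struct prog *p;
        int res = -1;

        rcu_read_lock();
        cds_list_for_each_entry_rcu(p, &plist, link) {
                if (p->handle == handle) {
                        res = p->classid;
                        break;
                }
        }
        rcu_read_unlock();
        return res;
}

/* Writer side, analogous to the oldprog branch of cls_bpf_change():
 * publish the new entry in place of the old one, then free the old one
 * only after all pre-existing readers have finished. */
static void replace_prog(struct prog *oldp, struct prog *newp)
{
        cds_list_replace_rcu(&oldp->link, &newp->link);
        synchronize_rcu();      /* kernel version defers via call_rcu() */
        free(oldp);
}

int main(void)
{
        struct prog *a = calloc(1, sizeof(*a));
        struct prog *b = calloc(1, sizeof(*b));

        rcu_register_thread();          /* register this thread with RCU */

        a->handle = 1;
        a->classid = 10;
        cds_list_add_rcu(&a->link, &plist);
        printf("handle 1 -> classid %d\n", classify(1));

        b->handle = 1;
        b->classid = 20;
        replace_prog(a, b);             /* a is freed after a grace period */
        printf("handle 1 -> classid %d\n", classify(1));

        cds_list_del_rcu(&b->link);
        synchronize_rcu();
        free(b);

        rcu_unregister_thread();
        return 0;
}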

--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c

@@ -27,6 +27,7 @@ MODULE_DESCRIPTION("TC BPF based classifier");
 struct cls_bpf_head {
        struct list_head plist;
        u32 hgen;
+       struct rcu_head rcu;
 };
 
 struct cls_bpf_prog {
@@ -37,6 +38,8 @@ struct cls_bpf_prog {
        struct list_head link;
        u32 handle;
        u16 bpf_len;
+       struct tcf_proto *tp;
+       struct rcu_head rcu;
 };
 
 static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
@@ -49,11 +52,11 @@ static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
 static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                            struct tcf_result *res)
 {
-       struct cls_bpf_head *head = tp->root;
+       struct cls_bpf_head *head = rcu_dereference(tp->root);
        struct cls_bpf_prog *prog;
        int ret;
 
-       list_for_each_entry(prog, &head->plist, link) {
+       list_for_each_entry_rcu(prog, &head->plist, link) {
                int filter_res = BPF_PROG_RUN(prog->filter, skb);
 
                if (filter_res == 0)
@ -81,8 +84,8 @@ static int cls_bpf_init(struct tcf_proto *tp)
if (head == NULL) if (head == NULL)
return -ENOBUFS; return -ENOBUFS;
INIT_LIST_HEAD(&head->plist); INIT_LIST_HEAD_RCU(&head->plist);
tp->root = head; rcu_assign_pointer(tp->root, head);
return 0; return 0;
} }
@@ -98,18 +101,22 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
        kfree(prog);
 }
 
+static void __cls_bpf_delete_prog(struct rcu_head *rcu)
+{
+       struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);
+
+       cls_bpf_delete_prog(prog->tp, prog);
+}
+
 static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
 {
-       struct cls_bpf_head *head = tp->root;
+       struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog, *todel = (struct cls_bpf_prog *) arg;
 
        list_for_each_entry(prog, &head->plist, link) {
                if (prog == todel) {
-                       tcf_tree_lock(tp);
-                       list_del(&prog->link);
-                       tcf_tree_unlock(tp);
-                       cls_bpf_delete_prog(tp, prog);
-
+                       list_del_rcu(&prog->link);
+                       call_rcu(&prog->rcu, __cls_bpf_delete_prog);
                        return 0;
                }
        }
@@ -119,27 +126,28 @@ static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg)
 
 static void cls_bpf_destroy(struct tcf_proto *tp)
 {
-       struct cls_bpf_head *head = tp->root;
+       struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog, *tmp;
 
        list_for_each_entry_safe(prog, tmp, &head->plist, link) {
-               list_del(&prog->link);
-               cls_bpf_delete_prog(tp, prog);
+               list_del_rcu(&prog->link);
+               call_rcu(&prog->rcu, __cls_bpf_delete_prog);
        }
 
-       kfree(head);
+       RCU_INIT_POINTER(tp->root, NULL);
+       kfree_rcu(head, rcu);
 }
 
 static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
 {
-       struct cls_bpf_head *head = tp->root;
+       struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;
        unsigned long ret = 0UL;
 
        if (head == NULL)
                return 0UL;
 
-       list_for_each_entry(prog, &head->plist, link) {
+       list_for_each_entry_rcu(prog, &head->plist, link) {
                if (prog->handle == handle) {
                        ret = (unsigned long) prog;
                        break;
@@ -158,10 +166,10 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
                                   unsigned long base, struct nlattr **tb,
                                   struct nlattr *est, bool ovr)
 {
-       struct sock_filter *bpf_ops, *bpf_old;
+       struct sock_filter *bpf_ops;
        struct tcf_exts exts;
        struct sock_fprog_kern tmp;
-       struct bpf_prog *fp, *fp_old;
+       struct bpf_prog *fp;
        u16 bpf_size, bpf_len;
        u32 classid;
        int ret;
@@ -197,26 +205,15 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
        if (ret)
                goto errout_free;
 
-       tcf_tree_lock(tp);
-       fp_old = prog->filter;
-       bpf_old = prog->bpf_ops;
-
        prog->bpf_len = bpf_len;
        prog->bpf_ops = bpf_ops;
        prog->filter = fp;
        prog->res.classid = classid;
-       tcf_tree_unlock(tp);
-
        tcf_bind_filter(tp, &prog->res, base);
        tcf_exts_change(tp, &prog->exts, &exts);
 
-       if (fp_old)
-               bpf_prog_destroy(fp_old);
-       if (bpf_old)
-               kfree(bpf_old);
-
        return 0;
 
 errout_free:
        kfree(bpf_ops);
 errout:
@@ -244,9 +241,10 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                          u32 handle, struct nlattr **tca,
                          unsigned long *arg, bool ovr)
 {
-       struct cls_bpf_head *head = tp->root;
-       struct cls_bpf_prog *prog = (struct cls_bpf_prog *) *arg;
+       struct cls_bpf_head *head = rtnl_dereference(tp->root);
+       struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
        struct nlattr *tb[TCA_BPF_MAX + 1];
+       struct cls_bpf_prog *prog;
        int ret;
 
        if (tca[TCA_OPTIONS] == NULL)
@@ -256,18 +254,19 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
        if (ret < 0)
                return ret;
 
-       if (prog != NULL) {
-               if (handle && prog->handle != handle)
-                       return -EINVAL;
-               return cls_bpf_modify_existing(net, tp, prog, base, tb,
-                                              tca[TCA_RATE], ovr);
-       }
-
        prog = kzalloc(sizeof(*prog), GFP_KERNEL);
-       if (prog == NULL)
+       if (!prog)
                return -ENOBUFS;
 
        tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
+
+       if (oldprog) {
+               if (handle && oldprog->handle != handle) {
+                       ret = -EINVAL;
+                       goto errout;
+               }
+       }
+
        if (handle == 0)
                prog->handle = cls_bpf_grab_new_handle(tp, head);
        else
@@ -281,16 +280,17 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
        if (ret < 0)
                goto errout;
 
-       tcf_tree_lock(tp);
-       list_add(&prog->link, &head->plist);
-       tcf_tree_unlock(tp);
+       if (oldprog) {
+               list_replace_rcu(&prog->link, &oldprog->link);
+               call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
+       } else {
+               list_add_rcu(&prog->link, &head->plist);
+       }
 
        *arg = (unsigned long) prog;
        return 0;
 
 errout:
-       if (*arg == 0UL && prog)
-               kfree(prog);
-
+       kfree(prog);
        return ret;
 }
@@ -339,10 +339,10 @@ nla_put_failure:
 
 static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
 {
-       struct cls_bpf_head *head = tp->root;
+       struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;
 
-       list_for_each_entry(prog, &head->plist, link) {
+       list_for_each_entry_rcu(prog, &head->plist, link) {
                if (arg->count < arg->skip)
                        goto skip;
                if (arg->fn(tp, (unsigned long) prog, arg) < 0) {