d936377414
Roi reported a crash in flower where tp->root was NULL in ->classify() callbacks. The reason is that in ->destroy() tp->root is set to NULL via RCU_INIT_POINTER(). That is problematic for some of the classifiers, because it doesn't respect the RCU grace period for them, and as a result, still-outstanding readers from tc_classify() will try to blindly dereference a NULL tp->root.

The tp->root object is strictly private to the classifier implementation and holds internal data the core, such as tc_ctl_tfilter(), doesn't know about. Within some classifiers, such as cls_bpf, cls_basic, etc., tp->root is only checked for NULL in the ->get() callback, but nowhere else. This is misleading and seems to have been copied from old classifier code that was not cleaned up properly. For example, d3fa76ee6b ("[NET_SCHED]: cls_basic: fix NULL pointer dereference") moved tp->root initialization into the ->init() routine, where before it was part of ->change(), so ->get() had to deal with tp->root being NULL back then; that was indeed a valid case, but after d3fa76ee6b, not really anymore. We used to set tp->root to NULL long ago in ->destroy(), see 47a1a1d4be ("pkt_sched: remove unnecessary xchg() in packet classifiers"); the NULLifying was reintroduced with the RCUification, but it's not correct for every classifier implementation.

In the cases fixed here, with the one exception of cls_cgroup, the tp->root object is allocated and initialized inside the ->init() callback, which is always performed at a point in time after we allocate a new tp, which means tp and thus tp->root were not globally visible in the tp chain yet (see tc_ctl_tfilter()). Also, on destruction tp->root is strictly kfree_rcu()'ed in the ->destroy() handler, same for the tp, which is kfree_rcu()'ed right when we return from ->destroy() in tcf_destroy(). This means the head object's lifetime for such classifiers is always tied to the tp lifetime. The RCU callback invocations for the two kfree_rcu() could be out of order, but that's fine since both are independent.

Dropping the RCU_INIT_POINTER(tp->root, NULL) for these classifiers here means that 1) we don't need a useless NULL check in the fast path and 2) outstanding readers of that tp in tc_classify() can still execute under the protection of the RCU grace period, as is actually expected.

Things that haven't been touched here: cls_fw and cls_route. They each handle tp->root being NULL in the ->classify() path for historic reasons, so their ->destroy() implementation can stay as is. If someone actually cares, they could get cleaned up at some point to avoid the test in the fast path. cls_u32 doesn't set tp->root to NULL. For cls_rsvp, I just added a !head check should anyone actually be using/testing it, so it at least aligns with cls_fw and cls_route.

For cls_flower we additionally need to defer rhashtable destruction (to a sleepable context) after the RCU grace period, as concurrent readers might still access it. (Note that in this case we need to hold a module reference to keep the work callback address intact, since we only wait on module unload for all call_rcu()s to finish.)

This fixes one race to bring RCU grace period guarantees back. The next step, as worked on by Cong, is to fix 1e052be69d ("net_sched: destroy proto tp when all filters are gone") to get the order of unlinking the tp in tc_ctl_tfilter() for the RTM_DELTFILTER case right, by moving RCU_INIT_POINTER() before tcf_destroy() and letting the notification for removal be done through the prior ->delete() callback. Both are independent issues. Once we have that right, we can then clean tp->root up for a number of classifiers by not making them RCU pointers, which requires a new callback (->uninit) that is triggered from tp's RCU callback, where we just kfree() tp->root from there.

Fixes: 1f947bf151 ("net: sched: rcu'ify cls_bpf")
Fixes: 9888faefe1 ("net: sched: cls_basic use RCU")
Fixes: 70da9f0bf9 ("net: sched: cls_flow use RCU")
Fixes: 77b9900ef5 ("tc: introduce Flower classifier")
Fixes: bf3994d2ed ("net/sched: introduce Match-all classifier")
Fixes: 952313bd62 ("net: sched: cls_cgroup use RCU")
Reported-by: Roi Dayan <roid@mellanox.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: Roi Dayan <roid@mellanox.com>
Cc: Jiri Pirko <jiri@mellanox.com>
Acked-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
318 lines
7.2 KiB
C
/*
 * net/sched/cls_matchll.c		Match-all classifier
 *
 * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>

#include <net/sch_generic.h>
#include <net/pkt_cls.h>

struct cls_mall_filter {
	struct tcf_exts exts;
	struct tcf_result res;
	u32 handle;
	struct rcu_head rcu;
	u32 flags;
};

struct cls_mall_head {
	struct cls_mall_filter *filter;
	struct rcu_head rcu;
};

static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);
	struct cls_mall_filter *f = head->filter;

	if (tc_skip_sw(f->flags))
		return -1;

	return tcf_exts_exec(skb, &f->exts, res);
}

static int mall_init(struct tcf_proto *tp)
{
	struct cls_mall_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!head)
		return -ENOBUFS;

	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void mall_destroy_filter(struct rcu_head *head)
{
	struct cls_mall_filter *f = container_of(head, struct cls_mall_filter, rcu);

	tcf_exts_destroy(&f->exts);

	kfree(f);
}

static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_filter *f,
				  unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_to_netdev offload;
	struct tc_cls_matchall_offload mall_offload = {0};

	offload.type = TC_SETUP_MATCHALL;
	offload.cls_mall = &mall_offload;
	offload.cls_mall->command = TC_CLSMATCHALL_REPLACE;
	offload.cls_mall->exts = &f->exts;
	offload.cls_mall->cookie = cookie;

	return dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
					     &offload);
}

static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_filter *f,
				   unsigned long cookie)
{
	struct net_device *dev = tp->q->dev_queue->dev;
	struct tc_to_netdev offload;
	struct tc_cls_matchall_offload mall_offload = {0};

	offload.type = TC_SETUP_MATCHALL;
	offload.cls_mall = &mall_offload;
	offload.cls_mall->command = TC_CLSMATCHALL_DESTROY;
	offload.cls_mall->exts = NULL;
	offload.cls_mall->cookie = cookie;

	dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, tp->protocol,
				      &offload);
}

static bool mall_destroy(struct tcf_proto *tp, bool force)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_mall_filter *f = head->filter;

	if (!force && f)
		return false;

	if (f) {
		if (tc_should_offload(dev, tp, f->flags))
			mall_destroy_hw_filter(tp, f, (unsigned long) f);

		call_rcu(&f->rcu, mall_destroy_filter);
	}
	/* tp->root is deliberately not set to NULL here: outstanding
	 * mall_classify() readers keep seeing a valid head until the RCU
	 * grace period has elapsed and kfree_rcu() reclaims it.
	 */
	kfree_rcu(head, rcu);
	return true;
}

static unsigned long mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct cls_mall_filter *f = head->filter;

	if (f && f->handle == handle)
		return (unsigned long) f;
	return 0;
}

static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC]	= { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID]	= { .type = NLA_U32 },
};

static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_filter *f,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr)
{
	struct tcf_exts e;
	int err;

	tcf_exts_init(&e, TCA_MATCHALL_ACT, 0);
	err = tcf_exts_validate(net, tp, tb, est, &e, ovr);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
}

static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg, bool ovr)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct cls_mall_filter *fold = (struct cls_mall_filter *) *arg;
	struct net_device *dev = tp->q->dev_queue->dev;
	struct cls_mall_filter *f;
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (head->filter)
		return -EBUSY;

	if (fold)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_MATCHALL_MAX,
			       tca[TCA_OPTIONS], mall_policy);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOBUFS;

	tcf_exts_init(&f->exts, TCA_MATCHALL_ACT, 0);

	if (!handle)
		handle = 1;
	f->handle = handle;
	f->flags = flags;

	err = mall_set_parms(net, tp, f, base, tb, tca[TCA_RATE], ovr);
	if (err)
		goto errout;

	if (tc_should_offload(dev, tp, flags)) {
		err = mall_replace_hw_filter(tp, f, (unsigned long) f);
		if (err) {
			if (tc_skip_sw(flags))
				goto errout;
			else
				err = 0;
		}
	}

	*arg = (unsigned long) f;
	rcu_assign_pointer(head->filter, f);

	return 0;

errout:
	kfree(f);
	return err;
}

static int mall_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct cls_mall_filter *f = (struct cls_mall_filter *) arg;
	struct net_device *dev = tp->q->dev_queue->dev;

	if (tc_should_offload(dev, tp, f->flags))
		mall_destroy_hw_filter(tp, f, (unsigned long) f);

	RCU_INIT_POINTER(head->filter, NULL);
	tcf_unbind_filter(tp, &f->res);
	call_rcu(&f->rcu, mall_destroy_filter);
	return 0;
}

static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct cls_mall_filter *f = head->filter;

	if (arg->count < arg->skip)
		goto skip;
	if (arg->fn(tp, (unsigned long) f, arg) < 0)
		arg->stop = 1;
skip:
	arg->count++;
}

static int mall_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_mall_filter *f = (struct cls_mall_filter *) fh;
	struct nlattr *nest;

	if (!f)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.dump		= mall_dump,
	.owner		= THIS_MODULE,
};

static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}

static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}

module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");
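As a companion to mall_replace_hw_filter() and mall_destroy_hw_filter() above, a driver offloading matchall would handle the TC_SETUP_MATCHALL type in its ndo_setup_tc() callback roughly as sketched below. The foo_* device helpers are hypothetical placeholders; only the tc_to_netdev/tc_cls_matchall_offload plumbing and the four-argument ndo_setup_tc() prototype mirror what this file passes to the driver:

/* Hedged sketch of the driver side; foo_* helpers are assumptions. */
static int foo_ndo_setup_tc(struct net_device *dev, u32 handle,
			    __be16 protocol, struct tc_to_netdev *tc)
{
	if (tc->type != TC_SETUP_MATCHALL)
		return -EOPNOTSUPP;

	switch (tc->cls_mall->command) {
	case TC_CLSMATCHALL_REPLACE:
		/* Install a catch-all rule keyed by the cookie, walking
		 * tc->cls_mall->exts for the attached actions.
		 */
		return foo_install_matchall(dev, tc->cls_mall->cookie,
					    tc->cls_mall->exts);
	case TC_CLSMATCHALL_DESTROY:
		foo_remove_matchall(dev, tc->cls_mall->cookie);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

The cookie is the (unsigned long) cast of the cls_mall_filter pointer, as set up in mall_change()/mall_delete() above, so the driver can use it as an opaque key for the installed rule.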