2016-07-21 18:03:11 +08:00
|
|
|
/*
|
|
|
|
* net/sched/cls_matchll.c Match-all classifier
|
|
|
|
*
|
|
|
|
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/module.h>
|
2019-01-18 04:44:25 +08:00
|
|
|
#include <linux/percpu.h>
|
2016-07-21 18:03:11 +08:00
|
|
|
|
|
|
|
#include <net/sch_generic.h>
|
|
|
|
#include <net/pkt_cls.h>
|
|
|
|
|
2017-01-31 21:14:29 +08:00
|
|
|
/* Per-tcf_proto state for the match-all classifier. At most one filter
 * instance exists per tp, so the head itself carries all filter data.
 */
struct cls_mall_head {
	struct tcf_exts exts;		/* attached actions */
	struct tcf_result res;		/* classification result (classid/class) */
	u32 handle;			/* filter handle; defaults to 1 */
	u32 flags;			/* TCA_CLS_FLAGS_* (skip_sw/skip_hw/in_hw) */
	unsigned int in_hw_count;	/* number of devices the filter is offloaded to */
	struct tc_matchall_pcnt __percpu *pf;	/* per-cpu hit counters */
	struct rcu_work rwork;		/* deferred destruction after RCU grace period */
};
|
|
|
|
|
|
|
|
/* Classify a packet. Match-all always matches, so report the configured
 * result and run the attached actions.
 *
 * Called on the data path in RCU-bh read side (hence rcu_dereference_bh).
 * Returns -1 ("no match") when software classification is disabled via
 * the skip_sw flag; otherwise returns the actions' verdict.
 */
static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct cls_mall_head *head = rcu_dereference_bh(tp->root);

	if (tc_skip_sw(head->flags))
		return -1;

	/* Hand the configured classid back to the qdisc before running
	 * actions; classful qdiscs rely on *res being filled in.
	 */
	*res = head->res;
	__this_cpu_inc(head->pf->rhit);
	return tcf_exts_exec(skb, &head->exts, res);
}
|
|
|
|
|
|
|
|
/* ->init: nothing to set up; the head is allocated lazily in mall_change(). */
static int mall_init(struct tcf_proto *tp)
{
	return 0;
}
|
|
|
|
|
2017-11-07 05:47:26 +08:00
|
|
|
/* Release all resources held by a filter head. Must run with RTNL held
 * (action teardown requires it); callers either hold it already or go
 * through mall_destroy_work().
 */
static void __mall_destroy(struct cls_mall_head *head)
{
	tcf_exts_destroy(&head->exts);
	tcf_exts_put_net(&head->exts);
	free_percpu(head->pf);
	kfree(head);
}
|
|
|
|
|
2017-10-27 09:24:35 +08:00
|
|
|
/* Deferred-destruction worker: runs after an RCU grace period and takes
 * RTNL before freeing, since action cleanup must run under it.
 */
static void mall_destroy_work(struct work_struct *work)
{
	struct cls_mall_head *head = container_of(to_rcu_work(work),
						  struct cls_mall_head,
						  rwork);
	rtnl_lock();
	__mall_destroy(head);
	rtnl_unlock();
}
|
|
|
|
|
2017-10-19 21:50:33 +08:00
|
|
|
/* Remove the filter from hardware via the block's offload callbacks and
 * drop the block's offload refcount / clear in_hw state in head->flags.
 */
static void mall_destroy_hw_filter(struct tcf_proto *tp,
				   struct cls_mall_head *head,
				   unsigned long cookie,
				   struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_DESTROY;
	cls_mall.cookie = cookie;

	/* err_stop = false: destroy on every callback regardless of errors */
	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false);
	tcf_block_offload_dec(block, &head->flags);
}
|
|
|
|
|
2017-10-19 21:50:33 +08:00
|
|
|
/* Offload the filter to hardware via the block's offload callbacks.
 *
 * Returns 0 on success. On a callback error the partially-installed hw
 * filter is torn down and the error returned. If no device accepted the
 * filter and software fallback is disallowed (skip_sw), returns -EINVAL.
 */
static int mall_replace_hw_filter(struct tcf_proto *tp,
				  struct cls_mall_head *head,
				  unsigned long cookie,
				  struct netlink_ext_ack *extack)
{
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	bool skip_sw = tc_skip_sw(head->flags);
	int err;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = TC_CLSMATCHALL_REPLACE;
	cls_mall.exts = &head->exts;
	cls_mall.cookie = cookie;

	/* A positive return is the number of drivers that installed it */
	err = tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, skip_sw);
	if (err < 0) {
		mall_destroy_hw_filter(tp, head, cookie, NULL);
		return err;
	} else if (err > 0) {
		head->in_hw_count = err;
		tcf_block_offload_inc(block, &head->flags);
	}

	/* skip_sw demands at least one hw install; otherwise fail */
	if (skip_sw && !(head->flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}
|
|
|
|
|
2019-02-11 16:55:45 +08:00
|
|
|
/* ->destroy: unbind the class, remove any hardware offload, and free the
 * head. Destruction is deferred via RCU work when the actions still hold
 * a reference on the netns; otherwise it is done synchronously.
 */
static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
			 struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head)
		return;

	tcf_unbind_filter(tp, &head->res);

	if (!tc_skip_hw(head->flags))
		mall_destroy_hw_filter(tp, head, (unsigned long) head, extack);

	if (tcf_exts_get_net(&head->exts))
		tcf_queue_work(&head->rwork, mall_destroy_work);
	else
		__mall_destroy(head);
}
|
|
|
|
|
2017-08-05 12:31:43 +08:00
|
|
|
/* ->get: look up the filter by handle. Since matchall keeps at most one
 * instance per tp, this is a simple compare against the head's handle.
 * Returns the head on a match, NULL otherwise.
 */
static void *mall_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);

	if (!head || head->handle != handle)
		return NULL;

	return head;
}
|
|
|
|
|
|
|
|
/* Netlink attribute policy for TCA_MATCHALL_* attributes. */
static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
	[TCA_MATCHALL_UNSPEC] = { .type = NLA_UNSPEC },
	[TCA_MATCHALL_CLASSID] = { .type = NLA_U32 },
};
|
|
|
|
|
|
|
|
/* Validate and apply actions and the classid from netlink attributes.
 * Binds the filter to its class when TCA_MATCHALL_CLASSID is present.
 * Returns 0 on success or a negative errno from action validation.
 */
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
			  struct cls_mall_head *head,
			  unsigned long base, struct nlattr **tb,
			  struct nlattr *est, bool ovr,
			  struct netlink_ext_ack *extack)
{
	int err;

	err = tcf_exts_validate(net, tp, tb, est, &head->exts, ovr, true,
				extack);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_CLASSID]) {
		head->res.classid = nla_get_u32(tb[TCA_MATCHALL_CLASSID]);
		tcf_bind_filter(tp, &head->res, base);
	}
	return 0;
}
|
|
|
|
|
|
|
|
/* ->change: create the (single) matchall filter for this tp.
 *
 * Only one instance may exist; a second add is rejected with -EEXIST.
 * On success the new head is published to the data path via RCU.
 * Errors unwind through the goto-cleanup chain in reverse order of
 * acquisition.
 */
static int mall_change(struct net *net, struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       void **arg, bool ovr, bool rtnl_held,
		       struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct nlattr *tb[TCA_MATCHALL_MAX + 1];
	struct cls_mall_head *new;
	u32 flags = 0;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	/* Replace of an existing instance is not supported */
	if (head)
		return -EEXIST;

	err = nla_parse_nested(tb, TCA_MATCHALL_MAX, tca[TCA_OPTIONS],
			       mall_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_MATCHALL_FLAGS]) {
		flags = nla_get_u32(tb[TCA_MATCHALL_FLAGS]);
		if (!tc_flags_valid(flags))
			return -EINVAL;
	}

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	err = tcf_exts_init(&new->exts, net, TCA_MATCHALL_ACT, 0);
	if (err)
		goto err_exts_init;

	if (!handle)
		handle = 1;
	new->handle = handle;
	new->flags = flags;
	new->pf = alloc_percpu(struct tc_matchall_pcnt);
	if (!new->pf) {
		err = -ENOMEM;
		goto err_alloc_percpu;
	}

	err = mall_set_parms(net, tp, new, base, tb, tca[TCA_RATE], ovr,
			     extack);
	if (err)
		goto err_set_parms;

	if (!tc_skip_hw(new->flags)) {
		err = mall_replace_hw_filter(tp, new, (unsigned long)new,
					     extack);
		if (err)
			goto err_replace_hw_filter;
	}

	if (!tc_in_hw(new->flags))
		new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	/* NOTE(review): head is NULL on this path (non-NULL returned -EEXIST
	 * above), so this reports no old filter to the caller — presumably
	 * intentional since only creation is supported; confirm upstream.
	 */
	*arg = head;
	rcu_assign_pointer(tp->root, new);
	return 0;

err_replace_hw_filter:
err_set_parms:
	free_percpu(new->pf);
err_alloc_percpu:
	tcf_exts_destroy(&new->exts);
err_exts_init:
	kfree(new);
	return err;
}
|
|
|
|
|
2018-01-19 00:20:53 +08:00
|
|
|
/* ->delete: deleting an individual filter is not supported; the single
 * matchall instance is removed by destroying the whole tp instead.
 */
static int mall_delete(struct tcf_proto *tp, void *arg, bool *last,
		       bool rtnl_held, struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}
|
|
|
|
|
2019-02-11 16:55:45 +08:00
|
|
|
static void mall_walk(struct tcf_proto *tp, struct tcf_walker *arg,
|
|
|
|
bool rtnl_held)
|
2016-07-21 18:03:11 +08:00
|
|
|
{
|
|
|
|
struct cls_mall_head *head = rtnl_dereference(tp->root);
|
|
|
|
|
|
|
|
if (arg->count < arg->skip)
|
|
|
|
goto skip;
|
2019-02-15 23:17:56 +08:00
|
|
|
|
|
|
|
if (!head)
|
|
|
|
return;
|
2017-08-05 12:31:43 +08:00
|
|
|
if (arg->fn(tp, head, arg) < 0)
|
2016-07-21 18:03:11 +08:00
|
|
|
arg->stop = 1;
|
|
|
|
skip:
|
|
|
|
arg->count++;
|
|
|
|
}
|
|
|
|
|
2018-06-26 05:30:07 +08:00
|
|
|
/* ->reoffload: replay the filter's hw add/destroy against a single
 * (un)registering offload callback, then update the block's offload
 * counters. A callback failure is only fatal when adding with skip_sw,
 * since then no software fallback exists.
 */
static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
			  void *cb_priv, struct netlink_ext_ack *extack)
{
	struct cls_mall_head *head = rtnl_dereference(tp->root);
	struct tc_cls_matchall_offload cls_mall = {};
	struct tcf_block *block = tp->chain->block;
	int err;

	if (tc_skip_hw(head->flags))
		return 0;

	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, extack);
	cls_mall.command = add ?
		TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
	cls_mall.exts = &head->exts;
	cls_mall.cookie = (unsigned long)head;

	err = cb(TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv);
	if (err) {
		if (add && tc_skip_sw(head->flags))
			return err;
		return 0;
	}

	tc_cls_offload_cnt_update(block, &head->in_hw_count, &head->flags, add);

	return 0;
}
|
|
|
|
|
2017-08-05 12:31:43 +08:00
|
|
|
/* ->dump: emit the filter's attributes (classid, flags, aggregated
 * per-cpu hit counters, actions) into a nested TCA_OPTIONS attribute.
 * Returns skb->len on success, -1 on a netlink put failure (after
 * cancelling the partially-built nest).
 */
static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
{
	struct tc_matchall_pcnt gpf = {};
	struct cls_mall_head *head = fh;
	struct nlattr *nest;
	int cpu;

	if (!head)
		return skb->len;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (!nest)
		goto nla_put_failure;

	if (head->res.classid &&
	    nla_put_u32(skb, TCA_MATCHALL_CLASSID, head->res.classid))
		goto nla_put_failure;

	if (head->flags && nla_put_u32(skb, TCA_MATCHALL_FLAGS, head->flags))
		goto nla_put_failure;

	/* Sum per-cpu hit counters into a single global struct for dumping */
	for_each_possible_cpu(cpu) {
		struct tc_matchall_pcnt *pf = per_cpu_ptr(head->pf, cpu);

		gpf.rhit += pf->rhit;
	}

	if (nla_put_64bit(skb, TCA_MATCHALL_PCNT,
			  sizeof(struct tc_matchall_pcnt),
			  &gpf, TCA_MATCHALL_PAD))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
|
|
|
|
|
net_sched: add reverse binding for tc class
TC filters when used as classifiers are bound to TC classes.
However, there is a hidden difference when adding them in different
orders:
1. If we add tc classes before its filters, everything is fine.
Logically, the classes exist before we specify their ID's in
filters, it is easy to bind them together, just as in the current
code base.
2. If we add tc filters before the tc classes they bind, we have to
do dynamic lookup in fast path. What's worse, this happens all
the time not just once, because on fast path tcf_result is passed
on stack, there is no way to propagate back to the one in tc filters.
This hidden difference hurts performance silently if we have many tc
classes in hierarchy.
This patch intends to close this gap by doing the reverse binding when
we create a new class, in this case we can actually search all the
filters in its parent, match and fixup by classid. And because
tcf_result is specific to each type of tc filter, we have to introduce
a new ops for each filter to tell how to bind the class.
Note, we still can NOT totally get rid of those class lookup in
->enqueue() because cgroup and flow filters have no way to determine
the classid at setup time, they still have to go through dynamic lookup.
Cc: Jamal Hadi Salim <jhs@mojatatu.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-08-31 05:30:36 +08:00
|
|
|
/* ->bind_class: reverse binding — when a class with a matching classid is
 * created after this filter, fix up the cached class pointer so the fast
 * path avoids a dynamic class lookup.
 */
static void mall_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_mall_head *head = fh;

	if (!head)
		return;

	if (head->res.classid == classid)
		head->res.class = cl;
}
|
|
|
|
|
2016-07-21 18:03:11 +08:00
|
|
|
/* Classifier ops table registered with the TC core. */
static struct tcf_proto_ops cls_mall_ops __read_mostly = {
	.kind		= "matchall",
	.classify	= mall_classify,
	.init		= mall_init,
	.destroy	= mall_destroy,
	.get		= mall_get,
	.change		= mall_change,
	.delete		= mall_delete,
	.walk		= mall_walk,
	.reoffload	= mall_reoffload,
	.dump		= mall_dump,
	.bind_class	= mall_bind_class,
	.owner		= THIS_MODULE,
};
|
|
|
|
|
|
|
|
/* Module init: register the matchall classifier with the TC core. */
static int __init cls_mall_init(void)
{
	return register_tcf_proto_ops(&cls_mall_ops);
}
|
|
|
|
|
|
|
|
/* Module exit: unregister the matchall classifier. */
static void __exit cls_mall_exit(void)
{
	unregister_tcf_proto_ops(&cls_mall_ops);
}
|
|
|
|
|
|
|
|
module_init(cls_mall_init);
module_exit(cls_mall_exit);

MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Match-all classifier");
MODULE_LICENSE("GPL v2");
|