net: rcu-ify tcf_proto

RCU'ify tcf_proto. This allows calling tc_classify() without holding any locks. Updaters are protected by RTNL. This patch prepares the core net_sched infrastructure for running the classifier/action chains without holding the qdisc lock; however, it does nothing to ensure the cls_xxx and act_xxx types also work without locking. Additional patches are required to address that fallout.

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

parent 46e5da40ae
commit 25d8c0d55f
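The conversion follows one contract across every touched file: filter-chain pointers become __rcu annotated, fast-path readers fetch them with rcu_dereference_bh() (the qdisc data path runs with BH disabled, i.e. inside an RCU-bh read-side critical section), and updaters, which already hold RTNL, use rtnl_dereference() plus rcu_assign_pointer()/RCU_INIT_POINTER() and free old entries after a grace period. Below is a minimal kernel-style sketch of that contract; the structure and function names (my_qdisc, my_classify, my_attach) are illustrative only and are not part of the patch.

/* Illustrative sketch of the reader/updater split; simplified, not patch text. */
struct my_qdisc {
	struct tcf_proto __rcu *filter_list;	/* chain head */
};

/* Reader: packet fast path, no qdisc lock; runs in softirq context or
 * under an explicit rcu_read_lock_bh(). */
static int my_classify(struct sk_buff *skb, struct my_qdisc *q,
		       struct tcf_result *res)
{
	struct tcf_proto *tp;

	for (tp = rcu_dereference_bh(q->filter_list); tp;
	     tp = rcu_dereference_bh(tp->next))
		if (tp->protocol == skb->protocol)
			return tp->classify(skb, tp, res);
	return -1;
}

/* Updater: always called with RTNL held, so rtnl_dereference() is the
 * correct accessor on the write side; publication uses rcu_assign_pointer(). */
static void my_attach(struct my_qdisc *q, struct tcf_proto *tp)
{
	ASSERT_RTNL();
	RCU_INIT_POINTER(tp->next, rtnl_dereference(q->filter_list));
	rcu_assign_pointer(q->filter_list, tp);	/* publish fully initialised tp */
}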
include/net/sch_generic.h
@@ -143,7 +143,7 @@ struct Qdisc_class_ops {
 	void			(*walk)(struct Qdisc *, struct qdisc_walker * arg);
 
 	/* Filter manipulation */
-	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
+	struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long);
 	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
 					u32 classid);
 	void			(*unbind_tcf)(struct Qdisc *, unsigned long);
@@ -212,8 +212,8 @@ struct tcf_proto_ops {
 
 struct tcf_proto {
 	/* Fast access part */
-	struct tcf_proto	*next;
-	void			*root;
+	struct tcf_proto __rcu	*next;
+	void __rcu		*root;
 	int			(*classify)(struct sk_buff *,
 					    const struct tcf_proto *,
 					    struct tcf_result *);
@@ -225,6 +225,7 @@ struct tcf_proto {
 	struct Qdisc		*q;
 	void			*data;
 	const struct tcf_proto_ops	*ops;
+	struct rcu_head		rcu;
 };
 
 struct qdisc_skb_cb {
@@ -378,7 +379,7 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
 void __qdisc_calculate_pkt_len(struct sk_buff *skb,
 			       const struct qdisc_size_table *stab);
 void tcf_destroy(struct tcf_proto *tp);
-void tcf_destroy_chain(struct tcf_proto **fl);
+void tcf_destroy_chain(struct tcf_proto __rcu **fl);
 
 /* Reset all TX qdiscs greater then index of a device.  */
 static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
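The __rcu annotations added above are what allow sparse to flag accesses that bypass the RCU accessors. A tiny illustrative sketch, using a hypothetical structure name (foo) that is not part of the patch:

/* Hypothetical example: with __rcu, sparse warns if the pointer is read
 * or written without an RCU accessor. */
struct foo {
	struct tcf_proto __rcu *filter_list;
};

static void foo_attach(struct foo *f, struct tcf_proto *tp)
{
	/* ok: publishes tp with the needed memory barrier */
	rcu_assign_pointer(f->filter_list, tp);
	/* not ok: "f->filter_list = tp;" would be flagged by sparse */
}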
net/sched/cls_api.c
@@ -117,7 +117,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
 {
 	struct net *net = sock_net(skb->sk);
 	struct nlattr *tca[TCA_MAX + 1];
-	spinlock_t *root_lock;
 	struct tcmsg *t;
 	u32 protocol;
 	u32 prio;
@@ -125,7 +124,8 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
 	u32 parent;
 	struct net_device *dev;
 	struct Qdisc *q;
-	struct tcf_proto **back, **chain;
+	struct tcf_proto __rcu **back;
+	struct tcf_proto __rcu **chain;
 	struct tcf_proto *tp;
 	const struct tcf_proto_ops *tp_ops;
 	const struct Qdisc_class_ops *cops;
@@ -197,7 +197,9 @@ replay:
 		goto errout;
 
 	/* Check the chain for existence of proto-tcf with this priority */
-	for (back = chain; (tp = *back) != NULL; back = &tp->next) {
+	for (back = chain;
+	     (tp = rtnl_dereference(*back)) != NULL;
+	     back = &tp->next) {
 		if (tp->prio >= prio) {
 			if (tp->prio == prio) {
 				if (!nprio ||
@@ -209,8 +211,6 @@ replay:
 		}
 	}
 
-	root_lock = qdisc_root_sleeping_lock(q);
-
 	if (tp == NULL) {
 		/* Proto-tcf does not exist, create new one */
 
@@ -259,7 +259,8 @@ replay:
 		}
 		tp->ops = tp_ops;
 		tp->protocol = protocol;
-		tp->prio = nprio ? : TC_H_MAJ(tcf_auto_prio(*back));
+		tp->prio = nprio ? :
+			       TC_H_MAJ(tcf_auto_prio(rtnl_dereference(*back)));
 		tp->q = q;
 		tp->classify = tp_ops->classify;
 		tp->classid = parent;
@@ -280,9 +281,9 @@ replay:
 
 	if (fh == 0) {
 		if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
-			spin_lock_bh(root_lock);
-			*back = tp->next;
-			spin_unlock_bh(root_lock);
+			struct tcf_proto *next = rtnl_dereference(tp->next);
+
+			RCU_INIT_POINTER(*back, next);
 
 			tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
 			tcf_destroy(tp);
@@ -322,10 +323,8 @@ replay:
 			      n->nlmsg_flags & NLM_F_CREATE ? TCA_ACT_NOREPLACE : TCA_ACT_REPLACE);
 	if (err == 0) {
 		if (tp_created) {
-			spin_lock_bh(root_lock);
-			tp->next = *back;
-			*back = tp;
-			spin_unlock_bh(root_lock);
+			RCU_INIT_POINTER(tp->next, rtnl_dereference(*back));
+			rcu_assign_pointer(*back, tp);
 		}
 		tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
 	} else {
@@ -420,7 +419,7 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 	int s_t;
 	struct net_device *dev;
 	struct Qdisc *q;
-	struct tcf_proto *tp, **chain;
+	struct tcf_proto *tp, __rcu **chain;
 	struct tcmsg *tcm = nlmsg_data(cb->nlh);
 	unsigned long cl = 0;
 	const struct Qdisc_class_ops *cops;
@@ -454,7 +453,8 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
 
 	s_t = cb->args[0];
 
-	for (tp = *chain, t = 0; tp; tp = tp->next, t++) {
+	for (tp = rtnl_dereference(*chain), t = 0;
+	     tp; tp = rtnl_dereference(tp->next), t++) {
 		if (t < s_t)
 			continue;
 		if (TC_H_MAJ(tcm->tcm_info) &&
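Condensing the tc_ctl_tfilter() changes above: the chain is now walked and edited purely under RTNL, and unlink/insert happen through RCU pointer primitives instead of taking the qdisc root lock. A hedged sketch of the two operations, with the helper names (chain_unlink, chain_insert) invented here for illustration only:

/* Unlink tp from the chain; caller holds RTNL, back points at the
 * __rcu slot that currently holds tp (as in the RTM_DELTFILTER path). */
static void chain_unlink(struct tcf_proto __rcu **back, struct tcf_proto *tp)
{
	struct tcf_proto *next = rtnl_dereference(tp->next);

	RCU_INIT_POINTER(*back, next);	/* readers now skip tp */
	tcf_destroy(tp);		/* frees after a grace period */
}

/* Insert a freshly created tp in front of *back (the tp_created path). */
static void chain_insert(struct tcf_proto __rcu **back, struct tcf_proto *tp)
{
	RCU_INIT_POINTER(tp->next, rtnl_dereference(*back));
	rcu_assign_pointer(*back, tp);	/* publish only after tp is fully set up */
}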
net/sched/sch_api.c
@@ -1781,7 +1781,7 @@ int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
 	__be16 protocol = skb->protocol;
 	int err;
 
-	for (; tp; tp = tp->next) {
+	for (; tp; tp = rcu_dereference_bh(tp->next)) {
 		if (tp->protocol != protocol &&
 		    tp->protocol != htons(ETH_P_ALL))
 			continue;
@@ -1833,15 +1833,15 @@ void tcf_destroy(struct tcf_proto *tp)
 {
 	tp->ops->destroy(tp);
 	module_put(tp->ops->owner);
-	kfree(tp);
+	kfree_rcu(tp, rcu);
 }
 
-void tcf_destroy_chain(struct tcf_proto **fl)
+void tcf_destroy_chain(struct tcf_proto __rcu **fl)
 {
 	struct tcf_proto *tp;
 
-	while ((tp = *fl) != NULL) {
-		*fl = tp->next;
+	while ((tp = rtnl_dereference(*fl)) != NULL) {
+		RCU_INIT_POINTER(*fl, tp->next);
 		tcf_destroy(tp);
 	}
 }
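This is the read path and its matching grace-period handling: tc_classify_compat() now follows tp->next with rcu_dereference_bh(), and tcf_destroy() frees through kfree_rcu() so a reader still walking the chain never touches freed memory. The rcu_head member added to struct tcf_proto exists purely for that deferred free; a rough, hedged sketch of what kfree_rcu(tp, rcu) amounts to conceptually (the helper names below are illustrative, not patch text):

/* Deferred-free callback: recover the tcf_proto from its rcu_head. */
static void tcf_free_rcu(struct rcu_head *head)
{
	struct tcf_proto *tp = container_of(head, struct tcf_proto, rcu);

	kfree(tp);
}

static void tcf_destroy_sketch(struct tcf_proto *tp)
{
	tp->ops->destroy(tp);
	module_put(tp->ops->owner);
	/* roughly what kfree_rcu(tp, rcu) does: free after a grace period */
	call_rcu(&tp->rcu, tcf_free_rcu);
}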
net/sched/sch_atm.c
@@ -41,7 +41,7 @@
 
 struct atm_flow_data {
 	struct Qdisc		*q;	/* FIFO, TBF, etc. */
-	struct tcf_proto	*filter_list;
+	struct tcf_proto __rcu	*filter_list;
 	struct atm_vcc		*vcc;	/* VCC; NULL if VCC is closed */
 	void			(*old_pop)(struct atm_vcc *vcc,
 					   struct sk_buff *skb); /* chaining */
@@ -273,7 +273,7 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
 		error = -ENOBUFS;
 		goto err_out;
 	}
-	flow->filter_list = NULL;
+	RCU_INIT_POINTER(flow->filter_list, NULL);
 	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
 	if (!flow->q)
 		flow->q = &noop_qdisc;
@@ -311,7 +311,7 @@ static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
 	pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
 	if (list_empty(&flow->list))
 		return -EINVAL;
-	if (flow->filter_list || flow == &p->link)
+	if (rcu_access_pointer(flow->filter_list) || flow == &p->link)
 		return -EBUSY;
 	/*
 	 * Reference count must be 2: one for "keepalive" (set at class
@@ -345,7 +345,8 @@ static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 	}
 }
 
-static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **atm_tc_find_tcf(struct Qdisc *sch,
+						unsigned long cl)
 {
 	struct atm_qdisc_data *p = qdisc_priv(sch);
 	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
@@ -369,11 +370,12 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	flow = NULL;
 	if (TC_H_MAJ(skb->priority) != sch->handle ||
 	    !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) {
+		struct tcf_proto *fl;
+
 		list_for_each_entry(flow, &p->flows, list) {
-			if (flow->filter_list) {
-				result = tc_classify_compat(skb,
-							    flow->filter_list,
-							    &res);
+			fl = rcu_dereference_bh(flow->filter_list);
+			if (fl) {
+				result = tc_classify_compat(skb, fl, &res);
 				if (result < 0)
 					continue;
 				flow = (struct atm_flow_data *)res.class;
@@ -544,7 +546,7 @@ static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
 	if (!p->link.q)
 		p->link.q = &noop_qdisc;
 	pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
-	p->link.filter_list = NULL;
+	RCU_INIT_POINTER(p->link.filter_list, NULL);
 	p->link.vcc = NULL;
 	p->link.sock = NULL;
 	p->link.classid = sch->handle;
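sch_atm also shows the third accessor the conversion relies on: where code only needs to know whether any classifier is attached (atm_tc_delete() above), rcu_access_pointer() fetches the pointer value for a NULL test without requiring an RCU read-side section, since the pointer is never dereferenced. A minimal sketch with a hypothetical helper name:

/* Returns true if any classifier is attached to the flow. Safe without
 * rcu_read_lock(): the pointer is only compared against NULL, not followed. */
static bool flow_has_filters(const struct atm_flow_data *flow)
{
	return rcu_access_pointer(flow->filter_list) != NULL;
}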
net/sched/sch_cbq.c
@@ -133,7 +133,7 @@ struct cbq_class {
 	struct gnet_stats_rate_est64 rate_est;
 	struct tc_cbq_xstats	xstats;
 
-	struct tcf_proto	*filter_list;
+	struct tcf_proto __rcu	*filter_list;
 
 	int			refcnt;
 	int			filters;
@@ -221,6 +221,7 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	struct cbq_class **defmap;
 	struct cbq_class *cl = NULL;
 	u32 prio = skb->priority;
+	struct tcf_proto *fl;
 	struct tcf_result res;
 
 	/*
@@ -235,11 +236,12 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 		int result = 0;
 		defmap = head->defaults;
 
+		fl = rcu_dereference_bh(head->filter_list);
 		/*
 		 * Step 2+n. Apply classifier.
 		 */
-		if (!head->filter_list ||
-		    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
+		result = tc_classify_compat(skb, fl, &res);
+		if (!fl || result < 0)
 			goto fallback;
 
 		cl = (void *)res.class;
@@ -1954,7 +1956,8 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 	return 0;
 }
 
-static struct tcf_proto **cbq_find_tcf(struct Qdisc *sch, unsigned long arg)
+static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch,
					     unsigned long arg)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = (struct cbq_class *)arg;
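cbq_classify() shows the shape repeated in most classify paths below: the __rcu pointer is loaded exactly once into a local (fl), and both the NULL test and the tc_classify_compat() call use that local, so the chain cannot change identity between the check and the use once the qdisc lock no longer serialises readers and writers. A simplified before/after sketch of that shape (condensed from the hunk above, not a verbatim extract):

	/* Before: head->filter_list is read twice; the two reads may see
	 * different chains once updates no longer exclude this path. */
	if (!head->filter_list ||
	    (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
		goto fallback;

	/* After: one rcu_dereference_bh(), then only the local snapshot is used
	 * (tc_classify_compat() tolerates a NULL tp and returns < 0). */
	fl = rcu_dereference_bh(head->filter_list);
	result = tc_classify_compat(skb, fl, &res);
	if (!fl || result < 0)
		goto fallback;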
net/sched/sch_choke.c
@@ -57,7 +57,7 @@ struct choke_sched_data {
 
 /* Variables */
 	struct red_vars  vars;
-	struct tcf_proto *filter_list;
+	struct tcf_proto __rcu *filter_list;
 	struct {
 		u32	prob_drop;	/* Early probability drops */
 		u32	prob_mark;	/* Early probability marks */
@@ -193,9 +193,11 @@ static bool choke_classify(struct sk_buff *skb,
 {
 	struct choke_sched_data *q = qdisc_priv(sch);
 	struct tcf_result res;
+	struct tcf_proto *fl;
 	int result;
 
-	result = tc_classify(skb, q->filter_list, &res);
+	fl = rcu_dereference_bh(q->filter_list);
+	result = tc_classify(skb, fl, &res);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
@@ -249,7 +251,7 @@ static bool choke_match_random(const struct choke_sched_data *q,
 		return false;
 
 	oskb = choke_peek_random(q, pidx);
-	if (q->filter_list)
+	if (rcu_access_pointer(q->filter_list))
 		return choke_get_classid(nskb) == choke_get_classid(oskb);
 
 	return choke_match_flow(oskb, nskb);
@@ -257,11 +259,11 @@ static bool choke_match_random(const struct choke_sched_data *q,
 
 static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
+	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	struct choke_sched_data *q = qdisc_priv(sch);
 	const struct red_parms *p = &q->parms;
-	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 
-	if (q->filter_list) {
+	if (rcu_access_pointer(q->filter_list)) {
 		/* If using external classifiers, get result and record it. */
 		if (!choke_classify(skb, sch, &ret))
 			goto other_drop;	/* Packet was eaten by filter */
@@ -554,7 +556,8 @@ static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
 	return 0;
 }
 
-static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **choke_find_tcf(struct Qdisc *sch,
					       unsigned long cl)
 {
	struct choke_sched_data *q = qdisc_priv(sch);
 
net/sched/sch_drr.c
@@ -35,7 +35,7 @@ struct drr_class {
 
 struct drr_sched {
 	struct list_head		active;
-	struct tcf_proto		*filter_list;
+	struct tcf_proto __rcu		*filter_list;
 	struct Qdisc_class_hash		clhash;
 };
 
@@ -184,7 +184,8 @@ static void drr_put_class(struct Qdisc *sch, unsigned long arg)
 		drr_destroy_class(sch, cl);
 }
 
-static struct tcf_proto **drr_tcf_chain(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **drr_tcf_chain(struct Qdisc *sch,
					      unsigned long cl)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 
@@ -319,6 +320,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
 	struct tcf_result res;
+	struct tcf_proto *fl;
 	int result;
 
 	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
@@ -328,7 +330,8 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
 	}
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-	result = tc_classify(skb, q->filter_list, &res);
+	fl = rcu_dereference_bh(q->filter_list);
+	result = tc_classify(skb, fl, &res);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
net/sched/sch_dsmark.c
@@ -37,7 +37,7 @@
 
 struct dsmark_qdisc_data {
 	struct Qdisc		*q;
-	struct tcf_proto	*filter_list;
+	struct tcf_proto __rcu	*filter_list;
 	u8			*mask;	/* "owns" the array */
 	u8			*value;
 	u16			indices;
@@ -186,8 +186,8 @@ ignore:
 	}
 }
 
-static inline struct tcf_proto **dsmark_find_tcf(struct Qdisc *sch,
+static inline struct tcf_proto __rcu **dsmark_find_tcf(struct Qdisc *sch,
						 unsigned long cl)
 {
 	struct dsmark_qdisc_data *p = qdisc_priv(sch);
 	return &p->filter_list;
@@ -229,7 +229,8 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		skb->tc_index = TC_H_MIN(skb->priority);
 	else {
 		struct tcf_result res;
-		int result = tc_classify(skb, p->filter_list, &res);
+		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
+		int result = tc_classify(skb, fl, &res);
 
 		pr_debug("result %d class 0x%04x\n", result, res.classid);
 
net/sched/sch_fq_codel.c
@@ -52,7 +52,7 @@ struct fq_codel_flow {
 }; /* please try to keep this structure <= 64 bytes */
 
 struct fq_codel_sched_data {
-	struct tcf_proto *filter_list;	/* optional external classifier */
+	struct tcf_proto __rcu *filter_list; /* optional external classifier */
 	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
 	u32		*backlogs;	/* backlog table [flows_cnt] */
 	u32		flows_cnt;	/* number of flows */
@@ -85,6 +85,7 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
 				      int *qerr)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
+	struct tcf_proto *filter;
 	struct tcf_result res;
 	int result;
 
@@ -93,11 +94,12 @@ static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
 	    TC_H_MIN(skb->priority) <= q->flows_cnt)
 		return TC_H_MIN(skb->priority);
 
-	if (!q->filter_list)
+	filter = rcu_dereference(q->filter_list);
+	if (!filter)
 		return fq_codel_hash(q, skb) + 1;
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-	result = tc_classify(skb, q->filter_list, &res);
+	result = tc_classify(skb, filter, &res);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
@@ -496,7 +498,8 @@ static void fq_codel_put(struct Qdisc *q, unsigned long cl)
 {
 }
 
-static struct tcf_proto **fq_codel_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **fq_codel_find_tcf(struct Qdisc *sch,
						  unsigned long cl)
 {
 	struct fq_codel_sched_data *q = qdisc_priv(sch);
 
net/sched/sch_hfsc.c
@@ -116,7 +116,7 @@ struct hfsc_class {
 	struct gnet_stats_queue qstats;
 	struct gnet_stats_rate_est64 rate_est;
 	unsigned int	level;		/* class level in hierarchy */
-	struct tcf_proto *filter_list;	/* filter list */
+	struct tcf_proto __rcu *filter_list; /* filter list */
 	unsigned int	filter_cnt;	/* filter count */
 
 	struct hfsc_sched *sched;	/* scheduler data */
@@ -1161,7 +1161,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	head = &q->root;
-	tcf = q->root.filter_list;
+	tcf = rcu_dereference_bh(q->root.filter_list);
 	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
@@ -1185,7 +1185,7 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 			return cl; /* hit leaf class */
 
 		/* apply inner filter chain */
-		tcf = cl->filter_list;
+		tcf = rcu_dereference_bh(cl->filter_list);
 		head = cl;
 	}
 
@@ -1285,7 +1285,7 @@ hfsc_unbind_tcf(struct Qdisc *sch, unsigned long arg)
 	cl->filter_cnt--;
 }
 
-static struct tcf_proto **
+static struct tcf_proto __rcu **
 hfsc_tcf_chain(struct Qdisc *sch, unsigned long arg)
 {
 	struct hfsc_sched *q = qdisc_priv(sch);
net/sched/sch_htb.c
@@ -103,7 +103,7 @@ struct htb_class {
 	u32			prio;		/* these two are used only by leaves... */
 	int			quantum;	/* but stored for parent-to-leaf return */
 
-	struct tcf_proto	*filter_list;	/* class attached filters */
+	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
 	int			filter_cnt;
 	int			refcnt;		/* usage count of this class */
 
@@ -153,7 +153,7 @@ struct htb_sched {
 	int			rate2quantum;	/* quant = rate / rate2quantum */
 
 	/* filters for qdisc itself */
-	struct tcf_proto	*filter_list;
+	struct tcf_proto __rcu	*filter_list;
 
 #define HTB_WARN_TOOMANYEVENTS	0x1
 	unsigned int		warned;	/* only one warning */
@@ -223,9 +223,9 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 		if (cl->level == 0)
 			return cl;
 		/* Start with inner filter chain if a non-leaf class is selected */
-		tcf = cl->filter_list;
+		tcf = rcu_dereference_bh(cl->filter_list);
 	} else {
-		tcf = q->filter_list;
+		tcf = rcu_dereference_bh(q->filter_list);
 	}
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
@@ -251,7 +251,7 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
 			return cl; /* we hit leaf; return it */
 
 		/* we have got inner class; apply inner filter chain */
-		tcf = cl->filter_list;
+		tcf = rcu_dereference_bh(cl->filter_list);
 	}
 	/* classification failed; try to use default class */
 	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
@@ -1519,11 +1519,12 @@ failure:
 	return err;
 }
 
-static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
+static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch,
					     unsigned long arg)
 {
 	struct htb_sched *q = qdisc_priv(sch);
 	struct htb_class *cl = (struct htb_class *)arg;
-	struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
+	struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list;
 
 	return fl;
 }
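htb_classify() (like hfsc_classify() above it) walks a class hierarchy: each time an inner class is matched, the next level's filter_list is fetched with a fresh rcu_dereference_bh(), so every pointer followed during one classification comes from the same RCU-bh read-side critical section. A compressed, illustrative sketch of that loop shape, ignoring the default-class fallback and the TC_ACT handling in the real function:

	if (cl)
		tcf = rcu_dereference_bh(cl->filter_list);
	else
		tcf = rcu_dereference_bh(q->filter_list);
	while (tcf && (result = tc_classify(skb, tcf, &res)) >= 0) {
		cl = (struct htb_class *)res.class;
		if (!cl->level)
			return cl;				/* leaf: done */
		/* matched a non-leaf class: descend and re-fetch that level's chain */
		tcf = rcu_dereference_bh(cl->filter_list);
	}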
net/sched/sch_ingress.c
@@ -17,7 +17,7 @@
 
 
 struct ingress_qdisc_data {
-	struct tcf_proto	*filter_list;
+	struct tcf_proto __rcu	*filter_list;
 };
 
 /* ------------------------- Class/flow operations ------------------------- */
@@ -46,7 +46,8 @@ static void ingress_walk(struct Qdisc *sch, struct qdisc_walker *walker)
 {
 }
 
-static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **ingress_find_tcf(struct Qdisc *sch,
						 unsigned long cl)
 {
 	struct ingress_qdisc_data *p = qdisc_priv(sch);
 
@@ -59,9 +60,10 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct ingress_qdisc_data *p = qdisc_priv(sch);
 	struct tcf_result res;
+	struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
 	int result;
 
-	result = tc_classify(skb, p->filter_list, &res);
+	result = tc_classify(skb, fl, &res);
 
 	qdisc_bstats_update(sch, skb);
 	switch (result) {
net/sched/sch_multiq.c
@@ -31,7 +31,7 @@ struct multiq_sched_data {
 	u16 bands;
 	u16 max_bands;
 	u16 curband;
-	struct tcf_proto *filter_list;
+	struct tcf_proto __rcu *filter_list;
 	struct Qdisc **queues;
 };
 
@@ -42,10 +42,11 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	struct multiq_sched_data *q = qdisc_priv(sch);
 	u32 band;
 	struct tcf_result res;
+	struct tcf_proto *fl = rcu_dereference_bh(q->filter_list);
 	int err;
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-	err = tc_classify(skb, q->filter_list, &res);
+	err = tc_classify(skb, fl, &res);
 #ifdef CONFIG_NET_CLS_ACT
 	switch (err) {
 	case TC_ACT_STOLEN:
@@ -388,7 +389,8 @@ static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 	}
 }
 
-static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **multiq_find_tcf(struct Qdisc *sch,
						unsigned long cl)
 {
 	struct multiq_sched_data *q = qdisc_priv(sch);
 
net/sched/sch_prio.c
@@ -24,7 +24,7 @@
 
 struct prio_sched_data {
 	int bands;
-	struct tcf_proto *filter_list;
+	struct tcf_proto __rcu *filter_list;
 	u8  prio2band[TC_PRIO_MAX+1];
 	struct Qdisc *queues[TCQ_PRIO_BANDS];
 };
@@ -36,11 +36,13 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 	struct prio_sched_data *q = qdisc_priv(sch);
 	u32 band = skb->priority;
 	struct tcf_result res;
+	struct tcf_proto *fl;
 	int err;
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
 	if (TC_H_MAJ(skb->priority) != sch->handle) {
-		err = tc_classify(skb, q->filter_list, &res);
+		fl = rcu_dereference_bh(q->filter_list);
+		err = tc_classify(skb, fl, &res);
 #ifdef CONFIG_NET_CLS_ACT
 		switch (err) {
 		case TC_ACT_STOLEN:
@@ -50,7 +52,7 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 			return NULL;
 		}
 #endif
-		if (!q->filter_list || err < 0) {
+		if (!fl || err < 0) {
 			if (TC_H_MAJ(band))
 				band = 0;
 			return q->queues[q->prio2band[band & TC_PRIO_MAX]];
@@ -351,7 +353,8 @@ static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
 	}
 }
 
-static struct tcf_proto **prio_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **prio_find_tcf(struct Qdisc *sch,
					      unsigned long cl)
 {
 	struct prio_sched_data *q = qdisc_priv(sch);
 
net/sched/sch_qfq.c
@@ -181,7 +181,7 @@ struct qfq_group {
 };
 
 struct qfq_sched {
-	struct tcf_proto *filter_list;
+	struct tcf_proto __rcu *filter_list;
 	struct Qdisc_class_hash clhash;
 
 	u64			oldV, V;	/* Precise virtual times. */
@@ -576,7 +576,8 @@ static void qfq_put_class(struct Qdisc *sch, unsigned long arg)
 		qfq_destroy_class(sch, cl);
 }
 
-static struct tcf_proto **qfq_tcf_chain(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **qfq_tcf_chain(struct Qdisc *sch,
					      unsigned long cl)
 {
 	struct qfq_sched *q = qdisc_priv(sch);
 
@@ -704,6 +705,7 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 	struct qfq_sched *q = qdisc_priv(sch);
 	struct qfq_class *cl;
 	struct tcf_result res;
+	struct tcf_proto *fl;
 	int result;
 
 	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
@@ -714,7 +716,8 @@ static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 	}
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-	result = tc_classify(skb, q->filter_list, &res);
+	fl = rcu_dereference_bh(q->filter_list);
+	result = tc_classify(skb, fl, &res);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
net/sched/sch_sfb.c
@@ -55,7 +55,7 @@ struct sfb_bins {
 
 struct sfb_sched_data {
 	struct Qdisc	*qdisc;
-	struct tcf_proto *filter_list;
+	struct tcf_proto __rcu *filter_list;
 	unsigned long	rehash_interval;
 	unsigned long	warmup_time;	/* double buffering warmup time in jiffies */
 	u32		max;
@@ -253,13 +253,13 @@ static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
 	return false;
 }
 
-static bool sfb_classify(struct sk_buff *skb, struct sfb_sched_data *q,
+static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
 			 int *qerr, u32 *salt)
 {
 	struct tcf_result res;
 	int result;
 
-	result = tc_classify(skb, q->filter_list, &res);
+	result = tc_classify(skb, fl, &res);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
@@ -281,6 +281,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	struct sfb_sched_data *q = qdisc_priv(sch);
 	struct Qdisc *child = q->qdisc;
+	struct tcf_proto *fl;
 	int i;
 	u32 p_min = ~0;
 	u32 minqlen = ~0;
@@ -306,9 +307,10 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 	}
 
-	if (q->filter_list) {
+	fl = rcu_dereference_bh(q->filter_list);
+	if (fl) {
 		/* If using external classifiers, get result and record it. */
-		if (!sfb_classify(skb, q, &ret, &salt))
+		if (!sfb_classify(skb, fl, &ret, &salt))
 			goto other_drop;
 		keys.src = salt;
 		keys.dst = 0;
net/sched/sch_sfq.c
@@ -125,7 +125,7 @@ struct sfq_sched_data {
 	u8		cur_depth;	/* depth of longest slot */
 	u8		flags;
 	unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
-	struct tcf_proto *filter_list;
+	struct tcf_proto __rcu *filter_list;
 	sfq_index	*ht;		/* Hash table ('divisor' slots) */
 	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */
 
@@ -187,6 +187,7 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 	struct tcf_result res;
+	struct tcf_proto *fl;
 	int result;
 
 	if (TC_H_MAJ(skb->priority) == sch->handle &&
@@ -194,13 +195,14 @@ static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
 	    TC_H_MIN(skb->priority) <= q->divisor)
 		return TC_H_MIN(skb->priority);
 
-	if (!q->filter_list) {
+	fl = rcu_dereference_bh(q->filter_list);
+	if (!fl) {
 		skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
 		return sfq_hash(q, skb) + 1;
 	}
 
 	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
-	result = tc_classify(skb, q->filter_list, &res);
+	result = tc_classify(skb, fl, &res);
 	if (result >= 0) {
 #ifdef CONFIG_NET_CLS_ACT
 		switch (result) {
@@ -836,7 +838,8 @@ static void sfq_put(struct Qdisc *q, unsigned long cl)
 {
 }
 
-static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
+static struct tcf_proto __rcu **sfq_find_tcf(struct Qdisc *sch,
					     unsigned long cl)
 {
 	struct sfq_sched_data *q = qdisc_priv(sch);
 