Merge branch 'remove-qdisc-throttle'

Eric Dumazet says:

====================
net_sched: remove qdisc_is_throttled()

HTB, CBQ and HFSC pay a very high cost to keep the qdisc 'throttled'
status up to date, even though nothing but CBQ seems to use it.

CBQ usage is flaky anyway, since no qdisc ->enqueue() updates the
'throttled' qdisc status.

This looks like an 'optimization' that actually costs more than not
having it at all, and it might cause latency issues with CBQ.

In my tests, I measured an 8% performance increase in a TCP_RR
workload through an HTB qdisc with throttled classes present,
and a 5% increase without throttled classes.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit e69f73bfec
David S. Miller <davem@davemloft.net>  2016-06-10 23:58:29 -07:00

10 changed files with 15 additions and 45 deletions
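The "very high cost" in the cover letter is the bit update itself. Below is a
minimal userspace model of the three helpers this series removes (the kernel
versions, visible in the include/net/sch_generic.h hunk further down, use
set_bit()/clear_bit() on qdisc->state; the struct, function placement and
memory-order choices here are illustrative stand-ins, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

#define QDISC_STATE_THROTTLED	(1UL << 2)

struct qdisc_model {
	_Atomic unsigned long state;	/* shared word, bounced between CPUs */
};

/* Model of the removed test: a plain load. */
static inline bool model_is_throttled(struct qdisc_model *q)
{
	return atomic_load_explicit(&q->state, memory_order_relaxed) &
	       QDISC_STATE_THROTTLED;
}

/* Models of the removed set/clear: each is a locked read-modify-write
 * that takes the state cacheline exclusive. */
static inline void model_throttled(struct qdisc_model *q)
{
	atomic_fetch_or_explicit(&q->state, QDISC_STATE_THROTTLED,
				 memory_order_relaxed);
}

static inline void model_unthrottled(struct qdisc_model *q)
{
	atomic_fetch_and_explicit(&q->state, ~QDISC_STATE_THROTTLED,
				  memory_order_relaxed);
}

As the hunks below show, HTB, HFSC and TBF ran the clear operation on every
successful dequeue, so the fast path paid for an atomic read-modify-write
whether or not anyone ever consulted the bit.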

include/net/pkt_sched.h

@@ -67,12 +67,12 @@ struct qdisc_watchdog {
 };
 
 void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle);
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
 
 static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
                                            psched_time_t expires)
 {
-        qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires), true);
+        qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
 }
 
 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);

include/net/sch_generic.h

@@ -26,7 +26,6 @@ struct qdisc_rate_table {
 enum qdisc_state_t {
         __QDISC_STATE_SCHED,
         __QDISC_STATE_DEACTIVATED,
-        __QDISC_STATE_THROTTLED,
 };
 
 struct qdisc_size_table {
@@ -125,21 +124,6 @@ static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
 #endif
 }
 
-static inline bool qdisc_is_throttled(const struct Qdisc *qdisc)
-{
-        return test_bit(__QDISC_STATE_THROTTLED, &qdisc->state) ? true : false;
-}
-
-static inline void qdisc_throttled(struct Qdisc *qdisc)
-{
-        set_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
-}
-
-static inline void qdisc_unthrottled(struct Qdisc *qdisc)
-{
-        clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
-}
-
 struct Qdisc_class_ops {
         /* Child qdisc manipulation */
         struct netdev_queue * (*select_queue)(struct Qdisc *, struct tcmsg *);

net/sched/sch_api.c

@@ -583,7 +583,6 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
                                                  timer);
 
         rcu_read_lock();
-        qdisc_unthrottled(wd->qdisc);
         __netif_schedule(qdisc_root(wd->qdisc));
         rcu_read_unlock();
 
@@ -598,15 +597,12 @@ void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc)
 }
 EXPORT_SYMBOL(qdisc_watchdog_init);
 
-void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires, bool throttle)
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires)
 {
         if (test_bit(__QDISC_STATE_DEACTIVATED,
                      &qdisc_root_sleeping(wd->qdisc)->state))
                 return;
 
-        if (throttle)
-                qdisc_throttled(wd->qdisc);
-
         if (wd->last_expires == expires)
                 return;
 
@@ -620,7 +616,6 @@ EXPORT_SYMBOL(qdisc_watchdog_schedule_ns);
 void qdisc_watchdog_cancel(struct qdisc_watchdog *wd)
 {
         hrtimer_cancel(&wd->timer);
-        qdisc_unthrottled(wd->qdisc);
 }
 EXPORT_SYMBOL(qdisc_watchdog_cancel);
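With the bit gone, waking a throttled qdisc is nothing but a reschedule. The
resulting watchdog callback, reconstructed from the first hunk above (the
container_of() opening and the return statement are elided context lines, so
treat them as assumptions):

static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
{
        struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
                                                 timer);

        /* No qdisc_unthrottled() first: requeuing the root qdisc for
         * transmission is all that is needed. */
        rcu_read_lock();
        __netif_schedule(qdisc_root(wd->qdisc));
        rcu_read_unlock();

        return HRTIMER_NORESTART;
}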

net/sched/sch_cbq.c

@@ -345,7 +345,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 {
         int toplevel = q->toplevel;
 
-        if (toplevel > cl->level && !(qdisc_is_throttled(cl->q))) {
+        if (toplevel > cl->level) {
                 psched_time_t now = psched_get_time();
 
                 do {
@@ -513,7 +513,6 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
                 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
         }
 
-        qdisc_unthrottled(sch);
         __netif_schedule(qdisc_root(sch));
         return HRTIMER_NORESTART;
 }
@@ -819,7 +818,6 @@ cbq_dequeue(struct Qdisc *sch)
         if (skb) {
                 qdisc_bstats_update(sch, skb);
                 sch->q.qlen--;
-                qdisc_unthrottled(sch);
                 return skb;
         }

net/sched/sch_fq.c

@@ -445,8 +445,7 @@ begin:
                 if (!head->first) {
                         if (q->time_next_delayed_flow != ~0ULL)
                                 qdisc_watchdog_schedule_ns(&q->watchdog,
-                                                           q->time_next_delayed_flow,
-                                                           false);
+                                                           q->time_next_delayed_flow);
                         return NULL;
                 }
         }

net/sched/sch_hfsc.c

@@ -1664,7 +1664,6 @@ hfsc_dequeue(struct Qdisc *sch)
                         set_passive(cl);
         }
 
-        qdisc_unthrottled(sch);
         qdisc_bstats_update(sch, skb);
         qdisc_qstats_backlog_dec(sch, skb);
         sch->q.qlen--;

net/sched/sch_htb.c

@@ -889,7 +889,6 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
         if (skb != NULL) {
 ok:
                 qdisc_bstats_update(sch, skb);
-                qdisc_unthrottled(sch);
                 qdisc_qstats_backlog_dec(sch, skb);
                 sch->q.qlen--;
                 return skb;
@@ -929,7 +928,7 @@ ok:
         }
         qdisc_qstats_overlimit(sch);
         if (likely(next_event > q->now))
-                qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true);
+                qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
         else
                 schedule_work(&q->work);
 fin:

net/sched/sch_netem.c

@@ -582,15 +582,11 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
         struct sk_buff *skb;
         struct rb_node *p;
 
-        if (qdisc_is_throttled(sch))
-                return NULL;
-
 tfifo_dequeue:
         skb = __skb_dequeue(&sch->q);
         if (skb) {
                 qdisc_qstats_backlog_dec(sch, skb);
 deliver:
-                qdisc_unthrottled(sch);
                 qdisc_bstats_update(sch, skb);
                 return skb;
         }

net/sched/sch_plug.c

@@ -64,6 +64,8 @@ struct plug_sched_data {
          */
         bool unplug_indefinite;
 
+        bool throttled;
+
         /* Queue Limit in bytes */
         u32 limit;
 
@@ -103,7 +105,7 @@ static struct sk_buff *plug_dequeue(struct Qdisc *sch)
 {
         struct plug_sched_data *q = qdisc_priv(sch);
 
-        if (qdisc_is_throttled(sch))
+        if (q->throttled)
                 return NULL;
 
         if (!q->unplug_indefinite) {
@@ -111,7 +113,7 @@ static struct sk_buff *plug_dequeue(struct Qdisc *sch)
                         /* No more packets to dequeue. Block the queue
                          * and wait for the next release command.
                          */
-                        qdisc_throttled(sch);
+                        q->throttled = true;
                         return NULL;
                 }
                 q->pkts_to_release--;
@@ -141,7 +143,7 @@ static int plug_init(struct Qdisc *sch, struct nlattr *opt)
                 q->limit = ctl->limit;
         }
 
-        qdisc_throttled(sch);
+        q->throttled = true;
         return 0;
 }
 
@@ -173,7 +175,7 @@ static int plug_change(struct Qdisc *sch, struct nlattr *opt)
                 q->pkts_last_epoch = q->pkts_current_epoch;
                 q->pkts_current_epoch = 0;
                 if (q->unplug_indefinite)
-                        qdisc_throttled(sch);
+                        q->throttled = true;
                 q->unplug_indefinite = false;
                 break;
         case TCQ_PLUG_RELEASE_ONE:
@@ -182,7 +184,7 @@ static int plug_change(struct Qdisc *sch, struct nlattr *opt)
                  */
                 q->pkts_to_release += q->pkts_last_epoch;
                 q->pkts_last_epoch = 0;
-                qdisc_unthrottled(sch);
+                q->throttled = false;
                 netif_schedule_queue(sch->dev_queue);
                 break;
         case TCQ_PLUG_RELEASE_INDEFINITE:
@@ -190,7 +192,7 @@ static int plug_change(struct Qdisc *sch, struct nlattr *opt)
                 q->pkts_to_release = 0;
                 q->pkts_last_epoch = 0;
                 q->pkts_current_epoch = 0;
-                qdisc_unthrottled(sch);
+                q->throttled = false;
                 netif_schedule_queue(sch->dev_queue);
                 break;
         case TCQ_PLUG_LIMIT:
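sch_plug is the one qdisc that genuinely needs a "blocked" flag, and its
conversion shows the replacement pattern: state moves out of the shared,
atomically updated qdisc->state word and into the qdisc's private data, where
a plain bool suffices because ->dequeue() already runs under the qdisc root
lock. A skeleton condensed from the hunks above (the elided bodies are
assumptions):

struct plug_sched_data {
        bool throttled;         /* plain flag, serialized by the qdisc lock */
        /* ... */
};

static struct sk_buff *plug_dequeue(struct Qdisc *sch)
{
        struct plug_sched_data *q = qdisc_priv(sch);

        if (q->throttled)       /* plain load, no atomic op */
                return NULL;

        /* ... per-epoch release accounting elided ... */
        return qdisc_dequeue_head(sch);
}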

net/sched/sch_tbf.c

@@ -254,14 +254,12 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
                         q->ptokens = ptoks;
                         qdisc_qstats_backlog_dec(sch, skb);
                         sch->q.qlen--;
-                        qdisc_unthrottled(sch);
                         qdisc_bstats_update(sch, skb);
                         return skb;
                 }
 
                 qdisc_watchdog_schedule_ns(&q->watchdog,
-                                           now + max_t(long, -toks, -ptoks),
-                                           true);
+                                           now + max_t(long, -toks, -ptoks));
 
                 /* Maybe we have a shorter packet in the queue,
                    which can be sent now. It sounds cool,