
Merge branch 'fq_codel-small-optimizations'

Dave Taht says:

====================
Two small fq_codel optimizations

These two patches improve fq_codel performance
under extreme network loads. The first patch
escalates the codel count more rapidly under
overload; the second kills a totally useless
per-flow statistic.

(sent together because they'd otherwise conflict)
====================
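Background for the first patch: CoDel schedules its next drop at
t + interval/sqrt(count), so a larger count means less waiting before the
next drop. Below is a minimal userspace model of that scaling, floating
point for clarity only; the kernel's codel_control_law() uses fixed-point
math with a cached reciprocal square root, and the file/function names here
are illustrative, not from this patch.

/*
 * Illustrative model of CoDel's control law: next drop at
 * t + interval / sqrt(count). Shows only the scaling behavior.
 */
#include <math.h>
#include <stdio.h>

static double next_drop_delay_ms(double interval_ms, unsigned int count)
{
	return interval_ms / sqrt(count ? count : 1);
}

int main(void)
{
	const double interval_ms = 100.0;	/* codel's default interval */
	const unsigned int counts[] = { 1, 4, 16, 64 };

	for (int i = 0; i < 4; i++)
		printf("count=%2u -> next drop in %6.2f ms\n",
		       counts[i], next_drop_delay_ms(interval_ms, counts[i]));
	return 0;
}

Compiled with -lm, this prints 100.00, 50.00, 25.00 and 12.50 ms: crediting
a whole batch of drops to count collapses the next-drop interval at once,
instead of one step per interval.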

Signed-off-by: Dave Taht <dave.taht@gmail.com>

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2af8cfac2b
Committer: David S. Miller <davem@davemloft.net>, 2019-08-06 14:18:20 -07:00

--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c

@@ -45,7 +45,6 @@ struct fq_codel_flow {
 	struct sk_buff	  *head;
 	struct sk_buff	  *tail;
 	struct list_head  flowchain;
 	int		  deficit;
-	u32		  dropped; /* number of drops (or ECN marks) on this flow */
 	struct codel_vars cvars;
 }; /* please try to keep this structure <= 64 bytes */
@@ -173,7 +172,8 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
 		__qdisc_drop(skb, to_free);
 	} while (++i < max_packets && len < threshold);
 
-	flow->dropped += i;
+	/* Tell codel to increase its signal strength also */
+	flow->cvars.count += i;
 	q->backlogs[idx] -= len;
 	q->memory_usage -= mem;
 	sch->qstats.drops += i;
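To see what this hunk buys: before, a batch eviction of i packets only
bumped a statistic; now flow->cvars.count += i feeds those drops back into
CoDel's control law. A hedged comparison using the same model as above; the
64-packet batch size and starting count of 1 are assumed examples, not
taken from the patch.

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double interval_ms = 100.0;
	const unsigned int batch = 64;	/* assumed: packets evicted in one fq_codel_drop() call */

	/* before: codel state untouched, count stays where it was (say 1) */
	printf("old: next drop in %.1f ms\n", interval_ms / sqrt(1.0));
	/* after: flow->cvars.count += i credits the batch to codel */
	printf("new: next drop in %.1f ms\n", interval_ms / sqrt(1.0 + batch));
	return 0;
}

That is 100.0 ms versus roughly 12.4 ms before the next codel-driven drop
on the overloaded flow.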
@@ -211,7 +211,6 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		list_add_tail(&flow->flowchain, &q->new_flows);
 		q->new_flow_count++;
 		flow->deficit = q->quantum;
-		flow->dropped = 0;
 	}
 	get_codel_cb(skb)->mem_usage = skb->truesize;
 	q->memory_usage += get_codel_cb(skb)->mem_usage;
@@ -310,9 +309,6 @@ begin:
 				 &flow->cvars, &q->cstats, qdisc_pkt_len,
 				 codel_get_enqueue_time, drop_func, dequeue_func);
 
-	flow->dropped += q->cstats.drop_count - prev_drop_count;
-	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
-
 	if (!skb) {
 		/* force a pass through old_flows to prevent starvation */
 		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
@@ -658,7 +654,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 			sch_tree_unlock(sch);
 		}
 		qs.backlog = q->backlogs[idx];
-		qs.drops = flow->dropped;
+		qs.drops = 0;
 	}
 	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
 		return -1;
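One user-visible consequence of the last hunk: per-class dumps now always
report zero drops, and the only remaining drop accounting is the qdisc-wide
sch->qstats.drops, which fq_codel_drop() above still increments by i and
which remains visible via tc -s qdisc show. A minimal sketch of the
reporting change, with an abbreviated stand-in for the kernel's
struct gnet_stats_queue and an assumed backlog value:

#include <stdio.h>

/* abbreviated stand-in for the kernel's struct gnet_stats_queue */
struct gnet_stats_queue {
	unsigned int qlen;
	unsigned int backlog;
	unsigned int drops;
};

int main(void)
{
	struct gnet_stats_queue qs = { 0 };

	qs.backlog = 1514;	/* assumed example backlog for one flow */
	qs.drops = 0;		/* per-flow drops are no longer tracked */
	printf("class stats: backlog=%u drops=%u\n", qs.backlog, qs.drops);
	return 0;
}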