mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-15 06:55:13 +08:00
net: gro: change GRO overflow strategy
GRO layer has a limit of 8 flows being held in GRO list, for performance reasons. When a packet comes for a flow not yet in the list, and the list is full, we immediately give it to upper stacks, lowering aggregation performance. With TSO auto sizing and FQ packet scheduler, this situation happens more often. This patch changes the strategy to simply evict the oldest flow of the list. This works better because of the nature of packet trains for which GRO is efficient. This also has the effect of lowering the GRO latency if many flows are competing. Tested: Used a 40Gbps NIC, with 4 RX queues, and 200 concurrent TCP_STREAM netperf. Before patch, aggregate rate is 11Gbps (while a single flow can reach 30Gbps) After patch, line rate is reached. Signed-off-by: Eric Dumazet <edumazet@google.com> Cc: Jerry Chu <hkchu@google.com> Cc: Neal Cardwell <ncardwell@google.com> Acked-by: Neal Cardwell <ncardwell@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
e6a7675829
commit
600adc18eb
@ -3882,10 +3882,23 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
|
||||
if (same_flow)
|
||||
goto ok;
|
||||
|
||||
if (NAPI_GRO_CB(skb)->flush || napi->gro_count >= MAX_GRO_SKBS)
|
||||
if (NAPI_GRO_CB(skb)->flush)
|
||||
goto normal;
|
||||
|
||||
napi->gro_count++;
|
||||
if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
|
||||
struct sk_buff *nskb = napi->gro_list;
|
||||
|
||||
/* locate the end of the list to select the 'oldest' flow */
|
||||
while (nskb->next) {
|
||||
pp = &nskb->next;
|
||||
nskb = *pp;
|
||||
}
|
||||
*pp = NULL;
|
||||
nskb->next = NULL;
|
||||
napi_gro_complete(nskb);
|
||||
} else {
|
||||
napi->gro_count++;
|
||||
}
|
||||
NAPI_GRO_CB(skb)->count = 1;
|
||||
NAPI_GRO_CB(skb)->age = jiffies;
|
||||
skb_shinfo(skb)->gso_size = skb_gro_len(skb);
|
||||
|
Loading…
Reference in New Issue
Block a user