gro: Defer clearing of flush bit in tunnel paths
This patch updates the GRO handlers for GRE, VXLAN, GENEVE, and FOU so that we do not clear the flush bit until after we have called the next-level GRO handler. Previously the bit was cleared before parsing through the list of frames; this left several paths where it either needed to be reset but wasn't (as in the case of FOU) or was being set unnecessarily (as in GENEVE). By simply deferring the clearing of the bit until after the next-level protocol has been parsed, we avoid any unnecessary bit twiddling and avoid bugs.

Signed-off-by: Alexander Duyck <aduyck@mirantis.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3a8befcd78
commit c194cf93c1
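The change is the same in all four handlers, as the hunks below show: start with the flush bit set, keep it set across every early-exit path, and clear it only once the next-level gro_receive callback has actually been called. The following standalone C sketch illustrates just that control flow; it is not kernel code, and every name in it (pkt, tunnel_gro_receive, inner_gro_receive, inner_proto) is a made-up placeholder rather than a kernel symbol.

/*
 * Illustrative sketch of the "defer clearing the flush bit" pattern.
 * All names here are placeholders invented for this example.
 */
#include <stdio.h>

struct pkt {
	int inner_proto;	/* < 0 models an unrecognized inner protocol */
};

/* Stand-in for the next-level protocol's gro_receive callback. */
static void inner_gro_receive(struct pkt *p)
{
	printf("inner handler ran for proto %d\n", p->inner_proto);
}

static int tunnel_gro_receive(struct pkt *p)
{
	int flush = 1;	/* pessimistic default: flush unless GRO can proceed */

	if (p->inner_proto < 0)
		goto out;	/* early exit: flush stays 1, nothing to undo */

	inner_gro_receive(p);
	flush = 0;		/* cleared only after the inner handler was called */

out:
	return flush;
}

int main(void)
{
	struct pkt good = { .inner_proto = 8 };
	struct pkt bad  = { .inner_proto = -1 };

	printf("good packet -> flush = %d\n", tunnel_gro_receive(&good));
	printf("bad packet  -> flush = %d\n", tunnel_gro_receive(&bad));
	return 0;
}

Because the early-exit path never touches flush, no error path has to remember to put the bit back, which is the class of mistake the commit message describes for FOU and GENEVE.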
drivers/net/geneve.c
@@ -463,8 +463,6 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
 			goto out;
 	}
 
-	flush = 0;
-
 	for (p = *head; p; p = p->next) {
 		if (!NAPI_GRO_CB(p)->same_flow)
 			continue;
@@ -481,14 +479,13 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
 
 	rcu_read_lock();
 	ptype = gro_find_receive_by_type(type);
-	if (!ptype) {
-		flush = 1;
+	if (!ptype)
 		goto out_unlock;
-	}
 
 	skb_gro_pull(skb, gh_len);
 	skb_gro_postpull_rcsum(skb, gh, gh_len);
 	pp = ptype->callbacks.gro_receive(head, skb);
+	flush = 0;
 
 out_unlock:
 	rcu_read_unlock();
drivers/net/vxlan.c
@@ -591,8 +591,6 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, sizeof(struct vxlanhdr)); /* pull vxlan header */
 
-	flush = 0;
-
 	for (p = *head; p; p = p->next) {
 		if (!NAPI_GRO_CB(p)->same_flow)
 			continue;
@@ -606,6 +604,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
 	}
 
 	pp = eth_gro_receive(head, skb);
+	flush = 0;
 
 out:
 	skb_gro_remcsum_cleanup(skb, &grc);
net/ipv4/fou.c
@@ -319,8 +319,6 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 
 	skb_gro_pull(skb, hdrlen);
 
-	flush = 0;
-
 	for (p = *head; p; p = p->next) {
 		const struct guehdr *guehdr2;
 
@@ -352,6 +350,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
 		goto out_unlock;
 
 	pp = ops->callbacks.gro_receive(head, skb);
+	flush = 0;
 
 out_unlock:
 	rcu_read_unlock();
net/ipv4/gre_offload.c
@@ -175,8 +175,6 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 					     null_compute_pseudo);
 	}
 
-	flush = 0;
-
 	for (p = *head; p; p = p->next) {
 		const struct gre_base_hdr *greh2;
 
@@ -213,6 +211,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 	skb_gro_postpull_rcsum(skb, greh, grehlen);
 
 	pp = ptype->callbacks.gro_receive(head, skb);
+	flush = 0;
 
 out_unlock:
 	rcu_read_unlock();