gro: remove rcu_read_lock/rcu_read_unlock from gro_receive handlers
All gro_receive() handlers are called from dev_gro_receive() while rcu_read_lock() has already been called. There is no point in stacking more rcu_read_lock() calls.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit fc1ca3348a
parent 1aad9634b9
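For context, the redundancy this patch removes can be seen from the caller. Below is a minimal, paraphrased sketch of the dispatch shape in dev_gro_receive() (net/core/dev.c); the function name and parameters of the sketch are illustrative, not verbatim kernel code. Because the caller already holds the RCU read lock around ptype->callbacks.gro_receive(), the rcu_dereference() calls left in the handlers remain valid without any nested rcu_read_lock()/rcu_read_unlock() pair.

/*
 * Hedged sketch only: the shape of the GRO dispatch in dev_gro_receive().
 * The outer rcu_read_lock() covers every gro_receive handler touched by
 * this patch (geneve, vlan, eth, inet, fou/gue, gre, udp4/udp6, ipv6).
 */
static struct sk_buff *dev_gro_receive_sketch(struct list_head *gro_list,
					      struct sk_buff *skb)
{
	const struct packet_offload *ptype;
	struct sk_buff *pp = NULL;

	rcu_read_lock();	/* outer read-side critical section */
	list_for_each_entry_rcu(ptype, &offload_base, list) {
		if (ptype->type != skb->protocol ||
		    !ptype->callbacks.gro_receive)
			continue;
		/* Handlers run here; a nested rcu_read_lock() inside them
		 * would only re-enter an already-held read-side section. */
		pp = ptype->callbacks.gro_receive(gro_list, skb);
		break;
	}
	rcu_read_unlock();

	return pp;
}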
@@ -517,18 +517,15 @@ static struct sk_buff *geneve_gro_receive(struct sock *sk,
 
 	type = gh->proto_type;
 
-	rcu_read_lock();
 	ptype = gro_find_receive_by_type(type);
 	if (!ptype)
-		goto out_unlock;
+		goto out;
 
 	skb_gro_pull(skb, gh_len);
 	skb_gro_postpull_rcsum(skb, gh, gh_len);
 	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 	flush = 0;
 
-out_unlock:
-	rcu_read_unlock();
 out:
 	skb_gro_flush_final(skb, pp, flush);
 
@@ -476,10 +476,9 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
 
 	type = vhdr->h_vlan_encapsulated_proto;
 
-	rcu_read_lock();
 	ptype = gro_find_receive_by_type(type);
 	if (!ptype)
-		goto out_unlock;
+		goto out;
 
 	flush = 0;
 
@@ -501,8 +500,6 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
 					    ipv6_gro_receive, inet_gro_receive,
 					    head, skb);
 
-out_unlock:
-	rcu_read_unlock();
 out:
 	skb_gro_flush_final(skb, pp, flush);
 
@@ -436,11 +436,10 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
 
 	type = eh->h_proto;
 
-	rcu_read_lock();
 	ptype = gro_find_receive_by_type(type);
 	if (ptype == NULL) {
 		flush = 1;
-		goto out_unlock;
+		goto out;
 	}
 
 	skb_gro_pull(skb, sizeof(*eh));
@@ -450,8 +449,6 @@ struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb)
 				    ipv6_gro_receive, inet_gro_receive,
 				    head, skb);
 
-out_unlock:
-	rcu_read_unlock();
 out:
 	skb_gro_flush_final(skb, pp, flush);
 
@@ -1452,19 +1452,18 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
 
 	proto = iph->protocol;
 
-	rcu_read_lock();
 	ops = rcu_dereference(inet_offloads[proto]);
 	if (!ops || !ops->callbacks.gro_receive)
-		goto out_unlock;
+		goto out;
 
 	if (*(u8 *)iph != 0x45)
-		goto out_unlock;
+		goto out;
 
 	if (ip_is_fragment(iph))
-		goto out_unlock;
+		goto out;
 
 	if (unlikely(ip_fast_csum((u8 *)iph, 5)))
-		goto out_unlock;
+		goto out;
 
 	id = ntohl(*(__be32 *)&iph->id);
 	flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id & ~IP_DF));
@@ -1541,9 +1540,6 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
 	pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
 				       ops->callbacks.gro_receive, head, skb);
 
-out_unlock:
-	rcu_read_unlock();
-
 out:
 	skb_gro_flush_final(skb, pp, flush);
 
@@ -247,17 +247,14 @@ static struct sk_buff *fou_gro_receive(struct sock *sk,
 	/* Flag this frame as already having an outer encap header */
 	NAPI_GRO_CB(skb)->is_fou = 1;
 
-	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
 	ops = rcu_dereference(offloads[proto]);
 	if (!ops || !ops->callbacks.gro_receive)
-		goto out_unlock;
+		goto out;
 
 	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
-out_unlock:
-	rcu_read_unlock();
-
 out:
 	return pp;
 }
@@ -439,17 +436,14 @@ next_proto:
 	/* Flag this frame as already having an outer encap header */
 	NAPI_GRO_CB(skb)->is_fou = 1;
 
-	rcu_read_lock();
 	offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
 	ops = rcu_dereference(offloads[proto]);
 	if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
-		goto out_unlock;
+		goto out;
 
 	pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 	flush = 0;
 
-out_unlock:
-	rcu_read_unlock();
 out:
 	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 
@@ -163,10 +163,9 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
 
 	type = greh->protocol;
 
-	rcu_read_lock();
 	ptype = gro_find_receive_by_type(type);
 	if (!ptype)
-		goto out_unlock;
+		goto out;
 
 	grehlen = GRE_HEADER_SECTION;
 
@@ -180,13 +179,13 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
 	if (skb_gro_header_hard(skb, hlen)) {
 		greh = skb_gro_header_slow(skb, hlen, off);
 		if (unlikely(!greh))
-			goto out_unlock;
+			goto out;
 	}
 
 	/* Don't bother verifying checksum if we're going to flush anyway. */
 	if ((greh->flags & GRE_CSUM) && !NAPI_GRO_CB(skb)->flush) {
 		if (skb_gro_checksum_simple_validate(skb))
-			goto out_unlock;
+			goto out;
 
 		skb_gro_checksum_try_convert(skb, IPPROTO_GRE,
 					     null_compute_pseudo);
@@ -230,8 +229,6 @@ static struct sk_buff *gre_gro_receive(struct list_head *head,
 	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 	flush = 0;
 
-out_unlock:
-	rcu_read_unlock();
 out:
 	skb_gro_flush_final(skb, pp, flush);
 
@@ -628,13 +628,11 @@ struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
 					     inet_gro_compute_pseudo);
 skip:
 	NAPI_GRO_CB(skb)->is_ipv6 = 0;
-	rcu_read_lock();
 
 	if (static_branch_unlikely(&udp_encap_needed_key))
 		sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest);
 
 	pp = udp_gro_receive(head, skb, uh, sk);
-	rcu_read_unlock();
 	return pp;
 
 flush:
@@ -208,7 +208,6 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
 
 	flush += ntohs(iph->payload_len) != skb_gro_len(skb);
 
-	rcu_read_lock();
 	proto = iph->nexthdr;
 	ops = rcu_dereference(inet6_offloads[proto]);
 	if (!ops || !ops->callbacks.gro_receive) {
@@ -221,7 +220,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
 
 		ops = rcu_dereference(inet6_offloads[proto]);
 		if (!ops || !ops->callbacks.gro_receive)
-			goto out_unlock;
+			goto out;
 
 		iph = ipv6_hdr(skb);
 	}
@@ -279,9 +278,6 @@ not_same_flow:
 	pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
 					 ops->callbacks.gro_receive, head, skb);
 
-out_unlock:
-	rcu_read_unlock();
-
 out:
 	skb_gro_flush_final(skb, pp, flush);
 
@@ -145,13 +145,11 @@ struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)
 
 skip:
 	NAPI_GRO_CB(skb)->is_ipv6 = 1;
-	rcu_read_lock();
 
 	if (static_branch_unlikely(&udpv6_encap_needed_key))
 		sk = udp6_gro_lookup_skb(skb, uh->source, uh->dest);
 
 	pp = udp_gro_receive(head, skb, uh, sk);
-	rcu_read_unlock();
 	return pp;
 
 flush: