tcp: move stats merge to the end of tcp_try_coalesce
This change cleans up the last bits of tcp_try_coalesce so that we only need one goto, which jumps to the end of the function. The idea is to make the code more readable by putting things in a linear order, so that we start execution at the top of the function and end it at the bottom.

I also made a slight tweak to the code for handling frags when we are a clone. Instead of making it an "if (clone) loop, else nr_frags = 0", I changed the logic so that if (!clone) we just set the number of frags to 0, which disables the for loop anyway.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 57b55a7ec6
commit 34a802a5b9
@@ -4548,15 +4548,13 @@ static bool tcp_try_coalesce(struct sock *sk,
         int i, delta, len = from->len;
 
         *fragstolen = false;
+
         if (tcp_hdr(from)->fin || skb_cloned(to))
                 return false;
+
         if (len <= skb_tailroom(to)) {
                 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
-merge:
-                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
-                TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
-                TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
-                return true;
+                goto merge;
         }
 
         if (skb_has_frag_list(to) || skb_has_frag_list(from))
@@ -4581,7 +4579,6 @@ merge:
                 skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
                                    page, offset, skb_headlen(from));
                 *fragstolen = true;
-                goto copyfrags;
         } else {
                 if (skb_shinfo(to)->nr_frags +
                     skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
@@ -4589,27 +4586,33 @@ merge:
 
                 delta = from->truesize -
                         SKB_TRUESIZE(skb_end_pointer(from) - from->head);
-copyfrags:
-                WARN_ON_ONCE(delta < len);
-                memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
-                       skb_shinfo(from)->frags,
-                       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
-                skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
-
-                if (skb_cloned(from))
-                        for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
-                                skb_frag_ref(from, i);
-                else
-                        skb_shinfo(from)->nr_frags = 0;
-
-                to->truesize += delta;
-                atomic_add(delta, &sk->sk_rmem_alloc);
-                sk_mem_charge(sk, delta);
-                to->len += len;
-                to->data_len += len;
-                goto merge;
         }
-        return false;
+
+        WARN_ON_ONCE(delta < len);
+
+        memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
+               skb_shinfo(from)->frags,
+               skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
+        skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
+
+        if (!skb_cloned(from))
+                skb_shinfo(from)->nr_frags = 0;
+
+        /* if the skb is not cloned this does nothing since we set nr_frags to 0 */
+        for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
+                skb_frag_ref(from, i);
+
+        to->truesize += delta;
+        atomic_add(delta, &sk->sk_rmem_alloc);
+        sk_mem_charge(sk, delta);
+        to->len += len;
+        to->data_len += len;
+
+merge:
+        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
+        TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
+        TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
+        return true;
 }
 
 static void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
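To make the clone-handling tweak from the commit message easier to see in isolation, here is a minimal user-space sketch (an illustration only, not kernel code: struct fake_shinfo, coalesce_frags and their fields are made-up stand-ins for the real sk_buff machinery). Clearing nr_frags up front for the non-cloned case lets a single unconditional loop replace the old if/else, because a loop over zero frags simply does nothing.

#include <stdbool.h>
#include <stdio.h>

struct fake_shinfo {
        int nr_frags;
        int frag_refs[8];       /* stand-in for per-frag page refcounts */
};

/* Mimics the new flow: clear nr_frags first when 'from' is not cloned,
 * then run one unconditional reference-taking loop.
 */
static void coalesce_frags(struct fake_shinfo *from, bool cloned)
{
        int i;

        if (!cloned)
                from->nr_frags = 0;     /* frags are donated to the target skb */

        /* if the skb is not cloned this does nothing since nr_frags is now 0 */
        for (i = 0; i < from->nr_frags; i++)
                from->frag_refs[i]++;
}

int main(void)
{
        struct fake_shinfo a = { .nr_frags = 3 }, b = { .nr_frags = 3 };

        coalesce_frags(&a, true);       /* cloned: take a ref on each frag */
        coalesce_frags(&b, false);      /* not cloned: loop runs zero times */

        printf("cloned: nr_frags=%d ref=%d; not cloned: nr_frags=%d ref=%d\n",
               a.nr_frags, a.frag_refs[0], b.nr_frags, b.frag_refs[0]);
        return 0;
}

The observable behaviour matches the old if/else structure: references are taken only when the skb is cloned, and the frags are handed over (nr_frags zeroed) otherwise; the new arrangement just removes one branch from the linear path.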