diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ad2021ccc55a..9d5078bd23a3 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -248,6 +248,7 @@ struct tcp_sock {
 	/* inet_connection_sock has to be the first member of tcp_sock */
 	struct inet_connection_sock	inet_conn;
 	u16	tcp_header_len;	/* Bytes of tcp header to send		*/
+	u16	xmit_size_goal_segs; /* Goal for segmenting output packets */
 
 /*
  *	Header prediction flags
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 886596ff0aae..0db9f3b984f7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -665,7 +665,7 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 				       int large_allowed)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	u32 xmit_size_goal;
+	u32 xmit_size_goal, old_size_goal;
 
 	xmit_size_goal = mss_now;
 
@@ -676,7 +676,17 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
 				  tp->tcp_header_len);
 
 		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
-		xmit_size_goal -= (xmit_size_goal % mss_now);
+
+		/* We try hard to avoid divides here */
+		old_size_goal = tp->xmit_size_goal_segs * mss_now;
+
+		if (likely(old_size_goal <= xmit_size_goal &&
+			   old_size_goal + mss_now > xmit_size_goal)) {
+			xmit_size_goal = old_size_goal;
+		} else {
+			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
+			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
+		}
 	}
 
 	return xmit_size_goal;
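
The patch replaces a modulo on every call with a cached segment count (xmit_size_goal_segs): as long as the bounded goal still falls inside the window covered by the cached multiple of mss_now, no divide is needed. Below is a minimal userspace sketch of the same caching idea, not kernel code; the names (cached_segs, bound_goal, mss) are hypothetical and chosen only for illustration.

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Sketch of the divide-avoidance pattern used in the patch above:
	 * cache the quotient and reuse it while the new goal stays within
	 * [cached_segs * mss, cached_segs * mss + mss).
	 */
	static uint32_t cached_segs;

	static uint32_t size_goal(uint32_t bound_goal, uint32_t mss)
	{
		uint32_t old_goal = cached_segs * mss;

		/* Fast path: cached multiple of mss is still correct. */
		if (old_goal <= bound_goal && old_goal + mss > bound_goal)
			return old_goal;

		/* Slow path: one divide refreshes the cached segment count. */
		cached_segs = bound_goal / mss;
		return cached_segs * mss;
	}

	int main(void)
	{
		printf("%u\n", size_goal(65000, 1448));	/* divide, caches 44 segs: 63712 */
		printf("%u\n", size_goal(64900, 1448));	/* fast path, no divide: 63712 */
		return 0;
	}

In the common case the goal changes little between calls, so the fast path is taken and only a multiply and two compares run per call; the divide is paid only when MSS or the window bound moves the goal outside the cached segment count.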