tcp: remove SOCK_QUEUE_SHRUNK
SOCK_QUEUE_SHRUNK is currently used by TCP as a temporary state
that remembers whether an incoming ACK packet has freed some room
in the rtx queue.
This flag is later checked from tcp_check_space() when deciding
whether to signal EPOLLOUT.
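
As a rough illustration of the mechanism described above, here is a minimal
standalone userspace model (not kernel code) of the pre-patch gating: the
EPOLLOUT wakeup only happens when a prior ACK has freed rtx skbs and set the
flag. Names such as struct model_sock, process_ack() and check_space() are
made up for this sketch.

#include <stdbool.h>
#include <stdio.h>

struct model_sock {
	bool queue_shrunk;	/* models SOCK_QUEUE_SHRUNK */
	bool nospace;		/* models SOCK_NOSPACE: a writer is waiting for space */
};

/* Models ACK processing: only an ACK that actually frees rtx skbs
 * marks the queue as shrunk; a SACK that frees nothing does not. */
static void process_ack(struct model_sock *sk, bool freed_rtx_skbs)
{
	if (freed_rtx_skbs)
		sk->queue_shrunk = true;
}

/* Models the pre-patch tcp_check_space(): no flag, no wakeup. */
static void check_space(struct model_sock *sk)
{
	if (!sk->queue_shrunk)
		return;
	sk->queue_shrunk = false;
	if (sk->nospace)
		printf("wake writer (EPOLLOUT)\n");
}

int main(void)
{
	struct model_sock sk = { .nospace = true };

	process_ack(&sk, false);	/* SACK only: nothing freed from rtx queue */
	check_space(&sk);		/* no wakeup: the writer stays blocked */

	process_ack(&sk, true);		/* cumulative ACK frees rtx skbs */
	check_space(&sk);		/* prints the wakeup */
	return 0;
}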
Problem is: if we receive SACK packets and no packet is removed
from the rtx queue, we can still send fresh packets, thus moving
them from the write queue to the rtx queue and eventually emptying
the write queue. Because nothing along that path sets
SOCK_QUEUE_SHRUNK, tcp_check_space() never signals EPOLLOUT, and a
sender waiting for write space stalls.
This stall can happen if TCP_NOTSENT_LOWAT is used.
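
For context, a hedged sketch of the kind of sender this affects (illustrative
only: error handling omitted, 'fd' assumed to be a connected non-blocking TCP
socket on a Linux system where TCP_NOTSENT_LOWAT is available). It sets
TCP_NOTSENT_LOWAT and waits for EPOLLOUT, which is exactly the wakeup that
could go missing before this fix.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

static void wait_for_write_space(int fd)
{
	int lowat = 128 * 1024;	/* wake once unsent data drops below ~128 KB */
	struct epoll_event ev = { .events = EPOLLOUT, .data.fd = fd };
	struct epoll_event out;
	int ep = epoll_create1(0);

	setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT, &lowat, sizeof(lowat));
	epoll_ctl(ep, EPOLL_CTL_ADD, fd, &ev);

	/* Before this fix, this wait could stall: SACK-only ACKs let the
	 * write queue drain without setting SOCK_QUEUE_SHRUNK, so no
	 * EPOLLOUT wakeup was generated even though the amount of unsent
	 * data had dropped below the threshold. */
	epoll_wait(ep, &out, 1, -1);
	close(ep);
}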
With this fix, we no longer risk stalling sends while holes
are repaired, and we can fully use socket sndbuf.
This also avoids dirtying a cache line for typical RPC
workloads.
Fixes: c9bee3b7fd ("tcp: TCP_NOTSENT_LOWAT socket option")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0cbe6a8f08
parent b4c5881446
include/net/sock.h
@@ -845,7 +845,6 @@ enum sock_flags {
 	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
 	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
 	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
-	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
 	SOCK_MEMALLOC, /* VM depends on this socket for swapping */
 	SOCK_TIMESTAMPING_RX_SOFTWARE, /* %SOF_TIMESTAMPING_RX_SOFTWARE */
 	SOCK_FASYNC, /* fasync() active */
@@ -1526,7 +1525,6 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
 DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 {
-	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
 	sk_wmem_queued_add(sk, -skb->truesize);
 	sk_mem_uncharge(sk, skb->truesize);
 	if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
net/ipv4/tcp_input.c
@@ -5332,12 +5332,6 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
 	return true;
 }
 
-/* When incoming ACK allowed to free some skb from write_queue,
- * we remember this event in flag SOCK_QUEUE_SHRUNK and wake up socket
- * on the exit from tcp input handler.
- *
- * PROBLEM: sndbuf expansion does not work well with largesend.
- */
 static void tcp_new_space(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -5352,16 +5346,13 @@ static void tcp_new_space(struct sock *sk)
 
 static void tcp_check_space(struct sock *sk)
 {
-	if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
-		sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
-		/* pairs with tcp_poll() */
-		smp_mb();
-		if (sk->sk_socket &&
-		    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
-			tcp_new_space(sk);
-			if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
-				tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
-		}
+	/* pairs with tcp_poll() */
+	smp_mb();
+	if (sk->sk_socket &&
+	    test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
+		tcp_new_space(sk);
+		if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
+			tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
 	}
 }
 
net/ipv4/tcp_output.c
@@ -1682,7 +1682,6 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 		skb->truesize -= delta_truesize;
 		sk_wmem_queued_add(sk, -delta_truesize);
 		sk_mem_uncharge(sk, delta_truesize);
-		sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
 	}
 
 	/* Any change of skb->len requires recalculation of tso factor. */