tcp/dccp: drop SYN packets if accept queue is full
Per listen(fd, backlog) rules, there is really no point accepting a SYN, sending a SYNACK, and then dropping the following ACK packet when the accept queue is full: the application is not draining its accept queue fast enough. This behavior fools TCP clients into believing they established a flow, while there is nothing at the server side. They might then send about 10 MSS (if using IW10) that will be dropped anyway while the server is under stress.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
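As context for the change, here is a minimal userspace sketch of the situation the patch targets (hypothetical demo code, not part of the commit): a listener whose application never calls accept(), so its accept queue stays full. On pre-patch kernels, further clients still completed the three-way handshake and saw connect() succeed against a server that could never service them; with this patch, their SYNs are dropped and they simply retransmit SYN instead of sending data the server must throw away.

/*
 * Hypothetical demo, not part of the commit: a listener with a tiny
 * backlog that never drains its accept queue.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr;
	int lfd = socket(AF_INET, SOCK_STREAM, 0);

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr.sin_port = htons(12345);

	bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
	listen(lfd, 1);		/* tiny backlog, never accept()ed */
	pause();		/* queue fills; now connect clients and watch SYN handling */
	return 0;
}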
parent 58effd7168
commit 5ea8ea2cb7
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
@@ -289,11 +289,6 @@ static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
 	return reqsk_queue_len(&inet_csk(sk)->icsk_accept_queue);
 }
 
-static inline int inet_csk_reqsk_queue_young(const struct sock *sk)
-{
-	return reqsk_queue_len_young(&inet_csk(sk)->icsk_accept_queue);
-}
-
 static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
 {
 	return inet_csk_reqsk_queue_len(sk) >= sk->sk_max_ack_backlog;
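The helper removed above counted "young" request sockets, i.e. those whose SYNACK has not yet been retransmitted. The old heuristic kept accepting SYNs despite a full accept queue as long as at most one young request was queued; with the unconditional drop below, the helper loses its last caller. A standalone illustration of the notion (hypothetical types, not kernel code):

/*
 * Illustration only, hypothetical types: a request sock is "young"
 * until its SYNACK has been retransmitted once.
 */
struct fake_req {
	int num_timeout;	/* SYNACK retransmissions so far */
};

static int fake_req_is_young(const struct fake_req *req)
{
	return req->num_timeout == 0;
}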
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
@@ -588,13 +588,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (inet_csk_reqsk_queue_is_full(sk))
 		goto drop;
 
-	/*
-	 * Accept backlog is full. If we have already queued enough
-	 * of warm entries in syn queue, drop request. It is better than
-	 * clogging syn queue with openreqs with exponentially increasing
-	 * timeout.
-	 */
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+	if (sk_acceptq_is_full(sk))
 		goto drop;
 
 	req = inet_reqsk_alloc(&dccp_request_sock_ops, sk, true);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
@@ -325,7 +325,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	if (inet_csk_reqsk_queue_is_full(sk))
 		goto drop;
 
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+	if (sk_acceptq_is_full(sk))
 		goto drop;
 
 	req = inet_reqsk_alloc(&dccp6_request_sock_ops, sk, true);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
@@ -6298,13 +6298,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 			goto drop;
 	}
 
-
-	/* Accept backlog is full. If we have already queued enough
-	 * of warm entries in syn queue, drop request. It is better than
-	 * clogging syn queue with openreqs with exponentially increasing
-	 * timeout.
-	 */
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+	if (sk_acceptq_is_full(sk)) {
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 		goto drop;
 	}
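In the TCP path above, the drop still bumps LINUX_MIB_LISTENOVERFLOWS, which is exported to userspace as the "ListenOverflows" field of the TcpExt lines in /proc/net/netstat, so accept-queue drops remain observable after the patch (e.g. via `nstat TcpExtListenOverflows`). A small hypothetical reader, not part of the commit:

/*
 * Hypothetical observer: print the TcpExt header and value lines from
 * /proc/net/netstat; ListenOverflows is one of the listed fields.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/net/netstat", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "TcpExt:", 7))
			fputs(line, stdout);	/* field names line, then values line */
	fclose(f);
	return 0;
}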