// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

#if IS_ENABLED(CONFIG_IPV6)
/* match_sk*_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses
 *				if IPv6 only, and any IPv4 addresses
 *				if not IPv6 only
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
 *				and 0.0.0.0 equals to 0.0.0.0 only
 */
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
				 const struct in6_addr *sk2_rcv_saddr6,
				 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk1_ipv6only, bool sk2_ipv6only,
				 bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
	int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;

	/* if both are mapped, treat as IPv4 */
	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
		if (!sk2_ipv6only) {
			if (sk1_rcv_saddr == sk2_rcv_saddr)
				return true;
			return (match_sk1_wildcard && !sk1_rcv_saddr) ||
				(match_sk2_wildcard && !sk2_rcv_saddr);
		}
		return false;
	}

	if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
		return true;

	if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
		return true;

	if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
	    !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
		return true;

	if (sk2_rcv_saddr6 &&
	    ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
		return true;

	return false;
}
#endif

/* match_sk*_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
 * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 *				0.0.0.0 only equals to 0.0.0.0
 */
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
				 bool sk2_ipv6only, bool match_sk1_wildcard,
				 bool match_sk2_wildcard)
{
	if (!sk2_ipv6only) {
		if (sk1_rcv_saddr == sk2_rcv_saddr)
			return true;
		return (match_sk1_wildcard && !sk1_rcv_saddr) ||
			(match_sk2_wildcard && !sk2_rcv_saddr);
	}
	return false;
}
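
/* Compare the local receive addresses of two sockets, dispatching to the
 * IPv6 or IPv4 helper above depending on sk's address family.  With
 * match_wildcard set, a wildcard address on either side matches any address
 * of the same family (e.g. a socket bound to 0.0.0.0 compares equal to one
 * bound to 192.0.2.1); see the helpers above for the exact IPv6 rules.
 */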
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
			  bool match_wildcard)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
					    inet6_rcv_saddr(sk2),
					    sk->sk_rcv_saddr,
					    sk2->sk_rcv_saddr,
					    ipv6_only_sock(sk),
					    ipv6_only_sock(sk2),
					    match_wildcard,
					    match_wildcard);
#endif
	return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
				    ipv6_only_sock(sk2), match_wildcard,
				    match_wildcard);
}
EXPORT_SYMBOL(inet_rcv_saddr_equal);
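
/* Return true if the socket is bound to the wildcard address
 * (INADDR_ANY / IPV6_ADDR_ANY).
 */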
bool inet_rcv_saddr_any(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6)
		return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#endif
	return !sk->sk_rcv_saddr;
}
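
/* Read the per-netns ephemeral port range; the seqlock retry loop below
 * guarantees a consistent low/high pair even if the range is being updated
 * concurrently.
 */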
void inet_get_local_port_range(const struct net *net, int *low, int *high)
{
	unsigned int seq;

	do {
		seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);

		*low = net->ipv4.ip_local_ports.range[0];
		*high = net->ipv4.ip_local_ports.range[1];
	} while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
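
/* Return the local port range for this socket: start from the per-netns
 * range and narrow it with the socket's IP_LOCAL_PORT_RANGE bounds when
 * those fall inside the netns range; otherwise the netns range is returned
 * unchanged.
 */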
void inet_sk_get_local_port_range(const struct sock *sk, int *low, int *high)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct net *net = sock_net(sk);
	int lo, hi, sk_lo, sk_hi;

	inet_get_local_port_range(net, &lo, &hi);

	sk_lo = inet->local_port_range.lo;
	sk_hi = inet->local_port_range.hi;

	if (unlikely(lo <= sk_lo && sk_lo <= hi))
		lo = sk_lo;
	if (unlikely(lo <= sk_hi && sk_hi <= hi))
		hi = sk_hi;

	*low = lo;
	*high = hi;
}
EXPORT_SYMBOL(inet_sk_get_local_port_range);
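
/* bhash2 is keyed by port *and* address, so it can only be searched at bind
 * time when the socket is bound to a specific address (not the wildcard and,
 * for IPv6, not a v4-mapped address).
 */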
static bool inet_use_bhash2_on_bind(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (sk->sk_family == AF_INET6) {
		int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);

		return addr_type != IPV6_ADDR_ANY &&
			addr_type != IPV6_ADDR_MAPPED;
	}
#endif
	return sk->sk_rcv_saddr != htonl(INADDR_ANY);
}
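
/* Report whether sk and sk2 conflict on the same local port.  Sockets bound
 * to different devices never conflict; otherwise the outcome depends on
 * SO_REUSEADDR/SO_REUSEPORT on both sockets, the peer's TCP state (LISTEN,
 * TIME_WAIT) and, for reuseport, on matching socket uids.
 */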
static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
			       kuid_t sk_uid, bool relax,
			       bool reuseport_cb_ok, bool reuseport_ok)
{
	int bound_dev_if2;

	if (sk == sk2)
		return false;

	bound_dev_if2 = READ_ONCE(sk2->sk_bound_dev_if);

	if (!sk->sk_bound_dev_if || !bound_dev_if2 ||
	    sk->sk_bound_dev_if == bound_dev_if2) {
		if (sk->sk_reuse && sk2->sk_reuse &&
		    sk2->sk_state != TCP_LISTEN) {
			if (!relax || (!reuseport_ok && sk->sk_reuseport &&
				       sk2->sk_reuseport && reuseport_cb_ok &&
				       (sk2->sk_state == TCP_TIME_WAIT ||
					uid_eq(sk_uid, sock_i_uid(sk2)))))
				return true;
		} else if (!reuseport_ok || !sk->sk_reuseport ||
			   !sk2->sk_reuseport || !reuseport_cb_ok ||
			   (sk2->sk_state != TCP_TIME_WAIT &&
			    !uid_eq(sk_uid, sock_i_uid(sk2)))) {
			return true;
		}
	}
	return false;
}
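
/* Per-socket helper for the bhash2 walk: an IPv4 socket can never conflict
 * with an IPv6-only peer; everything else is deferred to
 * inet_bind_conflict().
 */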
static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
				   kuid_t sk_uid, bool relax,
				   bool reuseport_cb_ok, bool reuseport_ok)
{
	if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
		return false;

	return inet_bind_conflict(sk, sk2, sk_uid, relax,
				  reuseport_cb_ok, reuseport_ok);
}
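
/* Walk one bhash2 bucket (its bound sockets and its timewait sockets) and
 * report whether any entry conflicts with sk.
 */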
static bool inet_bhash2_conflict(const struct sock *sk,
				 const struct inet_bind2_bucket *tb2,
				 kuid_t sk_uid,
				 bool relax, bool reuseport_cb_ok,
				 bool reuseport_ok)
{
	struct inet_timewait_sock *tw2;
	struct sock *sk2;

	sk_for_each_bound_bhash2(sk2, &tb2->owners) {
		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
					   reuseport_cb_ok, reuseport_ok))
			return true;
	}

	twsk_for_each_bound_bhash2(tw2, &tb2->deathrow) {
		sk2 = (struct sock *)tw2;

		if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
					   reuseport_cb_ok, reuseport_ok))
			return true;
	}

	return false;
}
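
/* Check a proposed local port against everything already bound to it.  When
 * the socket binds to a wildcard (or, for IPv6, a v4-mapped) address the
 * per-port tb->owners list is scanned; otherwise only the per-(port, address)
 * tb2 bucket needs to be checked.
 */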
/* This should be called only when the tb and tb2 hashbuckets' locks are held */
static int inet_csk_bind_conflict(const struct sock *sk,
				  const struct inet_bind_bucket *tb,
				  const struct inet_bind2_bucket *tb2, /* may be null */
				  bool relax, bool reuseport_ok)
{
	bool reuseport_cb_ok;
	struct sock_reuseport *reuseport_cb;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	rcu_read_lock();
	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
	rcu_read_unlock();

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners and tb2->owners list belong
	 * to the same net - the one this bucket belongs to.
	 */

	if (!inet_use_bhash2_on_bind(sk)) {
		struct sock *sk2;

		sk_for_each_bound(sk2, &tb->owners)
			if (inet_bind_conflict(sk, sk2, uid, relax,
					       reuseport_cb_ok, reuseport_ok) &&
			    inet_rcv_saddr_equal(sk, sk2, true))
				return true;

		return false;
	}

	/* Conflicts with an existing IPV6_ADDR_ANY (if ipv6) or INADDR_ANY (if
	 * ipv4) should have been checked already. We need to do these two
	 * checks separately because their spinlocks have to be acquired/released
	 * independently of each other, to prevent possible deadlocks
	 */
	return tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
					   reuseport_ok);
}

/* Determine if there is a bind conflict with an existing IPV6_ADDR_ANY (if ipv6) or
 * INADDR_ANY (if ipv4) socket.
 *
 * Caller must hold bhash hashbucket lock with local bh disabled, to protect
 * against concurrent binds on the port for addr any
 */
static bool inet_bhash2_addr_any_conflict(const struct sock *sk, int port, int l3mdev,
					  bool relax, bool reuseport_ok)
{
	kuid_t uid = sock_i_uid((struct sock *)sk);
	const struct net *net = sock_net(sk);
	struct sock_reuseport *reuseport_cb;
	struct inet_bind_hashbucket *head2;
	struct inet_bind2_bucket *tb2;
	bool reuseport_cb_ok;

	rcu_read_lock();
	reuseport_cb = rcu_dereference(sk->sk_reuseport_cb);
	/* paired with WRITE_ONCE() in __reuseport_(add|detach)_closed_sock */
	reuseport_cb_ok = !reuseport_cb || READ_ONCE(reuseport_cb->num_closed_socks);
	rcu_read_unlock();

	head2 = inet_bhash2_addr_any_hashbucket(sk, net, port);

	spin_lock(&head2->lock);

	inet_bind_bucket_for_each(tb2, &head2->chain)
		if (inet_bind2_bucket_match_addr_any(tb2, net, port, l3mdev, sk))
			break;

	if (tb2 && inet_bhash2_conflict(sk, tb2, uid, relax, reuseport_cb_ok,
					reuseport_ok)) {
		spin_unlock(&head2->lock);
		return true;
	}
|
|
|
|
|
|
|
spin_unlock(&head2->lock);
|
|
|
|
return false;
|
2005-08-10 11:11:08 +08:00
|
|
|
}
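
/* Illustrative user-space view of the conflict checked above (a sketch, not
 * part of this file's API; the address and port are arbitrary example values).
 * With neither socket setting SO_REUSEADDR or SO_REUSEPORT:
 *
 *      bind(s1, 0.0.0.0:8080)    -> succeeds, owner lands in the addr-any bucket
 *      bind(s2, 192.0.2.1:8080)  -> must still be checked against that wildcard
 *                                   owner and fails with EADDRINUSE
 *
 * inet_bhash2_addr_any_conflict() is what performs that wildcard-bucket check
 * for a socket binding to a specific address.
 */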

/*
 * Find an open port number for the socket.  Returns with the
 * inet_bind_hashbucket locks held if successful.
 */
static struct inet_bind_hashbucket *
inet_csk_find_open_port(const struct sock *sk, struct inet_bind_bucket **tb_ret,
                        struct inet_bind2_bucket **tb2_ret,
                        struct inet_bind_hashbucket **head2_ret, int *port_ret)
{
        struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
        int i, low, high, attempt_half, port, l3mdev;
        struct inet_bind_hashbucket *head, *head2;
        struct net *net = sock_net(sk);
        struct inet_bind2_bucket *tb2;
        struct inet_bind_bucket *tb;
        u32 remaining, offset;
        bool relax = false;

        l3mdev = inet_sk_bound_l3mdev(sk);
ports_exhausted:
        attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
        inet_sk_get_local_port_range(sk, &low, &high);
        high++; /* [32768, 60999] -> [32768, 61000[ */
        if (high - low < 4)
                attempt_half = 0;
        if (attempt_half) {
                int half = low + (((high - low) >> 2) << 1);

                if (attempt_half == 1)
                        high = half;
                else
                        low = half;
        }
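        /* Worked example (illustrative numbers only): with the default range
         * [32768, 60999], high becomes 61000 and
         *      half = 32768 + (((61000 - 32768) >> 2) << 1) = 46884,
         * so an SK_CAN_REUSE socket first scans [32768, 46884) and only falls
         * back to [46884, 61000) once the lower half is exhausted. The
         * >> 2 / << 1 combination keeps the split point an even offset from @low.
         */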
        remaining = high - low;
        if (likely(remaining > 1))
                remaining &= ~1U;

        offset = get_random_u32_below(remaining);
        /* __inet_hash_connect() favors ports having @low parity
         * We do the opposite to not pollute connect() users.
         */
        offset |= 1U;
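        /* Example of the parity split (assuming the default even lower bound
         * of 32768): forcing @offset odd means port = low + offset starts on
         * the odd ports (32769, 32771, ...). Only after that scan fails does
         * the offset-- below flip parity and probe the even ports, which are
         * the ones __inet_hash_connect() prefers for connect().
         */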

other_parity_scan:
        port = low + offset;
        for (i = 0; i < remaining; i += 2, port += 2) {
                if (unlikely(port >= high))
                        port -= remaining;
                if (inet_is_local_reserved_port(net, port))
                        continue;
                head = &hinfo->bhash[inet_bhashfn(net, port,
                                                  hinfo->bhash_size)];
                spin_lock_bh(&head->lock);
                if (inet_use_bhash2_on_bind(sk)) {
                        if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, relax, false))
                                goto next_port;
                }

                head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
                spin_lock(&head2->lock);
                tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
                inet_bind_bucket_for_each(tb, &head->chain)
                        if (inet_bind_bucket_match(tb, net, port, l3mdev)) {
                                if (!inet_csk_bind_conflict(sk, tb, tb2,
                                                            relax, false))
                                        goto success;
                                spin_unlock(&head2->lock);
                                goto next_port;
                        }
                tb = NULL;
                goto success;
next_port:
                spin_unlock_bh(&head->lock);
                cond_resched();
        }
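
        /* Locking sketch for the scan above (and for inet_csk_get_port()
         * below), restating the rule documented when bhash2 was added: the
         * bhash2 bucket lock always nests inside the bhash bucket lock and is
         * dropped first, and no two bhash2 locks are ever held at once.
         *
         *      spin_lock_bh(&head->lock);      // bhash, bottom halves off
         *      spin_lock(&head2->lock);        // bhash2, nested
         *      ...
         *      spin_unlock(&head2->lock);      // bhash2 released first
         *      spin_unlock_bh(&head->lock);    // bhash released last
         *
         * On success this function instead returns with both locks still held,
         * as the comment above the function states.
         */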

        offset--;
        if (!(offset & 1))
                goto other_parity_scan;

        if (attempt_half == 1) {
                /* OK we now try the upper half of the range */
                attempt_half = 2;
                goto other_half_scan;
        }

        if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
                /* We still have a chance to connect to different destinations */
                relax = true;
                goto ports_exhausted;
        }
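        /* Example of when the relax retry above matters, adapted from the
         * ip_autobind_reuse changelog (illustrative commands, not a recipe):
         *
         *      # sysctl -w net.ipv4.ip_local_port_range="32768 32768"
         *
         * Two SO_REUSEADDR sockets then both call bind(addr, 0); the second
         * one finds the single ephemeral port exhausted. With
         * net.ipv4.ip_autobind_reuse=1 the search is rerun with relax == true,
         * allowing the (addr, port) pair to be shared and leaving the full
         * 4-tuple to be disambiguated at connect() time.
         */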
        return NULL;
success:
        *port_ret = port;
        *tb_ret = tb;
        *tb2_ret = tb2;
        *head2_ret = head2;
        return head;
}

static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
                                     struct sock *sk)
{
        kuid_t uid = sock_i_uid(sk);

        if (tb->fastreuseport <= 0)
                return 0;
        if (!sk->sk_reuseport)
                return 0;
        if (rcu_access_pointer(sk->sk_reuseport_cb))
                return 0;
        if (!uid_eq(tb->fastuid, uid))
                return 0;
        /* We only need to check the rcv_saddr if this tb was once marked
         * without fastreuseport and then was reset, as we can only know that
         * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
         * owners list.
         */
        if (tb->fastreuseport == FASTREUSEPORT_ANY)
                return 1;
#if IS_ENABLED(CONFIG_IPV6)
        if (tb->fast_sk_family == AF_INET6)
                return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
                                            inet6_rcv_saddr(sk),
                                            tb->fast_rcv_saddr,
                                            sk->sk_rcv_saddr,
                                            tb->fast_ipv6_only,
                                            ipv6_only_sock(sk), true, false);
#endif
        return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
                                    ipv6_only_sock(sk), true, false);
}
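
/* Illustrative fast path enabled by sk_reuseport_match() (user-space sketch,
 * arbitrary address and port, both sockets owned by the same uid):
 *
 *      setsockopt(s1, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *      bind(s1, 192.0.2.1:8080);       // first owner, tb marked fastreuseport
 *      setsockopt(s2, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *      bind(s2, 192.0.2.1:8080);       // matches, so inet_csk_get_port() can
 *                                      // skip the full bind-conflict walk
 *
 * Any mismatch (different uid, SO_REUSEPORT not set, or a reuseport group
 * already attached to the socket) makes this return 0 and forces the slow path.
 */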

void inet_csk_update_fastreuse(struct inet_bind_bucket *tb,
                               struct sock *sk)
{
        kuid_t uid = sock_i_uid(sk);
        bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;

        if (hlist_empty(&tb->owners)) {
                tb->fastreuse = reuse;
                if (sk->sk_reuseport) {
                        tb->fastreuseport = FASTREUSEPORT_ANY;
                        tb->fastuid = uid;
                        tb->fast_rcv_saddr = sk->sk_rcv_saddr;
                        tb->fast_ipv6_only = ipv6_only_sock(sk);
                        tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
                        tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
                } else {
                        tb->fastreuseport = 0;
                }
        } else {
                if (!reuse)
                        tb->fastreuse = 0;
                if (sk->sk_reuseport) {
                        /* We didn't match or we don't have fastreuseport set on
                         * the tb, but we have sk_reuseport set on this socket
                         * and we know that there are no bind conflicts with
                         * this socket in this tb, so reset our tb's reuseport
                         * settings so that any subsequent sockets that match
                         * our current socket will be put on the fast path.
                         *
                         * If we reset we need to set FASTREUSEPORT_STRICT so we
                         * do extra checking for all subsequent sk_reuseport
                         * socks.
                         */
                        if (!sk_reuseport_match(tb, sk)) {
                                tb->fastreuseport = FASTREUSEPORT_STRICT;
                                tb->fastuid = uid;
                                tb->fast_rcv_saddr = sk->sk_rcv_saddr;
                                tb->fast_ipv6_only = ipv6_only_sock(sk);
                                tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
                                tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
                        }
                } else {
                        tb->fastreuseport = 0;
                }
        }
}
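
/* Rough lifecycle of tb->fastreuseport as driven by the function above (a
 * summary, not an exhaustive state machine):
 *
 *      0                       no SO_REUSEPORT fast path for this bucket
 *      FASTREUSEPORT_ANY       first owner had SO_REUSEPORT; any later
 *                              same-uid SO_REUSEPORT socket matches outright
 *      FASTREUSEPORT_STRICT    the cached state was reset by a non-matching
 *                              but conflict-free SO_REUSEPORT bind; later
 *                              sockets must also match fast_rcv_saddr
 *
 * Binding a socket without SO_REUSEPORT drops the bucket back to 0.
 */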

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 * We try to allocate an odd port (and leave even ports for connect())
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
        struct inet_hashinfo *hinfo = tcp_or_dccp_get_hashinfo(sk);
        bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
        bool found_port = false, check_bind_conflict = true;
        bool bhash_created = false, bhash2_created = false;
        int ret = -EADDRINUSE, port = snum, l3mdev;
        struct inet_bind_hashbucket *head, *head2;
        struct inet_bind2_bucket *tb2 = NULL;
        struct inet_bind_bucket *tb = NULL;
        bool head2_lock_acquired = false;
        struct net *net = sock_net(sk);

        l3mdev = inet_sk_bound_l3mdev(sk);
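
        /* User-space sketch of the snum == 0 case handled just below
         * (illustrative only; the address is an arbitrary example):
         *
         *      bind(s, 192.0.2.1:0);   // port 0 -> pick one for me
         *      getsockname(s, ...);    // reports the port chosen via
         *                              // inet_csk_find_open_port()
         *
         * An explicit non-zero port instead goes straight to the bhash bucket
         * lookup in the else branch.
         */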

        if (!port) {
                head = inet_csk_find_open_port(sk, &tb, &tb2, &head2, &port);
                if (!head)
                        return ret;

                head2_lock_acquired = true;

                if (tb && tb2)
                        goto success;
                found_port = true;
        } else {
                head = &hinfo->bhash[inet_bhashfn(net, port,
                                                  hinfo->bhash_size)];
                spin_lock_bh(&head->lock);
                inet_bind_bucket_for_each(tb, &head->chain)
                        if (inet_bind_bucket_match(tb, net, port, l3mdev))
                                break;
        }

        if (!tb) {
                tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep, net,
                                             head, port, l3mdev);
                if (!tb)
                        goto fail_unlock;
                bhash_created = true;
        }

        if (!found_port) {
                if (!hlist_empty(&tb->owners)) {
                        if (sk->sk_reuse == SK_FORCE_REUSE ||
                            (tb->fastreuse > 0 && reuse) ||
                            sk_reuseport_match(tb, sk))
                                check_bind_conflict = false;
                }

                if (check_bind_conflict && inet_use_bhash2_on_bind(sk)) {
                        if (inet_bhash2_addr_any_conflict(sk, port, l3mdev, true, true))
                                goto fail_unlock;
                }

                head2 = inet_bhashfn_portaddr(hinfo, sk, net, port);
                spin_lock(&head2->lock);
                head2_lock_acquired = true;
                tb2 = inet_bind2_bucket_find(head2, net, port, l3mdev, sk);
        }
|
|
|
|
|
|
|
|
if (!tb2) {
|
|
|
|
tb2 = inet_bind2_bucket_create(hinfo->bind2_bucket_cachep,
|
|
|
|
net, head2, port, l3mdev, sk);
|
|
|
|
if (!tb2)
|
2016-02-12 08:28:50 +08:00
|
|
|
goto fail_unlock;
|
net: Add a bhash2 table hashed by port and address
The current bind hashtable (bhash) is hashed by port only.
In the socket bind path, we have to check for bind conflicts by
traversing the specified port's inet_bind_bucket while holding the
hashbucket's spinlock (see inet_csk_get_port() and
inet_csk_bind_conflict()). In instances where there are tons of
sockets hashed to the same port at different addresses, the bind
conflict check is time-intensive and can cause softirq cpu lockups,
as well as stops new tcp connections since __inet_inherit_port()
also contests for the spinlock.
This patch adds a second bind table, bhash2, that hashes by
port and sk->sk_rcv_saddr (ipv4) and sk->sk_v6_rcv_saddr (ipv6).
Searching the bhash2 table leads to significantly faster conflict
resolution and less time holding the hashbucket spinlock.
Please note a few things:
* There can be the case where the a socket's address changes after it
has been bound. There are two cases where this happens:
1) The case where there is a bind() call on INADDR_ANY (ipv4) or
IPV6_ADDR_ANY (ipv6) and then a connect() call. The kernel will
assign the socket an address when it handles the connect()
2) In inet_sk_reselect_saddr(), which is called when rebuilding the
sk header and a few pre-conditions are met (eg rerouting fails).
In these two cases, we need to update the bhash2 table by removing the
entry for the old address, and add a new entry reflecting the updated
address.
* The bhash2 table must have its own lock, even though concurrent
accesses on the same port are protected by the bhash lock. Bhash2 must
have its own lock to protect against cases where sockets on different
ports hash to different bhash hashbuckets but to the same bhash2
hashbucket.
This brings up a few stipulations:
1) When acquiring both the bhash and the bhash2 lock, the bhash2 lock
will always be acquired after the bhash lock and released before the
bhash lock is released.
2) There are no nested bhash2 hashbucket locks. A bhash2 lock is always
acquired+released before another bhash2 lock is acquired+released.
* The bhash table cannot be superseded by the bhash2 table because for
bind requests on INADDR_ANY (ipv4) or IPV6_ADDR_ANY (ipv6), every socket
bound to that port must be checked for a potential conflict. The bhash
table is the only source of port->socket associations.
Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2022-08-23 02:10:21 +08:00
|
|
|
bhash2_created = true;
|
2017-01-17 23:51:04 +08:00
|
|
|
}
|
net: Add a bhash2 table hashed by port and address
The current bind hashtable (bhash) is hashed by port only.
In the socket bind path, we have to check for bind conflicts by
traversing the specified port's inet_bind_bucket while holding the
hashbucket's spinlock (see inet_csk_get_port() and
inet_csk_bind_conflict()). In instances where there are tons of
sockets hashed to the same port at different addresses, the bind
conflict check is time-intensive and can cause softirq cpu lockups,
as well as stops new tcp connections since __inet_inherit_port()
also contests for the spinlock.
This patch adds a second bind table, bhash2, that hashes by
port and sk->sk_rcv_saddr (ipv4) and sk->sk_v6_rcv_saddr (ipv6).
Searching the bhash2 table leads to significantly faster conflict
resolution and less time holding the hashbucket spinlock.
Please note a few things:
* There can be the case where the a socket's address changes after it
has been bound. There are two cases where this happens:
1) The case where there is a bind() call on INADDR_ANY (ipv4) or
IPV6_ADDR_ANY (ipv6) and then a connect() call. The kernel will
assign the socket an address when it handles the connect()
2) In inet_sk_reselect_saddr(), which is called when rebuilding the
sk header and a few pre-conditions are met (eg rerouting fails).
In these two cases, we need to update the bhash2 table by removing the
entry for the old address, and add a new entry reflecting the updated
address.
* The bhash2 table must have its own lock, even though concurrent
accesses on the same port are protected by the bhash lock. Bhash2 must
have its own lock to protect against cases where sockets on different
ports hash to different bhash hashbuckets but to the same bhash2
hashbucket.
This brings up a few stipulations:
1) When acquiring both the bhash and the bhash2 lock, the bhash2 lock
will always be acquired after the bhash lock and released before the
bhash lock is released.
2) There are no nested bhash2 hashbucket locks. A bhash2 lock is always
acquired+released before another bhash2 lock is acquired+released.
* The bhash table cannot be superseded by the bhash2 table because for
bind requests on INADDR_ANY (ipv4) or IPV6_ADDR_ANY (ipv6), every socket
bound to that port must be checked for a potential conflict. The bhash
table is the only source of port->socket associations.
Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2022-08-23 02:10:21 +08:00
|
|
|
|
|
|
|
if (!found_port && check_bind_conflict) {
|
|
|
|
if (inet_csk_bind_conflict(sk, tb, tb2, true, true))
|
|
|
|
goto fail_unlock;
|
|
|
|
}
|
|
|
|
|
2017-01-17 23:51:04 +08:00
|
|
|
success:
|
2020-08-12 02:33:23 +08:00
|
|
|
inet_csk_update_fastreuse(tb, sk);
|
|
|
|
|
2005-08-10 11:11:08 +08:00
|
|
|
if (!inet_csk(sk)->icsk_bind_hash)
|
net: Add a bhash2 table hashed by port and address
The current bind hashtable (bhash) is hashed by port only.
In the socket bind path, we have to check for bind conflicts by
traversing the specified port's inet_bind_bucket while holding the
hashbucket's spinlock (see inet_csk_get_port() and
inet_csk_bind_conflict()). In instances where there are tons of
sockets hashed to the same port at different addresses, the bind
conflict check is time-intensive and can cause softirq cpu lockups,
as well as stops new tcp connections since __inet_inherit_port()
also contests for the spinlock.
This patch adds a second bind table, bhash2, that hashes by
port and sk->sk_rcv_saddr (ipv4) and sk->sk_v6_rcv_saddr (ipv6).
Searching the bhash2 table leads to significantly faster conflict
resolution and less time holding the hashbucket spinlock.
Please note a few things:
* There can be the case where the a socket's address changes after it
has been bound. There are two cases where this happens:
1) The case where there is a bind() call on INADDR_ANY (ipv4) or
IPV6_ADDR_ANY (ipv6) and then a connect() call. The kernel will
assign the socket an address when it handles the connect()
2) In inet_sk_reselect_saddr(), which is called when rebuilding the
sk header and a few pre-conditions are met (eg rerouting fails).
In these two cases, we need to update the bhash2 table by removing the
entry for the old address, and add a new entry reflecting the updated
address.
* The bhash2 table must have its own lock, even though concurrent
accesses on the same port are protected by the bhash lock. Bhash2 must
have its own lock to protect against cases where sockets on different
ports hash to different bhash hashbuckets but to the same bhash2
hashbucket.
This brings up a few stipulations:
1) When acquiring both the bhash and the bhash2 lock, the bhash2 lock
will always be acquired after the bhash lock and released before the
bhash lock is released.
2) There are no nested bhash2 hashbucket locks. A bhash2 lock is always
acquired+released before another bhash2 lock is acquired+released.
* The bhash table cannot be superseded by the bhash2 table because for
bind requests on INADDR_ANY (ipv4) or IPV6_ADDR_ANY (ipv6), every socket
bound to that port must be checked for a potential conflict. The bhash
table is the only source of port->socket associations.
Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2022-08-23 02:10:21 +08:00
|
|
|
inet_bind_hash(sk, tb, tb2, port);
|
2008-07-26 12:43:18 +08:00
|
|
|
WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
|
net: Add a bhash2 table hashed by port and address
The current bind hashtable (bhash) is hashed by port only.
In the socket bind path, we have to check for bind conflicts by
traversing the specified port's inet_bind_bucket while holding the
hashbucket's spinlock (see inet_csk_get_port() and
inet_csk_bind_conflict()). In instances where there are tons of
sockets hashed to the same port at different addresses, the bind
conflict check is time-intensive and can cause softirq cpu lockups,
as well as stops new tcp connections since __inet_inherit_port()
also contests for the spinlock.
This patch adds a second bind table, bhash2, that hashes by
port and sk->sk_rcv_saddr (ipv4) and sk->sk_v6_rcv_saddr (ipv6).
Searching the bhash2 table leads to significantly faster conflict
resolution and less time holding the hashbucket spinlock.
Please note a few things:
* There can be the case where the a socket's address changes after it
has been bound. There are two cases where this happens:
1) The case where there is a bind() call on INADDR_ANY (ipv4) or
IPV6_ADDR_ANY (ipv6) and then a connect() call. The kernel will
assign the socket an address when it handles the connect()
2) In inet_sk_reselect_saddr(), which is called when rebuilding the
sk header and a few pre-conditions are met (eg rerouting fails).
In these two cases, we need to update the bhash2 table by removing the
entry for the old address, and add a new entry reflecting the updated
address.
* The bhash2 table must have its own lock, even though concurrent
accesses on the same port are protected by the bhash lock. Bhash2 must
have its own lock to protect against cases where sockets on different
ports hash to different bhash hashbuckets but to the same bhash2
hashbucket.
This brings up a few stipulations:
1) When acquiring both the bhash and the bhash2 lock, the bhash2 lock
will always be acquired after the bhash lock and released before the
bhash lock is released.
2) There are no nested bhash2 hashbucket locks. A bhash2 lock is always
acquired+released before another bhash2 lock is acquired+released.
* The bhash table cannot be superseded by the bhash2 table because for
bind requests on INADDR_ANY (ipv4) or IPV6_ADDR_ANY (ipv6), every socket
bound to that port must be checked for a potential conflict. The bhash
table is the only source of port->socket associations.
Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2022-08-23 02:10:21 +08:00
|
|
|
WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);
|
2007-02-09 22:24:47 +08:00
|
|
|
ret = 0;
|
2005-08-10 11:11:08 +08:00
|
|
|
|
|
|
|
fail_unlock:
|
net: Add a bhash2 table hashed by port and address
The current bind hashtable (bhash) is hashed by port only.
In the socket bind path, we have to check for bind conflicts by
traversing the specified port's inet_bind_bucket while holding the
hashbucket's spinlock (see inet_csk_get_port() and
inet_csk_bind_conflict()). In instances where there are tons of
sockets hashed to the same port at different addresses, the bind
conflict check is time-intensive and can cause softirq cpu lockups,
as well as stops new tcp connections since __inet_inherit_port()
also contests for the spinlock.
This patch adds a second bind table, bhash2, that hashes by
port and sk->sk_rcv_saddr (ipv4) and sk->sk_v6_rcv_saddr (ipv6).
Searching the bhash2 table leads to significantly faster conflict
resolution and less time holding the hashbucket spinlock.
Please note a few things:
* There can be the case where the a socket's address changes after it
has been bound. There are two cases where this happens:
1) The case where there is a bind() call on INADDR_ANY (ipv4) or
IPV6_ADDR_ANY (ipv6) and then a connect() call. The kernel will
assign the socket an address when it handles the connect()
2) In inet_sk_reselect_saddr(), which is called when rebuilding the
sk header and a few pre-conditions are met (eg rerouting fails).
In these two cases, we need to update the bhash2 table by removing the
entry for the old address, and add a new entry reflecting the updated
address.
* The bhash2 table must have its own lock, even though concurrent
accesses on the same port are protected by the bhash lock. Bhash2 must
have its own lock to protect against cases where sockets on different
ports hash to different bhash hashbuckets but to the same bhash2
hashbucket.
This brings up a few stipulations:
1) When acquiring both the bhash and the bhash2 lock, the bhash2 lock
will always be acquired after the bhash lock and released before the
bhash lock is released.
2) There are no nested bhash2 hashbucket locks. A bhash2 lock is always
acquired+released before another bhash2 lock is acquired+released.
* The bhash table cannot be superseded by the bhash2 table because for
bind requests on INADDR_ANY (ipv4) or IPV6_ADDR_ANY (ipv6), every socket
bound to that port must be checked for a potential conflict. The bhash
table is the only source of port->socket associations.
Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2022-08-23 02:10:21 +08:00
|
|
|
if (ret) {
|
|
|
|
if (bhash_created)
|
|
|
|
inet_bind_bucket_destroy(hinfo->bind_bucket_cachep, tb);
|
|
|
|
if (bhash2_created)
|
|
|
|
inet_bind2_bucket_destroy(hinfo->bind2_bucket_cachep,
|
|
|
|
tb2);
|
|
|
|
}
|
|
|
|
if (head2_lock_acquired)
|
|
|
|
spin_unlock(&head2->lock);
|
2016-02-12 08:28:50 +08:00
|
|
|
spin_unlock_bh(&head->lock);
|
2005-08-10 11:11:08 +08:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_get_port);
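
/* Illustrative user-space sketch (not part of this file's build, socket
 * descriptors and port number are made up): two sockets created by the same
 * user can both bind to the same port when both set SO_REUSEPORT before
 * bind(); the second bind() then typically skips the full conflict scan via
 * sk_reuseport_match() above. Without the option, the second bind() fails
 * with EADDRINUSE out of the conflict checks in inet_csk_get_port().
 *
 *	int one = 1;
 *	int a = socket(AF_INET, SOCK_STREAM, 0);
 *	int b = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in addr = {
 *		.sin_family = AF_INET,
 *		.sin_port = htons(8080),
 *		.sin_addr.s_addr = htonl(INADDR_ANY),
 *	};
 *
 *	setsockopt(a, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	setsockopt(b, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(a, (struct sockaddr *)&addr, sizeof(addr));	// claims the port
 *	bind(b, (struct sockaddr *)&addr, sizeof(addr));	// re-checked against bhash/bhash2
 */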

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		sched_annotate_sleep();
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct request_sock *req;
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue, sk);
	newsk = req->sk;

	if (sk->sk_protocol == IPPROTO_TCP &&
	    tcp_rsk(req)->tfo_listener) {
		spin_lock_bh(&queue->fastopenq.lock);
		if (tcp_rsk(req)->tfo_listener) {
			/* We are still waiting for the final ACK from 3WHS
			 * so can't free req now. Instead, we set req->sk to
			 * NULL to signify that the child socket is taken
			 * so reqsk_fastopen_remove() will free the req
			 * when 3WHS finishes (or is aborted).
			 */
			req->sk = NULL;
			req = NULL;
		}
		spin_unlock_bh(&queue->fastopenq.lock);
	}

out:
	release_sock(sk);
	if (newsk && mem_cgroup_sockets_enabled) {
		int amt;

		/* atomically get the memory usage, set and charge the
		 * newsk->sk_memcg.
		 */
		lock_sock(newsk);

		/* The socket has not been accepted yet, no need to look at
		 * newsk->sk_wmem_queued.
		 */
		amt = sk_mem_pages(newsk->sk_forward_alloc +
				   atomic_read(&newsk->sk_rmem_alloc));
		mem_cgroup_sk_alloc(newsk);
		if (newsk->sk_memcg && amt)
			mem_cgroup_charge_skmem(newsk->sk_memcg, amt,
						GFP_KERNEL | __GFP_NOFAIL);

		release_sock(newsk);
	}
	if (req)
		reqsk_put(req);
	return newsk;
out_err:
	newsk = NULL;
	req = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
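
/* Illustrative user-space sketch (not part of this file's build): the caller
 * side of the accept path above. A blocking accept() parks the task in
 * inet_csk_wait_for_connect(); with O_NONBLOCK the !timeo case returns
 * -EAGAIN straight away (reported as EAGAIN/EWOULDBLOCK), and a signal shows
 * up as EINTR via the signal_pending() branch.
 *
 *	int lfd = socket(AF_INET, SOCK_STREAM, 0);
 *	// ... bind() as usual ...
 *	listen(lfd, 128);
 *	for (;;) {
 *		int cfd = accept(lfd, NULL, NULL);	// sleeps until a request is ready
 *		if (cfd < 0) {
 *			if (errno == EINTR)
 *				continue;
 *			break;
 *		}
 *		// handle cfd ...
 *		close(cfd);
 *	}
 */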

/*
 * Using different timers for retransmit, delayed acks and probes
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(struct timer_list *t),
			       void (*delack_handler)(struct timer_list *t),
			       void (*keepalive_handler)(struct timer_list *t))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
	timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
	timer_setup(&sk->sk_timer, keepalive_handler, 0);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
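
/* Sketch of how a protocol is expected to wire these helpers (hedged; the
 * "my_*" handler names are hypothetical, see tcp_init_xmit_timers() for the
 * real user). Each handler receives the timer_list pointer and recovers its
 * socket with from_timer():
 *
 *	static void my_retransmit_timer(struct timer_list *t)
 *	{
 *		struct inet_connection_sock *icsk =
 *			from_timer(icsk, t, icsk_retransmit_timer);
 *		struct sock *sk = &icsk->icsk_inet.sk;
 *
 *		// lock sk, retransmit, re-arm as needed ...
 *	}
 *
 *	// at socket init time:
 *	inet_csk_init_xmit_timers(sk, my_retransmit_timer,
 *				  my_delack_timer, my_keepalive_timer);
 */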

struct dst_entry *inet_csk_route_req(const struct sock *sk,
				     struct flowi4 *fl4,
				     const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rcu_read_lock();
	opt = rcu_dereference(ireq->ireq_opt);

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	rcu_read_unlock();
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	rcu_read_unlock();
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);
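
/* Note on the lookup above: the flow key points at the peer (ir_rmt_addr),
 * or at the first hop recorded in an IPv4 source-route option if the SYN
 * carried one (opt->opt.faddr), with the address the request arrived on
 * (ir_loc_addr) as source. A strict source route that would still have to go
 * through a gateway is refused via the route_err path.
 */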

struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
					    struct sock *newsk,
					    const struct request_sock *req)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = read_pnet(&ireq->ireq_net);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct flowi4 *fl4;
	struct rtable *rt;

	opt = rcu_dereference(ireq->ireq_opt);
	fl4 = &newinet->cork.fl.u.ip4;

	flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
			   ip_sock_rt_tos(sk), ip_sock_rt_scope(sk),
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
			   ireq->ir_loc_addr, ireq->ir_rmt_port,
			   htons(ireq->ir_num), sk->sk_uid);
	security_req_classify_flow(req, flowi4_to_flowi_common(fl4));
	rt = ip_route_output_flow(net, fl4, sk);
	if (IS_ERR(rt))
		goto no_route;
	if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

/* Decide when to expire the request and when to resend SYN-ACK */
static void syn_ack_recalc(struct request_sock *req,
			   const int max_syn_ack_retries,
			   const u8 rskq_defer_accept,
			   int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->num_timeout >= max_syn_ack_retries;
		*resend = 1;
		return;
	}
	*expire = req->num_timeout >= max_syn_ack_retries &&
		  (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept);
	/* Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->num_timeout >= rskq_defer_accept - 1;
}

int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
	int err = req->rsk_ops->rtx_syn_ack(parent, req);

	if (!err)
		req->num_retrans++;
	return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);
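
/* Worked example for syn_ack_recalc() (illustrative numbers): with
 * max_syn_ack_retries == 5 and TCP_DEFER_ACCEPT giving rskq_defer_accept == 3,
 * a request whose ACK has already arrived (inet_rsk(req)->acked == 1) sees:
 *
 *	num_timeout	expire	resend
 *	0, 1		0	0	(defer period, stay quiet)
 *	2, 3, 4		0	1	(num_timeout >= rskq_defer_accept - 1)
 *	5 and up	1	1
 *
 * Without deferred accept (rskq_defer_accept == 0) the request is always
 * eligible for a SYN-ACK retransmit and simply expires once num_timeout
 * reaches max_syn_ack_retries.
 */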

static struct request_sock *inet_reqsk_clone(struct request_sock *req,
					     struct sock *sk)
{
	struct sock *req_sk, *nreq_sk;
	struct request_sock *nreq;

	nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN);
	if (!nreq) {
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

		/* paired with refcount_inc_not_zero() in reuseport_migrate_sock() */
		sock_put(sk);
		return NULL;
	}

	req_sk = req_to_sk(req);
	nreq_sk = req_to_sk(nreq);

	memcpy(nreq_sk, req_sk,
	       offsetof(struct sock, sk_dontcopy_begin));
	memcpy(&nreq_sk->sk_dontcopy_end, &req_sk->sk_dontcopy_end,
	       req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end));

	sk_node_init(&nreq_sk->sk_node);
	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
#endif
	nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;

	nreq->rsk_listener = sk;

	/* We need not acquire fastopenq->lock
	 * because the child socket is locked in inet_csk_listen_stop().
	 */
	if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(nreq)->tfo_listener)
		rcu_assign_pointer(tcp_sk(nreq->sk)->fastopen_rsk, nreq);

	return nreq;
}

static void reqsk_queue_migrated(struct request_sock_queue *queue,
				 const struct request_sock *req)
{
	if (req->num_timeout == 0)
		atomic_inc(&queue->young);
	atomic_inc(&queue->qlen);
}

static void reqsk_migrate_reset(struct request_sock *req)
{
	req->saved_syn = NULL;
#if IS_ENABLED(CONFIG_IPV6)
	inet_rsk(req)->ipv6_opt = NULL;
	inet_rsk(req)->pktopts = NULL;
#else
	inet_rsk(req)->ireq_opt = NULL;
#endif
}
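
/* Note on inet_reqsk_clone() above: the two memcpy()s deliberately skip the
 * sk_dontcopy_begin..sk_dontcopy_end window of struct sock_common (hash node,
 * tx/rx queue mappings, incoming CPU, refcount), which is why sk_node is
 * re-initialised and those mappings copied field by field right after, and
 * why the reference count of the clone is set by the caller.
 */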

/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock *req)
{
	struct sock *sk = req_to_sk(req);
	bool found = false;

	if (sk_hashed(sk)) {
		struct inet_hashinfo *hashinfo = tcp_or_dccp_get_hashinfo(sk);
		spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);

		spin_lock(lock);
		found = __sk_nulls_del_node_init_rcu(sk);
		spin_unlock(lock);
	}
	if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
		reqsk_put(req);
	return found;
}

bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
	bool unlinked = reqsk_queue_unlink(req);

	if (unlinked) {
		reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
		reqsk_put(req);
	}
	return unlinked;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
	inet_csk_reqsk_queue_drop(sk, req);
	reqsk_put(req);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);
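
/* Note (hedged reading of the code above): reqsk_queue_unlink() drops the
 * timer's reference only when it actually cancels a pending rsk_timer, and
 * inet_csk_reqsk_queue_drop() adjusts the queue accounting and drops the
 * hash-table reference only when the request was still hashed, so racing
 * callers (e.g. the request timer versus an incoming ACK) cannot double-free.
 * The _drop_and_put() variant additionally releases the caller's own
 * reference.
 */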

static void reqsk_timer_handler(struct timer_list *t)
{
	struct request_sock *req = from_timer(req, t, rsk_timer);
	struct request_sock *nreq = NULL, *oreq = req;
	struct sock *sk_listener = req->rsk_listener;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	struct net *net;
	int max_syn_ack_retries, qlen, expire = 0, resend = 0;

	if (inet_sk_state_load(sk_listener) != TCP_LISTEN) {
		struct sock *nsk;

		nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL);
		if (!nsk)
			goto drop;

		nreq = inet_reqsk_clone(req, nsk);
		if (!nreq)
			goto drop;

		/* The new timer for the cloned req can decrease the 2
		 * by calling inet_csk_reqsk_queue_drop_and_put(), so
		 * hold another count to prevent use-after-free and
		 * call reqsk_put() just before return.
		 */
		refcount_set(&nreq->rsk_refcnt, 2 + 1);
		timer_setup(&nreq->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
		reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req);

		req = nreq;
		sk_listener = nsk;
	}

	icsk = inet_csk(sk_listener);
	net = sock_net(sk_listener);
	max_syn_ack_retries = icsk->icsk_syn_retries ? :
		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embrions; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	queue = &icsk->icsk_accept_queue;
	qlen = reqsk_queue_len(queue);
	if ((qlen << 1) > max(8U, READ_ONCE(sk_listener->sk_max_ack_backlog))) {
		int young = reqsk_queue_len_young(queue) << 1;

		while (max_syn_ack_retries > 2) {
			if (qlen < young)
				break;
			max_syn_ack_retries--;
			young <<= 1;
		}
	}
	syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept),
		       &expire, &resend);
	req->rsk_ops->syn_ack_timeout(req);
	if (!expire &&
	    (!resend ||
	     !inet_rtx_syn_ack(sk_listener, req) ||
	     inet_rsk(req)->acked)) {
		if (req->num_timeout++ == 0)
			atomic_dec(&queue->young);
		mod_timer(&req->rsk_timer, jiffies + reqsk_timeout(req, TCP_RTO_MAX));

		if (!nreq)
			return;

		if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
			/* delete timer */
			inet_csk_reqsk_queue_drop(sk_listener, nreq);
			goto no_ownership;
		}

		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQSUCCESS);
		reqsk_migrate_reset(oreq);
		reqsk_queue_removed(&inet_csk(oreq->rsk_listener)->icsk_accept_queue, oreq);
		reqsk_put(oreq);

		reqsk_put(nreq);
		return;
	}

	/* Even if we can clone the req, we may not need to retransmit any more
	 * SYN+ACKs (nreq->num_timeout > max_syn_ack_retries, etc), or another
	 * CPU may win the "own_req" race so that inet_ehash_insert() fails.
	 */
	if (nreq) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMIGRATEREQFAILURE);
no_ownership:
		reqsk_migrate_reset(nreq);
		reqsk_queue_removed(queue, nreq);
		__reqsk_free(nreq);
	}

drop:
	inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
}
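
/* Worked example of the pressure check above (illustrative numbers): with
 * sk_max_ack_backlog == 128, qlen == 100 and 20 "young" requests, the queue
 * is more than half full (200 > 128), so the loop walks:
 *
 *	max_syn_ack_retries = 5, young = 40:  100 >= 40  -> 4, young = 80
 *	max_syn_ack_retries = 4, young = 80:  100 >= 80  -> 3, young = 160
 *	max_syn_ack_retries = 3, young = 160: 100 < 160  -> stop
 *
 * i.e. old requests get only 3 SYN-ACK attempts instead of 5, freeing room
 * for fresh embryonic connections.
 */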

static void reqsk_queue_hash_req(struct request_sock *req,
				 unsigned long timeout)
{
	timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
	mod_timer(&req->rsk_timer, jiffies + timeout);

	inet_ehash_insert(req_to_sk(req), NULL, NULL);
	/* before letting lookups find us, make sure all req fields
	 * are committed to memory and refcnt initialized.
	 */
	smp_wmb();
	refcount_set(&req->rsk_refcnt, 2 + 1);
}

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	reqsk_queue_hash_req(req, timeout);
	inet_csk_reqsk_queue_added(sk);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
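
/* Note (hedged reading, not an authoritative breakdown): the "2 + 1" above
 * appears to mirror the refcount_set(..., 2 + 1) used for migrated requests
 * in reqsk_timer_handler(): the 2 covers the references that unlinking from
 * the ehash table and cancelling the pending rsk_timer will eventually drop,
 * while the extra +1 is the reference the caller of
 * inet_csk_reqsk_queue_hash_add() is still holding.
 */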

static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk,
			   const gfp_t priority)
{
	struct inet_connection_sock *icsk = inet_csk(newsk);

	if (!icsk->icsk_ulp_ops)
		return;

	icsk->icsk_ulp_ops->clone(req, newsk, priority);
}

/**
 * inet_csk_clone_lock - clone an inet socket, and lock its clone
 * @sk: the socket to clone
 * @req: request_sock
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
				 const struct request_sock *req,
				 const gfp_t priority)
{
	struct sock *newsk = sk_clone_lock(sk, priority);

	if (newsk) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_wait_pending = 0;
		inet_sk_set_state(newsk, TCP_SYN_RECV);
		newicsk->icsk_bind_hash = NULL;
		newicsk->icsk_bind2_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
		inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
		inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);

		/* listeners have SOCK_RCU_FREE, not the children */
		sock_reset_flag(newsk, SOCK_RCU_FREE);

		inet_sk(newsk)->mc_list = NULL;

		newsk->sk_mark = inet_rsk(req)->ir_mark;
		atomic64_set(&newsk->sk_cookie,
			     atomic64_read(&inet_rsk(req)->ir_cookie));

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;
		newicsk->icsk_probes_tstamp = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		inet_clone_ulp(req, newsk, priority);

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);
2005-08-10 11:15:09 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* At this point, there should be no process reference to this
|
|
|
|
* socket, and thus no user references at all. Therefore we
|
|
|
|
* can assume the socket waitqueue is inactive and nobody will
|
|
|
|
* try to jump onto it.
|
|
|
|
*/
|
|
|
|
void inet_csk_destroy_sock(struct sock *sk)
|
|
|
|
{
|
2008-07-26 12:43:18 +08:00
|
|
|
WARN_ON(sk->sk_state != TCP_CLOSE);
|
|
|
|
WARN_ON(!sock_flag(sk, SOCK_DEAD));
|
2005-08-10 11:15:09 +08:00
|
|
|
|
|
|
|
/* It cannot be in hash table! */
|
2008-07-26 12:43:18 +08:00
|
|
|
WARN_ON(!sk_unhashed(sk));
|
2005-08-10 11:15:09 +08:00
|
|
|
|
2009-10-15 14:30:45 +08:00
|
|
|
/* If it has a non-zero inet_sk(sk)->inet_num, it must be bound. */
|
|
|
|
WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);
|
2005-08-10 11:15:09 +08:00
|
|
|
|
|
|
|
sk->sk_prot->destroy(sk);
|
|
|
|
|
|
|
|
sk_stream_kill_queues(sk);
|
|
|
|
|
|
|
|
xfrm_sk_free_policy(sk);
|
|
|
|
|
2021-10-14 21:41:26 +08:00
|
|
|
this_cpu_dec(*sk->sk_prot->orphan_count);
|
2017-01-20 21:06:08 +08:00
|
|
|
|
2005-08-10 11:15:09 +08:00
|
|
|
sock_put(sk);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(inet_csk_destroy_sock);
|
|
|
|
|
inet: Fix kmemleak in tcp_v4/6_syn_recv_sock and dccp_v4/6_request_recv_sock
If in either of the above functions inet_csk_route_child_sock() or
__inet_inherit_port() fails, the newsk will not be freed:
unreferenced object 0xffff88022e8a92c0 (size 1592):
comm "softirq", pid 0, jiffies 4294946244 (age 726.160s)
hex dump (first 32 bytes):
0a 01 01 01 0a 01 01 02 00 00 00 00 a7 cc 16 00 ................
02 00 03 01 00 00 00 00 00 00 00 00 00 00 00 00 ................
backtrace:
[<ffffffff8153d190>] kmemleak_alloc+0x21/0x3e
[<ffffffff810ab3e7>] kmem_cache_alloc+0xb5/0xc5
[<ffffffff8149b65b>] sk_prot_alloc.isra.53+0x2b/0xcd
[<ffffffff8149b784>] sk_clone_lock+0x16/0x21e
[<ffffffff814d711a>] inet_csk_clone_lock+0x10/0x7b
[<ffffffff814ebbc3>] tcp_create_openreq_child+0x21/0x481
[<ffffffff814e8fa5>] tcp_v4_syn_recv_sock+0x3a/0x23b
[<ffffffff814ec5ba>] tcp_check_req+0x29f/0x416
[<ffffffff814e8e10>] tcp_v4_do_rcv+0x161/0x2bc
[<ffffffff814eb917>] tcp_v4_rcv+0x6c9/0x701
[<ffffffff814cea9f>] ip_local_deliver_finish+0x70/0xc4
[<ffffffff814cec20>] ip_local_deliver+0x4e/0x7f
[<ffffffff814ce9f8>] ip_rcv_finish+0x1fc/0x233
[<ffffffff814cee68>] ip_rcv+0x217/0x267
[<ffffffff814a7bbe>] __netif_receive_skb+0x49e/0x553
[<ffffffff814a7cc3>] netif_receive_skb+0x50/0x82
This happens because sk_clone_lock() initializes sk_refcnt to 2, and thus
a single sock_put() is not enough to free the memory. Additionally, things
like xfrm, memcg, cookie_values,... may have been initialized.
We have to free them properly.
This is fixed by forcing a call to tcp_done(), which ends up in
inet_csk_destroy_sock(), doing the final sock_put(). tcp_done() is necessary
because it ends up doing all the cleanup on xfrm, memcg, cookie_values, etc.
Before calling tcp_done(), we have to set the socket to SOCK_DEAD, to
force it to enter inet_csk_destroy_sock(). To avoid the warning in
inet_csk_destroy_sock(), inet_num has to be set to 0.
As inet_csk_destroy_sock() does a dec on orphan_count, we first have to
increase it.
Calling tcp_done() allows us to remove the calls to
tcp_clear_xmit_timer() and tcp_cleanup_congestion_control().
A similar approach is taken for dccp by calling dccp_done().
This has been in the kernel since 093d282321 ("tproxy: fix hash locking issue
when using port redirection in __inet_inherit_port()"), i.e. since
kernel version >= 2.6.37.
Signed-off-by: Christoph Paasch <christoph.paasch@uclouvain.be>
Signed-off-by: David S. Miller <davem@davemloft.net>
2012-12-14 12:07:58 +08:00
|
|
|
/* This function allows us to force a closure of a socket after the call to
|
|
|
|
* tcp/dccp_create_openreq_child().
|
|
|
|
*/
|
|
|
|
void inet_csk_prepare_forced_close(struct sock *sk)
|
2013-03-07 10:34:33 +08:00
|
|
|
__releases(&sk->sk_lock.slock)
|
2012-12-14 12:07:58 +08:00
|
|
|
{
|
|
|
|
/* sk_clone_lock locked the socket and set refcnt to 2 */
|
|
|
|
bh_unlock_sock(sk);
|
|
|
|
sock_put(sk);
|
2020-05-16 01:22:16 +08:00
|
|
|
inet_csk_prepare_for_destroy_sock(sk);
|
2020-06-05 00:55:45 +08:00
|
|
|
inet_sk(sk)->inet_num = 0;
|
2012-12-14 12:07:58 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(inet_csk_prepare_forced_close);
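A hedged kernel-style fragment (not a standalone, buildable unit) mirroring how callers such as tcp_v4_syn_recv_sock() are expected to use the helper above: once the child exists but inheriting the port fails, the child is torn down via tcp_done(), which ends in inet_csk_destroy_sock(). The variables newsk/sk and the exit label are assumed from that caller's context.

	/* Error path modelled on tcp_v4_syn_recv_sock(): "newsk" is the child
	 * created by tcp_create_openreq_child(), "sk" is the listener, and
	 * the "exit" label is assumed from that caller.
	 */
	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		/* tcp_done() marks the child dead and ends up in
		 * inet_csk_destroy_sock(), which drops the final reference
		 * and runs the xfrm/memcg cleanup described earlier.
		 */
		tcp_done(newsk);
		goto exit;
	}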
|
|
|
|
|
2023-01-03 19:19:17 +08:00
|
|
|
static int inet_ulp_can_listen(const struct sock *sk)
|
|
|
|
{
|
|
|
|
const struct inet_connection_sock *icsk = inet_csk(sk);
|
|
|
|
|
|
|
|
if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2021-11-22 18:16:21 +08:00
|
|
|
int inet_csk_listen_start(struct sock *sk)
|
2005-08-10 11:15:09 +08:00
|
|
|
{
|
|
|
|
struct inet_connection_sock *icsk = inet_csk(sk);
|
2015-10-03 02:43:36 +08:00
|
|
|
struct inet_sock *inet = inet_sk(sk);
|
2022-11-19 02:25:06 +08:00
|
|
|
int err;
|
2005-08-10 11:15:09 +08:00
|
|
|
|
2023-01-03 19:19:17 +08:00
|
|
|
err = inet_ulp_can_listen(sk);
|
|
|
|
if (unlikely(err))
|
|
|
|
return err;
|
|
|
|
|
2015-10-03 02:43:37 +08:00
|
|
|
reqsk_queue_alloc(&icsk->icsk_accept_queue);
|
2005-08-10 11:15:09 +08:00
|
|
|
|
|
|
|
sk->sk_ack_backlog = 0;
|
|
|
|
inet_csk_delack_init(sk);
|
|
|
|
|
|
|
|
/* There is a race window here: we announce ourselves listening,
|
|
|
|
* but this transition is still not validated by get_port().
|
|
|
|
* It is OK, because this socket enters the hash table only
|
|
|
|
* after validation is complete.
|
|
|
|
*/
|
2017-12-20 11:12:51 +08:00
|
|
|
inet_sk_state_store(sk, TCP_LISTEN);
|
2022-11-19 02:25:06 +08:00
|
|
|
err = sk->sk_prot->get_port(sk, inet->inet_num);
|
|
|
|
if (!err) {
|
2009-10-15 14:30:45 +08:00
|
|
|
inet->inet_sport = htons(inet->inet_num);
|
2005-08-10 11:15:09 +08:00
|
|
|
|
|
|
|
sk_dst_reset(sk);
|
2016-02-11 00:50:35 +08:00
|
|
|
err = sk->sk_prot->hash(sk);
|
2005-08-10 11:15:09 +08:00
|
|
|
|
2016-02-11 00:50:35 +08:00
|
|
|
if (likely(!err))
|
|
|
|
return 0;
|
2005-08-10 11:15:09 +08:00
|
|
|
}
|
|
|
|
|
2017-12-20 11:12:51 +08:00
|
|
|
inet_sk_set_state(sk, TCP_CLOSE);
|
2016-02-11 00:50:35 +08:00
|
|
|
return err;
|
2005-08-10 11:15:09 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
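For reference, the userspace sequence that reaches inet_csk_listen_start() is simply bind()+listen(); a minimal sketch (loopback address and backlog value chosen arbitrarily):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr.sin_port = htons(0);	/* let the kernel pick an ephemeral port */

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}

	/* listen() ends up in inet_csk_listen_start(); a failure here maps to
	 * the error paths above (e.g. a ULP without ->clone, or a port clash).
	 */
	if (listen(fd, 128) < 0) {
		perror("listen");
		return 1;
	}

	puts("socket is now in TCP_LISTEN");
	close(fd);
	return 0;
}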
|
|
|
|
|
2015-10-15 02:16:28 +08:00
|
|
|
static void inet_child_forget(struct sock *sk, struct request_sock *req,
|
|
|
|
struct sock *child)
|
|
|
|
{
|
|
|
|
sk->sk_prot->disconnect(child, O_NONBLOCK);
|
|
|
|
|
|
|
|
sock_orphan(child);
|
|
|
|
|
2021-10-14 21:41:26 +08:00
|
|
|
this_cpu_inc(*sk->sk_prot->orphan_count);
|
2015-10-15 02:16:28 +08:00
|
|
|
|
|
|
|
if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
|
2019-10-11 11:17:38 +08:00
|
|
|
BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
|
2015-10-15 02:16:28 +08:00
|
|
|
BUG_ON(sk != req->rsk_listener);
|
|
|
|
|
|
|
|
/* Paranoid, to prevent race condition if
|
|
|
|
* an inbound pkt destined for child is
|
|
|
|
* blocked by sock lock in tcp_v4_rcv().
|
|
|
|
* Also to satisfy an assertion in
|
|
|
|
* tcp_v4_destroy_sock().
|
|
|
|
*/
|
2019-10-11 11:17:38 +08:00
|
|
|
RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
|
2015-10-15 02:16:28 +08:00
|
|
|
}
|
|
|
|
inet_csk_destroy_sock(child);
|
|
|
|
}
|
|
|
|
|
2016-02-18 21:39:18 +08:00
|
|
|
struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
|
|
|
|
struct request_sock *req,
|
|
|
|
struct sock *child)
|
2015-10-15 02:16:28 +08:00
|
|
|
{
|
|
|
|
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
|
|
|
|
|
|
|
|
spin_lock(&queue->rskq_lock);
|
|
|
|
if (unlikely(sk->sk_state != TCP_LISTEN)) {
|
|
|
|
inet_child_forget(sk, req, child);
|
2016-02-18 21:39:18 +08:00
|
|
|
child = NULL;
|
2015-10-15 02:16:28 +08:00
|
|
|
} else {
|
|
|
|
req->sk = child;
|
|
|
|
req->dl_next = NULL;
|
|
|
|
if (queue->rskq_accept_head == NULL)
|
2019-10-10 05:51:20 +08:00
|
|
|
WRITE_ONCE(queue->rskq_accept_head, req);
|
2015-10-15 02:16:28 +08:00
|
|
|
else
|
|
|
|
queue->rskq_accept_tail->dl_next = req;
|
|
|
|
queue->rskq_accept_tail = req;
|
|
|
|
sk_acceptq_added(sk);
|
|
|
|
}
|
|
|
|
spin_unlock(&queue->rskq_lock);
|
2016-02-18 21:39:18 +08:00
|
|
|
return child;
|
2015-10-15 02:16:28 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
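The queueing logic above is a plain head/tail singly linked FIFO: an empty queue is detected by a NULL head, and new requests are chained onto the old tail. A small userspace illustration of the same append rule (struct names are invented for the example, not kernel types):

#include <stdio.h>
#include <stdlib.h>

struct fake_req {
	struct fake_req *dl_next;
	int id;
};

struct fake_queue {
	struct fake_req *head;
	struct fake_req *tail;
};

static void queue_add(struct fake_queue *q, struct fake_req *req)
{
	req->dl_next = NULL;
	if (!q->head)			/* empty queue: req becomes the head */
		q->head = req;
	else				/* otherwise append after the old tail */
		q->tail->dl_next = req;
	q->tail = req;
}

int main(void)
{
	struct fake_queue q = { NULL, NULL };
	struct fake_req *r;

	for (int i = 0; i < 3; i++) {
		r = calloc(1, sizeof(*r));
		if (!r)
			return 1;
		r->id = i;
		queue_add(&q, r);
	}

	r = q.head;
	while (r) {			/* drain in FIFO order */
		struct fake_req *next = r->dl_next;

		printf("queued req %d\n", r->id);
		free(r);
		r = next;
	}
	return 0;
}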
|
|
|
|
|
2015-10-22 23:20:46 +08:00
|
|
|
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
|
|
|
|
struct request_sock *req, bool own_req)
|
|
|
|
{
|
|
|
|
if (own_req) {
|
tcp: Migrate TCP_NEW_SYN_RECV requests at receiving the final ACK.
This patch also changes the code to call reuseport_migrate_sock() and
inet_reqsk_clone(), but unlike the other cases, we do not call
inet_reqsk_clone() right after reuseport_migrate_sock().
Currently, in the receive path for TCP_NEW_SYN_RECV sockets, the listener
has three kinds of refcnt:
(A) for the listener itself
(B) carried by the request_sock
(C) sock_hold() in tcp_v[46]_rcv()
While processing the req, (A) may disappear via close(listener). Also, (B)
can disappear via accept(listener) once we put the req into the accept
queue. So, we have to hold another refcnt (C) for the listener to prevent
use-after-free.
For socket migration, we call reuseport_migrate_sock() to select a listener
with (A) and to increment the new listener's refcnt in tcp_v[46]_rcv().
This refcnt corresponds to (C) and is cleaned up later in tcp_v[46]_rcv().
Thus we have to take another refcnt (B) for the newly cloned request_sock.
In inet_csk_complete_hashdance(), we hold the count (B), clone the req, and
try to put the new req into the accept queue. By migrating the req after
winning the "own_req" race, we can avoid the following worst-case situation:
CPU 1 looks up req1
CPU 2 looks up req1, unhashes it, then CPU 1 loses the race
CPU 3 looks up req2, unhashes it, then CPU 2 loses the race
...
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Link: https://lore.kernel.org/bpf/20210612123224.12525-8-kuniyu@amazon.co.jp
2021-06-12 20:32:20 +08:00
|
|
|
inet_csk_reqsk_queue_drop(req->rsk_listener, req);
|
|
|
|
reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
|
|
|
|
|
|
|
|
if (sk != req->rsk_listener) {
|
|
|
|
/* another listening sk has been selected,
|
|
|
|
* migrate the req to it.
|
|
|
|
*/
|
|
|
|
struct request_sock *nreq;
|
|
|
|
|
|
|
|
/* hold a refcnt for the nreq->rsk_listener
|
|
|
|
* which is assigned in inet_reqsk_clone()
|
|
|
|
*/
|
|
|
|
sock_hold(sk);
|
|
|
|
nreq = inet_reqsk_clone(req, sk);
|
|
|
|
if (!nreq) {
|
|
|
|
inet_child_forget(sk, req, child);
|
|
|
|
goto child_put;
|
|
|
|
}
|
|
|
|
|
|
|
|
refcount_set(&nreq->rsk_refcnt, 1);
|
|
|
|
if (inet_csk_reqsk_queue_add(sk, nreq, child)) {
|
2021-06-23 07:35:29 +08:00
|
|
|
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQSUCCESS);
|
2021-06-12 20:32:20 +08:00
|
|
|
reqsk_migrate_reset(req);
|
|
|
|
reqsk_put(req);
|
|
|
|
return child;
|
|
|
|
}
|
|
|
|
|
2021-06-23 07:35:29 +08:00
|
|
|
__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);
|
2021-06-12 20:32:20 +08:00
|
|
|
reqsk_migrate_reset(nreq);
|
|
|
|
__reqsk_free(nreq);
|
|
|
|
} else if (inet_csk_reqsk_queue_add(sk, req, child)) {
|
2016-02-18 21:39:18 +08:00
|
|
|
return child;
|
2021-06-12 20:32:20 +08:00
|
|
|
}
|
2015-10-22 23:20:46 +08:00
|
|
|
}
|
|
|
|
/* Too bad, another child took ownership of the request, undo. */
|
2021-06-12 20:32:20 +08:00
|
|
|
child_put:
|
2015-10-22 23:20:46 +08:00
|
|
|
bh_unlock_sock(child);
|
|
|
|
sock_put(child);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(inet_csk_complete_hashdance);
|
|
|
|
|
2005-08-10 11:15:09 +08:00
|
|
|
/*
|
|
|
|
* This routine closes sockets which have been at least partially
|
|
|
|
* opened, but not yet accepted.
|
|
|
|
*/
|
|
|
|
void inet_csk_listen_stop(struct sock *sk)
|
|
|
|
{
|
|
|
|
struct inet_connection_sock *icsk = inet_csk(sk);
|
2012-08-31 20:29:12 +08:00
|
|
|
struct request_sock_queue *queue = &icsk->icsk_accept_queue;
|
2015-10-03 02:43:23 +08:00
|
|
|
struct request_sock *next, *req;
|
2005-08-10 11:15:09 +08:00
|
|
|
|
|
|
|
/* Following specs, it would be better either to send FIN
|
|
|
|
* (and enter FIN-WAIT-1, it is normal close)
|
|
|
|
* or to send active reset (abort).
|
|
|
|
* Certainly, it is pretty dangerous during a synflood, but that is
|
|
|
|
* a bad justification for our negligence 8)
|
|
|
|
* To be honest, we are not able to make either
|
|
|
|
* of the variants now. --ANK
|
|
|
|
*/
|
2015-10-03 02:43:23 +08:00
|
|
|
while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
|
2021-06-12 20:32:18 +08:00
|
|
|
struct sock *child = req->sk, *nsk;
|
|
|
|
struct request_sock *nreq;
|
2005-08-10 11:15:09 +08:00
|
|
|
|
|
|
|
local_bh_disable();
|
|
|
|
bh_lock_sock(child);
|
2008-07-26 12:43:18 +08:00
|
|
|
WARN_ON(sock_owned_by_user(child));
|
2005-08-10 11:15:09 +08:00
|
|
|
sock_hold(child);
|
|
|
|
|
2021-06-12 20:32:18 +08:00
|
|
|
nsk = reuseport_migrate_sock(sk, child, NULL);
|
|
|
|
if (nsk) {
|
|
|
|
nreq = inet_reqsk_clone(req, nsk);
|
|
|
|
if (nreq) {
|
|
|
|
refcount_set(&nreq->rsk_refcnt, 1);
|
|
|
|
|
|
|
|
if (inet_csk_reqsk_queue_add(nsk, nreq, child)) {
|
2021-06-23 07:35:29 +08:00
|
|
|
__NET_INC_STATS(sock_net(nsk),
|
|
|
|
LINUX_MIB_TCPMIGRATEREQSUCCESS);
|
2021-06-12 20:32:18 +08:00
|
|
|
reqsk_migrate_reset(req);
|
|
|
|
} else {
|
2021-06-23 07:35:29 +08:00
|
|
|
__NET_INC_STATS(sock_net(nsk),
|
|
|
|
LINUX_MIB_TCPMIGRATEREQFAILURE);
|
2021-06-12 20:32:18 +08:00
|
|
|
reqsk_migrate_reset(nreq);
|
|
|
|
__reqsk_free(nreq);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* inet_csk_reqsk_queue_add() has already
|
|
|
|
* called inet_child_forget() in the failure case.
|
|
|
|
*/
|
|
|
|
goto skip_child_forget;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-10-15 02:16:28 +08:00
|
|
|
inet_child_forget(sk, req, child);
|
2021-06-12 20:32:18 +08:00
|
|
|
skip_child_forget:
|
2017-09-12 06:58:38 +08:00
|
|
|
reqsk_put(req);
|
2005-08-10 11:15:09 +08:00
|
|
|
bh_unlock_sock(child);
|
|
|
|
local_bh_enable();
|
|
|
|
sock_put(child);
|
|
|
|
|
2015-10-03 02:43:38 +08:00
|
|
|
cond_resched();
|
2005-08-10 11:15:09 +08:00
|
|
|
}
|
2015-09-29 22:42:52 +08:00
|
|
|
if (queue->fastopenq.rskq_rst_head) {
|
2012-08-31 20:29:12 +08:00
|
|
|
/* Free all the reqs queued in rskq_rst_head. */
|
2015-09-29 22:42:52 +08:00
|
|
|
spin_lock_bh(&queue->fastopenq.lock);
|
2015-10-03 02:43:23 +08:00
|
|
|
req = queue->fastopenq.rskq_rst_head;
|
2015-09-29 22:42:52 +08:00
|
|
|
queue->fastopenq.rskq_rst_head = NULL;
|
|
|
|
spin_unlock_bh(&queue->fastopenq.lock);
|
2015-10-03 02:43:23 +08:00
|
|
|
while (req != NULL) {
|
|
|
|
next = req->dl_next;
|
2015-03-16 12:12:16 +08:00
|
|
|
reqsk_put(req);
|
2015-10-03 02:43:23 +08:00
|
|
|
req = next;
|
2012-08-31 20:29:12 +08:00
|
|
|
}
|
|
|
|
}
|
2015-10-15 02:16:28 +08:00
|
|
|
WARN_ON_ONCE(sk->sk_ack_backlog);
|
2005-08-10 11:15:09 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
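A hedged userspace sketch of the setup under which the migration branches above matter: two listeners sharing a port via SO_REUSEPORT. When one is closed with requests still parked on it, those requests can be migrated to the remaining listener if the request-migration sysctl (commonly net.ipv4.tcp_migrate_req; treat the exact name as an assumption) is enabled; otherwise inet_csk_listen_stop() simply forgets and resets them. Port 8080 is arbitrary.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int make_listener(unsigned short port)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr;
	int one = 1;

	if (fd < 0)
		return -1;
	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr.sin_port = htons(port);

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 128) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}

int main(void)
{
	int a = make_listener(8080);
	int b = make_listener(8080);	/* same port, same reuseport group */

	if (a < 0 || b < 0) {
		perror("listener");
		return 1;
	}
	/* Closing one listener leaves the other to pick up (or be migrated)
	 * pending requests, per the logic in inet_csk_listen_stop().
	 */
	close(a);
	sleep(1);
	close(b);
	return 0;
}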
|
2005-12-14 15:16:04 +08:00
|
|
|
|
|
|
|
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
|
|
|
|
{
|
|
|
|
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
|
|
|
|
const struct inet_sock *inet = inet_sk(sk);
|
|
|
|
|
|
|
|
sin->sin_family = AF_INET;
|
2009-10-15 14:30:45 +08:00
|
|
|
sin->sin_addr.s_addr = inet->inet_daddr;
|
|
|
|
sin->sin_port = inet->inet_dport;
|
2005-12-14 15:16:04 +08:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
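From userspace, the same information is what getpeername() returns on a connected TCP socket. A minimal sketch; the destination address is an arbitrary illustrative value, and the program needs network reachability to that address to succeed.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in dst, peer;
	socklen_t len = sizeof(peer);

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(80);
	inet_pton(AF_INET, "93.184.216.34", &dst.sin_addr);	/* illustrative peer */

	if (fd < 0 || connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("connect");
		return 1;
	}

	/* The remote address/port mirror inet_daddr/inet_dport on the sock. */
	if (getpeername(fd, (struct sockaddr *)&peer, &len) == 0)
		printf("connected to %s:%u\n",
		       inet_ntoa(peer.sin_addr), ntohs(peer.sin_port));
	close(fd);
	return 0;
}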
|
2006-03-21 14:01:03 +08:00
|
|
|
|
2012-07-16 18:28:06 +08:00
|
|
|
static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
|
|
|
|
{
|
2012-07-18 04:42:13 +08:00
|
|
|
const struct inet_sock *inet = inet_sk(sk);
|
|
|
|
const struct ip_options_rcu *inet_opt;
|
2012-07-16 18:28:06 +08:00
|
|
|
__be32 daddr = inet->inet_daddr;
|
|
|
|
struct flowi4 *fl4;
|
|
|
|
struct rtable *rt;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
|
|
|
inet_opt = rcu_dereference(inet->inet_opt);
|
|
|
|
if (inet_opt && inet_opt->opt.srr)
|
|
|
|
daddr = inet_opt->opt.faddr;
|
|
|
|
fl4 = &fl->u.ip4;
|
|
|
|
rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
|
|
|
|
inet->inet_saddr, inet->inet_dport,
|
|
|
|
inet->inet_sport, sk->sk_protocol,
|
|
|
|
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
|
|
|
|
if (IS_ERR(rt))
|
|
|
|
rt = NULL;
|
|
|
|
if (rt)
|
|
|
|
sk_setup_caps(sk, &rt->dst);
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
|
|
|
return &rt->dst;
|
|
|
|
}
|
|
|
|
|
|
|
|
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
|
|
|
|
{
|
|
|
|
struct dst_entry *dst = __sk_dst_check(sk, 0);
|
|
|
|
struct inet_sock *inet = inet_sk(sk);
|
|
|
|
|
|
|
|
if (!dst) {
|
|
|
|
dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
|
|
|
|
if (!dst)
|
|
|
|
goto out;
|
|
|
|
}
|
2019-12-22 10:51:09 +08:00
|
|
|
dst->ops->update_pmtu(dst, sk, NULL, mtu, true);
|
2012-07-16 18:28:06 +08:00
|
|
|
|
|
|
|
dst = __sk_dst_check(sk, 0);
|
|
|
|
if (!dst)
|
|
|
|
dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
|
|
|
|
out:
|
|
|
|
return dst;
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);
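Related, from the userspace side: on Linux, a connected socket's currently cached path MTU can be read with the IP_MTU socket option, which reflects the route metrics that inet_csk_update_pmtu() keeps up to date when ICMP "fragmentation needed" feedback arrives. A minimal sketch, assuming something is listening on the chosen loopback port.

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in dst;
	int mtu = 0;
	socklen_t len = sizeof(mtu);

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(80);			/* assumed local listener */
	dst.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	if (fd < 0 || connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("connect");
		return 1;
	}

	/* IP_MTU (Linux-specific) reports the path MTU cached on the route. */
	if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
		printf("current path MTU: %d\n", mtu);
	close(fd);
	return 0;
}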
|