net/tcp: Allow asynchronous delete for TCP-AO keys (MKTs)

Delete becomes very, very fast - almost free, but after the setsockopt()
syscall returns, the key is still alive until the next RCU grace period.
That is fine for listen sockets, as userspace has to be aware of the
setsockopt(TCP_AO) vs accept() race anyway and resolve it by verifying
the keys with getsockopt() after the TCP connection has been accepted.
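
For illustration only, a minimal userspace sketch of an asynchronous delete
on a listening socket. The helper name, address handling and key IDs are made
up for the example; struct tcp_ao_del and TCP_AO_DEL_KEY are the existing
uapi, ::del_async is the bit this patch adds:

  #include <string.h>
  #include <sys/socket.h>
  #include <netinet/in.h>
  #include <linux/tcp.h>

  /* Hypothetical helper: unlink one MKT on a listening socket and let the
   * kernel free it after the next RCU grace period, without blocking the
   * caller in synchronize_rcu().
   */
  static int tcp_ao_del_key_async(int lsk, const struct sockaddr_in *peer,
                                  __u8 prefix, __u8 sndid, __u8 rcvid)
  {
          struct tcp_ao_del del = {};

          memcpy(&del.addr, peer, sizeof(*peer));
          del.prefix    = prefix;   /* e.g. 32 for a single IPv4 peer */
          del.sndid     = sndid;
          del.rcvid     = rcvid;
          del.del_async = 1;        /* only valid while lsk is TCP_LISTEN */

          return setsockopt(lsk, IPPROTO_TCP, TCP_AO_DEL_KEY,
                            &del, sizeof(del));
  }

Connections accepted around the same time may still have matched the key,
hence the getsockopt() verification mentioned above for callers that need
to know.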

The benchmark results (on a non-loaded box; worse with more RCU work pending):
> ok 33    Worst case delete    16384 keys: min=5ms max=10ms mean=6.93904ms stddev=0.263421
> ok 34        Add a new key    16384 keys: min=1ms max=4ms mean=2.17751ms stddev=0.147564
> ok 35 Remove random-search    16384 keys: min=5ms max=10ms mean=6.50243ms stddev=0.254999
> ok 36         Remove async    16384 keys: min=0ms max=0ms mean=0.0296107ms stddev=0.0172078

Co-developed-by: Francesco Ruggeri <fruggeri@arista.com>
Signed-off-by: Francesco Ruggeri <fruggeri@arista.com>
Co-developed-by: Salam Noureddine <noureddine@arista.com>
Signed-off-by: Salam Noureddine <noureddine@arista.com>
Signed-off-by: Dmitry Safonov <dima@arista.com>
Acked-by: David Ahern <dsahern@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -396,7 +396,8 @@ struct tcp_ao_del { /* setsockopt(TCP_AO_DEL_KEY) */
 	__s32	ifindex;	/* L3 dev index for VRF */
 	__u32	set_current	:1,	/* corresponding ::current_key */
 		set_rnext	:1,	/* corresponding ::rnext */
-		reserved	:30;	/* must be 0 */
+		del_async	:1,	/* only valid for listen sockets */
+		reserved	:29;	/* must be 0 */
 	__u16	reserved2;	/* padding, must be 0 */
 	__u8	prefix;		/* peer's address prefix */
 	__u8	sndid;		/* SendID for outgoing segments */

--- a/net/ipv4/tcp_ao.c
+++ b/net/ipv4/tcp_ao.c
@@ -1628,7 +1628,7 @@ err_free_ao:
 }
 
 static int tcp_ao_delete_key(struct sock *sk, struct tcp_ao_info *ao_info,
-			     struct tcp_ao_key *key,
+			     bool del_async, struct tcp_ao_key *key,
			     struct tcp_ao_key *new_current,
			     struct tcp_ao_key *new_rnext)
 {
@@ -1636,11 +1636,24 @@ static int tcp_ao_delete_key(struct sock *sk, struct tcp_ao_info *ao_info,
 
 	hlist_del_rcu(&key->node);
 
+	/* Support for async delete on listening sockets: as they don't
+	 * need current_key/rnext_key maintaining, we don't need to check
+	 * them and we can just free all resources in RCU fashion.
+	 */
+	if (del_async) {
+		atomic_sub(tcp_ao_sizeof_key(key), &sk->sk_omem_alloc);
+		call_rcu(&key->rcu, tcp_ao_key_free_rcu);
+		return 0;
+	}
+
 	/* At this moment another CPU could have looked this key up
 	 * while it was unlinked from the list. Wait for RCU grace period,
 	 * after which the key is off-list and can't be looked up again;
 	 * the rx path [just before RCU came] might have used it and set it
 	 * as current_key (very unlikely).
+	 * Free the key with next RCU grace period (in case it was
+	 * current_key before tcp_ao_current_rnext() might have
+	 * changed it in forced-delete).
 	 */
 	synchronize_rcu();
 	if (new_current)
@@ -1711,6 +1724,8 @@ static int tcp_ao_del_cmd(struct sock *sk, unsigned short int family,
 		if (!new_rnext)
 			return -ENOENT;
 	}
+	if (cmd.del_async && sk->sk_state != TCP_LISTEN)
+		return -EINVAL;
 
 	if (family == AF_INET) {
 		struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.addr;
@@ -1758,8 +1773,8 @@ static int tcp_ao_del_cmd(struct sock *sk, unsigned short int family,
 		if (key == new_current || key == new_rnext)
 			continue;
 
-		return tcp_ao_delete_key(sk, ao_info, key,
-					 new_current, new_rnext);
+		return tcp_ao_delete_key(sk, ao_info, cmd.del_async, key,
+					 new_current, new_rnext);
 	}
 	return -ENOENT;
 }