mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-15 08:14:15 +08:00
tcp/dccp: fix possible race __inet_lookup_established()
commit 8dbd76e79a
upstream. Michal Kubecek and Firo Yang did a very nice analysis of crashes happening in __inet_lookup_established(). Since a TCP socket can go from TCP_ESTABLISH to TCP_LISTEN (via a close()/socket()/listen() cycle) without a RCU grace period, I should not have changed listeners linkage in their hash table. They must use the nulls protocol (Documentation/RCU/rculist_nulls.txt), so that a lookup can detect a socket in a hash list was moved in another one. Since we added code in commit d296ba60d8
("soreuseport: Resolve merge conflict for v4/v6 ordering fix"), we have to add hlist_nulls_add_tail_rcu() helper. Fixes: 3b24d854cb
("tcp/dccp: do not touch listener sk_refcnt under synflood") Signed-off-by: Eric Dumazet <edumazet@google.com> Reported-by: Michal Kubecek <mkubecek@suse.cz> Reported-by: Firo Yang <firo.yang@suse.com> Reviewed-by: Michal Kubecek <mkubecek@suse.cz> Link: https://lore.kernel.org/netdev/20191120083919.GH27852@unicorn.suse.cz/ Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com> [stable-4.14: we also need to update code in __inet_lookup_listener() and inet6_lookup_listener() which has been removed in 5.0-rc1.] Signed-off-by: Michal Kubecek <mkubecek@suse.cz> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
0ddfaacfb7
commit
94671cf125
@ -100,6 +100,43 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
|
||||
first->pprev = &n->next;
|
||||
}
|
||||
|
||||
/**
|
||||
* hlist_nulls_add_tail_rcu
|
||||
* @n: the element to add to the hash list.
|
||||
* @h: the list to add to.
|
||||
*
|
||||
* Description:
|
||||
* Adds the specified element to the specified hlist_nulls,
|
||||
* while permitting racing traversals.
|
||||
*
|
||||
* The caller must take whatever precautions are necessary
|
||||
* (such as holding appropriate locks) to avoid racing
|
||||
* with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
|
||||
* or hlist_nulls_del_rcu(), running on this same list.
|
||||
* However, it is perfectly legal to run concurrently with
|
||||
* the _rcu list-traversal primitives, such as
|
||||
* hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
|
||||
* problems on Alpha CPUs. Regardless of the type of CPU, the
|
||||
* list-traversal primitive must be guarded by rcu_read_lock().
|
||||
*/
|
||||
static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
|
||||
struct hlist_nulls_head *h)
|
||||
{
|
||||
struct hlist_nulls_node *i, *last = NULL;
|
||||
|
||||
/* Note: write side code, so rcu accessors are not needed. */
|
||||
for (i = h->first; !is_a_nulls(i); i = i->next)
|
||||
last = i;
|
||||
|
||||
if (last) {
|
||||
n->next = last->next;
|
||||
n->pprev = &last->next;
|
||||
rcu_assign_pointer(hlist_next_rcu(last), n);
|
||||
} else {
|
||||
hlist_nulls_add_head_rcu(n, h);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
|
||||
* @tpos: the type * to use as a loop cursor.
|
||||
|
@ -106,12 +106,18 @@ struct inet_bind_hashbucket {
|
||||
struct hlist_head chain;
|
||||
};
|
||||
|
||||
/*
|
||||
* Sockets can be hashed in established or listening table
|
||||
/* Sockets can be hashed in established or listening table.
|
||||
* We must use different 'nulls' end-of-chain value for all hash buckets :
|
||||
* A socket might transition from ESTABLISH to LISTEN state without
|
||||
* RCU grace period. A lookup in ehash table needs to handle this case.
|
||||
*/
|
||||
#define LISTENING_NULLS_BASE (1U << 29)
|
||||
struct inet_listen_hashbucket {
|
||||
spinlock_t lock;
|
||||
struct hlist_head head;
|
||||
union {
|
||||
struct hlist_head head;
|
||||
struct hlist_nulls_head nulls_head;
|
||||
};
|
||||
};
|
||||
|
||||
/* This is for listening sockets, thus all sockets which possess wildcards. */
|
||||
|
@ -693,6 +693,11 @@ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_h
|
||||
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
|
||||
}
|
||||
|
||||
static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
|
||||
{
|
||||
hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
|
||||
}
|
||||
|
||||
static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
|
||||
{
|
||||
sock_hold(sk);
|
||||
|
@ -911,11 +911,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
|
||||
|
||||
for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
|
||||
struct inet_listen_hashbucket *ilb;
|
||||
struct hlist_nulls_node *node;
|
||||
|
||||
num = 0;
|
||||
ilb = &hashinfo->listening_hash[i];
|
||||
spin_lock(&ilb->lock);
|
||||
sk_for_each(sk, &ilb->head) {
|
||||
sk_nulls_for_each(sk, node, &ilb->nulls_head) {
|
||||
struct inet_sock *inet = inet_sk(sk);
|
||||
|
||||
if (!net_eq(sock_net(sk), net))
|
||||
|
@ -219,9 +219,10 @@ struct sock *__inet_lookup_listener(struct net *net,
|
||||
int score, hiscore = 0, matches = 0, reuseport = 0;
|
||||
bool exact_dif = inet_exact_dif_match(net, skb);
|
||||
struct sock *sk, *result = NULL;
|
||||
struct hlist_nulls_node *node;
|
||||
u32 phash = 0;
|
||||
|
||||
sk_for_each_rcu(sk, &ilb->head) {
|
||||
sk_nulls_for_each_rcu(sk, node, &ilb->nulls_head) {
|
||||
score = compute_score(sk, net, hnum, daddr,
|
||||
dif, sdif, exact_dif);
|
||||
if (score > hiscore) {
|
||||
@ -442,10 +443,11 @@ static int inet_reuseport_add_sock(struct sock *sk,
|
||||
struct inet_listen_hashbucket *ilb)
|
||||
{
|
||||
struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
|
||||
const struct hlist_nulls_node *node;
|
||||
struct sock *sk2;
|
||||
kuid_t uid = sock_i_uid(sk);
|
||||
|
||||
sk_for_each_rcu(sk2, &ilb->head) {
|
||||
sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
|
||||
if (sk2 != sk &&
|
||||
sk2->sk_family == sk->sk_family &&
|
||||
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
|
||||
@ -480,9 +482,9 @@ int __inet_hash(struct sock *sk, struct sock *osk)
|
||||
}
|
||||
if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
|
||||
sk->sk_family == AF_INET6)
|
||||
hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
|
||||
__sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
|
||||
else
|
||||
hlist_add_head_rcu(&sk->sk_node, &ilb->head);
|
||||
__sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
|
||||
sock_set_flag(sk, SOCK_RCU_FREE);
|
||||
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
|
||||
unlock:
|
||||
@ -525,10 +527,7 @@ void inet_unhash(struct sock *sk)
|
||||
spin_lock_bh(lock);
|
||||
if (rcu_access_pointer(sk->sk_reuseport_cb))
|
||||
reuseport_detach_sock(sk);
|
||||
if (listener)
|
||||
done = __sk_del_node_init(sk);
|
||||
else
|
||||
done = __sk_nulls_del_node_init_rcu(sk);
|
||||
done = __sk_nulls_del_node_init_rcu(sk);
|
||||
if (done)
|
||||
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
|
||||
spin_unlock_bh(lock);
|
||||
@ -664,7 +663,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
|
||||
|
||||
for (i = 0; i < INET_LHTABLE_SIZE; i++) {
|
||||
spin_lock_init(&h->listening_hash[i].lock);
|
||||
INIT_HLIST_HEAD(&h->listening_hash[i].head);
|
||||
INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head,
|
||||
i + LISTENING_NULLS_BASE);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(inet_hashinfo_init);
|
||||
|
@ -1936,13 +1936,14 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
|
||||
struct tcp_iter_state *st = seq->private;
|
||||
struct net *net = seq_file_net(seq);
|
||||
struct inet_listen_hashbucket *ilb;
|
||||
struct hlist_nulls_node *node;
|
||||
struct sock *sk = cur;
|
||||
|
||||
if (!sk) {
|
||||
get_head:
|
||||
ilb = &tcp_hashinfo.listening_hash[st->bucket];
|
||||
spin_lock(&ilb->lock);
|
||||
sk = sk_head(&ilb->head);
|
||||
sk = sk_nulls_head(&ilb->nulls_head);
|
||||
st->offset = 0;
|
||||
goto get_sk;
|
||||
}
|
||||
@ -1950,9 +1951,9 @@ get_head:
|
||||
++st->num;
|
||||
++st->offset;
|
||||
|
||||
sk = sk_next(sk);
|
||||
sk = sk_nulls_next(sk);
|
||||
get_sk:
|
||||
sk_for_each_from(sk) {
|
||||
sk_nulls_for_each_from(sk, node) {
|
||||
if (!net_eq(sock_net(sk), net))
|
||||
continue;
|
||||
if (sk->sk_family == st->family)
|
||||
|
@ -137,9 +137,10 @@ struct sock *inet6_lookup_listener(struct net *net,
|
||||
int score, hiscore = 0, matches = 0, reuseport = 0;
|
||||
bool exact_dif = inet6_exact_dif_match(net, skb);
|
||||
struct sock *sk, *result = NULL;
|
||||
struct hlist_nulls_node *node;
|
||||
u32 phash = 0;
|
||||
|
||||
sk_for_each(sk, &ilb->head) {
|
||||
sk_nulls_for_each(sk, node, &ilb->nulls_head) {
|
||||
score = compute_score(sk, net, hnum, daddr, dif, sdif, exact_dif);
|
||||
if (score > hiscore) {
|
||||
reuseport = sk->sk_reuseport;
|
||||
|
Loading…
Reference in New Issue
Block a user