mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-19 18:24:14 +08:00
netfilter: conntrack: fix race between nf_conntrack proc read and hash resize
When we do "cat /proc/net/nf_conntrack", and meanwhile resize the conntrack
hash table via /sys/module/nf_conntrack/parameters/hashsize, race will
happen, because reader can observe a newly allocated hash but the old size
(or vice versa). So an oops will happen, as follows:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000017
IP: [<ffffffffa0418e21>] seq_print_acct+0x11/0x50 [nf_conntrack]
Call Trace:
[<ffffffffa0412f4e>] ? ct_seq_show+0x14e/0x340 [nf_conntrack]
[<ffffffff81261a1c>] seq_read+0x2cc/0x390
[<ffffffff812a8d62>] proc_reg_read+0x42/0x70
[<ffffffff8123bee7>] __vfs_read+0x37/0x130
[<ffffffff81347980>] ? security_file_permission+0xa0/0xc0
[<ffffffff8123cf75>] vfs_read+0x95/0x140
[<ffffffff8123e475>] SyS_read+0x55/0xc0
[<ffffffff817c2572>] entry_SYSCALL_64_fastpath+0x1a/0xa4
It is very easy to reproduce this kernel crash.
1. open one shell and input the following cmds:
while : ; do
echo $RANDOM > /sys/module/nf_conntrack/parameters/hashsize
done
2. open more shells and input the following cmds:
while : ; do
cat /proc/net/nf_conntrack
done
3. just wait a moment, an oops will happen soon.
The solution in this patch is based on Florian's Commit 5e3c61f981
("netfilter: conntrack: fix lookup race during hash resize"). And
add a wrapper function nf_conntrack_get_ht to get hash and hsize
suggested by Florian Westphal.
Signed-off-by: Liping Zhang <liping.zhang@spreadtrum.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
This commit is contained in:
parent
a90a6e55f3
commit
64b87639c9
@ -51,6 +51,8 @@ bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
|
|||||||
const struct nf_conntrack_l3proto *l3proto,
|
const struct nf_conntrack_l3proto *l3proto,
|
||||||
const struct nf_conntrack_l4proto *l4proto);
|
const struct nf_conntrack_l4proto *l4proto);
|
||||||
|
|
||||||
|
void nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize);
|
||||||
|
|
||||||
/* Find a connection corresponding to a tuple. */
|
/* Find a connection corresponding to a tuple. */
|
||||||
struct nf_conntrack_tuple_hash *
|
struct nf_conntrack_tuple_hash *
|
||||||
nf_conntrack_find_get(struct net *net,
|
nf_conntrack_find_get(struct net *net,
|
||||||
|
@ -26,6 +26,8 @@
|
|||||||
|
|
||||||
struct ct_iter_state {
|
struct ct_iter_state {
|
||||||
struct seq_net_private p;
|
struct seq_net_private p;
|
||||||
|
struct hlist_nulls_head *hash;
|
||||||
|
unsigned int htable_size;
|
||||||
unsigned int bucket;
|
unsigned int bucket;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -35,10 +37,10 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
|
|||||||
struct hlist_nulls_node *n;
|
struct hlist_nulls_node *n;
|
||||||
|
|
||||||
for (st->bucket = 0;
|
for (st->bucket = 0;
|
||||||
st->bucket < nf_conntrack_htable_size;
|
st->bucket < st->htable_size;
|
||||||
st->bucket++) {
|
st->bucket++) {
|
||||||
n = rcu_dereference(
|
n = rcu_dereference(
|
||||||
hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
|
hlist_nulls_first_rcu(&st->hash[st->bucket]));
|
||||||
if (!is_a_nulls(n))
|
if (!is_a_nulls(n))
|
||||||
return n;
|
return n;
|
||||||
}
|
}
|
||||||
@ -53,11 +55,11 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
|
|||||||
head = rcu_dereference(hlist_nulls_next_rcu(head));
|
head = rcu_dereference(hlist_nulls_next_rcu(head));
|
||||||
while (is_a_nulls(head)) {
|
while (is_a_nulls(head)) {
|
||||||
if (likely(get_nulls_value(head) == st->bucket)) {
|
if (likely(get_nulls_value(head) == st->bucket)) {
|
||||||
if (++st->bucket >= nf_conntrack_htable_size)
|
if (++st->bucket >= st->htable_size)
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
head = rcu_dereference(
|
head = rcu_dereference(
|
||||||
hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
|
hlist_nulls_first_rcu(&st->hash[st->bucket]));
|
||||||
}
|
}
|
||||||
return head;
|
return head;
|
||||||
}
|
}
|
||||||
@ -75,7 +77,11 @@ static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
|
|||||||
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
|
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
|
||||||
__acquires(RCU)
|
__acquires(RCU)
|
||||||
{
|
{
|
||||||
|
struct ct_iter_state *st = seq->private;
|
||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
|
|
||||||
|
nf_conntrack_get_ht(&st->hash, &st->htable_size);
|
||||||
return ct_get_idx(seq, *pos);
|
return ct_get_idx(seq, *pos);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -460,6 +460,23 @@ nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
|
|||||||
net_eq(net, nf_ct_net(ct));
|
net_eq(net, nf_ct_net(ct));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* must be called with rcu read lock held */
|
||||||
|
void nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
|
||||||
|
{
|
||||||
|
struct hlist_nulls_head *hptr;
|
||||||
|
unsigned int sequence, hsz;
|
||||||
|
|
||||||
|
do {
|
||||||
|
sequence = read_seqcount_begin(&nf_conntrack_generation);
|
||||||
|
hsz = nf_conntrack_htable_size;
|
||||||
|
hptr = nf_conntrack_hash;
|
||||||
|
} while (read_seqcount_retry(&nf_conntrack_generation, sequence));
|
||||||
|
|
||||||
|
*hash = hptr;
|
||||||
|
*hsize = hsz;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL_GPL(nf_conntrack_get_ht);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Warning :
|
* Warning :
|
||||||
* - Caller must take a reference on returned object
|
* - Caller must take a reference on returned object
|
||||||
|
@ -48,6 +48,8 @@ EXPORT_SYMBOL_GPL(print_tuple);
|
|||||||
|
|
||||||
struct ct_iter_state {
|
struct ct_iter_state {
|
||||||
struct seq_net_private p;
|
struct seq_net_private p;
|
||||||
|
struct hlist_nulls_head *hash;
|
||||||
|
unsigned int htable_size;
|
||||||
unsigned int bucket;
|
unsigned int bucket;
|
||||||
u_int64_t time_now;
|
u_int64_t time_now;
|
||||||
};
|
};
|
||||||
@ -58,9 +60,10 @@ static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
|
|||||||
struct hlist_nulls_node *n;
|
struct hlist_nulls_node *n;
|
||||||
|
|
||||||
for (st->bucket = 0;
|
for (st->bucket = 0;
|
||||||
st->bucket < nf_conntrack_htable_size;
|
st->bucket < st->htable_size;
|
||||||
st->bucket++) {
|
st->bucket++) {
|
||||||
n = rcu_dereference(hlist_nulls_first_rcu(&nf_conntrack_hash[st->bucket]));
|
n = rcu_dereference(
|
||||||
|
hlist_nulls_first_rcu(&st->hash[st->bucket]));
|
||||||
if (!is_a_nulls(n))
|
if (!is_a_nulls(n))
|
||||||
return n;
|
return n;
|
||||||
}
|
}
|
||||||
@ -75,12 +78,11 @@ static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
|
|||||||
head = rcu_dereference(hlist_nulls_next_rcu(head));
|
head = rcu_dereference(hlist_nulls_next_rcu(head));
|
||||||
while (is_a_nulls(head)) {
|
while (is_a_nulls(head)) {
|
||||||
if (likely(get_nulls_value(head) == st->bucket)) {
|
if (likely(get_nulls_value(head) == st->bucket)) {
|
||||||
if (++st->bucket >= nf_conntrack_htable_size)
|
if (++st->bucket >= st->htable_size)
|
||||||
return NULL;
|
return NULL;
|
||||||
}
|
}
|
||||||
head = rcu_dereference(
|
head = rcu_dereference(
|
||||||
hlist_nulls_first_rcu(
|
hlist_nulls_first_rcu(&st->hash[st->bucket]));
|
||||||
&nf_conntrack_hash[st->bucket]));
|
|
||||||
}
|
}
|
||||||
return head;
|
return head;
|
||||||
}
|
}
|
||||||
@ -102,6 +104,8 @@ static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
|
|||||||
|
|
||||||
st->time_now = ktime_get_real_ns();
|
st->time_now = ktime_get_real_ns();
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
|
|
||||||
|
nf_conntrack_get_ht(&st->hash, &st->htable_size);
|
||||||
return ct_get_idx(seq, *pos);
|
return ct_get_idx(seq, *pos);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user