netfilter: nft_set_rbtree: fix overlap expiration walk
The lazy gc on insert, which should remove timed-out entries, fails to release
the other half of the interval, if any.
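
For context, the rbtree backend stores each interval as two tree nodes: a
start element that carries the timeout extension and a paired end element
that does not, so gc has to release both halves. A minimal standalone C
sketch of that ownership rule (toy types and helper names, invented for
illustration, not kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Toy interval set: every interval allocates a start node (which owns
 * the timeout) and a paired end node (which has none of its own).  A
 * gc that frees only the expired start node leaks the end node.
 */
struct toy_node {
	struct toy_node *pair;	/* start <-> end linkage */
	int is_end;
};

static struct toy_node *toy_interval_new(void)
{
	struct toy_node *start = calloc(1, sizeof(*start));
	struct toy_node *end = calloc(1, sizeof(*end));

	if (!start || !end)
		abort();

	start->pair = end;
	end->pair = start;
	end->is_end = 1;
	return start;
}

/* Correct gc: release both halves of the interval. */
static void toy_gc(struct toy_node *start)
{
	free(start->pair);	/* omitting this line is the leak */
	free(start);
}

int main(void)
{
	toy_gc(toy_interval_new());
	puts("interval fully released");
	return 0;
}

Dropping the free(start->pair) call reproduces the shape of the leak that
kmemleak reports for the test case below.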
Can be reproduced with tests/shell/testcases/sets/0044interval_overlap_0
in nftables.git on a kernel with kmemleak enabled.
The second bug is the use of the rbe_prev vs. the prev pointer.
If rb_prev() returns NULL after at least one iteration, rbe_prev points
to an element that is not an end interval, hence it should not be removed.
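
This is the classic post-loop pitfall: the walk caches the entry derived
from the iterator, so once the loop ends only the iterator tells you
whether the search succeeded; the cache still holds the last rejected
candidate. A standalone C sketch of the pattern, using an invented
linked-list stand-in rather than the kernel's rbtree:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for the backwards tree walk: a chain of
 * elements, some flagged as end-interval nodes.
 */
struct elem {
	struct elem *prev;
	int end_interval;	/* stand-in for nft_rbtree_interval_end() */
};

static struct elem *find_end(struct elem *start)
{
	struct elem *prev = start->prev;
	struct elem *cached = NULL;

	while (prev) {
		cached = prev;		/* last candidate examined */
		if (cached->end_interval)
			break;
		prev = prev->prev;
	}

	/* Buggy variant: "return cached;" would hand back the last
	 * rejected candidate whenever the walk runs off the front.
	 * Correct variant mirrors the fix: test the iterator itself.
	 */
	return prev ? cached : NULL;
}

int main(void)
{
	struct elem a = { .prev = NULL, .end_interval = 0 };
	struct elem b = { .prev = &a, .end_interval = 0 };
	struct elem c = { .prev = &b, .end_interval = 0 };

	/* No end interval precedes c: the search must report NULL even
	 * though the loop body ran and set the cached pointer.
	 */
	printf("%s\n", find_end(&c) ? "found (bug)" : "not found (ok)");

	a.end_interval = 1;
	printf("%s\n", find_end(&c) ? "found (ok)" : "not found (bug)");
	return 0;
}

The buggy variant removes a live element exactly when no end interval
precedes the new one, which is the case the patch's "if (prev)" test
guards against.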
Lastly, check the genmask of the end interval and only remove it if it is
active in the current generation.
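
Generation masks are what make set updates transactional: each element
records the generations in which it is disabled, and lookups test against
the current generation bit. A toy model of that active check, based on the
semantics of nft_set_elem_active() (the type and constants here are
simplified stand-ins, not the kernel definitions):

#include <stdio.h>
#include <stdint.h>

/* Simplified two-generation scheme: bit 0 is the current generation,
 * bit 1 the next one.  An element's genmask records the generations in
 * which it is INACTIVE, so the active test is a negated AND.
 */
struct toy_ext {
	uint8_t genmask;
};

static int toy_elem_active(const struct toy_ext *ext, uint8_t genmask)
{
	return !(ext->genmask & genmask);
}

int main(void)
{
	uint8_t cur = 0x1;				/* current generation bit */
	struct toy_ext live = { .genmask = 0x0 };	/* active everywhere */
	struct toy_ext dying = { .genmask = 0x1 };	/* disabled in current gen */

	printf("live:  %d\n", toy_elem_active(&live, cur));	/* prints 1 */
	printf("dying: %d\n", toy_elem_active(&dying, cur));	/* prints 0 */
	return 0;
}

Without the added nft_set_elem_active() check, the walk could stop at an
end interval that is not active in the current generation and wrongly
remove it as the matching half.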
Fixes: c9e6978e27 ("netfilter: nft_set_rbtree: Switch to node list walk for overlap detection")
Signed-off-by: Florian Westphal <fw@strlen.de>
net/netfilter/nft_set_rbtree.c
@@ -217,29 +217,37 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
 
 static int nft_rbtree_gc_elem(const struct nft_set *__set,
 			      struct nft_rbtree *priv,
-			      struct nft_rbtree_elem *rbe)
+			      struct nft_rbtree_elem *rbe,
+			      u8 genmask)
 {
 	struct nft_set *set = (struct nft_set *)__set;
 	struct rb_node *prev = rb_prev(&rbe->node);
-	struct nft_rbtree_elem *rbe_prev = NULL;
+	struct nft_rbtree_elem *rbe_prev;
 	struct nft_set_gc_batch *gcb;
 
 	gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC);
 	if (!gcb)
 		return -ENOMEM;
 
-	/* search for expired end interval coming before this element. */
+	/* search for end interval coming before this element.
+	 * end intervals don't carry a timeout extension, they
+	 * are coupled with the interval start element.
+	 */
 	while (prev) {
 		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
-		if (nft_rbtree_interval_end(rbe_prev))
+		if (nft_rbtree_interval_end(rbe_prev) &&
+		    nft_set_elem_active(&rbe_prev->ext, genmask))
 			break;
 
 		prev = rb_prev(prev);
 	}
 
-	if (rbe_prev) {
+	if (prev) {
+		rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node);
+
 		rb_erase(&rbe_prev->node, &priv->root);
 		atomic_dec(&set->nelems);
+		nft_set_gc_batch_add(gcb, rbe_prev);
 	}
 
 	rb_erase(&rbe->node, &priv->root);
@@ -321,7 +329,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
 
 	/* perform garbage collection to avoid bogus overlap reports. */
 	if (nft_set_elem_expired(&rbe->ext)) {
-		err = nft_rbtree_gc_elem(set, priv, rbe);
+		err = nft_rbtree_gc_elem(set, priv, rbe, genmask);
 		if (err < 0)
 			return err;
 