netfilter: conntrack: split resolve_clash function

A followup patch will need a helper function with the 'clashing entries
refer to the identical tuple in both directions' resolution logic.

This patch will add another resolve_clash helper where loser_ct must
not be added to the dying list because it will be inserted into the
table.

Therefore this also moves the stat counters and dying-list insertion
of the losing ct.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>

commit bb89abe52b
parent b1b32552c1
Author: Florian Westphal <fw@strlen.de>
Date:   2020-02-03 17:37:06 +01:00
Committer: Pablo Neira Ayuso <pablo@netfilter.org>

--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c

@@ -907,6 +907,39 @@ static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
 		tstamp->start = ktime_get_real_ns();
 }
 
+static int __nf_ct_resolve_clash(struct sk_buff *skb,
+				 struct nf_conntrack_tuple_hash *h)
+{
+	/* This is the conntrack entry already in hashes that won race. */
+	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *loser_ct;
+
+	loser_ct = nf_ct_get(skb, &ctinfo);
+
+	if (nf_ct_is_dying(ct))
+		return NF_DROP;
+
+	if (!atomic_inc_not_zero(&ct->ct_general.use))
+		return NF_DROP;
+
+	if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
+	    nf_ct_match(ct, loser_ct)) {
+		struct net *net = nf_ct_net(ct);
+
+		nf_ct_acct_merge(ct, ctinfo, loser_ct);
+		nf_ct_add_to_dying_list(loser_ct);
+		nf_conntrack_put(&loser_ct->ct_general);
+		nf_ct_set(skb, ct, ctinfo);
+
+		NF_CT_STAT_INC(net, insert_failed);
+		return NF_ACCEPT;
+	}
+
+	nf_ct_put(ct);
+	return NF_DROP;
+}
+
 /**
  * nf_ct_resolve_clash - attempt to handle clash without packet drop
  *
@@ -941,31 +974,23 @@ nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h)
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *loser_ct;
 	struct net *net;
+	int ret;
 
 	loser_ct = nf_ct_get(skb, &ctinfo);
+	net = nf_ct_net(loser_ct);
 
 	l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
 	if (!l4proto->allow_clash)
 		goto drop;
 
-	if (nf_ct_is_dying(ct))
-		goto drop;
+	ret = __nf_ct_resolve_clash(skb, h);
+	if (ret == NF_ACCEPT)
+		return ret;
 
-	if (!atomic_inc_not_zero(&ct->ct_general.use))
-		goto drop;
-
-	if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
-	    nf_ct_match(ct, loser_ct)) {
-		nf_ct_acct_merge(ct, ctinfo, loser_ct);
-		nf_conntrack_put(&loser_ct->ct_general);
-		nf_ct_set(skb, ct, ctinfo);
-		return NF_ACCEPT;
-	}
-
-	nf_ct_put(ct);
 drop:
-	net = nf_ct_net(loser_ct);
+	nf_ct_add_to_dying_list(loser_ct);
+
 	NF_CT_STAT_INC(net, drop);
 	NF_CT_STAT_INC(net, insert_failed);
 	return NF_DROP;
 }
@@ -1034,6 +1059,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 
 	if (unlikely(nf_ct_is_dying(ct))) {
 		nf_ct_add_to_dying_list(ct);
+		NF_CT_STAT_INC(net, insert_failed);
 		goto dying;
 	}
 
@@ -1075,11 +1101,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	return NF_ACCEPT;
 
 out:
-	nf_ct_add_to_dying_list(ct);
 	ret = nf_ct_resolve_clash(skb, h);
 dying:
 	nf_conntrack_double_unlock(hash, reply_hash);
-	NF_CT_STAT_INC(net, insert_failed);
 	local_bh_enable();
 	return ret;
 }
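
For readers who want the end state at a glance, below is a minimal standalone C sketch of the control flow this patch leaves behind. It is not kernel code: the types, the resolve_clash()/resolve_clash_inner() names and the stats fields are invented stand-ins. It only models the split described above, where the inner helper handles the 'clashing entries refer to the identical tuple in both directions' case and retires the loser itself, while the outer function keeps the dying-list insertion and the drop/insert_failed counters on the failure path only.

/* sketch.c: standalone model of the clash-resolution split (NOT kernel code). */
#include <stdbool.h>
#include <stdio.h>

enum verdict { VERDICT_ACCEPT, VERDICT_DROP };

struct stats {
	unsigned int drop;
	unsigned int insert_failed;
};

struct entry {
	bool dying;		/* winner already being torn down? */
	bool on_dying_list;	/* set when an entry is retired */
	bool matches_winner;	/* stand-in for nf_ct_match() on the loser */
};

/*
 * Inner helper, analogous to __nf_ct_resolve_clash(): decides whether the
 * packet can keep using the entry that won the race.  On success it also
 * retires the losing entry and counts insert_failed -- the bookkeeping
 * this patch moves into the helper.
 */
static enum verdict resolve_clash_inner(struct entry *winner,
					struct entry *loser,
					struct stats *st)
{
	if (winner->dying)
		return VERDICT_DROP;

	if (loser->matches_winner) {
		loser->on_dying_list = true;
		st->insert_failed++;
		return VERDICT_ACCEPT;
	}

	return VERDICT_DROP;
}

/*
 * Outer function, analogous to nf_ct_resolve_clash(): only the failure
 * path adds the loser to the dying list and bumps drop/insert_failed.
 */
static enum verdict resolve_clash(struct entry *winner, struct entry *loser,
				  struct stats *st, bool allow_clash)
{
	if (allow_clash &&
	    resolve_clash_inner(winner, loser, st) == VERDICT_ACCEPT)
		return VERDICT_ACCEPT;

	loser->on_dying_list = true;
	st->drop++;
	st->insert_failed++;
	return VERDICT_DROP;
}

int main(void)
{
	struct stats st = { 0, 0 };
	struct entry winner = { false, false, false };
	struct entry loser = { false, false, true };
	enum verdict v = resolve_clash(&winner, &loser, &st, true);

	printf("verdict=%s drop=%u insert_failed=%u loser_retired=%d\n",
	       v == VERDICT_ACCEPT ? "ACCEPT" : "DROP",
	       st.drop, st.insert_failed, (int)loser.on_dying_list);
	return 0;
}

Built and run with, say, cc sketch.c && ./a.out, it prints which counters were bumped and that the loser was retired, mirroring where this patch moves that bookkeeping.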