
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf

Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for net:

1) refcount_inc_not_zero() is not semantically equivalent to
   atomic_inc_not_zero(), from Florian Westphal. My understanding was
   that the refcount_*() API provides a wrapper for easier debugging of
   reference count leaks; however, there are semantic differences
   between these two APIs: refcount_inc_not_zero() needs a barrier.
   The reason for this subtle difference is unknown to me.

2) Packet logging is not correct for ARP and IP packets coming from
   the ARP family and netdev/egress hooks, respectively. Use
   skb_network_offset() to reach the headers in these cases.

3) Set element extension lengths have been growing over time; replace
   a BUG_ON, which might be triggerable from userspace, with EINVAL.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit e45955766b by David S. Miller, 2022-07-11 11:58:38 +01:00
6 changed files with 90 additions and 30 deletions
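
A note on fix #1 before the diffs: the conntrack patches below pair a write barrier before refcount_set(&ct->ct_general.use, 1) with smp_acquire__after_ctrl_dep() after each successful refcount_inc_not_zero(). Here is a minimal userspace sketch of that contract using C11 atomics; the struct and helpers are hypothetical, not the kernel code:

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
    atomic_uint use;     /* refcount; 0 means the object is free for reuse */
    unsigned int status; /* state that must be visible once use != 0 */
};

/* Writer: plays the role of smp_wmb() + refcount_set(&use, 1). */
static void obj_publish(struct obj *o)
{
    o->status = 0;                              /* re-initialise state */
    atomic_thread_fence(memory_order_release);  /* ~ smp_wmb() */
    atomic_store_explicit(&o->use, 1, memory_order_relaxed);
}

/* Reader: plays the role of refcount_inc_not_zero() followed by
 * smp_acquire__after_ctrl_dep(); returns false if the object is free. */
static bool obj_get(struct obj *o)
{
    unsigned int c = atomic_load_explicit(&o->use, memory_order_relaxed);

    do {
        if (c == 0)
            return false;
    } while (!atomic_compare_exchange_weak_explicit(&o->use, &c, c + 1,
                                                    memory_order_relaxed,
                                                    memory_order_relaxed));

    atomic_thread_fence(memory_order_acquire);  /* ~ smp_acquire__after_ctrl_dep() */
    return true;                                /* o->status now safe to read */
}

Without the acquire fence, the reader's later load of o->status may be reordered before the increment, which is exactly the bug class these patches close.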

include/net/netfilter/nf_tables.h

@@ -657,18 +657,22 @@ static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
     tmpl->len = sizeof(struct nft_set_ext);
 }
 
-static inline void nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
-                                          unsigned int len)
+static inline int nft_set_ext_add_length(struct nft_set_ext_tmpl *tmpl, u8 id,
+                                         unsigned int len)
 {
     tmpl->len = ALIGN(tmpl->len, nft_set_ext_types[id].align);
-    BUG_ON(tmpl->len > U8_MAX);
+    if (tmpl->len > U8_MAX)
+        return -EINVAL;
+
     tmpl->offset[id] = tmpl->len;
     tmpl->len += nft_set_ext_types[id].len + len;
+
+    return 0;
 }
 
-static inline void nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)
+static inline int nft_set_ext_add(struct nft_set_ext_tmpl *tmpl, u8 id)
 {
-    nft_set_ext_add_length(tmpl, id, 0);
+    return nft_set_ext_add_length(tmpl, id, 0);
 }
 
 static inline void nft_set_ext_init(struct nft_set_ext *ext,

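The BUG_ON() above guarded an offset that is stored in a u8 field, and set element extensions have grown enough that userspace-supplied lengths can exceed it. A compilable standalone sketch of the failure mode (hypothetical struct and names, mirroring nft_set_ext_tmpl):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define U8_MAX      255
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct ext_tmpl {
    unsigned int len;
    uint8_t offset[4];  /* u8 offsets: values above 255 cannot be stored */
};

static int ext_add_length(struct ext_tmpl *tmpl, uint8_t id,
                          unsigned int align, unsigned int len)
{
    tmpl->len = ALIGN(tmpl->len, align);
    if (tmpl->len > U8_MAX)     /* previously BUG_ON(tmpl->len > U8_MAX) */
        return -EINVAL;

    tmpl->offset[id] = tmpl->len;
    tmpl->len += len;
    return 0;
}

int main(void)
{
    struct ext_tmpl tmpl = { .len = 16 };

    printf("%d\n", ext_add_length(&tmpl, 0, 8, 300));  /* 0: offset 16 fits */
    printf("%d\n", ext_add_length(&tmpl, 1, 8, 300));  /* -22: aligned length 320 > U8_MAX */
    return 0;
}

With a BUG_ON() this path would crash the kernel; returning -EINVAL lets the callers in nf_tables_api.c reject the request instead.
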
net/netfilter/nf_conntrack_core.c

@@ -729,6 +729,9 @@ static void nf_ct_gc_expired(struct nf_conn *ct)
     if (!refcount_inc_not_zero(&ct->ct_general.use))
         return;
 
+    /* load ->status after refcount increase */
+    smp_acquire__after_ctrl_dep();
+
     if (nf_ct_should_gc(ct))
         nf_ct_kill(ct);
 
@@ -795,6 +798,9 @@ __nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
          */
         ct = nf_ct_tuplehash_to_ctrack(h);
         if (likely(refcount_inc_not_zero(&ct->ct_general.use))) {
+            /* re-check key after refcount */
+            smp_acquire__after_ctrl_dep();
+
             if (likely(nf_ct_key_equal(h, tuple, zone, net)))
                 goto found;
 
@@ -1387,6 +1393,9 @@ static unsigned int early_drop_list(struct net *net,
         if (!refcount_inc_not_zero(&tmp->ct_general.use))
             continue;
 
+        /* load ->ct_net and ->status after refcount increase */
+        smp_acquire__after_ctrl_dep();
+
         /* kill only if still in same netns -- might have moved due to
          * SLAB_TYPESAFE_BY_RCU rules.
          *
@@ -1536,6 +1545,9 @@ static void gc_worker(struct work_struct *work)
             if (!refcount_inc_not_zero(&tmp->ct_general.use))
                 continue;
 
+            /* load ->status after refcount increase */
+            smp_acquire__after_ctrl_dep();
+
             if (gc_worker_skip_ct(tmp)) {
                 nf_ct_put(tmp);
                 continue;
@@ -1775,6 +1787,16 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
     if (!exp)
         __nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
 
+    /* Other CPU might have obtained a pointer to this object before it was
+     * released.  Because refcount is 0, refcount_inc_not_zero() will fail.
+     *
+     * After refcount_set(1) it will succeed; ensure that zeroing of
+     * ct->status and the correct ct->net pointer are visible; else other
+     * core might observe CONFIRMED bit which means the entry is valid and
+     * in the hash table, but it's not (anymore).
+     */
+    smp_wmb();
+
     /* Now it is going to be associated with an sk_buff, set refcount to 1. */
     refcount_set(&ct->ct_general.use, 1);
 

net/netfilter/nf_conntrack_netlink.c

@@ -1203,6 +1203,7 @@ restart:
                    hnnode) {
             ct = nf_ct_tuplehash_to_ctrack(h);
             if (nf_ct_is_expired(ct)) {
+                /* need to defer nf_ct_kill() until lock is released */
                 if (i < ARRAY_SIZE(nf_ct_evict) &&
                     refcount_inc_not_zero(&ct->ct_general.use))
                     nf_ct_evict[i++] = ct;

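The new comment documents a pattern worth spelling out: nf_ct_kill() must not run while the dump still holds the lock, so expired entries are only referenced during the walk and killed afterwards. A self-contained sketch of that collect-then-kill shape (hypothetical pthread code, not the ctnetlink dump path):

#include <pthread.h>
#include <stddef.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct entry {
    struct entry *next;
    int expired;
    int refs;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table;

/* Stands in for nf_ct_kill() + nf_ct_put(); it may take other locks,
 * which is why it must not run under table_lock. */
static void entry_kill_and_put(struct entry *e)
{
    e->expired = 2;
    e->refs--;
}

static void evict_expired(void)
{
    struct entry *evict[8], *e;
    size_t i = 0, n;

    pthread_mutex_lock(&table_lock);
    for (e = table; e != NULL; e = e->next) {
        if (e->expired && i < ARRAY_SIZE(evict)) {
            e->refs++;          /* ~ refcount_inc_not_zero() */
            evict[i++] = e;     /* defer the kill */
        }
    }
    pthread_mutex_unlock(&table_lock);

    for (n = 0; n < i; n++)
        entry_kill_and_put(evict[n]);   /* lock released: safe to kill */
}
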
net/netfilter/nf_conntrack_standalone.c

@@ -306,6 +306,9 @@ static int ct_seq_show(struct seq_file *s, void *v)
     if (unlikely(!refcount_inc_not_zero(&ct->ct_general.use)))
         return 0;
 
+    /* load ->status after refcount increase */
+    smp_acquire__after_ctrl_dep();
+
     if (nf_ct_should_gc(ct)) {
         nf_ct_kill(ct);
         goto release;

net/netfilter/nf_log_syslog.c

@@ -67,7 +67,7 @@ dump_arp_packet(struct nf_log_buf *m,
     unsigned int logflags;
     struct arphdr _arph;
 
-    ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
+    ah = skb_header_pointer(skb, nhoff, sizeof(_arph), &_arph);
     if (!ah) {
         nf_log_buf_add(m, "TRUNCATED");
         return;
@@ -96,7 +96,7 @@ dump_arp_packet(struct nf_log_buf *m,
         ah->ar_pln != sizeof(__be32))
         return;
 
-    ap = skb_header_pointer(skb, sizeof(_arph), sizeof(_arpp), &_arpp);
+    ap = skb_header_pointer(skb, nhoff + sizeof(_arph), sizeof(_arpp), &_arpp);
     if (!ap) {
         nf_log_buf_add(m, " INCOMPLETE [%zu bytes]",
                        skb->len - sizeof(_arph));
@@ -149,7 +149,7 @@ static void nf_log_arp_packet(struct net *net, u_int8_t pf,
 
     nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
                               prefix);
-    dump_arp_packet(m, loginfo, skb, 0);
+    dump_arp_packet(m, loginfo, skb, skb_network_offset(skb));
 
     nf_log_buf_close(m);
 }
@@ -850,7 +850,7 @@ static void nf_log_ip_packet(struct net *net, u_int8_t pf,
     if (in)
         dump_mac_header(m, loginfo, skb);
 
-    dump_ipv4_packet(net, m, loginfo, skb, 0);
+    dump_ipv4_packet(net, m, loginfo, skb, skb_network_offset(skb));
 
     nf_log_buf_close(m);
 }

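The logging fixes all replace a hard-coded offset of 0 with skb_network_offset(skb): at the ARP family and netdev/egress hooks, skb->data may still point at the link-layer header, so the network header is not at offset 0. A userspace sketch of the skb_header_pointer() contract that makes this visible (hypothetical helper, not the kernel API):

#include <stdio.h>
#include <string.h>

/* Bounds-checked copy at an explicit offset, like skb_header_pointer(). */
static void *header_pointer(const unsigned char *pkt, size_t pkt_len,
                            size_t offset, size_t len, void *buffer)
{
    if (offset + len > pkt_len)
        return NULL;            /* the "TRUNCATED" case in the log code */
    memcpy(buffer, pkt + offset, len);
    return buffer;
}

int main(void)
{
    /* 14-byte Ethernet header followed by a (fake) 28-byte ARP header. */
    unsigned char frame[14 + 28] = { 0 };
    unsigned char arph[28];
    size_t nhoff = 14;          /* ~ skb_network_offset(skb) */

    /* Offset 0 would parse the Ethernet header as if it were ARP;
     * nhoff lands on the real ARP header. */
    if (header_pointer(frame, sizeof(frame), nhoff, sizeof(arph), arph))
        puts("ARP header read at the correct offset");
    return 0;
}
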
net/netfilter/nf_tables_api.c

@@ -5833,8 +5833,11 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
     if (!nla[NFTA_SET_ELEM_KEY] && !(flags & NFT_SET_ELEM_CATCHALL))
         return -EINVAL;
 
-    if (flags != 0)
-        nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
+    if (flags != 0) {
+        err = nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
+        if (err < 0)
+            return err;
+    }
 
     if (set->flags & NFT_SET_MAP) {
         if (nla[NFTA_SET_ELEM_DATA] == NULL &&
@@ -5943,7 +5946,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
         if (err < 0)
             goto err_set_elem_expr;
 
-        nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+        err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+        if (err < 0)
+            goto err_parse_key;
     }
 
     if (nla[NFTA_SET_ELEM_KEY_END]) {
@@ -5952,22 +5957,31 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
         if (err < 0)
             goto err_parse_key;
 
-        nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen);
+        err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen);
+        if (err < 0)
+            goto err_parse_key_end;
     }
 
     if (timeout > 0) {
-        nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION);
-        if (timeout != set->timeout)
-            nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
+        err = nft_set_ext_add(&tmpl, NFT_SET_EXT_EXPIRATION);
+        if (err < 0)
+            goto err_parse_key_end;
+
+        if (timeout != set->timeout) {
+            err = nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
+            if (err < 0)
+                goto err_parse_key_end;
+        }
     }
 
     if (num_exprs) {
         for (i = 0; i < num_exprs; i++)
             size += expr_array[i]->ops->size;
 
-        nft_set_ext_add_length(&tmpl, NFT_SET_EXT_EXPRESSIONS,
-                               sizeof(struct nft_set_elem_expr) +
-                               size);
+        err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_EXPRESSIONS,
+                                     sizeof(struct nft_set_elem_expr) + size);
+        if (err < 0)
+            goto err_parse_key_end;
     }
 
     if (nla[NFTA_SET_ELEM_OBJREF] != NULL) {
@@ -5982,7 +5996,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
             err = PTR_ERR(obj);
             goto err_parse_key_end;
         }
-        nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF);
+        err = nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF);
+        if (err < 0)
+            goto err_parse_key_end;
     }
 
     if (nla[NFTA_SET_ELEM_DATA] != NULL) {
@@ -6016,7 +6032,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                                               NFT_VALIDATE_NEED);
         }
 
-        nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, desc.len);
+        err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_DATA, desc.len);
+        if (err < 0)
+            goto err_parse_data;
     }
 
     /* The full maximum length of userdata can exceed the maximum
@@ -6026,9 +6044,12 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
     ulen = 0;
     if (nla[NFTA_SET_ELEM_USERDATA] != NULL) {
         ulen = nla_len(nla[NFTA_SET_ELEM_USERDATA]);
-        if (ulen > 0)
-            nft_set_ext_add_length(&tmpl, NFT_SET_EXT_USERDATA,
-                                   ulen);
+        if (ulen > 0) {
+            err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_USERDATA,
+                                         ulen);
+            if (err < 0)
+                goto err_parse_data;
+        }
     }
 
     err = -ENOMEM;
@@ -6256,8 +6277,11 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
 
     nft_set_ext_prepare(&tmpl);
 
-    if (flags != 0)
-        nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
+    if (flags != 0) {
+        err = nft_set_ext_add(&tmpl, NFT_SET_EXT_FLAGS);
+        if (err < 0)
+            return err;
+    }
 
     if (nla[NFTA_SET_ELEM_KEY]) {
         err = nft_setelem_parse_key(ctx, set, &elem.key.val,
@@ -6265,16 +6289,20 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
         if (err < 0)
             return err;
 
-        nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+        err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen);
+        if (err < 0)
+            goto fail_elem;
     }
 
     if (nla[NFTA_SET_ELEM_KEY_END]) {
         err = nft_setelem_parse_key(ctx, set, &elem.key_end.val,
                                     nla[NFTA_SET_ELEM_KEY_END]);
         if (err < 0)
-            return err;
+            goto fail_elem;
 
-        nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen);
+        err = nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY_END, set->klen);
+        if (err < 0)
+            goto fail_elem_key_end;
     }
 
     err = -ENOMEM;
@@ -6282,7 +6310,7 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
                                   elem.key_end.val.data, NULL, 0, 0,
                                   GFP_KERNEL_ACCOUNT);
     if (elem.priv == NULL)
-        goto fail_elem;
+        goto fail_elem_key_end;
 
     ext = nft_set_elem_ext(set, elem.priv);
     if (flags)
@@ -6306,6 +6334,8 @@ fail_ops:
     kfree(trans);
 fail_trans:
     kfree(elem.priv);
+fail_elem_key_end:
+    nft_data_release(&elem.key_end.val, NFT_DATA_VALUE);
 fail_elem:
     nft_data_release(&elem.key.val, NFT_DATA_VALUE);
     return err;
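
The new fail_elem_key_end label completes a standard goto unwind ladder: each acquired resource gets its own label, and an error jumps to the label that releases everything acquired so far, in reverse order. A compilable sketch of the shape (the acquire/release pairs are hypothetical stand-ins for nft_setelem_parse_key() and nft_data_release()):

#include <stdio.h>

static int acquire_key(void)      { return 0; }
static int acquire_key_end(void)  { return 0; }
static int use_both(void)         { return -1; }   /* force the unwind */
static void release_key_end(void) { puts("release key_end"); }
static void release_key(void)     { puts("release key"); }

static int setup(void)
{
    int err;

    err = acquire_key();
    if (err < 0)
        return err;                 /* nothing acquired yet */

    err = acquire_key_end();
    if (err < 0)
        goto fail_elem;             /* only the key is held */

    err = use_both();
    if (err < 0)
        goto fail_elem_key_end;     /* both keys are held */

    return 0;

fail_elem_key_end:
    release_key_end();
fail_elem:
    release_key();
    return err;
}

int main(void)
{
    return setup() ? 1 : 0;
}

Before this fix, a failure after parsing NFTA_SET_ELEM_KEY_END jumped to fail_elem and leaked the key_end data; the extra rung releases it in the correct order.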