netfilter: nft_payload: support for inner header matching / mangling

Allow matching and mangling of inner headers / payload data located after
the transport header. A new field in the pktinfo structure stores the
inner header offset, which is calculated only when requested.
Only TCP and UDP are supported at this stage.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
This commit is contained in:
Pablo Neira Ayuso 2021-10-28 22:15:00 +02:00
parent b5bdc6f9c2
commit c46b38dc87
3 changed files with 58 additions and 2 deletions

View File

@ -23,6 +23,7 @@ struct module;
enum {
NFT_PKTINFO_L4PROTO = (1 << 0),
NFT_PKTINFO_INNER = (1 << 1),
};
struct nft_pktinfo {
@ -32,6 +33,7 @@ struct nft_pktinfo {
u8 tprot;
u16 fragoff;
unsigned int thoff;
unsigned int inneroff;
};
static inline struct sock *nft_sk(const struct nft_pktinfo *pkt)

View File

@ -753,11 +753,13 @@ enum nft_dynset_attributes {
* @NFT_PAYLOAD_LL_HEADER: link layer header
* @NFT_PAYLOAD_NETWORK_HEADER: network header
* @NFT_PAYLOAD_TRANSPORT_HEADER: transport header
* @NFT_PAYLOAD_INNER_HEADER: inner header / payload
*/
enum nft_payload_bases {
NFT_PAYLOAD_LL_HEADER,
NFT_PAYLOAD_NETWORK_HEADER,
NFT_PAYLOAD_TRANSPORT_HEADER,
NFT_PAYLOAD_INNER_HEADER,
};
/**

View File

@ -22,6 +22,7 @@
#include <linux/icmpv6.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <net/sctp/checksum.h>
static bool nft_payload_rebuild_vlan_hdr(const struct sk_buff *skb, int mac_off,
@ -79,6 +80,45 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
}
static int __nft_payload_inner_offset(struct nft_pktinfo *pkt)
{
unsigned int thoff = nft_thoff(pkt);
if (!(pkt->flags & NFT_PKTINFO_L4PROTO))
return -1;
switch (pkt->tprot) {
case IPPROTO_UDP:
pkt->inneroff = thoff + sizeof(struct udphdr);
break;
case IPPROTO_TCP: {
struct tcphdr *th, _tcph;
th = skb_header_pointer(pkt->skb, thoff, sizeof(_tcph), &_tcph);
if (!th)
return -1;
pkt->inneroff = thoff + __tcp_hdrlen(th);
}
break;
default:
return -1;
}
pkt->flags |= NFT_PKTINFO_INNER;
return 0;
}
/* Return the inner header offset, computing and caching it on first
 * use.  The const is cast away because inneroff/flags behave as a
 * lazily-filled cache on an otherwise read-only pktinfo.
 * Returns -1 when the inner offset cannot be determined.
 */
static int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
{
	struct nft_pktinfo *wpkt = (struct nft_pktinfo *)pkt;

	if (pkt->flags & NFT_PKTINFO_INNER)
		return pkt->inneroff;

	if (__nft_payload_inner_offset(wpkt) < 0)
		return -1;

	return pkt->inneroff;
}
void nft_payload_eval(const struct nft_expr *expr,
struct nft_regs *regs,
const struct nft_pktinfo *pkt)
@ -112,6 +152,11 @@ void nft_payload_eval(const struct nft_expr *expr,
goto err;
offset = nft_thoff(pkt);
break;
case NFT_PAYLOAD_INNER_HEADER:
offset = nft_payload_inner_offset(pkt);
if (offset < 0)
goto err;
break;
default:
BUG();
}
@ -614,6 +659,11 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
goto err;
offset = nft_thoff(pkt);
break;
case NFT_PAYLOAD_INNER_HEADER:
offset = nft_payload_inner_offset(pkt);
if (offset < 0)
goto err;
break;
default:
BUG();
}
@ -622,7 +672,8 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
offset += priv->offset;
if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
(priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
((priv->base != NFT_PAYLOAD_TRANSPORT_HEADER &&
priv->base != NFT_PAYLOAD_INNER_HEADER) ||
skb->ip_summed != CHECKSUM_PARTIAL)) {
fsum = skb_checksum(skb, offset, priv->len, 0);
tsum = csum_partial(src, priv->len, 0);
@ -741,6 +792,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
case NFT_PAYLOAD_LL_HEADER:
case NFT_PAYLOAD_NETWORK_HEADER:
case NFT_PAYLOAD_TRANSPORT_HEADER:
case NFT_PAYLOAD_INNER_HEADER:
break;
default:
return ERR_PTR(-EOPNOTSUPP);
@ -759,7 +811,7 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
len = ntohl(nla_get_be32(tb[NFTA_PAYLOAD_LEN]));
if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
base != NFT_PAYLOAD_LL_HEADER)
base != NFT_PAYLOAD_LL_HEADER && base != NFT_PAYLOAD_INNER_HEADER)
return &nft_payload_fast_ops;
else
return &nft_payload_ops;