selftests/bpf: Add tests for new nf_conntrack kfuncs

Introduce selftests for the following kfunc helpers:
- bpf_xdp_ct_alloc
- bpf_skb_ct_alloc
- bpf_ct_insert_entry
- bpf_ct_set_timeout
- bpf_ct_change_timeout
- bpf_ct_set_status
- bpf_ct_change_status
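
For a quick sense of how the new kfuncs compose, the flow under test is
roughly the following (condensed from the BPF prog below; the function
and section names in this sketch are illustrative and error paths are
elided). Note the intended split: bpf_ct_set_timeout()/bpf_ct_set_status()
operate on an allocated-but-not-yet-inserted entry, while
bpf_ct_change_timeout()/bpf_ct_change_status() operate on an inserted
(or looked-up) one:

  SEC("xdp")
  int ct_alloc_sketch(struct xdp_md *ctx)
  {
          struct bpf_ct_opts___local opts = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
          struct bpf_sock_tuple tup = {}; /* fill tup.ipv4 with the 4-tuple */
          struct nf_conn *ct, *ct_ins;

          ct = bpf_xdp_ct_alloc(ctx, &tup, sizeof(tup.ipv4), &opts, sizeof(opts));
          if (!ct)
                  return XDP_PASS;
          bpf_ct_set_timeout(ct, 10000);        /* in ms, pre-insertion */
          bpf_ct_set_status(ct, IPS_CONFIRMED);
          ct_ins = bpf_ct_insert_entry(ct);     /* consumes the ct reference */
          if (ct_ins) {
                  bpf_ct_change_timeout(ct_ins, 10000); /* post-insertion */
                  bpf_ct_release(ct_ins);
          }
          return XDP_PASS;
  }

Since the timeout is given in milliseconds, a 10000 ms update leaves
(ct->timeout - jiffies) / HZ at roughly 10 s, which is why the
prog_tests side below asserts the reported delta lies in (8, 10].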

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20220721134245.2450-12-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

 2 files changed, 81 insertions(+), 12 deletions(-)

diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c

@@ -39,6 +39,14 @@ void test_bpf_nf_ct(int mode)
 	ASSERT_EQ(skel->bss->test_enonet_netns_id, -ENONET, "Test ENONET for bad but valid netns_id");
 	ASSERT_EQ(skel->bss->test_enoent_lookup, -ENOENT, "Test ENOENT for failed lookup");
 	ASSERT_EQ(skel->bss->test_eafnosupport, -EAFNOSUPPORT, "Test EAFNOSUPPORT for invalid len__tuple");
+	ASSERT_EQ(skel->data->test_alloc_entry, 0, "Test for alloc new entry");
+	ASSERT_EQ(skel->data->test_insert_entry, 0, "Test for insert new entry");
+	ASSERT_EQ(skel->data->test_succ_lookup, 0, "Test for successful lookup");
+	/* allow some tolerance for test_delta_timeout value to avoid races. */
+	ASSERT_GT(skel->bss->test_delta_timeout, 8, "Test for min ct timeout update");
+	ASSERT_LE(skel->bss->test_delta_timeout, 10, "Test for max ct timeout update");
+	/* expected status is IPS_SEEN_REPLY */
+	ASSERT_EQ(skel->bss->test_status, 2, "Test for ct status update ");
 end:
 	test_bpf_nf__destroy(skel);
 }

diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c

@@ -8,6 +8,8 @@
 #define EINVAL 22
 #define ENOENT 2
 
+extern unsigned long CONFIG_HZ __kconfig;
+
 int test_einval_bpf_tuple = 0;
 int test_einval_reserved = 0;
 int test_einval_netns_id = 0;
@@ -16,6 +18,11 @@ int test_eproto_l4proto = 0;
 int test_enonet_netns_id = 0;
 int test_enoent_lookup = 0;
 int test_eafnosupport = 0;
+int test_alloc_entry = -EINVAL;
+int test_insert_entry = -EAFNOSUPPORT;
+int test_succ_lookup = -ENOENT;
+u32 test_delta_timeout = 0;
+u32 test_status = 0;
 
 struct nf_conn;
 
@@ -26,31 +33,44 @@ struct bpf_ct_opts___local {
 	u8 reserved[3];
 } __attribute__((preserve_access_index));
 
+struct nf_conn *bpf_xdp_ct_alloc(struct xdp_md *, struct bpf_sock_tuple *, u32,
+				 struct bpf_ct_opts___local *, u32) __ksym;
 struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *, struct bpf_sock_tuple *, u32,
 				  struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_skb_ct_alloc(struct __sk_buff *, struct bpf_sock_tuple *, u32,
+				 struct bpf_ct_opts___local *, u32) __ksym;
 struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
 				  struct bpf_ct_opts___local *, u32) __ksym;
+struct nf_conn *bpf_ct_insert_entry(struct nf_conn *) __ksym;
 void bpf_ct_release(struct nf_conn *) __ksym;
+void bpf_ct_set_timeout(struct nf_conn *, u32) __ksym;
+int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
+int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
+int bpf_ct_change_status(struct nf_conn *, u32) __ksym;
 
 static __always_inline void
-nf_ct_test(struct nf_conn *(*func)(void *, struct bpf_sock_tuple *, u32,
-				   struct bpf_ct_opts___local *, u32),
+nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
+					struct bpf_ct_opts___local *, u32),
+	   struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32,
+				       struct bpf_ct_opts___local *, u32),
 	   void *ctx)
 {
 	struct bpf_ct_opts___local opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
 	struct bpf_sock_tuple bpf_tuple;
 	struct nf_conn *ct;
+	int err;
 
 	__builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4));
 
-	ct = func(ctx, NULL, 0, &opts_def, sizeof(opts_def));
+	ct = lookup_fn(ctx, NULL, 0, &opts_def, sizeof(opts_def));
 	if (ct)
 		bpf_ct_release(ct);
 	else
 		test_einval_bpf_tuple = opts_def.error;
 
 	opts_def.reserved[0] = 1;
-	ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+		       sizeof(opts_def));
 	opts_def.reserved[0] = 0;
 	opts_def.l4proto = IPPROTO_TCP;
 	if (ct)
@@ -59,21 +79,24 @@ nf_ct_test(struct nf_conn *(*func)(void *, struct bpf_sock_tuple *, u32,
 		test_einval_reserved = opts_def.error;
 
 	opts_def.netns_id = -2;
-	ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+		       sizeof(opts_def));
 	opts_def.netns_id = -1;
 	if (ct)
 		bpf_ct_release(ct);
 	else
 		test_einval_netns_id = opts_def.error;
 
-	ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def) - 1);
+	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+		       sizeof(opts_def) - 1);
 	if (ct)
 		bpf_ct_release(ct);
 	else
 		test_einval_len_opts = opts_def.error;
 
 	opts_def.l4proto = IPPROTO_ICMP;
-	ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+		       sizeof(opts_def));
 	opts_def.l4proto = IPPROTO_TCP;
 	if (ct)
 		bpf_ct_release(ct);
@@ -81,37 +104,75 @@ nf_ct_test(struct nf_conn *(*func)(void *, struct bpf_sock_tuple *, u32,
 		test_eproto_l4proto = opts_def.error;
 
 	opts_def.netns_id = 0xf00f;
-	ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+		       sizeof(opts_def));
 	opts_def.netns_id = -1;
 	if (ct)
 		bpf_ct_release(ct);
 	else
 		test_enonet_netns_id = opts_def.error;
 
-	ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, sizeof(opts_def));
+	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+		       sizeof(opts_def));
 	if (ct)
 		bpf_ct_release(ct);
 	else
 		test_enoent_lookup = opts_def.error;
 
-	ct = func(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4) - 1, &opts_def, sizeof(opts_def));
+	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4) - 1, &opts_def,
+		       sizeof(opts_def));
 	if (ct)
 		bpf_ct_release(ct);
 	else
 		test_eafnosupport = opts_def.error;
+
+	bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */
+	bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */
+	bpf_tuple.ipv4.sport = bpf_get_prandom_u32(); /* src port */
+	bpf_tuple.ipv4.dport = bpf_get_prandom_u32(); /* dst port */
+
+	ct = alloc_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
+		      sizeof(opts_def));
+	if (ct) {
+		struct nf_conn *ct_ins;
+
+		bpf_ct_set_timeout(ct, 10000);
+		bpf_ct_set_status(ct, IPS_CONFIRMED);
+
+		ct_ins = bpf_ct_insert_entry(ct);
+		if (ct_ins) {
+			struct nf_conn *ct_lk;
+
+			ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
+					  &opts_def, sizeof(opts_def));
+			if (ct_lk) {
+				/* update ct entry timeout */
+				bpf_ct_change_timeout(ct_lk, 10000);
+				test_delta_timeout = ct_lk->timeout - bpf_jiffies64();
+				test_delta_timeout /= CONFIG_HZ;
+				test_status = IPS_SEEN_REPLY;
+				bpf_ct_change_status(ct_lk, IPS_SEEN_REPLY);
+				bpf_ct_release(ct_lk);
+				test_succ_lookup = 0;
+			}
+			bpf_ct_release(ct_ins);
+			test_insert_entry = 0;
+		}
+		test_alloc_entry = 0;
+	}
 }
 
 SEC("xdp")
 int nf_xdp_ct_test(struct xdp_md *ctx)
 {
-	nf_ct_test((void *)bpf_xdp_ct_lookup, ctx);
+	nf_ct_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx);
 	return 0;
 }
 
 SEC("tc")
 int nf_skb_ct_test(struct __sk_buff *ctx)
 {
-	nf_ct_test((void *)bpf_skb_ct_lookup, ctx);
+	nf_ct_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx);
 	return 0;
 }
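
As a usage note (assuming the usual BPF selftests setup and a kernel
with conntrack enabled): the new assertions run as part of the bpf_nf
prog_test, which can be invoked on its own with

  cd tools/testing/selftests/bpf
  ./test_progs -t bpf_nf

where the test name is inferred from the prog_tests file above.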