
selftests/bpf: Switch sk_lookup selftests to strict SEC("sk_lookup") use

Update "sk_lookup/" definition to be a stand-alone type specifier,
with backwards-compatible prefix match logic in non-libbpf-1.0 mode.

Currently, all the "sk_lookup/<whatever>" uses in selftests employ
<whatever> only to encode a duplicate unique name, which is redundant:
the BPF program's name (its C function name) already uniquely and
descriptively identifies the intended use of such BPF programs.

With libbpf's SEC_DEF("sk_lookup") definition updated, switch existing
sk_lookup programs to use "unqualified" SEC("sk_lookup") section names,
with no extra suffix appended.

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Dave Marchevsky <davemarchevsky@fb.com>
Link: https://lore.kernel.org/bpf/20210928161946.2512801-11-andrii@kernel.org
Andrii Nakryiko, 2021-09-28 09:19:46 -07:00, committed by Alexei Starovoitov
commit 7c80c87ad5, parent dd94d45cf0
2 changed files with 20 additions and 20 deletions
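
For illustration, here is a minimal user-space sketch of how such a program is
typically driven once the section name carries no unique suffix: the program is
picked out by its C function name and attached on its own. The object file name
"test_sk_lookup.bpf.o", the program name "lookup_pass", and the caller-supplied
netns_fd are assumptions for the example, not code from this commit.

#include <errno.h>
#include <bpf/libbpf.h>

/* Open the object, find one sk_lookup program by its C function name,
 * and attach it to the network namespace referred to by netns_fd.
 */
int run_lookup_pass(int netns_fd)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err;

	obj = bpf_object__open_file("test_sk_lookup.bpf.o", NULL);
	err = libbpf_get_error(obj);
	if (err)
		return err;

	err = bpf_object__load(obj);
	if (err)
		goto out_close;

	/* Identified by function name, not by a SEC("sk_lookup/...") suffix. */
	prog = bpf_object__find_program_by_name(obj, "lookup_pass");
	if (!prog) {
		err = -ENOENT;
		goto out_close;
	}

	link = bpf_program__attach_netns(prog, netns_fd);
	err = libbpf_get_error(link);
	if (err)
		goto out_close;

	/* ... exercise connections in the target netns here ... */

	bpf_link__destroy(link);
out_close:
	bpf_object__close(obj);
	return err;
}

The selftest harness does the equivalent through its generated skeleton; the
point here is only that the C function name is what identifies each program.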

tools/lib/bpf/libbpf.c

@@ -8055,7 +8055,7 @@ static const struct bpf_sec_def section_defs[] = {
SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE),
SEC_DEF("sk_lookup/", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
};
#define MAX_TYPE_NAME_SIZE 32
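
The SEC_SLOPPY_PFX flag in the new definition preserves the backwards-compatible
prefix matching in non-libbpf-1.0 mode. Below is a minimal sketch of opting into
the strict, exact-match behavior ahead of libbpf 1.0, assuming the
LIBBPF_STRICT_SEC_NAME flag from libbpf's strict-mode API; it is not part of
this commit.

#include <bpf/libbpf.h>
#include <bpf/libbpf_legacy.h>

/* Sketch, not part of this diff: with strict section name handling enabled,
 * only the exact SEC("sk_lookup") spelling is recognized, and sloppy prefix
 * matches such as "sk_lookup/foo" are no longer accepted.
 */
static int enable_strict_sec_names(void)
{
	return libbpf_set_strict_mode(LIBBPF_STRICT_SEC_NAME);
}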

tools/testing/selftests/bpf/progs/test_sk_lookup.c

@@ -72,13 +72,13 @@ static const __u16 DST_PORT = 7007; /* Host byte order */
static const __u32 DST_IP4 = IP4(127, 0, 0, 1);
static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001);
SEC("sk_lookup/lookup_pass")
SEC("sk_lookup")
int lookup_pass(struct bpf_sk_lookup *ctx)
{
return SK_PASS;
}
SEC("sk_lookup/lookup_drop")
SEC("sk_lookup")
int lookup_drop(struct bpf_sk_lookup *ctx)
{
return SK_DROP;
@@ -97,7 +97,7 @@ int reuseport_drop(struct sk_reuseport_md *ctx)
}
/* Redirect packets destined for port DST_PORT to socket at redir_map[0]. */
SEC("sk_lookup/redir_port")
SEC("sk_lookup")
int redir_port(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -116,7 +116,7 @@ int redir_port(struct bpf_sk_lookup *ctx)
}
/* Redirect packets destined for DST_IP4 address to socket at redir_map[0]. */
SEC("sk_lookup/redir_ip4")
SEC("sk_lookup")
int redir_ip4(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -139,7 +139,7 @@ int redir_ip4(struct bpf_sk_lookup *ctx)
}
/* Redirect packets destined for DST_IP6 address to socket at redir_map[0]. */
SEC("sk_lookup/redir_ip6")
SEC("sk_lookup")
int redir_ip6(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -164,7 +164,7 @@ int redir_ip6(struct bpf_sk_lookup *ctx)
return err ? SK_DROP : SK_PASS;
}
SEC("sk_lookup/select_sock_a")
SEC("sk_lookup")
int select_sock_a(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -179,7 +179,7 @@ int select_sock_a(struct bpf_sk_lookup *ctx)
return err ? SK_DROP : SK_PASS;
}
SEC("sk_lookup/select_sock_a_no_reuseport")
SEC("sk_lookup")
int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -205,7 +205,7 @@ int select_sock_b(struct sk_reuseport_md *ctx)
}
/* Check that bpf_sk_assign() returns -EEXIST if socket already selected. */
SEC("sk_lookup/sk_assign_eexist")
SEC("sk_lookup")
int sk_assign_eexist(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -238,7 +238,7 @@ out:
}
/* Check that bpf_sk_assign(BPF_SK_LOOKUP_F_REPLACE) can override selection. */
SEC("sk_lookup/sk_assign_replace_flag")
SEC("sk_lookup")
int sk_assign_replace_flag(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -270,7 +270,7 @@ out:
}
/* Check that bpf_sk_assign(sk=NULL) is accepted. */
SEC("sk_lookup/sk_assign_null")
SEC("sk_lookup")
int sk_assign_null(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk = NULL;
@@ -313,7 +313,7 @@ out:
}
/* Check that selected sk is accessible through context. */
SEC("sk_lookup/access_ctx_sk")
SEC("sk_lookup")
int access_ctx_sk(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk1 = NULL, *sk2 = NULL;
@@ -379,7 +379,7 @@ out:
* are not covered because they give bogus results, that is the
* verifier ignores the offset.
*/
SEC("sk_lookup/ctx_narrow_access")
SEC("sk_lookup")
int ctx_narrow_access(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -553,7 +553,7 @@ int ctx_narrow_access(struct bpf_sk_lookup *ctx)
}
/* Check that sk_assign rejects SERVER_A socket with -ESOCKNOSUPPORT */
SEC("sk_lookup/sk_assign_esocknosupport")
SEC("sk_lookup")
int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx)
{
struct bpf_sock *sk;
@@ -578,28 +578,28 @@ out:
return ret;
}
SEC("sk_lookup/multi_prog_pass1")
SEC("sk_lookup")
int multi_prog_pass1(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
SEC("sk_lookup/multi_prog_pass2")
SEC("sk_lookup")
int multi_prog_pass2(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
return SK_PASS;
}
SEC("sk_lookup/multi_prog_drop1")
SEC("sk_lookup")
int multi_prog_drop1(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
return SK_DROP;
}
SEC("sk_lookup/multi_prog_drop2")
SEC("sk_lookup")
int multi_prog_drop2(struct bpf_sk_lookup *ctx)
{
bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
@@ -623,7 +623,7 @@ static __always_inline int select_server_a(struct bpf_sk_lookup *ctx)
return SK_PASS;
}
SEC("sk_lookup/multi_prog_redir1")
SEC("sk_lookup")
int multi_prog_redir1(struct bpf_sk_lookup *ctx)
{
int ret;
@@ -633,7 +633,7 @@ int multi_prog_redir1(struct bpf_sk_lookup *ctx)
return SK_PASS;
}
SEC("sk_lookup/multi_prog_redir2")
SEC("sk_lookup")
int multi_prog_redir2(struct bpf_sk_lookup *ctx)
{
int ret;