mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-01 16:14:13 +08:00
7b219da43f
Initializing psock->sk_proto and other saved callbacks is only done in sk_psock_update_proto, after sk_psock_init has returned. The logic for this is difficult to follow, and needlessly complex. Instead, initialize psock->sk_proto whenever we allocate a new psock. Additionally, assert the following invariants: * The SK has no ULP: ULP does its own finagling of sk->sk_prot * sk_user_data is unused: we need it to store sk_psock Protect our access to sk_user_data with sk_callback_lock, which is what other users like reuseport arrays, etc. do. The result is that an sk_psock is always fully initialized, and that psock->sk_proto is always the "original" struct proto. The latter allows us to use psock->sk_proto when initializing IPv6 TCP / UDP callbacks for sockmap. Signed-off-by: Lorenz Bauer <lmb@cloudflare.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: John Fastabend <john.fastabend@gmail.com> Link: https://lore.kernel.org/bpf/20200821102948.21918-2-lmb@cloudflare.com
53 lines
1.3 KiB
C
53 lines
1.3 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
/* Copyright (c) 2020 Cloudflare Ltd https://cloudflare.com */
|
|
|
|
#include <linux/skmsg.h>
|
|
#include <net/sock.h>
|
|
#include <net/udp.h>
|
|
|
|
/* Indices into udp_bpf_prots[]: one sockmap-patched proto per address family. */
enum {
	UDP_BPF_IPV4,
	UDP_BPF_IPV6,
	UDP_BPF_NUM_PROTS,	/* number of entries, not a valid index */
};
|
|
|
|
/* The IPv6 UDP proto the v6 entry of udp_bpf_prots was last built from.
 * Read with smp_load_acquire / written with smp_store_release; udpv6_prot
 * cannot be referenced directly here since ipv6 may be a module.
 */
static struct proto *udpv6_prot_saved __read_mostly;
/* Serializes (re)building the IPv6 entry of udp_bpf_prots. */
static DEFINE_SPINLOCK(udpv6_prot_lock);
/* Copies of the base UDP protos with sockmap callbacks patched in. */
static struct proto udp_bpf_prots[UDP_BPF_NUM_PROTS];
|
|
|
|
static void udp_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
|
|
{
|
|
*prot = *base;
|
|
prot->unhash = sock_map_unhash;
|
|
prot->close = sock_map_close;
|
|
}
|
|
|
|
/* Lazily (re)build the IPv6 entry of udp_bpf_prots from @ops.
 *
 * Double-checked locking: the lockless smp_load_acquire() fast path skips
 * the spinlock once the table has been built from this @ops; the recheck
 * under udpv6_prot_lock handles racing callers. smp_store_release() pairs
 * with the acquire load so that a fast-path reader observing the new
 * udpv6_prot_saved also observes the fully rebuilt proto.
 */
static void udp_bpf_check_v6_needs_rebuild(struct proto *ops)
{
	if (unlikely(ops != smp_load_acquire(&udpv6_prot_saved))) {
		spin_lock_bh(&udpv6_prot_lock);
		if (likely(ops != udpv6_prot_saved)) {
			udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV6], ops);
			smp_store_release(&udpv6_prot_saved, ops);
		}
		spin_unlock_bh(&udpv6_prot_lock);
	}
}
|
|
|
|
/* Build the IPv4 entry of udp_bpf_prots once at boot; udp_prot is built in,
 * so unlike IPv6 no lazy rebuild is needed.
 */
static int __init udp_bpf_v4_build_proto(void)
{
	udp_bpf_rebuild_protos(&udp_bpf_prots[UDP_BPF_IPV4], &udp_prot);
	return 0;
}
core_initcall(udp_bpf_v4_build_proto);
|
|
|
|
struct proto *udp_bpf_get_proto(struct sock *sk, struct sk_psock *psock)
|
|
{
|
|
int family = sk->sk_family == AF_INET ? UDP_BPF_IPV4 : UDP_BPF_IPV6;
|
|
|
|
if (sk->sk_family == AF_INET6)
|
|
udp_bpf_check_v6_needs_rebuild(psock->sk_proto);
|
|
|
|
return &udp_bpf_prots[family];
|
|
}
|