linux-next/include/linux/sock_diag.h
Daniel Borkmann 92acdc58ab bpf, net: Rework cookie generator as per-cpu one
With its use in BPF, the cookie generator can be called very frequently,
in particular when used from cgroup v2 hooks (e.g. connect / sendmsg)
attached to the root cgroup, for example in v1/v2 mixed environments.
When there is high churn on sockets in the system, many parallel requests
hit the bpf_get_socket_cookie() and bpf_get_netns_cookie() helpers, which
then causes contention on the atomic counter.

As similarly done in f991bd2e14 ("fs: introduce a per-cpu last_ino
allocator"), add a small helper library that both can use for the 64 bit
counters. Given this can be called from different contexts, we also need
to deal with potential nested calls, even though in practice they are
considered extremely rare. One idea, as suggested by Eric Dumazet, is to
use a reverse counter for this situation: since we don't expect 64 bit
overflows anyway, taking nested allocations from the far end of the
counter space avoids the bigger gaps that a plain batch-wise increase
would leave. Even on machines with a small number of cores (e.g. 4),
cookie generation shrinks from min/max/med/avg (ns) of 22/50/40/38.9
down to 10/35/14/17.3 when run in parallel from multiple CPUs.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Acked-by: Martin KaFai Lau <kafai@fb.com>
Cc: Eric Dumazet <eric.dumazet@gmail.com>
Link: https://lore.kernel.org/bpf/8a80b8d27d3c49f9a14e1d5213c19d8be87d1dc8.1601477936.git.daniel@iogearbox.net
2020-09-30 11:50:35 -07:00

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SOCK_DIAG_H__
#define __SOCK_DIAG_H__

#include <linux/netlink.h>
#include <linux/user_namespace.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>

struct sk_buff;
struct nlmsghdr;
struct sock;

struct sock_diag_handler {
        __u8 family;
        int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
        int (*get_info)(struct sk_buff *skb, struct sock *sk);
        int (*destroy)(struct sk_buff *skb, struct nlmsghdr *nlh);
};

int sock_diag_register(const struct sock_diag_handler *h);
void sock_diag_unregister(const struct sock_diag_handler *h);

void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));

u64 __sock_gen_cookie(struct sock *sk);

/* Preemption must be off around __sock_gen_cookie(); this wrapper handles it. */
static inline u64 sock_gen_cookie(struct sock *sk)
{
        u64 cookie;

        preempt_disable();
        cookie = __sock_gen_cookie(sk);
        preempt_enable();
        return cookie;
}

int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);

int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
                             struct sk_buff *skb, int attrtype);

static inline
enum sknetlink_groups sock_diag_destroy_group(const struct sock *sk)
{
        switch (sk->sk_family) {
        case AF_INET:
                if (sk->sk_type == SOCK_RAW)
                        return SKNLGRP_NONE;

                switch (sk->sk_protocol) {
                case IPPROTO_TCP:
                        return SKNLGRP_INET_TCP_DESTROY;
                case IPPROTO_UDP:
                        return SKNLGRP_INET_UDP_DESTROY;
                default:
                        return SKNLGRP_NONE;
                }
        case AF_INET6:
                if (sk->sk_type == SOCK_RAW)
                        return SKNLGRP_NONE;

                switch (sk->sk_protocol) {
                case IPPROTO_TCP:
                        return SKNLGRP_INET6_TCP_DESTROY;
                case IPPROTO_UDP:
                        return SKNLGRP_INET6_UDP_DESTROY;
                default:
                        return SKNLGRP_NONE;
                }
        default:
                return SKNLGRP_NONE;
        }
}

static inline
bool sock_diag_has_destroy_listeners(const struct sock *sk)
{
        const struct net *n = sock_net(sk);
        const enum sknetlink_groups group = sock_diag_destroy_group(sk);

        return group != SKNLGRP_NONE && n->diag_nlsk &&
                netlink_has_listeners(n->diag_nlsk, group);
}
void sock_diag_broadcast_destroy(struct sock *sk);

int sock_diag_destroy(struct sock *sk, int err);

#endif
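
As a usage note, protocol diag modules plug into this interface by filling
in a struct sock_diag_handler and registering it. The skeleton below is
hypothetical (all mydiag_* names and the chosen family are placeholders)
and only shows the registration pattern; sock_diag_register() refuses a
family that is out of range or already has a handler.

/* Hypothetical skeleton of a protocol diag module using this header. */
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>

static int mydiag_dump(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        /* Walk the protocol's sockets and emit one netlink reply each. */
        return 0;
}

static const struct sock_diag_handler mydiag_handler = {
        .family = AF_PACKET,    /* placeholder; a real module uses its own family */
        .dump   = mydiag_dump,
};

static int __init mydiag_init(void)
{
        return sock_diag_register(&mydiag_handler);
}

static void __exit mydiag_exit(void)
{
        sock_diag_unregister(&mydiag_handler);
}

module_init(mydiag_init);
module_exit(mydiag_exit);
MODULE_LICENSE("GPL");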