Merge branch 'bpf_random32'
Daniel Borkmann says:

====================
BPF/random32 updates

BPF update to split the prandom state apart, and to move the *once
helpers to the core. For details, please see individual patches.
Given the changes and since it's in the tree for quite some time,
net-next is a better choice in our opinion.

v1 -> v2:
 - Make DO_ONCE() type-safe, remove the kvec helper. Credits go to
   Alexei Starovoitov for the __VA_ARGS__ hint, thanks!
 - Add a comment to the DO_ONCE() helper as suggested by Alexei.
 - Rework prandom_init_once() helper to the new API.
 - Keep Alexei's Acked-by on the last patch.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit df71842325
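Before the per-file diff, the shape of the new API is easiest to see from a short usage sketch. This is an illustrative, hypothetical kernel-module-style example, not code from this series (demo_seed, demo_key and demo_get_key are invented names); it assumes a tree that already contains the include/linux/once.h added below.

#include <linux/module.h>
#include <linux/once.h>

/* One-time init function; DO_ONCE() forwards its arguments type-safely
 * via __VA_ARGS__, which is the v2 change mentioned in the changelog.
 */
static void demo_seed(u32 *dst, u32 val)
{
	*dst = val;
}

static u32 demo_key;

static u32 demo_get_key(void)
{
	/* demo_seed() runs at most once; afterwards the static key patches
	 * the check out of this fast path.
	 */
	DO_ONCE(demo_seed, &demo_key, 42);
	return demo_key;
}

static int __init demo_init(void)
{
	pr_info("demo key: %u\n", demo_get_key());
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The same DO_ONCE() pattern backs get_random_once() and, further down, prandom_init_once().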
include/linux/bpf.h
@@ -200,4 +200,8 @@ extern const struct bpf_func_proto bpf_get_current_comm_proto;
 extern const struct bpf_func_proto bpf_skb_vlan_push_proto;
 extern const struct bpf_func_proto bpf_skb_vlan_pop_proto;
 
+/* Shared helpers among cBPF and eBPF. */
+void bpf_user_rnd_init_once(void);
+u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+
 #endif /* _LINUX_BPF_H */
include/linux/net.h
@@ -24,7 +24,8 @@
 #include <linux/fcntl.h>	/* For O_CLOEXEC and O_NONBLOCK */
 #include <linux/kmemcheck.h>
 #include <linux/rcupdate.h>
-#include <linux/jump_label.h>
+#include <linux/once.h>
+
 #include <uapi/linux/net.h>
 
 struct poll_table_struct;
@@ -250,22 +251,8 @@ do { \
 } while (0)
 #endif
 
-bool __net_get_random_once(void *buf, int nbytes, bool *done,
-			   struct static_key *done_key);
-
-#define net_get_random_once(buf, nbytes)			\
-	({							\
-		bool ___ret = false;				\
-		static bool ___done = false;			\
-		static struct static_key ___once_key =		\
-			STATIC_KEY_INIT_TRUE;			\
-		if (static_key_true(&___once_key))		\
-			___ret = __net_get_random_once(buf,	\
-						       nbytes,	\
-						       &___done,	\
-						       &___once_key); \
-		___ret;						\
-	})
+#define net_get_random_once(buf, nbytes)			\
+	get_random_once((buf), (nbytes))
 
 int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec,
 		   size_t num, size_t len);
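Existing users of net_get_random_once() need no changes: the macro keeps its name and now simply maps to get_random_once(). A hedged sketch of the typical caller pattern (demo_secret and demo_hash are invented names for illustration; jhash_1word() is the existing helper from linux/jhash.h):

#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/net.h>

static u32 demo_secret __read_mostly;

static u32 demo_hash(u32 saddr)
{
	/* Seeds demo_secret from get_random_bytes() on first use only. */
	net_get_random_once(&demo_secret, sizeof(demo_secret));
	return jhash_1word(saddr, demo_secret);
}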
include/linux/once.h (new file, 57 lines)
@@ -0,0 +1,57 @@
+#ifndef _LINUX_ONCE_H
+#define _LINUX_ONCE_H
+
+#include <linux/types.h>
+#include <linux/jump_label.h>
+
+bool __do_once_start(bool *done, unsigned long *flags);
+void __do_once_done(bool *done, struct static_key *once_key,
+		    unsigned long *flags);
+
+/* Call a function exactly once. The idea of DO_ONCE() is to perform
+ * a function call such as initialization of random seeds, etc, only
+ * once, where DO_ONCE() can live in the fast-path. After @func has
+ * been called with the passed arguments, the static key will patch
+ * out the condition into a nop. DO_ONCE() guarantees type safety of
+ * arguments!
+ *
+ * Note that the following is not equivalent ...
+ *
+ *   DO_ONCE(func, arg);
+ *   DO_ONCE(func, arg);
+ *
+ * ... to this version:
+ *
+ *   void foo(void)
+ *   {
+ *	DO_ONCE(func, arg);
+ *   }
+ *
+ *   foo();
+ *   foo();
+ *
+ * In case the one-time invocation could be triggered from multiple
+ * places, then a common helper function must be defined, so that only
+ * a single static key will be placed there!
+ */
+#define DO_ONCE(func, ...)						     \
+	({								     \
+		bool ___ret = false;					     \
+		static bool ___done = false;				     \
+		static struct static_key ___once_key = STATIC_KEY_INIT_TRUE; \
+		if (static_key_true(&___once_key)) {			     \
+			unsigned long ___flags;				     \
+			___ret = __do_once_start(&___done, &___flags);	     \
+			if (unlikely(___ret)) {				     \
+				func(__VA_ARGS__);			     \
+				__do_once_done(&___done, &___once_key,	     \
+					       &___flags);		     \
+			}						     \
+		}							     \
+		___ret;							     \
+	})
+
+#define get_random_once(buf, nbytes)					     \
+	DO_ONCE(get_random_bytes, (buf), (nbytes))
+
+#endif /* _LINUX_ONCE_H */
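The last paragraph of the comment above matters in practice: every DO_ONCE() invocation site gets its own static key and its own ___done flag. Below is a hedged sketch of the recommended structure when several paths need the same one-time setup (all demo_* names are invented, not from this series):

#include <linux/once.h>
#include <linux/random.h>

static u32 demo_state;

static void demo_setup(u32 *state)
{
	get_random_bytes(state, sizeof(*state));
}

/* Single helper, hence a single static key, no matter how many callers. */
static void demo_setup_once(void)
{
	DO_ONCE(demo_setup, &demo_state);
}

static u32 demo_from_path_a(void)
{
	demo_setup_once();
	return demo_state;
}

static u32 demo_from_path_b(void)
{
	demo_setup_once();
	return demo_state ^ 0xdeadbeef;
}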
include/linux/random.h
@@ -7,6 +7,8 @@
 #define _LINUX_RANDOM_H
 
 #include <linux/list.h>
+#include <linux/once.h>
+
 #include <uapi/linux/random.h>
 
 struct random_ready_callback {
@@ -45,6 +47,10 @@ struct rnd_state {
 
 u32 prandom_u32_state(struct rnd_state *state);
 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
+void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
+
+#define prandom_init_once(pcpu_state) \
+	DO_ONCE(prandom_seed_full_state, (pcpu_state))
 
 /**
  * prandom_u32_max - returns a pseudo-random number in interval [0, ep_ro)
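prandom_init_once() composes DO_ONCE() with the new prandom_seed_full_state() helper, giving callers a self-seeding per-CPU PRNG with state separate from net_rand_state. A hedged sketch of the intended pattern (demo_rnd_state and demo_prandom_u32 are invented names; the bpf_user_rnd_* code in the next hunk is the real in-tree user):

#include <linux/percpu.h>
#include <linux/random.h>

static DEFINE_PER_CPU(struct rnd_state, demo_rnd_state);

static u32 demo_prandom_u32(void)
{
	struct rnd_state *state;
	u32 res;

	/* Seeds all per-CPU states on first use; a nop afterwards. */
	prandom_init_once(&demo_rnd_state);

	state = &get_cpu_var(demo_rnd_state);
	res = prandom_u32_state(state);
	put_cpu_var(demo_rnd_state);

	return res;
}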
kernel/bpf/core.c
@@ -731,6 +731,32 @@ void bpf_prog_free(struct bpf_prog *fp)
 }
 EXPORT_SYMBOL_GPL(bpf_prog_free);
 
+/* RNG for unprivileged user space with separated state from prandom_u32(). */
+static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
+
+void bpf_user_rnd_init_once(void)
+{
+	prandom_init_once(&bpf_user_rnd_state);
+}
+
+u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	/* Should someone ever have the rather unwise idea to use some
+	 * of the registers passed into this function, then note that
+	 * this function is called from native eBPF and classic-to-eBPF
+	 * transformations. Register assignments from both sides are
+	 * different, f.e. classic always sets fn(ctx, A, X) here.
+	 */
+	struct rnd_state *state;
+	u32 res;
+
+	state = &get_cpu_var(bpf_user_rnd_state);
+	res = prandom_u32_state(state);
+	put_cpu_var(state);
+
+	return res;
+}
+
 /* Weak definitions of helper functions in case we don't have bpf syscall. */
 const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
 const struct bpf_func_proto bpf_map_update_elem_proto __weak;
kernel/bpf/helpers.c
@@ -93,13 +93,8 @@ const struct bpf_func_proto bpf_map_delete_elem_proto = {
 	.arg2_type	= ARG_PTR_TO_MAP_KEY,
 };
 
-static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
-{
-	return prandom_u32();
-}
-
 const struct bpf_func_proto bpf_get_prandom_u32_proto = {
-	.func		= bpf_get_prandom_u32,
+	.func		= bpf_user_rnd_u32,
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 };
kernel/bpf/syscall.c
@@ -404,6 +404,8 @@ static void fixup_bpf_calls(struct bpf_prog *prog)
 
 		if (insn->imm == BPF_FUNC_get_route_realm)
 			prog->dst_needed = 1;
+		if (insn->imm == BPF_FUNC_get_prandom_u32)
+			bpf_user_rnd_init_once();
 		if (insn->imm == BPF_FUNC_tail_call) {
 			/* mark bpf_tail_call as different opcode
 			 * to avoid conditional branch in
lib/Makefile
@@ -26,7 +26,8 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
 	 bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
 	 gcd.o lcm.o list_sort.o uuid.o flex_array.o iov_iter.o clz_ctz.o \
 	 bsearch.o find_bit.o llist.o memweight.o kfifo.o \
-	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o
+	 percpu-refcount.o percpu_ida.o rhashtable.o reciprocal_div.o \
+	 once.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += hexdump.o
lib/once.c (new file, 62 lines)
@@ -0,0 +1,62 @@
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/once.h>
+#include <linux/random.h>
+
+struct once_work {
+	struct work_struct work;
+	struct static_key *key;
+};
+
+static void once_deferred(struct work_struct *w)
+{
+	struct once_work *work;
+
+	work = container_of(w, struct once_work, work);
+	BUG_ON(!static_key_enabled(work->key));
+	static_key_slow_dec(work->key);
+	kfree(work);
+}
+
+static void once_disable_jump(struct static_key *key)
+{
+	struct once_work *w;
+
+	w = kmalloc(sizeof(*w), GFP_ATOMIC);
+	if (!w)
+		return;
+
+	INIT_WORK(&w->work, once_deferred);
+	w->key = key;
+	schedule_work(&w->work);
+}
+
+static DEFINE_SPINLOCK(once_lock);
+
+bool __do_once_start(bool *done, unsigned long *flags)
+	__acquires(once_lock)
+{
+	spin_lock_irqsave(&once_lock, *flags);
+	if (*done) {
+		spin_unlock_irqrestore(&once_lock, *flags);
+		/* Keep sparse happy by restoring an even lock count on
+		 * this lock. In case we return here, we don't call into
+		 * __do_once_done but return early in the DO_ONCE() macro.
+		 */
+		__acquire(once_lock);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(__do_once_start);
+
+void __do_once_done(bool *done, struct static_key *once_key,
+		    unsigned long *flags)
+	__releases(once_lock)
+{
+	*done = true;
+	spin_unlock_irqrestore(&once_lock, *flags);
+	once_disable_jump(once_key);
+}
+EXPORT_SYMBOL(__do_once_done);
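The control flow split across __do_once_start()/__do_once_done() is easier to follow outside the macro. Below is a rough userspace analogue in plain pthreads, not kernel code and not part of this series; in the kernel, the "disarm the fast path" step is deferred to a workqueue because static_key_slow_dec() cannot run in the atomic context that holds once_lock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t once_lock = PTHREAD_MUTEX_INITIALIZER;
static bool fast_path_armed = true;	/* stands in for the static key */
static bool done;

static void init_func(int seed)
{
	printf("initialized with %d\n", seed);
}

static void do_once(int seed)
{
	if (fast_path_armed) {			/* static_key_true()   */
		pthread_mutex_lock(&once_lock);	/* __do_once_start()   */
		if (!done) {
			init_func(seed);	/* func(__VA_ARGS__)   */
			done = true;		/* __do_once_done()... */
			fast_path_armed = false; /* ...kernel defers this */
		}
		pthread_mutex_unlock(&once_lock);
	}
}

int main(void)
{
	do_once(1);
	do_once(2);	/* no-op: already done */
	return 0;
}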
lib/random32.c
@@ -181,7 +181,7 @@ void prandom_seed(u32 entropy)
 	 * No locking on the CPUs, but then somewhat random results are, well,
 	 * expected.
 	 */
-	for_each_possible_cpu (i) {
+	for_each_possible_cpu(i) {
 		struct rnd_state *state = &per_cpu(net_rand_state, i);
 
 		state->s1 = __seed(state->s1 ^ entropy, 2U);
@@ -201,7 +201,7 @@ static int __init prandom_init(void)
 	prandom_state_selftest();
 
 	for_each_possible_cpu(i) {
-		struct rnd_state *state = &per_cpu(net_rand_state,i);
+		struct rnd_state *state = &per_cpu(net_rand_state, i);
 		u32 weak_seed = (i + jiffies) ^ random_get_entropy();
 
 		prandom_seed_early(state, weak_seed, true);
@@ -238,13 +238,30 @@ static void __init __prandom_start_seed_timer(void)
 	add_timer(&seed_timer);
 }
 
+void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state)
+{
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct rnd_state *state = per_cpu_ptr(pcpu_state, i);
+		u32 seeds[4];
+
+		get_random_bytes(&seeds, sizeof(seeds));
+		state->s1 = __seed(seeds[0], 2U);
+		state->s2 = __seed(seeds[1], 8U);
+		state->s3 = __seed(seeds[2], 16U);
+		state->s4 = __seed(seeds[3], 128U);
+
+		prandom_warmup(state);
+	}
+}
+
 /*
  * Generate better values after random number generator
  * is fully initialized.
  */
 static void __prandom_reseed(bool late)
 {
 	int i;
 	unsigned long flags;
 	static bool latch = false;
 	static DEFINE_SPINLOCK(lock);
@@ -266,19 +283,7 @@ static void __prandom_reseed(bool late)
 		goto out;
 
 	latch = true;
-
-	for_each_possible_cpu(i) {
-		struct rnd_state *state = &per_cpu(net_rand_state,i);
-		u32 seeds[4];
-
-		get_random_bytes(&seeds, sizeof(seeds));
-		state->s1 = __seed(seeds[0], 2U);
-		state->s2 = __seed(seeds[1], 8U);
-		state->s3 = __seed(seeds[2], 16U);
-		state->s4 = __seed(seeds[3], 128U);
-
-		prandom_warmup(state);
-	}
+	prandom_seed_full_state(&net_rand_state);
 out:
 	spin_unlock_irqrestore(&lock, flags);
 }
net/core/filter.c
@@ -149,12 +149,6 @@ static u64 __get_raw_cpu_id(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 	return raw_smp_processor_id();
 }
 
-/* note that this only generates 32-bit random numbers */
-static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
-{
-	return prandom_u32();
-}
-
 static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
 			      struct bpf_insn *insn_buf)
 {
@@ -313,7 +307,8 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 		*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
 		break;
 	case SKF_AD_OFF + SKF_AD_RANDOM:
-		*insn = BPF_EMIT_CALL(__get_random_u32);
+		*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
+		bpf_user_rnd_init_once();
 		break;
 	}
 	break;
net/core/utils.c
@@ -348,52 +348,3 @@ void inet_proto_csum_replace_by_diff(__sum16 *sum, struct sk_buff *skb,
 	}
 }
 EXPORT_SYMBOL(inet_proto_csum_replace_by_diff);
-
-struct __net_random_once_work {
-	struct work_struct work;
-	struct static_key *key;
-};
-
-static void __net_random_once_deferred(struct work_struct *w)
-{
-	struct __net_random_once_work *work =
-		container_of(w, struct __net_random_once_work, work);
-	BUG_ON(!static_key_enabled(work->key));
-	static_key_slow_dec(work->key);
-	kfree(work);
-}
-
-static void __net_random_once_disable_jump(struct static_key *key)
-{
-	struct __net_random_once_work *w;
-
-	w = kmalloc(sizeof(*w), GFP_ATOMIC);
-	if (!w)
-		return;
-
-	INIT_WORK(&w->work, __net_random_once_deferred);
-	w->key = key;
-	schedule_work(&w->work);
-}
-
-bool __net_get_random_once(void *buf, int nbytes, bool *done,
-			   struct static_key *once_key)
-{
-	static DEFINE_SPINLOCK(lock);
-	unsigned long flags;
-
-	spin_lock_irqsave(&lock, flags);
-	if (*done) {
-		spin_unlock_irqrestore(&lock, flags);
-		return false;
-	}
-
-	get_random_bytes(buf, nbytes);
-	*done = true;
-	spin_unlock_irqrestore(&lock, flags);
-
-	__net_random_once_disable_jump(once_key);
-
-	return true;
-}
-EXPORT_SYMBOL(__net_get_random_once);