mirror of
https://github.com/edk2-porting/linux-next.git
synced 2024-12-20 03:04:01 +08:00
bpf: hash: move select_bucket() out of htab's spinlock
The spinlock is just used for protecting the per-bucket hlist, so it isn't needed for selecting a bucket. Acked-by: Daniel Borkmann <daniel@iogearbox.net> Signed-off-by: Ming Lei <tom.leiming@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
6591f1e666
commit
45d8390c56
@@ -248,12 +248,11 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
memcpy(l_new->key + round_up(key_size, 8), value, map->value_size);
|
||||
|
||||
l_new->hash = htab_map_hash(l_new->key, key_size);
|
||||
head = select_bucket(htab, l_new->hash);
|
||||
|
||||
/* bpf_map_update_elem() can be called in_irq() */
|
||||
raw_spin_lock_irqsave(&htab->lock, flags);
|
||||
|
||||
head = select_bucket(htab, l_new->hash);
|
||||
|
||||
l_old = lookup_elem_raw(head, l_new->hash, key, key_size);
|
||||
|
||||
if (!l_old && unlikely(atomic_read(&htab->count) >= map->max_entries)) {
|
||||
@@ -310,11 +309,10 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
|
||||
key_size = map->key_size;
|
||||
|
||||
hash = htab_map_hash(key, key_size);
|
||||
head = select_bucket(htab, hash);
|
||||
|
||||
raw_spin_lock_irqsave(&htab->lock, flags);
|
||||
|
||||
head = select_bucket(htab, hash);
|
||||
|
||||
l = lookup_elem_raw(head, hash, key, key_size);
|
||||
|
||||
if (l) {
|
||||
|
Loading…
Reference in New Issue
Block a user