commit 4b45cd81f7
pcpu_freelist_populate() initializes nr_elems / num_possible_cpus() + 1
free nodes on each of the first CPUs, then possibly fewer nodes on one
CPU, and leaves the remaining CPUs with no nodes at all. For example,
with nr_elems == 256 and num_possible_cpus() == 32, CPUs 0-27 each get
9 free nodes, CPU 28 gets 4, and CPUs 29-31 get none, when each CPU
should get 8 nodes.
This patch first initializes nr_elems / num_possible_cpus() free nodes
for each CPU, then distributes the remaining nodes one per CPU until
none are left.
Fixes: e19494edab ("bpf: introduce percpu_freelist")
Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20221110122128.105214-1-xukuohai@huawei.com
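
To make the before/after arithmetic concrete, here is a standalone
userspace sketch (not kernel code; old_nodes_for_cpu() and
new_nodes_for_cpu() are illustrative names that model the two schemes):

#include <stdio.h>

/* Old scheme: nr_elems / nr_cpus + 1 nodes per CPU until elements run out. */
static unsigned int old_nodes_for_cpu(unsigned int nr_elems,
				      unsigned int nr_cpus, unsigned int cpu)
{
	unsigned int per_cpu = nr_elems / nr_cpus + 1;	/* 9 for 256/32 */
	unsigned int start = cpu * per_cpu;

	if (start >= nr_elems)
		return 0;
	return (nr_elems - start < per_cpu) ? nr_elems - start : per_cpu;
}

/* New scheme: nr_elems / nr_cpus each; first nr_elems % nr_cpus CPUs get +1. */
static unsigned int new_nodes_for_cpu(unsigned int nr_elems,
				      unsigned int nr_cpus, unsigned int cpu)
{
	return nr_elems / nr_cpus + (cpu < nr_elems % nr_cpus ? 1 : 0);
}

int main(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < 32; cpu++)
		printf("cpu %2u: old %u, new %u\n", cpu,
		       old_nodes_for_cpu(256, 32, cpu),
		       new_nodes_for_cpu(256, 32, cpu));
	return 0;	/* old: 9 x28, 4, 0, 0, 0 -- new: 8 for every CPU */
}
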
201 lines · 4.7 KiB · C
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include "percpu_freelist.h"

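/*
 * Allocate one freelist head per possible CPU, plus a shared extralist
 * that serves as a fallback when a per-CPU lock cannot be taken.
 */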
int pcpu_freelist_init(struct pcpu_freelist *s)
{
	int cpu;

	s->freelist = alloc_percpu(struct pcpu_freelist_head);
	if (!s->freelist)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct pcpu_freelist_head *head = per_cpu_ptr(s->freelist, cpu);

		raw_spin_lock_init(&head->lock);
		head->first = NULL;
	}
	raw_spin_lock_init(&s->extralist.lock);
	s->extralist.first = NULL;
	return 0;
}

void pcpu_freelist_destroy(struct pcpu_freelist *s)
{
	free_percpu(s->freelist);
}

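/*
 * Link @node at the front of @head. Callers must hold head->lock, or
 * guarantee the list is not yet visible to any other CPU (as in
 * pcpu_freelist_populate()).
 */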
static inline void pcpu_freelist_push_node(struct pcpu_freelist_head *head,
					   struct pcpu_freelist_node *node)
{
	node->next = head->first;
	WRITE_ONCE(head->first, node);
}

static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
					 struct pcpu_freelist_node *node)
{
	raw_spin_lock(&head->lock);
	pcpu_freelist_push_node(head, node);
	raw_spin_unlock(&head->lock);
}

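/*
 * Opportunistically push onto the shared extralist; returns false
 * without pushing if the extralist lock is contended.
 */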
static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s,
						struct pcpu_freelist_node *node)
{
	if (!raw_spin_trylock(&s->extralist.lock))
		return false;

	pcpu_freelist_push_node(&s->extralist, node);
	raw_spin_unlock(&s->extralist.lock);
	return true;
}

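/*
 * From NMI context a plain raw_spin_lock() could deadlock against the
 * interrupted lock holder, so only trylock is used: keep cycling over
 * all per-CPU lists (starting at the current CPU) and the extralist
 * until the node lands somewhere.
 */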
static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s,
					     struct pcpu_freelist_node *node)
{
	int cpu, orig_cpu;

	orig_cpu = raw_smp_processor_id();
	while (1) {
		for_each_cpu_wrap(cpu, cpu_possible_mask, orig_cpu) {
			struct pcpu_freelist_head *head;

			head = per_cpu_ptr(s->freelist, cpu);
			if (raw_spin_trylock(&head->lock)) {
				pcpu_freelist_push_node(head, node);
				raw_spin_unlock(&head->lock);
				return;
			}
		}

		/* cannot lock any per cpu lock, try extralist */
		if (pcpu_freelist_try_push_extra(s, node))
			return;
	}
}

void __pcpu_freelist_push(struct pcpu_freelist *s,
			  struct pcpu_freelist_node *node)
{
	if (in_nmi())
		___pcpu_freelist_push_nmi(s, node);
	else
		___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);
}

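/*
 * Same as __pcpu_freelist_push() but with local interrupts disabled,
 * for callers that may run with interrupts enabled.
 */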
void pcpu_freelist_push(struct pcpu_freelist *s,
			struct pcpu_freelist_node *node)
{
	unsigned long flags;

	local_irq_save(flags);
	__pcpu_freelist_push(s, node);
	local_irq_restore(flags);
}

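/*
 * Carve @buf into @nr_elems nodes of @elem_size bytes and spread them
 * evenly: every CPU gets nr_elems / num_possible_cpus() nodes, and the
 * first nr_elems % num_possible_cpus() CPUs get one extra node each.
 */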
void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
			    u32 nr_elems)
{
	struct pcpu_freelist_head *head;
	unsigned int cpu, cpu_idx, i, j, n, m;

	n = nr_elems / num_possible_cpus();
	m = nr_elems % num_possible_cpus();

	cpu_idx = 0;
	for_each_possible_cpu(cpu) {
		head = per_cpu_ptr(s->freelist, cpu);
		j = n + (cpu_idx < m ? 1 : 0);
		for (i = 0; i < j; i++) {
			/* No locking required as this is not visible yet. */
			pcpu_freelist_push_node(head, buf);
			buf += elem_size;
		}
		cpu_idx++;
	}
}

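/*
 * Pop a node, starting from the current CPU's list and scanning the
 * other CPUs' lists, falling back to the shared extralist last.
 */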
static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int cpu;

	for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id()) {
		head = per_cpu_ptr(s->freelist, cpu);
		if (!READ_ONCE(head->first))
			continue;
		raw_spin_lock(&head->lock);
		node = head->first;
		if (node) {
			WRITE_ONCE(head->first, node->next);
			raw_spin_unlock(&head->lock);
			return node;
		}
		raw_spin_unlock(&head->lock);
	}

	/* per cpu lists are all empty, try extralist */
	if (!READ_ONCE(s->extralist.first))
		return NULL;
	raw_spin_lock(&s->extralist.lock);
	node = s->extralist.first;
	if (node)
		WRITE_ONCE(s->extralist.first, node->next);
	raw_spin_unlock(&s->extralist.lock);
	return node;
}

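/*
 * NMI-safe variant of ___pcpu_freelist_pop(): only trylock is used, so
 * a contended list is skipped rather than spun on.
 */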
static struct pcpu_freelist_node *
___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
{
	struct pcpu_freelist_head *head;
	struct pcpu_freelist_node *node;
	int cpu;

	for_each_cpu_wrap(cpu, cpu_possible_mask, raw_smp_processor_id()) {
		head = per_cpu_ptr(s->freelist, cpu);
		if (!READ_ONCE(head->first))
			continue;
		if (raw_spin_trylock(&head->lock)) {
			node = head->first;
			if (node) {
				WRITE_ONCE(head->first, node->next);
				raw_spin_unlock(&head->lock);
				return node;
			}
			raw_spin_unlock(&head->lock);
		}
	}

	/* cannot pop from per cpu lists, try extralist */
	if (!READ_ONCE(s->extralist.first) || !raw_spin_trylock(&s->extralist.lock))
		return NULL;
	node = s->extralist.first;
	if (node)
		WRITE_ONCE(s->extralist.first, node->next);
	raw_spin_unlock(&s->extralist.lock);
	return node;
}

struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
{
	if (in_nmi())
		return ___pcpu_freelist_pop_nmi(s);
	return ___pcpu_freelist_pop(s);
}

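/*
 * Interrupt-safe wrapper around __pcpu_freelist_pop() for callers that
 * may run with interrupts enabled.
 */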
struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
{
	struct pcpu_freelist_node *ret;
	unsigned long flags;

	local_irq_save(flags);
	ret = __pcpu_freelist_pop(s);
	local_irq_restore(flags);
	return ret;
}
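
For context, a sketch of how a caller might use this API, loosely
modeled on BPF hashtab preallocation. struct my_elem, my_pool_init(),
my_pool_get(), my_pool_put() and the caller-supplied buf are
hypothetical names, not part of the kernel tree:

#include "percpu_freelist.h"

/* The node must be the first member: pcpu_freelist_populate() treats
 * the start of each elem_size chunk of buf as a pcpu_freelist_node.
 */
struct my_elem {
	struct pcpu_freelist_node fnode;
	u64 payload;
};

/* buf must hold nr_elems * sizeof(struct my_elem) bytes. */
static int my_pool_init(struct pcpu_freelist *fl, void *buf, u32 nr_elems)
{
	int err = pcpu_freelist_init(fl);

	if (err)
		return err;
	/* Hand every preallocated element to the freelist up front. */
	pcpu_freelist_populate(fl, buf, sizeof(struct my_elem), nr_elems);
	return 0;
}

static struct my_elem *my_pool_get(struct pcpu_freelist *fl)
{
	struct pcpu_freelist_node *n = pcpu_freelist_pop(fl);

	return n ? container_of(n, struct my_elem, fnode) : NULL;
}

static void my_pool_put(struct pcpu_freelist *fl, struct my_elem *e)
{
	pcpu_freelist_push(fl, &e->fnode);
}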