SLUB: use list_for_each_entry for loops over all slabs
Use list_for_each_entry() instead of list_for_each().

Get rid of for_all_slabs(). It had only one user. So fold it into the
callback. This also gets rid of cpu_slab_flush.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent  2492268472
commit  5b95a4acf1

Changed file: mm/slub.c (51 lines changed)
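The conversion is mechanical, and a small illustration may help readers who have not seen the two iteration idioms side by side. The following is a minimal userspace sketch, not the kernel's <linux/list.h>: the macros below are simplified re-creations of the kernel ones, and my_cache/caches are hypothetical names used only for illustration. It shows why list_for_each_entry() removes the extra struct list_head cursor and the hand-written container_of() call. Builds with GCC or Clang (uses typeof).

/*
 * Sketch of the two list-iteration idioms the commit swaps.
 * Simplified re-creation for illustration only; not kernel code.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Old idiom: walk raw list_head nodes, convert to the entry by hand. */
#define list_for_each(pos, head) \
	for (pos = (head)->next; pos != (head); pos = pos->next)

/* New idiom: the cursor is the entry itself; conversion is built in. */
#define list_for_each_entry(pos, head, member)                        \
	for (pos = container_of((head)->next, typeof(*pos), member);  \
	     &pos->member != (head);                                   \
	     pos = container_of(pos->member.next, typeof(*pos), member))

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct my_cache {
	const char *name;
	struct list_head list;
};

int main(void)
{
	struct list_head caches = LIST_HEAD_INIT(caches);
	struct my_cache a = { .name = "cache-a" }, b = { .name = "cache-b" };
	struct list_head *h;
	struct my_cache *s;

	list_add_tail(&a.list, &caches);
	list_add_tail(&b.list, &caches);

	/* Before: extra list_head cursor plus an explicit container_of(). */
	list_for_each(h, &caches)
		printf("old: %s\n", container_of(h, struct my_cache, list)->name);

	/* After: one cursor of the entry type, no manual conversion. */
	list_for_each_entry(s, &caches, list)
		printf("new: %s\n", s->name);

	return 0;
}

In slab_cpuup_callback() the same conversion also lets the for_all_slabs()/cpu_slab_flush() indirection be folded into a plain list_for_each_entry() loop with the interrupt save/restore inlined, as the diff below shows.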
@@ -2573,7 +2573,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 		size_t align, unsigned long flags,
 		void (*ctor)(void *, struct kmem_cache *, unsigned long))
 {
-	struct list_head *h;
+	struct kmem_cache *s;
 
 	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
 		return NULL;
@@ -2585,10 +2585,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 	align = calculate_alignment(flags, align, size);
 	size = ALIGN(size, align);
 
-	list_for_each(h, &slab_caches) {
-		struct kmem_cache *s =
-			container_of(h, struct kmem_cache, list);
-
+	list_for_each_entry(s, &slab_caches, list) {
 		if (slab_unmergeable(s))
 			continue;
 
@@ -2670,33 +2667,6 @@ void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_zalloc);
 
 #ifdef CONFIG_SMP
-static void for_all_slabs(void (*func)(struct kmem_cache *, int), int cpu)
-{
-	struct list_head *h;
-
-	down_read(&slub_lock);
-	list_for_each(h, &slab_caches) {
-		struct kmem_cache *s =
-			container_of(h, struct kmem_cache, list);
-
-		func(s, cpu);
-	}
-	up_read(&slub_lock);
-}
-
-/*
- * Version of __flush_cpu_slab for the case that interrupts
- * are enabled.
- */
-static void cpu_slab_flush(struct kmem_cache *s, int cpu)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__flush_cpu_slab(s, cpu);
-	local_irq_restore(flags);
-}
-
 /*
  * Use the cpu notifier to insure that the cpu slabs are flushed when
  * necessary.
@@ -2705,13 +2675,21 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 		unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
+	struct kmem_cache *s;
+	unsigned long flags;
 
 	switch (action) {
 	case CPU_UP_CANCELED:
 	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		for_all_slabs(cpu_slab_flush, cpu);
+		down_read(&slub_lock);
+		list_for_each_entry(s, &slab_caches, list) {
+			local_irq_save(flags);
+			__flush_cpu_slab(s, cpu);
+			local_irq_restore(flags);
+		}
+		up_read(&slub_lock);
 		break;
 	default:
 		break;
@@ -3736,7 +3714,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
 
 static int __init slab_sysfs_init(void)
 {
-	struct list_head *h;
+	struct kmem_cache *s;
 	int err;
 
 	err = subsystem_register(&slab_subsys);
@@ -3747,10 +3725,7 @@ static int __init slab_sysfs_init(void)
 
 	slab_state = SYSFS;
 
-	list_for_each(h, &slab_caches) {
-		struct kmem_cache *s =
-			container_of(h, struct kmem_cache, list);
-
+	list_for_each_entry(s, &slab_caches, list) {
 		err = sysfs_slab_add(s);
 		BUG_ON(err);
 	}