mm, slub: use migrate_disable() on PREEMPT_RT
We currently use preempt_disable() (directly or via get_cpu_ptr()) to
stabilize the pointer to kmem_cache_cpu. On PREEMPT_RT this would be
incompatible with the list_lock spinlock. We can use migrate_disable()
instead, but that increases overhead on !PREEMPT_RT as it's an
unconditional function call.

In order to get the best available mechanism on both PREEMPT_RT and
!PREEMPT_RT, introduce private slub_get_cpu_ptr() and slub_put_cpu_ptr()
wrappers and use them.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
This commit is contained in:
parent e0a043aa41
commit 25c00c506e

mm/slub.c | 39 (30 additions, 9 deletions)
@@ -118,6 +118,26 @@
  * the fast path and disables lockless freelists.
  */
 
+/*
+ * We could simply use migrate_disable()/enable() but as long as it's a
+ * function call even on !PREEMPT_RT, use inline preempt_disable() there.
+ */
+#ifndef CONFIG_PREEMPT_RT
+#define slub_get_cpu_ptr(var)	get_cpu_ptr(var)
+#define slub_put_cpu_ptr(var)	put_cpu_ptr(var)
+#else
+#define slub_get_cpu_ptr(var)		\
+({					\
+	migrate_disable();		\
+	this_cpu_ptr(var);		\
+})
+#define slub_put_cpu_ptr(var)		\
+do {					\
+	(void)(var);			\
+	migrate_enable();		\
+} while (0)
+#endif
+
 #ifdef CONFIG_SLUB_DEBUG
 #ifdef CONFIG_SLUB_DEBUG_ON
 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
@@ -2852,7 +2872,7 @@ redo:
 	if (unlikely(!pfmemalloc_match_unsafe(page, gfpflags)))
 		goto deactivate_slab;
 
-	/* must check again c->page in case IRQ handler changed it */
+	/* must check again c->page in case we got preempted and it changed */
 	local_irq_save(flags);
 	if (unlikely(page != c->page)) {
 		local_irq_restore(flags);
@@ -2911,7 +2931,8 @@ new_slab:
 	}
 	if (unlikely(!slub_percpu_partial(c))) {
 		local_irq_restore(flags);
-		goto new_objects; /* stolen by an IRQ handler */
+		/* we were preempted and partial list got empty */
+		goto new_objects;
 	}
 
 	page = c->page = slub_percpu_partial(c);
@@ -2927,9 +2948,9 @@ new_objects:
 	if (freelist)
 		goto check_new_page;
 
-	put_cpu_ptr(s->cpu_slab);
+	slub_put_cpu_ptr(s->cpu_slab);
 	page = new_slab(s, gfpflags, node);
-	c = get_cpu_ptr(s->cpu_slab);
+	c = slub_get_cpu_ptr(s->cpu_slab);
 
 	if (unlikely(!page)) {
 		slab_out_of_memory(s, gfpflags, node);
@@ -3012,12 +3033,12 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	 * cpu before disabling preemption. Need to reload cpu area
 	 * pointer.
 	 */
-	c = get_cpu_ptr(s->cpu_slab);
+	c = slub_get_cpu_ptr(s->cpu_slab);
 #endif
 
 	p = ___slab_alloc(s, gfpflags, node, addr, c);
 #ifdef CONFIG_PREEMPT_COUNT
-	put_cpu_ptr(s->cpu_slab);
+	slub_put_cpu_ptr(s->cpu_slab);
 #endif
 	return p;
 }
@@ -3546,7 +3567,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	 * IRQs, which protects against PREEMPT and interrupts
 	 * handlers invoking normal fastpath.
 	 */
-	c = get_cpu_ptr(s->cpu_slab);
+	c = slub_get_cpu_ptr(s->cpu_slab);
 	local_irq_disable();
 
 	for (i = 0; i < size; i++) {
@@ -3592,7 +3613,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
-	put_cpu_ptr(s->cpu_slab);
+	slub_put_cpu_ptr(s->cpu_slab);
 
 	/*
 	 * memcg and kmem_cache debug support and memory initialization.
@@ -3602,7 +3623,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 		slab_want_init_on_alloc(flags, s));
 	return i;
 error:
-	put_cpu_ptr(s->cpu_slab);
+	slub_put_cpu_ptr(s->cpu_slab);
 	slab_post_alloc_hook(s, objcg, flags, i, p, false);
 	__kmem_cache_free_bulk(s, i, p);
 	return 0;
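
The wrapper pattern above can be exercised outside the kernel. Below is a
minimal userspace sketch, assuming only gcc or clang (statement expressions
are a GNU C extension). The kernel primitives are mocked as prints and
main() is illustrative scaffolding; only the slub_get_cpu_ptr() /
slub_put_cpu_ptr() bodies mirror the first hunk of the diff.

#include <stdio.h>

/* mocks standing in for the kernel primitives */
static inline void preempt_disable(void) { puts("preempt_disable()"); }
static inline void preempt_enable(void)  { puts("preempt_enable()");  }
static inline void migrate_disable(void) { puts("migrate_disable()"); }
static inline void migrate_enable(void)  { puts("migrate_enable()");  }

static int cpu_slab; /* stand-in for the per-cpu kmem_cache_cpu */

/* mocked percpu accessors: get_cpu_ptr() = preempt_disable() + deref */
#define this_cpu_ptr(var)	(var)
#define get_cpu_ptr(var)	({ preempt_disable(); this_cpu_ptr(var); })
#define put_cpu_ptr(var)	do { (void)(var); preempt_enable(); } while (0)

#ifndef CONFIG_PREEMPT_RT
/* !PREEMPT_RT: plain get/put_cpu_ptr(), i.e. inline preempt_disable() */
#define slub_get_cpu_ptr(var)	get_cpu_ptr(var)
#define slub_put_cpu_ptr(var)	put_cpu_ptr(var)
#else
/* PREEMPT_RT: pin the task to its CPU but stay preemptible, so a
 * sleeping list_lock can still be taken in the slow path */
#define slub_get_cpu_ptr(var)		\
({					\
	migrate_disable();		\
	this_cpu_ptr(var);		\
})
#define slub_put_cpu_ptr(var)		\
do {					\
	(void)(var);			\
	migrate_enable();		\
} while (0)
#endif

int main(void)
{
	/* in the kernel, the returned pointer would now be stable */
	int *c = slub_get_cpu_ptr(&cpu_slab);

	printf("using cpu slab at %p\n", (void *)c);
	slub_put_cpu_ptr(&cpu_slab);
	return 0;
}

Built with "gcc -Wall sketch.c", the program prints the preempt_disable()/
preempt_enable() pair; with -DCONFIG_PREEMPT_RT it prints the
migrate_disable()/migrate_enable() pair instead. Two details worth noting:
the ({ ... }) statement expression makes slub_get_cpu_ptr() yield
this_cpu_ptr(var) as its value, and the (void)(var) on the put side keeps
the argument referenced (mirroring put_cpu_ptr()'s signature) while
discarding it.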