slab updates for 6.3

-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEjUuTAak14xi+SF7M4CHKc/GJqRAFAmP003YACgkQ4CHKc/GJ
qRA2Pgf/XzaHWvxPiIZA8BO8rCi6VM3ogoJ70EsMcHyPreh5bqN0wwrpQLNe6ZXT
cZq1vQOhODNr0133YgiO3oZGH5rd/UXbXCR+mHAsqTKBZuAHtJ/+qtWlcEm9paag
4OrfKl8NvV+3qY1f2UnZ8Jdc+xMxGYTetq4ddu94Xf1c8u4IoaktIBkBrQs2j5Uc
0Eq7iv3dSgjSVleF9AmodQbYguwPiPYq+LWX4lBXwn1sgxxN0jfIKitpzZV0ISi3
gD3HHqh52QtXDBZbL/UZh6naL5Vtfir68UXDfpjRK3BklL1Bwd/l9ww/W4Q1Any8
hB06GMXFbPY86ZD7ZxBHQyUpWcnzGw==
=dUd6
-----END PGP SIGNATURE-----

Merge tag 'slab-for-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab updates from Vlastimil Babka:
 "This time it's just a bunch of smaller cleanups and fixes for SLAB
  and SLUB:

  - Make it possible to use kmem_cache_alloc_bulk() early in boot when
    interrupts are not yet enabled, as code doing that started to
    appear via new maple tree users (Thomas Gleixner)

  - Fix debugfs-related memory leak in SLUB (Greg Kroah-Hartman)

  - Use the standard idiom to get head page of folio (SeongJae Park)

  - Simplify and inline is_debug_pagealloc_cache() in SLAB (lvqian)

  - Remove unused variable in SLAB (Gou Hao)"

* tag 'slab-for-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm, slab/slub: Ensure kmem_cache_alloc_bulk() is available early
  mm/slub: fix memory leak with using debugfs_lookup()
  mm/slab.c: cleanup is_debug_pagealloc_cache()
  mm/sl{a,u}b: fix wrong usages of folio_page() for getting head pages
  mm/slab: remove unused slab_early_init
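A note on the first item above: local_irq_disable()/local_irq_enable() pair unconditionally, so the enable side switches interrupts back on even if they were already disabled on entry — which is exactly the state during early boot. local_irq_save()/local_irq_restore() preserve the caller's interrupt state instead, and that is what the conversions below rely on. A minimal sketch of the distinction (do_work() is a hypothetical placeholder, not code from this commit):

        unsigned long flags;

        /*
         * Unsafe if interrupts may already be off (early boot):
         * the enable side unconditionally re-enables them.
         */
        local_irq_disable();
        do_work();
        local_irq_enable();

        /* Safe: restores whatever interrupt state the caller had. */
        local_irq_save(flags);
        do_work();
        local_irq_restore(flags);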
commit cd43b50686

 mm/slab.c | 34 +++++++++++++++-------------------
 mm/slub.c | 13 +++++++------
 2 files changed, 22 insertions(+), 25 deletions(-)

--- a/mm/slab.c
+++ b/mm/slab.c
@@ -220,7 +220,6 @@ static inline void fixup_objfreelist_debug(struct kmem_cache *cachep,
 static inline void fixup_slab_list(struct kmem_cache *cachep,
                                 struct kmem_cache_node *n, struct slab *slab,
                                 void **list);
-static int slab_early_init = 1;
 
 #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
 
@@ -1249,8 +1248,6 @@ void __init kmem_cache_init(void)
         slab_state = PARTIAL_NODE;
         setup_kmalloc_cache_index_table();
 
-        slab_early_init = 0;
-
         /* 5) Replace the bootstrap kmem_cache_node */
         {
                 int nid;
@@ -1389,7 +1386,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
 
         BUG_ON(!folio_test_slab(folio));
         __slab_clear_pfmemalloc(slab);
-        page_mapcount_reset(folio_page(folio, 0));
+        page_mapcount_reset(&folio->page);
         folio->mapping = NULL;
         /* Make the mapping reset visible before clearing the flag */
         smp_wmb();
@@ -1398,7 +1395,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
         if (current->reclaim_state)
                 current->reclaim_state->reclaimed_slab += 1 << order;
         unaccount_slab(slab, order, cachep);
-        __free_pages(folio_page(folio, 0), order);
+        __free_pages(&folio->page, order);
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
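The two kmem_freepages() hunks above (and the matching mm/slub.c hunk further down) replace folio_page(folio, 0) with &folio->page. Both expressions yield the folio's head page, but &folio->page is the standard idiom and states the intent — "the head page", not "the page at index 0" — directly. A hedged sketch of the equivalence (the local variables are illustrative, not from this commit):

        struct folio *folio = virt_to_folio(objp);

        /* preferred: a folio embeds its head page directly */
        struct page *head = &folio->page;

        /* equivalent but indirect: compute the page at index 0 */
        struct page *same = folio_page(folio, 0);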
@@ -1413,13 +1410,10 @@ static void kmem_rcu_free(struct rcu_head *head)
 }
 
 #if DEBUG
-static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
+static inline bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
 {
-        if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
-                (cachep->size % PAGE_SIZE) == 0)
-                return true;
-
-        return false;
+        return debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
+                        ((cachep->size % PAGE_SIZE) == 0);
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
@@ -3479,14 +3473,15 @@ cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                           void **p)
 {
-        size_t i;
         struct obj_cgroup *objcg = NULL;
+        unsigned long irqflags;
+        size_t i;
 
         s = slab_pre_alloc_hook(s, NULL, &objcg, size, flags);
         if (!s)
                 return 0;
 
-        local_irq_disable();
+        local_irq_save(irqflags);
         for (i = 0; i < size; i++) {
                 void *objp = kfence_alloc(s, s->object_size, flags) ?:
                              __do_cache_alloc(s, flags, NUMA_NO_NODE);
@@ -3495,7 +3490,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                         goto error;
                 p[i] = objp;
         }
-        local_irq_enable();
+        local_irq_restore(irqflags);
 
         cache_alloc_debugcheck_after_bulk(s, flags, size, p, _RET_IP_);
 
@@ -3508,7 +3503,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
         /* FIXME: Trace call missing. Christoph would like a bulk variant */
         return size;
 error:
-        local_irq_enable();
+        local_irq_restore(irqflags);
         cache_alloc_debugcheck_after_bulk(s, flags, i, p, _RET_IP_);
         slab_post_alloc_hook(s, objcg, flags, i, p, false, s->object_size);
         kmem_cache_free_bulk(s, i, p);
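With the save/restore conversion above, kmem_cache_alloc_bulk() no longer force-enables interrupts on its way out, so it can be called before interrupts are switched on during boot. A usage sketch (the cache, flags, and count are illustrative assumptions, not taken from this commit):

        void *objs[8];
        int n;

        /* Works even while interrupts are still disabled at boot. */
        n = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
        if (!n)
                return -ENOMEM; /* bulk alloc is all-or-nothing: 0 on failure */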
@@ -3610,8 +3605,9 @@ EXPORT_SYMBOL(kmem_cache_free);
 
 void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 {
+        unsigned long flags;
 
-        local_irq_disable();
+        local_irq_save(flags);
         for (int i = 0; i < size; i++) {
                 void *objp = p[i];
                 struct kmem_cache *s;
@@ -3621,9 +3617,9 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 
                 /* called via kfree_bulk */
                 if (!folio_test_slab(folio)) {
-                        local_irq_enable();
+                        local_irq_restore(flags);
                         free_large_kmalloc(folio, objp);
-                        local_irq_disable();
+                        local_irq_save(flags);
                         continue;
                 }
                 s = folio_slab(folio)->slab_cache;
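Note the dance in the hunk above: the interrupt state is restored around free_large_kmalloc() and re-saved afterwards, so the comparatively heavyweight page free does not run with interrupts gratuitously disabled. Because the code now uses restore/save instead of enable/disable, the sequence also keeps interrupts off throughout when the caller entered that way, as an early-boot caller would.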
@@ -3640,7 +3636,7 @@ void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
 
                 __cache_free(s, objp, _RET_IP_);
         }
-        local_irq_enable();
+        local_irq_restore(flags);
 
         /* FIXME: add tracing */
 }
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2066,7 +2066,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
         if (current->reclaim_state)
                 current->reclaim_state->reclaimed_slab += pages;
         unaccount_slab(slab, order, s);
-        __free_pages(folio_page(folio, 0), order);
+        __free_pages(&folio->page, order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
@@ -3913,6 +3913,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
                         size_t size, void **p, struct obj_cgroup *objcg)
 {
         struct kmem_cache_cpu *c;
+        unsigned long irqflags;
         int i;
 
         /*
@@ -3921,7 +3922,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
          * handlers invoking normal fastpath.
          */
         c = slub_get_cpu_ptr(s->cpu_slab);
-        local_lock_irq(&s->cpu_slab->lock);
+        local_lock_irqsave(&s->cpu_slab->lock, irqflags);
 
         for (i = 0; i < size; i++) {
                 void *object = kfence_alloc(s, s->object_size, flags);
@@ -3942,7 +3943,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
                  */
                 c->tid = next_tid(c->tid);
 
-                local_unlock_irq(&s->cpu_slab->lock);
+                local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
 
                 /*
                  * Invoking slow path likely have side-effect
@@ -3956,7 +3957,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
                         c = this_cpu_ptr(s->cpu_slab);
                         maybe_wipe_obj_freeptr(s, p[i]);
 
-                        local_lock_irq(&s->cpu_slab->lock);
+                        local_lock_irqsave(&s->cpu_slab->lock, irqflags);
 
                         continue; /* goto for-loop */
                 }
@@ -3965,7 +3966,7 @@ static inline int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
                 maybe_wipe_obj_freeptr(s, p[i]);
         }
         c->tid = next_tid(c->tid);
-        local_unlock_irq(&s->cpu_slab->lock);
+        local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
         slub_put_cpu_ptr(s->cpu_slab);
 
         return i;
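The SLUB side makes the same conversion in terms of local locks: on !PREEMPT_RT kernels local_lock_irqsave() saves the interrupt state into irqflags so that the unlock restores rather than unconditionally re-enables interrupts, while on PREEMPT_RT the flags argument is unused and the local lock behaves as a proper lock. A generic sketch of the pattern (the per-CPU structure and function are hypothetical):

        struct my_pcpu {                /* hypothetical example type */
                local_lock_t lock;
                int count;
        };
        static DEFINE_PER_CPU(struct my_pcpu, my_pcpu) = {
                .lock = INIT_LOCAL_LOCK(lock),
        };

        static void bump_count(void)
        {
                unsigned long flags;

                /*
                 * Takes the per-CPU lock; on !PREEMPT_RT this disables
                 * interrupts on this CPU and the unlock restores the
                 * prior state rather than blindly re-enabling.
                 */
                local_lock_irqsave(&my_pcpu.lock, flags);
                this_cpu_inc(my_pcpu.count);
                local_unlock_irqrestore(&my_pcpu.lock, flags);
        }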
@@ -6449,7 +6450,7 @@ static void debugfs_slab_add(struct kmem_cache *s)
 
 void debugfs_slab_release(struct kmem_cache *s)
 {
-        debugfs_remove_recursive(debugfs_lookup(s->name, slab_debugfs_root));
+        debugfs_lookup_and_remove(s->name, slab_debugfs_root);
 }
 
 static int __init slab_debugfs_init(void)
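On the debugfs fix above: debugfs_lookup() returns a dentry with an extra reference that the caller must drop with dput(), and the old code never did, so every cache release leaked a dentry. debugfs_lookup_and_remove() bundles the lookup, the removal, and the reference drop. Roughly, the leak-free equivalent of the old line would have been (a sketch of the intent; the real helper lives in fs/debugfs/):

        struct dentry *dentry;

        dentry = debugfs_lookup(s->name, slab_debugfs_root);
        if (dentry) {
                debugfs_remove(dentry);
                dput(dentry);   /* the reference the old code leaked */
        }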