slab updates for 5.18
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEjUuTAak14xi+SF7M4CHKc/GJqRAFAmI4yWsACgkQ4CHKc/GJ
qRAUhQf8CE5dBIblFU6Nfbiv/GHSu2AoHw8DRseeC7uSLGUzP9h2l1c0zMBzLIrf
ujKQGU/8n2AgUZBwxd59vbi0WHLD7K9qr3Yj6hH0QnTmHiv89GXEnYXrHHAJ506q
OGzT7NUiz5pEHrFyE5yqKQ2axy+Q+JrAfyzHQPEdzNtl0DJurVuHATYz5b9Pk6zW
27PrCIFjQyIkqw2s9oBCYZevAZkiAoxXntzhXicrqksZs4htArzn7LDTulpccnhy
6hwrCSg91E1Tza8oTNOCNvXt/YUGa6Q1RBEeUDmLOLwHSuOIfTPb5zW25vCvpyMK
008OYHcw7tspWnEfAEWqIDwMFWcjDg==
=2g9n
-----END PGP SIGNATURE-----

Merge tag 'slab-for-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab

Pull slab updates from Vlastimil Babka:

 - A few non-trivial SLUB code cleanups, most notably a refactoring of
   deactivate_slab().

 - A bunch of trivial changes, such as removal of unused parameters,
   making stuff static, and employing helper functions.

* tag 'slab-for-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/vbabka/slab:
  mm: slub: Delete useless parameter of alloc_slab_page()
  mm: slab: Delete unused SLAB_DEACTIVATED flag
  mm/slub: remove forced_order parameter in calculate_sizes
  mm/slub: refactor deactivate_slab()
  mm/slub: limit number of node partial slabs only in cache creation
  mm/slub: use helper macro __ATTR_XX_MODE for SLAB_ATTR(_RO)
  mm/slab_common: use helper function is_power_of_2()
  mm/slob: make kmem_cache_boot static
This commit is contained in:
commit c5c009e250
@@ -117,9 +117,6 @@
 #define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
 #define SLAB_TEMPORARY	SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
 
-/* Slab deactivation flag */
-#define SLAB_DEACTIVATED	((slab_flags_t __force)0x10000000U)
-
 /*
  * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
  *
@@ -807,7 +807,7 @@ void __init setup_kmalloc_cache_index_table(void)
 	unsigned int i;
 
 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
-		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
+		!is_power_of_2(KMALLOC_MIN_SIZE));
 
 	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
 		unsigned int elem = size_index_elem(i);
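For context: is_power_of_2() lives in include/linux/log2.h and is the same mask test plus a zero check, so for KMALLOC_MIN_SIZE (never zero) the BUILD_BUG_ON condition is unchanged; the helper just reads better. A minimal userspace sketch, with the helper re-declared locally rather than pulled from kernel headers:

#include <stdbool.h>
#include <stdio.h>

/* Local re-declaration of the helper from include/linux/log2.h. */
static bool is_power_of_2(unsigned long n)
{
	return (n != 0 && ((n & (n - 1)) == 0));
}

int main(void)
{
	/* The bare mask test also passes for 0; the helper rejects it. */
	for (unsigned long n = 0; n < 10; n++)
		printf("%2lu: mask-test=%d is_power_of_2=%d\n",
		       n, (n & (n - 1)) == 0, is_power_of_2(n));
	return 0;
}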
@@ -714,7 +714,7 @@ int __kmem_cache_shrink(struct kmem_cache *d)
 	return 0;
 }
 
-struct kmem_cache kmem_cache_boot = {
+static struct kmem_cache kmem_cache_boot = {
 	.name = "kmem_cache",
 	.size = sizeof(struct kmem_cache),
 	.flags = SLAB_PANIC,
mm/slub.c (130 changed lines)
@@ -1788,8 +1788,8 @@ static void *setup_object(struct kmem_cache *s, struct slab *slab,
 /*
  * Slab allocation and freeing
  */
-static inline struct slab *alloc_slab_page(struct kmem_cache *s,
-		gfp_t flags, int node, struct kmem_cache_order_objects oo)
+static inline struct slab *alloc_slab_page(gfp_t flags, int node,
+		struct kmem_cache_order_objects oo)
 {
 	struct folio *folio;
 	struct slab *slab;
@@ -1941,7 +1941,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
 		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);
 
-	slab = alloc_slab_page(s, alloc_gfp, node, oo);
+	slab = alloc_slab_page(alloc_gfp, node, oo);
 	if (unlikely(!slab)) {
 		oo = s->min;
 		alloc_gfp = flags;
@@ -1949,7 +1949,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 		 * Allocation may have failed due to fragmentation.
 		 * Try a lower order alloc if possible
 		 */
-		slab = alloc_slab_page(s, alloc_gfp, node, oo);
+		slab = alloc_slab_page(alloc_gfp, node, oo);
 		if (unlikely(!slab))
 			goto out;
 		stat(s, ORDER_FALLBACK);
@@ -2348,10 +2348,10 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
 static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 			    void *freelist)
 {
-	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
+	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE, M_FULL_NOLIST };
 	struct kmem_cache_node *n = get_node(s, slab_nid(slab));
-	int lock = 0, free_delta = 0;
-	enum slab_modes l = M_NONE, m = M_NONE;
+	int free_delta = 0;
+	enum slab_modes mode = M_NONE;
 	void *nextfree, *freelist_iter, *freelist_tail;
 	int tail = DEACTIVATE_TO_HEAD;
 	unsigned long flags = 0;
@@ -2393,14 +2393,10 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
 	 * Ensure that the slab is unfrozen while the list presence
 	 * reflects the actual number of objects during unfreeze.
 	 *
-	 * We setup the list membership and then perform a cmpxchg
-	 * with the count. If there is a mismatch then the slab
-	 * is not unfrozen but the slab is on the wrong list.
-	 *
-	 * Then we restart the process which may have to remove
-	 * the slab from the list that we just put it on again
-	 * because the number of objects in the slab may have
-	 * changed.
+	 * We first perform cmpxchg holding lock and insert to list
+	 * when it succeed. If there is mismatch then the slab is not
+	 * unfrozen and number of objects in the slab may have changed.
+	 * Then release lock and retry cmpxchg again.
 	 */
 redo:
 
@@ -2419,61 +2415,52 @@ redo:
 
 	new.frozen = 0;
 
-	if (!new.inuse && n->nr_partial >= s->min_partial)
-		m = M_FREE;
-	else if (new.freelist) {
-		m = M_PARTIAL;
-		if (!lock) {
-			lock = 1;
-			/*
-			 * Taking the spinlock removes the possibility that
-			 * acquire_slab() will see a slab that is frozen
-			 */
-			spin_lock_irqsave(&n->list_lock, flags);
-		}
-	} else {
-		m = M_FULL;
-		if (kmem_cache_debug_flags(s, SLAB_STORE_USER) && !lock) {
-			lock = 1;
-			/*
-			 * This also ensures that the scanning of full
-			 * slabs from diagnostic functions will not see
-			 * any frozen slabs.
-			 */
-			spin_lock_irqsave(&n->list_lock, flags);
-		}
-	}
-
-	if (l != m) {
-		if (l == M_PARTIAL)
-			remove_partial(n, slab);
-		else if (l == M_FULL)
-			remove_full(s, n, slab);
-
-		if (m == M_PARTIAL)
-			add_partial(n, slab, tail);
-		else if (m == M_FULL)
-			add_full(s, n, slab);
-	}
-
-	l = m;
+	if (!new.inuse && n->nr_partial >= s->min_partial) {
+		mode = M_FREE;
+	} else if (new.freelist) {
+		mode = M_PARTIAL;
+		/*
+		 * Taking the spinlock removes the possibility that
+		 * acquire_slab() will see a slab that is frozen
+		 */
+		spin_lock_irqsave(&n->list_lock, flags);
+	} else if (kmem_cache_debug_flags(s, SLAB_STORE_USER)) {
+		mode = M_FULL;
+		/*
+		 * This also ensures that the scanning of full
+		 * slabs from diagnostic functions will not see
+		 * any frozen slabs.
+		 */
+		spin_lock_irqsave(&n->list_lock, flags);
+	} else {
+		mode = M_FULL_NOLIST;
+	}
+
+
 	if (!cmpxchg_double_slab(s, slab,
 				old.freelist, old.counters,
 				new.freelist, new.counters,
-				"unfreezing slab"))
+				"unfreezing slab")) {
+		if (mode == M_PARTIAL || mode == M_FULL)
+			spin_unlock_irqrestore(&n->list_lock, flags);
 		goto redo;
+	}
 
-	if (lock)
-		spin_unlock_irqrestore(&n->list_lock, flags);
 
-	if (m == M_PARTIAL)
-		stat(s, tail);
-	else if (m == M_FULL)
-		stat(s, DEACTIVATE_FULL);
-	else if (m == M_FREE) {
+	if (mode == M_PARTIAL) {
+		add_partial(n, slab, tail);
+		spin_unlock_irqrestore(&n->list_lock, flags);
+		stat(s, tail);
+	} else if (mode == M_FREE) {
 		stat(s, DEACTIVATE_EMPTY);
 		discard_slab(s, slab);
 		stat(s, FREE_SLAB);
+	} else if (mode == M_FULL) {
+		add_full(s, n, slab);
+		spin_unlock_irqrestore(&n->list_lock, flags);
+		stat(s, DEACTIVATE_FULL);
+	} else if (mode == M_FULL_NOLIST) {
+		stat(s, DEACTIVATE_FULL);
 	}
 }
 
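The net effect of the refactor above: the old code put the slab on a list before the cmpxchg and might have to take it off again on a retry (hence remove_partial()/remove_full() and the l/lock bookkeeping), while the new code decides the target mode first, takes list_lock only for the modes that will touch a list, and performs a single list insertion only after cmpxchg_double_slab() succeeds. A minimal userspace sketch of that ordering, with a pthread mutex standing in for the node's list_lock and one atomic word standing in for cmpxchg_double_slab() (the simplification and all helper names here are assumptions, not SLUB code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE, M_FULL_NOLIST };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic unsigned long counters;

static void deactivate_sketch(enum slab_modes mode, unsigned long new)
{
	unsigned long old;

redo:
	old = atomic_load(&counters);	/* re-read state on every attempt */

	/* Lock before the cmpxchg, but only if a list will be touched. */
	if (mode == M_PARTIAL || mode == M_FULL)
		pthread_mutex_lock(&list_lock);

	/* Stand-in for cmpxchg_double_slab(..., "unfreezing slab"). */
	if (!atomic_compare_exchange_strong(&counters, &old, new)) {
		if (mode == M_PARTIAL || mode == M_FULL)
			pthread_mutex_unlock(&list_lock);
		goto redo;	/* the real code recomputes old/new too */
	}

	/* Success: exactly one list insertion per mode, then unlock. */
	if (mode == M_PARTIAL || mode == M_FULL) {
		/* add_partial() or add_full() would go here */
		pthread_mutex_unlock(&list_lock);
	}
}

int main(void)
{
	deactivate_sketch(M_PARTIAL, 1);
	printf("counters=%lu\n", atomic_load(&counters));
	return 0;
}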
@@ -4014,15 +4001,6 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
 	return 1;
 }
 
-static void set_min_partial(struct kmem_cache *s, unsigned long min)
-{
-	if (min < MIN_PARTIAL)
-		min = MIN_PARTIAL;
-	else if (min > MAX_PARTIAL)
-		min = MAX_PARTIAL;
-	s->min_partial = min;
-}
-
 static void set_cpu_partial(struct kmem_cache *s)
 {
 #ifdef CONFIG_SLUB_CPU_PARTIAL
@@ -4060,7 +4038,7 @@ static void set_cpu_partial(struct kmem_cache *s)
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
  */
-static int calculate_sizes(struct kmem_cache *s, int forced_order)
+static int calculate_sizes(struct kmem_cache *s)
 {
 	slab_flags_t flags = s->flags;
 	unsigned int size = s->object_size;
@@ -4164,10 +4142,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	size = ALIGN(size, s->align);
 	s->size = size;
 	s->reciprocal_size = reciprocal_value(size);
-	if (forced_order >= 0)
-		order = forced_order;
-	else
-		order = calculate_order(size);
+	order = calculate_order(size);
 
 	if ((int)order < 0)
 		return 0;
@@ -4203,7 +4178,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 	s->random = get_random_long();
 #endif
 
-	if (!calculate_sizes(s, -1))
+	if (!calculate_sizes(s))
 		goto error;
 	if (disable_higher_order_debug) {
 		/*
|
||||
if (get_order(s->size) > get_order(s->object_size)) {
|
||||
s->flags &= ~DEBUG_METADATA_FLAGS;
|
||||
s->offset = 0;
|
||||
if (!calculate_sizes(s, -1))
|
||||
if (!calculate_sizes(s))
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
@@ -4229,7 +4204,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 	 * The larger the object size is, the more slabs we want on the partial
 	 * list to avoid pounding the page allocator excessively.
 	 */
-	set_min_partial(s, ilog2(s->size) / 2);
+	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
+	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
 
 	set_cpu_partial(s);
 
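The min_t()/max_t() pair is an open-coded clamp of ilog2(size)/2 into [MIN_PARTIAL, MAX_PARTIAL], i.e. exactly what the deleted set_min_partial() helper did at this call site. A standalone sketch of the resulting bounds, using 5 and 10 for MIN_PARTIAL/MAX_PARTIAL (believed to match mm/slub.c of this era, but treat the values as an assumption):

#include <stdio.h>

#define MIN_PARTIAL 5	/* assumed value */
#define MAX_PARTIAL 10	/* assumed value */

/* Same result as the two min_t()/max_t() lines in the hunk above. */
static unsigned long clamp_min_partial(unsigned long n)
{
	if (n > MAX_PARTIAL)
		n = MAX_PARTIAL;
	if (n < MIN_PARTIAL)
		n = MIN_PARTIAL;
	return n;
}

int main(void)
{
	/* ilog2(size)/2 for a tiny, a medium, and a huge object size */
	printf("%lu %lu %lu\n", clamp_min_partial(1),
	       clamp_min_partial(7), clamp_min_partial(100));	/* 5 7 10 */
	return 0;
}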
@@ -5358,12 +5334,10 @@ struct slab_attribute {
 };
 
 #define SLAB_ATTR_RO(_name) \
-	static struct slab_attribute _name##_attr = \
-		__ATTR(_name, 0400, _name##_show, NULL)
+	static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
 
 #define SLAB_ATTR(_name) \
-	static struct slab_attribute _name##_attr = \
-		__ATTR(_name, 0600, _name##_show, _name##_store)
+	static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
 
 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
 {
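__ATTR_RO_MODE()/__ATTR_RW_MODE() are the generic sysfs helpers from include/linux/sysfs.h; they paste _name##_show (and _name##_store for RW) onto the attribute just as the hand-rolled __ATTR() lines did, while also sanity-checking the octal mode at compile time. A simplified userspace re-implementation of the token-pasting pattern (MY_ATTR_RO_MODE and the struct layouts here are illustrative stand-ins, not the kernel definitions):

#include <stdio.h>

struct attribute {
	const char *name;
	unsigned short mode;
};

struct slab_attribute {
	struct attribute attr;
	int (*show)(void);
};

#define __stringify(x) #x
/* Stand-in for the sysfs helper: stringify the name, paste on _show. */
#define MY_ATTR_RO_MODE(_name, _mode)					\
	{ .attr = { .name = __stringify(_name), .mode = _mode },	\
	  .show = _name##_show }

static int objects_show(void)
{
	return 42;
}

static struct slab_attribute objects_attr = MY_ATTR_RO_MODE(objects, 0400);

int main(void)
{
	printf("attr %s, mode %o, show() -> %d\n", objects_attr.attr.name,
	       objects_attr.attr.mode, objects_attr.show());
	return 0;
}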
@@ -5410,7 +5384,7 @@ static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
 	if (err)
 		return err;
 
-	set_min_partial(s, min);
+	s->min_partial = min;
 	return length;
 }
 SLAB_ATTR(min_partial);