diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index dcf8bcf846d6..7c13f22a0c9e 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -50,14 +50,14 @@ F.e. in order to boot just with sanity checks and red zoning one would specify:
 
 Trying to find an issue in the dentry cache? Try
 
-	slub_debug=,dentry_cache
+	slub_debug=,dentry
 
 to only enable debugging on the dentry cache.
 
 Red zoning and tracking may realign the slab. We can just apply sanity checks
 to the dentry cache with
 
-	slub_debug=F,dentry_cache
+	slub_debug=F,dentry
 
 In case you forgot to enable debugging on the kernel command line: It is
 possible to enable debugging manually when the kernel is up. Look at the
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index fcc48096ee64..39c3a5eb8ebe 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -41,7 +41,7 @@ static inline void *kmalloc(size_t size, gfp_t flags)
 			goto found; \
 		else \
 			i++;
-#include "kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 		{
 			extern void __you_cannot_kmalloc_that_much(void);
@@ -75,7 +75,7 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 			goto found; \
 		else \
 			i++;
-#include "kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 		{
 			extern void __you_cannot_kmalloc_that_much(void);
diff --git a/mm/slab.c b/mm/slab.c
index 473e6c2eaefb..e6c698f55674 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -333,7 +333,7 @@ static __always_inline int index_of(const size_t size)
 		return i; \
 	else \
 		i++;
-#include "linux/kmalloc_sizes.h"
+#include <linux/kmalloc_sizes.h>
 #undef CACHE
 		__bad_size();
 	} else
@@ -2964,11 +2964,10 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	struct array_cache *ac;
 	int node;
 
-	node = numa_node_id();
-
-	check_irq_off();
-	ac = cpu_cache_get(cachep);
 retry:
+	check_irq_off();
+	node = numa_node_id();
+	ac = cpu_cache_get(cachep);
 	batchcount = ac->batchcount;
 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
 		/*
@@ -3280,7 +3279,7 @@ retry:
 		if (local_flags & __GFP_WAIT)
 			local_irq_enable();
 		kmem_flagcheck(cache, flags);
-		obj = kmem_getpages(cache, flags, -1);
+		obj = kmem_getpages(cache, local_flags, -1);
 		if (local_flags & __GFP_WAIT)
 			local_irq_disable();
 		if (obj) {
diff --git a/mm/slub.c b/mm/slub.c
index 0863fd38a5ce..96d63eb3ab17 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1368,7 +1368,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	struct page *page = c->page;
 	int tail = 1;
 
-	if (c->freelist)
+	if (page->freelist)
 		stat(c, DEACTIVATE_REMOTE_FREES);
 	/*
 	 * Merge cpu freelist into slab freelist. Typically we get here
@@ -1856,12 +1856,15 @@ static unsigned long calculate_alignment(unsigned long flags,
 	 * The hardware cache alignment cannot override the specified
 	 * alignment though. If that is greater then use it.
 	 */
-	if ((flags & SLAB_HWCACHE_ALIGN) &&
-			size > cache_line_size() / 2)
-		return max_t(unsigned long, align, cache_line_size());
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		unsigned long ralign = cache_line_size();
+		while (size <= ralign / 2)
+			ralign /= 2;
+		align = max(align, ralign);
+	}
 
 	if (align < ARCH_SLAB_MINALIGN)
-		return ARCH_SLAB_MINALIGN;
+		align = ARCH_SLAB_MINALIGN;
 
 	return ALIGN(align, sizeof(void *));
 }
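For context, the calculate_alignment() change in the mm/slub.c hunk above can be
exercised outside the kernel. The following is a minimal, self-contained sketch,
not the kernel implementation: cache_line_size() is stubbed to 64 bytes, the
SLAB_HWCACHE_ALIGN and ARCH_SLAB_MINALIGN values are illustrative assumptions,
and main() is only a driver; only the body of calculate_alignment() mirrors the
hunk. It shows that with the new loop an 8-byte object is aligned to 8 bytes
instead of being padded out to a full cache line, while a 100-byte object still
gets full cache-line alignment.

/* Standalone sketch of the revised calculate_alignment() logic. */
#include <stdio.h>

#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* illustrative flag value */
#define ARCH_SLAB_MINALIGN	8UL		/* illustrative minimum */

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define max(a, b)	((a) > (b) ? (a) : (b))

/* Assumed L1 cache line size; the kernel queries this per architecture. */
static unsigned long cache_line_size(void)
{
	return 64;
}

static unsigned long calculate_alignment(unsigned long flags,
					 unsigned long align, unsigned long size)
{
	/*
	 * With the patch, small objects are no longer padded to a full
	 * cache line: the line size is halved until the object would
	 * occupy more than half of it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();

		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}

int main(void)
{
	/* Prints 8: an 8-byte object no longer consumes a 64-byte slot. */
	printf("%lu\n", calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 8));
	/* Prints 64: a 100-byte object keeps full cache-line alignment. */
	printf("%lu\n", calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 100));
	return 0;
}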