
slub: Do not cross cacheline boundaries for very small objects

SLUB should pack even small objects nicely into cachelines if that is what
has been asked for. Use the same algorithm as SLAB for this.

The effect of this patch for a system with a cacheline size of 64
bytes is that the 24 byte sized slab caches will now put exactly
2 objects into a cacheline instead of 3 with some overlap into
the next cacheline. This reduces the object density in a 4k slab
from 170 to 128 objects (same as SLAB).
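
For a 64-byte cacheline the new loop settles on a 32-byte alignment for 24-byte objects, which is where the 128-objects-per-4k figure comes from. A minimal standalone sketch of that calculation (illustration only, not part of the patch; the cache line size and minimum alignment are hard-coded stand-ins for the kernel's cache_line_size() and ARCH_SLAB_MINALIGN):

	/* Illustration of the new alignment rule, assuming a 64-byte cache line. */
	#include <stdio.h>

	#define EXAMPLE_CACHE_LINE	64UL	/* assumed cache line size */
	#define EXAMPLE_MINALIGN	8UL	/* stands in for ARCH_SLAB_MINALIGN */

	static unsigned long example_alignment(unsigned long align, unsigned long size)
	{
		unsigned long ralign = EXAMPLE_CACHE_LINE;

		/* Halve the alignment while the object still fits in half of it. */
		while (size <= ralign / 2)
			ralign /= 2;
		if (ralign > align)
			align = ralign;
		if (align < EXAMPLE_MINALIGN)
			align = EXAMPLE_MINALIGN;
		return align;
	}

	int main(void)
	{
		/*
		 * Prints 32: 24-byte objects get 32-byte alignment, so exactly
		 * 2 objects per 64-byte line and 4096 / 32 = 128 per 4k slab
		 * (vs. 4096 / 24 = 170 with no cacheline alignment).
		 */
		printf("24-byte object -> %lu-byte alignment\n",
		       example_alignment(0, 24));
		return 0;
	}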

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Nick Piggin 2008-03-05 14:05:56 -08:00 committed by Christoph Lameter
parent 1c61fc40fc
commit b621038678

mm/slub.c

@@ -1856,12 +1856,15 @@ static unsigned long calculate_alignment(unsigned long flags,
 	 * The hardware cache alignment cannot override the specified
 	 * alignment though. If that is greater then use it.
 	 */
-	if ((flags & SLAB_HWCACHE_ALIGN) &&
-			size > cache_line_size() / 2)
-		return max_t(unsigned long, align, cache_line_size());
+	if (flags & SLAB_HWCACHE_ALIGN) {
+		unsigned long ralign = cache_line_size();
+		while (size <= ralign / 2)
+			ralign /= 2;
+		align = max(align, ralign);
+	}
 
 	if (align < ARCH_SLAB_MINALIGN)
-		return ARCH_SLAB_MINALIGN;
+		align = ARCH_SLAB_MINALIGN;
 
 	return ALIGN(align, sizeof(void *));
 }