linux-next/include/asm-generic/cmpxchg.h


/*
* Generic UP xchg and cmpxchg using interrupt disablement. Does not
* support SMP.
*/
#ifndef __ASM_GENERIC_CMPXCHG_H
#define __ASM_GENERIC_CMPXCHG_H

#ifdef CONFIG_SMP
#error "Cannot use generic cmpxchg on SMP"
#endif

#include <linux/types.h>
#include <linux/irqflags.h>

#ifndef xchg

/*
 * This function doesn't exist, so you'll get a linker error if
 * something tries to do an invalidly-sized xchg().
 */
extern void __xchg_called_with_bad_pointer(void);
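/*
 * A minimal illustration of the linker-error trick (the variable below is
 * hypothetical, not part of this header): on a 32-bit configuration without
 * CONFIG_64BIT, an 8-byte exchange such as
 *
 *	u64 some_u64, old;
 *	old = xchg(&some_u64, 42ULL);
 *
 * falls through to the default: case in __xchg() and leaves an unresolved
 * reference to __xchg_called_with_bad_pointer(), so the mistake is caught
 * at link time instead of being silently mis-handled.
 */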
static inline
unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long ret, flags;

	switch (size) {
	case 1:
#ifdef __xchg_u8
		return __xchg_u8(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u8 *)ptr;
		*(volatile u8 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u8 */

	case 2:
#ifdef __xchg_u16
		return __xchg_u16(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u16 *)ptr;
		*(volatile u16 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u16 */

	case 4:
#ifdef __xchg_u32
		return __xchg_u32(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u32 *)ptr;
		*(volatile u32 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u32 */

#ifdef CONFIG_64BIT
	case 8:
#ifdef __xchg_u64
		return __xchg_u64(x, ptr);
#else
		local_irq_save(flags);
		ret = *(volatile u64 *)ptr;
		*(volatile u64 *)ptr = x;
		local_irq_restore(flags);
		return ret;
#endif /* __xchg_u64 */
#endif /* CONFIG_64BIT */

	default:
		__xchg_called_with_bad_pointer();
		return x;
	}
}
#define xchg(ptr, x) \
((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#endif /* xchg */
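xchg() here is only safe on uniprocessor configurations: each size-specific fallback above masks local interrupts around the load and the store, which serialises against interrupt handlers on the same CPU but not against other CPUs. A hedged usage sketch follows; the helper name and the flag it operates on are illustrative, not part of this header.

/* Hypothetical helper: claim a pending-work flag shared with local IRQ context. */
static inline int claim_pending_work(u32 *flag)
{
	/* Expands to __xchg(0, flag, 4) and takes the case 4 branch above. */
	u32 old = xchg(flag, 0);

	return old != 0;	/* non-zero if work was pending before we cleared it */
}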
/*
* Atomic compare and exchange.
*
* Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether
* a cmpxchg primitive faster than repeated local irq save/restore exists.
*/
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif /* __ASM_GENERIC_CMPXCHG_H */
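Because cmpxchg() on these UP-only configurations is simply cmpxchg_local(), which is emulated by disabling local interrupts around the compare and the store (safe against ordinary interrupt handlers, but not NMIs or MCEs), the usual compare-and-swap retry loop still works. A hedged sketch, using a hypothetical counter and helper name:

/* Hypothetical helper: add to a counter shared with local interrupt context. */
static inline void counter_add(unsigned long *counter, unsigned long delta)
{
	unsigned long old, next;

	do {
		old = *counter;
		next = old + delta;
		/* Store succeeds only if *counter still equals 'old'; otherwise retry. */
	} while (cmpxchg(counter, old, next) != old);
}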