commit c8399943bd
Impact: reduce kernel image size
Hugh Dickins noticed that, when the kernel is built for code size, older gcc versions do not inline some of the bitops.
Mark all complex x86 bitops that have more than a single asm statement or two as __always_inline to avoid this problem (a user-space sketch of the pattern follows the sign-off block below).
This should probably be done for other architectures too.
Ingo then found a better fix that only requires a single-line change, but it unfortunately only works on gcc 4.3.
On older gcc versions the original patch still makes a ~0.3% defconfig difference with CONFIG_OPTIMIZE_INLINING=y.
With gcc 4.1 and a defconfig-like build:

   text    data     bss     dec     hex filename
6116998 1138540  883788 8139326  7c323e vmlinux-oi-with-patch
6137043 1138540  883788 8159371  7c808b vmlinux-optimize-inlining
~20k / 0.3% difference.
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
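
To make the pattern concrete, here is a minimal, hypothetical user-space sketch (x86-64, gcc/clang) of the kind of change described above: a bitop built from inline asm is declared static __always_inline so the compiler cannot skip inlining it when plain "inline" is treated as only a hint (as with CONFIG_OPTIMIZE_INLINING=y). The helper name and the local __always_inline fallback definition are illustrative, not the kernel's actual bitops.h code.

#include <stdbool.h>
#include <stdio.h>

/* The kernel gets __always_inline from <linux/compiler.h>; this stand-alone
 * sketch defines an equivalent fallback. */
#ifndef __always_inline
#define __always_inline inline __attribute__((__always_inline__))
#endif

/* Hypothetical x86-64 bitop in the style the patch targets: enough inline
 * asm that an older gcc optimizing for size may refuse to inline a plain
 * "inline" version; __always_inline forces inlining regardless. */
static __always_inline bool test_and_set_bit_sketch(unsigned long nr,
						    volatile unsigned long *addr)
{
	int oldbit;

	asm volatile("lock; bts %2, %1\n\t"
		     "sbb %0, %0"		/* oldbit = 0 - CF */
		     : "=r" (oldbit), "+m" (*addr)
		     : "r" (nr)
		     : "memory");
	return oldbit != 0;
}

int main(void)
{
	volatile unsigned long word = 0;

	printf("first test_and_set of bit 3:  %d\n", test_and_set_bit_sketch(3, &word));
	printf("second test_and_set of bit 3: %d\n", test_and_set_bit_sketch(3, &word));
	printf("word = %#lx\n", (unsigned long)word);
	return 0;
}

Without the always_inline attribute, an older gcc building for size may emit such a helper out of line and call it, which is the kind of regression the patch is meant to prevent.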
include/asm-generic/bitops/fls64.h (37 lines, 821 B, C):
#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
#define _ASM_GENERIC_BITOPS_FLS64_H_

#include <asm/types.h>

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#if BITS_PER_LONG == 32
static __always_inline int fls64(__u64 x)
{
	__u32 h = x >> 32;
	if (h)
		return fls(h) + 32;
	return fls(x);
}
#elif BITS_PER_LONG == 64
static __always_inline int fls64(__u64 x)
{
	if (x == 0)
		return 0;
	return __fls(x) + 1;
}
#else
#error BITS_PER_LONG not 32 or 64
#endif

#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
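
For reference, a small user-space demonstration of the contract documented in the header above (0 for a zero input, 1-based bit positions up to 64). It uses a compiler builtin as a stand-in for the kernel's fls()/__fls() helpers, so it is an illustrative sketch rather than kernel code.

#include <stdio.h>

/* Portable stand-in for fls64() built on __builtin_clzll (gcc/clang);
 * matches the documented contract: fls64(0) == 0, and the most
 * significant bit of a 64-bit word is reported as position 64. */
static inline int fls64_demo(unsigned long long x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
	printf("fls64(0)          = %d\n", fls64_demo(0));		/* 0 */
	printf("fls64(1)          = %d\n", fls64_demo(1ULL));		/* 1 */
	printf("fls64(0x80)       = %d\n", fls64_demo(0x80ULL));	/* 8 */
	printf("fls64(1ULL << 63) = %d\n", fls64_demo(1ULL << 63));	/* 64 */
	return 0;
}

On a 64-bit build the header reaches the same result via __fls(x) + 1, where __fls() returns the zero-based index of the most significant set bit; on 32-bit it splits the word into halves and falls back to the 32-bit fls().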