mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-16 16:54:20 +08:00
72d9310460
It used to be an ad-hoc hack defined by the x86 version of <asm/bitops.h> that enabled a couple of library routines to know whether an integer multiply is faster than repeated shifts and additions. This just makes it use the real Kconfig system instead, and makes x86 (which was the only architecture that did this) select the option. NOTE! Even for x86, this really is kind of wrong. If we cared, we would probably not enable this for builds optimized for netburst (P4), where shifts-and-adds are generally faster than multiplies. This patch does *not* change that kind of logic, though, it is purely a syntactic change with no code changes. This was triggered by the fact that we have other places that really want to know "do I want to expand multiples by constants by hand or not", particularly the hash generation code. Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
68 lines
1.9 KiB
C
68 lines
1.9 KiB
C
#include <linux/export.h>
|
|
#include <linux/bitops.h>
|
|
#include <asm/types.h>
|
|
|
|
/**
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
|
|
|
|
/*
 * Software population count of a 32-bit word, using the classic
 * parallel (SWAR) bit-counting scheme: count bits in 2-bit, then
 * 4-bit, then 8-bit groups, and finally fold the per-byte counts.
 */
unsigned int __sw_hweight32(unsigned int w)
{
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
	w -= (w >> 1) & 0x55555555;
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);
	w = (w + (w >> 4)) & 0x0f0f0f0f;
	/* One multiply sums all four byte counts into the top byte. */
	return (w * 0x01010101) >> 24;
#else
	unsigned int count = w - ((w >> 1) & 0x55555555);

	count = (count & 0x33333333) + ((count >> 2) & 0x33333333);
	count = (count + (count >> 4)) & 0x0f0f0f0f;
	/* Fold the per-byte counts together without a multiply. */
	count += count >> 8;
	count += count >> 16;
	return count & 0xff;
#endif
}
|
|
/* Exported so modules can fall back on the generic implementation. */
EXPORT_SYMBOL(__sw_hweight32);
|
|
|
|
/*
 * Software population count of a 16-bit word.  Only the low 16 bits
 * of @w contribute to the result; higher bits are masked away by the
 * second reduction step.
 */
unsigned int __sw_hweight16(unsigned int w)
{
	unsigned int pairs = w - ((w >> 1) & 0x5555);
	unsigned int nibbles = (pairs & 0x3333) + ((pairs >> 2) & 0x3333);
	unsigned int bytes = (nibbles + (nibbles >> 4)) & 0x0f0f;

	return (bytes + (bytes >> 8)) & 0xff;
}
|
|
/* Exported so modules can fall back on the generic implementation. */
EXPORT_SYMBOL(__sw_hweight16);
|
|
|
|
/*
 * Software population count of an 8-bit value.  Only the low 8 bits
 * of @w contribute; higher bits are masked away by the reduction.
 */
unsigned int __sw_hweight8(unsigned int w)
{
	unsigned int pairs = w - ((w >> 1) & 0x55);
	unsigned int nibbles = (pairs & 0x33) + ((pairs >> 2) & 0x33);

	return (nibbles + (nibbles >> 4)) & 0x0f;
}
|
|
/* Exported so modules can fall back on the generic implementation. */
EXPORT_SYMBOL(__sw_hweight8);
|
|
|
|
/*
 * Software population count of a 64-bit word.  On 32-bit kernels this
 * delegates to two 32-bit counts; on 64-bit kernels it runs the SWAR
 * reduction directly on the full word, with or without the final
 * multiply depending on CONFIG_ARCH_HAS_FAST_MULTIPLIER.
 */
unsigned long __sw_hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
	return __sw_hweight32((unsigned int)(w >> 32)) +
	       __sw_hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
	w -= (w >> 1) & 0x5555555555555555ul;
	w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul);
	w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful;
	/* One multiply sums all eight byte counts into the top byte. */
	return (w * 0x0101010101010101ul) >> 56;
#else
	__u64 count = w - ((w >> 1) & 0x5555555555555555ul);

	count = (count & 0x3333333333333333ul) +
		((count >> 2) & 0x3333333333333333ul);
	count = (count + (count >> 4)) & 0x0f0f0f0f0f0f0f0ful;
	/* Fold the per-byte counts together without a multiply. */
	count += count >> 8;
	count += count >> 16;
	return (count + (count >> 32)) & 0xfful;
#endif
#endif
}
|
|
/* Exported so modules can fall back on the generic implementation. */
EXPORT_SYMBOL(__sw_hweight64);
|