b14f243a42
Eric noticed that when there are devices with equal indices, some hash functions that use them become less effective than they could be. Fix this in advance by mixing the net_device address into the hash value instead of the device index. This is true for the arp and ndisc hash fns. The netlabel, can and llc ones are also ifindex-based, but those three are init_net-only and thus will not be affected. Many thanks to David and Eric for the hash32_ptr implementation!

Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
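As a rough sketch of the pattern described above (hypothetical names in standalone C; the real arp/ndisc code differs), a device-keyed hash function switches from the device index to the folded pointer value:

#include <stdint.h>

struct net_device;	/* opaque here: only its address is hashed */

/* Fold a pointer to 32 bits, like the hash32_ptr() added below. */
static inline uint32_t fold_ptr32(const void *ptr)
{
	unsigned long val = (unsigned long)ptr;

	if (sizeof(val) > 4)		/* 64-bit: mix in the high half */
		val ^= val >> 16 >> 16;	/* split shift avoids UB on 32-bit */
	return (uint32_t)val;
}

/*
 * Hypothetical neighbour-table hash.  Before: (key ^ dev->ifindex) *
 * hash_rnd, so devices with equal indices (possible across network
 * namespaces) collide systematically.  After: mix the net_device
 * address instead, which stays distinct per device.
 */
static inline uint32_t neigh_hashfn(uint32_t key,
				    const struct net_device *dev,
				    uint32_t hash_rnd)
{
	return (key ^ fold_ptr32(dev)) * hash_rnd;
}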
81 lines
2.0 KiB
C
#ifndef _LINUX_HASH_H
#define _LINUX_HASH_H
/* Fast hashing routine for ints, longs and pointers.
   (C) 2002 William Lee Irwin III, IBM */

/*
 * Knuth recommends primes in approximately golden ratio to the maximum
 * integer representable by a machine word for multiplicative hashing.
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * These primes are chosen to be bit-sparse, that is operations on
 * them can use shifts and additions instead of multiplications for
 * machines where multiplications are slow.
 */

#include <asm/types.h>
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
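/*
 * For reference: 2^32/phi ~= 0x9e3779b9 and 2^64/phi ~= 0x9e3779b97f4a7c15;
 * the constants above are bit-sparse prime neighbours of those values.
 */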

#if BITS_PER_LONG == 32
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
#define hash_long(val, bits) hash_32(val, bits)
#elif BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
#else
#error Wordsize not 32 or 64
#endif
static inline u64 hash_64(u64 val, unsigned int bits)
{
	u64 hash = val;

	/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
	u64 n = hash;
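	/*
	 * The shift/add sequence below multiplies by GOLDEN_RATIO_PRIME_64:
	 * each shift advances n to the next set bit of the prime (2^18,
	 * 2^51, 2^54, 2^57, 2^61, 2^63) and the +/- matches that bit's
	 * sign; hash starts at val to supply the trailing "+ 1" term.
	 */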
	n <<= 18;
	hash -= n;
	n <<= 33;
	hash -= n;
	n <<= 3;
	hash += n;
	n <<= 3;
	hash -= n;
	n <<= 4;
	hash += n;
	n <<= 2;
	hash += n;

	/* High bits are more random, so use them. */
	return hash >> (64 - bits);
}

static inline u32 hash_32(u32 val, unsigned int bits)
{
	/* On some cpus multiply is faster, on others gcc will do shifts */
	u32 hash = val * GOLDEN_RATIO_PRIME_32;

	/* High bits are more random, so use them. */
	return hash >> (32 - bits);
}

static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
{
	return hash_long((unsigned long)ptr, bits);
}

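/*
 * hash32_ptr() is the helper this commit introduces: it folds a
 * pointer down to 32 bits so callers (e.g. the arp/ndisc hash fns)
 * can mix a net_device's address into a hash instead of its ifindex.
 */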
static inline u32 hash32_ptr(const void *ptr)
{
	unsigned long val = (unsigned long)ptr;

#if BITS_PER_LONG == 64
	val ^= (val >> 32);
#endif
	return (u32)val;
}
#endif /* _LINUX_HASH_H */
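A quick usage sketch (a standalone userspace approximation with <stdint.h> types, not kernel code) showing how hash_32() picks a bucket, for instance one of 2^8 = 256 hash-table slots:

#include <stdio.h>
#include <stdint.h>

#define GOLDEN_RATIO_PRIME_32 0x9e370001UL

static inline uint32_t hash_32(uint32_t val, unsigned int bits)
{
	uint32_t hash = val * GOLDEN_RATIO_PRIME_32;

	/* Keep the well-mixed high bits, as the kernel version does. */
	return hash >> (32 - bits);
}

int main(void)
{
	static int obj;
	/* bits = 8 -> a bucket index in [0, 255] */
	uint32_t bucket = hash_32((uint32_t)(uintptr_t)&obj, 8);

	printf("bucket = %u\n", bucket);
	return 0;
}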