Mirror of https://github.com/edk2-porting/linux-next.git, synced 2024-11-18 15:44:02 +08:00
f991879473
Follow-up to 33dd4e0ec9 ("mm: make some struct page's const"), which missed the
HASHED_PAGE_VIRTUAL case.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
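In practical terms the change is a const-qualification along that path; the hash_ptr() prototype in the header below already shows the result (its pointer parameter is const void *). A minimal sketch of why that matters, assuming a caller that only holds a const struct page * (hash_page() here is a hypothetical name, not the kernel's helper):

#include <linux/hash.h>
#include <linux/mm_types.h>

/* Illustrative only: with hash_ptr() taking a const void *, a const
 * struct page * can be hashed without casting away the qualifier. */
static inline unsigned long hash_page(const struct page *page,
				      unsigned int bits)
{
	return hash_ptr(page, bits);
}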
71 lines
1.8 KiB
C
#ifndef _LINUX_HASH_H
#define _LINUX_HASH_H
/* Fast hashing routine for ints, longs and pointers.
   (C) 2002 William Lee Irwin III, IBM */

/*
 * Knuth recommends primes in approximately golden ratio to the maximum
 * integer representable by a machine word for multiplicative hashing.
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * These primes are chosen to be bit-sparse, that is operations on
 * them can use shifts and additions instead of multiplications for
 * machines where multiplications are slow.
 */

#include <asm/types.h>

/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL

#if BITS_PER_LONG == 32
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
#define hash_long(val, bits) hash_32(val, bits)
#elif BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
#else
#error Wordsize not 32 or 64
#endif

static inline u64 hash_64(u64 val, unsigned int bits)
{
	u64 hash = val;

	/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
	u64 n = hash;
	n <<= 18;
	hash -= n;
	n <<= 33;
	hash -= n;
	n <<= 3;
	hash += n;
	n <<= 3;
	hash -= n;
	n <<= 4;
	hash += n;
	n <<= 2;
	hash += n;

	/* High bits are more random, so use them. */
	return hash >> (64 - bits);
}

static inline u32 hash_32(u32 val, unsigned int bits)
{
	/* On some cpus multiply is faster, on others gcc will do shifts */
	u32 hash = val * GOLDEN_RATIO_PRIME_32;

	/* High bits are more random, so use them. */
	return hash >> (32 - bits);
}

static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
{
	return hash_long((unsigned long)ptr, bits);
}
#endif /* _LINUX_HASH_H */
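For reference, a minimal usage sketch of this API (the table, its size, and the names below are hypothetical, not part of the header): hash_ptr() folds a pointer into a bits-wide value, so it can index a table of 2^bits buckets directly.

#include <linux/hash.h>
#include <linux/list.h>

#define EXAMPLE_HASH_BITS 8		/* hypothetical: 256 buckets */
static struct hlist_head example_table[1 << EXAMPLE_HASH_BITS];

/* hash_ptr() returns a value in [0, 2^EXAMPLE_HASH_BITS), taken from the
 * high (most mixed) bits of the multiplicative hash, so it can be used
 * as a bucket index without further masking. */
static inline struct hlist_head *example_bucket(const void *key)
{
	return &example_table[hash_ptr(key, EXAMPLE_HASH_BITS)];
}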