mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/kdave/btrfs-progs.git
synced 2024-11-24 20:54:17 +08:00
f63f29e9e9
Create a directory for all sources that can be used by anything that's not related to a relevant kernel part: all common functions, helpers and utilities that do not fit any other specific category. The traditional location would probably be lib/, with all things that are statically linked into the main binaries, but we already have libbtrfs and libbtrfsutil so that would be confusing.

Signed-off-by: David Sterba <dsterba@suse.com>
257 lines
6.3 KiB
C
#ifndef _PERF_LINUX_BITOPS_H_
#define _PERF_LINUX_BITOPS_H_

#include <linux/kernel.h>
#include <endian.h>
#include "common/internal.h"

#ifndef DIV_ROUND_UP
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#endif

#define BITS_PER_BYTE		8
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
#define BITS_TO_U64(nr)		DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
#define BITS_TO_U32(nr)		DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))

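/*
 * Illustrative note (not part of the original header): BITS_TO_LONGS()
 * rounds up, so on a host with 64-bit long, BITS_TO_LONGS(100) is
 * DIV_ROUND_UP(100, 64) == 2 -- the number of longs needed to back a
 * 100-bit bitmap.
 */
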
#define for_each_set_bit(bit, addr, size) \
	for ((bit) = find_first_bit((addr), (size));		\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

/* Same as for_each_set_bit() but use @bit as the value to start the search from */
#define for_each_set_bit_from(bit, addr, size) \
	for ((bit) = find_next_bit((addr), (size), (bit));	\
	     (bit) < (size);					\
	     (bit) = find_next_bit((addr), (size), (bit) + 1))

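/*
 * Illustrative usage (not part of the original header), assuming a
 * 128-bit bitmap declared by the caller:
 *
 *	unsigned long map[BITS_TO_LONGS(128)] = { 0 };
 *	int i;
 *
 *	set_bit(3, map);
 *	set_bit(70, map);
 *	for_each_set_bit(i, map, 128)
 *		printf("%d\n", i);	// visits 3, then 70
 */
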
static inline void set_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

static inline void clear_bit(int nr, unsigned long *addr)
{
	addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
}

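/*
 * Illustrative note (not part of the original header): these helpers are
 * plain, non-atomic read-modify-write operations. With 64-bit longs,
 * set_bit(70, addr) touches addr[1] (70 / 64) and ORs in 1UL << 6
 * (70 % 64).
 */
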
/**
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

static inline unsigned int hweight32(unsigned int w)
{
	unsigned int res = w - ((w >> 1) & 0x55555555);
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	res = (res + (res >> 4)) & 0x0F0F0F0F;
	res = res + (res >> 8);
	return (res + (res >> 16)) & 0x000000FF;
}

static inline unsigned long hweight64(__u64 w)
{
#if BITS_PER_LONG == 32
	return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
#elif BITS_PER_LONG == 64
	__u64 res = w - ((w >> 1) & 0x5555555555555555ul);
	res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	res = res + (res >> 8);
	res = res + (res >> 16);
	return (res + (res >> 32)) & 0x00000000000000FFul;
#endif
}

static inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}

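/*
 * Illustrative values (not part of the original header):
 * hweight32(0x000000FF) == 8, hweight32(0xF0F0F0F0) == 16,
 * hweight64(~0ULL) == 64.
 */
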
#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static __always_inline unsigned long __ffs(unsigned long word)
{
	int num = 0;

#if BITS_PER_LONG == 64
	if ((word & 0xffffffff) == 0) {
		num += 32;
		word >>= 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		num += 16;
		word >>= 16;
	}
	if ((word & 0xff) == 0) {
		num += 8;
		word >>= 8;
	}
	if ((word & 0xf) == 0) {
		num += 4;
		word >>= 4;
	}
	if ((word & 0x3) == 0) {
		num += 2;
		word >>= 2;
	}
	if ((word & 0x1) == 0)
		num += 1;
	return num;
}

#define ffz(x)	__ffs(~(x))

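/*
 * Illustrative values (not part of the original header):
 * __ffs(0x18) == 3 (lowest set bit of 0b11000), and
 * ffz(0x07) == 3 (lowest clear bit of 0b00111). Both are undefined
 * when no such bit exists, so callers check the word first.
 */
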
#define BITMAP_FIRST_WORD_MASK(start)	(~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits)	(~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

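/*
 * Illustrative values (not part of the original header), with 64-bit
 * longs: BITMAP_FIRST_WORD_MASK(5) keeps bits 5..63 of the first word
 * (~0UL << 5), and BITMAP_LAST_WORD_MASK(5) keeps bits 0..4 of the last
 * word (~0UL >> 59, since -5 & 63 == 59).
 */
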
/*
 * This is a common helper function for find_next_bit, find_next_zero_bit, and
 * find_next_and_bit. The differences are:
 *  - The "invert" argument, which is XORed with each fetched word before
 *    searching it for one bits.
 *  - The optional "addr2", which is anded with "addr1" if present.
 */
static inline unsigned long _find_next_bit(const unsigned long *addr1,
		const unsigned long *addr2, unsigned long nbits,
		unsigned long start, unsigned long invert)
{
	unsigned long tmp;

	if (start >= nbits)
		return nbits;

	tmp = addr1[start / BITS_PER_LONG];
	if (addr2)
		tmp &= addr2[start / BITS_PER_LONG];
	tmp ^= invert;

	/* Handle 1st word. */
	tmp &= BITMAP_FIRST_WORD_MASK(start);
	start = round_down(start, BITS_PER_LONG);

	while (!tmp) {
		start += BITS_PER_LONG;
		if (start >= nbits)
			return nbits;

		tmp = addr1[start / BITS_PER_LONG];
		if (addr2)
			tmp &= addr2[start / BITS_PER_LONG];
		tmp ^= invert;
	}

	return min(start + __ffs(tmp), nbits);
}

/*
 * Find the next set bit in a memory region.
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
					  unsigned long size,
					  unsigned long offset)
{
	return _find_next_bit(addr, NULL, size, offset, 0UL);
}

static inline unsigned long find_next_zero_bit(const unsigned long *addr,
					       unsigned long size,
					       unsigned long offset)
{
	return _find_next_bit(addr, NULL, size, offset, ~0UL);
}

#define find_first_bit(addr, size)	find_next_bit((addr), (size), 0)
#define find_first_zero_bit(addr, size)	find_next_zero_bit((addr), (size), 0)

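/*
 * Illustrative usage (not part of the original header): scan a bitmap
 * for set and clear bits.
 *
 *	unsigned long map[BITS_TO_LONGS(64)] = { 0 };
 *
 *	set_bit(0, map);
 *	set_bit(1, map);
 *	find_first_bit(map, 64);	// == 0
 *	find_first_zero_bit(map, 64);	// == 2
 *	find_next_bit(map, 64, 1);	// == 1
 *	find_next_bit(map, 64, 2);	// == 64 (none found: returns size)
 */
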
#if __BYTE_ORDER == __BIG_ENDIAN

static inline unsigned long ext2_swab(const unsigned long y)
{
#if BITS_PER_LONG == 64
	return (unsigned long) bswap_64((u64) y);
#elif BITS_PER_LONG == 32
	return (unsigned long) bswap_32((u32) y);
#else
#error BITS_PER_LONG not defined
#endif
}

static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
		const unsigned long *addr2, unsigned long nbits,
		unsigned long start, unsigned long invert)
{
	unsigned long tmp;

	if (start >= nbits)
		return nbits;

	tmp = addr1[start / BITS_PER_LONG];
	if (addr2)
		tmp &= addr2[start / BITS_PER_LONG];
	tmp ^= invert;

	/* Handle 1st word. */
	tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
	start = round_down(start, BITS_PER_LONG);

	while (!tmp) {
		start += BITS_PER_LONG;
		if (start >= nbits)
			return nbits;

		tmp = addr1[start / BITS_PER_LONG];
		if (addr2)
			tmp &= addr2[start / BITS_PER_LONG];
		tmp ^= invert;
	}

	return min(start + __ffs(ext2_swab(tmp)), nbits);
}

static inline unsigned long find_next_zero_bit_le(const void *addr, unsigned long size,
						  unsigned long offset)
{
	return _find_next_bit_le(addr, NULL, size, offset, ~0UL);
}

static inline unsigned long find_next_bit_le(const void *addr, unsigned long size,
					     unsigned long offset)
{
	return _find_next_bit_le(addr, NULL, size, offset, 0UL);
}

#else

static inline unsigned long find_next_zero_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	return find_next_zero_bit(addr, size, offset);
}

static inline unsigned long find_next_bit_le(const void *addr,
		unsigned long size, unsigned long offset)
{
	return find_next_bit(addr, size, offset);
}

static inline unsigned long find_first_zero_bit_le(const void *addr,
		unsigned long size)
{
	return find_first_zero_bit(addr, size);
}

#endif

#endif /* _PERF_LINUX_BITOPS_H_ */