0624517d80
forbid asm/bitops.h direct inclusion

Because of compile errors that may occur after bit changes if asm/bitops.h
is included directly without e.g. linux/kernel.h which includes
linux/bitops.h, forbid direct inclusion of asm/bitops.h.

Thanks to Adrian Bunk.

Signed-off-by: Jiri Slaby <jirislaby@gmail.com>
Cc: Adrian Bunk <bunk@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
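A minimal sketch of what the new guard means for users of this header (the consumer file below is hypothetical, not part of this commit): including asm/bitops.h on its own now fails at preprocessing time, while going through linux/bitops.h, which defines _LINUX_BITOPS_H before pulling in the asm header, keeps working.

/* hypothetical consumer, e.g. some driver .c file */
#include <asm/bitops.h>    /* now fails: "only <linux/bitops.h> can be included directly" */

#include <linux/bitops.h>  /* supported: defines _LINUX_BITOPS_H, then includes <asm/bitops.h> */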
219 lines
4.4 KiB
C
#ifndef _BLACKFIN_BITOPS_H
#define _BLACKFIN_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

#include <linux/compiler.h>
#include <asm/byteorder.h>	/* swab32 */
#include <asm/system.h>		/* save_flags */

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>
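
/*
 * The locked routines below (set_bit, clear_bit, change_bit and the
 * test_and_* forms) get their atomicity by disabling local interrupts
 * around the read-modify-write; the __-prefixed variants skip that and
 * must be serialized by the caller.
 */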

static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a |= mask;
	local_irq_restore(flags);
}

static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
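
/*
 * Usage sketch (the flag word and bit number below are hypothetical):
 * clear_bit() itself orders nothing, so release-style code pairs it
 * with the barrier explicitly:
 *
 *	smp_mb__before_clear_bit();
 *	clear_bit(SOME_BUSY_BIT, &some_flags);
 */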

static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;
	unsigned long flags;
	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a &= ~mask;
	local_irq_restore(flags);
}

static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a &= ~mask;
}

static __inline__ void change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long flags;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	local_irq_save(flags);
	*ADDR ^= mask;
	local_irq_restore(flags);
}

static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
{
	int mask;
	unsigned long *ADDR = (unsigned long *)addr;

	ADDR += nr >> 5;
	mask = 1 << (nr & 31);
	*ADDR ^= mask;
}

static __inline__ int test_and_set_bit(int nr, void *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;
	return retval;
}

static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	local_irq_restore(flags);

	return retval;
}

static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	return retval;
}

static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore(flags);
	return retval;
}

static __inline__ int __test_and_change_bit(int nr,
					    volatile unsigned long *addr)
{
	int mask, retval;
	volatile unsigned int *a = (volatile unsigned int *)addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;
	return retval;
}

/*
 * This routine doesn't need to be atomic.
 */
static __inline__ int __constant_test_bit(int nr, const void *addr)
{
	return ((1UL << (nr & 31)) &
		(((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static __inline__ int __test_bit(int nr, const void *addr)
{
	int *a = (int *)addr;
	int mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	return ((mask & *a) != 0);
}
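
/*
 * test_bit() below picks __constant_test_bit() when nr is a compile-time
 * constant, letting the compiler fold the word index and mask, and falls
 * back to __test_bit() otherwise.
 */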

#define test_bit(nr,addr) \
	(__builtin_constant_p(nr) ? \
	 __constant_test_bit((nr),(addr)) : \
	 __test_bit((nr),(addr)))

#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/ext2-non-atomic.h>

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* _BLACKFIN_BITOPS_H */