s390/bitops: implement cache friendly test_and_set_bit_lock

The generic implementation for test_and_set_bit_lock in
include/asm-generic uses the standard test_and_set_bit operation. This
is done with either a 'csg' or a 'laog' instruction. For both versions
the cache line is fetched exclusively, even if the bit is already set.
The result is an increase in cache traffic; for a contended lock this
is a bad idea.

Acked-by: Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
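The pattern the patch adopts is the classic test-and-test-and-set: read
the bit with a plain load first, and only fall through to the
interlocked update when the bit looks clear. A minimal user-space
sketch of the same idea, using C11 <stdatomic.h> instead of the
kernel's primitives (the function name is illustrative, not from the
patch):

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Test-and-test-and-set: the plain load lets a contended caller keep
 * the cache line in the shared state; only the interlocked fetch_or
 * needs to fetch the line exclusively.
 */
static bool test_and_set_bit_lock_sketch(unsigned long nr,
					 atomic_ulong *word)
{
	unsigned long mask = 1UL << (nr % (8 * sizeof(unsigned long)));

	/* Shared read: no exclusive fetch when the bit is already set. */
	if (atomic_load_explicit(word, memory_order_relaxed) & mask)
		return true;	/* lock already taken; failure needs no ordering */
	/* Interlocked read-modify-write, acquire semantics on success. */
	return (atomic_fetch_or_explicit(word, mask,
					 memory_order_acquire) & mask) != 0;
}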
commit acdc9fc9a8
parent 5614dd920a
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -276,6 +276,28 @@ static inline int test_bit(unsigned long nr, const volatile unsigned long *ptr)
 	return (*addr >> (nr & 7)) & 1;
 }
 
+static inline int test_and_set_bit_lock(unsigned long nr,
+					volatile unsigned long *ptr)
+{
+	if (test_bit(nr, ptr))
+		return 1;
+	return test_and_set_bit(nr, ptr);
+}
+
+static inline void clear_bit_unlock(unsigned long nr,
+				    volatile unsigned long *ptr)
+{
+	smp_mb__before_atomic();
+	clear_bit(nr, ptr);
+}
+
+static inline void __clear_bit_unlock(unsigned long nr,
+				      volatile unsigned long *ptr)
+{
+	smp_mb();
+	__clear_bit(nr, ptr);
+}
+
 /*
  * Functions which use MSB0 bit numbering.
  * On an s390x system the bits are numbered:
@@ -446,7 +468,6 @@ static inline int fls(int word)
 #include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/hweight.h>
-#include <asm-generic/bitops/lock.h>
 #include <asm-generic/bitops/sched.h>
 #include <asm-generic/bitops/le.h>
 #include <asm-generic/bitops/ext2-atomic-setbit.h>
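Taken together, the two new helpers form a trylock/unlock pair on a
single bit. A hypothetical in-kernel caller (not part of this patch;
FLAG_BIT and the wrapper names are made up for illustration, and
cpu_relax() comes from <asm/processor.h>) might spin like this:

#define FLAG_BIT	0	/* illustrative bit number */

static void lock_flag(volatile unsigned long *flags)
{
	/* Failed attempts spin on a shared cache line thanks to the
	 * test_bit fast path added above. */
	while (test_and_set_bit_lock(FLAG_BIT, flags))
		cpu_relax();
}

static void unlock_flag(volatile unsigned long *flags)
{
	/* Release: orders the critical section before the bit clears. */
	clear_bit_unlock(FLAG_BIT, flags);
}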