commit 1bdadf46ef

We'd like all architectures to convert to ARCH_ATOMIC, as this will
enable functionality, and once all architectures are converted it will
be possible to make significant cleanups to the atomic headers.

A number of architectures use asm-generic/atomic64.h, and it's
impractical to convert the header and all these architectures in one
go. To make it possible to convert them one-by-one, let's make the
asm-generic implementation function as either atomic64_*() or
arch_atomic64_*() depending on whether ARCH_ATOMIC is selected. To do
this, the generic implementations are prefixed as generic_atomic64_*(),
and preprocessor definitions map atomic64_*()/arch_atomic64_*() onto
these as appropriate.

Once all users are moved over to ARCH_ATOMIC the ifdeffery in the
header can be simplified and/or removed entirely.

For existing users (none of which select ARCH_ATOMIC), there should be
no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-11-mark.rutland@arm.com
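For reference, the name mapping the message describes lives in
include/asm-generic/atomic64.h. A minimal sketch of the resulting
ifdeffery, abridged here to two operations (the real header declares
and maps the full generic_atomic64_*() set):

extern s64 generic_atomic64_read(const atomic64_t *v);
extern void generic_atomic64_add(s64 a, atomic64_t *v);
/* ... remaining generic_atomic64_*() declarations ... */

#ifdef CONFIG_ARCH_ATOMIC
/* Architectures that select ARCH_ATOMIC get the arch_ prefix ... */
#define arch_atomic64_read	generic_atomic64_read
#define arch_atomic64_add	generic_atomic64_add
#else
/* ... everyone else keeps the unprefixed names, with unchanged behaviour. */
#define atomic64_read		generic_atomic64_read
#define atomic64_add		generic_atomic64_add
#endif

The spinlock-based implementation these names resolve to,
lib/atomic64.c, follows.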
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable. Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	/* Hash the variable's cacheline address down to one of NR_LOCKS locks. */
	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

s64 generic_atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_read);

void generic_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(generic_atomic64_set);

#define ATOMIC64_OP(op, c_op)						\
void generic_atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(generic_atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
/*
 * The bitwise ops have no *_return() variants, so the second definition
 * drops ATOMIC64_OP_RETURN.
 */
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);

s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);

s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);

s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);

	return val;
}
EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
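As a usage note: a platform without native 64-bit atomic instructions
selects CONFIG_GENERIC_ATOMIC64 and picks up asm-generic/atomic64.h, so
ordinary atomic64_t code lands transparently in the spinlocked
functions above. A minimal sketch of a caller, assuming such a platform
without ARCH_ATOMIC (the rx_bytes/charge_budget names are hypothetical,
purely for illustration):

#include <linux/atomic.h>

static atomic64_t rx_bytes = ATOMIC64_INIT(0);	/* hypothetical counter */

static void account_rx(s64 len)
{
	/*
	 * Here atomic64_add() expands to generic_atomic64_add(), which
	 * takes one of the 16 hashed raw spinlocks above around a plain
	 * 64-bit += on v->counter.
	 */
	atomic64_add(len, &rx_bytes);
}

static bool charge_budget(atomic64_t *budget)
{
	/* Likewise resolves to generic_atomic64_dec_if_positive(). */
	return atomic64_dec_if_positive(budget) >= 0;
}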