linux-next/lib/atomic64.c
Mark Rutland 5fb6e8cf53 locking/atomic: atomic64: Remove unusable atomic ops
The generic atomic64 implementation provides:

* atomic64_and_return()
* atomic64_or_return()
* atomic64_xor_return()

... but none of these exist in the standard atomic64 API as described by
scripts/atomic/atomics.tbl, and none of these have prototypes exposed by
<asm-generic/atomic64.h>.

The lkp kernel test robot noted this results in warnings when building with
W=1:

  lib/atomic64.c:82:5: warning: no previous prototype for 'generic_atomic64_and_return' [-Wmissing-prototypes]

  lib/atomic64.c:82:5: warning: no previous prototype for 'generic_atomic64_or_return' [-Wmissing-prototypes]

  lib/atomic64.c:82:5: warning: no previous prototype for 'generic_atomic64_xor_return' [-Wmissing-prototypes]

This appears to have been a thinko in commit:

  28aa2bda22 ("locking/atomic: Implement atomic{,64,_long}_fetch_{add,sub,and,andnot,or,xor}{,_relaxed,_acquire,_release}()")

... where we grouped add/sub separately from and/or/xor, so that we could avoid
implementing _return forms for the latter group, but forgot to remove
ATOMIC64_OP_RETURN() for that group.

This doesn't cause any functional problem, but it's pointless to build code
which cannot be used. Remove the unusable code. This does not affect add/sub,
for which _return forms will still be built.

Reported-by: kernel test robot <lkp@intel.com>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Boqun Feng <boqun.feng@gmail.com>
Link: https://lore.kernel.org/r/20211126115923.41489-1-mark.rutland@arm.com
2021-12-13 10:56:09 +01:00
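
In effect, the patch narrows the second ATOMIC64_OPS() grouping in this file so that and/or/xor no longer instantiate a _return form. A rough before/after sketch of that grouping (abbreviated for illustration, not the literal diff):

Before (unusable _return ops were still generated for and/or/xor):

	#define ATOMIC64_OPS(op, c_op)		\
		ATOMIC64_OP(op, c_op)		\
		ATOMIC64_OP_RETURN(op, c_op)	\
		ATOMIC64_FETCH_OP(op, c_op)

After (only the plain and fetch forms remain for this group):

	#define ATOMIC64_OPS(op, c_op)		\
		ATOMIC64_OP(op, c_op)		\
		ATOMIC64_FETCH_OP(op, c_op)

	ATOMIC64_OPS(and, &=)
	ATOMIC64_OPS(or, |=)
	ATOMIC64_OPS(xor, ^=)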


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable. Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
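/*
 * NR_LOCKS must remain a power of two: lock_addr() below masks the
 * hashed address with (NR_LOCKS - 1) to pick a lock slot.
 */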
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

s64 generic_atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_read);

void generic_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(generic_atomic64_set);
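
/*
 * The macros below stamp out the spinlock-protected arithmetic and
 * bitwise ops: ATOMIC64_OP() generates a void op, ATOMIC64_OP_RETURN()
 * an op that returns the new value, and ATOMIC64_FETCH_OP() an op that
 * returns the value the variable held before the update.
 */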
#define ATOMIC64_OP(op, c_op)						\
void generic_atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(generic_atomic64_##op);

#define ATOMIC64_OP_RETURN(op, c_op)					\
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_##op##_return);

#define ATOMIC64_FETCH_OP(op, c_op)					\
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 val;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	val = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return val;							\
}									\
EXPORT_SYMBOL(generic_atomic64_fetch_##op);

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
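
/*
 * Decrement @v and return the result, but only store the decremented
 * value if it is still non-negative; a negative result leaves @v
 * unchanged.
 */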
s64 generic_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
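
/*
 * Compare-and-exchange: store @n only if @v currently holds @o, and
 * return whatever value was observed.
 */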
s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_cmpxchg);
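
/*
 * Unconditionally replace the value of @v with @new and return the
 * old value.
 */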
s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_xchg);
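
/*
 * Add @a to @v unless @v already holds @u; in either case return the
 * value @v held beforehand.
 */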
s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val != u)
		v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
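
Architectures that select CONFIG_GENERIC_ATOMIC64 back the arch-neutral atomic64_*() API with these generic_atomic64_*() helpers via <asm-generic/atomic64.h>. A minimal caller sketch (ordinary kernel code, not part of this file; the example_* names are made up for illustration):

	#include <linux/atomic.h>

	static atomic64_t example_bytes = ATOMIC64_INIT(0);	/* hypothetical counter */

	static void example_account(s64 nbytes)
	{
		/* On CONFIG_GENERIC_ATOMIC64 builds this roughly ends up in generic_atomic64_add(). */
		atomic64_add(nbytes, &example_bytes);
	}

	static s64 example_total(void)
	{
		/* Reads likewise take the hashed spinlock in generic_atomic64_read(). */
		return atomic64_read(&example_bytes);
	}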