8e6082e94a
The code for the atomic ops is formatted inconsistently, and while this
is not a functional problem, it is rather distracting when working on
them.

Some ops have consistent indentation, e.g.

| #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \
| static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \
| { \
|         u32 tmp; \
| \
|         asm volatile( \
|         __LSE_PREAMBLE \
|         " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \
|         " add %w[i], %w[i], %w[tmp]" \
|         : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
|         : "r" (v) \
|         : cl); \
| \
|         return i; \
| }

While others have negative indentation for some lines, and/or have
misaligned trailing backslashes, e.g.

| static inline void __lse_atomic_##op(int i, atomic_t *v) \
| { \
|         asm volatile( \
|         __LSE_PREAMBLE \
| " " #asm_op " %w[i], %[v]\n" \
|         : [i] "+r" (i), [v] "+Q" (v->counter) \
|         : "r" (v)); \
| }

This patch makes the indentation consistent and also aligns the
trailing backslashes. This makes the code easier to read for those
(like myself) who are easily distracted by these inconsistencies.

This is intended as a cleanup. There should be no functional change as
a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20211210151410.2782645-2-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
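After the cleanup, the second example is indented like the first and the
trailing backslashes are brought into a single column. A rough sketch of the
result (column positions are illustrative, not the exact upstream whitespace):

| static inline void __lse_atomic_##op(int i, atomic_t *v)       \
| {                                                               \
|         asm volatile(                                           \
|         __LSE_PREAMBLE                                          \
|         " " #asm_op " %w[i], %[v]\n"                            \
|         : [i] "+r" (i), [v] "+Q" (v->counter)                   \
|         : "r" (v));                                             \
| }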
354 lines
10 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#include <linux/stringify.h>

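/*
 * With CONFIG_ARM64_LSE_ATOMICS the LL/SC routines below are only a
 * fallback for CPUs without LSE atomics, so __LL_SC_FALLBACK() emits
 * the LL/SC sequence out of line in .subsection 1 and branches to it
 * and back, keeping it out of the caller's fast path. Without LSE
 * support configured, the sequence is emitted in place.
 */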
#ifdef CONFIG_ARM64_LSE_ATOMICS
#define __LL_SC_FALLBACK(asm_ops) \
" b 3f\n" \
" .subsection 1\n" \
"3:\n" \
asm_ops "\n" \
" b 4f\n" \
" .previous\n" \
"4:\n"
#else
#define __LL_SC_FALLBACK(asm_ops) asm_ops
#endif

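/*
 * If the compiler cannot be trusted with the 'K' immediate constraint
 * (see the comment above the __CMPXCHG_CASE() instantiations below),
 * define K away so that __stringify(constraint) expands to an empty
 * string and the operand falls back to a plain register constraint.
 */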
#ifndef CONFIG_CC_HAS_K_CONSTRAINT
#define K
#endif

/*
 * AArch64 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, asm_op, constraint) \
static inline void \
__ll_sc_atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
        \
        asm volatile("// atomic_" #op "\n" \
        __LL_SC_FALLBACK( \
        " prfm pstl1strm, %2\n" \
        "1: ldxr %w0, %2\n" \
        " " #asm_op " %w0, %w0, %w3\n" \
        " stxr %w1, %w0, %2\n" \
        " cbnz %w1, 1b\n") \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : __stringify(constraint) "r" (i)); \
}

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline int \
__ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int result; \
        \
        asm volatile("// atomic_" #op "_return" #name "\n" \
        __LL_SC_FALLBACK( \
        " prfm pstl1strm, %2\n" \
        "1: ld" #acq "xr %w0, %2\n" \
        " " #asm_op " %w0, %w0, %w3\n" \
        " st" #rel "xr %w1, %w0, %2\n" \
        " cbnz %w1, 1b\n" \
        " " #mb ) \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : __stringify(constraint) "r" (i) \
        : cl); \
        \
        return result; \
}

#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
static inline int \
__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        int val, result; \
        \
        asm volatile("// atomic_fetch_" #op #name "\n" \
        __LL_SC_FALLBACK( \
        " prfm pstl1strm, %3\n" \
        "1: ld" #acq "xr %w0, %3\n" \
        " " #asm_op " %w1, %w0, %w4\n" \
        " st" #rel "xr %w2, %w1, %3\n" \
        " cbnz %w2, 1b\n" \
        " " #mb ) \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
        : __stringify(constraint) "r" (i) \
        : cl); \
        \
        return result; \
}

#define ATOMIC_OPS(...) \
        ATOMIC_OP(__VA_ARGS__) \
        ATOMIC_OP_RETURN( , dmb ish, , l, "memory", __VA_ARGS__)\
        ATOMIC_OP_RETURN(_relaxed, , , , , __VA_ARGS__)\
        ATOMIC_OP_RETURN(_acquire, , a, , "memory", __VA_ARGS__)\
        ATOMIC_OP_RETURN(_release, , , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__)\
        ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)

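/*
 * The final macro argument is the GCC machine constraint used for the
 * immediate form of 'i': 'I' (legal ADD immediate), 'J' (legal SUB
 * immediate), 'K'/'L' (legal 32-bit/64-bit logical immediate).
 */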
ATOMIC_OPS(add, add, I)
ATOMIC_OPS(sub, sub, J)

#undef ATOMIC_OPS
#define ATOMIC_OPS(...) \
        ATOMIC_OP(__VA_ARGS__) \
        ATOMIC_FETCH_OP ( , dmb ish, , l, "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_relaxed, , , , , __VA_ARGS__)\
        ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\
        ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__)

ATOMIC_OPS(and, and, K)
ATOMIC_OPS(or, orr, K)
ATOMIC_OPS(xor, eor, K)
/*
 * GAS converts the mysterious and undocumented BIC (immediate) alias to
 * an AND (immediate) instruction with the immediate inverted. We don't
 * have a constraint for this, so fall back to register.
 */
ATOMIC_OPS(andnot, bic, )

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC64_OP(op, asm_op, constraint) \
static inline void \
__ll_sc_atomic64_##op(s64 i, atomic64_t *v) \
{ \
        s64 result; \
        unsigned long tmp; \
        \
        asm volatile("// atomic64_" #op "\n" \
        __LL_SC_FALLBACK( \
        " prfm pstl1strm, %2\n" \
        "1: ldxr %0, %2\n" \
        " " #asm_op " %0, %0, %3\n" \
        " stxr %w1, %0, %2\n" \
        " cbnz %w1, 1b") \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : __stringify(constraint) "r" (i)); \
}

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline long \
__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \
{ \
        s64 result; \
        unsigned long tmp; \
        \
        asm volatile("// atomic64_" #op "_return" #name "\n" \
        __LL_SC_FALLBACK( \
        " prfm pstl1strm, %2\n" \
        "1: ld" #acq "xr %0, %2\n" \
        " " #asm_op " %0, %0, %3\n" \
        " st" #rel "xr %w1, %0, %2\n" \
        " cbnz %w1, 1b\n" \
        " " #mb ) \
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \
        : __stringify(constraint) "r" (i) \
        : cl); \
        \
        return result; \
}

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline long \
__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \
{ \
        s64 result, val; \
        unsigned long tmp; \
        \
        asm volatile("// atomic64_fetch_" #op #name "\n" \
        __LL_SC_FALLBACK( \
        " prfm pstl1strm, %3\n" \
        "1: ld" #acq "xr %0, %3\n" \
        " " #asm_op " %1, %0, %4\n" \
        " st" #rel "xr %w2, %1, %3\n" \
        " cbnz %w2, 1b\n" \
        " " #mb ) \
        : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \
        : __stringify(constraint) "r" (i) \
        : cl); \
        \
        return result; \
}

#define ATOMIC64_OPS(...) \
        ATOMIC64_OP(__VA_ARGS__) \
        ATOMIC64_OP_RETURN(, dmb ish, , l, "memory", __VA_ARGS__) \
        ATOMIC64_OP_RETURN(_relaxed,, , , , __VA_ARGS__) \
        ATOMIC64_OP_RETURN(_acquire,, a, , "memory", __VA_ARGS__) \
        ATOMIC64_OP_RETURN(_release,, , l, "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_relaxed,, , , , __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add, I)
ATOMIC64_OPS(sub, sub, J)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...) \
        ATOMIC64_OP(__VA_ARGS__) \
        ATOMIC64_FETCH_OP (, dmb ish, , l, "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_relaxed,, , , , __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \
        ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(and, and, L)
ATOMIC64_OPS(or, orr, L)
ATOMIC64_OPS(xor, eor, L)
/*
 * GAS converts the mysterious and undocumented BIC (immediate) alias to
 * an AND (immediate) instruction with the immediate inverted. We don't
 * have a constraint for this, so fall back to register.
 */
ATOMIC64_OPS(andnot, bic, )

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

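/*
 * Decrement v->counter only if the result is non-negative: the new
 * value is returned, and if it would be negative the counter is left
 * unchanged and the negative result is returned instead.
 */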
static inline s64
__ll_sc_atomic64_dec_if_positive(atomic64_t *v)
{
        s64 result;
        unsigned long tmp;

        asm volatile("// atomic64_dec_if_positive\n"
        __LL_SC_FALLBACK(
        " prfm pstl1strm, %2\n"
        "1: ldxr %0, %2\n"
        " subs %0, %0, #1\n"
        " b.lt 2f\n"
        " stlxr %w1, %0, %2\n"
        " cbnz %w1, 1b\n"
        " dmb ish\n"
        "2:")
        : "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
        :
        : "cc", "memory");

        return result;
}

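/*
 * Generate the LL/SC cmpxchg routines for 8/16/32/64-bit operands in
 * relaxed, acquire (acq_), release (rel_) and fully-ordered (mb_)
 * variants. Each returns the value observed in *ptr before the store
 * was attempted.
 */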
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \
static inline u##sz \
__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \
                                 unsigned long old, \
                                 u##sz new) \
{ \
        unsigned long tmp; \
        u##sz oldval; \
        \
        /* \
         * Sub-word sizes require explicit casting so that the compare \
         * part of the cmpxchg doesn't end up interpreting non-zero \
         * upper bits of the register containing "old". \
         */ \
        if (sz < 32) \
                old = (u##sz)old; \
        \
        asm volatile( \
        __LL_SC_FALLBACK( \
        " prfm pstl1strm, %[v]\n" \
        "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \
        " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \
        " cbnz %" #w "[tmp], 2f\n" \
        " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \
        " cbnz %w[tmp], 1b\n" \
        " " #mb "\n" \
        "2:") \
        : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \
          [v] "+Q" (*(u##sz *)ptr) \
        : [old] __stringify(constraint) "r" (old), [new] "r" (new) \
        : cl); \
        \
        return oldval; \
}

/*
 * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
 * handle the 'K' constraint for the value 4294967295 - thus we use no
 * constraint for 32 bit operations.
 */
__CMPXCHG_CASE(w, b, , 8, , , , , K)
__CMPXCHG_CASE(w, h, , 16, , , , , K)
__CMPXCHG_CASE(w, , , 32, , , , , K)
__CMPXCHG_CASE( , , , 64, , , , , L)
__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", K)
__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", K)
__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", K)
__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L)
__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", K)
__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", K)
__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", K)
__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L)
__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", K)
__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", K)
__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", K)
__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L)

#undef __CMPXCHG_CASE

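/*
 * Paired-word compare-and-exchange using LDXP/STXP: both old values
 * must match for the new values to be stored. Returns zero on success
 * and non-zero if the comparison failed.
 */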
#define __CMPXCHG_DBL(name, mb, rel, cl) \
static inline long \
__ll_sc__cmpxchg_double##name(unsigned long old1, \
                              unsigned long old2, \
                              unsigned long new1, \
                              unsigned long new2, \
                              volatile void *ptr) \
{ \
        unsigned long tmp, ret; \
        \
        asm volatile("// __cmpxchg_double" #name "\n" \
        __LL_SC_FALLBACK( \
        " prfm pstl1strm, %2\n" \
        "1: ldxp %0, %1, %2\n" \
        " eor %0, %0, %3\n" \
        " eor %1, %1, %4\n" \
        " orr %1, %0, %1\n" \
        " cbnz %1, 2f\n" \
        " st" #rel "xp %w0, %5, %6, %2\n" \
        " cbnz %w0, 1b\n" \
        " " #mb "\n" \
        "2:") \
        : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \
        : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \
        : cl); \
        \
        return ret; \
}

__CMPXCHG_DBL( , , , )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")

#undef __CMPXCHG_DBL
#undef K

#endif /* __ASM_ATOMIC_LL_SC_H */