commit 1d78814d41
Currently each ordering variant has several potential definitions,
with a mixture of preprocessor and C definitions, including several
copies of its C prototype, e.g.

| #if defined(arch_atomic_fetch_andnot_acquire)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
|         int ret = arch_atomic_fetch_andnot_relaxed(i, v);
|         __atomic_acquire_fence();
|         return ret;
| }
| #elif defined(arch_atomic_fetch_andnot)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #else
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
|         return raw_atomic_fetch_and_acquire(~i, v);
| }
| #endif

Make this a bit simpler by defining the C prototype once, and writing
the various potential definitions as plain C code guarded by
ifdeffery. For example, the above becomes:

| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| #if defined(arch_atomic_fetch_andnot_acquire)
|         return arch_atomic_fetch_andnot_acquire(i, v);
| #elif defined(arch_atomic_fetch_andnot_relaxed)
|         int ret = arch_atomic_fetch_andnot_relaxed(i, v);
|         __atomic_acquire_fence();
|         return ret;
| #elif defined(arch_atomic_fetch_andnot)
|         return arch_atomic_fetch_andnot(i, v);
| #else
|         return raw_atomic_fetch_and_acquire(~i, v);
| #endif
| }

This is far easier to read. As we now always have a single copy of the
C prototype wrapping all the potential definitions, we have an obvious
single location for kerneldoc comments.

At the same time, the fallbacks for raw_atomic*_xchg() are made to use
'new' rather than 'i' as the name of the new value. This is what the
existing fallback template used, and is more consistent with the
raw_atomic{_try,}_cmpxchg() fallbacks.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-24-mark.rutland@arm.com
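To make the xchg renaming concrete, here is a sketch of how a reworked
xchg fallback might read with 'new' as the parameter name, following
the structure shown above. This is illustrative rather than a quote of
the generated code; in particular the final #else branch is an
assumption modelled on delegating to the raw_xchg() helper:

| static __always_inline int
| raw_atomic_xchg_acquire(atomic_t *v, int new)
| {
| #if defined(arch_atomic_xchg_acquire)
|         return arch_atomic_xchg_acquire(v, new);
| #elif defined(arch_atomic_xchg_relaxed)
|         int ret = arch_atomic_xchg_relaxed(v, new);
|         __atomic_acquire_fence();
|         return ret;
| #elif defined(arch_atomic_xchg)
|         return arch_atomic_xchg(v, new);
| #else
|         /* assumed final fallback, sketched from the raw_xchg() template */
|         return raw_xchg_acquire(&v->counter, new);
| #endif
| }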
42 lines | 1.1 KiB | Plaintext
# name                  meta    args...
#
# Where meta contains a string of variants to generate.
# Upper-case implies _{acquire,release,relaxed} variants.
# Valid meta values are:
# * B/b                 - bool: returns bool
# * v                   - void: returns void
# * I/i                 - int: returns base type
# * R                   - return: returns base type (has _return variants)
# * F/f                 - fetch: returns base type (has fetch_ variants)
# * l                   - load: returns base type (has _acquire order variant)
# * s                   - store: returns void (has _release order variant)
#
# Where args contains list of type[:name], where type is:
# * cv                  - const pointer to atomic base type (atomic_t/atomic64_t/atomic_long_t)
# * v                   - pointer to atomic base type (atomic_t/atomic64_t/atomic_long_t)
# * i                   - base type (int/s64/long)
# * p                   - pointer to base type (int/s64/long)
#
read                    l       cv
set                     s       v       i
add                     vRF     i       v
sub                     vRF     i       v
inc                     vRF     v
dec                     vRF     v
and                     vF      i       v
andnot                  vF      i       v
or                      vF      i       v
xor                     vF      i       v
xchg                    I       v       i:new
cmpxchg                 I       v       i:old   i:new
try_cmpxchg             B       v       p:old   i:new
sub_and_test            b       i       v
dec_and_test            b       v
inc_and_test            b       v
add_negative            B       i       v
add_unless              fb      v       i:a     i:u
inc_not_zero            b       v
inc_unless_negative     b       v
dec_unless_positive     b       v
dec_if_positive         i       v
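As a concrete reading of the table (illustrative; the generator emits
the same set for atomic_t, atomic64_t and atomic_long_t): the line
'andnot vF i v' names an op taking (int i, atomic_t *v), where meta 'v'
yields a void variant and the upper-case 'F' yields fetch variants in
all orderings. The resulting prototypes for atomic_t would look like:

| void raw_atomic_andnot(int i, atomic_t *v);
| int raw_atomic_fetch_andnot(int i, atomic_t *v);
| int raw_atomic_fetch_andnot_acquire(int i, atomic_t *v);
| int raw_atomic_fetch_andnot_release(int i, atomic_t *v);
| int raw_atomic_fetch_andnot_relaxed(int i, atomic_t *v);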