linux-next/include/linux/atomic.h
Mark Rutland 9fa45070a2 locking/atomics: Switch to generated fallbacks
As a step to ensuring the atomic* APIs are consistent, switch to fallbacks
generated by gen-atomic-fallback.sh.

These are checked in rather than generated with Kbuild, since:

* This allows inspection of the atomics with git grep and ctags on a
  pristine tree, which Linus strongly prefers being able to do.

* The fallbacks are not affected by machine details or configuration
  options, so it is not necessary to regenerate them to take these into
  account.

* These are included by files required *very* early in the build process
  (e.g. for generating bounds.h), and we'd rather not complicate the
  top-level Kbuild file with dependencies.

The new fallback header should be equivalent to the old fallbacks in
<linux/atomic.h>, but:

* It is formatted a little differently, as the script enforces a more
  regular layout than the hand-written fallbacks had.

* Fallbacks are now expanded in-place as static inline functions rather
  than macros.

* The prototypes for fallbacks are arranged consistently, with the return
  type on a separate line to keep to a sensible line length (see the
  sketch below).
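
For example, a generated acquire fallback now reads roughly like this
(an illustrative sketch of the gen-atomic-fallback.sh output, not the
verbatim generated text):

  static inline int
  atomic_fetch_add_acquire(int i, atomic_t *v)
  {
          int ret = atomic_fetch_add_relaxed(i, v);
          __atomic_acquire_fence();
          return ret;
  }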

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: catalin.marinas@arm.com
Cc: linuxdrivers@attotech.com
Cc: dvyukov@google.com
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: arnd@arndb.de
Cc: aryabinin@virtuozzo.com
Cc: glider@google.com
Link: http://lkml.kernel.org/r/20180904104830.2975-3-mark.rutland@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2018-11-01 11:00:46 +01:00


/* SPDX-License-Identifier: GPL-2.0 */
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H

#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */
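
/*
 * For example (illustrative; these operations all exist for atomic_t),
 * atomic_xchg() comes in all four flavours:
 *
 *   atomic_xchg(v, new)                // fully ordered
 *   atomic_xchg_acquire(v, new)        // ACQUIRE semantics
 *   atomic_xchg_release(v, new)        // RELEASE semantics
 *   atomic_xchg_relaxed(v, new)        // no ordering guarantees
 */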

/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 *
 * If an architecture overrides __atomic_acquire_fence() it will probably
 * want to define smp_mb__after_spinlock().
 */
#ifndef __atomic_acquire_fence
#define __atomic_acquire_fence		smp_mb__after_atomic
#endif

#ifndef __atomic_release_fence
#define __atomic_release_fence		smp_mb__before_atomic
#endif

#ifndef __atomic_pre_full_fence
#define __atomic_pre_full_fence		smp_mb__before_atomic
#endif

#ifndef __atomic_post_full_fence
#define __atomic_post_full_fence	smp_mb__after_atomic
#endif

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__atomic_acquire_fence();					\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__atomic_release_fence();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	__atomic_pre_full_fence();					\
	__ret = op##_relaxed(args);					\
	__atomic_post_full_fence();					\
	__ret;								\
})
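
/*
 * Illustrative note: <linux/atomic-fallback.h> uses the helpers above to
 * build the stronger orderings from a relaxed primitive. For instance, an
 * architecture that provides only atomic_add_return_relaxed() ends up
 * with (a sketch, not the verbatim generated text):
 *
 *   #define atomic_add_return_acquire(...) \
 *           __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
 */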

#include <linux/atomic-fallback.h>

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */