commit 15553dcbca

This commit first adds a trivial macro for spin_is_locked() to
linux-kernel.def.

It also adds cat code for enumerating all possible matches of lock
write events (set LKW) with islocked events returning true (set RL,
for Read from Lock), and unlock write events (set UL) with islocked
events returning false (set RU, for Read from Unlock). Note that this
intentionally does not model uniprocessor kernels (CONFIG_SMP=n) built
with CONFIG_DEBUG_SPINLOCK=n, in which spin_is_locked() unconditionally
returns zero.

It also adds a pair of litmus tests demonstrating the minimal ordering
provided by spin_is_locked() in conjunction with spin_lock(). Will
Deacon noted that this minimal ordering happens on ARMv8:

  https://lkml.kernel.org/r/20180226162426.GB17158@arm.com

Notice that herd7 installations strictly older than version 7.49 do
not handle the new constructs.

Signed-off-by: Luc Maranget <luc.maranget@inria.fr>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Alan Stern <stern@rowland.harvard.edu>
Cc: Akira Yokosawa <akiyks@gmail.com>
Cc: Andrea Parri <parri.andrea@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Jade Alglave <j.alglave@ucl.ac.uk>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Luc Maranget <Luc.Maranget@inria.fr>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-arch@vger.kernel.org
Link: http://lkml.kernel.org/r/1526340837-12222-10-git-send-email-paulmck@linux.vnet.ibm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
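The pair of litmus tests themselves is not reproduced on this page. The
following sketch reconstructs their shape from the description above; the
test name, variable names, and the choice of smp_mb__after_spinlock() are
assumptions, not text copied from the commit.

C MP+polockmbonce+poacquiresilsil

(*
 * Can an observer that senses the lock-held state via spin_is_locked(),
 * ordered by an acquire load, see P0's critical section out of order?
 * When the first spin_is_locked() returns false and the second returns
 * true, the smp_load_acquire() must have executed before P0 acquired
 * the lock, so seeing r1 = 1 as well should be forbidden.
 *)

{
}

P0(spinlock_t *lo, int *x)
{
	spin_lock(lo);
	smp_mb__after_spinlock();
	WRITE_ONCE(*x, 1);
	spin_unlock(lo);
}

P1(spinlock_t *lo, int *x)
{
	int r1;
	int r2;
	int r3;

	r1 = smp_load_acquire(x);
	r2 = spin_is_locked(lo);
	r3 = spin_is_locked(lo);
}

exists (1:r1=1 /\ 1:r2=0 /\ 1:r3=1)

With the full barrier after spin_lock() this cycle should be forbidden;
dropping the barrier leaves only the minimal ordering the commit
describes, and herd7 (7.49 or newer) should then report the outcome as
allowed.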
linux-kernel.def (109 lines, 4.3 KiB)
// SPDX-License-Identifier: GPL-2.0+
//
// An earlier version of this file appears in the companion webpage for
// "Frightening small children and disconcerting grown-ups: Concurrency
// in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern,
// which is to appear in ASPLOS 2018.
// ONCE
READ_ONCE(X) __load{once}(X)
WRITE_ONCE(X,V) { __store{once}(X,V); }
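READ_ONCE() and WRITE_ONCE() mark the accesses as such but add no
ordering. A minimal message-passing litmus test against this model (a
sketch, assuming herd7 7.49 or newer with this .def file) shows the weak
outcome remains allowed:

C MP+poonceonces

(*
 * Result: Sometimes, because nothing orders the two writes or the
 * two reads with respect to each other.
 *)

{}

P0(int *x, int *y)
{
	WRITE_ONCE(*x, 1);
	WRITE_ONCE(*y, 1);
}

P1(int *x, int *y)
{
	int r0;
	int r1;

	r0 = READ_ONCE(*y);
	r1 = READ_ONCE(*x);
}

exists (1:r0=1 /\ 1:r1=0)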
// Release Acquire and friends
smp_store_release(X,V) { __store{release}(*X,V); }
smp_load_acquire(X) __load{acquire}(*X)
rcu_assign_pointer(X,V) { __store{release}(X,V); }
rcu_dereference(X) __load{once}(X)
smp_store_mb(X,V) { __store{once}(X,V); __fence{mb}; }
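Pairing a release store with an acquire load forbids the weak outcome of
the previous test. A sketch under the same assumptions:

C MP+pooncerelease+poacquireonce

(*
 * Result: Never, because the release/acquire pair orders both the
 * writes and the reads.
 *)

{}

P0(int *x, int *y)
{
	WRITE_ONCE(*x, 1);
	smp_store_release(y, 1);
}

P1(int *x, int *y)
{
	int r0;
	int r1;

	r0 = smp_load_acquire(y);
	r1 = READ_ONCE(*x);
}

exists (1:r0=1 /\ 1:r1=0)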
// Fences
smp_mb() { __fence{mb}; }
smp_rmb() { __fence{rmb}; }
smp_wmb() { __fence{wmb}; }
smp_mb__before_atomic() { __fence{before-atomic}; }
smp_mb__after_atomic() { __fence{after-atomic}; }
smp_mb__after_spinlock() { __fence{after-spinlock}; }
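The same forbidden result can be obtained with the weaker write and read
fences in place of release/acquire; a sketch:

C MP+fencewmbonceonce+fencermbonceonce

(*
 * Result: Never, because smp_wmb() orders the writes and smp_rmb()
 * orders the reads.
 *)

{}

P0(int *x, int *y)
{
	WRITE_ONCE(*x, 1);
	smp_wmb();
	WRITE_ONCE(*y, 1);
}

P1(int *x, int *y)
{
	int r0;
	int r1;

	r0 = READ_ONCE(*y);
	smp_rmb();
	r1 = READ_ONCE(*x);
}

exists (1:r0=1 /\ 1:r1=0)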
// Exchange
xchg(X,V) __xchg{mb}(X,V)
xchg_relaxed(X,V) __xchg{once}(X,V)
xchg_release(X,V) __xchg{release}(X,V)
xchg_acquire(X,V) __xchg{acquire}(X,V)
cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
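The {mb}/{once}/{acquire}/{release} annotations give each
read-modify-write the corresponding ordering, so for example
xchg_release() can stand in for the release store of the
message-passing pattern. A sketch (the test name here is made up):

C MP+poxchgrelease+poacquireonce

(*
 * Result: Never, because the release-ordered xchg publishes the
 * earlier write along with the exchanged value.
 *)

{}

P0(int *x, int *y)
{
	int r0;

	WRITE_ONCE(*x, 1);
	r0 = xchg_release(y, 1);
}

P1(int *x, int *y)
{
	int r1;
	int r2;

	r1 = smp_load_acquire(y);
	r2 = READ_ONCE(*x);
}

exists (1:r1=1 /\ 1:r2=0)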
// Spinlocks
spin_lock(X) { __lock(X); }
spin_unlock(X) { __unlock(X); }
spin_trylock(X) __trylock(X)
spin_is_locked(X) __islocked(X)
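spin_lock() acts as an acquire and spin_unlock() as a release, so
message passing through a lock handoff is forbidden even when the x
accesses sit outside the critical sections. A sketch following the
kernel's litmus-test naming scheme:

C MP+polocks

(*
 * Result: Never, because if P1's critical section observes P0's,
 * P1's lock acquisition also observes everything before P0's unlock.
 *)

{}

P0(int *x, int *y, spinlock_t *mylock)
{
	WRITE_ONCE(*x, 1);
	spin_lock(mylock);
	WRITE_ONCE(*y, 1);
	spin_unlock(mylock);
}

P1(int *x, int *y, spinlock_t *mylock)
{
	int r0;
	int r1;

	spin_lock(mylock);
	r0 = READ_ONCE(*y);
	spin_unlock(mylock);
	r1 = READ_ONCE(*x);
}

exists (1:r0=1 /\ 1:r1=0)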
// RCU
rcu_read_lock() { __fence{rcu-lock}; }
rcu_read_unlock() { __fence{rcu-unlock}; }
synchronize_rcu() { __fence{sync-rcu}; }
synchronize_rcu_expedited() { __fence{sync-rcu}; }
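Note that the model treats expedited and normal grace periods
identically. The grace-period guarantee is that a read-side critical
section cannot span synchronize_rcu(): if it observes the
post-grace-period write, it must also observe the pre-grace-period one.
A sketch (hypothetical test name):

C MP+synchronizercu+rcureadonceonce

(*
 * Result: Never, because the critical section cannot straddle the
 * grace period: r0 = 1 implies the section did not complete before
 * the grace period began, so it must see x = 1.
 *)

{}

P0(int *x, int *y)
{
	WRITE_ONCE(*x, 1);
	synchronize_rcu();
	WRITE_ONCE(*y, 1);
}

P1(int *x, int *y)
{
	int r0;
	int r1;

	rcu_read_lock();
	r0 = READ_ONCE(*y);
	r1 = READ_ONCE(*x);
	rcu_read_unlock();
}

exists (1:r0=1 /\ 1:r1=0)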
// Atomic
atomic_read(X) READ_ONCE(*X)
atomic_set(X,V) { WRITE_ONCE(*X,V); }
atomic_read_acquire(X) smp_load_acquire(X)
atomic_set_release(X,V) { smp_store_release(X,V); }
atomic_add(V,X) { __atomic_op(X,+,V); }
atomic_sub(V,X) { __atomic_op(X,-,V); }
atomic_inc(X) { __atomic_op(X,+,1); }
atomic_dec(X) { __atomic_op(X,-,1); }

atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V)
atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V)
atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V)
atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V)
atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V)
atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V)

atomic_inc_return(X) __atomic_op_return{mb}(X,+,1)
atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1)
atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1)
atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1)
atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1)
atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1)
atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1)
atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1)

atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V)
atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V)
atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V)
atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V)
atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V)
atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V)
atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V)

atomic_dec_return(X) __atomic_op_return{mb}(X,-,1)
atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1)
atomic_dec_return_acquire(X) __atomic_op_return{acquire}(X,-,1)
atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1)
atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1)
atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1)
atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1)
atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1)
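The value-returning forms in {mb} flavor are fully ordered, so an
atomic_fetch_add() can take over the publishing role in the
message-passing pattern. A sketch with a made-up test name:

C MP+oncefetchaddmb+acquireonce

(*
 * Result: Never, because the fully ordered RMW orders the preceding
 * plain write before the atomic update that P1 reads with acquire
 * semantics.
 *)

{}

P0(int *x, atomic_t *y)
{
	int r0;

	WRITE_ONCE(*x, 1);
	r0 = atomic_fetch_add(1, y);
}

P1(int *x, atomic_t *y)
{
	int r1;
	int r2;

	r1 = atomic_read_acquire(y);
	r2 = READ_ONCE(*x);
}

exists (1:r1=1 /\ 1:r2=0)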
atomic_xchg(X,V) __xchg{mb}(X,V)
atomic_xchg_relaxed(X,V) __xchg{once}(X,V)
atomic_xchg_release(X,V) __xchg{release}(X,V)
atomic_xchg_acquire(X,V) __xchg{acquire}(X,V)
atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
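Because the compare-and-exchange is a single atomic event, two CPUs
racing to move the same variable from 0 to 1 cannot both succeed. A
sketch (hypothetical test name):

C Atomic-cmpxchg-single-winner

(*
 * Result: Never, because at most one of the atomic RMWs can observe
 * the initial zero; the other must see the winner's store of 1.
 *)

{}

P0(atomic_t *flag)
{
	int r0;

	r0 = atomic_cmpxchg(flag, 0, 1);
}

P1(atomic_t *flag)
{
	int r1;

	r1 = atomic_cmpxchg(flag, 0, 1);
}

exists (0:r0=0 /\ 1:r1=0)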
atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0
atomic_dec_and_test(X) __atomic_op_return{mb}(X,-,1) == 0
atomic_inc_and_test(X) __atomic_op_return{mb}(X,+,1) == 0
atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0
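These expand to the fully ordered value-returning operation compared
against the interesting value, so the boolean result reflects the
atomically updated counter. In the following sketch (hypothetical test
name, counter implicitly initialized to zero), atomic_inc_and_test()
can return true only if P0's decrement has already landed:

C Atomic-inc-and-test-interleave

(*
 * Result: Sometimes, since r0 = 1 exactly when the increment finds
 * the counter at -1, that is, when P0's atomic_dec() executed first.
 *)

{}

P0(atomic_t *c)
{
	atomic_dec(c);
}

P1(atomic_t *c)
{
	int r0;

	r0 = atomic_inc_and_test(c);
}

exists (1:r0=1)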