commit 614e40faf5
LKMM has long provided only approximate handling of SRCU read-side
critical sections. This has not been a pressing problem because LKMM's
traditional handling is correct for the common cases of non-overlapping
and properly nested critical sections. However, LKMM's traditional
handling of partially overlapping critical sections incorrectly fuses
them into one large critical section. For example, consider the
following litmus test:

------------------------------------------------------------------------
C C-srcu-nest-5

(*
 * Result: Sometimes
 *
 * This demonstrates non-nested overlapping of SRCU read-side critical
 * sections.  Unlike RCU, SRCU critical sections do not unconditionally
 * nest.
 *)

{}

P0(int *x, int *y, struct srcu_struct *s1)
{
	int r1;
	int r2;
	int r3;
	int r4;

	r3 = srcu_read_lock(s1);
	r2 = READ_ONCE(*y);
	r4 = srcu_read_lock(s1);
	srcu_read_unlock(s1, r3);
	r1 = READ_ONCE(*x);
	srcu_read_unlock(s1, r4);
}

P1(int *x, int *y, struct srcu_struct *s1)
{
	WRITE_ONCE(*y, 1);
	synchronize_srcu(s1);
	WRITE_ONCE(*x, 1);
}

locations [0:r1]
exists (0:r1=1 /\ 0:r2=0)
------------------------------------------------------------------------

Current mainline incorrectly flattens the two critical sections into
one larger critical section, giving "Never" instead of the correct
"Sometimes":

------------------------------------------------------------------------
$ herd7 -conf linux-kernel.cfg C-srcu-nest-5.litmus
Test C-srcu-nest-5 Allowed
States 3
0:r1=0; 0:r2=0;
0:r1=0; 0:r2=1;
0:r1=1; 0:r2=1;
No
Witnesses
Positive: 0 Negative: 3
Flag srcu-bad-nesting
Condition exists (0:r1=1 /\ 0:r2=0)
Observation C-srcu-nest-5 Never 0 3
Time C-srcu-nest-5 0.01
Hash=e692c106cf3e84e20f12991dc438ff1b
------------------------------------------------------------------------

To its credit, it does complain about bad nesting.  But with this
commit we get the following result, which has the virtue of being
correct:

------------------------------------------------------------------------
$ herd7 -conf linux-kernel.cfg C-srcu-nest-5.litmus
Test C-srcu-nest-5 Allowed
States 4
0:r1=0; 0:r2=0;
0:r1=0; 0:r2=1;
0:r1=1; 0:r2=0;
0:r1=1; 0:r2=1;
Ok
Witnesses
Positive: 1 Negative: 3
Condition exists (0:r1=1 /\ 0:r2=0)
Observation C-srcu-nest-5 Sometimes 1 3
Time C-srcu-nest-5 0.05
Hash=e692c106cf3e84e20f12991dc438ff1b
------------------------------------------------------------------------

In addition, there are new srcu_down_read() and srcu_up_read()
functions on their way to mainline.  Roughly speaking, these are to
srcu_read_lock() and srcu_read_unlock() as down() and up() are to
mutex_lock() and mutex_unlock().  The key point is that srcu_down_read()
can execute in one process and the matching srcu_up_read() in another,
as shown in this litmus test:

------------------------------------------------------------------------
C C-srcu-nest-6

(*
 * Result: Never
 *
 * This would be valid for srcu_down_read() and srcu_up_read().
 *)

{}

P0(int *x, int *y, struct srcu_struct *s1, int *idx, int *f)
{
	int r2;
	int r3;

	r3 = srcu_down_read(s1);
	WRITE_ONCE(*idx, r3);
	r2 = READ_ONCE(*y);
	smp_store_release(f, 1);
}

P1(int *x, int *y, struct srcu_struct *s1, int *idx, int *f)
{
	int r1;
	int r3;
	int r4;

	r4 = smp_load_acquire(f);
	r1 = READ_ONCE(*x);
	r3 = READ_ONCE(*idx);
	srcu_up_read(s1, r3);
}

P2(int *x, int *y, struct srcu_struct *s1)
{
	WRITE_ONCE(*y, 1);
	synchronize_srcu(s1);
	WRITE_ONCE(*x, 1);
}

locations [0:r1]
filter (1:r4=1)
exists (1:r1=1 /\ 0:r2=0)
------------------------------------------------------------------------

When run on current mainline, this litmus test gets a complaint about
an unknown macro srcu_down_read().  With this commit:

------------------------------------------------------------------------
$ herd7 -conf linux-kernel.cfg C-srcu-nest-6.litmus
Test C-srcu-nest-6 Allowed
States 3
0:r1=0; 0:r2=0; 1:r1=0;
0:r1=0; 0:r2=1; 1:r1=0;
0:r1=0; 0:r2=1; 1:r1=1;
No
Witnesses
Positive: 0 Negative: 3
Condition exists (1:r1=1 /\ 0:r2=0)
Observation C-srcu-nest-6 Never 0 3
Time C-srcu-nest-6 0.02
Hash=c1f20257d052ca5e899be508bedcb2a1
------------------------------------------------------------------------

Note that the user must supply the flag "f" and the "filter" clause,
similar to what must be done to emulate call_rcu().

The commit works by treating srcu_read_lock()/srcu_down_read() as loads
and srcu_read_unlock()/srcu_up_read() as stores.  This allows us to
determine which unlock matches which lock by looking for a data
dependency between them.  In order for this to work properly, the data
dependencies have to be tracked through stores to intermediate
variables such as "idx" in the litmus test above; this is handled by
the new carry-srcu-data relation.  But it's important here (and in the
existing carry-dep relation) to avoid tracking the dependencies through
SRCU unlock stores.  Otherwise, in situations resembling:

	A: r1 = srcu_read_lock(s);
	B: srcu_read_unlock(s, r1);
	C: r2 = srcu_read_lock(s);
	D: srcu_read_unlock(s, r2);

it would look as if D was dependent on both A and C, because "s" would
appear to be an intermediate variable written by B and read by C.  This
explains the complications in the definitions of carry-srcu-data and
carry-dep.

As a debugging aid, the commit adds a check for errors in which the
value returned by one call to srcu_read_lock()/srcu_down_read() is
passed to more than one instance of srcu_read_unlock()/srcu_up_read().

Finally, since these SRCU-related primitives are now treated as
ordinary reads and writes, we have to add them into the lists of marked
accesses (i.e., not subject to data races) and lock-related accesses
(i.e., one shouldn't try to access an srcu_struct with a
non-lock-related primitive such as READ_ONCE() or a plain write).

Portions of this approach were suggested by Boqun Feng and Jonas
Oberhauser.

[ paulmck: Fix space-before-tab whitespace nit. ]

Reported-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Reviewed-by: Jonas Oberhauser <jonas.oberhauser@huaweicloud.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
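As an illustration of that debugging check (this litmus test is not part
of the commit; the test name, variables, and exists clause are only for
demonstration), a test along the following lines passes the index
returned by a single srcu_read_lock() to two srcu_read_unlock() calls,
which is exactly the kind of misuse the new check is meant to flag:

------------------------------------------------------------------------
C C-srcu-bad-unlock

(*
 * Illustrative only: the index returned by one srcu_read_lock() is
 * passed to two srcu_read_unlock() calls.
 *)

{}

P0(int *x, struct srcu_struct *s1)
{
	int r1;
	int r2;

	r1 = srcu_read_lock(s1);
	r2 = READ_ONCE(*x);
	srcu_read_unlock(s1, r1);
	srcu_read_unlock(s1, r1);
}

P1(int *x, struct srcu_struct *s1)
{
	WRITE_ONCE(*x, 1);
	synchronize_srcu(s1);
}

exists (0:r2=1)
------------------------------------------------------------------------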
// SPDX-License-Identifier: GPL-2.0+
//
// An earlier version of this file appeared in the companion webpage for
// "Frightening small children and disconcerting grown-ups: Concurrency
// in the Linux kernel" by Alglave, Maranget, McKenney, Parri, and Stern,
// which appeared in ASPLOS 2018.

// ONCE
READ_ONCE(X) __load{once}(X)
WRITE_ONCE(X,V) { __store{once}(X,V); }
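//
// The {once} tag makes these marked accesses; like the other marked
// accesses defined in this file, they are not subject to data races in
// the way plain C-language accesses are, but by themselves they provide
// no ordering.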

// Release Acquire and friends
smp_store_release(X,V) { __store{release}(*X,V); }
smp_load_acquire(X) __load{acquire}(*X)
rcu_assign_pointer(X,V) { __store{release}(X,V); }
rcu_dereference(X) __load{once}(X)
smp_store_mb(X,V) { __store{once}(X,V); __fence{mb}; }

// Fences
smp_mb() { __fence{mb}; }
smp_rmb() { __fence{rmb}; }
smp_wmb() { __fence{wmb}; }
smp_mb__before_atomic() { __fence{before-atomic}; }
smp_mb__after_atomic() { __fence{after-atomic}; }
smp_mb__after_spinlock() { __fence{after-spinlock}; }
smp_mb__after_unlock_lock() { __fence{after-unlock-lock}; }
smp_mb__after_srcu_read_unlock() { __fence{after-srcu-read-unlock}; }
barrier() { __fence{barrier}; }

// Exchange
xchg(X,V) __xchg{mb}(X,V)
xchg_relaxed(X,V) __xchg{once}(X,V)
xchg_release(X,V) __xchg{release}(X,V)
xchg_acquire(X,V) __xchg{acquire}(X,V)
cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)
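//
// The {mb}, {once}, {acquire}, and {release} annotations select the fully
// ordered, relaxed, acquire, and release variants, respectively; the same
// convention is used for the atomic operations below.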

// Spinlocks
spin_lock(X) { __lock(X); }
spin_unlock(X) { __unlock(X); }
spin_trylock(X) __trylock(X)
spin_is_locked(X) __islocked(X)

// RCU
rcu_read_lock() { __fence{rcu-lock}; }
rcu_read_unlock() { __fence{rcu-unlock}; }
synchronize_rcu() { __fence{sync-rcu}; }
synchronize_rcu_expedited() { __fence{sync-rcu}; }
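//
// The expedited form maps to the same sync-rcu fence: the memory model
// does not distinguish expedited grace periods from ordinary ones.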

// SRCU
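//
// srcu_read_lock() and srcu_down_read() are modeled as loads, and
// srcu_read_unlock() and srcu_up_read() as stores, so that each unlock
// can be matched with its lock by following the data dependency on the
// returned index.  srcu_down_read() and srcu_up_read() may be paired
// across processes, in which case that dependency is tracked through
// stores to intermediate variables.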
srcu_read_lock(X) __load{srcu-lock}(*X)
srcu_read_unlock(X,Y) { __store{srcu-unlock}(*X,Y); }
srcu_down_read(X) __load{srcu-lock}(*X)
srcu_up_read(X,Y) { __store{srcu-unlock}(*X,Y); }
synchronize_srcu(X) { __srcu{sync-srcu}(X); }
synchronize_srcu_expedited(X) { __srcu{sync-srcu}(X); }

// Atomic
atomic_read(X) READ_ONCE(*X)
atomic_set(X,V) { WRITE_ONCE(*X,V); }
atomic_read_acquire(X) smp_load_acquire(X)
atomic_set_release(X,V) { smp_store_release(X,V); }

atomic_add(V,X) { __atomic_op(X,+,V); }
atomic_sub(V,X) { __atomic_op(X,-,V); }
atomic_inc(X) { __atomic_op(X,+,1); }
atomic_dec(X) { __atomic_op(X,-,1); }
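//
// These non-value-returning read-modify-write operations carry no
// ordering annotation; the value-returning _return and _fetch variants
// below come in fully ordered, relaxed, acquire, and release forms.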

atomic_add_return(V,X) __atomic_op_return{mb}(X,+,V)
atomic_add_return_relaxed(V,X) __atomic_op_return{once}(X,+,V)
atomic_add_return_acquire(V,X) __atomic_op_return{acquire}(X,+,V)
atomic_add_return_release(V,X) __atomic_op_return{release}(X,+,V)
atomic_fetch_add(V,X) __atomic_fetch_op{mb}(X,+,V)
atomic_fetch_add_relaxed(V,X) __atomic_fetch_op{once}(X,+,V)
atomic_fetch_add_acquire(V,X) __atomic_fetch_op{acquire}(X,+,V)
atomic_fetch_add_release(V,X) __atomic_fetch_op{release}(X,+,V)

atomic_inc_return(X) __atomic_op_return{mb}(X,+,1)
atomic_inc_return_relaxed(X) __atomic_op_return{once}(X,+,1)
atomic_inc_return_acquire(X) __atomic_op_return{acquire}(X,+,1)
atomic_inc_return_release(X) __atomic_op_return{release}(X,+,1)
atomic_fetch_inc(X) __atomic_fetch_op{mb}(X,+,1)
atomic_fetch_inc_relaxed(X) __atomic_fetch_op{once}(X,+,1)
atomic_fetch_inc_acquire(X) __atomic_fetch_op{acquire}(X,+,1)
atomic_fetch_inc_release(X) __atomic_fetch_op{release}(X,+,1)

atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
atomic_sub_return_relaxed(V,X) __atomic_op_return{once}(X,-,V)
atomic_sub_return_acquire(V,X) __atomic_op_return{acquire}(X,-,V)
atomic_sub_return_release(V,X) __atomic_op_return{release}(X,-,V)
atomic_fetch_sub(V,X) __atomic_fetch_op{mb}(X,-,V)
atomic_fetch_sub_relaxed(V,X) __atomic_fetch_op{once}(X,-,V)
atomic_fetch_sub_acquire(V,X) __atomic_fetch_op{acquire}(X,-,V)
atomic_fetch_sub_release(V,X) __atomic_fetch_op{release}(X,-,V)

atomic_dec_return(X) __atomic_op_return{mb}(X,-,1)
atomic_dec_return_relaxed(X) __atomic_op_return{once}(X,-,1)
atomic_dec_return_acquire(X) __atomic_op_return{acquire}(X,-,1)
atomic_dec_return_release(X) __atomic_op_return{release}(X,-,1)
atomic_fetch_dec(X) __atomic_fetch_op{mb}(X,-,1)
atomic_fetch_dec_relaxed(X) __atomic_fetch_op{once}(X,-,1)
atomic_fetch_dec_acquire(X) __atomic_fetch_op{acquire}(X,-,1)
atomic_fetch_dec_release(X) __atomic_fetch_op{release}(X,-,1)

atomic_xchg(X,V) __xchg{mb}(X,V)
atomic_xchg_relaxed(X,V) __xchg{once}(X,V)
atomic_xchg_release(X,V) __xchg{release}(X,V)
atomic_xchg_acquire(X,V) __xchg{acquire}(X,V)
atomic_cmpxchg(X,V,W) __cmpxchg{mb}(X,V,W)
atomic_cmpxchg_relaxed(X,V,W) __cmpxchg{once}(X,V,W)
atomic_cmpxchg_acquire(X,V,W) __cmpxchg{acquire}(X,V,W)
atomic_cmpxchg_release(X,V,W) __cmpxchg{release}(X,V,W)

atomic_sub_and_test(V,X) __atomic_op_return{mb}(X,-,V) == 0
atomic_dec_and_test(X) __atomic_op_return{mb}(X,-,1) == 0
atomic_inc_and_test(X) __atomic_op_return{mb}(X,+,1) == 0
atomic_add_negative(V,X) __atomic_op_return{mb}(X,+,V) < 0