Mirror of https://mirrors.bfsu.edu.cn/git/linux.git, synced 2024-12-05 01:54:09 +08:00
c438b7d860
The memory model has been updated to provide a stronger ordering guarantee
for unlock(A)+lock(B) on the same CPU/thread. Therefore, add two litmus
tests describing this new guarantee. These tests are simple yet clearly
show the use of the new guarantee, and they can also serve as self-tests
for the modification to the model.

Co-developed-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Boqun Feng <boqun.feng@gmail.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
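As a quick self-check, "Result: Never" claims like the one in the test
below can be verified by running the test through the herd7 simulator
from the tools/memory-model directory, along the lines of:

	herd7 -conf linux-kernel.cfg litmus-tests/LB+unlocklockonceonce+poacquireonce.litmus

(This follows the invocation documented in the memory-model README; the
exact path may differ depending on the tree.)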
36 lines | 719 B | Plaintext
C LB+unlocklockonceonce+poacquireonce

(*
 * Result: Never
 *
 * If two locked critical sections execute on the same CPU, all accesses
 * in the first must execute before any accesses in the second, even if the
 * critical sections are protected by different locks. Note: Even when a
 * write executes before a read, their memory effects can be reordered from
 * the viewpoint of another CPU (the kind of reordering allowed by TSO).
 *)

{}

P0(spinlock_t *s, spinlock_t *t, int *x, int *y)
{
	int r1;

	spin_lock(s);
	r1 = READ_ONCE(*x);
	spin_unlock(s);
	spin_lock(t);
	WRITE_ONCE(*y, 1);
	spin_unlock(t);
}

P1(int *x, int *y)
{
	int r2;

	r2 = smp_load_acquire(y);
	WRITE_ONCE(*x, 1);
}

exists (0:r1=1 /\ 1:r2=1)
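The commit message refers to two litmus tests, but only the LB-flavored
one is shown on this page. For illustration, a message-passing (MP)
style companion covering the store-propagation side of the same
unlock(A)+lock(B) guarantee could look like the sketch below; the file
name and exact contents are an assumption based on the commit
description, not a verbatim copy of the second file.

C MP+unlocklockonceonce+fencermbonceonce

(*
 * Sketch (assumed companion test): Result: Never
 *
 * If two locked critical sections execute on the same CPU, stores in
 * the first must propagate to each CPU before stores in the second do,
 * even if the critical sections are protected by different locks.
 *)

{}

P0(spinlock_t *s, spinlock_t *t, int *x, int *y)
{
	spin_lock(s);
	WRITE_ONCE(*x, 1);
	spin_unlock(s);
	spin_lock(t);
	WRITE_ONCE(*y, 1);
	spin_unlock(t);
}

P1(int *x, int *y)
{
	int r1;
	int r2;

	r1 = READ_ONCE(*y);
	smp_rmb();
	r2 = READ_ONCE(*x);
}

exists (1:r1=1 /\ 1:r2=0)

Here the unlock(s)+lock(t) pair on P0 orders the two stores, so once P1
observes y==1, the smp_rmb() guarantees it also observes x==1; note that
write-to-write ordering is preserved even under the TSO-like relaxation
mentioned in the comment of the LB test above.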