Commit 1c824bf7 authored by Linus Torvalds

Merge tag 'lkmm.2022.01.09a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull memory model documentation updates from Paul McKenney:
 "This series contains documentation and litmus tests for locking,
  courtesy of Boqun Feng"

* tag 'lkmm.2022.01.09a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  tools/memory-model: litmus: Add two tests for unlock(A)+lock(B) ordering
  tools/memory-model: doc: Describe the requirement of the litmus-tests directory
  tools/memory-model: Provide extra ordering for unlock+lock pair on the same CPU
parents e7d38f16 c438b7d8
tools/memory-model/Documentation/explanation.txt
@@ -1813,15 +1813,16 @@ spin_trylock() -- we can call these things lock-releases and
lock-acquires -- have two properties beyond those of ordinary releases
and acquires.
-First, when a lock-acquire reads from a lock-release, the LKMM
-requires that every instruction po-before the lock-release must
-execute before any instruction po-after the lock-acquire. This would
-naturally hold if the release and acquire operations were on different
-CPUs, but the LKMM says it holds even when they are on the same CPU.
-For example:
+First, when a lock-acquire reads from or is po-after a lock-release,
+the LKMM requires that every instruction po-before the lock-release
+must execute before any instruction po-after the lock-acquire. This
+would naturally hold if the release and acquire operations were on
+different CPUs and accessed the same lock variable, but the LKMM says
+it also holds when they are on the same CPU, even if they access
+different lock variables. For example:
int x, y;
-spinlock_t s;
+spinlock_t s, t;
P0()
{
@@ -1830,9 +1831,9 @@ For example:
spin_lock(&s);
r1 = READ_ONCE(x);
spin_unlock(&s);
-spin_lock(&s);
+spin_lock(&t);
r2 = READ_ONCE(y);
-spin_unlock(&s);
+spin_unlock(&t);
}
P1()
@@ -1842,10 +1843,10 @@ For example:
WRITE_ONCE(x, 1);
}
-Here the second spin_lock() reads from the first spin_unlock(), and
-therefore the load of x must execute before the load of y. Thus we
-cannot have r1 = 1 and r2 = 0 at the end (this is an instance of the
-MP pattern).
+Here the second spin_lock() is po-after the first spin_unlock(), and
+therefore the load of x must execute before the load of y, even though
+the two locking operations use different locks. Thus we cannot have
+r1 = 1 and r2 = 0 at the end (this is an instance of the MP pattern).
This requirement does not apply to ordinary release and acquire
fences, only to lock-related operations. For instance, suppose P0()
@@ -1872,13 +1873,13 @@ instructions in the following order:
and thus it could load y before x, obtaining r2 = 0 and r1 = 1.
-Second, when a lock-acquire reads from a lock-release, and some other
-stores W and W' occur po-before the lock-release and po-after the
-lock-acquire respectively, the LKMM requires that W must propagate to
-each CPU before W' does. For example, consider:
+Second, when a lock-acquire reads from or is po-after a lock-release,
+and some other stores W and W' occur po-before the lock-release and
+po-after the lock-acquire respectively, the LKMM requires that W must
+propagate to each CPU before W' does. For example, consider:
int x, y;
-spinlock_t x;
+spinlock_t s;
P0()
{
@@ -1908,7 +1909,12 @@ each CPU before W' does. For example, consider:
If r1 = 1 at the end then the spin_lock() in P1 must have read from
the spin_unlock() in P0. Hence the store to x must propagate to P2
-before the store to y does, so we cannot have r2 = 1 and r3 = 0.
+before the store to y does, so we cannot have r2 = 1 and r3 = 0. But
+if P1 had used a lock variable different from s, the writes could have
+propagated in either order. (On the other hand, if the code in P0 and
+P1 had all executed on a single CPU, as in the example before this
+one, then the writes would have propagated in order even if the two
+critical sections used different lock variables.)
These two special requirements for lock-release and lock-acquire do
not arise from the operational model. Nevertheless, kernel developers
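[Editor's note: to make the contrast with ordinary release/acquire concrete, here is a litmus-style sketch. It is not part of this commit and the test name is made up; it replaces P0's unlock+lock pair from the MP example above with smp_store_release()/smp_load_acquire() of the same variable on the same CPU. Because the CPU may forward the released value to the acquire, the LKMM allows the outcome that the lock-based version forbids.]

C MP+releaseacquire-samecpu
(*
 * Result: Sometimes  (illustrative sketch, not in the kernel tree)
 *
 * A store-release followed by a load-acquire of the same variable on
 * the same CPU does not order the surrounding accesses: the released
 * value can be forwarded to the acquire before either access reaches
 * memory.
 *)

{}

P0(int *x, int *y, int *s)
{
	int r1;
	int r2;
	int r3;

	r1 = READ_ONCE(*x);
	smp_store_release(s, 1);
	r3 = smp_load_acquire(s);
	r2 = READ_ONCE(*y);
}

P1(int *x, int *y)
{
	WRITE_ONCE(*y, 1);
	smp_wmb();
	WRITE_ONCE(*x, 1);
}

exists (0:r1=1 /\ 0:r2=0)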
tools/memory-model/README
@@ -195,6 +195,18 @@ litmus-tests
are listed in litmus-tests/README. A great deal more litmus
tests are available at https://github.com/paulmckrcu/litmus.
By "representative", it means the one in the litmus-tests
directory is:
1) simple, the number of threads should be relatively
small and each thread function should be relatively
simple.
2) orthogonal, there should be no two litmus tests
describing the same aspect of the memory model.
3) textbook, developers can easily copy-paste-modify
the litmus tests to use the patterns on their own
code.
lock.cat
Provides a front-end analysis of lock acquisition and release,
for example, associating a lock acquisition with the preceding
tools/memory-model/linux-kernel.cat
@@ -27,7 +27,7 @@ include "lock.cat"
(* Release Acquire *)
let acq-po = [Acquire] ; po ; [M]
let po-rel = [M] ; po ; [Release]
-let po-unlock-rf-lock-po = po ; [UL] ; rf ; [LKR] ; po
+let po-unlock-lock-po = po ; [UL] ; (po|rf) ; [LKR] ; po
(* Fences *)
let R4rmb = R \ Noreturn (* Reads for which rmb works *)
@@ -70,12 +70,12 @@ let rwdep = (dep | ctrl) ; [W]
let overwrite = co | fr
let to-w = rwdep | (overwrite & int) | (addr ; [Plain] ; wmb)
let to-r = addr | (dep ; [Marked] ; rfi)
-let ppo = to-r | to-w | fence | (po-unlock-rf-lock-po & int)
+let ppo = to-r | to-w | fence | (po-unlock-lock-po & int)
(* Propagation: Ordering from release operations and strong fences. *)
let A-cumul(r) = (rfe ; [Marked])? ; r
let cumul-fence = [Marked] ; (A-cumul(strong-fence | po-rel) | wmb |
-po-unlock-rf-lock-po) ; [Marked]
+po-unlock-lock-po) ; [Marked]
let prop = [Marked] ; (overwrite & ext)? ; cumul-fence* ;
[Marked] ; rfe? ; [Marked]
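[Editor's note: an informal gloss of the updated relation, not text from the commit. The parenthesized (po|rf) gives two ways for an unlock to feed a lock-acquire, and the surrounding po links extend the ordering to neighboring accesses:]

(* Informal reading of po-unlock-lock-po:
 *
 *   [UL] ; po ; [LKR] -- the acquire is po-after the unlock: same CPU,
 *                        possibly different lock variables.
 *   [UL] ; rf ; [LKR] -- the acquire reads from the unlock: same lock
 *                        variable, possibly a different CPU.
 *
 * The leading and trailing po extend the relation to the accesses
 * around the unlock+lock pair.  In ppo the relation is intersected
 * with int, so only same-CPU pairs add execution ordering; in
 * cumul-fence it is used unrestricted, which is what orders the
 * propagation of the surrounding stores to other CPUs.
 *)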
tools/memory-model/litmus-tests/LB+unlocklockonceonce+poacquireonce.litmus (new file)

C LB+unlocklockonceonce+poacquireonce
(*
* Result: Never
*
* If two locked critical sections execute on the same CPU, all accesses
* in the first must execute before any accesses in the second, even if the
* critical sections are protected by different locks. Note: Even when a
* write executes before a read, their memory effects can be reordered from
* the viewpoint of another CPU (the kind of reordering allowed by TSO).
*)
{}
P0(spinlock_t *s, spinlock_t *t, int *x, int *y)
{
	int r1;

	spin_lock(s);
	r1 = READ_ONCE(*x);
	spin_unlock(s);
	spin_lock(t);
	WRITE_ONCE(*y, 1);
	spin_unlock(t);
}
P1(int *x, int *y)
{
	int r2;

	r2 = smp_load_acquire(y);
	WRITE_ONCE(*x, 1);
}
exists (0:r1=1 /\ 1:r2=1)
tools/memory-model/litmus-tests/MP+unlocklockonceonce+fencermbonceonce.litmus (new file)

C MP+unlocklockonceonce+fencermbonceonce
(*
* Result: Never
*
* If two locked critical sections execute on the same CPU, stores in the
* first must propagate to each CPU before stores in the second do, even if
* the critical sections are protected by different locks.
*)
{}
P0(spinlock_t *s, spinlock_t *t, int *x, int *y)
{
	spin_lock(s);
	WRITE_ONCE(*x, 1);
	spin_unlock(s);
	spin_lock(t);
	WRITE_ONCE(*y, 1);
	spin_unlock(t);
}
P1(int *x, int *y)
{
	int r1;
	int r2;

	r1 = READ_ONCE(*y);
	smp_rmb();
	r2 = READ_ONCE(*x);
}
exists (1:r1=1 /\ 1:r2=0)
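[Editor's note: assuming herd7 and the memory model are set up as described in tools/memory-model/README, the two new tests can be checked directly. For both, the Observation line of the output should report Never, confirming that the exists clause is unreachable:]

	$ cd tools/memory-model
	$ herd7 -conf linux-kernel.cfg litmus-tests/LB+unlocklockonceonce+poacquireonce.litmus
	$ herd7 -conf linux-kernel.cfg litmus-tests/MP+unlocklockonceonce+fencermbonceonce.litmus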
tools/memory-model/litmus-tests/README
@@ -63,6 +63,10 @@ LB+poonceonces.litmus
As above, but with store-release replaced with WRITE_ONCE()
and load-acquire replaced with READ_ONCE().
+LB+unlocklockonceonce+poacquireonce.litmus
+	Does an unlock+lock pair provide an ordering guarantee
+	between a load and a store?
MP+onceassign+derefonce.litmus
As below, but with rcu_assign_pointer() and an rcu_dereference().
@@ -90,6 +94,10 @@ MP+porevlocks.litmus
As below, but with the first access of the writer process
and the second access of reader process protected by a lock.
+MP+unlocklockonceonce+fencermbonceonce.litmus
+	Does an unlock+lock pair provide an ordering guarantee
+	between a store and another store?
MP+fencewmbonceonce+fencermbonceonce.litmus
Does a smp_wmb() (between the stores) and an smp_rmb() (between
the loads) suffice for the message-passing litmus test, where one