C MP+porevlocks

(*
 * Result: Never
 *
 * This litmus test demonstrates how lock acquisitions and releases can
 * stand in for smp_load_acquire() and smp_store_release(), respectively.
 * In other words, when holding a given lock (or indeed after releasing a
 * given lock), a CPU is not only guaranteed to see the accesses that other
 * CPUs made while previously holding that lock, it is also guaranteed to
 * see all prior accesses by those other CPUs.
 *)

{}

P0(int *buf, int *flag, spinlock_t *mylock) // Consumer
{
	int r0;
	int r1;

	r0 = READ_ONCE(*flag);
	spin_lock(mylock);
	r1 = READ_ONCE(*buf);
	spin_unlock(mylock);
}

P1(int *buf, int *flag, spinlock_t *mylock) // Producer
{
	spin_lock(mylock);
	WRITE_ONCE(*buf, 1);
	spin_unlock(mylock);
	WRITE_ONCE(*flag, 1);
}

exists (0:r0=1 /\ 0:r1=0) (* Bad outcome. *)
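
For comparison, here is a minimal sketch of the same message-passing pattern written directly with smp_store_release() and smp_load_acquire() in place of the lock operations. It is modeled on the kernel's MP+pooncerelease+poacquireonce litmus test, with the producer and consumer roles swapped between processes relative to the lock-based version above; the variable names reuse buf and flag from this file, and the comments are illustrative rather than quoted from the kernel tree.

C MP+pooncerelease+poacquireonce

(*
 * Result: Never
 *
 * Sketch: smp_store_release() and smp_load_acquire() provide the same
 * ordering for the message-passing pattern as the lock-based version
 * above, without requiring a critical section on either side.
 *)

{}

P0(int *buf, int *flag) // Producer
{
	WRITE_ONCE(*buf, 1);
	smp_store_release(flag, 1);
}

P1(int *buf, int *flag) // Consumer
{
	int r0;
	int r1;

	r0 = smp_load_acquire(flag);
	r1 = READ_ONCE(*buf);
}

exists (1:r0=1 /\ 1:r1=0) (* Bad outcome. *)

Either test can be checked with the herd7 tool against the Linux-kernel memory model, for example with a command along the lines of: herd7 -conf linux-kernel.cfg MP+porevlocks.litmus (run from the kernel's tools/memory-model directory).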