C LB+unlocklockonceonce+poacquireonce

(*
 * Result: Never
 *
 * If two locked critical sections execute on the same CPU, all accesses
 * in the first must execute before any accesses in the second, even if the
 * critical sections are protected by different locks. Note: Even when a
 * write executes before a read, their memory effects can be reordered from
 * the viewpoint of another CPU (the kind of reordering allowed by TSO).
 *)

{}

P0(spinlock_t *s, spinlock_t *t, int *x, int *y)
{
	int r1;

	spin_lock(s);
	r1 = READ_ONCE(*x);
	spin_unlock(s);
	spin_lock(t);
	WRITE_ONCE(*y, 1);
	spin_unlock(t);
}

P1(int *x, int *y)
{
	int r2;

	r2 = smp_load_acquire(y);
	WRITE_ONCE(*x, 1);
}

exists (0:r1=1 /\ 1:r2=1)
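
(*
 * Usage sketch: assuming a Linux kernel source tree and an installed
 * herd7, this test can be checked from tools/memory-model/ with, for
 * example:
 *
 *   herd7 -conf linux-kernel.cfg litmus-tests/LB+unlocklockonceonce+poacquireonce.litmus
 *
 * The Observation line of herd7's output should report "Never",
 * matching the Result comment above: no execution satisfies the
 * exists clause.
 *)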