/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_BARRIER_H
#define __SPARC64_BARRIER_H

/* These are here in an effort to more fully work around Spitfire Errata
 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur. Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)
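
/*
 * Note on the sequence above: "ba,pt %xcc" is an unconditional branch,
 * statically predicted taken, and because it is not annulled its delay
 * slot (the membar) always executes.  The membar therefore can never
 * sit in the shadow of a mispredicted branch, which is exactly the
 * erratum condition described above; the "memory" clobber additionally
 * stops the compiler from moving memory accesses across the barrier.
 */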

/* The kernel always executes in the TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")
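
/*
 * Under TSO the only reordering the hardware may perform is of an
 * earlier store with a later load.  Hence mb() needs a real #StoreLoad
 * membar, while rmb() and wmb() only have to constrain the compiler.
 * A sketch of the one pattern that genuinely needs mb() (the classic
 * store-buffering case; x, y, r0 and r1 are illustrative names):
 *
 *	CPU 0				CPU 1
 *	WRITE_ONCE(x, 1);		WRITE_ONCE(y, 1);
 *	mb();				mb();
 *	r0 = READ_ONCE(y);		r1 = READ_ONCE(x);
 *
 * Without the barriers, r0 == 0 && r1 == 0 is a permitted outcome even
 * under TSO.
 */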

#define __smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	WRITE_ONCE(*p, v); \
} while (0)

#define __smp_load_acquire(p) \
({ \
	typeof(*p) ___p1 = READ_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	___p1; \
})
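
/*
 * Illustrative pairing for the two macros above (the data/ready names
 * are assumptions, not part of this file):
 *
 *	CPU 0 (producer)		CPU 1 (consumer)
 *	data = 42;			while (!smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);		cpu_relax();
 *					BUG_ON(data != 42);
 *
 * Only barrier() is needed on each side because the one reordering TSO
 * allows (a later load completing before an earlier store) is forbidden
 * by neither acquire nor release semantics.
 */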

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
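
/*
 * These are assumed to need only compiler barriers as well: under TSO
 * the atomic instructions themselves provide the hardware ordering the
 * kernel expects around atomic operations, so only compiler reordering
 * has to be suppressed.
 */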

#include <asm-generic/barrier.h>

#endif /* !(__SPARC64_BARRIER_H) */