/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Generic barrier definitions.
 *
 * It should be possible to use these on really simple architectures,
 * but they serve more as a starting point for new ports.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_BARRIER_H
#define __ASM_GENERIC_BARRIER_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
#include <asm/rwonce.h>

#ifndef nop
#define nop() asm volatile ("nop")
#endif

/*
 * Architectures that want generic instrumentation can define __ prefixed
 * variants of all barriers.
 */

#ifdef __mb
#define mb() do { kcsan_mb(); __mb(); } while (0)
#endif

#ifdef __rmb
#define rmb() do { kcsan_rmb(); __rmb(); } while (0)
#endif

#ifdef __wmb
#define wmb() do { kcsan_wmb(); __wmb(); } while (0)
#endif

#ifdef __dma_mb
#define dma_mb() do { kcsan_mb(); __dma_mb(); } while (0)
#endif

#ifdef __dma_rmb
#define dma_rmb() do { kcsan_rmb(); __dma_rmb(); } while (0)
#endif

#ifdef __dma_wmb
#define dma_wmb() do { kcsan_wmb(); __dma_wmb(); } while (0)
#endif

/*
 * Force strict CPU ordering. And yes, this is required on UP too when we're
 * talking to devices.
 *
 * Fall back to compiler barriers if nothing better is provided.
 */

#ifndef mb
#define mb() barrier()
#endif

#ifndef rmb
#define rmb() mb()
#endif

#ifndef wmb
#define wmb() mb()
#endif

#ifndef dma_mb
#define dma_mb() mb()
#endif

#ifndef dma_rmb
#define dma_rmb() rmb()
#endif

#ifndef dma_wmb
#define dma_wmb() wmb()
#endif

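/*
 * Illustrative sketch (not part of this header), adapted from the device
 * descriptor example in Documentation/memory-barriers.txt: dma_rmb() and
 * dma_wmb() order CPU accesses to coherent DMA memory against the
 * device-visible ownership flag. "desc", "doorbell" and DEVICE_OWN are
 * hypothetical names.
 *
 *	if (desc->status != DEVICE_OWN) {
 *		dma_rmb();			// own it before reading data
 *		read_data = desc->data;
 *		desc->data = write_data;
 *		dma_wmb();			// flush data before status update
 *		desc->status = DEVICE_OWN;
 *		writel(DESC_NOTIFY, doorbell);	// MMIO write, ordered by writel()
 *	}
 */
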
#ifndef __smp_mb
#define __smp_mb() mb()
#endif

#ifndef __smp_rmb
#define __smp_rmb() rmb()
#endif

#ifndef __smp_wmb
#define __smp_wmb() wmb()
#endif

#ifdef CONFIG_SMP

#ifndef smp_mb
#define smp_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#endif

#ifndef smp_rmb
#define smp_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#endif

#ifndef smp_wmb
#define smp_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#endif

#else /* !CONFIG_SMP */

#ifndef smp_mb
#define smp_mb() barrier()
#endif

#ifndef smp_rmb
#define smp_rmb() barrier()
#endif

#ifndef smp_wmb
#define smp_wmb() barrier()
#endif

#endif /* CONFIG_SMP */

#ifndef __smp_store_mb
#define __smp_store_mb(var, value) do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
#endif

#ifndef __smp_mb__before_atomic
#define __smp_mb__before_atomic() __smp_mb()
#endif

#ifndef __smp_mb__after_atomic
#define __smp_mb__after_atomic() __smp_mb()
#endif

#ifndef __smp_store_release
#define __smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	__smp_mb(); \
	WRITE_ONCE(*p, v); \
} while (0)
#endif

#ifndef __smp_load_acquire
#define __smp_load_acquire(p) \
({ \
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	__smp_mb(); \
	(typeof(*p))___p1; \
})
#endif

#ifdef CONFIG_SMP

#ifndef smp_store_mb
#define smp_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) __smp_load_acquire(p)
#endif

#else /* !CONFIG_SMP */

#ifndef smp_store_mb
#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
#endif

#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic() barrier()
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic() barrier()
#endif

#ifndef smp_store_release
#define smp_store_release(p, v) \
do { \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	WRITE_ONCE(*p, v); \
} while (0)
#endif

#ifndef smp_load_acquire
#define smp_load_acquire(p) \
({ \
	__unqual_scalar_typeof(*p) ___p1 = READ_ONCE(*p); \
	compiletime_assert_atomic_type(*p); \
	barrier(); \
	(typeof(*p))___p1; \
})
#endif

#endif /* CONFIG_SMP */

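/*
 * Illustrative sketch (not part of this header): the canonical
 * producer/consumer pairing of smp_store_release()/smp_load_acquire().
 * The variables "data" and "ready" are hypothetical.
 *
 *	int data, ready;
 *
 *	// CPU 0 (producer)
 *	WRITE_ONCE(data, 42);
 *	smp_store_release(&ready, 1);	// publishes the store to "data"
 *
 *	// CPU 1 (consumer)
 *	if (smp_load_acquire(&ready))
 *		BUG_ON(READ_ONCE(data) != 42);	// guaranteed to see 42
 */
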
/* Barriers for virtual machine guests when talking to an SMP host */
#define virt_mb() do { kcsan_mb(); __smp_mb(); } while (0)
#define virt_rmb() do { kcsan_rmb(); __smp_rmb(); } while (0)
#define virt_wmb() do { kcsan_wmb(); __smp_wmb(); } while (0)
#define virt_store_mb(var, value) do { kcsan_mb(); __smp_store_mb(var, value); } while (0)
#define virt_mb__before_atomic() do { kcsan_mb(); __smp_mb__before_atomic(); } while (0)
#define virt_mb__after_atomic() do { kcsan_mb(); __smp_mb__after_atomic(); } while (0)
#define virt_store_release(p, v) do { kcsan_release(); __smp_store_release(p, v); } while (0)
#define virt_load_acquire(p) __smp_load_acquire(p)

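/*
 * Illustrative sketch (not part of this header): a guest publishing an
 * entry to a ring shared with the host must use the virt_* barriers even
 * when built !CONFIG_SMP, because the host may run on another CPU. The
 * ring layout below is hypothetical.
 *
 *	ring->slot[idx] = request;
 *	virt_store_release(&ring->avail_idx, idx + 1);
 *	// the host side pairs this with virt_load_acquire(&ring->avail_idx)
 */
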
/**
 * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
 *
 * A control dependency provides LOAD->STORE order; the additional RMB
 * provides LOAD->LOAD order. Together they provide LOAD->{LOAD,STORE}
 * order, i.e. (load)-ACQUIRE.
 *
 * Architectures that do not do load speculation can have this be barrier().
 */
#ifndef smp_acquire__after_ctrl_dep
#define smp_acquire__after_ctrl_dep() smp_rmb()
#endif

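/*
 * Illustrative sketch (not part of this header): upgrading a control
 * dependency to ACQUIRE. "flag" and "data" are hypothetical.
 *
 *	while (!READ_ONCE(flag))
 *		cpu_relax();
 *	// the branch orders the load of "flag" against later stores;
 *	// the RMB below additionally orders it against later loads:
 *	smp_acquire__after_ctrl_dep();
 *	do_something(READ_ONCE(data));
 */
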
/**
 * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using READ_ONCE() on the condition variable.
 *
 * Due to C lacking lambda expressions, we load the value of *ptr into a
 * pre-named variable @VAL to be used in @cond_expr.
 */
#ifndef smp_cond_load_relaxed
#define smp_cond_load_relaxed(ptr, cond_expr) ({ \
	typeof(ptr) __PTR = (ptr); \
	__unqual_scalar_typeof(*ptr) VAL; \
	for (;;) { \
		VAL = READ_ONCE(*__PTR); \
		if (cond_expr) \
			break; \
		cpu_relax(); \
	} \
	(typeof(*ptr))VAL; \
})
#endif

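/*
 * Illustrative sketch (not part of this header): spin until another CPU
 * sets any of the hypothetical READY bits, with no ordering implied.
 *
 *	u32 seen = smp_cond_load_relaxed(&state->flags, VAL & READY);
 *	// "seen" holds the value of state->flags that satisfied the test
 */
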
/**
 * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
 * @ptr: pointer to the variable to wait on
 * @cond_expr: boolean expression to wait for
 *
 * Equivalent to using smp_load_acquire() on the condition variable but
 * employs the control dependency of the wait to reduce the barrier on
 * many platforms.
 */
#ifndef smp_cond_load_acquire
#define smp_cond_load_acquire(ptr, cond_expr) ({ \
	__unqual_scalar_typeof(*ptr) _val; \
	_val = smp_cond_load_relaxed(ptr, cond_expr); \
	smp_acquire__after_ctrl_dep(); \
	(typeof(*ptr))_val; \
})
#endif

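/*
 * Illustrative sketch (not part of this header): wait for a hypothetical
 * lock word to be released before entering a critical section; the
 * ACQUIRE keeps the critical section from being reordered before the wait.
 *
 *	smp_cond_load_acquire(&lock->val, VAL == UNLOCKED);
 *	// critical section: loads/stores here stay after the wait
 */
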
/*
 * pmem_wmb() ensures that all stores to persistent storage issued by
 * preceding instructions have reached persistent storage before any data
 * access or data transfer caused by subsequent instructions is initiated.
 */
#ifndef pmem_wmb
#define pmem_wmb() wmb()
#endif

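/*
 * Illustrative sketch (not part of this header): make a log entry in a
 * DAX/pmem mapping durable before marking it valid. "pmem_entry" and its
 * layout are hypothetical; memcpy_flushcache() is one way such stores
 * reach the persistence domain.
 *
 *	memcpy_flushcache(pmem_entry, &entry, sizeof(entry));
 *	pmem_wmb();		// entry data is persistent ...
 *	memcpy_flushcache(&pmem_entry->valid, &one, sizeof(one));
 *				// ... before the valid flag is written
 */
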
/*
 * ioremap_wc() maps I/O memory as memory with write-combining attributes.
 * For this kind of memory access, the CPU may wait for prior accesses to
 * be merged with subsequent ones, and in some situations that wait hurts
 * performance. io_stop_wc() can be used to prevent the merging of
 * write-combining memory accesses before this macro with those after it.
 */
#ifndef io_stop_wc
#define io_stop_wc() do { } while (0)
#endif

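/*
 * Illustrative sketch (not part of this header): flush out one batch of
 * writes to a hypothetical write-combined device page before starting
 * the next, so the two batches are not merged.
 *
 *	void __iomem *wc = ioremap_wc(phys_addr, PAGE_SIZE);
 *
 *	memcpy_toio(wc, batch0, BATCH_SIZE);
 *	io_stop_wc();		// don't merge batch0 with batch1
 *	memcpy_toio(wc + BATCH_SIZE, batch1, BATCH_SIZE);
 */
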
/*
 * Architectures that guarantee an implicit smp_mb() in switch_mm()
 * can override smp_mb__after_switch_mm.
 */
#ifndef smp_mb__after_switch_mm
# define smp_mb__after_switch_mm() smp_mb()
#endif

#endif /* !__ASSEMBLY__ */
#endif /* __ASM_GENERIC_BARRIER_H */