/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
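
/*
 * Illustrative usage sketch (not part of this header's definitions): a
 * counter is declared as an atomic_t, initialised with ATOMIC_INIT(), and
 * only ever accessed through the accessors above, e.g.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 0);
 *	if (atomic_read(&nr_users) == 0)
 *		...
 *
 * Touching ->counter directly would bypass the READ_ONCE()/WRITE_ONCE()
 * annotations, so the accessors are preferred.
 */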

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result; \
	\
	prefetchw(&v->counter); \
	__asm__ __volatile__("@ atomic_" #op "\n" \
"1:	ldrex %0, [%3]\n" \
"	" #asm_op " %0, %0, %4\n" \
"	strex %1, %0, [%3]\n" \
"	teq %1, #0\n" \
"	bne 1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
} \

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result; \
	\
	prefetchw(&v->counter); \
	\
	__asm__ __volatile__("@ atomic_" #op "_return\n" \
"1:	ldrex %0, [%3]\n" \
"	" #asm_op " %0, %0, %4\n" \
"	strex %1, %0, [%3]\n" \
"	teq %1, #0\n" \
"	bne 1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
	\
	return result; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result, val; \
	\
	prefetchw(&v->counter); \
	\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n" \
"1:	ldrex %0, [%4]\n" \
"	" #asm_op " %1, %0, %5\n" \
"	strex %2, %1, [%4]\n" \
"	teq %2, #0\n" \
"	bne 1b" \
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
	\
	return result; \
}

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
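
/*
 * For reference, a rough sketch of what ATOMIC_OP(add, +=, add) above
 * expands to (the real expansion is driven by the ATOMIC_OPS()
 * invocations further down):
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *		int result;
 *
 *		prefetchw(&v->counter);
 *		__asm__ __volatile__("@ atomic_add\n"
 *	"1:	ldrex %0, [%3]\n"
 *	"	add %0, %0, %4\n"
 *	"	strex %1, %0, [%3]\n"
 *	"	teq %1, #0\n"
 *	"	bne 1b"
 *		: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 *		: "r" (&v->counter), "Ir" (i)
 *		: "cc");
 *	}
 *
 * i.e. the value is reloaded and the exclusive store retried until it
 * succeeds.
 */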

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex %1, [%3]\n"
		"mov %0, #0\n"
		"teq %1, %4\n"
		"strexeq %0, %5, [%3]\n"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "Ir" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
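
/*
 * Illustrative compare-and-swap loop built on the primitive above (a
 * sketch only; "atomic_set_max" is a hypothetical helper, not defined by
 * this header):
 *
 *	static inline void atomic_set_max(atomic_t *v, int new)
 *	{
 *		int old = atomic_read(v);
 *
 *		while (old < new) {
 *			int seen = atomic_cmpxchg_relaxed(v, old, new);
 *			if (seen == old)
 *				break;
 *			old = seen;
 *		}
 *	}
 *
 * atomic_cmpxchg_relaxed() returns the value that was observed, so the
 * caller can tell whether its update won the race.
 */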

static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex %0, [%4]\n"
"	teq %0, %5\n"
"	beq 2f\n"
"	add %1, %0, %6\n"
"	strex %2, %1, [%4]\n"
"	teq %2, #0\n"
"	bne 1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic_fetch_add_unless		atomic_fetch_add_unless
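
/*
 * Usage sketch for atomic_fetch_add_unless() (illustrative;
 * "get_ref_unless_zero" is a hypothetical helper, not defined here):
 *
 *	static inline bool get_ref_unless_zero(atomic_t *refs)
 *	{
 *		return atomic_fetch_add_unless(refs, 1, 0) != 0;
 *	}
 *
 * The returned value is the counter as it was before the (possibly
 * skipped) addition, which is why the != 0 test is sufficient.
 */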

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
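
/*
 * On these pre-ARMv6 (UP-only) configurations the read-modify-write
 * sequences below are made atomic simply by disabling interrupts around
 * them: with no other CPU present, an interrupt is the only thing that
 * could observe an intermediate state, so no exclusive-monitor loop is
 * needed.
 */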

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	\
	raw_local_irq_save(flags); \
	v->counter c_op i; \
	raw_local_irq_restore(flags); \
} \

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int val; \
	\
	raw_local_irq_save(flags); \
	v->counter c_op i; \
	val = v->counter; \
	raw_local_irq_restore(flags); \
	\
	return val; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int val; \
	\
	raw_local_irq_save(flags); \
	val = v->counter; \
	v->counter c_op i; \
	raw_local_irq_restore(flags); \
	\
	return val; \
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

#define atomic_fetch_andnot		atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_OP_RETURN(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
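
/*
 * Illustrative use of atomic_xchg() (a sketch; "try_claim" is a
 * hypothetical helper, not defined by this header):
 *
 *	static atomic_t claimed = ATOMIC_INIT(0);
 *
 *	static inline bool try_claim(void)
 *	{
 *		return atomic_xchg(&claimed, 1) == 0;
 *	}
 *
 * Exactly one caller observes the old value 0 and "wins"; atomic_xchg()
 * is fully ordered in the kernel memory model, so it can serve as the
 * acquire point here.
 */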

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd %0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd %2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd %0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
	s64 tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd %0, %H0, [%2]\n"
"	strexd %0, %3, %H3, [%2]\n"
"	teq %0, #0\n"
"	bne 1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
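
/*
 * Note on the two variants above: with LPAE, ldrd/strd accesses to a
 * naturally aligned doubleword are single-copy atomic, so plain 64-bit
 * loads and stores suffice. Without LPAE only ldrexd/strexd give that
 * guarantee, hence the exclusive load in atomic64_read() and the retry
 * loop in atomic64_set().
 */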

#define ATOMIC64_OP(op, op1, op2) \
static inline void atomic64_##op(s64 i, atomic64_t *v) \
{ \
	s64 result; \
	unsigned long tmp; \
	\
	prefetchw(&v->counter); \
	__asm__ __volatile__("@ atomic64_" #op "\n" \
"1:	ldrexd %0, %H0, [%3]\n" \
"	" #op1 " %Q0, %Q0, %Q4\n" \
"	" #op2 " %R0, %R0, %R4\n" \
"	strexd %1, %0, %H0, [%3]\n" \
"	teq %1, #0\n" \
"	bne 1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
} \

#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline s64 \
atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
{ \
	s64 result; \
	unsigned long tmp; \
	\
	prefetchw(&v->counter); \
	\
	__asm__ __volatile__("@ atomic64_" #op "_return\n" \
"1:	ldrexd %0, %H0, [%3]\n" \
"	" #op1 " %Q0, %Q0, %Q4\n" \
"	" #op2 " %R0, %R0, %R4\n" \
"	strexd %1, %0, %H0, [%3]\n" \
"	teq %1, #0\n" \
"	bne 1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
	\
	return result; \
}

#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline s64 \
atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
{ \
	s64 result, val; \
	unsigned long tmp; \
	\
	prefetchw(&v->counter); \
	\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n" \
"1:	ldrexd %0, %H0, [%4]\n" \
"	" #op1 " %Q1, %Q0, %Q5\n" \
"	" #op2 " %R1, %R0, %R5\n" \
"	strexd %2, %1, %H1, [%4]\n" \
"	teq %2, #0\n" \
"	bne 1b" \
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
	\
	return result; \
}

#define ATOMIC64_OPS(op, op1, op2) \
	ATOMIC64_OP(op, op1, op2) \
	ATOMIC64_OP_RETURN(op, op1, op2) \
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
	ATOMIC64_OP(op, op1, op2) \
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
	s64 oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd %1, %H1, [%3]\n"
		"mov %0, #0\n"
		"teq %1, %4\n"
		"teqeq %H1, %H4\n"
		"strexdeq %0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed

static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
	s64 result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd %0, %H0, [%3]\n"
"	strexd %1, %4, %H4, [%3]\n"
"	teq %1, #0\n"
"	bne 1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed

static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd %0, %H0, [%3]\n"
"	subs %Q0, %Q0, #1\n"
"	sbc %R0, %R0, #0\n"
"	teq %R0, #0\n"
"	bmi 2f\n"
"	strexd %1, %0, %H0, [%3]\n"
"	teq %1, #0\n"
"	bne 1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define atomic64_dec_if_positive	atomic64_dec_if_positive
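
/*
 * Usage sketch for atomic64_dec_if_positive() (illustrative; "put_token"
 * is a hypothetical helper, not defined by this header):
 *
 *	static inline bool put_token(atomic64_t *tokens)
 *	{
 *		return atomic64_dec_if_positive(tokens) >= 0;
 *	}
 *
 * The counter is only decremented when the result stays non-negative; a
 * negative return value means no token was taken.
 */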

static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd %0, %H0, [%4]\n"
"	teq %0, %5\n"
"	teqeq %H0, %H5\n"
"	beq 2f\n"
"	adds %Q1, %Q0, %Q6\n"
"	adc %R1, %R0, %R6\n"
"	strexd %2, %1, %H1, [%4]\n"
"	teq %2, #0\n"
"	bne 1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic64_fetch_add_unless	atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif