/*
 * arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */
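
/*
 * Each routine below follows the same pattern: ldrex loads the counter
 * and marks the location for exclusive access, strex stores the new
 * value only if the exclusive reservation is still held, writing 0
 * (success) or 1 (failure) into a status register, and the teq/bne
 * pair retries the whole sequence when the store failed.
 */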
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%3]\n"
" add %0, %0, %4\n"
" strex %1, %0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic_add_return\n"
"1: ldrex %0, [%3]\n"
" add %0, %0, %4\n"
" strex %1, %0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}
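
/*
 * Ordering convention: the value-returning operations (atomic_add_return,
 * atomic_sub_return, atomic_cmpxchg) issue smp_mb() on both sides of the
 * ldrex/strex loop and are therefore fully ordered, while the void
 * atomic_add()/atomic_sub() provide no memory ordering at all.
 */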

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic_sub\n"
"1: ldrex %0, [%3]\n"
" sub %0, %0, %4\n"
" strex %1, %0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%3]\n"
" sub %0, %0, %4\n"
" strex %1, %0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex %1, [%3]\n"
		"mov %0, #0\n"
		"teq %1, %4\n"
		"strexeq %0, %5, [%3]\n"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "Ir" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}
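
/*
 * The do/while loop around the asm above retries only when strexeq was
 * attempted and lost the exclusive reservation (res != 0).  When the
 * compare fails, res is left at 0 by the "mov %0, #0" and the unchanged
 * old value is returned without writing anything.
 */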

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1: ldrex %0, [%4]\n"
" teq %0, %5\n"
" beq 2f\n"
" add %1, %0, %6\n"
" strex %2, %1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
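
/*
 * __atomic_add_unless() adds @a to the counter unless it currently holds
 * @u and returns the value observed before the attempt, so the caller can
 * tell whether the add happened by comparing the result against @u.  The
 * trailing smp_mb() is conditional: the operation is only a full barrier
 * when the counter was actually modified.  This backs the usual "take a
 * reference only if the object is still live" idiom, e.g. for a structure
 * with a hypothetical atomic_t refcount member:
 *
 *	if (__atomic_add_unless(&obj->refcount, 1, 0) != 0)
 *		... reference successfully taken ...
 */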

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif
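
/*
 * Pre-ARMv6 CPUs have no exclusive load/store, so atomicity is obtained
 * by disabling interrupts around a plain read-modify-write.  This is
 * only safe because SMP is ruled out by the #error above: nothing else
 * can touch the counter while IRQs are off on the single CPU.
 */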

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
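
/*
 * The smp_mb__{before,after}_atomic_{inc,dec}() hooks must expand to a
 * real smp_mb() because, on ARMv6 and later, atomic_inc()/atomic_dec()
 * are built on the unordered atomic_add()/atomic_sub() above.
 */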

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	long long counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

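/*
 * A 64-bit counter needs special care on 32-bit ARM.  With LPAE, ldrd and
 * strd to a naturally aligned doubleword are single-copy atomic, so
 * atomic64_read()/atomic64_set() can use them directly; without LPAE even
 * a plain set must go through an ldrexd/strexd loop so that both halves
 * are updated as one indivisible store.
 */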
#ifdef CONFIG_ARM_LPAE
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
" ldrd %0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	__asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline long long atomic64_read(const atomic64_t *v)
{
	long long result;

	__asm__ __volatile__("@ atomic64_read\n"
" ldrexd %0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	long long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%2]\n"
" strexd %0, %3, %H3, [%2]\n"
" teq %0, #0\n"
" bne 1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

static inline void atomic64_add(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_add\n"
"1: ldrexd %0, %H0, [%3]\n"
" adds %Q0, %Q0, %Q4\n"
" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_return\n"
"1: ldrexd %0, %H0, [%3]\n"
" adds %Q0, %Q0, %Q4\n"
" adc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic64_sub(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_sub\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, %Q4\n"
" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, %Q4\n"
" sbc %R0, %R0, %R4\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
					  long long new)
{
	long long oldval;
	unsigned long res;

	smp_mb();
	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd %1, %H1, [%3]\n"
		"mov %0, #0\n"
		"teq %1, %4\n"
		"teqeq %H1, %H4\n"
		"strexdeq %0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1: ldrexd %0, %H0, [%3]\n"
" strexd %1, %4, %H4, [%3]\n"
" teq %1, #0\n"
" bne 1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, #1\n"
" sbc %R0, %R0, #0\n"
" teq %R0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
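
/*
 * atomic64_dec_if_positive() only commits the store when the decremented
 * value is still non-negative (the "bmi 2f" skips the strexd otherwise);
 * the decremented value is returned either way, so a negative result
 * tells the caller that nothing was written.
 */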

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1: ldrexd %0, %H0, [%4]\n"
" teq %0, %5\n"
" teqeq %H0, %H5\n"
" moveq %1, #0\n"
" beq 2f\n"
" adds %Q0, %Q0, %Q6\n"
" adc %R0, %R0, %R6\n"
" strexd %2, %0, %H0, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops. We use load exclusive and
 * store exclusive to ensure that these are atomic. We may loop
 * to ensure that the update happens.
 */

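/*
 * The per-operation functions are generated from the ATOMIC_OP,
 * ATOMIC_OP_RETURN and ATOMIC_FETCH_OP templates below, instantiated once
 * per operation by the ATOMIC_OPS() lines further down.  The _relaxed
 * suffix marks variants that impose no memory ordering of their own.
 */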
#define ATOMIC_OP(op, c_op, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result; \
	\
	prefetchw(&v->counter); \
	__asm__ __volatile__("@ atomic_" #op "\n" \
"1: ldrex %0, [%3]\n" \
" " #asm_op " %0, %0, %4\n" \
" strex %1, %0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
} \

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result; \
	\
	prefetchw(&v->counter); \
	\
	__asm__ __volatile__("@ atomic_" #op "_return\n" \
"1: ldrex %0, [%3]\n" \
" " #asm_op " %0, %0, %4\n" \
" strex %1, %0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
	\
	return result; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
	unsigned long tmp; \
	int result, val; \
	\
	prefetchw(&v->counter); \
	\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n" \
"1: ldrex %0, [%4]\n" \
" " #asm_op " %1, %0, %5\n" \
" strex %2, %1, [%4]\n" \
" teq %2, #0\n" \
" bne 1b" \
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "Ir" (i) \
	: "cc"); \
	\
	return result; \
}
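
/*
 * The name-to-itself defines below advertise to the generic atomic
 * headers which relaxed operations this architecture implements; the
 * fully ordered, _acquire and _release forms are then built on top of
 * them with explicit barriers.
 */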

#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex %1, [%3]\n"
		"mov %0, #0\n"
		"teq %1, %4\n"
		"strexeq %0, %5, [%3]\n"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "Ir" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define arch_atomic_cmpxchg_relaxed	arch_atomic_cmpxchg_relaxed

static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1: ldrex %0, [%4]\n"
" teq %0, %5\n"
" beq 2f\n"
" add %1, %0, %6\n"
" strex %2, %1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic_fetch_add_unless	arch_atomic_fetch_add_unless

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	\
	raw_local_irq_save(flags); \
	v->counter c_op i; \
	raw_local_irq_restore(flags); \
} \

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int val; \
	\
	raw_local_irq_save(flags); \
	v->counter c_op i; \
	val = v->counter; \
	raw_local_irq_restore(flags); \
	\
	return val; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
	unsigned long flags; \
	int val; \
	\
	raw_local_irq_save(flags); \
	val = v->counter; \
	v->counter c_op i; \
	raw_local_irq_restore(flags); \
	\
	return val; \
}

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

#define arch_atomic_fetch_andnot	arch_atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_OP_RETURN(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
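
/*
 * Each ATOMIC_OPS() line above expands to three inline functions, e.g.
 * ATOMIC_OPS(add, +=, add) produces arch_atomic_add(),
 * arch_atomic_add_return_relaxed() and arch_atomic_fetch_add_relaxed(),
 * built either from the ldrex/strex templates or from the IRQ-disabling
 * fallback depending on __LINUX_ARM_ARCH__.
 */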

#define arch_atomic_andnot arch_atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \
	ATOMIC_OP(op, c_op, asm_op) \
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
" ldrd %0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
" ldrexd %0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	s64 tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1: ldrexd %0, %H0, [%2]\n"
" strexd %0, %3, %H3, [%2]\n"
" teq %0, #0\n"
" bne 1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

#define ATOMIC64_OP(op, op1, op2) \
static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \
	s64 result; \
	unsigned long tmp; \
	\
	prefetchw(&v->counter); \
	__asm__ __volatile__("@ atomic64_" #op "\n" \
"1: ldrexd %0, %H0, [%3]\n" \
" " #op1 " %Q0, %Q0, %Q4\n" \
" " #op2 " %R0, %R0, %R4\n" \
" strexd %1, %0, %H0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
} \

#define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline s64 \
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
{ \
	s64 result; \
	unsigned long tmp; \
	\
	prefetchw(&v->counter); \
	\
	__asm__ __volatile__("@ atomic64_" #op "_return\n" \
"1: ldrexd %0, %H0, [%3]\n" \
" " #op1 " %Q0, %Q0, %Q4\n" \
" " #op2 " %R0, %R0, %R4\n" \
" strexd %1, %0, %H0, [%3]\n" \
" teq %1, #0\n" \
" bne 1b" \
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
	\
	return result; \
}

#define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline s64 \
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
{ \
	s64 result, val; \
	unsigned long tmp; \
	\
	prefetchw(&v->counter); \
	\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n" \
"1: ldrexd %0, %H0, [%4]\n" \
" " #op1 " %Q1, %Q0, %Q5\n" \
" " #op2 " %R1, %R0, %R5\n" \
" strexd %2, %1, %H1, [%4]\n" \
" teq %2, #0\n" \
" bne 1b" \
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter) \
	: "r" (&v->counter), "r" (i) \
	: "cc"); \
	\
	return result; \
}

#define ATOMIC64_OPS(op, op1, op2) \
	ATOMIC64_OP(op, op1, op2) \
	ATOMIC64_OP_RETURN(op, op1, op2) \
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \
	ATOMIC64_OP(op, op1, op2) \
	ATOMIC64_FETCH_OP(op, op1, op2)

#define arch_atomic64_andnot arch_atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)
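
/*
 * The bitwise 64-bit operations pass the same mnemonic for both halves
 * (and, orr, eor and bic act independently on each 32-bit word), whereas
 * add and sub above need the carry-propagating adds/adc and subs/sbc
 * pairs to update the high word correctly.
 */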

#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
	s64 oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd %1, %H1, [%3]\n"
		"mov %0, #0\n"
		"teq %1, %4\n"
		"teqeq %H1, %H4\n"
		"strexdeq %0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define arch_atomic64_cmpxchg_relaxed	arch_atomic64_cmpxchg_relaxed

static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
	s64 result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1: ldrexd %0, %H0, [%3]\n"
" strexd %1, %4, %H4, [%3]\n"
" teq %1, #0\n"
" bne 1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define arch_atomic64_xchg_relaxed	arch_atomic64_xchg_relaxed

static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1: ldrexd %0, %H0, [%3]\n"
" subs %Q0, %Q0, #1\n"
" sbc %R0, %R0, #0\n"
" teq %R0, #0\n"
" bmi 2f\n"
" strexd %1, %0, %H0, [%3]\n"
" teq %1, #0\n"
" bne 1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1: ldrexd %0, %H0, [%4]\n"
" teq %0, %5\n"
" teqeq %H0, %H5\n"
" beq 2f\n"
" adds %Q1, %Q0, %Q6\n"
" adc %R1, %R0, %R6\n"
" strexd %2, %1, %H1, [%4]\n"
" teq %2, #0\n"
" bne 1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif