/*
 * arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
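
/*
 * Illustrative sketch (not part of the original header): the ldrex/strex
 * retry loop above is conceptually the C below.  load_exclusive() and
 * store_exclusive() are hypothetical stand-ins for the ldrex and strex
 * instructions, not real kernel APIs.
 *
 *	do {
 *		result = load_exclusive(&v->counter);	(ldrex: load, arm the monitor)
 *		result += i;				(add)
 *	} while (store_exclusive(&v->counter, result));	(strex: non-zero on failure)
 */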

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "Ir" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}
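
/*
 * Example use of atomic_cmpxchg() (illustrative sketch, not part of the
 * original header; the function name and the limit parameter are invented):
 * a lock-free "increment, but only while below a limit".
 */
static inline int example_atomic_inc_below(atomic_t *v, int limit)
{
	int old, c = atomic_read(v);

	/* retry until our increment lands or the limit is reached */
	while (c < limit && (old = atomic_cmpxchg(v, c, c + 1)) != c)
		c = old;

	return c < limit;	/* non-zero iff the increment was performed */
}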

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%3]\n"
"	bic	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
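
/*
 * Illustrative use (not part of the original header):
 * __atomic_add_unless(v, 1, 0) returns the old value and increments only
 * when the counter was non-zero, the usual building block for "take a
 * reference unless the object is already dead", e.g.
 *
 *	if (__atomic_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;	(refcount had already hit zero; obj->refcnt
 *				 is an invented field for this example)
 */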

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

static inline u64 atomic64_read(atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, u64 i)
{
	u64 tmp;

	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
	u64 oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, #1\n"
"	sbc	%H0, %H0, #0\n"
"	teq	%H0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	u64 val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%0, %0, %6\n"
"	adc	%H0, %H0, %H6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif
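
/*
 * The same header again, in the later form it took once the kernel switched
 * to SPDX license identifiers and the arch_atomic_*() naming scheme.
 */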
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "Ir" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define arch_atomic_cmpxchg_relaxed		arch_atomic_cmpxchg_relaxed

static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic_fetch_add_unless		arch_atomic_fetch_add_unless
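
/*
 * Illustrative note (an assumption about the generic atomic layer, not text
 * from this header): arch_atomic_fetch_add_unless(v, 1, 0) returns the old
 * value and increments only when the counter was non-zero; the generic
 * atomic_inc_not_zero() helper is typically built from this pattern.
 */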

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define arch_atomic_add_return			arch_atomic_add_return
#define arch_atomic_sub_return			arch_atomic_sub_return
#define arch_atomic_fetch_add			arch_atomic_fetch_add
#define arch_atomic_fetch_sub			arch_atomic_fetch_sub

#define arch_atomic_fetch_and			arch_atomic_fetch_and
#define arch_atomic_fetch_andnot		arch_atomic_fetch_andnot
#define arch_atomic_fetch_or			arch_atomic_fetch_or
#define arch_atomic_fetch_xor			arch_atomic_fetch_xor

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
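
/*
 * For example, ATOMIC_OPS(add, +=, add) above stamps out arch_atomic_add(),
 * arch_atomic_add_return[_relaxed]() and arch_atomic_fetch_add[_relaxed]():
 * the _relaxed variants come from the ARMv6+ ldrex/strex templates, the
 * plain ones from the pre-ARMv6 IRQ-disabling templates.
 */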

#define arch_atomic_andnot arch_atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	s64 tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

#define ATOMIC64_OP(op, op1, op2)					\
static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64							\
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64							\
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v)		\
{									\
	s64 result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define arch_atomic64_andnot arch_atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
	s64 oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define arch_atomic64_cmpxchg_relaxed	arch_atomic64_cmpxchg_relaxed

static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
	s64 result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define arch_atomic64_xchg_relaxed		arch_atomic64_xchg_relaxed
454static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
455{
456 s64 result;
457 unsigned long tmp;
458
459 smp_mb();
460 prefetchw(&v->counter);
461
462 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
463"1: ldrexd %0, %H0, [%3]\n"
464" subs %Q0, %Q0, #1\n"
465" sbc %R0, %R0, #0\n"
466" teq %R0, #0\n"
467" bmi 2f\n"
468" strexd %1, %0, %H0, [%3]\n"
469" teq %1, #0\n"
470" bne 1b\n"
471"2:"
472 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
473 : "r" (&v->counter)
474 : "cc");
475
476 smp_mb();
477
478 return result;
479}
480#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
481
482static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
483{
484 s64 oldval, newval;
485 unsigned long tmp;
486
487 smp_mb();
488 prefetchw(&v->counter);
489
490 __asm__ __volatile__("@ atomic64_add_unless\n"
491"1: ldrexd %0, %H0, [%4]\n"
492" teq %0, %5\n"
493" teqeq %H0, %H5\n"
494" beq 2f\n"
495" adds %Q1, %Q0, %Q6\n"
496" adc %R1, %R0, %R6\n"
497" strexd %2, %1, %H1, [%4]\n"
498" teq %2, #0\n"
499" bne 1b\n"
500"2:"
501 : "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
502 : "r" (&v->counter), "r" (u), "r" (a)
503 : "cc");
504
505 if (oldval != u)
506 smp_mb();
507
508 return oldval;
509}
510#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
511
512#endif /* !CONFIG_GENERIC_ATOMIC64 */
513#endif
514#endif