v5.4
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	READ_ONCE((v)->counter)
#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
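
/*
 * Illustrative sketch (not part of the upstream header): typical use of
 * the accessors above, with a hypothetical counter.  ATOMIC_INIT() covers
 * static initialization; atomic_set() (re)initializes at run time and
 * atomic_read() returns the current value with no ordering guarantees.
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	atomic_set(&nr_users, 0);
 *	pr_info("users: %d\n", atomic_read(&nr_users));
 */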

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

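/*
 * Illustrative expansion (abridged, not generated code): instantiating
 * ATOMIC_OP(add, +=, add) with the template above yields roughly
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		...
 *		"1:	ldrex	%0, [%3]\n"	@ load-exclusive the old value
 *		"	add	%0, %0, %4\n"	@ apply the operation
 *		"	strex	%1, %0, [%3]\n"	@ store-exclusive; %1 is 0 on success
 *		"	teq	%1, #0\n"
 *		"	bne	1b"		@ lost the exclusive monitor: retry
 *		...
 *	}
 *
 * The c_op argument is unused on this path; it exists so the same
 * ATOMIC_OPS() instantiations also fit the pre-ARMv6 templates below.
 */
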
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
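
/*
 * Illustrative sketch, not part of the upstream header: a hypothetical
 * helper showing the usual cmpxchg retry loop built on the primitive
 * above, assuming the generic atomic layer has derived atomic_cmpxchg()
 * from atomic_cmpxchg_relaxed().  It takes a reference only while the
 * count is still non-zero.
 */
static inline bool example_atomic_inc_not_zero(atomic_t *ref)
{
	int old = atomic_read(ref);

	while (old != 0) {
		int prev = atomic_cmpxchg(ref, old, old + 1);

		if (prev == old)
			return true;	/* won the race */
		old = prev;		/* lost it: retry with the fresh value */
	}
	return false;			/* already zero: object is going away */
}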

static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic_fetch_add_unless		atomic_fetch_add_unless
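
/*
 * Note on the barriers above: atomic_fetch_add_unless() must be fully
 * ordered only when it actually modifies the counter, so smp_mb() is
 * issued unconditionally before the loop but only issued afterwards
 * when the add was performed (i.e. the old value did not equal @u).
 */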

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

#define atomic_fetch_andnot		atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
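
/*
 * The two instantiations above expand the templates into atomic_add(),
 * atomic_sub() and, on ARMv6+, atomic_add_return_relaxed(),
 * atomic_sub_return_relaxed(), atomic_fetch_add_relaxed() and
 * atomic_fetch_sub_relaxed(); pre-ARMv6 kernels get the fully-ordered
 * non-_relaxed forms instead.
 */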

#define atomic_andnot atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
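/*
 * With LPAE the architecture guarantees that aligned 64-bit ldrd/strd
 * accesses are single-copy atomic, so plain loads and stores are enough
 * here; without LPAE the ldrexd/strexd variants in the #else branch
 * below are required.
 */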
static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
	s64 tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif
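
/*
 * Note on the 64-bit asm below: for a 64-bit operand n, %Qn names the
 * register holding the low 32 bits, %Rn the register holding the high
 * 32 bits, and %Hn the second register of the pair, as the
 * ldrexd/strexd encodings require.
 */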

#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(s64 i, atomic64_t *v)			\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64							\
atomic64_##op##_return_relaxed(s64 i, atomic64_t *v)			\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64							\
atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v)			\
{									\
	s64 result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
	s64 oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed

static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
	s64 result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define atomic64_xchg_relaxed		atomic64_xchg_relaxed

static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
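
/*
 * Illustrative use (hypothetical caller): because the store is skipped
 * once the decrement would go negative, the counter can serve as a
 * resource count that never drops below zero:
 *
 *	if (atomic64_dec_if_positive(&slots) < 0)
 *		return -EBUSY;		(nothing was consumed)
 */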

static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif
v6.9.4
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
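
/*
 * Since the conversion to instrumented atomics, this file provides the
 * arch_atomic_*() operations; the generic wrappers in
 * include/linux/atomic/atomic-instrumented.h add KASAN/KCSAN checks and
 * export them under the familiar unprefixed atomic_*() names.
 */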

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define arch_atomic_cmpxchg_relaxed		arch_atomic_cmpxchg_relaxed

static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic_fetch_add_unless		arch_atomic_fetch_add_unless

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define arch_atomic_add_return			arch_atomic_add_return
#define arch_atomic_sub_return			arch_atomic_sub_return
#define arch_atomic_fetch_add			arch_atomic_fetch_add
#define arch_atomic_fetch_sub			arch_atomic_fetch_sub

#define arch_atomic_fetch_and			arch_atomic_fetch_and
#define arch_atomic_fetch_andnot		arch_atomic_fetch_andnot
#define arch_atomic_fetch_or			arch_atomic_fetch_or
#define arch_atomic_fetch_xor			arch_atomic_fetch_xor

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
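
/*
 * On a pre-ARMv6 CPU the kernel is necessarily UP (see the #error
 * above), so briefly disabling interrupts is sufficient to make these
 * read-modify-write sequences atomic with respect to everything else
 * that could observe the counter.
 */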

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_andnot arch_atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	s64 tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

#define ATOMIC64_OP(op, op1, op2)					\
static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64							\
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64							\
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v)		\
{									\
	s64 result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define arch_atomic64_andnot arch_atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
	s64 oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define arch_atomic64_cmpxchg_relaxed	arch_atomic64_cmpxchg_relaxed

static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
	s64 result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define arch_atomic64_xchg_relaxed		arch_atomic64_xchg_relaxed
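
/*
 * Only the _relaxed forms are defined above; the generic atomic layer
 * derives the acquire, release and fully-ordered variants by combining
 * them with the barriers from asm/barrier.h.
 */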

static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif