arch/arm/include/asm/atomic.h, v4.6:
  1/*
  2 *  arch/arm/include/asm/atomic.h
  3 *
  4 *  Copyright (C) 1996 Russell King.
  5 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 */
 11#ifndef __ASM_ARM_ATOMIC_H
 12#define __ASM_ARM_ATOMIC_H
 13
 14#include <linux/compiler.h>
 15#include <linux/prefetch.h>
 16#include <linux/types.h>
 17#include <linux/irqflags.h>
 18#include <asm/barrier.h>
 19#include <asm/cmpxchg.h>
 20
 21#define ATOMIC_INIT(i)	{ (i) }
 22
 23#ifdef __KERNEL__
 24
 25/*
 26 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 27 * strex/ldrex monitor on some implementations. The reason we can use it for
 28 * atomic_set() is the clrex or dummy strex done on every exception return.
 29 */
 30#define atomic_read(v)	READ_ONCE((v)->counter)
 31#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
 32
 33#if __LINUX_ARM_ARCH__ >= 6
 34
 35/*
 36 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 37 * store exclusive to ensure that these are atomic.  We may loop
 38 * to ensure that the update happens.
 39 */
 40
 41#define ATOMIC_OP(op, c_op, asm_op)					\
 42static inline void atomic_##op(int i, atomic_t *v)			\
 43{									\
 44	unsigned long tmp;						\
 45	int result;							\
 46									\
 47	prefetchw(&v->counter);						\
 48	__asm__ __volatile__("@ atomic_" #op "\n"			\
 49"1:	ldrex	%0, [%3]\n"						\
 50"	" #asm_op "	%0, %0, %4\n"					\
 51"	strex	%1, %0, [%3]\n"						\
 52"	teq	%1, #0\n"						\
 53"	bne	1b"							\
 54	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
 55	: "r" (&v->counter), "Ir" (i)					\
 56	: "cc");							\
 57}									\
 58
 59#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
 60static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 61{									\
 62	unsigned long tmp;						\
 63	int result;							\
 64									\
 65	prefetchw(&v->counter);						\
 66									\
 67	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
 68"1:	ldrex	%0, [%3]\n"						\
 69"	" #asm_op "	%0, %0, %4\n"					\
 70"	strex	%1, %0, [%3]\n"						\
 71"	teq	%1, #0\n"						\
 72"	bne	1b"							\
 73	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
 74	: "r" (&v->counter), "Ir" (i)					\
 75	: "cc");							\
 76									\
 77	return result;							\
 78}
 79
 80#define atomic_add_return_relaxed	atomic_add_return_relaxed
 81#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
 82
 83static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
 84{
 85	int oldval;
 86	unsigned long res;
 87
 88	prefetchw(&ptr->counter);
 89
 90	do {
 91		__asm__ __volatile__("@ atomic_cmpxchg\n"
 92		"ldrex	%1, [%3]\n"
 93		"mov	%0, #0\n"
 94		"teq	%1, %4\n"
 95		"strexeq %0, %5, [%3]\n"
 96		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
 97		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
 98		    : "cc");
 99	} while (res);
100
101	return oldval;
102}
103#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
104
105static inline int __atomic_add_unless(atomic_t *v, int a, int u)
106{
107	int oldval, newval;
108	unsigned long tmp;
109
110	smp_mb();
111	prefetchw(&v->counter);
112
113	__asm__ __volatile__ ("@ atomic_add_unless\n"
114"1:	ldrex	%0, [%4]\n"
115"	teq	%0, %5\n"
116"	beq	2f\n"
117"	add	%1, %0, %6\n"
118"	strex	%2, %1, [%4]\n"
119"	teq	%2, #0\n"
120"	bne	1b\n"
121"2:"
122	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
123	: "r" (&v->counter), "r" (u), "r" (a)
124	: "cc");
125
126	if (oldval != u)
127		smp_mb();
128
129	return oldval;
130}
131
132#else /* ARM_ARCH_6 */
133
134#ifdef CONFIG_SMP
135#error SMP not supported on pre-ARMv6 CPUs
136#endif
137
138#define ATOMIC_OP(op, c_op, asm_op)					\
139static inline void atomic_##op(int i, atomic_t *v)			\
140{									\
141	unsigned long flags;						\
142									\
143	raw_local_irq_save(flags);					\
144	v->counter c_op i;						\
145	raw_local_irq_restore(flags);					\
146}									\
147
148#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
149static inline int atomic_##op##_return(int i, atomic_t *v)		\
150{									\
151	unsigned long flags;						\
152	int val;							\
153									\
154	raw_local_irq_save(flags);					\
155	v->counter c_op i;						\
156	val = v->counter;						\
157	raw_local_irq_restore(flags);					\
158									\
159	return val;							\
160}
161
162static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
163{
164	int ret;
165	unsigned long flags;
166
167	raw_local_irq_save(flags);
168	ret = v->counter;
169	if (likely(ret == old))
170		v->counter = new;
171	raw_local_irq_restore(flags);
172
173	return ret;
174}
175
176static inline int __atomic_add_unless(atomic_t *v, int a, int u)
177{
178	int c, old;
179
180	c = atomic_read(v);
181	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
182		c = old;
183	return c;
184}
185
186#endif /* __LINUX_ARM_ARCH__ */
187
188#define ATOMIC_OPS(op, c_op, asm_op)					\
189	ATOMIC_OP(op, c_op, asm_op)					\
190	ATOMIC_OP_RETURN(op, c_op, asm_op)
191
192ATOMIC_OPS(add, +=, add)
193ATOMIC_OPS(sub, -=, sub)
194
195#define atomic_andnot atomic_andnot
196
197ATOMIC_OP(and, &=, and)
198ATOMIC_OP(andnot, &= ~, bic)
199ATOMIC_OP(or,  |=, orr)
200ATOMIC_OP(xor, ^=, eor)
201
202#undef ATOMIC_OPS
203#undef ATOMIC_OP_RETURN
204#undef ATOMIC_OP
205
206#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
207
208#define atomic_inc(v)		atomic_add(1, v)
209#define atomic_dec(v)		atomic_sub(1, v)
210
211#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
212#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
213#define atomic_inc_return_relaxed(v)    (atomic_add_return_relaxed(1, v))
214#define atomic_dec_return_relaxed(v)    (atomic_sub_return_relaxed(1, v))
215#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
216
217#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
218
219#ifndef CONFIG_GENERIC_ATOMIC64
220typedef struct {
221	long long counter;
222} atomic64_t;
223
224#define ATOMIC64_INIT(i) { (i) }
225
226#ifdef CONFIG_ARM_LPAE
227static inline long long atomic64_read(const atomic64_t *v)
228{
229	long long result;
230
231	__asm__ __volatile__("@ atomic64_read\n"
232"	ldrd	%0, %H0, [%1]"
233	: "=&r" (result)
234	: "r" (&v->counter), "Qo" (v->counter)
235	);
236
237	return result;
238}
239
240static inline void atomic64_set(atomic64_t *v, long long i)
241{
242	__asm__ __volatile__("@ atomic64_set\n"
243"	strd	%2, %H2, [%1]"
244	: "=Qo" (v->counter)
245	: "r" (&v->counter), "r" (i)
246	);
247}
248#else
249static inline long long atomic64_read(const atomic64_t *v)
250{
251	long long result;
252
253	__asm__ __volatile__("@ atomic64_read\n"
254"	ldrexd	%0, %H0, [%1]"
255	: "=&r" (result)
256	: "r" (&v->counter), "Qo" (v->counter)
257	);
258
259	return result;
260}
261
262static inline void atomic64_set(atomic64_t *v, long long i)
263{
264	long long tmp;
265
266	prefetchw(&v->counter);
267	__asm__ __volatile__("@ atomic64_set\n"
268"1:	ldrexd	%0, %H0, [%2]\n"
269"	strexd	%0, %3, %H3, [%2]\n"
270"	teq	%0, #0\n"
271"	bne	1b"
272	: "=&r" (tmp), "=Qo" (v->counter)
273	: "r" (&v->counter), "r" (i)
274	: "cc");
275}
276#endif
277
278#define ATOMIC64_OP(op, op1, op2)					\
279static inline void atomic64_##op(long long i, atomic64_t *v)		\
280{									\
281	long long result;						\
282	unsigned long tmp;						\
283									\
284	prefetchw(&v->counter);						\
285	__asm__ __volatile__("@ atomic64_" #op "\n"			\
286"1:	ldrexd	%0, %H0, [%3]\n"					\
287"	" #op1 " %Q0, %Q0, %Q4\n"					\
288"	" #op2 " %R0, %R0, %R4\n"					\
289"	strexd	%1, %0, %H0, [%3]\n"					\
290"	teq	%1, #0\n"						\
291"	bne	1b"							\
292	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
293	: "r" (&v->counter), "r" (i)					\
294	: "cc");							\
295}									\
296
297#define ATOMIC64_OP_RETURN(op, op1, op2)				\
298static inline long long							\
299atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
300{									\
301	long long result;						\
302	unsigned long tmp;						\
303									\
304	prefetchw(&v->counter);						\
305									\
306	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
307"1:	ldrexd	%0, %H0, [%3]\n"					\
308"	" #op1 " %Q0, %Q0, %Q4\n"					\
309"	" #op2 " %R0, %R0, %R4\n"					\
310"	strexd	%1, %0, %H0, [%3]\n"					\
311"	teq	%1, #0\n"						\
312"	bne	1b"							\
313	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
314	: "r" (&v->counter), "r" (i)					\
315	: "cc");							\
316									\
317	return result;							\
318}
319
320#define ATOMIC64_OPS(op, op1, op2)					\
321	ATOMIC64_OP(op, op1, op2)					\
322	ATOMIC64_OP_RETURN(op, op1, op2)
323
324ATOMIC64_OPS(add, adds, adc)
325ATOMIC64_OPS(sub, subs, sbc)
326
327#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
328#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
329
330#define atomic64_andnot atomic64_andnot
331
332ATOMIC64_OP(and, and, and)
333ATOMIC64_OP(andnot, bic, bic)
334ATOMIC64_OP(or,  orr, orr)
335ATOMIC64_OP(xor, eor, eor)
336
337#undef ATOMIC64_OPS
338#undef ATOMIC64_OP_RETURN
339#undef ATOMIC64_OP
340
341static inline long long
342atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
343{
344	long long oldval;
345	unsigned long res;
346
347	prefetchw(&ptr->counter);
348
349	do {
350		__asm__ __volatile__("@ atomic64_cmpxchg\n"
351		"ldrexd		%1, %H1, [%3]\n"
352		"mov		%0, #0\n"
353		"teq		%1, %4\n"
354		"teqeq		%H1, %H4\n"
355		"strexdeq	%0, %5, %H5, [%3]"
356		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
357		: "r" (&ptr->counter), "r" (old), "r" (new)
358		: "cc");
359	} while (res);
360
361	return oldval;
362}
363#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed
364
365static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
366{
367	long long result;
368	unsigned long tmp;
369
370	prefetchw(&ptr->counter);
371
372	__asm__ __volatile__("@ atomic64_xchg\n"
373"1:	ldrexd	%0, %H0, [%3]\n"
374"	strexd	%1, %4, %H4, [%3]\n"
375"	teq	%1, #0\n"
376"	bne	1b"
377	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
378	: "r" (&ptr->counter), "r" (new)
379	: "cc");
380
381	return result;
382}
383#define atomic64_xchg_relaxed		atomic64_xchg_relaxed
384
385static inline long long atomic64_dec_if_positive(atomic64_t *v)
386{
387	long long result;
388	unsigned long tmp;
389
390	smp_mb();
391	prefetchw(&v->counter);
392
393	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
394"1:	ldrexd	%0, %H0, [%3]\n"
395"	subs	%Q0, %Q0, #1\n"
396"	sbc	%R0, %R0, #0\n"
397"	teq	%R0, #0\n"
398"	bmi	2f\n"
399"	strexd	%1, %0, %H0, [%3]\n"
400"	teq	%1, #0\n"
401"	bne	1b\n"
402"2:"
403	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
404	: "r" (&v->counter)
405	: "cc");
406
407	smp_mb();
408
409	return result;
410}
411
412static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
413{
414	long long val;
415	unsigned long tmp;
416	int ret = 1;
417
418	smp_mb();
419	prefetchw(&v->counter);
420
421	__asm__ __volatile__("@ atomic64_add_unless\n"
422"1:	ldrexd	%0, %H0, [%4]\n"
423"	teq	%0, %5\n"
424"	teqeq	%H0, %H5\n"
425"	moveq	%1, #0\n"
426"	beq	2f\n"
427"	adds	%Q0, %Q0, %Q6\n"
428"	adc	%R0, %R0, %R6\n"
429"	strexd	%2, %0, %H0, [%4]\n"
430"	teq	%2, #0\n"
431"	bne	1b\n"
432"2:"
433	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
434	: "r" (&v->counter), "r" (u), "r" (a)
435	: "cc");
436
437	if (ret)
438		smp_mb();
439
440	return ret;
441}
442
443#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
444#define atomic64_inc(v)			atomic64_add(1LL, (v))
445#define atomic64_inc_return_relaxed(v)	atomic64_add_return_relaxed(1LL, (v))
446#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
447#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
448#define atomic64_dec(v)			atomic64_sub(1LL, (v))
449#define atomic64_dec_return_relaxed(v)	atomic64_sub_return_relaxed(1LL, (v))
450#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
451#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
452
453#endif /* !CONFIG_GENERIC_ATOMIC64 */
454#endif
455#endif
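
The listing above is the architecture backend; generic kernel code normally reaches these primitives through <linux/atomic.h>, which pulls in this header. As a rough illustration of how they are consumed, here is a minimal, hypothetical module sketch (the module name, counter, and values are assumptions for illustration, not part of the header):

/*
 * Minimal usage sketch, not part of the header above.  The counter name,
 * the numbers, and the module boilerplate are hypothetical.
 */
#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static atomic_t active_users = ATOMIC_INIT(0);	/* hypothetical counter */

static int __init atomic_demo_init(void)
{
	int old, now;

	atomic_inc(&active_users);			/* atomic_add(1, v) under the hood */
	now = atomic_add_return(5, &active_users);	/* returns the new value: 6 */

	/* Compare-and-swap: store 100 only if the counter is still 6. */
	old = atomic_cmpxchg(&active_users, 6, 100);
	pr_info("add_return gave %d, cmpxchg saw %d, counter is now %d\n",
		now, old, atomic_read(&active_users));

	return 0;
}

static void __exit atomic_demo_exit(void)
{
	if (atomic_dec_and_test(&active_users))		/* true only if it drops to zero */
		pr_info("last user gone\n");
}

module_init(atomic_demo_init);
module_exit(atomic_demo_exit);
MODULE_LICENSE("GPL");
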
arch/arm/include/asm/atomic.h, v4.10.11:
  1/*
  2 *  arch/arm/include/asm/atomic.h
  3 *
  4 *  Copyright (C) 1996 Russell King.
  5 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 */
 11#ifndef __ASM_ARM_ATOMIC_H
 12#define __ASM_ARM_ATOMIC_H
 13
 14#include <linux/compiler.h>
 15#include <linux/prefetch.h>
 16#include <linux/types.h>
 17#include <linux/irqflags.h>
 18#include <asm/barrier.h>
 19#include <asm/cmpxchg.h>
 20
 21#define ATOMIC_INIT(i)	{ (i) }
 22
 23#ifdef __KERNEL__
 24
 25/*
 26 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 27 * strex/ldrex monitor on some implementations. The reason we can use it for
 28 * atomic_set() is the clrex or dummy strex done on every exception return.
 29 */
 30#define atomic_read(v)	READ_ONCE((v)->counter)
 31#define atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
 32
 33#if __LINUX_ARM_ARCH__ >= 6
 34
 35/*
 36 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 37 * store exclusive to ensure that these are atomic.  We may loop
 38 * to ensure that the update happens.
 39 */
 40
 41#define ATOMIC_OP(op, c_op, asm_op)					\
 42static inline void atomic_##op(int i, atomic_t *v)			\
 43{									\
 44	unsigned long tmp;						\
 45	int result;							\
 46									\
 47	prefetchw(&v->counter);						\
 48	__asm__ __volatile__("@ atomic_" #op "\n"			\
 49"1:	ldrex	%0, [%3]\n"						\
 50"	" #asm_op "	%0, %0, %4\n"					\
 51"	strex	%1, %0, [%3]\n"						\
 52"	teq	%1, #0\n"						\
 53"	bne	1b"							\
 54	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
 55	: "r" (&v->counter), "Ir" (i)					\
 56	: "cc");							\
 57}									\
 58
 59#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
 60static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 61{									\
 62	unsigned long tmp;						\
 63	int result;							\
 64									\
 65	prefetchw(&v->counter);						\
 66									\
 67	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
 68"1:	ldrex	%0, [%3]\n"						\
 69"	" #asm_op "	%0, %0, %4\n"					\
 70"	strex	%1, %0, [%3]\n"						\
 71"	teq	%1, #0\n"						\
 72"	bne	1b"							\
 73	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
 74	: "r" (&v->counter), "Ir" (i)					\
 75	: "cc");							\
 76									\
 77	return result;							\
 78}
 79
 80#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
 81static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 82{									\
 83	unsigned long tmp;						\
 84	int result, val;						\
 85									\
 86	prefetchw(&v->counter);						\
 87									\
 88	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
 89"1:	ldrex	%0, [%4]\n"						\
 90"	" #asm_op "	%1, %0, %5\n"					\
 91"	strex	%2, %1, [%4]\n"						\
 92"	teq	%2, #0\n"						\
 93"	bne	1b"							\
 94	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
 95	: "r" (&v->counter), "Ir" (i)					\
 96	: "cc");							\
 97									\
 98	return result;							\
 99}
100
101#define atomic_add_return_relaxed	atomic_add_return_relaxed
102#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
103#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
104#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
105
106#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
107#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
108#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
109#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
110
111static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
112{
113	int oldval;
114	unsigned long res;
115
116	prefetchw(&ptr->counter);
117
118	do {
119		__asm__ __volatile__("@ atomic_cmpxchg\n"
120		"ldrex	%1, [%3]\n"
121		"mov	%0, #0\n"
122		"teq	%1, %4\n"
123		"strexeq %0, %5, [%3]\n"
124		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
125		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
126		    : "cc");
127	} while (res);
128
129	return oldval;
130}
131#define atomic_cmpxchg_relaxed		atomic_cmpxchg_relaxed
132
133static inline int __atomic_add_unless(atomic_t *v, int a, int u)
134{
135	int oldval, newval;
136	unsigned long tmp;
137
138	smp_mb();
139	prefetchw(&v->counter);
140
141	__asm__ __volatile__ ("@ atomic_add_unless\n"
142"1:	ldrex	%0, [%4]\n"
143"	teq	%0, %5\n"
144"	beq	2f\n"
145"	add	%1, %0, %6\n"
146"	strex	%2, %1, [%4]\n"
147"	teq	%2, #0\n"
148"	bne	1b\n"
149"2:"
150	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
151	: "r" (&v->counter), "r" (u), "r" (a)
152	: "cc");
153
154	if (oldval != u)
155		smp_mb();
156
157	return oldval;
158}
159
160#else /* ARM_ARCH_6 */
161
162#ifdef CONFIG_SMP
163#error SMP not supported on pre-ARMv6 CPUs
164#endif
165
166#define ATOMIC_OP(op, c_op, asm_op)					\
167static inline void atomic_##op(int i, atomic_t *v)			\
168{									\
169	unsigned long flags;						\
170									\
171	raw_local_irq_save(flags);					\
172	v->counter c_op i;						\
173	raw_local_irq_restore(flags);					\
174}									\
175
176#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
177static inline int atomic_##op##_return(int i, atomic_t *v)		\
178{									\
179	unsigned long flags;						\
180	int val;							\
181									\
182	raw_local_irq_save(flags);					\
183	v->counter c_op i;						\
184	val = v->counter;						\
185	raw_local_irq_restore(flags);					\
186									\
187	return val;							\
188}
189
190#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
191static inline int atomic_fetch_##op(int i, atomic_t *v)			\
192{									\
193	unsigned long flags;						\
194	int val;							\
195									\
196	raw_local_irq_save(flags);					\
197	val = v->counter;						\
198	v->counter c_op i;						\
199	raw_local_irq_restore(flags);					\
200									\
201	return val;							\
202}
203
204static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
205{
206	int ret;
207	unsigned long flags;
208
209	raw_local_irq_save(flags);
210	ret = v->counter;
211	if (likely(ret == old))
212		v->counter = new;
213	raw_local_irq_restore(flags);
214
215	return ret;
216}
217
218static inline int __atomic_add_unless(atomic_t *v, int a, int u)
219{
220	int c, old;
221
222	c = atomic_read(v);
223	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
224		c = old;
225	return c;
226}
227
228#endif /* __LINUX_ARM_ARCH__ */
229
230#define ATOMIC_OPS(op, c_op, asm_op)					\
231	ATOMIC_OP(op, c_op, asm_op)					\
232	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
233	ATOMIC_FETCH_OP(op, c_op, asm_op)
234
235ATOMIC_OPS(add, +=, add)
236ATOMIC_OPS(sub, -=, sub)
237
238#define atomic_andnot atomic_andnot
239
240#undef ATOMIC_OPS
241#define ATOMIC_OPS(op, c_op, asm_op)					\
242	ATOMIC_OP(op, c_op, asm_op)					\
243	ATOMIC_FETCH_OP(op, c_op, asm_op)
244
245ATOMIC_OPS(and, &=, and)
246ATOMIC_OPS(andnot, &= ~, bic)
247ATOMIC_OPS(or,  |=, orr)
248ATOMIC_OPS(xor, ^=, eor)
249
250#undef ATOMIC_OPS
251#undef ATOMIC_FETCH_OP
252#undef ATOMIC_OP_RETURN
253#undef ATOMIC_OP
254
255#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
256
257#define atomic_inc(v)		atomic_add(1, v)
258#define atomic_dec(v)		atomic_sub(1, v)
259
260#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
261#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
262#define atomic_inc_return_relaxed(v)    (atomic_add_return_relaxed(1, v))
263#define atomic_dec_return_relaxed(v)    (atomic_sub_return_relaxed(1, v))
264#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
265
266#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
267
268#ifndef CONFIG_GENERIC_ATOMIC64
269typedef struct {
270	long long counter;
271} atomic64_t;
272
273#define ATOMIC64_INIT(i) { (i) }
274
275#ifdef CONFIG_ARM_LPAE
276static inline long long atomic64_read(const atomic64_t *v)
277{
278	long long result;
279
280	__asm__ __volatile__("@ atomic64_read\n"
281"	ldrd	%0, %H0, [%1]"
282	: "=&r" (result)
283	: "r" (&v->counter), "Qo" (v->counter)
284	);
285
286	return result;
287}
288
289static inline void atomic64_set(atomic64_t *v, long long i)
290{
291	__asm__ __volatile__("@ atomic64_set\n"
292"	strd	%2, %H2, [%1]"
293	: "=Qo" (v->counter)
294	: "r" (&v->counter), "r" (i)
295	);
296}
297#else
298static inline long long atomic64_read(const atomic64_t *v)
299{
300	long long result;
301
302	__asm__ __volatile__("@ atomic64_read\n"
303"	ldrexd	%0, %H0, [%1]"
304	: "=&r" (result)
305	: "r" (&v->counter), "Qo" (v->counter)
306	);
307
308	return result;
309}
310
311static inline void atomic64_set(atomic64_t *v, long long i)
312{
313	long long tmp;
314
315	prefetchw(&v->counter);
316	__asm__ __volatile__("@ atomic64_set\n"
317"1:	ldrexd	%0, %H0, [%2]\n"
318"	strexd	%0, %3, %H3, [%2]\n"
319"	teq	%0, #0\n"
320"	bne	1b"
321	: "=&r" (tmp), "=Qo" (v->counter)
322	: "r" (&v->counter), "r" (i)
323	: "cc");
324}
325#endif
326
327#define ATOMIC64_OP(op, op1, op2)					\
328static inline void atomic64_##op(long long i, atomic64_t *v)		\
329{									\
330	long long result;						\
331	unsigned long tmp;						\
332									\
333	prefetchw(&v->counter);						\
334	__asm__ __volatile__("@ atomic64_" #op "\n"			\
335"1:	ldrexd	%0, %H0, [%3]\n"					\
336"	" #op1 " %Q0, %Q0, %Q4\n"					\
337"	" #op2 " %R0, %R0, %R4\n"					\
338"	strexd	%1, %0, %H0, [%3]\n"					\
339"	teq	%1, #0\n"						\
340"	bne	1b"							\
341	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
342	: "r" (&v->counter), "r" (i)					\
343	: "cc");							\
344}									\
345
346#define ATOMIC64_OP_RETURN(op, op1, op2)				\
347static inline long long							\
348atomic64_##op##_return_relaxed(long long i, atomic64_t *v)		\
349{									\
350	long long result;						\
351	unsigned long tmp;						\
352									\
353	prefetchw(&v->counter);						\
354									\
355	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
356"1:	ldrexd	%0, %H0, [%3]\n"					\
357"	" #op1 " %Q0, %Q0, %Q4\n"					\
358"	" #op2 " %R0, %R0, %R4\n"					\
359"	strexd	%1, %0, %H0, [%3]\n"					\
360"	teq	%1, #0\n"						\
361"	bne	1b"							\
362	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
363	: "r" (&v->counter), "r" (i)					\
364	: "cc");							\
365									\
366	return result;							\
367}
368
369#define ATOMIC64_FETCH_OP(op, op1, op2)					\
370static inline long long							\
371atomic64_fetch_##op##_relaxed(long long i, atomic64_t *v)		\
372{									\
373	long long result, val;						\
374	unsigned long tmp;						\
375									\
376	prefetchw(&v->counter);						\
377									\
378	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
379"1:	ldrexd	%0, %H0, [%4]\n"					\
380"	" #op1 " %Q1, %Q0, %Q5\n"					\
381"	" #op2 " %R1, %R0, %R5\n"					\
382"	strexd	%2, %1, %H1, [%4]\n"					\
383"	teq	%2, #0\n"						\
384"	bne	1b"							\
385	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
386	: "r" (&v->counter), "r" (i)					\
387	: "cc");							\
388									\
389	return result;							\
390}
391
392#define ATOMIC64_OPS(op, op1, op2)					\
393	ATOMIC64_OP(op, op1, op2)					\
394	ATOMIC64_OP_RETURN(op, op1, op2)				\
395	ATOMIC64_FETCH_OP(op, op1, op2)
396
397ATOMIC64_OPS(add, adds, adc)
398ATOMIC64_OPS(sub, subs, sbc)
399
400#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
401#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
402#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
403#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
404
405#undef ATOMIC64_OPS
406#define ATOMIC64_OPS(op, op1, op2)					\
407	ATOMIC64_OP(op, op1, op2)					\
408	ATOMIC64_FETCH_OP(op, op1, op2)
409
410#define atomic64_andnot atomic64_andnot
411
412ATOMIC64_OPS(and, and, and)
413ATOMIC64_OPS(andnot, bic, bic)
414ATOMIC64_OPS(or,  orr, orr)
415ATOMIC64_OPS(xor, eor, eor)
416
417#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
418#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
419#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
420#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
421
422#undef ATOMIC64_OPS
423#undef ATOMIC64_FETCH_OP
424#undef ATOMIC64_OP_RETURN
425#undef ATOMIC64_OP
426
427static inline long long
428atomic64_cmpxchg_relaxed(atomic64_t *ptr, long long old, long long new)
429{
430	long long oldval;
431	unsigned long res;
432
433	prefetchw(&ptr->counter);
434
435	do {
436		__asm__ __volatile__("@ atomic64_cmpxchg\n"
437		"ldrexd		%1, %H1, [%3]\n"
438		"mov		%0, #0\n"
439		"teq		%1, %4\n"
440		"teqeq		%H1, %H4\n"
441		"strexdeq	%0, %5, %H5, [%3]"
442		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
443		: "r" (&ptr->counter), "r" (old), "r" (new)
444		: "cc");
445	} while (res);
446
447	return oldval;
448}
449#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg_relaxed
450
451static inline long long atomic64_xchg_relaxed(atomic64_t *ptr, long long new)
452{
453	long long result;
454	unsigned long tmp;
455
456	prefetchw(&ptr->counter);
457
458	__asm__ __volatile__("@ atomic64_xchg\n"
459"1:	ldrexd	%0, %H0, [%3]\n"
460"	strexd	%1, %4, %H4, [%3]\n"
461"	teq	%1, #0\n"
462"	bne	1b"
463	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
464	: "r" (&ptr->counter), "r" (new)
465	: "cc");
466
467	return result;
468}
469#define atomic64_xchg_relaxed		atomic64_xchg_relaxed
470
471static inline long long atomic64_dec_if_positive(atomic64_t *v)
472{
473	long long result;
474	unsigned long tmp;
475
476	smp_mb();
477	prefetchw(&v->counter);
478
479	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
480"1:	ldrexd	%0, %H0, [%3]\n"
481"	subs	%Q0, %Q0, #1\n"
482"	sbc	%R0, %R0, #0\n"
483"	teq	%R0, #0\n"
484"	bmi	2f\n"
485"	strexd	%1, %0, %H0, [%3]\n"
486"	teq	%1, #0\n"
487"	bne	1b\n"
488"2:"
489	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
490	: "r" (&v->counter)
491	: "cc");
492
493	smp_mb();
494
495	return result;
496}
497
498static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
499{
500	long long val;
501	unsigned long tmp;
502	int ret = 1;
503
504	smp_mb();
505	prefetchw(&v->counter);
506
507	__asm__ __volatile__("@ atomic64_add_unless\n"
508"1:	ldrexd	%0, %H0, [%4]\n"
509"	teq	%0, %5\n"
510"	teqeq	%H0, %H5\n"
511"	moveq	%1, #0\n"
512"	beq	2f\n"
513"	adds	%Q0, %Q0, %Q6\n"
514"	adc	%R0, %R0, %R6\n"
515"	strexd	%2, %0, %H0, [%4]\n"
516"	teq	%2, #0\n"
517"	bne	1b\n"
518"2:"
519	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
520	: "r" (&v->counter), "r" (u), "r" (a)
521	: "cc");
522
523	if (ret)
524		smp_mb();
525
526	return ret;
527}
528
529#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
530#define atomic64_inc(v)			atomic64_add(1LL, (v))
531#define atomic64_inc_return_relaxed(v)	atomic64_add_return_relaxed(1LL, (v))
532#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
533#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
534#define atomic64_dec(v)			atomic64_sub(1LL, (v))
535#define atomic64_dec_return_relaxed(v)	atomic64_sub_return_relaxed(1LL, (v))
536#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
537#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
538
539#endif /* !CONFIG_GENERIC_ATOMIC64 */
540#endif
541#endif
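
Relative to v4.6, the v4.10.11 copy adds the ATOMIC_FETCH_OP and ATOMIC64_FETCH_OP generators: the atomic_fetch_<op>*() variants return the value the counter held before the operation, whereas the atomic_<op>_return*() variants return the value after it. A small sketch of that difference (the counter name and the values are illustrative assumptions):

/* Sketch only: contrasts fetch_* with *_return; counter and values are hypothetical. */
#include <linux/atomic.h>

static atomic_t refs = ATOMIC_INIT(3);

static void fetch_vs_return_demo(void)
{
	int before, after;

	/* fetch_add returns the value observed before the addition ... */
	before = atomic_fetch_add_relaxed(2, &refs);	/* before == 3, refs == 5 */

	/* ... while add_return returns the value after the addition. */
	after = atomic_add_return_relaxed(2, &refs);	/* after == 7, refs == 7 */

	(void)before;
	(void)after;
}
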