arch/arm/include/asm/atomic.h

Two versions of this ARM atomic-operations header are shown below: first as shipped in Linux v3.5.6, then as shipped in Linux v3.15.
Linux v3.5.6
  1/*
  2 *  arch/arm/include/asm/atomic.h
  3 *
  4 *  Copyright (C) 1996 Russell King.
  5 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 */
 11#ifndef __ASM_ARM_ATOMIC_H
 12#define __ASM_ARM_ATOMIC_H
 13
 14#include <linux/compiler.h>
 15#include <linux/types.h>
 16#include <linux/irqflags.h>
 17#include <asm/barrier.h>
 18#include <asm/cmpxchg.h>
 19
 20#define ATOMIC_INIT(i)	{ (i) }
 21
 22#ifdef __KERNEL__
 23
 24/*
 25 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 26 * strex/ldrex monitor on some implementations. The reason we can use it for
 27 * atomic_set() is the clrex or dummy strex done on every exception return.
 28 */
 29#define atomic_read(v)	(*(volatile int *)&(v)->counter)
 30#define atomic_set(v,i)	(((v)->counter) = (i))
 31
 32#if __LINUX_ARM_ARCH__ >= 6
 33
 34/*
 35 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 36 * store exclusive to ensure that these are atomic.  We may loop
 37 * to ensure that the update happens.
 38 */
 39static inline void atomic_add(int i, atomic_t *v)
 40{
 41	unsigned long tmp;
 42	int result;
 43
 44	__asm__ __volatile__("@ atomic_add\n"
 45"1:	ldrex	%0, [%3]\n"
 46"	add	%0, %0, %4\n"
 47"	strex	%1, %0, [%3]\n"
 48"	teq	%1, #0\n"
 49"	bne	1b"
 50	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 51	: "r" (&v->counter), "Ir" (i)
 52	: "cc");
 53}
 54
 55static inline int atomic_add_return(int i, atomic_t *v)
 56{
 57	unsigned long tmp;
 58	int result;
 59
 60	smp_mb();
 61
 62	__asm__ __volatile__("@ atomic_add_return\n"
 63"1:	ldrex	%0, [%3]\n"
 64"	add	%0, %0, %4\n"
 65"	strex	%1, %0, [%3]\n"
 66"	teq	%1, #0\n"
 67"	bne	1b"
 68	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 69	: "r" (&v->counter), "Ir" (i)
 70	: "cc");
 71
 72	smp_mb();
 73
 74	return result;
 75}
 76
 77static inline void atomic_sub(int i, atomic_t *v)
 78{
 79	unsigned long tmp;
 80	int result;
 81
 82	__asm__ __volatile__("@ atomic_sub\n"
 83"1:	ldrex	%0, [%3]\n"
 84"	sub	%0, %0, %4\n"
 85"	strex	%1, %0, [%3]\n"
 86"	teq	%1, #0\n"
 87"	bne	1b"
 88	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 89	: "r" (&v->counter), "Ir" (i)
 90	: "cc");
 91}
 92
 93static inline int atomic_sub_return(int i, atomic_t *v)
 94{
 95	unsigned long tmp;
 96	int result;
 97
 98	smp_mb();
 99
100	__asm__ __volatile__("@ atomic_sub_return\n"
101"1:	ldrex	%0, [%3]\n"
102"	sub	%0, %0, %4\n"
103"	strex	%1, %0, [%3]\n"
104"	teq	%1, #0\n"
105"	bne	1b"
106	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
107	: "r" (&v->counter), "Ir" (i)
108	: "cc");
109
110	smp_mb();
111
112	return result;
113}
114
115static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
116{
117	unsigned long oldval, res;
118
119	smp_mb();
120
121	do {
122		__asm__ __volatile__("@ atomic_cmpxchg\n"
123		"ldrex	%1, [%3]\n"
124		"mov	%0, #0\n"
125		"teq	%1, %4\n"
126		"strexeq %0, %5, [%3]\n"
127		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
128		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
129		    : "cc");
130	} while (res);
131
132	smp_mb();
133
134	return oldval;
135}
136
137static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
138{
139	unsigned long tmp, tmp2;
140
141	__asm__ __volatile__("@ atomic_clear_mask\n"
142"1:	ldrex	%0, [%3]\n"
143"	bic	%0, %0, %4\n"
144"	strex	%1, %0, [%3]\n"
145"	teq	%1, #0\n"
146"	bne	1b"
147	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
148	: "r" (addr), "Ir" (mask)
149	: "cc");
150}
151
152#else /* ARM_ARCH_6 */
153
154#ifdef CONFIG_SMP
155#error SMP not supported on pre-ARMv6 CPUs
156#endif
157
158static inline int atomic_add_return(int i, atomic_t *v)
159{
160	unsigned long flags;
161	int val;
162
163	raw_local_irq_save(flags);
164	val = v->counter;
165	v->counter = val += i;
166	raw_local_irq_restore(flags);
167
168	return val;
169}
170#define atomic_add(i, v)	(void) atomic_add_return(i, v)
171
172static inline int atomic_sub_return(int i, atomic_t *v)
173{
174	unsigned long flags;
175	int val;
176
177	raw_local_irq_save(flags);
178	val = v->counter;
179	v->counter = val -= i;
180	raw_local_irq_restore(flags);
181
182	return val;
183}
184#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
185
186static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
187{
188	int ret;
189	unsigned long flags;
190
191	raw_local_irq_save(flags);
192	ret = v->counter;
193	if (likely(ret == old))
194		v->counter = new;
195	raw_local_irq_restore(flags);
196
197	return ret;
198}
199
200static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
201{
202	unsigned long flags;
203
204	raw_local_irq_save(flags);
205	*addr &= ~mask;
206	raw_local_irq_restore(flags);
207}
208
209#endif /* __LINUX_ARM_ARCH__ */
210
211#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
212
213static inline int __atomic_add_unless(atomic_t *v, int a, int u)
214{
215	int c, old;
216
217	c = atomic_read(v);
218	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
219		c = old;
220	return c;
221}
222
223#define atomic_inc(v)		atomic_add(1, v)
224#define atomic_dec(v)		atomic_sub(1, v)
225
226#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
227#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
228#define atomic_inc_return(v)    (atomic_add_return(1, v))
229#define atomic_dec_return(v)    (atomic_sub_return(1, v))
230#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
231
232#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
233
234#define smp_mb__before_atomic_dec()	smp_mb()
235#define smp_mb__after_atomic_dec()	smp_mb()
236#define smp_mb__before_atomic_inc()	smp_mb()
237#define smp_mb__after_atomic_inc()	smp_mb()
238
239#ifndef CONFIG_GENERIC_ATOMIC64
240typedef struct {
241	u64 __aligned(8) counter;
242} atomic64_t;
243
244#define ATOMIC64_INIT(i) { (i) }
245
246static inline u64 atomic64_read(const atomic64_t *v)
247{
248	u64 result;
249
250	__asm__ __volatile__("@ atomic64_read\n"
251"	ldrexd	%0, %H0, [%1]"
252	: "=&r" (result)
253	: "r" (&v->counter), "Qo" (v->counter)
254	);
255
256	return result;
257}
258
259static inline void atomic64_set(atomic64_t *v, u64 i)
260{
261	u64 tmp;
262
263	__asm__ __volatile__("@ atomic64_set\n"
264"1:	ldrexd	%0, %H0, [%2]\n"
265"	strexd	%0, %3, %H3, [%2]\n"
266"	teq	%0, #0\n"
267"	bne	1b"
268	: "=&r" (tmp), "=Qo" (v->counter)
269	: "r" (&v->counter), "r" (i)
270	: "cc");
271}
272
273static inline void atomic64_add(u64 i, atomic64_t *v)
274{
275	u64 result;
276	unsigned long tmp;
277
278	__asm__ __volatile__("@ atomic64_add\n"
279"1:	ldrexd	%0, %H0, [%3]\n"
280"	adds	%0, %0, %4\n"
281"	adc	%H0, %H0, %H4\n"
282"	strexd	%1, %0, %H0, [%3]\n"
283"	teq	%1, #0\n"
284"	bne	1b"
285	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
286	: "r" (&v->counter), "r" (i)
287	: "cc");
288}
289
290static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
291{
292	u64 result;
293	unsigned long tmp;
294
295	smp_mb();
296
297	__asm__ __volatile__("@ atomic64_add_return\n"
298"1:	ldrexd	%0, %H0, [%3]\n"
299"	adds	%0, %0, %4\n"
300"	adc	%H0, %H0, %H4\n"
301"	strexd	%1, %0, %H0, [%3]\n"
302"	teq	%1, #0\n"
303"	bne	1b"
304	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
305	: "r" (&v->counter), "r" (i)
306	: "cc");
307
308	smp_mb();
309
310	return result;
311}
312
313static inline void atomic64_sub(u64 i, atomic64_t *v)
314{
315	u64 result;
316	unsigned long tmp;
317
318	__asm__ __volatile__("@ atomic64_sub\n"
319"1:	ldrexd	%0, %H0, [%3]\n"
320"	subs	%0, %0, %4\n"
321"	sbc	%H0, %H0, %H4\n"
322"	strexd	%1, %0, %H0, [%3]\n"
323"	teq	%1, #0\n"
324"	bne	1b"
325	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
326	: "r" (&v->counter), "r" (i)
327	: "cc");
328}
329
330static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
331{
332	u64 result;
333	unsigned long tmp;
334
335	smp_mb();
336
337	__asm__ __volatile__("@ atomic64_sub_return\n"
338"1:	ldrexd	%0, %H0, [%3]\n"
339"	subs	%0, %0, %4\n"
340"	sbc	%H0, %H0, %H4\n"
341"	strexd	%1, %0, %H0, [%3]\n"
342"	teq	%1, #0\n"
343"	bne	1b"
344	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
345	: "r" (&v->counter), "r" (i)
346	: "cc");
347
348	smp_mb();
349
350	return result;
351}
352
353static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
354{
355	u64 oldval;
356	unsigned long res;
357
358	smp_mb();
359
360	do {
361		__asm__ __volatile__("@ atomic64_cmpxchg\n"
362		"ldrexd		%1, %H1, [%3]\n"
363		"mov		%0, #0\n"
364		"teq		%1, %4\n"
365		"teqeq		%H1, %H4\n"
366		"strexdeq	%0, %5, %H5, [%3]"
367		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
368		: "r" (&ptr->counter), "r" (old), "r" (new)
369		: "cc");
370	} while (res);
371
372	smp_mb();
373
374	return oldval;
375}
376
377static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
378{
379	u64 result;
380	unsigned long tmp;
381
382	smp_mb();
383
384	__asm__ __volatile__("@ atomic64_xchg\n"
385"1:	ldrexd	%0, %H0, [%3]\n"
386"	strexd	%1, %4, %H4, [%3]\n"
387"	teq	%1, #0\n"
388"	bne	1b"
389	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
390	: "r" (&ptr->counter), "r" (new)
391	: "cc");
392
393	smp_mb();
394
395	return result;
396}
397
398static inline u64 atomic64_dec_if_positive(atomic64_t *v)
399{
400	u64 result;
401	unsigned long tmp;
402
403	smp_mb();
404
405	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
406"1:	ldrexd	%0, %H0, [%3]\n"
407"	subs	%0, %0, #1\n"
408"	sbc	%H0, %H0, #0\n"
409"	teq	%H0, #0\n"
410"	bmi	2f\n"
411"	strexd	%1, %0, %H0, [%3]\n"
412"	teq	%1, #0\n"
413"	bne	1b\n"
414"2:"
415	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
416	: "r" (&v->counter)
417	: "cc");
418
419	smp_mb();
420
421	return result;
422}
423
424static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
425{
426	u64 val;
427	unsigned long tmp;
428	int ret = 1;
429
430	smp_mb();
431
432	__asm__ __volatile__("@ atomic64_add_unless\n"
433"1:	ldrexd	%0, %H0, [%4]\n"
434"	teq	%0, %5\n"
435"	teqeq	%H0, %H5\n"
436"	moveq	%1, #0\n"
437"	beq	2f\n"
438"	adds	%0, %0, %6\n"
439"	adc	%H0, %H0, %H6\n"
440"	strexd	%2, %0, %H0, [%4]\n"
441"	teq	%2, #0\n"
442"	bne	1b\n"
443"2:"
444	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
445	: "r" (&v->counter), "r" (u), "r" (a)
446	: "cc");
447
448	if (ret)
449		smp_mb();
450
451	return ret;
452}
453
454#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
455#define atomic64_inc(v)			atomic64_add(1LL, (v))
456#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
457#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
458#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
459#define atomic64_dec(v)			atomic64_sub(1LL, (v))
460#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
461#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
462#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
463
464#endif /* !CONFIG_GENERIC_ATOMIC64 */
465#endif
466#endif
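For context, here is a minimal usage sketch of the 32-bit API implemented above. It assumes the usual kernel conventions: code includes <linux/atomic.h> (which pulls in this asm header) rather than <asm/atomic.h> directly, and the struct foo / foo_get() / foo_put() names are invented purely for illustration.

/* Illustrative sketch only: a reference-counted object built on atomic_t. */
#include <linux/atomic.h>
#include <linux/slab.h>

struct foo {
	atomic_t refcount;
	/* ... payload ... */
};

static struct foo *foo_alloc(void)
{
	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		atomic_set(&p->refcount, 1);	/* plain store; see the atomic_set() comment above */
	return p;
}

static void foo_get(struct foo *p)
{
	atomic_inc(&p->refcount);		/* atomic_add(1, ...) */
}

static void foo_put(struct foo *p)
{
	/* atomic_dec_and_test() is atomic_sub_return(1, ...) == 0 */
	if (atomic_dec_and_test(&p->refcount))
		kfree(p);
}

On ARMv6 and later the increment and decrement compile to the ldrex/strex retry loops shown above; on earlier uniprocessor cores they fall back to the interrupt-disabling variants.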
Linux v3.15
  1/*
  2 *  arch/arm/include/asm/atomic.h
  3 *
  4 *  Copyright (C) 1996 Russell King.
  5 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 */
 11#ifndef __ASM_ARM_ATOMIC_H
 12#define __ASM_ARM_ATOMIC_H
 13
 14#include <linux/compiler.h>
 15#include <linux/prefetch.h>
 16#include <linux/types.h>
 17#include <linux/irqflags.h>
 18#include <asm/barrier.h>
 19#include <asm/cmpxchg.h>
 20
 21#define ATOMIC_INIT(i)	{ (i) }
 22
 23#ifdef __KERNEL__
 24
 25/*
 26 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 27 * strex/ldrex monitor on some implementations. The reason we can use it for
 28 * atomic_set() is the clrex or dummy strex done on every exception return.
 29 */
 30#define atomic_read(v)	(*(volatile int *)&(v)->counter)
 31#define atomic_set(v,i)	(((v)->counter) = (i))
 32
 33#if __LINUX_ARM_ARCH__ >= 6
 34
 35/*
 36 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 37 * store exclusive to ensure that these are atomic.  We may loop
 38 * to ensure that the update happens.
 39 */
 40static inline void atomic_add(int i, atomic_t *v)
 41{
 42	unsigned long tmp;
 43	int result;
 44
 45	prefetchw(&v->counter);
 46	__asm__ __volatile__("@ atomic_add\n"
 47"1:	ldrex	%0, [%3]\n"
 48"	add	%0, %0, %4\n"
 49"	strex	%1, %0, [%3]\n"
 50"	teq	%1, #0\n"
 51"	bne	1b"
 52	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 53	: "r" (&v->counter), "Ir" (i)
 54	: "cc");
 55}
 56
 57static inline int atomic_add_return(int i, atomic_t *v)
 58{
 59	unsigned long tmp;
 60	int result;
 61
 62	smp_mb();
 63	prefetchw(&v->counter);
 64
 65	__asm__ __volatile__("@ atomic_add_return\n"
 66"1:	ldrex	%0, [%3]\n"
 67"	add	%0, %0, %4\n"
 68"	strex	%1, %0, [%3]\n"
 69"	teq	%1, #0\n"
 70"	bne	1b"
 71	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 72	: "r" (&v->counter), "Ir" (i)
 73	: "cc");
 74
 75	smp_mb();
 76
 77	return result;
 78}
 79
 80static inline void atomic_sub(int i, atomic_t *v)
 81{
 82	unsigned long tmp;
 83	int result;
 84
 85	prefetchw(&v->counter);
 86	__asm__ __volatile__("@ atomic_sub\n"
 87"1:	ldrex	%0, [%3]\n"
 88"	sub	%0, %0, %4\n"
 89"	strex	%1, %0, [%3]\n"
 90"	teq	%1, #0\n"
 91"	bne	1b"
 92	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 93	: "r" (&v->counter), "Ir" (i)
 94	: "cc");
 95}
 96
 97static inline int atomic_sub_return(int i, atomic_t *v)
 98{
 99	unsigned long tmp;
100	int result;
101
102	smp_mb();
103	prefetchw(&v->counter);
104
105	__asm__ __volatile__("@ atomic_sub_return\n"
106"1:	ldrex	%0, [%3]\n"
107"	sub	%0, %0, %4\n"
108"	strex	%1, %0, [%3]\n"
109"	teq	%1, #0\n"
110"	bne	1b"
111	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
112	: "r" (&v->counter), "Ir" (i)
113	: "cc");
114
115	smp_mb();
116
117	return result;
118}
119
120static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
121{
122	int oldval;
123	unsigned long res;
124
125	smp_mb();
126	prefetchw(&ptr->counter);
127
128	do {
129		__asm__ __volatile__("@ atomic_cmpxchg\n"
130		"ldrex	%1, [%3]\n"
131		"mov	%0, #0\n"
132		"teq	%1, %4\n"
133		"strexeq %0, %5, [%3]\n"
134		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
135		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
136		    : "cc");
137	} while (res);
138
139	smp_mb();
140
141	return oldval;
142}
143
144static inline int __atomic_add_unless(atomic_t *v, int a, int u)
145{
146	int oldval, newval;
147	unsigned long tmp;
148
149	smp_mb();
150	prefetchw(&v->counter);
151
152	__asm__ __volatile__ ("@ atomic_add_unless\n"
153"1:	ldrex	%0, [%4]\n"
154"	teq	%0, %5\n"
155"	beq	2f\n"
156"	add	%1, %0, %6\n"
157"	strex	%2, %1, [%4]\n"
158"	teq	%2, #0\n"
159"	bne	1b\n"
160"2:"
161	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
162	: "r" (&v->counter), "r" (u), "r" (a)
163	: "cc");
164
165	if (oldval != u)
166		smp_mb();
167
168	return oldval;
169}
170
171#else /* ARM_ARCH_6 */
172
173#ifdef CONFIG_SMP
174#error SMP not supported on pre-ARMv6 CPUs
175#endif
176
177static inline int atomic_add_return(int i, atomic_t *v)
178{
179	unsigned long flags;
180	int val;
181
182	raw_local_irq_save(flags);
183	val = v->counter;
184	v->counter = val += i;
185	raw_local_irq_restore(flags);
186
187	return val;
188}
189#define atomic_add(i, v)	(void) atomic_add_return(i, v)
190
191static inline int atomic_sub_return(int i, atomic_t *v)
192{
193	unsigned long flags;
194	int val;
195
196	raw_local_irq_save(flags);
197	val = v->counter;
198	v->counter = val -= i;
199	raw_local_irq_restore(flags);
200
201	return val;
202}
203#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
204
205static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
206{
207	int ret;
208	unsigned long flags;
209
210	raw_local_irq_save(flags);
211	ret = v->counter;
212	if (likely(ret == old))
213		v->counter = new;
214	raw_local_irq_restore(flags);
215
216	return ret;
217}
218
219static inline int __atomic_add_unless(atomic_t *v, int a, int u)
220{
221	int c, old;
222
223	c = atomic_read(v);
224	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
225		c = old;
226	return c;
227}
228
229#endif /* __LINUX_ARM_ARCH__ */
230
231#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
232
233#define atomic_inc(v)		atomic_add(1, v)
234#define atomic_dec(v)		atomic_sub(1, v)
235
236#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
237#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
238#define atomic_inc_return(v)    (atomic_add_return(1, v))
239#define atomic_dec_return(v)    (atomic_sub_return(1, v))
240#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
241
242#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
243
244#define smp_mb__before_atomic_dec()	smp_mb()
245#define smp_mb__after_atomic_dec()	smp_mb()
246#define smp_mb__before_atomic_inc()	smp_mb()
247#define smp_mb__after_atomic_inc()	smp_mb()
248
249#ifndef CONFIG_GENERIC_ATOMIC64
250typedef struct {
251	long long counter;
252} atomic64_t;
253
254#define ATOMIC64_INIT(i) { (i) }
255
256#ifdef CONFIG_ARM_LPAE
257static inline long long atomic64_read(const atomic64_t *v)
258{
259	long long result;
260
261	__asm__ __volatile__("@ atomic64_read\n"
262"	ldrd	%0, %H0, [%1]"
263	: "=&r" (result)
264	: "r" (&v->counter), "Qo" (v->counter)
265	);
266
267	return result;
268}
269
270static inline void atomic64_set(atomic64_t *v, long long i)
271{
272	__asm__ __volatile__("@ atomic64_set\n"
273"	strd	%2, %H2, [%1]"
274	: "=Qo" (v->counter)
275	: "r" (&v->counter), "r" (i)
276	);
277}
278#else
279static inline long long atomic64_read(const atomic64_t *v)
280{
281	long long result;
282
283	__asm__ __volatile__("@ atomic64_read\n"
284"	ldrexd	%0, %H0, [%1]"
285	: "=&r" (result)
286	: "r" (&v->counter), "Qo" (v->counter)
287	);
288
289	return result;
290}
291
292static inline void atomic64_set(atomic64_t *v, long long i)
293{
294	long long tmp;
295
296	prefetchw(&v->counter);
297	__asm__ __volatile__("@ atomic64_set\n"
298"1:	ldrexd	%0, %H0, [%2]\n"
299"	strexd	%0, %3, %H3, [%2]\n"
300"	teq	%0, #0\n"
301"	bne	1b"
302	: "=&r" (tmp), "=Qo" (v->counter)
303	: "r" (&v->counter), "r" (i)
304	: "cc");
305}
306#endif
307
308static inline void atomic64_add(long long i, atomic64_t *v)
309{
310	long long result;
311	unsigned long tmp;
312
313	prefetchw(&v->counter);
314	__asm__ __volatile__("@ atomic64_add\n"
315"1:	ldrexd	%0, %H0, [%3]\n"
316"	adds	%Q0, %Q0, %Q4\n"
317"	adc	%R0, %R0, %R4\n"
318"	strexd	%1, %0, %H0, [%3]\n"
319"	teq	%1, #0\n"
320"	bne	1b"
321	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
322	: "r" (&v->counter), "r" (i)
323	: "cc");
324}
325
326static inline long long atomic64_add_return(long long i, atomic64_t *v)
327{
328	long long result;
329	unsigned long tmp;
330
331	smp_mb();
332	prefetchw(&v->counter);
333
334	__asm__ __volatile__("@ atomic64_add_return\n"
335"1:	ldrexd	%0, %H0, [%3]\n"
336"	adds	%Q0, %Q0, %Q4\n"
337"	adc	%R0, %R0, %R4\n"
338"	strexd	%1, %0, %H0, [%3]\n"
339"	teq	%1, #0\n"
340"	bne	1b"
341	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
342	: "r" (&v->counter), "r" (i)
343	: "cc");
344
345	smp_mb();
346
347	return result;
348}
349
350static inline void atomic64_sub(long long i, atomic64_t *v)
351{
352	long long result;
353	unsigned long tmp;
354
355	prefetchw(&v->counter);
356	__asm__ __volatile__("@ atomic64_sub\n"
357"1:	ldrexd	%0, %H0, [%3]\n"
358"	subs	%Q0, %Q0, %Q4\n"
359"	sbc	%R0, %R0, %R4\n"
360"	strexd	%1, %0, %H0, [%3]\n"
361"	teq	%1, #0\n"
362"	bne	1b"
363	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
364	: "r" (&v->counter), "r" (i)
365	: "cc");
366}
367
368static inline long long atomic64_sub_return(long long i, atomic64_t *v)
369{
370	long long result;
371	unsigned long tmp;
372
373	smp_mb();
374	prefetchw(&v->counter);
375
376	__asm__ __volatile__("@ atomic64_sub_return\n"
377"1:	ldrexd	%0, %H0, [%3]\n"
378"	subs	%Q0, %Q0, %Q4\n"
379"	sbc	%R0, %R0, %R4\n"
380"	strexd	%1, %0, %H0, [%3]\n"
381"	teq	%1, #0\n"
382"	bne	1b"
383	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
384	: "r" (&v->counter), "r" (i)
385	: "cc");
386
387	smp_mb();
388
389	return result;
390}
391
392static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
393					long long new)
394{
395	long long oldval;
396	unsigned long res;
397
398	smp_mb();
399	prefetchw(&ptr->counter);
400
401	do {
402		__asm__ __volatile__("@ atomic64_cmpxchg\n"
403		"ldrexd		%1, %H1, [%3]\n"
404		"mov		%0, #0\n"
405		"teq		%1, %4\n"
406		"teqeq		%H1, %H4\n"
407		"strexdeq	%0, %5, %H5, [%3]"
408		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
409		: "r" (&ptr->counter), "r" (old), "r" (new)
410		: "cc");
411	} while (res);
412
413	smp_mb();
414
415	return oldval;
416}
417
418static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
419{
420	long long result;
421	unsigned long tmp;
422
423	smp_mb();
424	prefetchw(&ptr->counter);
425
426	__asm__ __volatile__("@ atomic64_xchg\n"
427"1:	ldrexd	%0, %H0, [%3]\n"
428"	strexd	%1, %4, %H4, [%3]\n"
429"	teq	%1, #0\n"
430"	bne	1b"
431	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
432	: "r" (&ptr->counter), "r" (new)
433	: "cc");
434
435	smp_mb();
436
437	return result;
438}
439
440static inline long long atomic64_dec_if_positive(atomic64_t *v)
441{
442	long long result;
443	unsigned long tmp;
444
445	smp_mb();
446	prefetchw(&v->counter);
447
448	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
449"1:	ldrexd	%0, %H0, [%3]\n"
450"	subs	%Q0, %Q0, #1\n"
451"	sbc	%R0, %R0, #0\n"
452"	teq	%R0, #0\n"
453"	bmi	2f\n"
454"	strexd	%1, %0, %H0, [%3]\n"
455"	teq	%1, #0\n"
456"	bne	1b\n"
457"2:"
458	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
459	: "r" (&v->counter)
460	: "cc");
461
462	smp_mb();
463
464	return result;
465}
466
467static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
468{
469	long long val;
470	unsigned long tmp;
471	int ret = 1;
472
473	smp_mb();
474	prefetchw(&v->counter);
475
476	__asm__ __volatile__("@ atomic64_add_unless\n"
477"1:	ldrexd	%0, %H0, [%4]\n"
478"	teq	%0, %5\n"
479"	teqeq	%H0, %H5\n"
480"	moveq	%1, #0\n"
481"	beq	2f\n"
482"	adds	%Q0, %Q0, %Q6\n"
483"	adc	%R0, %R0, %R6\n"
484"	strexd	%2, %0, %H0, [%4]\n"
485"	teq	%2, #0\n"
486"	bne	1b\n"
487"2:"
488	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
489	: "r" (&v->counter), "r" (u), "r" (a)
490	: "cc");
491
492	if (ret)
493		smp_mb();
494
495	return ret;
496}
497
498#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
499#define atomic64_inc(v)			atomic64_add(1LL, (v))
500#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
501#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
502#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
503#define atomic64_dec(v)			atomic64_sub(1LL, (v))
504#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
505#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
506#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
507
508#endif /* !CONFIG_GENERIC_ATOMIC64 */
509#endif
510#endif
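The main differences visible between the two versions are the prefetchw() hints issued before the exclusive-load loops, the 64-bit counter type changing from u64 __aligned(8) to long long, plain ldrd/strd fast paths for atomic64_read()/atomic64_set() under CONFIG_ARM_LPAE, and a dedicated ldrex-based __atomic_add_unless() for ARMv6 and later. As a closing illustration, here is a minimal sketch of how the 64-bit API is typically consumed; the foo_stats structure and function names are invented for this example.

/* Illustrative sketch only: 64-bit statistics counters built on atomic64_t. */
#include <linux/atomic.h>

struct foo_stats {
	atomic64_t packets;
	atomic64_t bytes;
};

static void foo_stats_account(struct foo_stats *s, long long len)
{
	atomic64_inc(&s->packets);	/* atomic64_add(1LL, ...) */
	atomic64_add(len, &s->bytes);	/* ldrexd/adds/adc/strexd retry loop */
}

static long long foo_stats_bytes(const struct foo_stats *s)
{
	return atomic64_read(&s->bytes);
}

Counters can be initialised statically with ATOMIC64_INIT(0) or at run time with atomic64_set().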