Linux v3.5.6 - arch/arm/include/asm/atomic.h
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

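/*
 * atomic_add_return() is a full memory barrier: the smp_mb() calls
 * before and after the ldrex/strex loop order it against surrounding
 * accesses, whereas atomic_add() above provides no ordering guarantee.
 */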
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

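/*
 * atomic_cmpxchg(): store the new value only if the current value
 * matches 'old' (strexeq).  The loop retries only when the exclusive
 * store fails; a value mismatch simply returns the value observed.
 */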
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}

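/*
 * atomic_clear_mask(): clear the bits in 'mask' with a bic inside an
 * ldrex/strex retry loop, so concurrent updates of *addr are not lost.
 */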
static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%3]\n"
"	bic	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

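/*
 * Pre-ARMv6 (UP only): atomicity is provided by briefly disabling
 * interrupts around a plain read-modify-write of the counter.
 */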
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

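/*
 * __atomic_add_unless(): add 'a' to the counter unless it currently
 * holds 'u', using a cmpxchg loop.  Returns the value observed before
 * any addition took place.
 */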
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
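/*
 * 64-bit atomics use the ldrexd/strexd doubleword exclusives, which
 * require a 64-bit aligned operand - hence __aligned(8) on the counter.
 */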
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

static inline u64 atomic64_read(const atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, u64 i)
{
	u64 tmp;

	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

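/*
 * atomic64_cmpxchg(): compare both halves (teq/teqeq) and only attempt
 * the doubleword store when they match; retry only on strexd failure.
 */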
static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
	u64 oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

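/*
 * atomic64_dec_if_positive(): decrement, but store the new value only
 * if it did not go negative (the bmi branch skips the store).  The
 * decremented value is returned either way so the caller can test < 0.
 */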
static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, #1\n"
"	sbc	%H0, %H0, #0\n"
"	teq	%H0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}

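/*
 * atomic64_add_unless(): add 'a' unless the counter equals 'u'.
 * Returns non-zero if the addition was performed, zero otherwise.
 */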
static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	u64 val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%0, %0, %6\n"
"	adc	%H0, %H0, %H6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif
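As a quick illustration of how the 32-bit API above is consumed, here is a minimal, hypothetical sketch of a reference count built on atomic_t. The my_object names are invented for the example; only the atomic_* calls and ATOMIC_INIT come from the header.

/* Hypothetical example: a simple reference counter built on atomic_t. */
#include <linux/atomic.h>

struct my_object {
	atomic_t refcount;		/* starts life at 1 */
};

static void my_object_init(struct my_object *obj)
{
	atomic_set(&obj->refcount, 1);
}

static void my_object_get(struct my_object *obj)
{
	atomic_inc(&obj->refcount);	/* atomic_add(1, ...) underneath, no ordering */
}

static int my_object_put(struct my_object *obj)
{
	/*
	 * atomic_dec_and_test() is built on atomic_sub_return(), which on
	 * ARMv6+ wraps the ldrex/strex loop in smp_mb() and so acts as a
	 * full barrier - what reference-count teardown relies on.
	 */
	if (atomic_dec_and_test(&obj->refcount))
		return 1;		/* last reference dropped */
	return 0;
}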
Linux v3.1 - arch/arm/include/asm/atomic.h
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/system.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%3]\n"
"	bic	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

static inline u64 atomic64_read(atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void atomic64_set(atomic64_t *v, u64 i)
{
	u64 tmp;

	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
	u64 oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, #1\n"
"	sbc	%H0, %H0, #0\n"
"	teq	%H0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	u64 val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%0, %0, %6\n"
"	adc	%H0, %H0, %H6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif
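Likewise, a minimal, hypothetical sketch of the 64-bit interface, here as a statistics counter; only the atomic64_* calls and ATOMIC64_INIT come from the header, everything else is invented for the example.

/* Hypothetical example: a 64-bit statistics counter using atomic64_t. */
#include <linux/atomic.h>

static atomic64_t bytes_transferred = ATOMIC64_INIT(0);

static void account_bytes(u64 len)
{
	atomic64_add(len, &bytes_transferred);	/* no ordering implied */
}

static u64 snapshot_bytes(void)
{
	/*
	 * The ldrexd-based atomic64_read() above returns the 64-bit value
	 * as a single unit, avoiding a torn read on 32-bit ARM.
	 */
	return atomic64_read(&bytes_transferred);
}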