v3.15
 
  1/*
  2 *  arch/arm/include/asm/atomic.h
  3 *
  4 *  Copyright (C) 1996 Russell King.
  5 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 */
 11#ifndef __ASM_ARM_ATOMIC_H
 12#define __ASM_ARM_ATOMIC_H
 13
 14#include <linux/compiler.h>
 15#include <linux/prefetch.h>
 16#include <linux/types.h>
 17#include <linux/irqflags.h>
 18#include <asm/barrier.h>
 19#include <asm/cmpxchg.h>
 20
 21#define ATOMIC_INIT(i)	{ (i) }
 22
 23#ifdef __KERNEL__
 24
 25/*
 26 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 27 * strex/ldrex monitor on some implementations. The reason we can use it for
 28 * atomic_set() is the clrex or dummy strex done on every exception return.
 29 */
 30#define atomic_read(v)	(*(volatile int *)&(v)->counter)
 31#define atomic_set(v,i)	(((v)->counter) = (i))
 32
 33#if __LINUX_ARM_ARCH__ >= 6
 34
 35/*
 36 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 37 * store exclusive to ensure that these are atomic.  We may loop
 38 * to ensure that the update happens.
 39 */
 40static inline void atomic_add(int i, atomic_t *v)
 41{
 42	unsigned long tmp;
 43	int result;
 44
 45	prefetchw(&v->counter);
 46	__asm__ __volatile__("@ atomic_add\n"
 47"1:	ldrex	%0, [%3]\n"
 48"	add	%0, %0, %4\n"
 49"	strex	%1, %0, [%3]\n"
 50"	teq	%1, #0\n"
 51"	bne	1b"
 52	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 53	: "r" (&v->counter), "Ir" (i)
 54	: "cc");
 55}
 56
 57static inline int atomic_add_return(int i, atomic_t *v)
 58{
 59	unsigned long tmp;
 60	int result;
 61
 62	smp_mb();
 63	prefetchw(&v->counter);
 64
 65	__asm__ __volatile__("@ atomic_add_return\n"
 66"1:	ldrex	%0, [%3]\n"
 67"	add	%0, %0, %4\n"
 68"	strex	%1, %0, [%3]\n"
 69"	teq	%1, #0\n"
 70"	bne	1b"
 71	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 72	: "r" (&v->counter), "Ir" (i)
 73	: "cc");
 74
 75	smp_mb();
 76
 77	return result;
 78}
 79
 80static inline void atomic_sub(int i, atomic_t *v)
 81{
 82	unsigned long tmp;
 83	int result;
 84
 85	prefetchw(&v->counter);
 86	__asm__ __volatile__("@ atomic_sub\n"
 87"1:	ldrex	%0, [%3]\n"
 88"	sub	%0, %0, %4\n"
 89"	strex	%1, %0, [%3]\n"
 90"	teq	%1, #0\n"
 91"	bne	1b"
 92	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 93	: "r" (&v->counter), "Ir" (i)
 94	: "cc");
 95}
 96
 97static inline int atomic_sub_return(int i, atomic_t *v)
 98{
 99	unsigned long tmp;
100	int result;
101
102	smp_mb();
103	prefetchw(&v->counter);
104
105	__asm__ __volatile__("@ atomic_sub_return\n"
106"1:	ldrex	%0, [%3]\n"
107"	sub	%0, %0, %4\n"
108"	strex	%1, %0, [%3]\n"
109"	teq	%1, #0\n"
110"	bne	1b"
111	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
112	: "r" (&v->counter), "Ir" (i)
113	: "cc");
114
115	smp_mb();
116
117	return result;
118}
119
120static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
121{
122	int oldval;
123	unsigned long res;
124
125	smp_mb();
126	prefetchw(&ptr->counter);
127
128	do {
129		__asm__ __volatile__("@ atomic_cmpxchg\n"
130		"ldrex	%1, [%3]\n"
131		"mov	%0, #0\n"
132		"teq	%1, %4\n"
133		"strexeq %0, %5, [%3]\n"
134		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
135		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
136		    : "cc");
137	} while (res);
138
139	smp_mb();
140
141	return oldval;
142}
143
144static inline int __atomic_add_unless(atomic_t *v, int a, int u)
145{
146	int oldval, newval;
147	unsigned long tmp;
148
149	smp_mb();
150	prefetchw(&v->counter);
151
152	__asm__ __volatile__ ("@ atomic_add_unless\n"
153"1:	ldrex	%0, [%4]\n"
154"	teq	%0, %5\n"
155"	beq	2f\n"
156"	add	%1, %0, %6\n"
157"	strex	%2, %1, [%4]\n"
158"	teq	%2, #0\n"
159"	bne	1b\n"
160"2:"
161	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
162	: "r" (&v->counter), "r" (u), "r" (a)
163	: "cc");
164
165	if (oldval != u)
166		smp_mb();
167
168	return oldval;
169}
170
171#else /* ARM_ARCH_6 */
172
173#ifdef CONFIG_SMP
174#error SMP not supported on pre-ARMv6 CPUs
175#endif
176
177static inline int atomic_add_return(int i, atomic_t *v)
178{
179	unsigned long flags;
180	int val;
181
182	raw_local_irq_save(flags);
183	val = v->counter;
184	v->counter = val += i;
185	raw_local_irq_restore(flags);
186
187	return val;
188}
189#define atomic_add(i, v)	(void) atomic_add_return(i, v)
190
191static inline int atomic_sub_return(int i, atomic_t *v)
192{
193	unsigned long flags;
194	int val;
195
196	raw_local_irq_save(flags);
197	val = v->counter;
198	v->counter = val -= i;
199	raw_local_irq_restore(flags);
200
201	return val;
202}
203#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
204
205static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
206{
207	int ret;
208	unsigned long flags;
209
210	raw_local_irq_save(flags);
211	ret = v->counter;
212	if (likely(ret == old))
213		v->counter = new;
214	raw_local_irq_restore(flags);
215
216	return ret;
217}
218
219static inline int __atomic_add_unless(atomic_t *v, int a, int u)
220{
221	int c, old;
222
223	c = atomic_read(v);
224	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
225		c = old;
226	return c;
227}
228
229#endif /* __LINUX_ARM_ARCH__ */
230
231#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
232
233#define atomic_inc(v)		atomic_add(1, v)
234#define atomic_dec(v)		atomic_sub(1, v)
235
236#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
237#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
238#define atomic_inc_return(v)    (atomic_add_return(1, v))
239#define atomic_dec_return(v)    (atomic_sub_return(1, v))
240#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
241
242#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
243
244#define smp_mb__before_atomic_dec()	smp_mb()
245#define smp_mb__after_atomic_dec()	smp_mb()
246#define smp_mb__before_atomic_inc()	smp_mb()
247#define smp_mb__after_atomic_inc()	smp_mb()
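/*
 * Typical usage (illustrative sketch, not part of the upstream header;
 * the object and field names are invented): a caller that needs a full
 * barrier around a non-returning atomic op pairs it with these helpers:
 *
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->refcount);
 *
 * The *_return and cmpxchg routines above already issue smp_mb() before
 * and after the ldrex/strex loop, so they need no extra barrier.
 */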
248
249#ifndef CONFIG_GENERIC_ATOMIC64
250typedef struct {
251	long long counter;
252} atomic64_t;
253
254#define ATOMIC64_INIT(i) { (i) }
255
256#ifdef CONFIG_ARM_LPAE
257static inline long long atomic64_read(const atomic64_t *v)
258{
259	long long result;
260
261	__asm__ __volatile__("@ atomic64_read\n"
262"	ldrd	%0, %H0, [%1]"
263	: "=&r" (result)
264	: "r" (&v->counter), "Qo" (v->counter)
265	);
266
267	return result;
268}
269
270static inline void atomic64_set(atomic64_t *v, long long i)
271{
272	__asm__ __volatile__("@ atomic64_set\n"
273"	strd	%2, %H2, [%1]"
274	: "=Qo" (v->counter)
275	: "r" (&v->counter), "r" (i)
276	);
277}
278#else
279static inline long long atomic64_read(const atomic64_t *v)
280{
281	long long result;
282
283	__asm__ __volatile__("@ atomic64_read\n"
284"	ldrexd	%0, %H0, [%1]"
285	: "=&r" (result)
286	: "r" (&v->counter), "Qo" (v->counter)
287	);
288
289	return result;
290}
291
292static inline void atomic64_set(atomic64_t *v, long long i)
293{
294	long long tmp;
295
296	prefetchw(&v->counter);
297	__asm__ __volatile__("@ atomic64_set\n"
298"1:	ldrexd	%0, %H0, [%2]\n"
299"	strexd	%0, %3, %H3, [%2]\n"
300"	teq	%0, #0\n"
301"	bne	1b"
302	: "=&r" (tmp), "=Qo" (v->counter)
303	: "r" (&v->counter), "r" (i)
304	: "cc");
305}
306#endif
307
308static inline void atomic64_add(long long i, atomic64_t *v)
309{
310	long long result;
311	unsigned long tmp;
312
313	prefetchw(&v->counter);
314	__asm__ __volatile__("@ atomic64_add\n"
315"1:	ldrexd	%0, %H0, [%3]\n"
316"	adds	%Q0, %Q0, %Q4\n"
317"	adc	%R0, %R0, %R4\n"
318"	strexd	%1, %0, %H0, [%3]\n"
319"	teq	%1, #0\n"
320"	bne	1b"
321	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
322	: "r" (&v->counter), "r" (i)
323	: "cc");
324}
325
326static inline long long atomic64_add_return(long long i, atomic64_t *v)
327{
328	long long result;
329	unsigned long tmp;
330
331	smp_mb();
332	prefetchw(&v->counter);
333
334	__asm__ __volatile__("@ atomic64_add_return\n"
335"1:	ldrexd	%0, %H0, [%3]\n"
336"	adds	%Q0, %Q0, %Q4\n"
337"	adc	%R0, %R0, %R4\n"
338"	strexd	%1, %0, %H0, [%3]\n"
339"	teq	%1, #0\n"
340"	bne	1b"
341	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
342	: "r" (&v->counter), "r" (i)
343	: "cc");
344
345	smp_mb();
346
347	return result;
348}
349
350static inline void atomic64_sub(long long i, atomic64_t *v)
351{
352	long long result;
353	unsigned long tmp;
354
355	prefetchw(&v->counter);
356	__asm__ __volatile__("@ atomic64_sub\n"
357"1:	ldrexd	%0, %H0, [%3]\n"
358"	subs	%Q0, %Q0, %Q4\n"
359"	sbc	%R0, %R0, %R4\n"
360"	strexd	%1, %0, %H0, [%3]\n"
361"	teq	%1, #0\n"
362"	bne	1b"
363	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
364	: "r" (&v->counter), "r" (i)
365	: "cc");
366}
367
368static inline long long atomic64_sub_return(long long i, atomic64_t *v)
369{
370	long long result;
371	unsigned long tmp;
372
373	smp_mb();
374	prefetchw(&v->counter);
375
376	__asm__ __volatile__("@ atomic64_sub_return\n"
377"1:	ldrexd	%0, %H0, [%3]\n"
378"	subs	%Q0, %Q0, %Q4\n"
379"	sbc	%R0, %R0, %R4\n"
380"	strexd	%1, %0, %H0, [%3]\n"
381"	teq	%1, #0\n"
382"	bne	1b"
383	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
384	: "r" (&v->counter), "r" (i)
385	: "cc");
386
387	smp_mb();
388
389	return result;
390}
391
392static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
393					long long new)
394{
395	long long oldval;
396	unsigned long res;
397
398	smp_mb();
399	prefetchw(&ptr->counter);
400
401	do {
402		__asm__ __volatile__("@ atomic64_cmpxchg\n"
403		"ldrexd		%1, %H1, [%3]\n"
404		"mov		%0, #0\n"
405		"teq		%1, %4\n"
406		"teqeq		%H1, %H4\n"
407		"strexdeq	%0, %5, %H5, [%3]"
408		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
409		: "r" (&ptr->counter), "r" (old), "r" (new)
410		: "cc");
411	} while (res);
412
413	smp_mb();
414
415	return oldval;
416}
417
418static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
419{
420	long long result;
421	unsigned long tmp;
422
423	smp_mb();
424	prefetchw(&ptr->counter);
425
426	__asm__ __volatile__("@ atomic64_xchg\n"
427"1:	ldrexd	%0, %H0, [%3]\n"
428"	strexd	%1, %4, %H4, [%3]\n"
429"	teq	%1, #0\n"
430"	bne	1b"
431	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
432	: "r" (&ptr->counter), "r" (new)
433	: "cc");
434
435	smp_mb();
436
437	return result;
438}
439
440static inline long long atomic64_dec_if_positive(atomic64_t *v)
441{
442	long long result;
443	unsigned long tmp;
444
445	smp_mb();
446	prefetchw(&v->counter);
447
448	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
449"1:	ldrexd	%0, %H0, [%3]\n"
450"	subs	%Q0, %Q0, #1\n"
451"	sbc	%R0, %R0, #0\n"
452"	teq	%R0, #0\n"
453"	bmi	2f\n"
454"	strexd	%1, %0, %H0, [%3]\n"
455"	teq	%1, #0\n"
456"	bne	1b\n"
457"2:"
458	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
459	: "r" (&v->counter)
460	: "cc");
461
462	smp_mb();
463
464	return result;
465}
466
467static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
468{
469	long long val;
470	unsigned long tmp;
471	int ret = 1;
472
473	smp_mb();
474	prefetchw(&v->counter);
475
476	__asm__ __volatile__("@ atomic64_add_unless\n"
477"1:	ldrexd	%0, %H0, [%4]\n"
478"	teq	%0, %5\n"
479"	teqeq	%H0, %H5\n"
480"	moveq	%1, #0\n"
481"	beq	2f\n"
482"	adds	%Q0, %Q0, %Q6\n"
483"	adc	%R0, %R0, %R6\n"
484"	strexd	%2, %0, %H0, [%4]\n"
485"	teq	%2, #0\n"
486"	bne	1b\n"
487"2:"
488	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
489	: "r" (&v->counter), "r" (u), "r" (a)
490	: "cc");
491
492	if (ret)
493		smp_mb();
494
495	return ret;
496}
497
498#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
499#define atomic64_inc(v)			atomic64_add(1LL, (v))
500#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
501#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
502#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
503#define atomic64_dec(v)			atomic64_sub(1LL, (v))
504#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
505#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
506#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
507
508#endif /* !CONFIG_GENERIC_ATOMIC64 */
509#endif
510#endif
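
A minimal usage sketch of the 32-bit API in the v3.15 header above. This
is illustrative only: the surrounding function and variable names are
invented, and only the atomic_* calls come from this header.

	#include <linux/atomic.h>

	static atomic_t nr_users = ATOMIC_INIT(0);

	static int example_open(void)
	{
		/* returns the new value, with full barrier semantics */
		if (atomic_add_return(1, &nr_users) == 1)
			pr_info("first user\n");
		return 0;
	}

	static void example_release(void)
	{
		/* atomic_dec_and_test() is atomic_sub_return(1, v) == 0 */
		if (atomic_dec_and_test(&nr_users))
			pr_info("last user gone\n");
	}

On ARMv6+, atomic_add_return()/atomic_sub_return() act as full memory
barriers (note the smp_mb() before and after the ldrex/strex loop),
while plain atomic_add()/atomic_sub() and atomic_set() imply no ordering.
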
v6.13.7
  1/* SPDX-License-Identifier: GPL-2.0-only */
  2/*
  3 *  arch/arm/include/asm/atomic.h
  4 *
  5 *  Copyright (C) 1996 Russell King.
  6 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
  7 */
  8#ifndef __ASM_ARM_ATOMIC_H
  9#define __ASM_ARM_ATOMIC_H
 10
 11#include <linux/compiler.h>
 12#include <linux/prefetch.h>
 13#include <linux/types.h>
 14#include <linux/irqflags.h>
 15#include <asm/barrier.h>
 16#include <asm/cmpxchg.h>
 17
 18#ifdef __KERNEL__
 19
 20/*
 21 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 22 * strex/ldrex monitor on some implementations. The reason we can use it for
 23 * atomic_set() is the clrex or dummy strex done on every exception return.
 24 */
 25#define arch_atomic_read(v)	READ_ONCE((v)->counter)
 26#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))
 27
 28#if __LINUX_ARM_ARCH__ >= 6
 29
 30/*
 31 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 32 * store exclusive to ensure that these are atomic.  We may loop
 33 * to ensure that the update happens.
 34 */
 35
 36#define ATOMIC_OP(op, c_op, asm_op)					\
 37static inline void arch_atomic_##op(int i, atomic_t *v)			\
 38{									\
 39	unsigned long tmp;						\
 40	int result;							\
 41									\
 42	prefetchw(&v->counter);						\
 43	__asm__ __volatile__("@ atomic_" #op "\n"			\
 44"1:	ldrex	%0, [%3]\n"						\
 45"	" #asm_op "	%0, %0, %4\n"					\
 46"	strex	%1, %0, [%3]\n"						\
 47"	teq	%1, #0\n"						\
 48"	bne	1b"							\
 49	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
 50	: "r" (&v->counter), "Ir" (i)					\
 51	: "cc");							\
 52}									\
 53
 54#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
 55static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
 56{									\
 57	unsigned long tmp;						\
 58	int result;							\
 59									\
 60	prefetchw(&v->counter);						\
 61									\
 62	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
 63"1:	ldrex	%0, [%3]\n"						\
 64"	" #asm_op "	%0, %0, %4\n"					\
 65"	strex	%1, %0, [%3]\n"						\
 66"	teq	%1, #0\n"						\
 67"	bne	1b"							\
 68	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
 69	: "r" (&v->counter), "Ir" (i)					\
 70	: "cc");							\
 71									\
 72	return result;							\
 73}
 74
 75#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
 76static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
 77{									\
 78	unsigned long tmp;						\
 79	int result, val;						\
 80									\
 81	prefetchw(&v->counter);						\
 82									\
 83	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
 84"1:	ldrex	%0, [%4]\n"						\
 85"	" #asm_op "	%1, %0, %5\n"					\
 86"	strex	%2, %1, [%4]\n"						\
 87"	teq	%2, #0\n"						\
 88"	bne	1b"							\
 89	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
 90	: "r" (&v->counter), "Ir" (i)					\
 91	: "cc");							\
 92									\
 93	return result;							\
 94}
 95
 96#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
 97#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
 98#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
 99#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed
100
101#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
102#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
103#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
104#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed
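/*
 * Illustrative note (not in the upstream file): the "#define op op"
 * pattern above is how an architecture tells the generic atomic layer
 * which operations it implements.  A name left undefined gets a generic
 * fallback; a name defined to itself is used as-is, and only the missing
 * ordering variants are synthesized (e.g. arch_atomic_add_return() and
 * arch_atomic_add_return_acquire() are built around
 * arch_atomic_add_return_relaxed() plus barriers).
 */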
105
106static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
107{
108	int oldval;
109	unsigned long res;
110
111	prefetchw(&ptr->counter);
112
113	do {
114		__asm__ __volatile__("@ atomic_cmpxchg\n"
115		"ldrex	%1, [%3]\n"
116		"mov	%0, #0\n"
117		"teq	%1, %4\n"
118		"strexeq %0, %5, [%3]\n"
119		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
120		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
121		    : "cc");
122	} while (res);
123
124	return oldval;
125}
126#define arch_atomic_cmpxchg_relaxed		arch_atomic_cmpxchg_relaxed
127
128static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
129{
130	int oldval, newval;
131	unsigned long tmp;
132
133	smp_mb();
134	prefetchw(&v->counter);
135
136	__asm__ __volatile__ ("@ atomic_add_unless\n"
137"1:	ldrex	%0, [%4]\n"
138"	teq	%0, %5\n"
139"	beq	2f\n"
140"	add	%1, %0, %6\n"
141"	strex	%2, %1, [%4]\n"
142"	teq	%2, #0\n"
143"	bne	1b\n"
144"2:"
145	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
146	: "r" (&v->counter), "r" (u), "r" (a)
147	: "cc");
148
149	if (oldval != u)
150		smp_mb();
151
152	return oldval;
153}
154#define arch_atomic_fetch_add_unless		arch_atomic_fetch_add_unless
155
156#else /* ARM_ARCH_6 */
157
158#ifdef CONFIG_SMP
159#error SMP not supported on pre-ARMv6 CPUs
160#endif
161
162#define ATOMIC_OP(op, c_op, asm_op)					\
163static inline void arch_atomic_##op(int i, atomic_t *v)			\
164{									\
165	unsigned long flags;						\
166									\
167	raw_local_irq_save(flags);					\
168	v->counter c_op i;						\
169	raw_local_irq_restore(flags);					\
170}									\
171
172#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
173static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
174{									\
175	unsigned long flags;						\
176	int val;							\
177									\
178	raw_local_irq_save(flags);					\
179	v->counter c_op i;						\
180	val = v->counter;						\
181	raw_local_irq_restore(flags);					\
182									\
183	return val;							\
184}
185
186#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
187static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
188{									\
189	unsigned long flags;						\
190	int val;							\
191									\
192	raw_local_irq_save(flags);					\
193	val = v->counter;						\
194	v->counter c_op i;						\
195	raw_local_irq_restore(flags);					\
196									\
197	return val;							\
198}
199
200#define arch_atomic_add_return			arch_atomic_add_return
201#define arch_atomic_sub_return			arch_atomic_sub_return
202#define arch_atomic_fetch_add			arch_atomic_fetch_add
203#define arch_atomic_fetch_sub			arch_atomic_fetch_sub
204
205#define arch_atomic_fetch_and			arch_atomic_fetch_and
206#define arch_atomic_fetch_andnot		arch_atomic_fetch_andnot
207#define arch_atomic_fetch_or			arch_atomic_fetch_or
208#define arch_atomic_fetch_xor			arch_atomic_fetch_xor
209
210static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
211{
212	int ret;
213	unsigned long flags;
214
215	raw_local_irq_save(flags);
216	ret = v->counter;
217	if (likely(ret == old))
218		v->counter = new;
219	raw_local_irq_restore(flags);
220
221	return ret;
222}
223#define arch_atomic_cmpxchg arch_atomic_cmpxchg
224
225#endif /* __LINUX_ARM_ARCH__ */
226
227#define ATOMIC_OPS(op, c_op, asm_op)					\
228	ATOMIC_OP(op, c_op, asm_op)					\
229	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
230	ATOMIC_FETCH_OP(op, c_op, asm_op)
231
232ATOMIC_OPS(add, +=, add)
233ATOMIC_OPS(sub, -=, sub)
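/*
 * Expansion sketch (illustrative only): on ARMv6+ each ATOMIC_OPS()
 * invocation above generates three inline functions from the templates,
 * roughly, for ATOMIC_OPS(add, +=, add):
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v);
 *	static inline int  arch_atomic_add_return_relaxed(int i, atomic_t *v);
 *	static inline int  arch_atomic_fetch_add_relaxed(int i, atomic_t *v);
 *
 * On pre-ARMv6 the same macros expand to the irq-disabling C versions,
 * where the c_op argument ("+=") does the update and asm_op is unused.
 */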
234
235#define arch_atomic_andnot arch_atomic_andnot
236
237#undef ATOMIC_OPS
238#define ATOMIC_OPS(op, c_op, asm_op)					\
239	ATOMIC_OP(op, c_op, asm_op)					\
240	ATOMIC_FETCH_OP(op, c_op, asm_op)
241
242ATOMIC_OPS(and, &=, and)
243ATOMIC_OPS(andnot, &= ~, bic)
244ATOMIC_OPS(or,  |=, orr)
245ATOMIC_OPS(xor, ^=, eor)
246
247#undef ATOMIC_OPS
248#undef ATOMIC_FETCH_OP
249#undef ATOMIC_OP_RETURN
250#undef ATOMIC_OP
251
252#ifndef CONFIG_GENERIC_ATOMIC64
253typedef struct {
254	s64 counter;
255} atomic64_t;
256
257#define ATOMIC64_INIT(i) { (i) }
258
259#ifdef CONFIG_ARM_LPAE
260static inline s64 arch_atomic64_read(const atomic64_t *v)
261{
262	s64 result;
263
264	__asm__ __volatile__("@ atomic64_read\n"
265"	ldrd	%0, %H0, [%1]"
266	: "=&r" (result)
267	: "r" (&v->counter), "Qo" (v->counter)
268	);
269
270	return result;
271}
272
273static inline void arch_atomic64_set(atomic64_t *v, s64 i)
274{
275	__asm__ __volatile__("@ atomic64_set\n"
276"	strd	%2, %H2, [%1]"
277	: "=Qo" (v->counter)
278	: "r" (&v->counter), "r" (i)
279	);
280}
281#else
282static inline s64 arch_atomic64_read(const atomic64_t *v)
283{
284	s64 result;
285
286	__asm__ __volatile__("@ atomic64_read\n"
287"	ldrexd	%0, %H0, [%1]"
288	: "=&r" (result)
289	: "r" (&v->counter), "Qo" (v->counter)
290	);
291
292	return result;
293}
294
295static inline void arch_atomic64_set(atomic64_t *v, s64 i)
296{
297	s64 tmp;
298
299	prefetchw(&v->counter);
300	__asm__ __volatile__("@ atomic64_set\n"
301"1:	ldrexd	%0, %H0, [%2]\n"
302"	strexd	%0, %3, %H3, [%2]\n"
303"	teq	%0, #0\n"
304"	bne	1b"
305	: "=&r" (tmp), "=Qo" (v->counter)
306	: "r" (&v->counter), "r" (i)
307	: "cc");
308}
309#endif
310
311#define ATOMIC64_OP(op, op1, op2)					\
312static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
313{									\
314	s64 result;							\
315	unsigned long tmp;						\
316									\
317	prefetchw(&v->counter);						\
318	__asm__ __volatile__("@ atomic64_" #op "\n"			\
319"1:	ldrexd	%0, %H0, [%3]\n"					\
320"	" #op1 " %Q0, %Q0, %Q4\n"					\
321"	" #op2 " %R0, %R0, %R4\n"					\
322"	strexd	%1, %0, %H0, [%3]\n"					\
323"	teq	%1, #0\n"						\
324"	bne	1b"							\
325	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
326	: "r" (&v->counter), "r" (i)					\
327	: "cc");							\
328}									\
329
330#define ATOMIC64_OP_RETURN(op, op1, op2)				\
331static inline s64							\
332arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v)		\
333{									\
334	s64 result;							\
335	unsigned long tmp;						\
336									\
337	prefetchw(&v->counter);						\
338									\
339	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
340"1:	ldrexd	%0, %H0, [%3]\n"					\
341"	" #op1 " %Q0, %Q0, %Q4\n"					\
342"	" #op2 " %R0, %R0, %R4\n"					\
343"	strexd	%1, %0, %H0, [%3]\n"					\
344"	teq	%1, #0\n"						\
345"	bne	1b"							\
346	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
347	: "r" (&v->counter), "r" (i)					\
348	: "cc");							\
349									\
350	return result;							\
351}
352
353#define ATOMIC64_FETCH_OP(op, op1, op2)					\
354static inline s64							\
355arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v)		\
356{									\
357	s64 result, val;						\
358	unsigned long tmp;						\
359									\
360	prefetchw(&v->counter);						\
361									\
362	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
363"1:	ldrexd	%0, %H0, [%4]\n"					\
364"	" #op1 " %Q1, %Q0, %Q5\n"					\
365"	" #op2 " %R1, %R0, %R5\n"					\
366"	strexd	%2, %1, %H1, [%4]\n"					\
367"	teq	%2, #0\n"						\
368"	bne	1b"							\
369	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
370	: "r" (&v->counter), "r" (i)					\
371	: "cc");							\
372									\
373	return result;							\
374}
375
376#define ATOMIC64_OPS(op, op1, op2)					\
377	ATOMIC64_OP(op, op1, op2)					\
378	ATOMIC64_OP_RETURN(op, op1, op2)				\
379	ATOMIC64_FETCH_OP(op, op1, op2)
380
381ATOMIC64_OPS(add, adds, adc)
382ATOMIC64_OPS(sub, subs, sbc)
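/*
 * Illustrative note: a 64-bit counter spans two 32-bit registers here,
 * so each op is passed an instruction pair: "adds" updates the low word
 * and sets the carry flag, then "adc" adds the carry into the high word
 * (likewise subs/sbc for subtraction).  In the templates, %Q0 names the
 * low half and %R0 the high half of the 64-bit operand, and ldrexd/strexd
 * load and store both halves as a single exclusive access.
 */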
383
384#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
385#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
386#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
387#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed
388
389#undef ATOMIC64_OPS
390#define ATOMIC64_OPS(op, op1, op2)					\
391	ATOMIC64_OP(op, op1, op2)					\
392	ATOMIC64_FETCH_OP(op, op1, op2)
393
394#define arch_atomic64_andnot arch_atomic64_andnot
395
396ATOMIC64_OPS(and, and, and)
397ATOMIC64_OPS(andnot, bic, bic)
398ATOMIC64_OPS(or,  orr, orr)
399ATOMIC64_OPS(xor, eor, eor)
400
401#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
402#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
403#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
404#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed
405
406#undef ATOMIC64_OPS
407#undef ATOMIC64_FETCH_OP
408#undef ATOMIC64_OP_RETURN
409#undef ATOMIC64_OP
410
411static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
412{
413	s64 oldval;
414	unsigned long res;
415
416	prefetchw(&ptr->counter);
417
418	do {
419		__asm__ __volatile__("@ atomic64_cmpxchg\n"
420		"ldrexd		%1, %H1, [%3]\n"
421		"mov		%0, #0\n"
422		"teq		%1, %4\n"
423		"teqeq		%H1, %H4\n"
424		"strexdeq	%0, %5, %H5, [%3]"
425		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
426		: "r" (&ptr->counter), "r" (old), "r" (new)
427		: "cc");
428	} while (res);
429
430	return oldval;
431}
432#define arch_atomic64_cmpxchg_relaxed	arch_atomic64_cmpxchg_relaxed
433
434static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
435{
436	s64 result;
437	unsigned long tmp;
438
439	prefetchw(&ptr->counter);
440
441	__asm__ __volatile__("@ atomic64_xchg\n"
442"1:	ldrexd	%0, %H0, [%3]\n"
443"	strexd	%1, %4, %H4, [%3]\n"
444"	teq	%1, #0\n"
445"	bne	1b"
446	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
447	: "r" (&ptr->counter), "r" (new)
448	: "cc");
449
450	return result;
451}
452#define arch_atomic64_xchg_relaxed		arch_atomic64_xchg_relaxed
453
454static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
455{
456	s64 result;
457	unsigned long tmp;
458
459	smp_mb();
460	prefetchw(&v->counter);
461
462	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
463"1:	ldrexd	%0, %H0, [%3]\n"
464"	subs	%Q0, %Q0, #1\n"
465"	sbc	%R0, %R0, #0\n"
466"	teq	%R0, #0\n"
467"	bmi	2f\n"
468"	strexd	%1, %0, %H0, [%3]\n"
469"	teq	%1, #0\n"
470"	bne	1b\n"
471"2:"
472	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
473	: "r" (&v->counter)
474	: "cc");
475
476	smp_mb();
477
478	return result;
479}
480#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
481
482static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
483{
484	s64 oldval, newval;
485	unsigned long tmp;
486
487	smp_mb();
488	prefetchw(&v->counter);
489
490	__asm__ __volatile__("@ atomic64_add_unless\n"
491"1:	ldrexd	%0, %H0, [%4]\n"
492"	teq	%0, %5\n"
493"	teqeq	%H0, %H5\n"
494"	beq	2f\n"
495"	adds	%Q1, %Q0, %Q6\n"
496"	adc	%R1, %R0, %R6\n"
497"	strexd	%2, %1, %H1, [%4]\n"
498"	teq	%2, #0\n"
499"	bne	1b\n"
500"2:"
501	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
502	: "r" (&v->counter), "r" (u), "r" (a)
503	: "cc");
504
505	if (oldval != u)
506		smp_mb();
507
508	return oldval;
509}
510#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
511
512#endif /* !CONFIG_GENERIC_ATOMIC64 */
513#endif
514#endif
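
How the _relaxed ops above become the fully ordered atomic_*() API is
handled by the generic atomic headers rather than by this file. A rough
sketch of the pattern (simplified; the real generated code lives under
include/linux/atomic/ and also adds instrumentation):

	/* built by the generic layer when only the _relaxed op is provided */
	static __always_inline int
	arch_atomic_add_return(int i, atomic_t *v)
	{
		int ret;

		smp_mb();	/* order prior accesses before the RMW */
		ret = arch_atomic_add_return_relaxed(i, v);
		smp_mb();	/* order the RMW before later accesses */
		return ret;
	}

This mirrors what the v3.15 code did by hand with explicit smp_mb()
calls inside each *_return and cmpxchg routine; moving the barriers out
of the architecture code lets callers pick relaxed, acquire, release or
fully ordered variants as needed.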