v3.5.6
 
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}
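
/*
 * Illustrative sketch, not part of the original header: the ldrex/strex
 * retry loop above behaves like the portable compare-and-swap loop below.
 * example_atomic_add() is a hypothetical name and uses the GCC/Clang
 * __atomic builtins purely for exposition; the kernel relies on the
 * inline assembly above instead.
 */
static inline void example_atomic_add(int i, int *counter)
{
	int old, new;

	do {
		old = __atomic_load_n(counter, __ATOMIC_RELAXED);	/* like ldrex */
		new = old + i;						/* like add */
	} while (!__atomic_compare_exchange_n(counter, &old, new,	/* like strex + retry */
					      0, __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED));
}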

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%3]\n"
"	add	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%3]\n"
"	sub	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__("@ atomic_clear_mask\n"
"1:	ldrex	%0, [%3]\n"
"	bic	%0, %0, %4\n"
"	strex	%1, %0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
	: "r" (addr), "Ir" (mask)
	: "cc");
}

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val += i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long flags;
	int val;

	raw_local_irq_save(flags);
	val = v->counter;
	v->counter = val -= i;
	raw_local_irq_restore(flags);

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	*addr &= ~mask;
	raw_local_irq_restore(flags);
}

#endif /* __LINUX_ARM_ARCH__ */

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
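
/*
 * Illustrative usage sketch, not part of the original header: a common
 * "take a reference only if the object is still live" pattern built on
 * __atomic_add_unless(). example_tryget() is a hypothetical name.
 */
static inline int example_tryget(atomic_t *refcount)
{
	/* Increment unless the count is already 0; nonzero means we got a reference. */
	return __atomic_add_unless(refcount, 1, 0) != 0;
}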

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)    (atomic_add_return(1, v))
#define atomic_dec_return(v)    (atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
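
/*
 * Illustrative usage sketch, not part of the original header: the classic
 * reference-count release pattern built on atomic_dec_and_test().
 * example_object, example_put and example_free are hypothetical names.
 */
struct example_object {
	atomic_t refcount;
};

static inline void example_put(struct example_object *obj,
			       void (*example_free)(struct example_object *))
{
	if (atomic_dec_and_test(&obj->refcount))	/* dropped the last reference */
		example_free(obj);
}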

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	u64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

static inline u64 atomic64_read(const atomic64_t *v)
{
	u64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}
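
/*
 * Note added for exposition, not part of the original header: a plain
 * 64-bit load is not guaranteed to be single-copy atomic on these cores,
 * so atomic64_read() uses ldrexd, which loads an even/odd register pair
 * atomically; "%H0" names the second (higher-numbered) register of that
 * pair, which holds the high word on little-endian configurations.
 */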

static inline void atomic64_set(atomic64_t *v, u64 i)
{
	u64 tmp;

	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_add\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	adds	%0, %0, %4\n"
"	adc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	__asm__ __volatile__("@ atomic64_sub\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_sub_return\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, %4\n"
"	sbc	%H0, %H0, %H4\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
{
	u64 oldval;
	unsigned long res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	smp_mb();

	return result;
}

static inline u64 atomic64_dec_if_positive(atomic64_t *v)
{
	u64 result;
	unsigned long tmp;

	smp_mb();

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%0, %0, #1\n"
"	sbc	%H0, %H0, #0\n"
"	teq	%H0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}

static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
{
	u64 val;
	unsigned long tmp;
	int ret = 1;

	smp_mb();

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	moveq	%1, #0\n"
"	beq	2f\n"
"	adds	%0, %0, %6\n"
"	adc	%H0, %H0, %H6\n"
"	strexd	%2, %0, %H0, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (ret)
		smp_mb();

	return ret;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif
v5.14.15
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/atomic.h
 *
 *  Copyright (C) 1996 Russell King.
 *  Copyright (C) 2002 Deep Blue Solutions Ltd.
 */
#ifndef __ASM_ARM_ATOMIC_H
#define __ASM_ARM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/prefetch.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define arch_atomic_read(v)	READ_ONCE((v)->counter)
#define arch_atomic_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#if __LINUX_ARM_ARCH__ >= 6

/*
 * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic_" #op "\n"			\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_" #op "_return\n"		\
"1:	ldrex	%0, [%3]\n"						\
"	" #asm_op "	%0, %0, %4\n"					\
"	strex	%1, %0, [%3]\n"						\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	unsigned long tmp;						\
	int result, val;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic_fetch_" #op "\n"			\
"1:	ldrex	%0, [%4]\n"						\
"	" #asm_op "	%1, %0, %5\n"					\
"	strex	%2, %1, [%4]\n"						\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "Ir" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed
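
/*
 * Illustrative sketch, not part of the original header: the _relaxed
 * variants above provide no memory ordering on their own. The generic
 * atomic layer is expected to build the fully ordered forms by bracketing
 * the relaxed op with full barriers, roughly like this (example_add_return
 * is a hypothetical name, shown only for exposition):
 *
 *	static inline int example_add_return(int i, atomic_t *v)
 *	{
 *		int ret;
 *
 *		smp_mb();
 *		ret = arch_atomic_add_return_relaxed(i, v);
 *		smp_mb();
 *		return ret;
 *	}
 */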

static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{
	int oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%3]\n"
		"mov	%0, #0\n"
		"teq	%1, %4\n"
		"strexeq %0, %5, [%3]\n"
		    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;
}
#define arch_atomic_cmpxchg_relaxed		arch_atomic_cmpxchg_relaxed

static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__ ("@ atomic_add_unless\n"
"1:	ldrex	%0, [%4]\n"
"	teq	%0, %5\n"
"	beq	2f\n"
"	add	%1, %0, %6\n"
"	strex	%2, %1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic_fetch_add_unless		arch_atomic_fetch_add_unless

#else /* ARM_ARCH_6 */

#ifdef CONFIG_SMP
#error SMP not supported on pre-ARMv6 CPUs
#endif

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void arch_atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
}									\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int arch_atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	v->counter c_op i;						\
	val = v->counter;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int val;							\
									\
	raw_local_irq_save(flags);					\
	val = v->counter;						\
	v->counter c_op i;						\
	raw_local_irq_restore(flags);					\
									\
	return val;							\
}

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	unsigned long flags;

	raw_local_irq_save(flags);
	ret = v->counter;
	if (likely(ret == old))
		v->counter = new;
	raw_local_irq_restore(flags);

	return ret;
}

#define arch_atomic_fetch_andnot		arch_atomic_fetch_andnot

#endif /* __LINUX_ARM_ARCH__ */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define arch_atomic_andnot arch_atomic_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or,  |=, orr)
ATOMIC_OPS(xor, ^=, eor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
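
/*
 * Note added for exposition, not part of the original header: with the
 * ATOMIC_OPS() expansions above, add and sub each get arch_atomic_<op>(),
 * a return variant and a fetch variant (suffixed _relaxed on the ARMv6+
 * side), while and, andnot, or and xor each get arch_atomic_<op>() and a
 * fetch variant only.
 */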

#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

#ifndef CONFIG_GENERIC_ATOMIC64
typedef struct {
	s64 counter;
} atomic64_t;

#define ATOMIC64_INIT(i) { (i) }

#ifdef CONFIG_ARM_LPAE
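/*
 * Note added for exposition, not part of the original header: with LPAE,
 * 64-bit ldrd/strd to a naturally aligned location are architecturally
 * single-copy atomic, so plain loads and stores suffice in this branch;
 * without LPAE (the #else branch below) the exclusive ldrexd/strexd
 * sequences are needed instead.
 */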
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("@ atomic64_set\n"
"	strd	%2, %H2, [%1]"
	: "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	);
}
#else
static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 result;

	__asm__ __volatile__("@ atomic64_read\n"
"	ldrexd	%0, %H0, [%1]"
	: "=&r" (result)
	: "r" (&v->counter), "Qo" (v->counter)
	);

	return result;
}

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	s64 tmp;

	prefetchw(&v->counter);
	__asm__ __volatile__("@ atomic64_set\n"
"1:	ldrexd	%0, %H0, [%2]\n"
"	strexd	%0, %3, %H3, [%2]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=Qo" (v->counter)
	: "r" (&v->counter), "r" (i)
	: "cc");
}
#endif

#define ATOMIC64_OP(op, op1, op2)					\
static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
	__asm__ __volatile__("@ atomic64_" #op "\n"			\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
}									\

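/*
 * Note added for exposition, not part of the original header: for a 64-bit
 * operand held in a register pair, "%Q0" and "%R0" name the registers
 * holding the low and high word respectively (independent of endianness),
 * while "%H0" names the second (higher-numbered) register of the pair, as
 * the consecutive-register form required by ldrexd/strexd.
 */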
#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64							\
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_" #op "_return\n"		\
"1:	ldrexd	%0, %H0, [%3]\n"					\
"	" #op1 " %Q0, %Q0, %Q4\n"					\
"	" #op2 " %R0, %R0, %R4\n"					\
"	strexd	%1, %0, %H0, [%3]\n"					\
"	teq	%1, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)		\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64							\
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v)		\
{									\
	s64 result, val;						\
	unsigned long tmp;						\
									\
	prefetchw(&v->counter);						\
									\
	__asm__ __volatile__("@ atomic64_fetch_" #op "\n"		\
"1:	ldrexd	%0, %H0, [%4]\n"					\
"	" #op1 " %Q1, %Q0, %Q5\n"					\
"	" #op2 " %R1, %R0, %R5\n"					\
"	strexd	%2, %1, %H1, [%4]\n"					\
"	teq	%2, #0\n"						\
"	bne	1b"							\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Qo" (v->counter)	\
	: "r" (&v->counter), "r" (i)					\
	: "cc");							\
									\
	return result;							\
}

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc)

#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define arch_atomic64_andnot arch_atomic64_andnot

ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or,  orr, orr)
ATOMIC64_OPS(xor, eor, eor)

#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{
	s64 oldval;
	unsigned long res;

	prefetchw(&ptr->counter);

	do {
		__asm__ __volatile__("@ atomic64_cmpxchg\n"
		"ldrexd		%1, %H1, [%3]\n"
		"mov		%0, #0\n"
		"teq		%1, %4\n"
		"teqeq		%H1, %H4\n"
		"strexdeq	%0, %5, %H5, [%3]"
		: "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
		: "r" (&ptr->counter), "r" (old), "r" (new)
		: "cc");
	} while (res);

	return oldval;
}
#define arch_atomic64_cmpxchg_relaxed	arch_atomic64_cmpxchg_relaxed

static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{
	s64 result;
	unsigned long tmp;

	prefetchw(&ptr->counter);

	__asm__ __volatile__("@ atomic64_xchg\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	strexd	%1, %4, %H4, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp), "+Qo" (ptr->counter)
	: "r" (&ptr->counter), "r" (new)
	: "cc");

	return result;
}
#define arch_atomic64_xchg_relaxed		arch_atomic64_xchg_relaxed

static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_dec_if_positive\n"
"1:	ldrexd	%0, %H0, [%3]\n"
"	subs	%Q0, %Q0, #1\n"
"	sbc	%R0, %R0, #0\n"
"	teq	%R0, #0\n"
"	bmi	2f\n"
"	strexd	%1, %0, %H0, [%3]\n"
"	teq	%1, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter)
	: "cc");

	smp_mb();

	return result;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 oldval, newval;
	unsigned long tmp;

	smp_mb();
	prefetchw(&v->counter);

	__asm__ __volatile__("@ atomic64_add_unless\n"
"1:	ldrexd	%0, %H0, [%4]\n"
"	teq	%0, %5\n"
"	teqeq	%H0, %H5\n"
"	beq	2f\n"
"	adds	%Q1, %Q0, %Q6\n"
"	adc	%R1, %R0, %R6\n"
"	strexd	%2, %1, %H1, [%4]\n"
"	teq	%2, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (oldval), "=&r" (newval), "=&r" (tmp), "+Qo" (v->counter)
	: "r" (&v->counter), "r" (u), "r" (a)
	: "cc");

	if (oldval != u)
		smp_mb();

	return oldval;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

#endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif
#endif