v5.4
  1/*
  2 * Atomic operations that C can't guarantee us.  Useful for
  3 * resource counting etc..
  4 *
  5 * But use these as seldom as possible since they are much slower
  6 * than regular operations.
  7 *
  8 * This file is subject to the terms and conditions of the GNU General Public
  9 * License.  See the file "COPYING" in the main directory of this archive
 10 * for more details.
 11 *
 12 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 13 */
 14#ifndef _ASM_ATOMIC_H
 15#define _ASM_ATOMIC_H
 16
 17#include <linux/irqflags.h>
 18#include <linux/types.h>
 19#include <asm/barrier.h>
 20#include <asm/compiler.h>
 21#include <asm/cpu-features.h>
 22#include <asm/cmpxchg.h>
 23#include <asm/war.h>
 24
 25/*
 26 * Using a branch-likely instruction to check the result of an sc instruction
 27 * works around a bug present in R10000 CPUs prior to revision 3.0 that could
 28 * cause ll-sc sequences to execute non-atomically.
 29 */
 30#if R10000_LLSC_WAR
 31# define __scbeqz "beqzl"
 32#else
 33# define __scbeqz "beqz"
 34#endif
 35
 36#define ATOMIC_INIT(i)	  { (i) }
 37
 38/*
 39 * atomic_read - read atomic variable
 40 * @v: pointer of type atomic_t
 41 *
 42 * Atomically reads the value of @v.
 43 */
 44#define atomic_read(v)		READ_ONCE((v)->counter)
 45
 46/*
 47 * atomic_set - set atomic variable
 48 * @v: pointer of type atomic_t
 49 * @i: required value
 50 *
 51 * Atomically sets the value of @v to @i.
 52 */
 53#define atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))
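
As a rough userspace model (an illustration, not the kernel's implementation), READ_ONCE()/WRITE_ONCE() here behave like C11 relaxed atomic accesses on the counter; the my_atomic_* names below are hypothetical:

#include <stdatomic.h>

typedef struct { atomic_int counter; } my_atomic_t;

/* Model of atomic_read(): one untorn, relaxed load of ->counter. */
static inline int my_atomic_read(my_atomic_t *v)
{
	return atomic_load_explicit(&v->counter, memory_order_relaxed);
}

/* Model of atomic_set(): one untorn, relaxed store to ->counter. */
static inline void my_atomic_set(my_atomic_t *v, int i)
{
	atomic_store_explicit(&v->counter, i, memory_order_relaxed);
}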
 54
 55#define ATOMIC_OP(op, c_op, asm_op)					      \
 56static __inline__ void atomic_##op(int i, atomic_t * v)			      \
 57{									      \
 58	if (kernel_uses_llsc) {						      \
 59		int temp;						      \
 60									      \
 61		loongson_llsc_mb();					      \
 62		__asm__ __volatile__(					      \
 63		"	.set	push					\n"   \
 64		"	.set	"MIPS_ISA_LEVEL"			\n"   \
 65		"1:	ll	%0, %1		# atomic_" #op "	\n"   \
 66		"	" #asm_op " %0, %2				\n"   \
 67		"	sc	%0, %1					\n"   \
 68		"\t" __scbeqz "	%0, 1b					\n"   \
 69		"	.set	pop					\n"   \
 70		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
 71		: "Ir" (i) : __LLSC_CLOBBER);				      \
 72	} else {							      \
 73		unsigned long flags;					      \
 74									      \
 75		raw_local_irq_save(flags);				      \
 76		v->counter c_op i;					      \
 77		raw_local_irq_restore(flags);				      \
 78	}								      \
 79}
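
The ll/sc pair above keeps retrying until the store-conditional succeeds. A hedged C11 analogue of that loop (a sketch of the idea, not what the compiler actually emits):

#include <stdatomic.h>

/* Sketch: the ll/sc retry behaves like a weak compare-and-swap loop;
 * the CAS fails (like sc) whenever another CPU touched the word. */
static inline void model_atomic_add(int i, atomic_int *counter)
{
	int old = atomic_load_explicit(counter, memory_order_relaxed);

	while (!atomic_compare_exchange_weak_explicit(counter, &old, old + i,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;	/* 'old' was reloaded by the failed CAS; retry */
}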
 80
 81#define ATOMIC_OP_RETURN(op, c_op, asm_op)				      \
 82static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)	      \
 83{									      \
 84	int result;							      \
 85									      \
 86	if (kernel_uses_llsc) {						      \
 87		int temp;						      \
 88									      \
 89		loongson_llsc_mb();					      \
 90		__asm__ __volatile__(					      \
 91		"	.set	push					\n"   \
 92		"	.set	"MIPS_ISA_LEVEL"			\n"   \
 93		"1:	ll	%1, %2		# atomic_" #op "_return	\n"   \
 94		"	" #asm_op " %0, %1, %3				\n"   \
 95		"	sc	%0, %2					\n"   \
 96		"\t" __scbeqz "	%0, 1b					\n"   \
 97		"	" #asm_op " %0, %1, %3				\n"   \
 98		"	.set	pop					\n"   \
 99		: "=&r" (result), "=&r" (temp),				      \
100		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
101		: "Ir" (i) : __LLSC_CLOBBER);				      \
102	} else {							      \
103		unsigned long flags;					      \
104									      \
105		raw_local_irq_save(flags);				      \
106		result = v->counter;					      \
107		result c_op i;						      \
108		v->counter = result;					      \
109		raw_local_irq_restore(flags);				      \
110	}								      \
111									      \
112	return result;							      \
113}
114
115#define ATOMIC_FETCH_OP(op, c_op, asm_op)				      \
116static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)	      \
117{									      \
118	int result;							      \
119									      \
120	if (kernel_uses_llsc) {						      \
121		int temp;						      \
122									      \
123		loongson_llsc_mb();					      \
124		__asm__ __volatile__(					      \
125		"	.set	push					\n"   \
126		"	.set	"MIPS_ISA_LEVEL"			\n"   \
127		"1:	ll	%1, %2		# atomic_fetch_" #op "	\n"   \
128		"	" #asm_op " %0, %1, %3				\n"   \
129		"	sc	%0, %2					\n"   \
130		"\t" __scbeqz "	%0, 1b					\n"   \
131		"	.set	pop					\n"   \
132		"	move	%0, %1					\n"   \
133		: "=&r" (result), "=&r" (temp),				      \
134		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
135		: "Ir" (i) : __LLSC_CLOBBER);				      \
136	} else {							      \
137		unsigned long flags;					      \
138									      \
139		raw_local_irq_save(flags);				      \
140		result = v->counter;					      \
141		v->counter c_op i;					      \
142		raw_local_irq_restore(flags);				      \
143	}								      \
144									      \
145	return result;							      \
146}
147
148#define ATOMIC_OPS(op, c_op, asm_op)					      \
149	ATOMIC_OP(op, c_op, asm_op)					      \
150	ATOMIC_OP_RETURN(op, c_op, asm_op)				      \
151	ATOMIC_FETCH_OP(op, c_op, asm_op)
152
153ATOMIC_OPS(add, +=, addu)
154ATOMIC_OPS(sub, -=, subu)
155
156#define atomic_add_return_relaxed	atomic_add_return_relaxed
157#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
158#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
159#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
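
The two return-value conventions are easy to mix up: the *_return variants hand back the new value, the fetch_* variants the old one. An illustration with made-up numbers:

/* Illustration (hypothetical values), starting from v->counter == 5:
 *   atomic_add_return(3, v)   returns 8, counter is now 8
 *   atomic_fetch_add(3, v)    returns 5, counter is now 8
 *   atomic_sub_return(2, v)   returns 6, counter is now 6
 */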
160
161#undef ATOMIC_OPS
162#define ATOMIC_OPS(op, c_op, asm_op)					      \
163	ATOMIC_OP(op, c_op, asm_op)					      \
164	ATOMIC_FETCH_OP(op, c_op, asm_op)
165
166ATOMIC_OPS(and, &=, and)
167ATOMIC_OPS(or, |=, or)
168ATOMIC_OPS(xor, ^=, xor)
169
170#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
171#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
172#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
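
The fetch variants of the bitwise ops suit flag words, since the returned old value says whether a bit was already set; a hedged sketch (set_flag_once is a hypothetical helper, not kernel API):

/* Hypothetical: set a flag bit and report whether it was already set. */
static inline bool set_flag_once(atomic_t *flags, int bit)
{
	return atomic_fetch_or(1 << bit, flags) & (1 << bit);
}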
173
174#undef ATOMIC_OPS
175#undef ATOMIC_FETCH_OP
176#undef ATOMIC_OP_RETURN
177#undef ATOMIC_OP
178
179/*
180 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
181 * @i: integer value to subtract
182 * @v: pointer of type atomic_t
183 *
 184 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
185 * The function returns the old value of @v minus @i.
186 */
187static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
188{
189	int result;
190
191	smp_mb__before_llsc();
192
193	if (kernel_uses_llsc) {
194		int temp;
195
196		loongson_llsc_mb();
197		__asm__ __volatile__(
198		"	.set	push					\n"
199		"	.set	"MIPS_ISA_LEVEL"			\n"
200		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
201		"	.set	pop					\n"
202		"	subu	%0, %1, %3				\n"
203		"	move	%1, %0					\n"
204		"	bltz	%0, 2f					\n"
205		"	.set	push					\n"
206		"	.set	"MIPS_ISA_LEVEL"			\n"
207		"	sc	%1, %2					\n"
208		"\t" __scbeqz "	%1, 1b					\n"
209		"2:							\n"
210		"	.set	pop					\n"
211		: "=&r" (result), "=&r" (temp),
212		  "+" GCC_OFF_SMALL_ASM() (v->counter)
213		: "Ir" (i) : __LLSC_CLOBBER);
214	} else {
215		unsigned long flags;
216
217		raw_local_irq_save(flags);
218		result = v->counter;
219		result -= i;
220		if (result >= 0)
221			v->counter = result;
222		raw_local_irq_restore(flags);
223	}
224
225	smp_llsc_mb();
226
227	return result;
228}
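
A typical use is a "take tokens if available" counter that must never go negative; a hedged sketch (take_tokens is a hypothetical helper, not kernel API):

/* Hypothetical: try to take n tokens; the store only happens when the
 * result stays >= 0, but old - n is returned either way. */
static inline bool take_tokens(atomic_t *available, int n)
{
	return atomic_sub_if_positive(n, available) >= 0;
}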
229
230#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
231#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
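
atomic_cmpxchg() returns the value it found, which enables the usual open-coded read-modify-write loop; a hedged sketch of a saturating increment (atomic_inc_saturating is a hypothetical helper):

/* Hypothetical: increment v but saturate at INT_MAX. */
static inline int atomic_inc_saturating(atomic_t *v)
{
	int cur = atomic_read(v);
	int old;

	while (cur != INT_MAX) {
		old = atomic_cmpxchg(v, cur, cur + 1);
		if (old == cur)		/* our update won */
			break;
		cur = old;		/* somebody raced us; retry */
	}
	return cur;
}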
232
233/*
234 * atomic_dec_if_positive - decrement by 1 if old value positive
235 * @v: pointer of type atomic_t
236 */
237#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
238
239#ifdef CONFIG_64BIT
240
241#define ATOMIC64_INIT(i)    { (i) }
242
243/*
244 * atomic64_read - read atomic variable
245 * @v: pointer of type atomic64_t
 246 * Atomically reads the value of @v.
247 */
248#define atomic64_read(v)	READ_ONCE((v)->counter)
249
250/*
251 * atomic64_set - set atomic variable
252 * @v: pointer of type atomic64_t
253 * @i: required value
254 */
255#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))
256
257#define ATOMIC64_OP(op, c_op, asm_op)					      \
258static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		      \
259{									      \
260	if (kernel_uses_llsc) {						      \
261		s64 temp;						      \
262									      \
263		loongson_llsc_mb();					      \
264		__asm__ __volatile__(					      \
265		"	.set	push					\n"   \
266		"	.set	"MIPS_ISA_LEVEL"			\n"   \
267		"1:	lld	%0, %1		# atomic64_" #op "	\n"   \
268		"	" #asm_op " %0, %2				\n"   \
269		"	scd	%0, %1					\n"   \
270		"\t" __scbeqz "	%0, 1b					\n"   \
271		"	.set	pop					\n"   \
272		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
273		: "Ir" (i) : __LLSC_CLOBBER);				      \
274	} else {							      \
275		unsigned long flags;					      \
276									      \
277		raw_local_irq_save(flags);				      \
278		v->counter c_op i;					      \
279		raw_local_irq_restore(flags);				      \
280	}								      \
281}
282
283#define ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
284static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)   \
285{									      \
286	s64 result;							      \
287									      \
288	if (kernel_uses_llsc) {						      \
289		s64 temp;						      \
290									      \
291		loongson_llsc_mb();					      \
292		__asm__ __volatile__(					      \
293		"	.set	push					\n"   \
294		"	.set	"MIPS_ISA_LEVEL"			\n"   \
295		"1:	lld	%1, %2		# atomic64_" #op "_return\n"  \
296		"	" #asm_op " %0, %1, %3				\n"   \
297		"	scd	%0, %2					\n"   \
298		"\t" __scbeqz "	%0, 1b					\n"   \
299		"	" #asm_op " %0, %1, %3				\n"   \
300		"	.set	pop					\n"   \
301		: "=&r" (result), "=&r" (temp),				      \
302		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
303		: "Ir" (i) : __LLSC_CLOBBER);				      \
304	} else {							      \
305		unsigned long flags;					      \
306									      \
307		raw_local_irq_save(flags);				      \
308		result = v->counter;					      \
309		result c_op i;						      \
310		v->counter = result;					      \
311		raw_local_irq_restore(flags);				      \
312	}								      \
313									      \
314	return result;							      \
315}
316
317#define ATOMIC64_FETCH_OP(op, c_op, asm_op)				      \
318static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)    \
319{									      \
320	s64 result;							      \
321									      \
322	if (kernel_uses_llsc) {						      \
323		s64 temp;						      \
324									      \
325		loongson_llsc_mb();					      \
326		__asm__ __volatile__(					      \
327		"	.set	push					\n"   \
328		"	.set	"MIPS_ISA_LEVEL"			\n"   \
329		"1:	lld	%1, %2		# atomic64_fetch_" #op "\n"   \
330		"	" #asm_op " %0, %1, %3				\n"   \
331		"	scd	%0, %2					\n"   \
332		"\t" __scbeqz "	%0, 1b					\n"   \
333		"	move	%0, %1					\n"   \
334		"	.set	pop					\n"   \
335		: "=&r" (result), "=&r" (temp),				      \
336		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
337		: "Ir" (i) : __LLSC_CLOBBER);				      \
338	} else {							      \
339		unsigned long flags;					      \
340									      \
341		raw_local_irq_save(flags);				      \
342		result = v->counter;					      \
343		v->counter c_op i;					      \
344		raw_local_irq_restore(flags);				      \
345	}								      \
346									      \
347	return result;							      \
348}
349
350#define ATOMIC64_OPS(op, c_op, asm_op)					      \
351	ATOMIC64_OP(op, c_op, asm_op)					      \
352	ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
353	ATOMIC64_FETCH_OP(op, c_op, asm_op)
354
355ATOMIC64_OPS(add, +=, daddu)
356ATOMIC64_OPS(sub, -=, dsubu)
357
358#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
359#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
360#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
361#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
362
363#undef ATOMIC64_OPS
364#define ATOMIC64_OPS(op, c_op, asm_op)					      \
365	ATOMIC64_OP(op, c_op, asm_op)					      \
366	ATOMIC64_FETCH_OP(op, c_op, asm_op)
367
368ATOMIC64_OPS(and, &=, and)
369ATOMIC64_OPS(or, |=, or)
370ATOMIC64_OPS(xor, ^=, xor)
371
372#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
373#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
374#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
375
376#undef ATOMIC64_OPS
377#undef ATOMIC64_FETCH_OP
378#undef ATOMIC64_OP_RETURN
379#undef ATOMIC64_OP
380
381/*
382 * atomic64_sub_if_positive - conditionally subtract integer from atomic
383 *                            variable
384 * @i: integer value to subtract
385 * @v: pointer of type atomic64_t
386 *
 387 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
388 * The function returns the old value of @v minus @i.
389 */
390static __inline__ s64 atomic64_sub_if_positive(s64 i, atomic64_t * v)
391{
392	s64 result;
393
394	smp_mb__before_llsc();
395
396	if (kernel_uses_llsc) {
397		s64 temp;
398
399		__asm__ __volatile__(
400		"	.set	push					\n"
401		"	.set	"MIPS_ISA_LEVEL"			\n"
402		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
403		"	dsubu	%0, %1, %3				\n"
404		"	move	%1, %0					\n"
405		"	bltz	%0, 1f					\n"
406		"	scd	%1, %2					\n"
407		"\t" __scbeqz "	%1, 1b					\n"
408		"1:							\n"
409		"	.set	pop					\n"
410		: "=&r" (result), "=&r" (temp),
411		  "+" GCC_OFF_SMALL_ASM() (v->counter)
412		: "Ir" (i));
413	} else {
414		unsigned long flags;
415
416		raw_local_irq_save(flags);
417		result = v->counter;
418		result -= i;
419		if (result >= 0)
420			v->counter = result;
421		raw_local_irq_restore(flags);
422	}
423
424	smp_llsc_mb();
425
426	return result;
427}
428
429#define atomic64_cmpxchg(v, o, n) \
430	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
431#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
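
The __typeof__ cast keeps the result typed like the 64-bit counter rather than cmpxchg()'s generic return type; schematically:

/* The macro expands (schematically) to:
 *	(s64)cmpxchg(&v->counter, old, new)
 * so callers compare the result against s64 values without any
 * implicit-conversion surprises (illustrative note, not extra code). */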
432
433/*
434 * atomic64_dec_if_positive - decrement by 1 if old value positive
435 * @v: pointer of type atomic64_t
436 */
437#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)
438
439#endif /* CONFIG_64BIT */
440
441#endif /* _ASM_ATOMIC_H */
v3.15
  1/*
  2 * Atomic operations that C can't guarantee us.  Useful for
  3 * resource counting etc..
  4 *
  5 * But use these as seldom as possible since they are much slower
  6 * than regular operations.
  7 *
  8 * This file is subject to the terms and conditions of the GNU General Public
  9 * License.  See the file "COPYING" in the main directory of this archive
 10 * for more details.
 11 *
 12 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 13 */
 14#ifndef _ASM_ATOMIC_H
 15#define _ASM_ATOMIC_H
 16
 17#include <linux/irqflags.h>
 18#include <linux/types.h>
 19#include <asm/barrier.h>
 20#include <asm/cpu-features.h>
 21#include <asm/cmpxchg.h>
 22#include <asm/war.h>
 23
 24#define ATOMIC_INIT(i)	  { (i) }
 25
 26/*
 27 * atomic_read - read atomic variable
 28 * @v: pointer of type atomic_t
 29 *
 30 * Atomically reads the value of @v.
 31 */
 32#define atomic_read(v)		(*(volatile int *)&(v)->counter)
 33
 34/*
 35 * atomic_set - set atomic variable
 36 * @v: pointer of type atomic_t
 37 * @i: required value
 38 *
 39 * Atomically sets the value of @v to @i.
 40 */
 41#define atomic_set(v, i)		((v)->counter = (i))
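
These older definitions spell single-copy atomicity with a raw volatile access; later kernels (see the v5.4 listing above) write the same intent with READ_ONCE()/WRITE_ONCE(). Schematically:

/* Same idea, two generations of spelling (schematic):
 *	(*(volatile int *)&(v)->counter)	// v3.15 atomic_read
 *	READ_ONCE((v)->counter)			// v5.4 atomic_read
 * Both force exactly one untorn load of ->counter. */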
 42
 43/*
 44 * atomic_add - add integer to atomic variable
 45 * @i: integer value to add
 46 * @v: pointer of type atomic_t
 47 *
 48 * Atomically adds @i to @v.
 49 */
 50static __inline__ void atomic_add(int i, atomic_t * v)
 51{
 52	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 53		int temp;
 54
 55		__asm__ __volatile__(
 56		"	.set	arch=r4000				\n"
 57		"1:	ll	%0, %1		# atomic_add		\n"
 58		"	addu	%0, %2					\n"
 59		"	sc	%0, %1					\n"
 60		"	beqzl	%0, 1b					\n"
 61		"	.set	mips0					\n"
 62		: "=&r" (temp), "+m" (v->counter)
 63		: "Ir" (i));
 64	} else if (kernel_uses_llsc) {
 65		int temp;
 66
 67		do {
 68			__asm__ __volatile__(
 69			"	.set	arch=r4000			\n"
 70			"	ll	%0, %1		# atomic_add	\n"
 71			"	addu	%0, %2				\n"
 72			"	sc	%0, %1				\n"
 73			"	.set	mips0				\n"
 74			: "=&r" (temp), "+m" (v->counter)
 75			: "Ir" (i));
 76		} while (unlikely(!temp));
 77	} else {
 78		unsigned long flags;
 79
 80		raw_local_irq_save(flags);
 81		v->counter += i;
 82		raw_local_irq_restore(flags);
 83	}
 84}
 85
 86/*
 87 * atomic_sub - subtract integer from atomic variable
 88 * @i: integer value to subtract
 89 * @v: pointer of type atomic_t
 90 *
 91 * Atomically subtracts @i from @v.
 92 */
 93static __inline__ void atomic_sub(int i, atomic_t * v)
 94{
 95	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 96		int temp;
 97
 98		__asm__ __volatile__(
 99		"	.set	arch=r4000				\n"
100		"1:	ll	%0, %1		# atomic_sub		\n"
101		"	subu	%0, %2					\n"
102		"	sc	%0, %1					\n"
103		"	beqzl	%0, 1b					\n"
104		"	.set	mips0					\n"
105		: "=&r" (temp), "+m" (v->counter)
106		: "Ir" (i));
107	} else if (kernel_uses_llsc) {
108		int temp;
109
110		do {
111			__asm__ __volatile__(
112			"	.set	arch=r4000			\n"
113			"	ll	%0, %1		# atomic_sub	\n"
114			"	subu	%0, %2				\n"
115			"	sc	%0, %1				\n"
116			"	.set	mips0				\n"
117			: "=&r" (temp), "+m" (v->counter)
118			: "Ir" (i));
119		} while (unlikely(!temp));
120	} else {
121		unsigned long flags;
122
123		raw_local_irq_save(flags);
124		v->counter -= i;
125		raw_local_irq_restore(flags);
126	}
127}
128
129/*
130 * Same as above, but return the result value
131 */
132static __inline__ int atomic_add_return(int i, atomic_t * v)
133{
134	int result;
135
136	smp_mb__before_llsc();
137
138	if (kernel_uses_llsc && R10000_LLSC_WAR) {
139		int temp;
140
141		__asm__ __volatile__(
142		"	.set	arch=r4000				\n"
143		"1:	ll	%1, %2		# atomic_add_return	\n"
144		"	addu	%0, %1, %3				\n"
145		"	sc	%0, %2					\n"
146		"	beqzl	%0, 1b					\n"
147		"	addu	%0, %1, %3				\n"
148		"	.set	mips0					\n"
149		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
150		: "Ir" (i));
151	} else if (kernel_uses_llsc) {
152		int temp;
153
154		do {
155			__asm__ __volatile__(
156			"	.set	arch=r4000			\n"
157			"	ll	%1, %2	# atomic_add_return	\n"
158			"	addu	%0, %1, %3			\n"
159			"	sc	%0, %2				\n"
160			"	.set	mips0				\n"
161			: "=&r" (result), "=&r" (temp), "+m" (v->counter)
162			: "Ir" (i));
163		} while (unlikely(!result));
164
165		result = temp + i;
166	} else {
167		unsigned long flags;
168
169		raw_local_irq_save(flags);
170		result = v->counter;
171		result += i;
172		v->counter = result;
173		raw_local_irq_restore(flags);
174	}
175
176	smp_llsc_mb();
177
178	return result;
179}
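
In the plain-LL/SC path, %0 ends the loop holding the sc success flag rather than the sum, which is why the new value is recomputed as temp + i in C afterwards. A hedged C11 model of the whole function (a sketch, not the generated code):

#include <stdatomic.h>

/* Userspace model (hypothetical): returns the new value, with the
 * full-barrier semantics the smp_mb_* calls above provide. */
static inline int model_atomic_add_return(int i, atomic_int *counter)
{
	int old = atomic_load_explicit(counter, memory_order_relaxed);

	while (!atomic_compare_exchange_weak_explicit(counter, &old, old + i,
						      memory_order_seq_cst,
						      memory_order_relaxed))
		;
	return old + i;		/* the "result = temp + i" step */
}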
180
181static __inline__ int atomic_sub_return(int i, atomic_t * v)
182{
183	int result;
184
185	smp_mb__before_llsc();
186
187	if (kernel_uses_llsc && R10000_LLSC_WAR) {
188		int temp;
189
190		__asm__ __volatile__(
191		"	.set	arch=r4000				\n"
192		"1:	ll	%1, %2		# atomic_sub_return	\n"
193		"	subu	%0, %1, %3				\n"
194		"	sc	%0, %2					\n"
195		"	beqzl	%0, 1b					\n"
196		"	subu	%0, %1, %3				\n"
197		"	.set	mips0					\n"
198		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
199		: "Ir" (i), "m" (v->counter)
200		: "memory");
201
202		result = temp - i;
203	} else if (kernel_uses_llsc) {
204		int temp;
205
206		do {
207			__asm__ __volatile__(
208			"	.set	arch=r4000			\n"
209			"	ll	%1, %2	# atomic_sub_return	\n"
210			"	subu	%0, %1, %3			\n"
211			"	sc	%0, %2				\n"
212			"	.set	mips0				\n"
213			: "=&r" (result), "=&r" (temp), "+m" (v->counter)
214			: "Ir" (i));
215		} while (unlikely(!result));
216
217		result = temp - i;
218	} else {
219		unsigned long flags;
220
221		raw_local_irq_save(flags);
222		result = v->counter;
223		result -= i;
224		v->counter = result;
225		raw_local_irq_restore(flags);
226	}
227
228	smp_llsc_mb();
229
230	return result;
231}
232
233/*
234 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
235 * @i: integer value to subtract
236 * @v: pointer of type atomic_t
237 *
238 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
239 * The function returns the old value of @v minus @i.
240 */
241static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
242{
243	int result;
244
245	smp_mb__before_llsc();
246
247	if (kernel_uses_llsc && R10000_LLSC_WAR) {
248		int temp;
249
250		__asm__ __volatile__(
251		"	.set	arch=r4000				\n"
252		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
253		"	subu	%0, %1, %3				\n"
254		"	bltz	%0, 1f					\n"
255		"	sc	%0, %2					\n"
256		"	.set	noreorder				\n"
257		"	beqzl	%0, 1b					\n"
258		"	 subu	%0, %1, %3				\n"
259		"	.set	reorder					\n"
260		"1:							\n"
261		"	.set	mips0					\n"
262		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
263		: "Ir" (i), "m" (v->counter)
264		: "memory");
265	} else if (kernel_uses_llsc) {
266		int temp;
267
268		__asm__ __volatile__(
269		"	.set	arch=r4000				\n"
270		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
271		"	subu	%0, %1, %3				\n"
272		"	bltz	%0, 1f					\n"
273		"	sc	%0, %2					\n"
274		"	.set	noreorder				\n"
275		"	beqz	%0, 1b					\n"
276		"	 subu	%0, %1, %3				\n"
277		"	.set	reorder					\n"
278		"1:							\n"
279		"	.set	mips0					\n"
280		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
281		: "Ir" (i));
282	} else {
283		unsigned long flags;
284
285		raw_local_irq_save(flags);
286		result = v->counter;
287		result -= i;
288		if (result >= 0)
289			v->counter = result;
290		raw_local_irq_restore(flags);
291	}
292
293	smp_llsc_mb();
294
295	return result;
296}
297
298#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
299#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
300
301/**
302 * __atomic_add_unless - add unless the number is a given value
303 * @v: pointer of type atomic_t
304 * @a: the amount to add to v...
305 * @u: ...unless v is equal to u.
306 *
307 * Atomically adds @a to @v, so long as it was not @u.
308 * Returns the old value of @v.
309 */
310static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
311{
312	int c, old;
313	c = atomic_read(v);
314	for (;;) {
315		if (unlikely(c == (u)))
316			break;
317		old = atomic_cmpxchg((v), c, c + (a));
318		if (likely(old == c))
319			break;
320		c = old;
321	}
322	return c;
323}
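
The classic consumer of this helper is "take a reference unless it already hit zero"; a hedged sketch (try_get_ref is hypothetical):

/* Hypothetical: take a reference only while the object is still live.
 * __atomic_add_unless() returns the old value, so 0 means the count
 * was already zero and nothing was added. */
static inline bool try_get_ref(atomic_t *refcount)
{
	return __atomic_add_unless(refcount, 1, 0) != 0;
}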
324
325#define atomic_dec_return(v) atomic_sub_return(1, (v))
326#define atomic_inc_return(v) atomic_add_return(1, (v))
327
328/*
329 * atomic_sub_and_test - subtract value from variable and test result
330 * @i: integer value to subtract
331 * @v: pointer of type atomic_t
332 *
333 * Atomically subtracts @i from @v and returns
334 * true if the result is zero, or false for all
335 * other cases.
336 */
337#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
338
339/*
340 * atomic_inc_and_test - increment and test
341 * @v: pointer of type atomic_t
342 *
343 * Atomically increments @v by 1
344 * and returns true if the result is zero, or false for all
345 * other cases.
346 */
347#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
348
349/*
350 * atomic_dec_and_test - decrement by 1 and test
351 * @v: pointer of type atomic_t
352 *
353 * Atomically decrements @v by 1 and
354 * returns true if the result is 0, or false for all other
355 * cases.
356 */
357#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
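
atomic_dec_and_test() is the canonical last-put check in reference counting; a hedged sketch (struct my_object and my_free() are hypothetical):

/* Hypothetical release path: exactly one caller sees the count hit
 * zero and frees the object. */
static void my_object_put(struct my_object *obj)
{
	if (atomic_dec_and_test(&obj->refcount))
		my_free(obj);
}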
358
359/*
360 * atomic_dec_if_positive - decrement by 1 if old value positive
361 * @v: pointer of type atomic_t
362 */
363#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
364
365/*
366 * atomic_inc - increment atomic variable
367 * @v: pointer of type atomic_t
368 *
369 * Atomically increments @v by 1.
370 */
371#define atomic_inc(v) atomic_add(1, (v))
372
373/*
374 * atomic_dec - decrement atomic variable
375 * @v: pointer of type atomic_t
376 *
377 * Atomically decrements @v by 1.
378 */
379#define atomic_dec(v) atomic_sub(1, (v))
380
381/*
382 * atomic_add_negative - add and test if negative
383 * @v: pointer of type atomic_t
384 * @i: integer value to add
385 *
386 * Atomically adds @i to @v and returns true
387 * if the result is negative, or false when
388 * result is greater than or equal to zero.
389 */
390#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
391
392#ifdef CONFIG_64BIT
393
394#define ATOMIC64_INIT(i)    { (i) }
395
396/*
397 * atomic64_read - read atomic variable
398 * @v: pointer of type atomic64_t
399 * Atomically reads the value of @v.
400 */
401#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
402
403/*
404 * atomic64_set - set atomic variable
405 * @v: pointer of type atomic64_t
406 * @i: required value
407 */
408#define atomic64_set(v, i)	((v)->counter = (i))
409
410/*
411 * atomic64_add - add integer to atomic variable
412 * @i: integer value to add
413 * @v: pointer of type atomic64_t
414 *
415 * Atomically adds @i to @v.
416 */
417static __inline__ void atomic64_add(long i, atomic64_t * v)
418{
419	if (kernel_uses_llsc && R10000_LLSC_WAR) {
420		long temp;
421
422		__asm__ __volatile__(
423		"	.set	arch=r4000				\n"
424		"1:	lld	%0, %1		# atomic64_add		\n"
425		"	daddu	%0, %2					\n"
426		"	scd	%0, %1					\n"
427		"	beqzl	%0, 1b					\n"
428		"	.set	mips0					\n"
429		: "=&r" (temp), "+m" (v->counter)
430		: "Ir" (i));
431	} else if (kernel_uses_llsc) {
432		long temp;
433
434		do {
435			__asm__ __volatile__(
436			"	.set	arch=r4000			\n"
437			"	lld	%0, %1		# atomic64_add	\n"
438			"	daddu	%0, %2				\n"
439			"	scd	%0, %1				\n"
440			"	.set	mips0				\n"
441			: "=&r" (temp), "+m" (v->counter)
442			: "Ir" (i));
443		} while (unlikely(!temp));
444	} else {
445		unsigned long flags;
446
447		raw_local_irq_save(flags);
448		v->counter += i;
449		raw_local_irq_restore(flags);
450	}
451}
452
453/*
454 * atomic64_sub - subtract integer from atomic variable
455 * @i: integer value to subtract
456 * @v: pointer of type atomic64_t
457 *
458 * Atomically subtracts @i from @v.
459 */
460static __inline__ void atomic64_sub(long i, atomic64_t * v)
461{
462	if (kernel_uses_llsc && R10000_LLSC_WAR) {
463		long temp;
464
465		__asm__ __volatile__(
466		"	.set	arch=r4000				\n"
467		"1:	lld	%0, %1		# atomic64_sub		\n"
468		"	dsubu	%0, %2					\n"
469		"	scd	%0, %1					\n"
470		"	beqzl	%0, 1b					\n"
471		"	.set	mips0					\n"
472		: "=&r" (temp), "+m" (v->counter)
473		: "Ir" (i));
474	} else if (kernel_uses_llsc) {
475		long temp;
476
477		do {
478			__asm__ __volatile__(
479			"	.set	arch=r4000			\n"
480			"	lld	%0, %1		# atomic64_sub	\n"
481			"	dsubu	%0, %2				\n"
482			"	scd	%0, %1				\n"
483			"	.set	mips0				\n"
484			: "=&r" (temp), "+m" (v->counter)
485			: "Ir" (i));
486		} while (unlikely(!temp));
487	} else {
488		unsigned long flags;
489
490		raw_local_irq_save(flags);
491		v->counter -= i;
492		raw_local_irq_restore(flags);
493	}
494}
495
496/*
497 * Same as above, but return the result value
498 */
499static __inline__ long atomic64_add_return(long i, atomic64_t * v)
500{
501	long result;
502
503	smp_mb__before_llsc();
504
505	if (kernel_uses_llsc && R10000_LLSC_WAR) {
506		long temp;
507
508		__asm__ __volatile__(
509		"	.set	arch=r4000				\n"
510		"1:	lld	%1, %2		# atomic64_add_return	\n"
511		"	daddu	%0, %1, %3				\n"
512		"	scd	%0, %2					\n"
513		"	beqzl	%0, 1b					\n"
514		"	daddu	%0, %1, %3				\n"
515		"	.set	mips0					\n"
516		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
517		: "Ir" (i));
518	} else if (kernel_uses_llsc) {
519		long temp;
520
521		do {
522			__asm__ __volatile__(
523			"	.set	arch=r4000			\n"
524			"	lld	%1, %2	# atomic64_add_return	\n"
525			"	daddu	%0, %1, %3			\n"
526			"	scd	%0, %2				\n"
527			"	.set	mips0				\n"
528			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
529			: "Ir" (i), "m" (v->counter)
530			: "memory");
531		} while (unlikely(!result));
532
533		result = temp + i;
534	} else {
535		unsigned long flags;
536
537		raw_local_irq_save(flags);
538		result = v->counter;
539		result += i;
540		v->counter = result;
541		raw_local_irq_restore(flags);
542	}
543
544	smp_llsc_mb();
545
546	return result;
547}
548
549static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
550{
551	long result;
552
553	smp_mb__before_llsc();
554
555	if (kernel_uses_llsc && R10000_LLSC_WAR) {
556		long temp;
557
558		__asm__ __volatile__(
559		"	.set	arch=r4000				\n"
560		"1:	lld	%1, %2		# atomic64_sub_return	\n"
561		"	dsubu	%0, %1, %3				\n"
562		"	scd	%0, %2					\n"
563		"	beqzl	%0, 1b					\n"
564		"	dsubu	%0, %1, %3				\n"
565		"	.set	mips0					\n"
566		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
567		: "Ir" (i), "m" (v->counter)
568		: "memory");
569	} else if (kernel_uses_llsc) {
570		long temp;
571
572		do {
573			__asm__ __volatile__(
574			"	.set	arch=r4000			\n"
575			"	lld	%1, %2	# atomic64_sub_return	\n"
576			"	dsubu	%0, %1, %3			\n"
577			"	scd	%0, %2				\n"
578			"	.set	mips0				\n"
579			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
580			: "Ir" (i), "m" (v->counter)
581			: "memory");
582		} while (unlikely(!result));
583
584		result = temp - i;
585	} else {
586		unsigned long flags;
587
588		raw_local_irq_save(flags);
589		result = v->counter;
590		result -= i;
591		v->counter = result;
592		raw_local_irq_restore(flags);
593	}
594
595	smp_llsc_mb();
596
597	return result;
598}
599
600/*
601 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
602 * @i: integer value to subtract
603 * @v: pointer of type atomic64_t
604 *
605 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
606 * The function returns the old value of @v minus @i.
607 */
608static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
609{
610	long result;
611
612	smp_mb__before_llsc();
613
614	if (kernel_uses_llsc && R10000_LLSC_WAR) {
615		long temp;
616
617		__asm__ __volatile__(
618		"	.set	arch=r4000				\n"
619		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
620		"	dsubu	%0, %1, %3				\n"
621		"	bltz	%0, 1f					\n"
622		"	scd	%0, %2					\n"
623		"	.set	noreorder				\n"
624		"	beqzl	%0, 1b					\n"
625		"	 dsubu	%0, %1, %3				\n"
626		"	.set	reorder					\n"
627		"1:							\n"
628		"	.set	mips0					\n"
629		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
630		: "Ir" (i), "m" (v->counter)
631		: "memory");
632	} else if (kernel_uses_llsc) {
633		long temp;
634
635		__asm__ __volatile__(
636		"	.set	arch=r4000				\n"
637		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
638		"	dsubu	%0, %1, %3				\n"
639		"	bltz	%0, 1f					\n"
640		"	scd	%0, %2					\n"
641		"	.set	noreorder				\n"
642		"	beqz	%0, 1b					\n"
643		"	 dsubu	%0, %1, %3				\n"
644		"	.set	reorder					\n"
645		"1:							\n"
646		"	.set	mips0					\n"
647		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
648		: "Ir" (i));
649	} else {
650		unsigned long flags;
651
652		raw_local_irq_save(flags);
653		result = v->counter;
654		result -= i;
655		if (result >= 0)
656			v->counter = result;
657		raw_local_irq_restore(flags);
658	}
659
660	smp_llsc_mb();
661
662	return result;
663}
664
665#define atomic64_cmpxchg(v, o, n) \
666	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
667#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
668
669/**
670 * atomic64_add_unless - add unless the number is a given value
671 * @v: pointer of type atomic64_t
672 * @a: the amount to add to v...
673 * @u: ...unless v is equal to u.
674 *
675 * Atomically adds @a to @v, so long as it was not @u.
676 * Returns the old value of @v.
677 */
678static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
679{
680	long c, old;
681	c = atomic64_read(v);
682	for (;;) {
683		if (unlikely(c == (u)))
684			break;
685		old = atomic64_cmpxchg((v), c, c + (a));
686		if (likely(old == c))
687			break;
688		c = old;
689	}
690	return c != (u);
691}
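
Note the asymmetry with the 32-bit helper above: __atomic_add_unless() returns the old value, while atomic64_add_unless() returns whether the add happened. With made-up values:

/* Illustration (hypothetical values):
 * counter == 0:  __atomic_add_unless(v32, 1, 0) -> 0  (old value; no add)
 *                atomic64_add_unless(v64, 1, 0) -> 0  (false; no add)
 * counter == 5:  __atomic_add_unless(v32, 1, 0) -> 5  (old value; added)
 *                atomic64_add_unless(v64, 1, 0) -> 1  (true; added)
 */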
692
693#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
694
695#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
696#define atomic64_inc_return(v) atomic64_add_return(1, (v))
697
698/*
699 * atomic64_sub_and_test - subtract value from variable and test result
700 * @i: integer value to subtract
701 * @v: pointer of type atomic64_t
702 *
703 * Atomically subtracts @i from @v and returns
704 * true if the result is zero, or false for all
705 * other cases.
706 */
707#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
708
709/*
710 * atomic64_inc_and_test - increment and test
711 * @v: pointer of type atomic64_t
712 *
713 * Atomically increments @v by 1
714 * and returns true if the result is zero, or false for all
715 * other cases.
716 */
717#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
718
719/*
720 * atomic64_dec_and_test - decrement by 1 and test
721 * @v: pointer of type atomic64_t
722 *
723 * Atomically decrements @v by 1 and
724 * returns true if the result is 0, or false for all other
725 * cases.
726 */
727#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
728
729/*
730 * atomic64_dec_if_positive - decrement by 1 if old value positive
731 * @v: pointer of type atomic64_t
732 */
733#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)
734
735/*
736 * atomic64_inc - increment atomic variable
737 * @v: pointer of type atomic64_t
738 *
739 * Atomically increments @v by 1.
740 */
741#define atomic64_inc(v) atomic64_add(1, (v))
742
743/*
744 * atomic64_dec - decrement atomic variable
745 * @v: pointer of type atomic64_t
746 *
747 * Atomically decrements @v by 1.
748 */
749#define atomic64_dec(v) atomic64_sub(1, (v))
750
751/*
752 * atomic64_add_negative - add and test if negative
753 * @v: pointer of type atomic64_t
754 * @i: integer value to add
755 *
756 * Atomically adds @i to @v and returns true
757 * if the result is negative, or false when
758 * result is greater than or equal to zero.
759 */
760#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
761
762#endif /* CONFIG_64BIT */
763
764/*
765 * atomic*_return operations are serializing; the non-*_return
766 * versions are not.
767 */
768#define smp_mb__before_atomic_dec()	smp_mb__before_llsc()
769#define smp_mb__after_atomic_dec()	smp_llsc_mb()
770#define smp_mb__before_atomic_inc()	smp_mb__before_llsc()
771#define smp_mb__after_atomic_inc()	smp_llsc_mb()
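
These wrappers exist because the bare inc/dec ops are unordered; callers pair them with a barrier when the atomic op must be ordered against surrounding accesses. A hedged usage sketch (the surrounding logic is made up):

/* Hypothetical: make earlier stores visible before dropping the count
 * that another CPU may be polling on. */
static void finish_work_item(atomic_t *pending)
{
	smp_mb__before_atomic_dec();
	atomic_dec(pending);
}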
772
773#endif /* _ASM_ATOMIC_H */