v5.4 (arch/mips/include/asm/atomic.h)
  1/*
  2 * Atomic operations that C can't guarantee us.  Useful for
  3 * resource counting etc.
  4 *
  5 * But use these as seldom as possible since they are much slower
  6 * than regular operations.
  7 *
  8 * This file is subject to the terms and conditions of the GNU General Public
  9 * License.  See the file "COPYING" in the main directory of this archive
 10 * for more details.
 11 *
 12 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 13 */
 14#ifndef _ASM_ATOMIC_H
 15#define _ASM_ATOMIC_H
 16
 17#include <linux/irqflags.h>
 18#include <linux/types.h>
 19#include <asm/barrier.h>
 20#include <asm/compiler.h>
 21#include <asm/cpu-features.h>
 22#include <asm/cmpxchg.h>
 23#include <asm/war.h>
 24
 25/*
 26 * Using a branch-likely instruction to check the result of an sc instruction
 27 * works around a bug present in R10000 CPUs prior to revision 3.0 that could
 28 * cause ll-sc sequences to execute non-atomically.
 29 */
 30#if R10000_LLSC_WAR
 31# define __scbeqz "beqzl"
 32#else
 33# define __scbeqz "beqz"
 34#endif
 35
 36#define ATOMIC_INIT(i)	  { (i) }
 37
 38/*
 39 * atomic_read - read atomic variable
 40 * @v: pointer of type atomic_t
 41 *
 42 * Atomically reads the value of @v.
 43 */
 44#define atomic_read(v)		READ_ONCE((v)->counter)
 45
 46/*
 47 * atomic_set - set atomic variable
 48 * @v: pointer of type atomic_t
 49 * @i: required value
 50 *
 51 * Atomically sets the value of @v to @i.
 52 */
 53#define atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))
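/*
 * Editor's illustration (not part of this header): typical use of the two
 * accessors above for a plain shared counter.  The variable and function
 * names below are hypothetical.  Neither atomic_read() nor atomic_set()
 * implies any memory barrier; they only guarantee a single, tear-free
 * load/store of ->counter.
 */
static inline void example_reset_pending(atomic_t *pending)
{
	atomic_set(pending, 0);			/* plain WRITE_ONCE() store */
}

static inline int example_pending_count(atomic_t *pending)
{
	return atomic_read(pending);		/* plain READ_ONCE() load */
}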
 54
 55#define ATOMIC_OP(op, c_op, asm_op)					      \
 56static __inline__ void atomic_##op(int i, atomic_t * v)			      \
 57{									      \
 58	if (kernel_uses_llsc) {						      \
 59		int temp;						      \
 60									      \
 61		loongson_llsc_mb();					      \
 62		__asm__ __volatile__(					      \
 63		"	.set	push					\n"   \
 64		"	.set	"MIPS_ISA_LEVEL"			\n"   \
 65		"1:	ll	%0, %1		# atomic_" #op "	\n"   \
 66		"	" #asm_op " %0, %2				\n"   \
 67		"	sc	%0, %1					\n"   \
 68		"\t" __scbeqz "	%0, 1b					\n"   \
 69		"	.set	pop					\n"   \
 70		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
 71		: "Ir" (i) : __LLSC_CLOBBER);				      \
 72	} else {							      \
 73		unsigned long flags;					      \
 74									      \
 75		raw_local_irq_save(flags);				      \
 76		v->counter c_op i;					      \
 77		raw_local_irq_restore(flags);				      \
 78	}								      \
 79}
 80
 81#define ATOMIC_OP_RETURN(op, c_op, asm_op)				      \
 82static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)	      \
 83{									      \
 84	int result;							      \
 85									      \
 86	if (kernel_uses_llsc) {						      \
 87		int temp;						      \
 88									      \
 89		loongson_llsc_mb();					      \
 90		__asm__ __volatile__(					      \
 91		"	.set	push					\n"   \
 92		"	.set	"MIPS_ISA_LEVEL"			\n"   \
 93		"1:	ll	%1, %2		# atomic_" #op "_return	\n"   \
 94		"	" #asm_op " %0, %1, %3				\n"   \
 95		"	sc	%0, %2					\n"   \
 96		"\t" __scbeqz "	%0, 1b					\n"   \
 97		"	" #asm_op " %0, %1, %3				\n"   \
 98		"	.set	pop					\n"   \
 99		: "=&r" (result), "=&r" (temp),				      \
100		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
101		: "Ir" (i) : __LLSC_CLOBBER);				      \
102	} else {							      \
103		unsigned long flags;					      \
104									      \
105		raw_local_irq_save(flags);				      \
106		result = v->counter;					      \
107		result c_op i;						      \
108		v->counter = result;					      \
109		raw_local_irq_restore(flags);				      \
110	}								      \
111									      \
112	return result;							      \
113}
114
115#define ATOMIC_FETCH_OP(op, c_op, asm_op)				      \
116static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)	      \
117{									      \
118	int result;							      \
119									      \
120	if (kernel_uses_llsc) {						      \
121		int temp;						      \
122									      \
123		loongson_llsc_mb();					      \
124		__asm__ __volatile__(					      \
125		"	.set	push					\n"   \
126		"	.set	"MIPS_ISA_LEVEL"			\n"   \
127		"1:	ll	%1, %2		# atomic_fetch_" #op "	\n"   \
128		"	" #asm_op " %0, %1, %3				\n"   \
129		"	sc	%0, %2					\n"   \
130		"\t" __scbeqz "	%0, 1b					\n"   \
131		"	.set	pop					\n"   \
132		"	move	%0, %1					\n"   \
133		: "=&r" (result), "=&r" (temp),				      \
134		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
135		: "Ir" (i) : __LLSC_CLOBBER);				      \
136	} else {							      \
137		unsigned long flags;					      \
138									      \
139		raw_local_irq_save(flags);				      \
140		result = v->counter;					      \
141		v->counter c_op i;					      \
142		raw_local_irq_restore(flags);				      \
143	}								      \
144									      \
145	return result;							      \
146}
147
148#define ATOMIC_OPS(op, c_op, asm_op)					      \
149	ATOMIC_OP(op, c_op, asm_op)					      \
150	ATOMIC_OP_RETURN(op, c_op, asm_op)				      \
151	ATOMIC_FETCH_OP(op, c_op, asm_op)
152
153ATOMIC_OPS(add, +=, addu)
154ATOMIC_OPS(sub, -=, subu)
155
156#define atomic_add_return_relaxed	atomic_add_return_relaxed
157#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
158#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
159#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
160
161#undef ATOMIC_OPS
162#define ATOMIC_OPS(op, c_op, asm_op)					      \
163	ATOMIC_OP(op, c_op, asm_op)					      \
164	ATOMIC_FETCH_OP(op, c_op, asm_op)
165
166ATOMIC_OPS(and, &=, and)
167ATOMIC_OPS(or, |=, or)
168ATOMIC_OPS(xor, ^=, xor)
169
170#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
171#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
172#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
173
174#undef ATOMIC_OPS
175#undef ATOMIC_FETCH_OP
176#undef ATOMIC_OP_RETURN
177#undef ATOMIC_OP
178
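/*
 * Editor's illustration (not part of this header): what the ATOMIC_OPS()
 * expansions above provide to callers.  The function and variable names are
 * hypothetical.  Only the void ops and the _relaxed variants are defined in
 * this file; the fully ordered atomic_add_return()/atomic_fetch_add() etc.
 * are assembled from these by generic <linux/atomic.h> code.
 */
static inline void example_atomic_ops_usage(atomic_t *counter)
{
	int old, now;

	atomic_add(5, counter);				/* no return value, no ordering */
	atomic_and(0xff, counter);			/* from ATOMIC_OPS(and, &=, and) */
	old = atomic_fetch_add_relaxed(1, counter);	/* value before the add */
	now = atomic_sub_return_relaxed(1, counter);	/* value after the sub */
	(void)old;
	(void)now;
}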
179/*
180 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
181 * @i: integer value to subtract
182 * @v: pointer of type atomic_t
183 *
 184 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
185 * The function returns the old value of @v minus @i.
186 */
187static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
188{
189	int result;
190
191	smp_mb__before_llsc();
192
193	if (kernel_uses_llsc) {
194		int temp;
195
196		loongson_llsc_mb();
197		__asm__ __volatile__(
198		"	.set	push					\n"
199		"	.set	"MIPS_ISA_LEVEL"			\n"
200		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
201		"	.set	pop					\n"
202		"	subu	%0, %1, %3				\n"
203		"	move	%1, %0					\n"
204		"	bltz	%0, 2f					\n"
205		"	.set	push					\n"
206		"	.set	"MIPS_ISA_LEVEL"			\n"
207		"	sc	%1, %2					\n"
208		"\t" __scbeqz "	%1, 1b					\n"
209		"2:							\n"
210		"	.set	pop					\n"
211		: "=&r" (result), "=&r" (temp),
212		  "+" GCC_OFF_SMALL_ASM() (v->counter)
213		: "Ir" (i) : __LLSC_CLOBBER);
214	} else {
215		unsigned long flags;
216
217		raw_local_irq_save(flags);
218		result = v->counter;
219		result -= i;
220		if (result >= 0)
221			v->counter = result;
222		raw_local_irq_restore(flags);
223	}
224
225	smp_llsc_mb();
226
227	return result;
228}
229
230#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
231#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
232
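/*
 * Editor's illustration (not part of this header): atomic_cmpxchg() is the
 * usual building block for lock-free read-modify-write loops.  The helper
 * below is a hypothetical example that doubles a counter without letting it
 * exceed a caller-supplied cap.
 */
static inline int example_atomic_double_capped(atomic_t *v, int cap)
{
	int old, prev, val;

	old = atomic_read(v);
	for (;;) {
		val = (old * 2 > cap) ? cap : old * 2;
		prev = atomic_cmpxchg(v, old, val);	/* fully ordered */
		if (prev == old)
			break;				/* our update won the race */
		old = prev;				/* someone else changed it; retry */
	}
	return val;
}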
233/*
234 * atomic_dec_if_positive - decrement by 1 if old value positive
235 * @v: pointer of type atomic_t
236 */
237#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
238
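/*
 * Editor's illustration (not part of this header): atomic_dec_if_positive()
 * behaves like a non-blocking semaphore "down": the decrement only takes
 * effect when it does not drive the counter negative.  The helper name is
 * hypothetical.
 */
static inline bool example_try_take_token(atomic_t *tokens)
{
	/* Returns old value minus 1; a negative result means nothing was taken. */
	return atomic_dec_if_positive(tokens) >= 0;
}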
239#ifdef CONFIG_64BIT
240
241#define ATOMIC64_INIT(i)    { (i) }
242
243/*
244 * atomic64_read - read atomic variable
245 * @v: pointer of type atomic64_t
 246 * Atomically reads the value of @v.
247 */
248#define atomic64_read(v)	READ_ONCE((v)->counter)
249
250/*
251 * atomic64_set - set atomic variable
252 * @v: pointer of type atomic64_t
253 * @i: required value
254 */
255#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))
256
257#define ATOMIC64_OP(op, c_op, asm_op)					      \
258static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		      \
259{									      \
260	if (kernel_uses_llsc) {						      \
261		s64 temp;						      \
262									      \
263		loongson_llsc_mb();					      \
264		__asm__ __volatile__(					      \
265		"	.set	push					\n"   \
266		"	.set	"MIPS_ISA_LEVEL"			\n"   \
267		"1:	lld	%0, %1		# atomic64_" #op "	\n"   \
268		"	" #asm_op " %0, %2				\n"   \
269		"	scd	%0, %1					\n"   \
270		"\t" __scbeqz "	%0, 1b					\n"   \
271		"	.set	pop					\n"   \
272		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
273		: "Ir" (i) : __LLSC_CLOBBER);				      \
274	} else {							      \
275		unsigned long flags;					      \
276									      \
277		raw_local_irq_save(flags);				      \
278		v->counter c_op i;					      \
279		raw_local_irq_restore(flags);				      \
280	}								      \
281}
282
283#define ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
284static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)   \
285{									      \
286	s64 result;							      \
287									      \
288	if (kernel_uses_llsc) {						      \
289		s64 temp;						      \
290									      \
291		loongson_llsc_mb();					      \
292		__asm__ __volatile__(					      \
293		"	.set	push					\n"   \
294		"	.set	"MIPS_ISA_LEVEL"			\n"   \
295		"1:	lld	%1, %2		# atomic64_" #op "_return\n"  \
296		"	" #asm_op " %0, %1, %3				\n"   \
297		"	scd	%0, %2					\n"   \
298		"\t" __scbeqz "	%0, 1b					\n"   \
299		"	" #asm_op " %0, %1, %3				\n"   \
300		"	.set	pop					\n"   \
301		: "=&r" (result), "=&r" (temp),				      \
302		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
303		: "Ir" (i) : __LLSC_CLOBBER);				      \
304	} else {							      \
305		unsigned long flags;					      \
306									      \
307		raw_local_irq_save(flags);				      \
308		result = v->counter;					      \
309		result c_op i;						      \
310		v->counter = result;					      \
311		raw_local_irq_restore(flags);				      \
312	}								      \
313									      \
314	return result;							      \
315}
316
317#define ATOMIC64_FETCH_OP(op, c_op, asm_op)				      \
318static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)    \
319{									      \
320	s64 result;							      \
321									      \
322	if (kernel_uses_llsc) {						      \
323		s64 temp;						      \
324									      \
325		loongson_llsc_mb();					      \
326		__asm__ __volatile__(					      \
327		"	.set	push					\n"   \
328		"	.set	"MIPS_ISA_LEVEL"			\n"   \
329		"1:	lld	%1, %2		# atomic64_fetch_" #op "\n"   \
330		"	" #asm_op " %0, %1, %3				\n"   \
331		"	scd	%0, %2					\n"   \
332		"\t" __scbeqz "	%0, 1b					\n"   \
333		"	move	%0, %1					\n"   \
334		"	.set	pop					\n"   \
335		: "=&r" (result), "=&r" (temp),				      \
336		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
337		: "Ir" (i) : __LLSC_CLOBBER);				      \
338	} else {							      \
339		unsigned long flags;					      \
340									      \
341		raw_local_irq_save(flags);				      \
342		result = v->counter;					      \
343		v->counter c_op i;					      \
344		raw_local_irq_restore(flags);				      \
345	}								      \
346									      \
347	return result;							      \
348}
349
350#define ATOMIC64_OPS(op, c_op, asm_op)					      \
351	ATOMIC64_OP(op, c_op, asm_op)					      \
352	ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
353	ATOMIC64_FETCH_OP(op, c_op, asm_op)
354
355ATOMIC64_OPS(add, +=, daddu)
356ATOMIC64_OPS(sub, -=, dsubu)
357
358#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
359#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
360#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
361#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
362
363#undef ATOMIC64_OPS
364#define ATOMIC64_OPS(op, c_op, asm_op)					      \
365	ATOMIC64_OP(op, c_op, asm_op)					      \
366	ATOMIC64_FETCH_OP(op, c_op, asm_op)
367
368ATOMIC64_OPS(and, &=, and)
369ATOMIC64_OPS(or, |=, or)
370ATOMIC64_OPS(xor, ^=, xor)
371
372#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
373#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
374#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
375
376#undef ATOMIC64_OPS
377#undef ATOMIC64_FETCH_OP
378#undef ATOMIC64_OP_RETURN
379#undef ATOMIC64_OP
380
381/*
382 * atomic64_sub_if_positive - conditionally subtract integer from atomic
383 *                            variable
384 * @i: integer value to subtract
385 * @v: pointer of type atomic64_t
386 *
 387 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
388 * The function returns the old value of @v minus @i.
389 */
390static __inline__ s64 atomic64_sub_if_positive(s64 i, atomic64_t * v)
391{
392	s64 result;
393
394	smp_mb__before_llsc();
395
396	if (kernel_uses_llsc) {
397		s64 temp;
398
399		__asm__ __volatile__(
400		"	.set	push					\n"
401		"	.set	"MIPS_ISA_LEVEL"			\n"
402		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
403		"	dsubu	%0, %1, %3				\n"
404		"	move	%1, %0					\n"
405		"	bltz	%0, 1f					\n"
406		"	scd	%1, %2					\n"
407		"\t" __scbeqz "	%1, 1b					\n"
408		"1:							\n"
409		"	.set	pop					\n"
410		: "=&r" (result), "=&r" (temp),
411		  "+" GCC_OFF_SMALL_ASM() (v->counter)
412		: "Ir" (i));
413	} else {
414		unsigned long flags;
415
416		raw_local_irq_save(flags);
417		result = v->counter;
418		result -= i;
419		if (result >= 0)
420			v->counter = result;
421		raw_local_irq_restore(flags);
422	}
423
424	smp_llsc_mb();
425
426	return result;
427}
428
429#define atomic64_cmpxchg(v, o, n) \
430	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
431#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
432
433/*
434 * atomic64_dec_if_positive - decrement by 1 if old value positive
435 * @v: pointer of type atomic64_t
436 */
437#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)
438
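/*
 * Editor's illustration (not part of this header): on 64-bit kernels the
 * atomic64_* API mirrors the 32-bit one, e.g. for a simple monotonic ID
 * allocator.  The function name is hypothetical.
 */
static inline s64 example_alloc_id(atomic64_t *next_id)
{
	/* Relaxed ordering is enough when the caller only needs uniqueness. */
	return atomic64_fetch_add_relaxed(1, next_id);
}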
439#endif /* CONFIG_64BIT */
440
441#endif /* _ASM_ATOMIC_H */
v3.1 (arch/mips/include/asm/atomic.h)
  1/*
  2 * Atomic operations that C can't guarantee us.  Useful for
  3 * resource counting etc.
  4 *
  5 * But use these as seldom as possible since they are much slower
  6 * than regular operations.
  7 *
  8 * This file is subject to the terms and conditions of the GNU General Public
  9 * License.  See the file "COPYING" in the main directory of this archive
 10 * for more details.
 11 *
 12 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 13 */
 14#ifndef _ASM_ATOMIC_H
 15#define _ASM_ATOMIC_H
 16
 17#include <linux/irqflags.h>
 18#include <linux/types.h>
 19#include <asm/barrier.h>
 20#include <asm/cpu-features.h>
 21#include <asm/war.h>
 22#include <asm/system.h>
 23
 24#define ATOMIC_INIT(i)    { (i) }
 25
 26/*
 27 * atomic_read - read atomic variable
 28 * @v: pointer of type atomic_t
 29 *
 30 * Atomically reads the value of @v.
 31 */
 32#define atomic_read(v)		(*(volatile int *)&(v)->counter)
 33
 34/*
 35 * atomic_set - set atomic variable
 36 * @v: pointer of type atomic_t
 37 * @i: required value
 38 *
 39 * Atomically sets the value of @v to @i.
 40 */
 41#define atomic_set(v, i)		((v)->counter = (i))
 42
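/*
 * Editor's note (not part of this header): the volatile cast in
 * atomic_read() above is this kernel generation's way of forcing a real
 * load from memory (the role later taken over by ACCESS_ONCE()/READ_ONCE()).
 * Like atomic_set(), it implies no memory barrier and no atomicity beyond a
 * single naturally aligned word access.
 */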
 43/*
 44 * atomic_add - add integer to atomic variable
 45 * @i: integer value to add
 46 * @v: pointer of type atomic_t
 47 *
 48 * Atomically adds @i to @v.
 49 */
 50static __inline__ void atomic_add(int i, atomic_t * v)
 51{
 52	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 53		int temp;
 54
 55		__asm__ __volatile__(
 56		"	.set	mips3					\n"
 57		"1:	ll	%0, %1		# atomic_add		\n"
 58		"	addu	%0, %2					\n"
 59		"	sc	%0, %1					\n"
 60		"	beqzl	%0, 1b					\n"
 61		"	.set	mips0					\n"
 62		: "=&r" (temp), "=m" (v->counter)
 63		: "Ir" (i), "m" (v->counter));
 64	} else if (kernel_uses_llsc) {
 65		int temp;
 66
 67		do {
 68			__asm__ __volatile__(
 69			"	.set	mips3				\n"
 70			"	ll	%0, %1		# atomic_add	\n"
 71			"	addu	%0, %2				\n"
 72			"	sc	%0, %1				\n"
 73			"	.set	mips0				\n"
 74			: "=&r" (temp), "=m" (v->counter)
 75			: "Ir" (i), "m" (v->counter));
 76		} while (unlikely(!temp));
 77	} else {
 78		unsigned long flags;
 79
 80		raw_local_irq_save(flags);
 81		v->counter += i;
 82		raw_local_irq_restore(flags);
 83	}
 84}
 85
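/*
 * Editor's note (not part of this header): the ll/sc sequences above are the
 * hardware form of an optimistic retry loop, roughly equivalent to the
 * sketch below.  load_linked()/store_conditional() are illustrative names,
 * not real kernel APIs; sc writes back 1 on success and 0 if another CPU
 * touched the line in between.
 *
 *	do {
 *		temp = load_linked(&v->counter);		// ll
 *		temp += i;					// addu
 *	} while (!store_conditional(&v->counter, temp));	// sc
 */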
 86/*
 87 * atomic_sub - subtract the atomic variable
 88 * @i: integer value to subtract
 89 * @v: pointer of type atomic_t
 90 *
 91 * Atomically subtracts @i from @v.
 92 */
 93static __inline__ void atomic_sub(int i, atomic_t * v)
 94{
 95	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 96		int temp;
 97
 98		__asm__ __volatile__(
 99		"	.set	mips3					\n"
100		"1:	ll	%0, %1		# atomic_sub		\n"
101		"	subu	%0, %2					\n"
102		"	sc	%0, %1					\n"
103		"	beqzl	%0, 1b					\n"
104		"	.set	mips0					\n"
105		: "=&r" (temp), "=m" (v->counter)
106		: "Ir" (i), "m" (v->counter));
107	} else if (kernel_uses_llsc) {
108		int temp;
109
110		do {
111			__asm__ __volatile__(
112			"	.set	mips3				\n"
113			"	ll	%0, %1		# atomic_sub	\n"
114			"	subu	%0, %2				\n"
115			"	sc	%0, %1				\n"
116			"	.set	mips0				\n"
117			: "=&r" (temp), "=m" (v->counter)
118			: "Ir" (i), "m" (v->counter));
119		} while (unlikely(!temp));
120	} else {
121		unsigned long flags;
122
123		raw_local_irq_save(flags);
124		v->counter -= i;
125		raw_local_irq_restore(flags);
126	}
127}
128
129/*
130 * Same as above, but return the result value
131 */
132static __inline__ int atomic_add_return(int i, atomic_t * v)
133{
134	int result;
135
136	smp_mb__before_llsc();
137
138	if (kernel_uses_llsc && R10000_LLSC_WAR) {
139		int temp;
140
141		__asm__ __volatile__(
142		"	.set	mips3					\n"
143		"1:	ll	%1, %2		# atomic_add_return	\n"
144		"	addu	%0, %1, %3				\n"
145		"	sc	%0, %2					\n"
146		"	beqzl	%0, 1b					\n"
147		"	addu	%0, %1, %3				\n"
148		"	.set	mips0					\n"
149		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
150		: "Ir" (i), "m" (v->counter)
151		: "memory");
152	} else if (kernel_uses_llsc) {
153		int temp;
154
155		do {
156			__asm__ __volatile__(
157			"	.set	mips3				\n"
158			"	ll	%1, %2	# atomic_add_return	\n"
159			"	addu	%0, %1, %3			\n"
160			"	sc	%0, %2				\n"
161			"	.set	mips0				\n"
162			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
163			: "Ir" (i), "m" (v->counter)
164			: "memory");
165		} while (unlikely(!result));
166
167		result = temp + i;
168	} else {
169		unsigned long flags;
170
171		raw_local_irq_save(flags);
172		result = v->counter;
173		result += i;
174		v->counter = result;
175		raw_local_irq_restore(flags);
176	}
177
178	smp_llsc_mb();
179
180	return result;
181}
182
183static __inline__ int atomic_sub_return(int i, atomic_t * v)
184{
185	int result;
186
187	smp_mb__before_llsc();
188
189	if (kernel_uses_llsc && R10000_LLSC_WAR) {
190		int temp;
191
192		__asm__ __volatile__(
193		"	.set	mips3					\n"
194		"1:	ll	%1, %2		# atomic_sub_return	\n"
195		"	subu	%0, %1, %3				\n"
196		"	sc	%0, %2					\n"
197		"	beqzl	%0, 1b					\n"
198		"	subu	%0, %1, %3				\n"
199		"	.set	mips0					\n"
200		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
201		: "Ir" (i), "m" (v->counter)
202		: "memory");
203
204		result = temp - i;
205	} else if (kernel_uses_llsc) {
206		int temp;
207
208		do {
209			__asm__ __volatile__(
210			"	.set	mips3				\n"
211			"	ll	%1, %2	# atomic_sub_return	\n"
212			"	subu	%0, %1, %3			\n"
213			"	sc	%0, %2				\n"
214			"	.set	mips0				\n"
215			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
216			: "Ir" (i), "m" (v->counter)
217			: "memory");
218		} while (unlikely(!result));
219
220		result = temp - i;
221	} else {
222		unsigned long flags;
223
224		raw_local_irq_save(flags);
225		result = v->counter;
226		result -= i;
227		v->counter = result;
228		raw_local_irq_restore(flags);
229	}
230
231	smp_llsc_mb();
232
233	return result;
234}
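/*
 * Editor's illustration (not part of this header): unlike atomic_add(),
 * atomic_add_return() above is fully ordered (note the smp_mb__before_llsc()
 * and smp_llsc_mb() calls), so it can hand out a ticket and act as a memory
 * barrier in one step.  The helper name is hypothetical.
 */
static inline int example_take_ticket(atomic_t *next_ticket)
{
	/* Returns this caller's ticket; implies a full memory barrier. */
	return atomic_add_return(1, next_ticket) - 1;
}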
235
236/*
237 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
238 * @i: integer value to subtract
239 * @v: pointer of type atomic_t
240 *
 241 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
242 * The function returns the old value of @v minus @i.
243 */
244static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
245{
246	int result;
247
248	smp_mb__before_llsc();
249
250	if (kernel_uses_llsc && R10000_LLSC_WAR) {
251		int temp;
252
253		__asm__ __volatile__(
254		"	.set	mips3					\n"
255		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
256		"	subu	%0, %1, %3				\n"
257		"	bltz	%0, 1f					\n"
258		"	sc	%0, %2					\n"
259		"	.set	noreorder				\n"
260		"	beqzl	%0, 1b					\n"
261		"	 subu	%0, %1, %3				\n"
262		"	.set	reorder					\n"
263		"1:							\n"
264		"	.set	mips0					\n"
265		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
266		: "Ir" (i), "m" (v->counter)
267		: "memory");
268	} else if (kernel_uses_llsc) {
269		int temp;
270
271		__asm__ __volatile__(
272		"	.set	mips3					\n"
273		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
274		"	subu	%0, %1, %3				\n"
275		"	bltz	%0, 1f					\n"
276		"	sc	%0, %2					\n"
277		"	.set	noreorder				\n"
278		"	beqz	%0, 1b					\n"
279		"	 subu	%0, %1, %3				\n"
280		"	.set	reorder					\n"
281		"1:							\n"
282		"	.set	mips0					\n"
283		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
284		: "Ir" (i), "m" (v->counter)
285		: "memory");
286	} else {
287		unsigned long flags;
288
289		raw_local_irq_save(flags);
290		result = v->counter;
291		result -= i;
292		if (result >= 0)
293			v->counter = result;
294		raw_local_irq_restore(flags);
295	}
296
297	smp_llsc_mb();
298
299	return result;
300}
301
302#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
303#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
304
305/**
306 * __atomic_add_unless - add unless the number is a given value
307 * @v: pointer of type atomic_t
308 * @a: the amount to add to v...
309 * @u: ...unless v is equal to u.
310 *
311 * Atomically adds @a to @v, so long as it was not @u.
312 * Returns the old value of @v.
313 */
314static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
315{
316	int c, old;
317	c = atomic_read(v);
318	for (;;) {
319		if (unlikely(c == (u)))
320			break;
321		old = atomic_cmpxchg((v), c, c + (a));
322		if (likely(old == c))
323			break;
324		c = old;
325	}
326	return c;
327}
328
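/*
 * Editor's illustration (not part of this header): __atomic_add_unless() is
 * the primitive behind "take a reference only if the object is still live"
 * patterns.  The helper name is hypothetical.
 */
static inline int example_get_ref_unless_zero(atomic_t *refcount)
{
	/* Non-zero return means the old count was not 0 and a reference was taken. */
	return __atomic_add_unless(refcount, 1, 0) != 0;
}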
329#define atomic_dec_return(v) atomic_sub_return(1, (v))
330#define atomic_inc_return(v) atomic_add_return(1, (v))
331
332/*
333 * atomic_sub_and_test - subtract value from variable and test result
334 * @i: integer value to subtract
335 * @v: pointer of type atomic_t
336 *
337 * Atomically subtracts @i from @v and returns
338 * true if the result is zero, or false for all
339 * other cases.
340 */
341#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
342
343/*
344 * atomic_inc_and_test - increment and test
345 * @v: pointer of type atomic_t
346 *
347 * Atomically increments @v by 1
348 * and returns true if the result is zero, or false for all
349 * other cases.
350 */
351#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
352
353/*
354 * atomic_dec_and_test - decrement by 1 and test
355 * @v: pointer of type atomic_t
356 *
357 * Atomically decrements @v by 1 and
358 * returns true if the result is 0, or false for all other
359 * cases.
360 */
361#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
362
363/*
364 * atomic_dec_if_positive - decrement by 1 if old value positive
365 * @v: pointer of type atomic_t
366 */
367#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
368
369/*
370 * atomic_inc - increment atomic variable
371 * @v: pointer of type atomic_t
372 *
373 * Atomically increments @v by 1.
374 */
375#define atomic_inc(v) atomic_add(1, (v))
376
377/*
378 * atomic_dec - decrement and test
379 * @v: pointer of type atomic_t
380 *
381 * Atomically decrements @v by 1.
382 */
383#define atomic_dec(v) atomic_sub(1, (v))
384
385/*
386 * atomic_add_negative - add and test if negative
387 * @v: pointer of type atomic_t
388 * @i: integer value to add
389 *
390 * Atomically adds @i to @v and returns true
391 * if the result is negative, or false when
392 * result is greater than or equal to zero.
393 */
394#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
395
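/*
 * Editor's illustration (not part of this header): the classic reference
 * release idiom built on atomic_dec_and_test().  The names are hypothetical.
 */
static inline void example_put_object(atomic_t *refcount, void (*release)(void))
{
	/* Fully ordered: atomic_dec_and_test() is built on atomic_sub_return(). */
	if (atomic_dec_and_test(refcount))
		release();			/* last reference is gone */
}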
396#ifdef CONFIG_64BIT
397
398#define ATOMIC64_INIT(i)    { (i) }
399
400/*
401 * atomic64_read - read atomic variable
402 * @v: pointer of type atomic64_t
 403 * Atomically reads the value of @v.
404 */
405#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
406
407/*
408 * atomic64_set - set atomic variable
409 * @v: pointer of type atomic64_t
410 * @i: required value
411 */
412#define atomic64_set(v, i)	((v)->counter = (i))
413
414/*
415 * atomic64_add - add integer to atomic variable
416 * @i: integer value to add
417 * @v: pointer of type atomic64_t
418 *
419 * Atomically adds @i to @v.
420 */
421static __inline__ void atomic64_add(long i, atomic64_t * v)
422{
423	if (kernel_uses_llsc && R10000_LLSC_WAR) {
424		long temp;
425
426		__asm__ __volatile__(
427		"	.set	mips3					\n"
428		"1:	lld	%0, %1		# atomic64_add		\n"
429		"	daddu	%0, %2					\n"
430		"	scd	%0, %1					\n"
431		"	beqzl	%0, 1b					\n"
432		"	.set	mips0					\n"
433		: "=&r" (temp), "=m" (v->counter)
434		: "Ir" (i), "m" (v->counter));
435	} else if (kernel_uses_llsc) {
436		long temp;
437
438		do {
439			__asm__ __volatile__(
440			"	.set	mips3				\n"
441			"	lld	%0, %1		# atomic64_add	\n"
442			"	daddu	%0, %2				\n"
443			"	scd	%0, %1				\n"
444			"	.set	mips0				\n"
445			: "=&r" (temp), "=m" (v->counter)
446			: "Ir" (i), "m" (v->counter));
447		} while (unlikely(!temp));
448	} else {
449		unsigned long flags;
450
451		raw_local_irq_save(flags);
452		v->counter += i;
453		raw_local_irq_restore(flags);
454	}
455}
456
457/*
458 * atomic64_sub - subtract the atomic variable
459 * @i: integer value to subtract
460 * @v: pointer of type atomic64_t
461 *
462 * Atomically subtracts @i from @v.
463 */
464static __inline__ void atomic64_sub(long i, atomic64_t * v)
465{
466	if (kernel_uses_llsc && R10000_LLSC_WAR) {
467		long temp;
468
469		__asm__ __volatile__(
470		"	.set	mips3					\n"
471		"1:	lld	%0, %1		# atomic64_sub		\n"
472		"	dsubu	%0, %2					\n"
473		"	scd	%0, %1					\n"
474		"	beqzl	%0, 1b					\n"
475		"	.set	mips0					\n"
476		: "=&r" (temp), "=m" (v->counter)
477		: "Ir" (i), "m" (v->counter));
478	} else if (kernel_uses_llsc) {
479		long temp;
480
481		do {
482			__asm__ __volatile__(
483			"	.set	mips3				\n"
484			"	lld	%0, %1		# atomic64_sub	\n"
485			"	dsubu	%0, %2				\n"
486			"	scd	%0, %1				\n"
487			"	.set	mips0				\n"
488			: "=&r" (temp), "=m" (v->counter)
489			: "Ir" (i), "m" (v->counter));
490		} while (unlikely(!temp));
491	} else {
492		unsigned long flags;
493
494		raw_local_irq_save(flags);
495		v->counter -= i;
496		raw_local_irq_restore(flags);
497	}
498}
499
500/*
501 * Same as above, but return the result value
502 */
503static __inline__ long atomic64_add_return(long i, atomic64_t * v)
504{
505	long result;
506
507	smp_mb__before_llsc();
508
509	if (kernel_uses_llsc && R10000_LLSC_WAR) {
510		long temp;
511
512		__asm__ __volatile__(
513		"	.set	mips3					\n"
514		"1:	lld	%1, %2		# atomic64_add_return	\n"
515		"	daddu	%0, %1, %3				\n"
516		"	scd	%0, %2					\n"
517		"	beqzl	%0, 1b					\n"
518		"	daddu	%0, %1, %3				\n"
519		"	.set	mips0					\n"
520		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
521		: "Ir" (i), "m" (v->counter)
522		: "memory");
523	} else if (kernel_uses_llsc) {
524		long temp;
525
526		do {
527			__asm__ __volatile__(
528			"	.set	mips3				\n"
529			"	lld	%1, %2	# atomic64_add_return	\n"
530			"	daddu	%0, %1, %3			\n"
531			"	scd	%0, %2				\n"
532			"	.set	mips0				\n"
533			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
534			: "Ir" (i), "m" (v->counter)
535			: "memory");
536		} while (unlikely(!result));
537
538		result = temp + i;
539	} else {
540		unsigned long flags;
541
542		raw_local_irq_save(flags);
543		result = v->counter;
544		result += i;
545		v->counter = result;
546		raw_local_irq_restore(flags);
547	}
548
549	smp_llsc_mb();
550
551	return result;
552}
553
554static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
555{
556	long result;
557
558	smp_mb__before_llsc();
559
560	if (kernel_uses_llsc && R10000_LLSC_WAR) {
561		long temp;
562
563		__asm__ __volatile__(
564		"	.set	mips3					\n"
565		"1:	lld	%1, %2		# atomic64_sub_return	\n"
566		"	dsubu	%0, %1, %3				\n"
567		"	scd	%0, %2					\n"
568		"	beqzl	%0, 1b					\n"
569		"	dsubu	%0, %1, %3				\n"
570		"	.set	mips0					\n"
571		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
572		: "Ir" (i), "m" (v->counter)
573		: "memory");
574	} else if (kernel_uses_llsc) {
575		long temp;
576
577		do {
578			__asm__ __volatile__(
579			"	.set	mips3				\n"
580			"	lld	%1, %2	# atomic64_sub_return	\n"
581			"	dsubu	%0, %1, %3			\n"
582			"	scd	%0, %2				\n"
583			"	.set	mips0				\n"
584			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
585			: "Ir" (i), "m" (v->counter)
586			: "memory");
587		} while (unlikely(!result));
588
589		result = temp - i;
590	} else {
591		unsigned long flags;
592
593		raw_local_irq_save(flags);
594		result = v->counter;
595		result -= i;
596		v->counter = result;
597		raw_local_irq_restore(flags);
598	}
599
600	smp_llsc_mb();
601
602	return result;
603}
604
605/*
606 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
607 * @i: integer value to subtract
608 * @v: pointer of type atomic64_t
609 *
 610 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
611 * The function returns the old value of @v minus @i.
612 */
613static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
614{
615	long result;
616
617	smp_mb__before_llsc();
618
619	if (kernel_uses_llsc && R10000_LLSC_WAR) {
620		long temp;
621
622		__asm__ __volatile__(
623		"	.set	mips3					\n"
624		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
625		"	dsubu	%0, %1, %3				\n"
626		"	bltz	%0, 1f					\n"
627		"	scd	%0, %2					\n"
628		"	.set	noreorder				\n"
629		"	beqzl	%0, 1b					\n"
630		"	 dsubu	%0, %1, %3				\n"
631		"	.set	reorder					\n"
632		"1:							\n"
633		"	.set	mips0					\n"
634		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
635		: "Ir" (i), "m" (v->counter)
636		: "memory");
637	} else if (kernel_uses_llsc) {
638		long temp;
639
640		__asm__ __volatile__(
641		"	.set	mips3					\n"
642		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
643		"	dsubu	%0, %1, %3				\n"
644		"	bltz	%0, 1f					\n"
645		"	scd	%0, %2					\n"
646		"	.set	noreorder				\n"
647		"	beqz	%0, 1b					\n"
648		"	 dsubu	%0, %1, %3				\n"
649		"	.set	reorder					\n"
650		"1:							\n"
651		"	.set	mips0					\n"
652		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
653		: "Ir" (i), "m" (v->counter)
654		: "memory");
655	} else {
656		unsigned long flags;
657
658		raw_local_irq_save(flags);
659		result = v->counter;
660		result -= i;
661		if (result >= 0)
662			v->counter = result;
663		raw_local_irq_restore(flags);
664	}
665
666	smp_llsc_mb();
667
668	return result;
669}
670
671#define atomic64_cmpxchg(v, o, n) \
672	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
673#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
674
675/**
676 * atomic64_add_unless - add unless the number is a given value
677 * @v: pointer of type atomic64_t
678 * @a: the amount to add to v...
679 * @u: ...unless v is equal to u.
680 *
681 * Atomically adds @a to @v, so long as it was not @u.
 682 * Returns non-zero if the add was performed, zero if @v was already @u.
683 */
684static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
685{
686	long c, old;
687	c = atomic64_read(v);
688	for (;;) {
689		if (unlikely(c == (u)))
690			break;
691		old = atomic64_cmpxchg((v), c, c + (a));
692		if (likely(old == c))
693			break;
694		c = old;
695	}
696	return c != (u);
697}
698
699#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
700
701#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
702#define atomic64_inc_return(v) atomic64_add_return(1, (v))
703
704/*
705 * atomic64_sub_and_test - subtract value from variable and test result
706 * @i: integer value to subtract
707 * @v: pointer of type atomic64_t
708 *
709 * Atomically subtracts @i from @v and returns
710 * true if the result is zero, or false for all
711 * other cases.
712 */
713#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
714
715/*
716 * atomic64_inc_and_test - increment and test
717 * @v: pointer of type atomic64_t
718 *
719 * Atomically increments @v by 1
720 * and returns true if the result is zero, or false for all
721 * other cases.
722 */
723#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
724
725/*
726 * atomic64_dec_and_test - decrement by 1 and test
727 * @v: pointer of type atomic64_t
728 *
729 * Atomically decrements @v by 1 and
730 * returns true if the result is 0, or false for all other
731 * cases.
732 */
733#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
734
735/*
736 * atomic64_dec_if_positive - decrement by 1 if old value positive
737 * @v: pointer of type atomic64_t
738 */
739#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)
740
741/*
742 * atomic64_inc - increment atomic variable
743 * @v: pointer of type atomic64_t
744 *
745 * Atomically increments @v by 1.
746 */
747#define atomic64_inc(v) atomic64_add(1, (v))
748
749/*
750 * atomic64_dec - decrement and test
751 * @v: pointer of type atomic64_t
752 *
753 * Atomically decrements @v by 1.
754 */
755#define atomic64_dec(v) atomic64_sub(1, (v))
756
757/*
758 * atomic64_add_negative - add and test if negative
759 * @v: pointer of type atomic64_t
760 * @i: integer value to add
761 *
762 * Atomically adds @i to @v and returns true
763 * if the result is negative, or false when
764 * result is greater than or equal to zero.
765 */
766#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
767
768#endif /* CONFIG_64BIT */
769
770/*
771 * atomic*_return operations are serializing but not the non-*_return
772 * versions.
773 */
774#define smp_mb__before_atomic_dec()	smp_mb__before_llsc()
775#define smp_mb__after_atomic_dec()	smp_llsc_mb()
776#define smp_mb__before_atomic_inc()	smp_mb__before_llsc()
777#define smp_mb__after_atomic_inc()	smp_llsc_mb()
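/*
 * Editor's illustration (not part of this header): a plain atomic_dec() is
 * not serializing, so callers that need ordering bracket it explicitly:
 *
 *	smp_mb__before_atomic_dec();
 *	atomic_dec(&obj->users);
 *	smp_mb__after_atomic_dec();
 *
 * The *_return and *_and_test variants already imply these barriers.
 */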
778
779#endif /* _ASM_ATOMIC_H */