v4.17
  1/*
  2 * Atomic operations that C can't guarantee us.  Useful for
  3 * resource counting etc..
  4 *
  5 * But use these as seldom as possible since they are much slower
  6 * than regular operations.
  7 *
  8 * This file is subject to the terms and conditions of the GNU General Public
  9 * License.  See the file "COPYING" in the main directory of this archive
 10 * for more details.
 11 *
 12 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 13 */
 14#ifndef _ASM_ATOMIC_H
 15#define _ASM_ATOMIC_H
 16
 17#include <linux/irqflags.h>
 18#include <linux/types.h>
 19#include <asm/barrier.h>
 20#include <asm/compiler.h>
 21#include <asm/cpu-features.h>
 22#include <asm/cmpxchg.h>
 23#include <asm/war.h>
 24
 25#define ATOMIC_INIT(i)	  { (i) }
 26
 27/*
 28 * atomic_read - read atomic variable
 29 * @v: pointer of type atomic_t
 30 *
 31 * Atomically reads the value of @v.
 32 */
 33#define atomic_read(v)		READ_ONCE((v)->counter)
 34
 35/*
 36 * atomic_set - set atomic variable
 37 * @v: pointer of type atomic_t
 38 * @i: required value
 39 *
 40 * Atomically sets the value of @v to @i.
 41 */
 42#define atomic_set(v, i)	WRITE_ONCE((v)->counter, (i))
 43
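(Editorial aside: a minimal usage sketch of the declaration and access helpers above, as they would appear in ordinary kernel code outside this header; the counter name and message are illustrative only.)

static atomic_t example_count = ATOMIC_INIT(0);	/* static initialisation */

static void example_reset_and_report(void)
{
	atomic_set(&example_count, 0);		/* plain store, via WRITE_ONCE() */
	pr_info("count=%d\n", atomic_read(&example_count));	/* plain load, via READ_ONCE() */
}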
 44#define ATOMIC_OP(op, c_op, asm_op)					      \
 45static __inline__ void atomic_##op(int i, atomic_t * v)			      \
 46{									      \
 47	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
 48		int temp;						      \
 49									      \
 50		__asm__ __volatile__(					      \
 51		"	.set	arch=r4000				\n"   \
 52		"1:	ll	%0, %1		# atomic_" #op "	\n"   \
 53		"	" #asm_op " %0, %2				\n"   \
 54		"	sc	%0, %1					\n"   \
 55		"	beqzl	%0, 1b					\n"   \
 56		"	.set	mips0					\n"   \
 57		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
 58		: "Ir" (i));						      \
 59	} else if (kernel_uses_llsc) {					      \
 60		int temp;						      \
 61									      \
 62		do {							      \
 63			__asm__ __volatile__(				      \
 64			"	.set	"MIPS_ISA_LEVEL"		\n"   \
 65			"	ll	%0, %1		# atomic_" #op "\n"   \
 66			"	" #asm_op " %0, %2			\n"   \
 67			"	sc	%0, %1				\n"   \
 68			"	.set	mips0				\n"   \
 69			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)  \
 70			: "Ir" (i));					      \
 71		} while (unlikely(!temp));				      \
 72	} else {							      \
 73		unsigned long flags;					      \
 74									      \
 75		raw_local_irq_save(flags);				      \
 76		v->counter c_op i;					      \
 77		raw_local_irq_restore(flags);				      \
 78	}								      \
 79}
 80
 81#define ATOMIC_OP_RETURN(op, c_op, asm_op)				      \
 82static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v)	      \
 83{									      \
 84	int result;							      \
 85									      \
 86	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
 87		int temp;						      \
 88									      \
 89		__asm__ __volatile__(					      \
 90		"	.set	arch=r4000				\n"   \
 91		"1:	ll	%1, %2		# atomic_" #op "_return	\n"   \
 92		"	" #asm_op " %0, %1, %3				\n"   \
 93		"	sc	%0, %2					\n"   \
 94		"	beqzl	%0, 1b					\n"   \
 95		"	" #asm_op " %0, %1, %3				\n"   \
 96		"	.set	mips0					\n"   \
 97		: "=&r" (result), "=&r" (temp),				      \
 98		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
 99		: "Ir" (i));						      \
100	} else if (kernel_uses_llsc) {					      \
101		int temp;						      \
102									      \
103		do {							      \
104			__asm__ __volatile__(				      \
105			"	.set	"MIPS_ISA_LEVEL"		\n"   \
106			"	ll	%1, %2	# atomic_" #op "_return	\n"   \
107			"	" #asm_op " %0, %1, %3			\n"   \
108			"	sc	%0, %2				\n"   \
109			"	.set	mips0				\n"   \
110			: "=&r" (result), "=&r" (temp),			      \
111			  "+" GCC_OFF_SMALL_ASM() (v->counter)		      \
112			: "Ir" (i));					      \
113		} while (unlikely(!result));				      \
114									      \
115		result = temp; result c_op i;				      \
116	} else {							      \
117		unsigned long flags;					      \
118									      \
119		raw_local_irq_save(flags);				      \
120		result = v->counter;					      \
121		result c_op i;						      \
122		v->counter = result;					      \
123		raw_local_irq_restore(flags);				      \
124	}								      \
125									      \
126	return result;							      \
127}
128
129#define ATOMIC_FETCH_OP(op, c_op, asm_op)				      \
130static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v)	      \
131{									      \
132	int result;							      \
133									      \
134	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
135		int temp;						      \
136									      \
137		__asm__ __volatile__(					      \
138		"	.set	arch=r4000				\n"   \
139		"1:	ll	%1, %2		# atomic_fetch_" #op "	\n"   \
140		"	" #asm_op " %0, %1, %3				\n"   \
141		"	sc	%0, %2					\n"   \
142		"	beqzl	%0, 1b					\n"   \
143		"	move	%0, %1					\n"   \
144		"	.set	mips0					\n"   \
145		: "=&r" (result), "=&r" (temp),				      \
146		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
147		: "Ir" (i));						      \
148	} else if (kernel_uses_llsc) {					      \
149		int temp;						      \
150									      \
151		do {							      \
152			__asm__ __volatile__(				      \
153			"	.set	"MIPS_ISA_LEVEL"		\n"   \
154			"	ll	%1, %2	# atomic_fetch_" #op "	\n"   \
155			"	" #asm_op " %0, %1, %3			\n"   \
156			"	sc	%0, %2				\n"   \
157			"	.set	mips0				\n"   \
158			: "=&r" (result), "=&r" (temp),			      \
159			  "+" GCC_OFF_SMALL_ASM() (v->counter)		      \
160			: "Ir" (i));					      \
161		} while (unlikely(!result));				      \
162									      \
163		result = temp;						      \
164	} else {							      \
165		unsigned long flags;					      \
166									      \
167		raw_local_irq_save(flags);				      \
168		result = v->counter;					      \
169		v->counter c_op i;					      \
170		raw_local_irq_restore(flags);				      \
171	}								      \
172									      \
173	return result;							      \
174}
175
176#define ATOMIC_OPS(op, c_op, asm_op)					      \
177	ATOMIC_OP(op, c_op, asm_op)					      \
178	ATOMIC_OP_RETURN(op, c_op, asm_op)				      \
179	ATOMIC_FETCH_OP(op, c_op, asm_op)
180
181ATOMIC_OPS(add, +=, addu)
182ATOMIC_OPS(sub, -=, subu)
183
184#define atomic_add_return_relaxed	atomic_add_return_relaxed
185#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
186#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
187#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
188
189#undef ATOMIC_OPS
190#define ATOMIC_OPS(op, c_op, asm_op)					      \
191	ATOMIC_OP(op, c_op, asm_op)					      \
192	ATOMIC_FETCH_OP(op, c_op, asm_op)
193
194ATOMIC_OPS(and, &=, and)
195ATOMIC_OPS(or, |=, or)
196ATOMIC_OPS(xor, ^=, xor)
197
198#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
199#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
200#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
201
202#undef ATOMIC_OPS
203#undef ATOMIC_FETCH_OP
204#undef ATOMIC_OP_RETURN
205#undef ATOMIC_OP
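(Editorial aside: for orientation, the two ATOMIC_OPS blocks above expand to roughly the following entry points; the full-barrier atomic_add_return() etc. are then derived from the _relaxed variants by the generic include/linux/atomic.h. This is a summary, not text that appears in the header.)

void atomic_add(int i, atomic_t *v);			/* likewise sub, and, or, xor */
int  atomic_add_return_relaxed(int i, atomic_t *v);	/* likewise sub; returns the new value */
int  atomic_fetch_add_relaxed(int i, atomic_t *v);	/* likewise sub, and, or, xor; returns the old value */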
206
207/*
208 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
209 * @i: integer value to subtract
210 * @v: pointer of type atomic_t
211 *
 212 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
213 * The function returns the old value of @v minus @i.
214 */
215static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
216{
217	int result;
218
219	smp_mb__before_llsc();
220
221	if (kernel_uses_llsc && R10000_LLSC_WAR) {
222		int temp;
223
224		__asm__ __volatile__(
225		"	.set	arch=r4000				\n"
226		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
227		"	subu	%0, %1, %3				\n"
228		"	bltz	%0, 1f					\n"
229		"	sc	%0, %2					\n"
230		"	.set	noreorder				\n"
231		"	beqzl	%0, 1b					\n"
232		"	 subu	%0, %1, %3				\n"
233		"	.set	reorder					\n"
234		"1:							\n"
235		"	.set	mips0					\n"
236		: "=&r" (result), "=&r" (temp),
237		  "+" GCC_OFF_SMALL_ASM() (v->counter)
238		: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
239		: "memory");
240	} else if (kernel_uses_llsc) {
241		int temp;
242
243		__asm__ __volatile__(
244		"	.set	"MIPS_ISA_LEVEL"			\n"
245		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
246		"	subu	%0, %1, %3				\n"
247		"	bltz	%0, 1f					\n"
248		"	sc	%0, %2					\n"
249		"	.set	noreorder				\n"
250		"	beqz	%0, 1b					\n"
251		"	 subu	%0, %1, %3				\n"
252		"	.set	reorder					\n"
253		"1:							\n"
254		"	.set	mips0					\n"
255		: "=&r" (result), "=&r" (temp),
256		  "+" GCC_OFF_SMALL_ASM() (v->counter)
257		: "Ir" (i));
258	} else {
259		unsigned long flags;
260
261		raw_local_irq_save(flags);
262		result = v->counter;
263		result -= i;
264		if (result >= 0)
265			v->counter = result;
266		raw_local_irq_restore(flags);
267	}
268
269	smp_llsc_mb();
270
271	return result;
272}
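(Editorial aside: a hypothetical caller of the conditional subtract above, taking one token from a budget only when one is available; the names are invented for illustration.)

static atomic_t token_budget = ATOMIC_INIT(16);

static bool example_take_token(void)
{
	/*
	 * atomic_sub_if_positive() stores the new value only when it does
	 * not go negative; a negative return means nothing was taken.
	 */
	return atomic_sub_if_positive(1, &token_budget) >= 0;
}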
273
274#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
275#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
276
277/**
278 * __atomic_add_unless - add unless the number is a given value
279 * @v: pointer of type atomic_t
280 * @a: the amount to add to v...
281 * @u: ...unless v is equal to u.
282 *
283 * Atomically adds @a to @v, so long as it was not @u.
284 * Returns the old value of @v.
285 */
286static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
287{
288	int c, old;
289	c = atomic_read(v);
290	for (;;) {
291		if (unlikely(c == (u)))
292			break;
293		old = atomic_cmpxchg((v), c, c + (a));
294		if (likely(old == c))
295			break;
296		c = old;
297	}
298	return c;
299}
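(Editorial aside: generic code of this era wraps __atomic_add_unless() roughly as follows (see include/linux/atomic.h); shown here as a sketch of how the return value is meant to be consumed.)

static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
	return __atomic_add_unless(v, a, u) != u;	/* true iff the add was performed */
}
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)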
300
301#define atomic_dec_return(v) atomic_sub_return(1, (v))
302#define atomic_inc_return(v) atomic_add_return(1, (v))
303
304/*
305 * atomic_sub_and_test - subtract value from variable and test result
306 * @i: integer value to subtract
307 * @v: pointer of type atomic_t
308 *
309 * Atomically subtracts @i from @v and returns
310 * true if the result is zero, or false for all
311 * other cases.
312 */
313#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
314
315/*
316 * atomic_inc_and_test - increment and test
317 * @v: pointer of type atomic_t
318 *
319 * Atomically increments @v by 1
320 * and returns true if the result is zero, or false for all
321 * other cases.
322 */
323#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
324
325/*
326 * atomic_dec_and_test - decrement by 1 and test
327 * @v: pointer of type atomic_t
328 *
329 * Atomically decrements @v by 1 and
330 * returns true if the result is 0, or false for all other
331 * cases.
332 */
333#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
334
335/*
336 * atomic_dec_if_positive - decrement by 1 if old value positive
337 * @v: pointer of type atomic_t
338 */
339#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
340
341/*
342 * atomic_inc - increment atomic variable
343 * @v: pointer of type atomic_t
344 *
345 * Atomically increments @v by 1.
346 */
347#define atomic_inc(v) atomic_add(1, (v))
348
349/*
 350 * atomic_dec - decrement atomic variable
351 * @v: pointer of type atomic_t
352 *
353 * Atomically decrements @v by 1.
354 */
355#define atomic_dec(v) atomic_sub(1, (v))
356
357/*
358 * atomic_add_negative - add and test if negative
359 * @v: pointer of type atomic_t
360 * @i: integer value to add
361 *
362 * Atomically adds @i to @v and returns true
363 * if the result is negative, or false when
364 * result is greater than or equal to zero.
365 */
366#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
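(Editorial aside: a typical, purely illustrative reference-count pattern built from the helpers above; the struct and function names are invented.)

struct example_obj {
	atomic_t refs;
};

static void example_obj_put(struct example_obj *obj)
{
	/* atomic_dec_and_test() returns true only for the final reference */
	if (atomic_dec_and_test(&obj->refs))
		kfree(obj);
}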
367
368#ifdef CONFIG_64BIT
369
370#define ATOMIC64_INIT(i)    { (i) }
371
372/*
373 * atomic64_read - read atomic variable
374 * @v: pointer of type atomic64_t
375 *
376 */
377#define atomic64_read(v)	READ_ONCE((v)->counter)
378
379/*
380 * atomic64_set - set atomic variable
381 * @v: pointer of type atomic64_t
382 * @i: required value
383 */
384#define atomic64_set(v, i)	WRITE_ONCE((v)->counter, (i))
385
386#define ATOMIC64_OP(op, c_op, asm_op)					      \
387static __inline__ void atomic64_##op(long i, atomic64_t * v)		      \
388{									      \
389	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
390		long temp;						      \
391									      \
392		__asm__ __volatile__(					      \
393		"	.set	arch=r4000				\n"   \
394		"1:	lld	%0, %1		# atomic64_" #op "	\n"   \
395		"	" #asm_op " %0, %2				\n"   \
396		"	scd	%0, %1					\n"   \
397		"	beqzl	%0, 1b					\n"   \
398		"	.set	mips0					\n"   \
399		: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)	      \
400		: "Ir" (i));						      \
401	} else if (kernel_uses_llsc) {					      \
402		long temp;						      \
403									      \
404		do {							      \
405			__asm__ __volatile__(				      \
406			"	.set	"MIPS_ISA_LEVEL"		\n"   \
407			"	lld	%0, %1		# atomic64_" #op "\n" \
408			"	" #asm_op " %0, %2			\n"   \
409			"	scd	%0, %1				\n"   \
410			"	.set	mips0				\n"   \
411			: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
412			: "Ir" (i));					      \
413		} while (unlikely(!temp));				      \
414	} else {							      \
415		unsigned long flags;					      \
416									      \
417		raw_local_irq_save(flags);				      \
418		v->counter c_op i;					      \
419		raw_local_irq_restore(flags);				      \
420	}								      \
421}
422
423#define ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
424static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
425{									      \
426	long result;							      \
427									      \
428	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
429		long temp;						      \
430									      \
431		__asm__ __volatile__(					      \
432		"	.set	arch=r4000				\n"   \
433		"1:	lld	%1, %2		# atomic64_" #op "_return\n"  \
434		"	" #asm_op " %0, %1, %3				\n"   \
435		"	scd	%0, %2					\n"   \
436		"	beqzl	%0, 1b					\n"   \
437		"	" #asm_op " %0, %1, %3				\n"   \
438		"	.set	mips0					\n"   \
439		: "=&r" (result), "=&r" (temp),				      \
440		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
441		: "Ir" (i));						      \
442	} else if (kernel_uses_llsc) {					      \
443		long temp;						      \
444									      \
445		do {							      \
446			__asm__ __volatile__(				      \
447			"	.set	"MIPS_ISA_LEVEL"		\n"   \
448			"	lld	%1, %2	# atomic64_" #op "_return\n"  \
449			"	" #asm_op " %0, %1, %3			\n"   \
450			"	scd	%0, %2				\n"   \
451			"	.set	mips0				\n"   \
452			: "=&r" (result), "=&r" (temp),			      \
453			  "=" GCC_OFF_SMALL_ASM() (v->counter)		      \
454			: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)	      \
455			: "memory");					      \
456		} while (unlikely(!result));				      \
457									      \
458		result = temp; result c_op i;				      \
459	} else {							      \
460		unsigned long flags;					      \
461									      \
462		raw_local_irq_save(flags);				      \
463		result = v->counter;					      \
464		result c_op i;						      \
465		v->counter = result;					      \
466		raw_local_irq_restore(flags);				      \
467	}								      \
468									      \
469	return result;							      \
470}
471
472#define ATOMIC64_FETCH_OP(op, c_op, asm_op)				      \
473static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v)  \
474{									      \
475	long result;							      \
476									      \
477	if (kernel_uses_llsc && R10000_LLSC_WAR) {			      \
478		long temp;						      \
479									      \
480		__asm__ __volatile__(					      \
481		"	.set	arch=r4000				\n"   \
482		"1:	lld	%1, %2		# atomic64_fetch_" #op "\n"   \
483		"	" #asm_op " %0, %1, %3				\n"   \
484		"	scd	%0, %2					\n"   \
485		"	beqzl	%0, 1b					\n"   \
486		"	move	%0, %1					\n"   \
487		"	.set	mips0					\n"   \
488		: "=&r" (result), "=&r" (temp),				      \
489		  "+" GCC_OFF_SMALL_ASM() (v->counter)			      \
490		: "Ir" (i));						      \
491	} else if (kernel_uses_llsc) {					      \
492		long temp;						      \
493									      \
494		do {							      \
495			__asm__ __volatile__(				      \
496			"	.set	"MIPS_ISA_LEVEL"		\n"   \
497			"	lld	%1, %2	# atomic64_fetch_" #op "\n"   \
498			"	" #asm_op " %0, %1, %3			\n"   \
499			"	scd	%0, %2				\n"   \
500			"	.set	mips0				\n"   \
501			: "=&r" (result), "=&r" (temp),			      \
502			  "=" GCC_OFF_SMALL_ASM() (v->counter)		      \
503			: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)	      \
504			: "memory");					      \
505		} while (unlikely(!result));				      \
506									      \
507		result = temp;						      \
508	} else {							      \
509		unsigned long flags;					      \
510									      \
511		raw_local_irq_save(flags);				      \
512		result = v->counter;					      \
513		v->counter c_op i;					      \
514		raw_local_irq_restore(flags);				      \
515	}								      \
516									      \
517	return result;							      \
518}
519
520#define ATOMIC64_OPS(op, c_op, asm_op)					      \
521	ATOMIC64_OP(op, c_op, asm_op)					      \
522	ATOMIC64_OP_RETURN(op, c_op, asm_op)				      \
523	ATOMIC64_FETCH_OP(op, c_op, asm_op)
524
525ATOMIC64_OPS(add, +=, daddu)
526ATOMIC64_OPS(sub, -=, dsubu)
527
528#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
529#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
530#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
531#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
532
533#undef ATOMIC64_OPS
534#define ATOMIC64_OPS(op, c_op, asm_op)					      \
535	ATOMIC64_OP(op, c_op, asm_op)					      \
536	ATOMIC64_FETCH_OP(op, c_op, asm_op)
537
538ATOMIC64_OPS(and, &=, and)
539ATOMIC64_OPS(or, |=, or)
540ATOMIC64_OPS(xor, ^=, xor)
541
542#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
543#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
544#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
545
546#undef ATOMIC64_OPS
547#undef ATOMIC64_FETCH_OP
548#undef ATOMIC64_OP_RETURN
549#undef ATOMIC64_OP
550
551/*
552 * atomic64_sub_if_positive - conditionally subtract integer from atomic
553 *                            variable
554 * @i: integer value to subtract
555 * @v: pointer of type atomic64_t
556 *
 557 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
558 * The function returns the old value of @v minus @i.
559 */
560static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
561{
562	long result;
563
564	smp_mb__before_llsc();
565
566	if (kernel_uses_llsc && R10000_LLSC_WAR) {
567		long temp;
568
569		__asm__ __volatile__(
570		"	.set	arch=r4000				\n"
571		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
572		"	dsubu	%0, %1, %3				\n"
573		"	bltz	%0, 1f					\n"
574		"	scd	%0, %2					\n"
575		"	.set	noreorder				\n"
576		"	beqzl	%0, 1b					\n"
577		"	 dsubu	%0, %1, %3				\n"
578		"	.set	reorder					\n"
579		"1:							\n"
580		"	.set	mips0					\n"
581		: "=&r" (result), "=&r" (temp),
582		  "=" GCC_OFF_SMALL_ASM() (v->counter)
583		: "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
584		: "memory");
585	} else if (kernel_uses_llsc) {
586		long temp;
587
588		__asm__ __volatile__(
589		"	.set	"MIPS_ISA_LEVEL"			\n"
590		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
591		"	dsubu	%0, %1, %3				\n"
592		"	bltz	%0, 1f					\n"
593		"	scd	%0, %2					\n"
594		"	.set	noreorder				\n"
595		"	beqz	%0, 1b					\n"
596		"	 dsubu	%0, %1, %3				\n"
597		"	.set	reorder					\n"
598		"1:							\n"
599		"	.set	mips0					\n"
600		: "=&r" (result), "=&r" (temp),
601		  "+" GCC_OFF_SMALL_ASM() (v->counter)
602		: "Ir" (i));
603	} else {
604		unsigned long flags;
605
606		raw_local_irq_save(flags);
607		result = v->counter;
608		result -= i;
609		if (result >= 0)
610			v->counter = result;
611		raw_local_irq_restore(flags);
612	}
613
614	smp_llsc_mb();
615
616	return result;
617}
618
619#define atomic64_cmpxchg(v, o, n) \
620	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
621#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
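(Editorial aside: a hypothetical compare-and-swap retry loop over a 64-bit counter, the kind of use these two primitives exist for; the maximum-tracking example is invented.)

static atomic64_t example_max = ATOMIC64_INIT(0);

static void example_record_max(long val)
{
	long cur = atomic64_read(&example_max);

	/* publish val only while it is still larger than the stored maximum */
	while (val > cur) {
		long old = atomic64_cmpxchg(&example_max, cur, val);
		if (old == cur)
			break;		/* our value won the race */
		cur = old;		/* somebody else updated it; re-check */
	}
}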
622
623/**
624 * atomic64_add_unless - add unless the number is a given value
625 * @v: pointer of type atomic64_t
626 * @a: the amount to add to v...
627 * @u: ...unless v is equal to u.
628 *
629 * Atomically adds @a to @v, so long as it was not @u.
630 * Returns true iff @v was not @u.
631 */
632static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
633{
634	long c, old;
635	c = atomic64_read(v);
636	for (;;) {
637		if (unlikely(c == (u)))
638			break;
639		old = atomic64_cmpxchg((v), c, c + (a));
640		if (likely(old == c))
641			break;
642		c = old;
643	}
644	return c != (u);
645}
646
647#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
648
649#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
650#define atomic64_inc_return(v) atomic64_add_return(1, (v))
651
652/*
653 * atomic64_sub_and_test - subtract value from variable and test result
654 * @i: integer value to subtract
655 * @v: pointer of type atomic64_t
656 *
657 * Atomically subtracts @i from @v and returns
658 * true if the result is zero, or false for all
659 * other cases.
660 */
661#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
662
663/*
664 * atomic64_inc_and_test - increment and test
665 * @v: pointer of type atomic64_t
666 *
667 * Atomically increments @v by 1
668 * and returns true if the result is zero, or false for all
669 * other cases.
670 */
671#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
672
673/*
674 * atomic64_dec_and_test - decrement by 1 and test
675 * @v: pointer of type atomic64_t
676 *
677 * Atomically decrements @v by 1 and
678 * returns true if the result is 0, or false for all other
679 * cases.
680 */
681#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
682
683/*
684 * atomic64_dec_if_positive - decrement by 1 if old value positive
685 * @v: pointer of type atomic64_t
686 */
687#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)
688
689/*
690 * atomic64_inc - increment atomic variable
691 * @v: pointer of type atomic64_t
692 *
693 * Atomically increments @v by 1.
694 */
695#define atomic64_inc(v) atomic64_add(1, (v))
696
697/*
 698 * atomic64_dec - decrement atomic variable
699 * @v: pointer of type atomic64_t
700 *
701 * Atomically decrements @v by 1.
702 */
703#define atomic64_dec(v) atomic64_sub(1, (v))
704
705/*
706 * atomic64_add_negative - add and test if negative
707 * @v: pointer of type atomic64_t
708 * @i: integer value to add
709 *
710 * Atomically adds @i to @v and returns true
711 * if the result is negative, or false when
712 * result is greater than or equal to zero.
713 */
714#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
715
716#endif /* CONFIG_64BIT */
717
718#endif /* _ASM_ATOMIC_H */
v3.1
  1/*
  2 * Atomic operations that C can't guarantee us.  Useful for
  3 * resource counting etc..
  4 *
  5 * But use these as seldom as possible since they are much slower
  6 * than regular operations.
  7 *
  8 * This file is subject to the terms and conditions of the GNU General Public
  9 * License.  See the file "COPYING" in the main directory of this archive
 10 * for more details.
 11 *
 12 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 13 */
 14#ifndef _ASM_ATOMIC_H
 15#define _ASM_ATOMIC_H
 16
 17#include <linux/irqflags.h>
 18#include <linux/types.h>
 19#include <asm/barrier.h>
 20#include <asm/cpu-features.h>
 21#include <asm/war.h>
 22#include <asm/system.h>
 23
 24#define ATOMIC_INIT(i)    { (i) }
 25
 26/*
 27 * atomic_read - read atomic variable
 28 * @v: pointer of type atomic_t
 29 *
 30 * Atomically reads the value of @v.
 31 */
 32#define atomic_read(v)		(*(volatile int *)&(v)->counter)
 33
 34/*
 35 * atomic_set - set atomic variable
 36 * @v: pointer of type atomic_t
 37 * @i: required value
 38 *
 39 * Atomically sets the value of @v to @i.
 40 */
 41#define atomic_set(v, i)		((v)->counter = (i))
 42
 43/*
 44 * atomic_add - add integer to atomic variable
 45 * @i: integer value to add
 46 * @v: pointer of type atomic_t
 47 *
 48 * Atomically adds @i to @v.
 49 */
 50static __inline__ void atomic_add(int i, atomic_t * v)
 51{
 52	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 53		int temp;
 54
 55		__asm__ __volatile__(
 56		"	.set	mips3					\n"
 57		"1:	ll	%0, %1		# atomic_add		\n"
 58		"	addu	%0, %2					\n"
 59		"	sc	%0, %1					\n"
 60		"	beqzl	%0, 1b					\n"
 61		"	.set	mips0					\n"
 62		: "=&r" (temp), "=m" (v->counter)
 63		: "Ir" (i), "m" (v->counter));
 64	} else if (kernel_uses_llsc) {
 65		int temp;
 66
 67		do {
 68			__asm__ __volatile__(
 69			"	.set	mips3				\n"
 70			"	ll	%0, %1		# atomic_add	\n"
 71			"	addu	%0, %2				\n"
 72			"	sc	%0, %1				\n"
 73			"	.set	mips0				\n"
 74			: "=&r" (temp), "=m" (v->counter)
 75			: "Ir" (i), "m" (v->counter));
 76		} while (unlikely(!temp));
 77	} else {
 78		unsigned long flags;
 79
 80		raw_local_irq_save(flags);
 81		v->counter += i;
 82		raw_local_irq_restore(flags);
 83	}
 84}
 85
 86/*
  87 * atomic_sub - subtract integer from atomic variable
 88 * @i: integer value to subtract
 89 * @v: pointer of type atomic_t
 90 *
 91 * Atomically subtracts @i from @v.
 92 */
 93static __inline__ void atomic_sub(int i, atomic_t * v)
 94{
 95	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 96		int temp;
 97
 98		__asm__ __volatile__(
 99		"	.set	mips3					\n"
100		"1:	ll	%0, %1		# atomic_sub		\n"
101		"	subu	%0, %2					\n"
102		"	sc	%0, %1					\n"
103		"	beqzl	%0, 1b					\n"
104		"	.set	mips0					\n"
105		: "=&r" (temp), "=m" (v->counter)
106		: "Ir" (i), "m" (v->counter));
107	} else if (kernel_uses_llsc) {
108		int temp;
109
110		do {
111			__asm__ __volatile__(
112			"	.set	mips3				\n"
113			"	ll	%0, %1		# atomic_sub	\n"
114			"	subu	%0, %2				\n"
115			"	sc	%0, %1				\n"
116			"	.set	mips0				\n"
117			: "=&r" (temp), "=m" (v->counter)
118			: "Ir" (i), "m" (v->counter));
119		} while (unlikely(!temp));
120	} else {
121		unsigned long flags;
122
123		raw_local_irq_save(flags);
124		v->counter -= i;
125		raw_local_irq_restore(flags);
126	}
127}
128
129/*
130 * Same as above, but return the result value
131 */
132static __inline__ int atomic_add_return(int i, atomic_t * v)
133{
134	int result;
135
136	smp_mb__before_llsc();
137
138	if (kernel_uses_llsc && R10000_LLSC_WAR) {
139		int temp;
140
141		__asm__ __volatile__(
142		"	.set	mips3					\n"
143		"1:	ll	%1, %2		# atomic_add_return	\n"
144		"	addu	%0, %1, %3				\n"
145		"	sc	%0, %2					\n"
146		"	beqzl	%0, 1b					\n"
147		"	addu	%0, %1, %3				\n"
148		"	.set	mips0					\n"
149		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
150		: "Ir" (i), "m" (v->counter)
151		: "memory");
152	} else if (kernel_uses_llsc) {
153		int temp;
154
155		do {
156			__asm__ __volatile__(
157			"	.set	mips3				\n"
158			"	ll	%1, %2	# atomic_add_return	\n"
159			"	addu	%0, %1, %3			\n"
160			"	sc	%0, %2				\n"
161			"	.set	mips0				\n"
162			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
163			: "Ir" (i), "m" (v->counter)
164			: "memory");
165		} while (unlikely(!result));
166
167		result = temp + i;
168	} else {
169		unsigned long flags;
170
171		raw_local_irq_save(flags);
172		result = v->counter;
173		result += i;
174		v->counter = result;
175		raw_local_irq_restore(flags);
176	}
177
178	smp_llsc_mb();
179
180	return result;
181}
182
183static __inline__ int atomic_sub_return(int i, atomic_t * v)
184{
185	int result;
186
187	smp_mb__before_llsc();
188
189	if (kernel_uses_llsc && R10000_LLSC_WAR) {
190		int temp;
191
192		__asm__ __volatile__(
193		"	.set	mips3					\n"
194		"1:	ll	%1, %2		# atomic_sub_return	\n"
195		"	subu	%0, %1, %3				\n"
196		"	sc	%0, %2					\n"
197		"	beqzl	%0, 1b					\n"
198		"	subu	%0, %1, %3				\n"
199		"	.set	mips0					\n"
200		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
201		: "Ir" (i), "m" (v->counter)
202		: "memory");
203
204		result = temp - i;
205	} else if (kernel_uses_llsc) {
206		int temp;
207
208		do {
209			__asm__ __volatile__(
210			"	.set	mips3				\n"
211			"	ll	%1, %2	# atomic_sub_return	\n"
212			"	subu	%0, %1, %3			\n"
213			"	sc	%0, %2				\n"
214			"	.set	mips0				\n"
215			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
216			: "Ir" (i), "m" (v->counter)
217			: "memory");
218		} while (unlikely(!result));
219
220		result = temp - i;
221	} else {
222		unsigned long flags;
223
224		raw_local_irq_save(flags);
225		result = v->counter;
226		result -= i;
227		v->counter = result;
228		raw_local_irq_restore(flags);
229	}
230
231	smp_llsc_mb();
232
233	return result;
234}
235
236/*
237 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
238 * @i: integer value to subtract
239 * @v: pointer of type atomic_t
240 *
 241 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
242 * The function returns the old value of @v minus @i.
243 */
244static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
245{
246	int result;
247
248	smp_mb__before_llsc();
249
250	if (kernel_uses_llsc && R10000_LLSC_WAR) {
251		int temp;
252
253		__asm__ __volatile__(
254		"	.set	mips3					\n"
255		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
256		"	subu	%0, %1, %3				\n"
257		"	bltz	%0, 1f					\n"
258		"	sc	%0, %2					\n"
259		"	.set	noreorder				\n"
260		"	beqzl	%0, 1b					\n"
261		"	 subu	%0, %1, %3				\n"
262		"	.set	reorder					\n"
263		"1:							\n"
264		"	.set	mips0					\n"
265		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
266		: "Ir" (i), "m" (v->counter)
267		: "memory");
268	} else if (kernel_uses_llsc) {
269		int temp;
270
271		__asm__ __volatile__(
272		"	.set	mips3					\n"
273		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
274		"	subu	%0, %1, %3				\n"
275		"	bltz	%0, 1f					\n"
276		"	sc	%0, %2					\n"
277		"	.set	noreorder				\n"
278		"	beqz	%0, 1b					\n"
279		"	 subu	%0, %1, %3				\n"
280		"	.set	reorder					\n"
281		"1:							\n"
282		"	.set	mips0					\n"
283		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
284		: "Ir" (i), "m" (v->counter)
285		: "memory");
286	} else {
287		unsigned long flags;
288
289		raw_local_irq_save(flags);
290		result = v->counter;
291		result -= i;
292		if (result >= 0)
293			v->counter = result;
294		raw_local_irq_restore(flags);
295	}
296
297	smp_llsc_mb();
298
299	return result;
300}
301
302#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
303#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
304
305/**
306 * __atomic_add_unless - add unless the number is a given value
307 * @v: pointer of type atomic_t
308 * @a: the amount to add to v...
309 * @u: ...unless v is equal to u.
310 *
311 * Atomically adds @a to @v, so long as it was not @u.
312 * Returns the old value of @v.
313 */
314static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
315{
316	int c, old;
317	c = atomic_read(v);
318	for (;;) {
319		if (unlikely(c == (u)))
320			break;
321		old = atomic_cmpxchg((v), c, c + (a));
322		if (likely(old == c))
323			break;
324		c = old;
325	}
326	return c;
327}
328
329#define atomic_dec_return(v) atomic_sub_return(1, (v))
330#define atomic_inc_return(v) atomic_add_return(1, (v))
331
332/*
333 * atomic_sub_and_test - subtract value from variable and test result
334 * @i: integer value to subtract
335 * @v: pointer of type atomic_t
336 *
337 * Atomically subtracts @i from @v and returns
338 * true if the result is zero, or false for all
339 * other cases.
340 */
341#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
342
343/*
344 * atomic_inc_and_test - increment and test
345 * @v: pointer of type atomic_t
346 *
347 * Atomically increments @v by 1
348 * and returns true if the result is zero, or false for all
349 * other cases.
350 */
351#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
352
353/*
354 * atomic_dec_and_test - decrement by 1 and test
355 * @v: pointer of type atomic_t
356 *
357 * Atomically decrements @v by 1 and
358 * returns true if the result is 0, or false for all other
359 * cases.
360 */
361#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
362
363/*
364 * atomic_dec_if_positive - decrement by 1 if old value positive
365 * @v: pointer of type atomic_t
366 */
367#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
368
369/*
370 * atomic_inc - increment atomic variable
371 * @v: pointer of type atomic_t
372 *
373 * Atomically increments @v by 1.
374 */
375#define atomic_inc(v) atomic_add(1, (v))
376
377/*
 378 * atomic_dec - decrement atomic variable
379 * @v: pointer of type atomic_t
380 *
381 * Atomically decrements @v by 1.
382 */
383#define atomic_dec(v) atomic_sub(1, (v))
384
385/*
386 * atomic_add_negative - add and test if negative
387 * @v: pointer of type atomic_t
388 * @i: integer value to add
389 *
390 * Atomically adds @i to @v and returns true
391 * if the result is negative, or false when
392 * result is greater than or equal to zero.
393 */
394#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
395
396#ifdef CONFIG_64BIT
397
398#define ATOMIC64_INIT(i)    { (i) }
399
400/*
401 * atomic64_read - read atomic variable
402 * @v: pointer of type atomic64_t
403 *
404 */
405#define atomic64_read(v)	(*(volatile long *)&(v)->counter)
406
407/*
408 * atomic64_set - set atomic variable
409 * @v: pointer of type atomic64_t
410 * @i: required value
411 */
412#define atomic64_set(v, i)	((v)->counter = (i))
413
414/*
415 * atomic64_add - add integer to atomic variable
416 * @i: integer value to add
417 * @v: pointer of type atomic64_t
418 *
419 * Atomically adds @i to @v.
420 */
421static __inline__ void atomic64_add(long i, atomic64_t * v)
422{
423	if (kernel_uses_llsc && R10000_LLSC_WAR) {
424		long temp;
425
426		__asm__ __volatile__(
427		"	.set	mips3					\n"
428		"1:	lld	%0, %1		# atomic64_add		\n"
429		"	daddu	%0, %2					\n"
430		"	scd	%0, %1					\n"
431		"	beqzl	%0, 1b					\n"
432		"	.set	mips0					\n"
433		: "=&r" (temp), "=m" (v->counter)
434		: "Ir" (i), "m" (v->counter));
435	} else if (kernel_uses_llsc) {
436		long temp;
437
438		do {
439			__asm__ __volatile__(
440			"	.set	mips3				\n"
441			"	lld	%0, %1		# atomic64_add	\n"
442			"	daddu	%0, %2				\n"
443			"	scd	%0, %1				\n"
444			"	.set	mips0				\n"
445			: "=&r" (temp), "=m" (v->counter)
446			: "Ir" (i), "m" (v->counter));
447		} while (unlikely(!temp));
448	} else {
449		unsigned long flags;
450
451		raw_local_irq_save(flags);
452		v->counter += i;
453		raw_local_irq_restore(flags);
454	}
455}
456
457/*
 458 * atomic64_sub - subtract integer from atomic variable
459 * @i: integer value to subtract
460 * @v: pointer of type atomic64_t
461 *
462 * Atomically subtracts @i from @v.
463 */
464static __inline__ void atomic64_sub(long i, atomic64_t * v)
465{
466	if (kernel_uses_llsc && R10000_LLSC_WAR) {
467		long temp;
468
469		__asm__ __volatile__(
470		"	.set	mips3					\n"
471		"1:	lld	%0, %1		# atomic64_sub		\n"
472		"	dsubu	%0, %2					\n"
473		"	scd	%0, %1					\n"
474		"	beqzl	%0, 1b					\n"
475		"	.set	mips0					\n"
476		: "=&r" (temp), "=m" (v->counter)
477		: "Ir" (i), "m" (v->counter));
478	} else if (kernel_uses_llsc) {
479		long temp;
480
481		do {
482			__asm__ __volatile__(
483			"	.set	mips3				\n"
484			"	lld	%0, %1		# atomic64_sub	\n"
485			"	dsubu	%0, %2				\n"
486			"	scd	%0, %1				\n"
487			"	.set	mips0				\n"
488			: "=&r" (temp), "=m" (v->counter)
489			: "Ir" (i), "m" (v->counter));
490		} while (unlikely(!temp));
491	} else {
492		unsigned long flags;
493
494		raw_local_irq_save(flags);
495		v->counter -= i;
496		raw_local_irq_restore(flags);
497	}
498}
499
500/*
501 * Same as above, but return the result value
502 */
503static __inline__ long atomic64_add_return(long i, atomic64_t * v)
504{
505	long result;
506
507	smp_mb__before_llsc();
508
509	if (kernel_uses_llsc && R10000_LLSC_WAR) {
510		long temp;
511
512		__asm__ __volatile__(
513		"	.set	mips3					\n"
514		"1:	lld	%1, %2		# atomic64_add_return	\n"
515		"	daddu	%0, %1, %3				\n"
516		"	scd	%0, %2					\n"
517		"	beqzl	%0, 1b					\n"
518		"	daddu	%0, %1, %3				\n"
519		"	.set	mips0					\n"
520		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
521		: "Ir" (i), "m" (v->counter)
522		: "memory");
523	} else if (kernel_uses_llsc) {
524		long temp;
525
526		do {
527			__asm__ __volatile__(
528			"	.set	mips3				\n"
529			"	lld	%1, %2	# atomic64_add_return	\n"
530			"	daddu	%0, %1, %3			\n"
531			"	scd	%0, %2				\n"
532			"	.set	mips0				\n"
533			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
534			: "Ir" (i), "m" (v->counter)
535			: "memory");
536		} while (unlikely(!result));
537
538		result = temp + i;
539	} else {
540		unsigned long flags;
541
542		raw_local_irq_save(flags);
543		result = v->counter;
544		result += i;
545		v->counter = result;
546		raw_local_irq_restore(flags);
547	}
548
549	smp_llsc_mb();
550
551	return result;
552}
553
554static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
555{
556	long result;
557
558	smp_mb__before_llsc();
559
560	if (kernel_uses_llsc && R10000_LLSC_WAR) {
561		long temp;
562
563		__asm__ __volatile__(
564		"	.set	mips3					\n"
565		"1:	lld	%1, %2		# atomic64_sub_return	\n"
566		"	dsubu	%0, %1, %3				\n"
567		"	scd	%0, %2					\n"
568		"	beqzl	%0, 1b					\n"
569		"	dsubu	%0, %1, %3				\n"
570		"	.set	mips0					\n"
571		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
572		: "Ir" (i), "m" (v->counter)
573		: "memory");
574	} else if (kernel_uses_llsc) {
575		long temp;
576
577		do {
578			__asm__ __volatile__(
579			"	.set	mips3				\n"
580			"	lld	%1, %2	# atomic64_sub_return	\n"
581			"	dsubu	%0, %1, %3			\n"
582			"	scd	%0, %2				\n"
583			"	.set	mips0				\n"
584			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
585			: "Ir" (i), "m" (v->counter)
586			: "memory");
587		} while (unlikely(!result));
588
589		result = temp - i;
590	} else {
591		unsigned long flags;
592
593		raw_local_irq_save(flags);
594		result = v->counter;
595		result -= i;
596		v->counter = result;
597		raw_local_irq_restore(flags);
598	}
599
600	smp_llsc_mb();
601
602	return result;
603}
604
605/*
606 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
607 * @i: integer value to subtract
608 * @v: pointer of type atomic64_t
609 *
 610 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
611 * The function returns the old value of @v minus @i.
612 */
613static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
614{
615	long result;
616
617	smp_mb__before_llsc();
618
619	if (kernel_uses_llsc && R10000_LLSC_WAR) {
620		long temp;
621
622		__asm__ __volatile__(
623		"	.set	mips3					\n"
624		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
625		"	dsubu	%0, %1, %3				\n"
626		"	bltz	%0, 1f					\n"
627		"	scd	%0, %2					\n"
628		"	.set	noreorder				\n"
629		"	beqzl	%0, 1b					\n"
630		"	 dsubu	%0, %1, %3				\n"
631		"	.set	reorder					\n"
632		"1:							\n"
633		"	.set	mips0					\n"
634		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
635		: "Ir" (i), "m" (v->counter)
636		: "memory");
637	} else if (kernel_uses_llsc) {
638		long temp;
639
640		__asm__ __volatile__(
641		"	.set	mips3					\n"
642		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
643		"	dsubu	%0, %1, %3				\n"
644		"	bltz	%0, 1f					\n"
645		"	scd	%0, %2					\n"
646		"	.set	noreorder				\n"
647		"	beqz	%0, 1b					\n"
648		"	 dsubu	%0, %1, %3				\n"
649		"	.set	reorder					\n"
650		"1:							\n"
651		"	.set	mips0					\n"
652		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
653		: "Ir" (i), "m" (v->counter)
654		: "memory");
655	} else {
656		unsigned long flags;
657
658		raw_local_irq_save(flags);
659		result = v->counter;
660		result -= i;
661		if (result >= 0)
662			v->counter = result;
663		raw_local_irq_restore(flags);
664	}
665
666	smp_llsc_mb();
667
668	return result;
669}
670
671#define atomic64_cmpxchg(v, o, n) \
672	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
673#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
674
675/**
676 * atomic64_add_unless - add unless the number is a given value
677 * @v: pointer of type atomic64_t
678 * @a: the amount to add to v...
679 * @u: ...unless v is equal to u.
680 *
681 * Atomically adds @a to @v, so long as it was not @u.
 682 * Returns true iff @v was not @u.
683 */
684static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
685{
686	long c, old;
687	c = atomic64_read(v);
688	for (;;) {
689		if (unlikely(c == (u)))
690			break;
691		old = atomic64_cmpxchg((v), c, c + (a));
692		if (likely(old == c))
693			break;
694		c = old;
695	}
696	return c != (u);
697}
698
699#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
700
701#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
702#define atomic64_inc_return(v) atomic64_add_return(1, (v))
703
704/*
705 * atomic64_sub_and_test - subtract value from variable and test result
706 * @i: integer value to subtract
707 * @v: pointer of type atomic64_t
708 *
709 * Atomically subtracts @i from @v and returns
710 * true if the result is zero, or false for all
711 * other cases.
712 */
713#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
714
715/*
716 * atomic64_inc_and_test - increment and test
717 * @v: pointer of type atomic64_t
718 *
719 * Atomically increments @v by 1
720 * and returns true if the result is zero, or false for all
721 * other cases.
722 */
723#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
724
725/*
726 * atomic64_dec_and_test - decrement by 1 and test
727 * @v: pointer of type atomic64_t
728 *
729 * Atomically decrements @v by 1 and
730 * returns true if the result is 0, or false for all other
731 * cases.
732 */
733#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
734
735/*
736 * atomic64_dec_if_positive - decrement by 1 if old value positive
737 * @v: pointer of type atomic64_t
738 */
739#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)
740
741/*
742 * atomic64_inc - increment atomic variable
743 * @v: pointer of type atomic64_t
744 *
745 * Atomically increments @v by 1.
746 */
747#define atomic64_inc(v) atomic64_add(1, (v))
748
749/*
 750 * atomic64_dec - decrement atomic variable
751 * @v: pointer of type atomic64_t
752 *
753 * Atomically decrements @v by 1.
754 */
755#define atomic64_dec(v) atomic64_sub(1, (v))
756
757/*
758 * atomic64_add_negative - add and test if negative
759 * @v: pointer of type atomic64_t
760 * @i: integer value to add
761 *
762 * Atomically adds @i to @v and returns true
763 * if the result is negative, or false when
764 * result is greater than or equal to zero.
765 */
766#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
767
768#endif /* CONFIG_64BIT */
769
770/*
 771 * The atomic*_return operations are serializing, but the plain (non-*_return)
 772 * versions are not.
773 */
774#define smp_mb__before_atomic_dec()	smp_mb__before_llsc()
775#define smp_mb__after_atomic_dec()	smp_llsc_mb()
776#define smp_mb__before_atomic_inc()	smp_mb__before_llsc()
777#define smp_mb__after_atomic_inc()	smp_llsc_mb()
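(Editorial aside: a sketch of how these barrier macros were meant to be paired with the non-serializing operations; the structure and field names are invented.)

struct example_work {
	int status;				/* hypothetical result field */
	atomic_t pending;
};

static void example_complete(struct example_work *w)
{
	w->status = 1;
	smp_mb__before_atomic_dec();		/* order the store before the decrement */
	atomic_dec(&w->pending);		/* atomic_dec() alone is not serializing */
}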
778
779#endif /* _ASM_ATOMIC_H */