arch/ia64/include/asm/atomic.h (v3.15)
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>


#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))

static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}


static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* _ASM_IA64_ATOMIC_H */
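The header comment above mentions resource counting; a minimal usage sketch of the 32-bit interface, assuming normal kernel context, might look like the following. struct foo and its helpers are hypothetical names, and real callers include the generic <linux/atomic.h> wrapper rather than this architecture header directly.

#include <linux/atomic.h>

/* Hypothetical object with an embedded reference count. */
struct foo {
	atomic_t refcount;
};

static void foo_init(struct foo *f)
{
	atomic_set(&f->refcount, 1);
}

/*
 * Take a reference only while the object is still live (count != 0).
 * __atomic_add_unless() returns the old value, so a return of 0 means
 * the count had already dropped to zero and no reference was taken.
 */
static int foo_get(struct foo *f)
{
	return __atomic_add_unless(&f->refcount, 1, 0) != 0;
}

/*
 * Drop a reference; returns true for the caller that released the last
 * one and is therefore responsible for freeing the object.
 */
static int foo_put(struct foo *f)
{
	return atomic_dec_and_test(&f->refcount);
}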
arch/ia64/include/asm/atomic.h (v3.1)
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/system.h>


#define ATOMIC_INIT(i)		((atomic_t) { (i) })
#define ATOMIC64_INIT(i)	((atomic64_t) { (i) })

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))

static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}


static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* _ASM_IA64_ATOMIC_H */
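In both versions, atomic_add_return() and atomic_sub_return() take the ia64_fetch_and_add() fast path only when the operand is a compile-time constant in the set the IA-64 fetchadd instruction accepts as an immediate (1, 4, 8, 16 and their negatives); any other value falls back to the cmpxchg retry loop. A small illustrative sketch, with made-up helper names:

#include <linux/atomic.h>

/*
 * Constant increment from the fetchadd set: __builtin_constant_p(4) is
 * true and 4 is an accepted immediate, so the macro resolves to
 * ia64_fetch_and_add(4, &v->counter).
 */
static int bump_by_four(atomic_t *v)
{
	return atomic_add_return(4, v);
}

/*
 * Runtime increment: __builtin_constant_p(n) is false, so the macro
 * falls back to the ia64_atomic_add() cmpxchg retry loop.
 */
static int bump_by(atomic_t *v, int n)
{
	return atomic_add_return(n, v);
}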