v5.9 — arch/ia64/include/asm/atomic.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>


#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int							\
ia64_atomic_fetch_##op (int i, atomic_t *v)				\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i)						\
	static const int __ia64_atomic_p = __builtin_constant_p(i) ?	\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
	__ia64_atomic_p
#else
#define __ia64_atomic_const(i)	0
#endif

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic_fetch_add(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})

#define atomic_fetch_sub(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

#define ATOMIC64_OP(op, c_op)						\
static __inline__ s64							\
ia64_atomic64_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64							\
ia64_atomic64_fetch_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define atomic64_add_return(i,v)					\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic64_fetch_add(i,v)						\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
})

#define atomic64_fetch_sub(i,v)						\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))

#endif /* _ASM_IA64_ATOMIC_H */
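Every arithmetic operation above that cannot be folded into an ia64 fetchadd instruction falls back to a compare-and-exchange retry loop: read the counter, compute "old c_op i", and retry if ia64_cmpxchg() observes that another CPU changed the counter in the meantime. The __ia64_atomic_const() test exists only so that compile-time constants of 1, 4, 8, 16 (and their negatives) can take the cheaper ia64_fetchadd path instead of the loop. The following stand-alone sketch mirrors that retry loop with C11 atomics rather than the kernel's ia64_cmpxchg intrinsic; the function and variable names are illustrative only and are not part of this header.

/* Sketch of the ATOMIC_OP retry loop using C11 atomics (illustrative only). */
#include <stdatomic.h>
#include <stdio.h>

static int my_atomic_add_return(int i, _Atomic int *v)
{
	int old, new;

	do {
		old = atomic_load(v);		/* like atomic_read(v)  */
		new = old + i;			/* like "old c_op i"    */
		/* Retry if *v no longer equals old, like the ia64_cmpxchg() check. */
	} while (!atomic_compare_exchange_weak(v, &old, new));

	return new;	/* the non-fetch variant returns the new value */
}

int main(void)
{
	_Atomic int counter = 0;

	printf("%d\n", my_atomic_add_return(5, &counter));	/* prints 5 */
	printf("%d\n", my_atomic_add_return(-2, &counter));	/* prints 3 */
	return 0;
}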
v4.10.11 — arch/ia64/include/asm/atomic.h
 
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>


#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int							\
ia64_atomic_fetch_##op (int i, atomic_t *v)				\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic_fetch_add(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})

#define atomic_fetch_sub(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

#define ATOMIC64_OP(op, c_op)						\
static __inline__ long							\
ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
{									\
	__s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ long							\
ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v)			\
{									\
	__s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic64_fetch_add(i,v)						\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))	\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
})

#define atomic64_fetch_sub(i,v)						\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}


static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#endif /* _ASM_IA64_ATOMIC_H */
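Compared with v5.9, this older version still open-codes the conditional helpers (__atomic_add_unless(), atomic64_add_unless(), atomic64_dec_if_positive()) as cmpxchg loops that bail out when a guard value is observed. The sketch below mirrors the add-unless pattern with C11 atomics in user space; the names are illustrative only and are not part of the kernel API.

/* Sketch of the __atomic_add_unless() pattern using C11 atomics (illustrative only). */
#include <stdatomic.h>
#include <stdio.h>

/* Add "a" to *v unless *v == u; return the value observed before the add. */
static int my_add_unless(_Atomic int *v, int a, int u)
{
	int c = atomic_load(v);

	for (;;) {
		if (c == u)		/* guard hit: refuse to add */
			break;
		/* On failure the current value of *v is written back into c. */
		if (atomic_compare_exchange_weak(v, &c, c + a))
			break;
	}
	return c;
}

int main(void)
{
	_Atomic int refs = 1;

	/* "increment unless zero", the pattern behind atomic64_inc_not_zero(). */
	printf("%d\n", my_add_unless(&refs, 1, 0));	/* prints 1, refs is now 2 */
	atomic_store(&refs, 0);
	printf("%d\n", my_add_unless(&refs, 1, 0));	/* prints 0: add refused   */
	return 0;
}

Note that atomic64_add_unless() in the header returns whether the add happened (c != u) rather than the old value, which is what lets atomic64_inc_not_zero() be a simple wrapper around it.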