v3.15
 
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>


#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))

static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}


static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* _ASM_IA64_ATOMIC_H */
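
The atomic_add_return()/atomic_sub_return() macros above take the ia64_fetch_and_add() fast path only when the increment is a compile-time constant that the IA-64 fetchadd immediate encoding accepts (±1, ±4, ±8, ±16); any other value falls back to the ia64_atomic_add()/ia64_atomic_sub() compare-and-swap loop. The snippet below is a minimal user-space sketch of that retry loop, using GCC's __atomic builtins as a stand-in for ia64_cmpxchg(); the my_atomic_* names are hypothetical and the code is an illustration, not part of the kernel header.

/* Illustration only -- not part of the header above. */
#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

static int my_atomic_add_return(int i, my_atomic_t *v)
{
	int old, new;

	do {
		/* Snapshot the current value and compute the target. */
		old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
		new = old + i;
		/*
		 * Try to install old+i.  If another thread changed the
		 * counter since the read, the compare-and-swap fails and
		 * we retry with a fresh value, just like the
		 * ia64_cmpxchg(acq, ...) loop in ia64_atomic_add().
		 */
	} while (!__atomic_compare_exchange_n(&v->counter, &old, new, 0,
					      __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
	return new;
}

int main(void)
{
	my_atomic_t v = { 0 };

	printf("%d\n", my_atomic_add_return(5, &v));	/* prints 5 */
	printf("%d\n", my_atomic_add_return(-1, &v));	/* prints 4 */
	return 0;
}
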
v4.17
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>


#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int							\
ia64_atomic_fetch_##op (int i, atomic_t *v)				\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i)						\
	static const int __ia64_atomic_p = __builtin_constant_p(i) ?	\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
	__ia64_atomic_p
#else
#define __ia64_atomic_const(i)	0
#endif

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic_fetch_add(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})

#define atomic_fetch_sub(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

#define ATOMIC64_OP(op, c_op)						\
static __inline__ long							\
ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
{									\
	__s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ long							\
ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v)			\
{									\
	__s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic64_fetch_add(i,v)						\
({									\
	long __ia64_aar_i = (i);					\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
})

#define atomic64_fetch_sub(i,v)						\
({									\
	long __ia64_asr_i = (i);					\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}


static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#endif /* _ASM_IA64_ATOMIC_H */
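
Compared with the v3.15 file above, the v4.17 header no longer spells out each arithmetic routine by hand: ATOMIC_OP()/ATOMIC_FETCH_OP() (and their ATOMIC64_* counterparts) stamp out one function per operator, with the OP form returning the new value and the FETCH_OP form returning the old one, while __ia64_atomic_const() centralizes the constant-immediate check for the fetchadd fast path. The sketch below illustrates that macro-generation pattern in plain user-space C, using GCC's __atomic builtins in place of ia64_cmpxchg(); MY_ATOMIC_OP, MY_ATOMIC_FETCH_OP and the my_atomic_* names are hypothetical and the code is an illustration, not part of the kernel header.

/* Illustration only -- not part of the header above. */
#include <stdio.h>

typedef struct { int counter; } my_atomic_t;

/* Generate an op that returns the NEW value, like ia64_atomic_<op>(). */
#define MY_ATOMIC_OP(op, c_op)						\
static inline int my_atomic_##op(int i, my_atomic_t *v)		\
{									\
	int old, new;							\
	do {								\
		old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);	\
		new = old c_op i;					\
	} while (!__atomic_compare_exchange_n(&v->counter, &old, new,	\
			0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));	\
	return new;							\
}

/* Generate an op that returns the OLD value, like ia64_atomic_fetch_<op>(). */
#define MY_ATOMIC_FETCH_OP(op, c_op)					\
static inline int my_atomic_fetch_##op(int i, my_atomic_t *v)		\
{									\
	int old, new;							\
	do {								\
		old = __atomic_load_n(&v->counter, __ATOMIC_RELAXED);	\
		new = old c_op i;					\
	} while (!__atomic_compare_exchange_n(&v->counter, &old, new,	\
			0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));	\
	return old;							\
}

/* Stamp out a few operators, mirroring ATOMIC_OPS()/ATOMIC_FETCH_OP(). */
MY_ATOMIC_OP(add, +)
MY_ATOMIC_OP(sub, -)
MY_ATOMIC_FETCH_OP(add, +)
MY_ATOMIC_FETCH_OP(or, |)

int main(void)
{
	my_atomic_t v = { 3 };

	printf("%d\n", my_atomic_add(4, &v));		/* 7: new value */
	printf("%d\n", my_atomic_fetch_add(1, &v));	/* 7: old value */
	printf("%d\n", my_atomic_fetch_or(8, &v));	/* 8: old value */
	printf("%d\n", my_atomic_sub(6, &v));		/* 2: new value */
	return 0;
}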