v5.9 (ia64 <asm/atomic.h>)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>


#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int							\
ia64_atomic_fetch_##op (int i, atomic_t *v)				\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

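/*
 * Illustrative note, not part of the original header: each ATOMIC_OP
 * instantiation above generates a compare-and-exchange retry loop.
 * For example, ATOMIC_OPS(add, +) expands to roughly:
 *
 *	static __inline__ int
 *	ia64_atomic_add (int i, atomic_t *v)
 *	{
 *		__s32 old, new;
 *		CMPXCHG_BUGCHECK_DECL
 *
 *		do {
 *			CMPXCHG_BUGCHECK(v);
 *			old = atomic_read(v);
 *			new = old + i;
 *		} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
 *		return new;
 *	}
 *
 * i.e. keep re-reading the counter and retrying the cmpxchg until no
 * other CPU modified it in between.
 */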
#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i)						\
	static const int __ia64_atomic_p = __builtin_constant_p(i) ?	\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
	__ia64_atomic_p
#else
#define __ia64_atomic_const(i)	0
#endif
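/*
 * Illustrative note, not part of the original header: the constants
 * 1, 4, 8, 16 and their negatives are singled out here because the
 * ia64 fetchadd instruction only accepts those values as immediate
 * increments.  When the addend is one of them and known at compile
 * time, the *_return/*_fetch_* macros below can use a single fetchadd;
 * otherwise they fall back to the ia64_atomic_* cmpxchg loops.
 */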

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic_fetch_add(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})

#define atomic_fetch_sub(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

#define ATOMIC64_OP(op, c_op)						\
static __inline__ s64							\
ia64_atomic64_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64							\
ia64_atomic64_fetch_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define atomic64_add_return(i,v)					\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic64_fetch_add(i,v)						\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
})

#define atomic64_fetch_sub(i,v)						\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))

#endif /* _ASM_IA64_ATOMIC_H */
v3.1 (ia64 <asm/atomic.h>)
 
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/system.h>


#define ATOMIC_INIT(i)		((atomic_t) { (i) })
#define ATOMIC64_INIT(i)	((atomic64_t) { (i) })

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))

static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}


static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

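/*
 * Illustrative note, not part of the original header: both *_add_unless
 * helpers above use the same cmpxchg retry pattern, but their return
 * values differ: __atomic_add_unless() returns the value the counter
 * held before the attempt, while atomic64_add_unless() returns whether
 * the addition actually happened (c != u).
 */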
#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* _ASM_IA64_ATOMIC_H */
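
For reference, a minimal usage sketch, not taken from either header: the function and variable names below are invented for illustration, and the inc/dec helpers are defined directly in the v3.1 version above (newer kernels provide them via generic wrappers). It shows how this atomic_t API is typically consumed, e.g. for reference counting:

#include <linux/atomic.h>	/* pulls in the architecture's <asm/atomic.h> */

static atomic_t my_refcnt = ATOMIC_INIT(1);

static void my_get(void)
{
	atomic_inc(&my_refcnt);			/* atomic_add_return(1, ...) under the hood */
}

static int my_put(void)
{
	return atomic_dec_and_test(&my_refcnt);	/* true once the counter drops to zero */
}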