v4.6: arch/ia64/include/asm/atomic.h
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>


#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

ATOMIC_OP(add, +)
ATOMIC_OP(sub, -)

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

ATOMIC_OP(and, &)
ATOMIC_OP(or, |)
ATOMIC_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_xor(i,v)

#undef ATOMIC_OP

#define ATOMIC64_OP(op, c_op)						\
static __inline__ long							\
ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
{									\
	__s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

ATOMIC64_OP(add, +)
ATOMIC64_OP(sub, -)

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

ATOMIC64_OP(and, &)
ATOMIC64_OP(or, |)
ATOMIC64_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_xor(i,v)

#undef ATOMIC64_OP

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}


static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#endif /* _ASM_IA64_ATOMIC_H */
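
The ia64_atomic_##op() helpers in the v4.6 header above implement each read-modify-write by re-reading the counter and retrying a compare-and-swap until the value they observed is still the one in memory. The following is a minimal user-space sketch of that retry loop, not the kernel's code: it assumes GCC's __sync_val_compare_and_swap() builtin in place of the ia64_cmpxchg() intrinsic, and the my_atomic_t / my_atomic_add names are made up for the example.

#include <stdio.h>

/* User-space stand-in for atomic_t, which likewise wraps a plain int. */
typedef struct { int counter; } my_atomic_t;

/*
 * Same shape as the ia64_atomic_##op() loop above: read the current value,
 * compute the new one, and publish it only if nobody raced with us.
 * __sync_val_compare_and_swap() returns the value it actually found, so a
 * mismatch with 'old' means another thread got in first and we must retry.
 */
static int my_atomic_add(int i, my_atomic_t *v)
{
	int old, new;

	do {
		old = v->counter;
		new = old + i;
	} while (__sync_val_compare_and_swap(&v->counter, old, new) != old);

	return new;
}

int main(void)
{
	my_atomic_t v = { 0 };

	printf("%d\n", my_atomic_add(5, &v));	/* prints 5 */
	printf("%d\n", my_atomic_add(-2, &v));	/* prints 3 */
	return 0;
}

The fetch-and-add fast path in atomic_add_return() and atomic_sub_return() is taken only when the increment is a compile-time constant that the ia64 fetchadd instruction can encode (plus or minus 1, 4, 8, 16); any other value falls back to a cmpxchg loop of this shape.
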
v3.1: arch/ia64/include/asm/atomic.h
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/system.h>


#define ATOMIC_INIT(i)		((atomic_t) { (i) })
#define ATOMIC64_INIT(i)	((atomic64_t) { (i) })

#define atomic_read(v)		(*(volatile int *)&(v)->counter)
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

#define atomic_set(v,i)		(((v)->counter) = (i))
#define atomic64_set(v,i)	(((v)->counter) = (i))

static __inline__ int
ia64_atomic_add (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_add (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old + i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

static __inline__ int
ia64_atomic_sub (int i, atomic_t *v)
{
	__s32 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
	return new;
}

static __inline__ long
ia64_atomic64_sub (__s64 i, atomic64_t *v)
{
	__s64 old, new;
	CMPXCHG_BUGCHECK_DECL

	do {
		CMPXCHG_BUGCHECK(v);
		old = atomic64_read(v);
		new = old - i;
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old);
	return new;
}

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}


static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==   1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==   8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i ==  -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i ==  -8) || (__ia64_asr_i == -16)))	\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)

#define atomic_add(i,v)			atomic_add_return((i), (v))
#define atomic_sub(i,v)			atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* _ASM_IA64_ATOMIC_H */
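
Both versions implement __atomic_add_unless() and atomic64_add_unless() as a cmpxchg loop that bails out as soon as the counter already holds the forbidden value u, which is what makes atomic64_inc_not_zero() suitable for "take a reference only if the object is still live" patterns. The sketch below mirrors that loop in user-space C; it is illustrative only, again substituting GCC's __sync_val_compare_and_swap() for the kernel cmpxchg, and all my_* names are invented for the example.

#include <stdio.h>
#include <stdbool.h>

/* User-space stand-in for atomic_t. */
typedef struct { int counter; } my_atomic_t;

/*
 * Add 'a' to *v unless it currently equals 'u'; same loop as
 * __atomic_add_unless() above.  Returns the value seen before the
 * (possible) addition, so callers can test "!= u" to learn whether the
 * add actually happened.
 */
static int my_add_unless(my_atomic_t *v, int a, int u)
{
	int c, old;

	c = v->counter;
	for (;;) {
		if (c == u)
			break;			/* forbidden value: do nothing */
		old = __sync_val_compare_and_swap(&v->counter, c, c + a);
		if (old == c)
			break;			/* our update won the race */
		c = old;			/* lost the race: retry with fresh value */
	}
	return c;
}

/* inc_not_zero pattern: succeed only while the count is still nonzero. */
static bool my_inc_not_zero(my_atomic_t *v)
{
	return my_add_unless(v, 1, 0) != 0;
}

int main(void)
{
	my_atomic_t live = { 2 }, dead = { 0 };

	printf("%d\n", my_inc_not_zero(&live));	/* 1: count went 2 -> 3 */
	printf("%d\n", my_inc_not_zero(&dead));	/* 0: count stayed 0 */
	return 0;
}
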