#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below! The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *      David Mosberger-Tang <davidm@hpl.hp.com>
 */
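/*
 * Illustrative sketch only (not part of this header): a minimal
 * resource-counting use of the atomic_t API defined below.  The structure
 * and helper names (foo_resource, foo_release) are hypothetical.
 *
 *      static struct foo_resource {
 *              atomic_t refcount;
 *      } res = { .refcount = ATOMIC_INIT(1) };
 *
 *      void foo_get(void)
 *      {
 *              atomic_inc(&res.refcount);
 *      }
 *
 *      void foo_put(void)
 *      {
 *              if (atomic_dec_and_test(&res.refcount))
 *                      foo_release(&res);      (hypothetical cleanup)
 *      }
 */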
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>


#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }

#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic64_read(v) READ_ONCE((v)->counter)

#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i))

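/*
 * ATOMIC_OP()/ATOMIC_FETCH_OP() generate the cmpxchg-based fallbacks: each
 * helper re-reads the counter, computes old <c_op> i, and retries the
 * compare-and-exchange (with acquire semantics) until no other CPU has
 * modified the counter in between.  The plain op returns the new value,
 * while the fetch variant returns the value observed before the update.
 */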
#define ATOMIC_OP(op, c_op) \
static __inline__ int \
ia64_atomic_##op (int i, atomic_t *v) \
{ \
        __s32 old, new; \
        CMPXCHG_BUGCHECK_DECL \
 \
        do { \
                CMPXCHG_BUGCHECK(v); \
                old = atomic_read(v); \
                new = old c_op i; \
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
        return new; \
}

#define ATOMIC_FETCH_OP(op, c_op) \
static __inline__ int \
ia64_atomic_fetch_##op (int i, atomic_t *v) \
{ \
        __s32 old, new; \
        CMPXCHG_BUGCHECK_DECL \
 \
        do { \
                CMPXCHG_BUGCHECK(v); \
                old = atomic_read(v); \
                new = old c_op i; \
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
        return old; \
}

#define ATOMIC_OPS(op, c_op) \
        ATOMIC_OP(op, c_op) \
        ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

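/*
 * For small constant deltas (+/-1, 4, 8, 16) the ia64 fetchadd instruction
 * can do the update directly -- those are the only immediates the hardware
 * accepts -- so the *_return macros use ia64_fetch_and_add() on that fast
 * path (with the operand negated for subtraction) and fall back to the
 * cmpxchg-based helpers for any other operand.
 */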
#define atomic_add_return(i,v) \
({ \
        int __ia64_aar_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
             || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
             || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
                : ia64_atomic_add(__ia64_aar_i, v); \
})

#define atomic_sub_return(i,v) \
({ \
        int __ia64_asr_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
             || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
             || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
                : ia64_atomic_sub(__ia64_asr_i, v); \
})

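/*
 * atomic_fetch_add()/atomic_fetch_sub() use the same constant-operand fast
 * path, but call ia64_fetchadd(..., acq), so the value returned is the one
 * the counter held before the update.
 */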
#define atomic_fetch_add(i,v) \
({ \
        int __ia64_aar_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
             || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
             || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
                ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
                : ia64_atomic_fetch_add(__ia64_aar_i, v); \
})

#define atomic_fetch_sub(i,v) \
({ \
        int __ia64_asr_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
             || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
             || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
                ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
                : ia64_atomic_fetch_sub(__ia64_asr_i, v); \
})

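/*
 * There is no fetchadd-style instruction for AND/OR/XOR, so the bitwise
 * operations always go through the cmpxchg loop.  The void-returning
 * wrappers below simply discard the old value returned by the fetch
 * variants.
 */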
ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

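/*
 * The 64-bit generators below mirror the 32-bit ones above, operating on
 * the 8-byte counter of an atomic64_t.
 */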
#define ATOMIC64_OP(op, c_op) \
static __inline__ long \
ia64_atomic64_##op (__s64 i, atomic64_t *v) \
{ \
        __s64 old, new; \
        CMPXCHG_BUGCHECK_DECL \
 \
        do { \
                CMPXCHG_BUGCHECK(v); \
                old = atomic64_read(v); \
                new = old c_op i; \
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
        return new; \
}

#define ATOMIC64_FETCH_OP(op, c_op) \
static __inline__ long \
ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v) \
{ \
        __s64 old, new; \
        CMPXCHG_BUGCHECK_DECL \
 \
        do { \
                CMPXCHG_BUGCHECK(v); \
                old = atomic64_read(v); \
                new = old c_op i; \
        } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
        return old; \
}

#define ATOMIC64_OPS(op, c_op) \
        ATOMIC64_OP(op, c_op) \
        ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define atomic64_add_return(i,v) \
({ \
        long __ia64_aar_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
             || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
             || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
                ? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
                : ia64_atomic64_add(__ia64_aar_i, v); \
})

#define atomic64_sub_return(i,v) \
({ \
        long __ia64_asr_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
             || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
             || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
                ? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
                : ia64_atomic64_sub(__ia64_asr_i, v); \
})

#define atomic64_fetch_add(i,v) \
({ \
        long __ia64_aar_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_aar_i == 1) || (__ia64_aar_i == 4) \
             || (__ia64_aar_i == 8) || (__ia64_aar_i == 16) \
             || (__ia64_aar_i == -1) || (__ia64_aar_i == -4) \
             || (__ia64_aar_i == -8) || (__ia64_aar_i == -16))) \
                ? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
                : ia64_atomic64_fetch_add(__ia64_aar_i, v); \
})

#define atomic64_fetch_sub(i,v) \
({ \
        long __ia64_asr_i = (i); \
        (__builtin_constant_p(i) \
         && (   (__ia64_asr_i == 1) || (__ia64_asr_i == 4) \
             || (__ia64_asr_i == 8) || (__ia64_asr_i == 16) \
             || (__ia64_asr_i == -1) || (__ia64_asr_i == -4) \
             || (__ia64_asr_i == -8) || (__ia64_asr_i == -16))) \
                ? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
                : ia64_atomic64_fetch_sub(__ia64_asr_i, v); \
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

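/*
 * cmpxchg()/xchg() already handle naturally aligned 4- and 8-byte objects,
 * so the atomic{,64}_cmpxchg()/_xchg() wrappers just forward to them on
 * the counter field.
 */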
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
        (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

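/*
 * __atomic_add_unless(): add @a to @v unless @v == @u.  Returns the value
 * observed before any update, so callers compare it against @u to tell
 * whether the add happened.  atomic64_add_unless() differs in that it
 * returns non-zero iff the add was performed.
 */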
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}


static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long c, old;
        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic64_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

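/*
 * atomic64_dec_if_positive(): decrement @v only if the result would not be
 * negative.  Returns the would-be decremented value; @v is left untouched
 * (and a negative number is returned) if the counter was already <= 0.
 */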
static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
        long c, old, dec;
        c = atomic64_read(v);
        for (;;) {
                dec = c - 1;
                if (unlikely(dec < 0))
                        break;
                old = atomic64_cmpxchg((v), c, dec);
                if (likely(old == c))
                        break;
                c = old;
        }
        return dec;
}

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
        return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
        return atomic64_add_return(i, v) < 0;
}

#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))
#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

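/*
 * The non-value-returning arithmetic ops reuse the *_return forms; the
 * cast to void makes it explicit that the result is discarded and keeps
 * these forms from being used as values.
 */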
#define atomic_add(i,v) (void)atomic_add_return((i), (v))
#define atomic_sub(i,v) (void)atomic_sub_return((i), (v))
#define atomic_inc(v) atomic_add(1, (v))
#define atomic_dec(v) atomic_sub(1, (v))

#define atomic64_add(i,v) (void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v) (void)atomic64_sub_return((i), (v))
#define atomic64_inc(v) atomic64_add(1, (v))
#define atomic64_dec(v) atomic64_sub(1, (v))

#endif /* _ASM_IA64_ATOMIC_H */