/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below! The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))
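/*
 * Usage sketch (illustrative, not part of this header): atomic_read()
 * and atomic_set() are plain loads and stores, but READ_ONCE() and
 * WRITE_ONCE() force the compiler to emit exactly one access and
 * prevent load/store tearing:
 *
 *	atomic_t refs = ATOMIC_INIT(1);
 *
 *	atomic_set(&refs, 2);			// single untorn store
 *	int snapshot = atomic_read(&refs);	// single untorn load
 *
 * Neither implies a memory barrier; callers needing ordering must add
 * one themselves (see <asm/barrier.h>).
 */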
#define ATOMIC_OP(op, c_op) \
static __inline__ int \
ia64_atomic_##op (int i, atomic_t *v) \
{ \
	__s32 old, new; \
	CMPXCHG_BUGCHECK_DECL \
	\
	do { \
		CMPXCHG_BUGCHECK(v); \
		old = atomic_read(v); \
		new = old c_op i; \
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new; \
}

#define ATOMIC_FETCH_OP(op, c_op) \
static __inline__ int \
ia64_atomic_fetch_##op (int i, atomic_t *v) \
{ \
	__s32 old, new; \
	CMPXCHG_BUGCHECK_DECL \
	\
	do { \
		CMPXCHG_BUGCHECK(v); \
		old = atomic_read(v); \
		new = old c_op i; \
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old; \
}

#define ATOMIC_OPS(op, c_op) \
	ATOMIC_OP(op, c_op) \
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)
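/*
 * Illustrative expansion (a sketch, not generated code): the two
 * ATOMIC_OPS() lines above instantiate four helpers --
 * ia64_atomic_add(), ia64_atomic_fetch_add(), ia64_atomic_sub() and
 * ia64_atomic_fetch_sub().  Each is a compare-and-exchange retry loop;
 * e.g. ia64_atomic_add(i, v) behaves roughly like:
 *
 *	do {
 *		old = atomic_read(v);
 *		new = old + i;
 *	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
 *	return new;
 *
 * The loop retries until no other CPU modified the counter between the
 * read and the cmpxchg.
 */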
#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i) \
	static const int __ia64_atomic_p = __builtin_constant_p(i) ? \
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 || \
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0; \
	__ia64_atomic_p
#else
#define __ia64_atomic_const(i)	0
#endif
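/*
 * Why only +/-1, 4, 8 and 16: the ia64 fetchadd4/fetchadd8
 * instructions accept exactly those immediate increments, so only a
 * compile-time constant from that set can take the single-instruction
 * fast path; every other operand falls back to the cmpxchg loops
 * above.  For example (illustrative):
 *
 *	atomic_add(8, &v);	// constant in the set -> fetchadd
 *	atomic_add(5, &v);	// anything else -> ia64_atomic_add() loop
 */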
#define atomic_add_return(i,v) \
({ \
	int __ia64_aar_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
		: ia64_atomic_add(__ia64_aar_i, v); \
})

#define atomic_sub_return(i,v) \
({ \
	int __ia64_asr_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
		: ia64_atomic_sub(__ia64_asr_i, v); \
})

#define atomic_fetch_add(i,v) \
({ \
	int __ia64_aar_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
		: ia64_atomic_fetch_add(__ia64_aar_i, v); \
})

#define atomic_fetch_sub(i,v) \
({ \
	int __ia64_asr_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
		: ia64_atomic_fetch_sub(__ia64_asr_i, v); \
})
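/*
 * Return-value convention (usage sketch, values illustrative): the
 * *_return() forms yield the value after the update, the fetch_*()
 * forms yield the value before it:
 *
 *	atomic_t v = ATOMIC_INIT(10);
 *
 *	atomic_add_return(5, &v);	// returns 15
 *	atomic_fetch_add(5, &v);	// returns 15, v is now 20
 */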
ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)
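/*
 * Note: there is no fetchadd-style shortcut for the bitwise ops;
 * and/or/xor always go through the ia64_cmpxchg() retry loop.  The
 * non-fetch forms simply discard the old value, e.g. (illustrative):
 *
 *	atomic_or(0x4, &flags);			// set bit 2, result ignored
 *	old = atomic_fetch_or(0x4, &flags);	// set bit 2, keep old value
 */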
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

#define ATOMIC64_OP(op, c_op) \
static __inline__ long \
ia64_atomic64_##op (__s64 i, atomic64_t *v) \
{ \
	__s64 old, new; \
	CMPXCHG_BUGCHECK_DECL \
	\
	do { \
		CMPXCHG_BUGCHECK(v); \
		old = atomic64_read(v); \
		new = old c_op i; \
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new; \
}

#define ATOMIC64_FETCH_OP(op, c_op) \
static __inline__ long \
ia64_atomic64_fetch_##op (__s64 i, atomic64_t *v) \
{ \
	__s64 old, new; \
	CMPXCHG_BUGCHECK_DECL \
	\
	do { \
		CMPXCHG_BUGCHECK(v); \
		old = atomic64_read(v); \
		new = old c_op i; \
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old; \
}

#define ATOMIC64_OPS(op, c_op) \
	ATOMIC64_OP(op, c_op) \
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)
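/*
 * The 64-bit variants mirror the 32-bit ones: passing
 * sizeof(atomic64_t) makes ia64_cmpxchg() use the 8-byte form of the
 * compare-and-exchange, and a constant operand from the fetchadd set
 * takes the fetchadd fast path in the macros below.
 */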
#define atomic64_add_return(i,v) \
({ \
	long __ia64_aar_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter) \
		: ia64_atomic64_add(__ia64_aar_i, v); \
})

#define atomic64_sub_return(i,v) \
({ \
	long __ia64_asr_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter) \
		: ia64_atomic64_sub(__ia64_asr_i, v); \
})

#define atomic64_fetch_add(i,v) \
({ \
	long __ia64_aar_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq) \
		: ia64_atomic64_fetch_add(__ia64_aar_i, v); \
})

#define atomic64_fetch_sub(i,v) \
({ \
	long __ia64_asr_i = (i); \
	__ia64_atomic_const(i) \
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq) \
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v); \
})
ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
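/*
 * atomic_cmpxchg() is the building block for the conditional updates
 * below.  A typical open-coded retry loop looks roughly like this
 * (illustrative sketch; transform() is a hypothetical update step):
 *
 *	int old = atomic_read(&v);
 *	for (;;) {
 *		int new = transform(old);
 *		int seen = atomic_cmpxchg(&v, old, new);
 *		if (seen == old)
 *			break;		// update committed
 *		old = seen;		// lost the race, retry
 *	}
 */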
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
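/*
 * __atomic_add_unless() adds @a to @v unless @v currently holds @u,
 * and returns the value observed before any addition; the generic
 * atomic_add_unless() wrapper is expected to turn that into a boolean,
 * roughly __atomic_add_unless(v, a, u) != u.  The classic use is
 * "take a reference unless it already dropped to zero" (illustrative):
 *
 *	if (__atomic_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// object already dead
 */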
static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}
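/*
 * Note the asymmetry with the 32-bit helper above:
 * __atomic_add_unless() returns the old counter value and lets the
 * caller compare, while atomic64_add_unless() already returns the
 * boolean "did we add?" result (true unless the counter was @u).
 */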
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;
	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}
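/*
 * atomic64_dec_if_positive() decrements only while the result would
 * stay non-negative and returns the (possibly not stored) decremented
 * value, so a negative return means "was already zero or negative".
 * Usage sketch (illustrative, hypothetical resource pool):
 *
 *	if (atomic64_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;	// nothing left to hand out
 */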
/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}
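/*
 * Example (illustrative): atomic_add_negative(-1, &count) both
 * decrements the counter and reports whether it went below zero in a
 * single atomic step, which an atomic_sub() followed by a separate
 * atomic_read() could not guarantee.
 */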
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
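/*
 * The *_and_test() forms test the value *after* the operation against
 * zero.  The canonical use is reference-count teardown (illustrative;
 * free_obj() is a hypothetical destructor):
 *
 *	if (atomic_dec_and_test(&obj->refs))
 *		free_obj(obj);	// we dropped the last reference
 */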
#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#endif /* _ASM_IA64_ATOMIC_H */