/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

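/*
 * Illustrative sketch (not part of this header; names hypothetical):
 * ATOMIC_INIT() is for static initialization, atomic_set() for
 * initializing at run time before the object is visible to others:
 *
 *	static atomic_t nr_users = ATOMIC_INIT(0);
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	atomic_set(&f->refcnt, 1);
 */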
#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))

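/*
 * Note: READ_ONCE()/WRITE_ONCE() only guarantee single-copy atomicity
 * (no tearing) and keep the compiler from caching or reordering the
 * access; they imply no hardware memory barriers.  Callers that need
 * ordering must combine these with the barriers from <asm/barrier.h>.
 */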
#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

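/*
 * ATOMIC_OP(op, c_op) generates a compare-and-exchange retry loop.
 * For example, ATOMIC_OP(add, +) expands to (roughly, with the
 * CMPXCHG_BUGCHECK debugging elided):
 *
 *	static __inline__ int ia64_atomic_add(int i, atomic_t *v)
 *	{
 *		__s32 old, new;
 *		do {
 *			old = atomic_read(v);
 *			new = old + i;
 *		} while (ia64_cmpxchg(acq, v, old, new,
 *				      sizeof(atomic_t)) != old);
 *		return new;
 *	}
 *
 * The loop retries until no other CPU modified *v between the read and
 * the cmpxchg, so the update appears atomic to all observers.
 */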
#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int							\
ia64_atomic_fetch_##op (int i, atomic_t *v)				\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old;							\
}

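/*
 * The fetch_ variants are identical except that they return the value
 * *before* the operation.  E.g. with (v)->counter == 1:
 *
 *	ia64_atomic_add(2, v)		returns 3, counter is now 3
 *	ia64_atomic_fetch_add(2, v)	returns 1, counter is now 3
 */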
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i)						\
	static const int __ia64_atomic_p = __builtin_constant_p(i) ?	\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
	__ia64_atomic_p
#else
#define __ia64_atomic_const(i)	0
#endif

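/*
 * __ia64_atomic_const(i) evaluates, at compile time, whether i is a
 * constant that the ia64 fetchadd instruction can encode: its
 * immediate operand is limited to -16, -8, -4, -1, 1, 4, 8 and 16.
 * For those values the *_return/fetch_* macros below use a single
 * fetchadd; everything else falls back to the cmpxchg loop.  Without
 * optimization (__OPTIMIZE__ undefined) it is simply 0, forcing the
 * fallback path.
 */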
#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

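/*
 * Illustrative use (struct and field names hypothetical), the classic
 * reference-count pattern built on the *_return forms:
 *
 *	atomic_add_return(1, &obj->refcnt);
 *	...
 *	if (atomic_sub_return(1, &obj->refcnt) == 0)
 *		release(obj);
 *
 * Because 1 is a legal fetchadd immediate, both calls take the
 * fetchadd4 fast path here rather than the cmpxchg loop.
 */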
#define atomic_fetch_add(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})

#define atomic_fetch_sub(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
})

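/*
 * Note the fast path here is ia64_fetchadd(..., acq), which returns
 * the value of the counter *before* the addition, whereas the
 * *_return macros above use ia64_fetch_and_add(), which returns the
 * new value.
 */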
ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)

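/*
 * Sketch (FLAG_PENDING and state are hypothetical): atomic_fetch_or()
 * lets a caller set a bit and learn whether it was already set, in a
 * single atomic step:
 *
 *	if (atomic_fetch_or(FLAG_PENDING, &state) & FLAG_PENDING)
 *		return;		(someone else already queued it)
 */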
#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

#define ATOMIC64_OP(op, c_op)						\
static __inline__ s64							\
ia64_atomic64_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64							\
ia64_atomic64_fetch_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

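/*
 * The 64-bit variants mirror the 32-bit ones above, with s64 operands
 * and an 8-byte cmpxchg; the fetchadd fast paths below reuse the same
 * __ia64_atomic_const() immediate test.
 */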
#define atomic64_add_return(i,v)					\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic64_fetch_add(i,v)						\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
})

#define atomic64_fetch_sub(i,v)						\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

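/*
 * Sketch of the usual retry loop these enable (illustrative only;
 * transform() stands for an arbitrary update of the old value):
 *
 *	int old = atomic_read(v);
 *	for (;;) {
 *		int new = transform(old);
 *		int seen = atomic_cmpxchg(v, old, new);
 *		if (seen == old)
 *			break;		(update applied)
 *		old = seen;		(lost the race, retry)
 *	}
 */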
#define atomic_add(i,v)		(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)		(void)atomic_sub_return((i), (v))

#define atomic64_add(i,v)	(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)	(void)atomic64_sub_return((i), (v))

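/*
 * Operations not defined here (atomic_inc/dec, the *_and_test and
 * add_unless families, etc.) are expected to be filled in by the
 * generic fallbacks in <linux/atomic.h>.
 */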
#endif /* _ASM_IA64_ATOMIC_H */