#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/system.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */
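/*
 * Illustrative sketch of typical resource-counting use of this API.
 * The struct and function names here are invented for the example, and a
 * hypothetical caller is assumed to include this header plus
 * <linux/slab.h> for kfree():
 *
 *	struct example_obj {
 *		atomic_t refcount;	// set up with atomic_set(&obj->refcount, 1)
 *	};
 *
 *	static void example_put(struct example_obj *obj)
 *	{
 *		// free the object when the last reference goes away
 *		if (atomic_dec_and_test(&obj->refcount))
 *			kfree(obj);
 *	}
 */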

#define ATOMIC_INIT(i) ( (atomic_t) { (i) } )
#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } )

#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic64_read(v) (*(volatile long *)&(v)->counter)

#define atomic_set(v,i) ((v)->counter = (i))
#define atomic64_set(v,i) ((v)->counter = (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

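/*
 * Note on the sequences below: ldl_l/ldq_l load the value and set the
 * CPU's lock flag; the matching stl_c/stq_c performs the store only if
 * the lock flag is still set, writing 1 into its source register on
 * success and 0 on failure.  "beq %0,2f" therefore branches to the
 * out-of-line retry stub whenever the conditional store fails.
 */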
static __inline__ void atomic_add(int i, atomic_t * v)
{
        unsigned long temp;
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       addl %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter)
        :"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_add(long i, atomic64_t * v)
{
        unsigned long temp;
        __asm__ __volatile__(
        "1:     ldq_l %0,%1\n"
        "       addq %0,%2,%0\n"
        "       stq_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter)
        :"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic_sub(int i, atomic_t * v)
{
        unsigned long temp;
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       subl %0,%2,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter)
        :"Ir" (i), "m" (v->counter));
}

static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
        unsigned long temp;
        __asm__ __volatile__(
        "1:     ldq_l %0,%1\n"
        "       subq %0,%2,%0\n"
        "       stq_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter)
        :"Ir" (i), "m" (v->counter));
}

/*
 * Same as above, but return the result value
 */
static inline int atomic_add_return(int i, atomic_t *v)
{
        long temp, result;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       addl %0,%3,%2\n"
        "       addl %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
        :"Ir" (i), "m" (v->counter) : "memory");
        smp_mb();
        return result;
}

static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
        long temp, result;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l %0,%1\n"
        "       addq %0,%3,%2\n"
        "       addq %0,%3,%0\n"
        "       stq_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
        :"Ir" (i), "m" (v->counter) : "memory");
        smp_mb();
        return result;
}

static __inline__ long atomic_sub_return(int i, atomic_t * v)
{
        long temp, result;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldl_l %0,%1\n"
        "       subl %0,%3,%2\n"
        "       subl %0,%3,%0\n"
        "       stl_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
        :"Ir" (i), "m" (v->counter) : "memory");
        smp_mb();
        return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
        long temp, result;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l %0,%1\n"
        "       subq %0,%3,%2\n"
        "       subq %0,%3,%0\n"
        "       stq_c %0,%1\n"
        "       beq %0,2f\n"
        ".subsection 2\n"
        "2:     br 1b\n"
        ".previous"
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)
        :"Ir" (i), "m" (v->counter) : "memory");
        smp_mb();
        return result;
}

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;
        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns non-zero if the add was performed (@v was not @u), zero otherwise.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
        long c, old;
        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == (u)))
                        break;
                old = atomic64_cmpxchg((v), c, c + (a));
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != (u);
}

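/*
 * atomic64_add_unless() returns a truth value rather than the old counter,
 * which is what the helper below relies on: the increment is performed
 * only while the counter is still non-zero.
 */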
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic64_dec_return(v) atomic64_sub_return(1,(v))

#define atomic_inc_return(v) atomic_add_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_add_return(1, (v)) == 0)

#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

#define atomic_inc(v) atomic_add(1,(v))
#define atomic64_inc(v) atomic64_add(1,(v))

#define atomic_dec(v) atomic_sub(1,(v))
#define atomic64_dec(v) atomic64_sub(1,(v))

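/*
 * The non-value-returning atomic_inc()/atomic_dec() above imply no
 * ordering on Alpha, so these helpers fall back to a full memory barrier.
 */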
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()

#endif /* _ALPHA_ATOMIC_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

/*
 * To ensure dependency ordering is preserved for the _relaxed and
 * _release atomics, an smp_read_barrier_depends() is unconditionally
 * inserted into the _relaxed variants, which are used to build the
 * barriered versions. Avoid redundant back-to-back fences in the
 * _acquire and _fence versions.
 */
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()

#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }

#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic64_read(v) READ_ONCE((v)->counter)

#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
{ \
        unsigned long temp; \
        __asm__ __volatile__( \
        "1:     ldl_l %0,%1\n" \
        "       " #asm_op " %0,%2,%0\n" \
        "       stl_c %0,%1\n" \
        "       beq %0,2f\n" \
        ".subsection 2\n" \
        "2:     br 1b\n" \
        ".previous" \
        :"=&r" (temp), "=m" (v->counter) \
        :"Ir" (i), "m" (v->counter)); \
} \

#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
        long temp, result; \
        __asm__ __volatile__( \
        "1:     ldl_l %0,%1\n" \
        "       " #asm_op " %0,%3,%2\n" \
        "       " #asm_op " %0,%3,%0\n" \
        "       stl_c %0,%1\n" \
        "       beq %0,2f\n" \
        ".subsection 2\n" \
        "2:     br 1b\n" \
        ".previous" \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
        :"Ir" (i), "m" (v->counter) : "memory"); \
        smp_read_barrier_depends(); \
        return result; \
}

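/*
 * The fetch variants below differ from the _return ones above: %2 captures
 * the value loaded by ldl_l/ldq_l, so the caller gets the counter as it was
 * before the operation, while %0 holds the new value used for the store.
 */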
#define ATOMIC_FETCH_OP(op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
        long temp, result; \
        __asm__ __volatile__( \
        "1:     ldl_l %2,%1\n" \
        "       " #asm_op " %2,%3,%0\n" \
        "       stl_c %0,%1\n" \
        "       beq %0,2f\n" \
        ".subsection 2\n" \
        "2:     br 1b\n" \
        ".previous" \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
        :"Ir" (i), "m" (v->counter) : "memory"); \
        smp_read_barrier_depends(); \
        return result; \
}

#define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
{ \
        s64 temp; \
        __asm__ __volatile__( \
        "1:     ldq_l %0,%1\n" \
        "       " #asm_op " %0,%2,%0\n" \
        "       stq_c %0,%1\n" \
        "       beq %0,2f\n" \
        ".subsection 2\n" \
        "2:     br 1b\n" \
        ".previous" \
        :"=&r" (temp), "=m" (v->counter) \
        :"Ir" (i), "m" (v->counter)); \
} \

#define ATOMIC64_OP_RETURN(op, asm_op) \
static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{ \
        s64 temp, result; \
        __asm__ __volatile__( \
        "1:     ldq_l %0,%1\n" \
        "       " #asm_op " %0,%3,%2\n" \
        "       " #asm_op " %0,%3,%0\n" \
        "       stq_c %0,%1\n" \
        "       beq %0,2f\n" \
        ".subsection 2\n" \
        "2:     br 1b\n" \
        ".previous" \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
        :"Ir" (i), "m" (v->counter) : "memory"); \
        smp_read_barrier_depends(); \
        return result; \
}

#define ATOMIC64_FETCH_OP(op, asm_op) \
static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{ \
        s64 temp, result; \
        __asm__ __volatile__( \
        "1:     ldq_l %2,%1\n" \
        "       " #asm_op " %2,%3,%0\n" \
        "       stq_c %0,%1\n" \
        "       beq %0,2f\n" \
        ".subsection 2\n" \
        "2:     br 1b\n" \
        ".previous" \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result) \
        :"Ir" (i), "m" (v->counter) : "memory"); \
        smp_read_barrier_depends(); \
        return result; \
}

#define ATOMIC_OPS(op) \
        ATOMIC_OP(op, op##l) \
        ATOMIC_OP_RETURN(op, op##l) \
        ATOMIC_FETCH_OP(op, op##l) \
        ATOMIC64_OP(op, op##q) \
        ATOMIC64_OP_RETURN(op, op##q) \
        ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
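/*
 * The two expansions above provide atomic_add()/atomic_sub(),
 * atomic_{add,sub}_return_relaxed(), atomic_fetch_{add,sub}_relaxed()
 * and the corresponding atomic64_* variants.
 */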

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \
        ATOMIC_OP(op, asm) \
        ATOMIC_FETCH_OP(op, asm) \
        ATOMIC64_OP(op, asm) \
        ATOMIC64_FETCH_OP(op, asm)

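/*
 * Alpha mnemonics: "bis" is bitwise OR (bit set) and "bic" is AND with
 * the complement (bit clear), which is why andnot and or map onto them.
 */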
ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int c, new, old;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldl_l %[old],%[mem]\n"
        "       cmpeq %[old],%[u],%[c]\n"
        "       addl %[old],%[a],%[new]\n"
        "       bne %[c],2f\n"
        "       stl_c %[new],%[mem]\n"
        "       beq %[new],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
        : [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
        : "memory");
        smp_mb();
        return old;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
        s64 c, new, old;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l %[old],%[mem]\n"
        "       cmpeq %[old],%[u],%[c]\n"
        "       addq %[old],%[a],%[new]\n"
        "       bne %[c],2f\n"
        "       stq_c %[new],%[mem]\n"
        "       beq %[new],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
        : [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
        : "memory");
        smp_mb();
        return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
        s64 old, tmp;
        smp_mb();
        __asm__ __volatile__(
        "1:     ldq_l %[old],%[mem]\n"
        "       subq %[old],1,%[tmp]\n"
        "       ble %[old],2f\n"
        "       stq_c %[tmp],%[mem]\n"
        "       beq %[tmp],3f\n"
        "2:\n"
        ".subsection 2\n"
        "3:     br 1b\n"
        ".previous"
        : [old] "=&r"(old), [tmp] "=&r"(tmp)
        : [mem] "m"(*v)
        : "memory");
        smp_mb();
        return old - 1;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

#endif /* _ALPHA_ATOMIC_H */