/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

static inline int arch_atomic_read(const atomic_t *v)
{
        return __atomic_read(v);
}
#define arch_atomic_read arch_atomic_read

static inline void arch_atomic_set(atomic_t *v, int i)
{
        __atomic_set(v, i);
}
#define arch_atomic_set arch_atomic_set

static inline int arch_atomic_add_return(int i, atomic_t *v)
{
        return __atomic_add_barrier(i, &v->counter) + i;
}
#define arch_atomic_add_return arch_atomic_add_return

static inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
        return __atomic_add_barrier(i, &v->counter);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

static inline void arch_atomic_add(int i, atomic_t *v)
{
        __atomic_add(i, &v->counter);
}
#define arch_atomic_add arch_atomic_add

#define arch_atomic_sub(_i, _v)		arch_atomic_add(-(int)(_i), _v)
#define arch_atomic_sub_return(_i, _v)	arch_atomic_add_return(-(int)(_i), _v)
#define arch_atomic_fetch_sub(_i, _v)	arch_atomic_fetch_add(-(int)(_i), _v)
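
/*
 * Illustrative sketch of the return-value semantics of the add variants;
 * callers normally reach these through the generic atomic_* wrappers
 * rather than the arch_* names used here.  Starting from a counter of 5:
 *
 *	atomic_t cnt = ATOMIC_INIT(5);
 *
 *	arch_atomic_add(3, &cnt);		// no return value, cnt is now 8
 *	arch_atomic_add_return(3, &cnt);	// returns the new value, 11
 *	arch_atomic_fetch_add(3, &cnt);		// returns the old value, 11; cnt is now 14
 *
 * The sub variants above simply negate the operand and reuse the add
 * variants, which is why they can stay plain macros.
 */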

#define ATOMIC_OPS(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
        __atomic_##op(i, &v->counter);					\
}									\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
        return __atomic_##op##_barrier(i, &v->counter);		\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
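
/*
 * Rough sketch of what ATOMIC_OPS(and) expands to; the __atomic_*
 * helpers come from asm/atomic_ops.h:
 *
 *	static inline void arch_atomic_and(int i, atomic_t *v)
 *	{
 *		__atomic_and(i, &v->counter);
 *	}
 *	static inline int arch_atomic_fetch_and(int i, atomic_t *v)
 *	{
 *		return __atomic_and_barrier(i, &v->counter);
 *	}
 *
 * The same pattern is stamped out for "or" and "xor".
 */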

#define arch_atomic_and			arch_atomic_and
#define arch_atomic_or			arch_atomic_or
#define arch_atomic_xor			arch_atomic_xor
#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor

#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), new))

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
        return __atomic_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg
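
/*
 * Illustrative cmpxchg retry loop (a sketch, not taken from this file):
 * bump a counter only while it stays below a hypothetical limit.
 *
 *	static inline bool bump_below(atomic_t *v, int limit)
 *	{
 *		int old = arch_atomic_read(v);
 *
 *		while (old < limit) {
 *			int prev = arch_atomic_cmpxchg(v, old, old + 1);
 *
 *			if (prev == old)
 *				return true;	// our update won
 *			old = prev;		// lost the race, retry
 *		}
 *		return false;
 *	}
 *
 * arch_atomic_cmpxchg() returns the value the counter held before the
 * attempt; the swap happened iff that equals the expected "old".
 */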

#define ATOMIC64_INIT(i) { (i) }

static inline s64 arch_atomic64_read(const atomic64_t *v)
{
        return __atomic64_read(v);
}
#define arch_atomic64_read arch_atomic64_read

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
        __atomic64_set(v, i);
}
#define arch_atomic64_set arch_atomic64_set

static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
        return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}
#define arch_atomic64_add_return arch_atomic64_add_return

static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
        return __atomic64_add_barrier(i, (long *)&v->counter);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

static inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
        __atomic64_add(i, (long *)&v->counter);
}
#define arch_atomic64_add arch_atomic64_add

#define arch_atomic64_xchg(v, new)	(arch_xchg(&((v)->counter), new))

static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
        return __atomic64_cmpxchg((long *)&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

#define ATOMIC64_OPS(op)						\
static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
        __atomic64_##op(i, (long *)&v->counter);			\
}									\
static inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
        return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

#define arch_atomic64_and		arch_atomic64_and
#define arch_atomic64_or		arch_atomic64_or
#define arch_atomic64_xor		arch_atomic64_xor
#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor

#define arch_atomic64_sub_return(_i, _v) arch_atomic64_add_return(-(s64)(_i), _v)
#define arch_atomic64_fetch_sub(_i, _v)	 arch_atomic64_fetch_add(-(s64)(_i), _v)
#define arch_atomic64_sub(_i, _v)	 arch_atomic64_add(-(s64)(_i), _v)

#endif /* __ARCH_S390_ATOMIC__ */

/*
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in SMP environment.
 *
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i) { (i) }

#define __ATOMIC_NO_BARRIER	"\n"

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OR	"lao"
#define __ATOMIC_AND	"lan"
#define __ATOMIC_ADD	"laa"
#define __ATOMIC_XOR	"lax"
#define __ATOMIC_BARRIER "bcr 14,0\n"

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
        int old_val;							\
									\
        typecheck(atomic_t *, ptr);					\
        asm volatile(							\
                op_string " %0,%2,%1\n"					\
                __barrier						\
                : "=d" (old_val), "+Q" ((ptr)->counter)		\
                : "d" (op_val)						\
                : "cc", "memory");					\
        old_val;							\
})
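
/*
 * Sketch of what the z196 path does: lao/lan/laa/lax are the
 * interlocked-access facility instructions (load-and-OR/AND/add/XOR).
 * A single instruction applies the operation to memory and returns the
 * previous value in %0, so no retry loop is needed.  "bcr 14,0" is the
 * fast-serialization form of branch-on-condition, used here as the
 * memory barrier for the *_return variants.  For example,
 * __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) expands to
 * roughly:
 *
 *	asm volatile(
 *		"laa %0,%2,%1\n"
 *		"bcr 14,0\n"
 *		: "=d" (old_val), "+Q" (v->counter)
 *		: "d" (i)
 *		: "cc", "memory");
 */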

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OR	"or"
#define __ATOMIC_AND	"nr"
#define __ATOMIC_ADD	"ar"
#define __ATOMIC_XOR	"xr"
#define __ATOMIC_BARRIER "\n"

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
        int old_val, new_val;						\
									\
        typecheck(atomic_t *, ptr);					\
        asm volatile(							\
                " l %0,%2\n"						\
                "0: lr %1,%0\n"						\
                op_string " %1,%3\n"					\
                " cs %0,%1,%2\n"					\
                " jl 0b"						\
                : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
                : "d" (op_val)						\
                : "cc", "memory");					\
        old_val;							\
})
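
/*
 * Sketch of the pre-z196 fallback: there is no single read-modify-write
 * instruction, so the macro builds the classic compare-and-swap loop:
 *
 *	l	%0,%2		load the current counter value
 *   0:	lr	%1,%0		copy it into a work register
 *	<op>	%1,%3		apply ar/or/nr/xr with the operand
 *	cs	%0,%1,%2	store the new value only if the counter
 *				still equals the loaded value; on mismatch
 *				%0 is refreshed with the current contents
 *	jl	0b		condition code 1 (mismatch) -> retry
 *
 * CS itself is serializing, so no extra barrier instruction is emitted
 * (__ATOMIC_BARRIER is just "\n" in this configuration).
 */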

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int atomic_read(const atomic_t *v)
{
        int c;

        asm volatile(
                " l %0,%1\n"
                : "=d" (c) : "Q" (v->counter));
        return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
        asm volatile(
                " st %1,%0\n"
                : "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}

static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
                asm volatile(
                        "asi %0,%1\n"
                        : "+Q" (v->counter)
                        : "i" (i)
                        : "cc", "memory");
                return;
        }
#endif
        __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
}
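
/*
 * Note on the fast path above: asi ("add signed immediate") adds an
 * immediate operand to storage; with the z196 interlocked-access
 * facility the update is performed atomically, which is why this path
 * is guarded by CONFIG_HAVE_MARCH_Z196_FEATURES.  The immediate is only
 * 8 bits wide, hence the compile-time check that i is a constant in the
 * range -128..127.  Anything else falls back to __ATOMIC_LOOP.
 */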

#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)
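
/*
 * Illustrative use of the derived helpers (a sketch, not from this file):
 * a simple reference count where the last dropper does the cleanup.
 *
 *	struct foo {
 *		atomic_t refs;	// initialised with ATOMIC_INIT(1)
 *	};
 *
 *	static void foo_get(struct foo *f)
 *	{
 *		atomic_inc(&f->refs);
 *	}
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (atomic_dec_and_test(&f->refs))
 *			kfree(f);	// reached zero, free it
 *	}
 */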

#define ATOMIC_OP(op, OP)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
        __ATOMIC_LOOP(v, i, __ATOMIC_##OP, __ATOMIC_NO_BARRIER);	\
}

ATOMIC_OP(and, AND)
ATOMIC_OP(or, OR)
ATOMIC_OP(xor, XOR)

#undef ATOMIC_OP

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
        asm volatile(
                " cs %0,%2,%1"
                : "+d" (old), "+Q" (v->counter)
                : "d" (new)
                : "cc", "memory");
        return old;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
        int c, old;

        c = atomic_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic_cmpxchg(v, c, c + a);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c;
}
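
/*
 * __atomic_add_unless() adds "a" to the counter unless it currently
 * holds "u", and returns the value seen before any update.  A sketch of
 * the usual pattern built on top of it (roughly how the generic
 * atomic_inc_not_zero() helper is constructed):
 *
 *	// take a reference only if the object is still live
 *	if (__atomic_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// counter was 0, object already dying
 */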

#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i) { (i) }

#define __ATOMIC64_NO_BARRIER	"\n"

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR	"laog"
#define __ATOMIC64_AND	"lang"
#define __ATOMIC64_ADD	"laag"
#define __ATOMIC64_XOR	"laxg"
#define __ATOMIC64_BARRIER "bcr 14,0\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
        long long old_val;						\
									\
        typecheck(atomic64_t *, ptr);					\
        asm volatile(							\
                op_string " %0,%2,%1\n"					\
                __barrier						\
                : "=d" (old_val), "+Q" ((ptr)->counter)		\
                : "d" (op_val)						\
                : "cc", "memory");					\
        old_val;							\
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR	"ogr"
#define __ATOMIC64_AND	"ngr"
#define __ATOMIC64_ADD	"agr"
#define __ATOMIC64_XOR	"xgr"
#define __ATOMIC64_BARRIER "\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier)		\
({									\
        long long old_val, new_val;					\
									\
        typecheck(atomic64_t *, ptr);					\
        asm volatile(							\
                " lg %0,%2\n"						\
                "0: lgr %1,%0\n"					\
                op_string " %1,%3\n"					\
                " csg %0,%1,%2\n"					\
                " jl 0b"						\
                : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
                : "d" (op_val)						\
                : "cc", "memory");					\
        old_val;							\
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
{
        long long c;

        asm volatile(
                " lg %0,%1\n"
                : "=d" (c) : "Q" (v->counter));
        return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
        asm volatile(
                " stg %1,%0\n"
                : "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
        return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}

static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
                asm volatile(
                        "agsi %0,%1\n"
                        : "+Q" (v->counter)
                        : "i" (i)
                        : "cc", "memory");
                return;
        }
#endif
        __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
                                         long long old, long long new)
{
        asm volatile(
                " csg %0,%2,%1"
                : "+d" (old), "+Q" (v->counter)
                : "d" (new)
                : "cc", "memory");
        return old;
}

#define ATOMIC64_OP(op, OP)						\
static inline void atomic64_##op(long i, atomic64_t *v)		\
{									\
        __ATOMIC64_LOOP(v, i, __ATOMIC64_##OP, __ATOMIC64_NO_BARRIER);	\
}

ATOMIC64_OP(and, AND)
ATOMIC64_OP(or, OR)
ATOMIC64_OP(xor, XOR)

#undef ATOMIC64_OP
#undef __ATOMIC64_LOOP

static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
        long long c, old;

        c = atomic64_read(v);
        for (;;) {
                if (unlikely(c == u))
                        break;
                old = atomic64_cmpxchg(v, c, c + i);
                if (likely(old == c))
                        break;
                c = old;
        }
        return c != u;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
        long long c, old, dec;

        c = atomic64_read(v);
        for (;;) {
                dec = c - 1;
                if (unlikely(dec < 0))
                        break;
                old = atomic64_cmpxchg((v), c, dec);
                if (likely(old == c))
                        break;
                c = old;
        }
        return dec;
}
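
/*
 * atomic64_dec_if_positive() only performs the decrement if the result
 * would still be >= 0, and returns the decremented value either way; a
 * negative return therefore means the counter was left untouched.  A
 * sketch of a typical use, e.g. consuming from a counted pool:
 *
 *	if (atomic64_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;	// nothing left, counter unchanged
 */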

#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#endif /* __ARCH_S390_ATOMIC__ */