#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

/*
 * Copyright 1999,2009 IBM Corp.
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *            Denis Joseph Barrow,
 *            Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in SMP environment.
 *
 */

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

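/*
 * __CS_LOOP implements an atomic read-modify-write: the old value is
 * loaded, the operation (ar/sr/nr/or) is applied in a register, and
 * COMPARE AND SWAP (cs) stores the result only if the counter still
 * holds the old value; on a mismatch cs reloads the current value and
 * the loop retries.  The macro evaluates to the new value.
 */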
#define __CS_LOOP(ptr, op_val, op_string) ({ \
	int old_val, new_val; \
	asm volatile( \
		" l %0,%2\n" \
		"0: lr %1,%0\n" \
		op_string " %1,%3\n" \
		" cs %0,%1,%2\n" \
		" jl 0b" \
		: "=&d" (old_val), "=&d" (new_val), \
		  "=Q" (((atomic_t *)(ptr))->counter) \
		: "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
		: "cc", "memory"); \
	new_val; \
})

static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		" l %0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		" st %1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "ar");
}
#define atomic_add(_i, _v)		atomic_add_return(_i, _v)
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add_return(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)

static inline int atomic_sub_return(int i, atomic_t *v)
{
	return __CS_LOOP(v, i, "sr");
}
#define atomic_sub(_i, _v)		atomic_sub_return(_i, _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub_return(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, ~mask, "nr");
}

static inline void atomic_set_mask(unsigned long mask, atomic_t *v)
{
	__CS_LOOP(v, mask, "or");
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

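/*
 * atomic_cmpxchg() maps directly onto a single COMPARE AND SWAP: cs
 * compares "old" with the counter and, if they match, stores "new";
 * on a mismatch the current counter value is loaded into "old".  The
 * previous value is returned either way, so callers can tell whether
 * the exchange happened.
 */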
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		" cs %0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

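/*
 * __atomic_add_unless() adds "a" to the counter unless it currently
 * holds "u", using an atomic_cmpxchg() retry loop, and returns the
 * value the counter held before the (possible) addition.  The generic
 * wrappers in <linux/atomic.h> build atomic_add_unless() and
 * atomic_inc_not_zero() on top of it.  A minimal usage sketch, with a
 * hypothetical reference counter "refs":
 *
 *	static atomic_t refs = ATOMIC_INIT(1);
 *
 *	if (__atomic_add_unless(&refs, 1, 0) == 0)
 *		goto object_is_gone;	// zero means no reference was taken
 */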
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}


#undef __CS_LOOP

#define ATOMIC64_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

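/*
 * 64-bit counterpart of __CS_LOOP: the same load/modify/retry pattern,
 * but using the 64-bit instructions lg/lgr/csg on an atomic64_t.
 */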
#define __CSG_LOOP(ptr, op_val, op_string) ({ \
	long long old_val, new_val; \
	asm volatile( \
		" lg %0,%2\n" \
		"0: lgr %1,%0\n" \
		op_string " %1,%3\n" \
		" csg %0,%1,%2\n" \
		" jl 0b" \
		: "=&d" (old_val), "=&d" (new_val), \
		  "=Q" (((atomic64_t *)(ptr))->counter) \
		: "d" (op_val), "Q" (((atomic64_t *)(ptr))->counter) \
		: "cc", "memory"); \
	new_val; \
})

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		" lg %0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		" stg %1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "agr");
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	return __CSG_LOOP(v, i, "sgr");
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, ~mask, "ngr");
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__CSG_LOOP(v, mask, "ogr");
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		" csg %0,%2,%1"
		: "+d" (old), "=Q" (v->counter)
		: "d" (new), "Q" (v->counter)
		: "cc", "memory");
	return old;
}

#undef __CSG_LOOP

#else /* CONFIG_64BIT */

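/*
 * On 31-bit kernels there is no 64-bit compare-and-swap register form,
 * so atomic64_t is emulated with COMPARE DOUBLE AND SWAP (cds), which
 * operates on an even/odd register pair and a doubleword in storage.
 * The register_pair type (provided elsewhere in the s390 type headers)
 * gives the pair view of a 64-bit value used by these routines.
 */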
typedef struct {
	long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		" lm %0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter));
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		" stm %1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp));
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		" lm %0,%N0,%1\n"
		"0: cds %0,%2,%1\n"
		" jl 0b\n"
		: "=&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		" cds %0,%2,%1"
		: "+&d" (rp_old), "=Q" (v->counter)
		: "d" (rp_new), "Q" (v->counter)
		: "cc");
	return rp_old.pair;
}


static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline long long atomic64_sub_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old - i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

#endif /* CONFIG_64BIT */

static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

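/*
 * atomic64_dec_if_positive() decrements the counter only if the result
 * would not become negative.  It returns the decremented value; a
 * negative return value means the counter was left untouched.
 */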
static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add(_i, _v)		atomic64_add_return(_i, _v)
#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub(_i, _v)		atomic64_sub_return(_i, _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* __ARCH_S390_ATOMIC__ */
/*
 * Copyright IBM Corp. 1999, 2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *            Denis Joseph Barrow,
 *            Arnd Bergmann <arndb@de.ibm.com>,
 *
 * Atomic operations that C can't guarantee us.
 * Useful for resource counting etc.
 * s390 uses 'Compare And Swap' for atomicity in SMP environment.
 *
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#define __ATOMIC_NO_BARRIER	"\n"

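/*
 * With the z196 interlocked-access facility the read-modify-write can be
 * done in a single instruction: LOAD AND ADD (laa), LOAD AND AND (lan)
 * and LOAD AND OR (lao) update the counter atomically and return its old
 * value.  "bcr 14,0" is the lightweight serialization used as a memory
 * barrier around the *_return variants.  Without the facility the classic
 * COMPARE AND SWAP loop below is used instead.
 */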
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC_OR	"lao"
#define __ATOMIC_AND	"lan"
#define __ATOMIC_ADD	"laa"
#define __ATOMIC_BARRIER "bcr 14,0\n"

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
({ \
	int old_val; \
 \
	typecheck(atomic_t *, ptr); \
	asm volatile( \
		__barrier \
		op_string " %0,%2,%1\n" \
		__barrier \
		: "=d" (old_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC_OR	"or"
#define __ATOMIC_AND	"nr"
#define __ATOMIC_ADD	"ar"
#define __ATOMIC_BARRIER "\n"

#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
({ \
	int old_val, new_val; \
 \
	typecheck(atomic_t *, ptr); \
	asm volatile( \
		" l %0,%2\n" \
		"0: lr %1,%0\n" \
		op_string " %1,%3\n" \
		" cs %0,%1,%2\n" \
		" jl 0b" \
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		" l %0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		" st %1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

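/*
 * __ATOMIC_LOOP yields the *old* counter value, so the *_return
 * operations add the operand again to produce the new value.  Only the
 * *_return forms use __ATOMIC_BARRIER, since they must act as full
 * memory barriers; plain atomic_add()/atomic_sub() carry no ordering
 * guarantee.  On pre-z196 machines COMPARE AND SWAP already serializes,
 * which is why __ATOMIC_BARRIER is empty in that configuration.
 */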
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}

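/*
 * For a compile-time constant summand that fits into a signed byte,
 * ADD IMMEDIATE (asi) updates the counter directly in storage; with the
 * interlocked-access facility implied by CONFIG_HAVE_MARCH_Z196_FEATURES
 * that update is performed atomically.
 */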
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"asi %0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
}

#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	__ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
}

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	asm volatile(
		" cs %0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}


#undef __ATOMIC_LOOP

#define ATOMIC64_INIT(i)	{ (i) }

#ifdef CONFIG_64BIT

#define __ATOMIC64_NO_BARRIER	"\n"

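/*
 * 64-bit counterparts of the selection above: laag/lang/laog on z196
 * and newer machines, a csg-based retry loop otherwise.
 */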
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

#define __ATOMIC64_OR	"laog"
#define __ATOMIC64_AND	"lang"
#define __ATOMIC64_ADD	"laag"
#define __ATOMIC64_BARRIER "bcr 14,0\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
({ \
	long long old_val; \
 \
	typecheck(atomic64_t *, ptr); \
	asm volatile( \
		__barrier \
		op_string " %0,%2,%1\n" \
		__barrier \
		: "=d" (old_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

#define __ATOMIC64_OR	"ogr"
#define __ATOMIC64_AND	"ngr"
#define __ATOMIC64_ADD	"agr"
#define __ATOMIC64_BARRIER "\n"

#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
({ \
	long long old_val, new_val; \
 \
	typecheck(atomic64_t *, ptr); \
	asm volatile( \
		" lg %0,%2\n" \
		"0: lgr %1,%0\n" \
		op_string " %1,%3\n" \
		" csg %0,%1,%2\n" \
		" jl 0b" \
		: "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter) \
		: "d" (op_val) \
		: "cc", "memory"); \
	old_val; \
})

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

static inline long long atomic64_read(const atomic64_t *v)
{
	long long c;

	asm volatile(
		" lg %0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	asm volatile(
		" stg %1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
}

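/*
 * 64-bit counterpart of the asi fast path in atomic_add() above: for a
 * constant summand in the signed byte range, ADD IMMEDIATE (agsi)
 * performs an interlocked update of the counter in storage.
 */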
static inline void atomic64_add(long long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		asm volatile(
			"agsi %0,%1\n"
			: "+Q" (v->counter)
			: "i" (i)
			: "cc", "memory");
		return;
	}
#endif
	__ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
}

static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
}

static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
	__ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	asm volatile(
		" csg %0,%2,%1"
		: "+d" (old), "+Q" (v->counter)
		: "d" (new)
		: "cc", "memory");
	return old;
}

#undef __ATOMIC64_LOOP

#else /* CONFIG_64BIT */

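/*
 * On 31-bit kernels 64-bit atomics are emulated with COMPARE DOUBLE AND
 * SWAP (cds) on an even/odd register pair; plain reads and writes use
 * load/store-multiple (lm/stm) via the register_pair type.
 */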
typedef struct {
	long long counter;
} atomic64_t;

static inline long long atomic64_read(const atomic64_t *v)
{
	register_pair rp;

	asm volatile(
		" lm %0,%N0,%1"
		: "=&d" (rp) : "Q" (v->counter));
	return rp.pair;
}

static inline void atomic64_set(atomic64_t *v, long long i)
{
	register_pair rp = {.pair = i};

	asm volatile(
		" stm %1,%N1,%0"
		: "=Q" (v->counter) : "d" (rp));
}

static inline long long atomic64_xchg(atomic64_t *v, long long new)
{
	register_pair rp_new = {.pair = new};
	register_pair rp_old;

	asm volatile(
		" lm %0,%N0,%1\n"
		"0: cds %0,%2,%1\n"
		" jl 0b\n"
		: "=&d" (rp_old), "+Q" (v->counter)
		: "d" (rp_new)
		: "cc");
	return rp_old.pair;
}

static inline long long atomic64_cmpxchg(atomic64_t *v,
					 long long old, long long new)
{
	register_pair rp_old = {.pair = old};
	register_pair rp_new = {.pair = new};

	asm volatile(
		" cds %0,%2,%1"
		: "+&d" (rp_old), "+Q" (v->counter)
		: "d" (rp_new)
		: "cc");
	return rp_old.pair;
}


static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old + i;
	} while (atomic64_cmpxchg(v, old, new) != old);
	return new;
}

static inline void atomic64_set_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old | mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
{
	long long old, new;

	do {
		old = atomic64_read(v);
		new = old & mask;
	} while (atomic64_cmpxchg(v, old, new) != old);
}

static inline void atomic64_add(long long i, atomic64_t *v)
{
	atomic64_add_return(i, v);
}

#endif /* CONFIG_64BIT */

static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
	long long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + i);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
	long long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#endif /* __ARCH_S390_ATOMIC__ */