/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

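/*
 * s390 implementation of the arch_atomic_*() API. The __atomic_*()
 * primitives come from asm/atomic_ops.h; the *_barrier variants are the
 * fully ordered forms and are used below for every value-returning
 * operation.
 */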
static inline int arch_atomic_read(const atomic_t *v)
{
	return __atomic_read(v);
}
#define arch_atomic_read arch_atomic_read

static inline void arch_atomic_set(atomic_t *v, int i)
{
	__atomic_set(v, i);
}
#define arch_atomic_set arch_atomic_set

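/*
 * __atomic_add_barrier() returns the old value of the counter; adding i
 * once more yields the new value that arch_atomic_add_return() must
 * return. On machines with the interlocked-access facility this is
 * expected to be a single "laa" plus a serializing "bcr" (an assumption
 * about asm/atomic_ops.h, not something this header spells out).
 */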
static inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter) + i;
}
#define arch_atomic_add_return arch_atomic_add_return

static inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

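/*
 * The void form has no return value to order against, so it can use the
 * barrier-less __atomic_add().
 */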
static inline void arch_atomic_add(int i, atomic_t *v)
{
	__atomic_add(i, &v->counter);
}
#define arch_atomic_add arch_atomic_add

#define arch_atomic_sub(_i, _v)		arch_atomic_add(-(int)(_i), _v)
#define arch_atomic_sub_return(_i, _v)	arch_atomic_add_return(-(int)(_i), _v)
#define arch_atomic_fetch_sub(_i, _v)	arch_atomic_fetch_add(-(int)(_i), _v)

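/*
 * Generate the bitwise operations. For example, ATOMIC_OPS(and) expands
 * to a void arch_atomic_and() built on __atomic_and(), and a fully
 * ordered arch_atomic_fetch_and() built on __atomic_and_barrier(), which
 * returns the value the counter held before the operation.
 */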
#define ATOMIC_OPS(op)							\
static inline void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	__atomic_##op(i, &v->counter);					\
}									\
static inline int arch_atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	return __atomic_##op##_barrier(i, &v->counter);			\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

#define arch_atomic_and			arch_atomic_and
#define arch_atomic_or			arch_atomic_or
#define arch_atomic_xor			arch_atomic_xor
#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor

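/*
 * arch_atomic_cmpxchg() returns the value the counter actually contained;
 * callers compare it with "old" to tell whether the swap happened. The
 * s390 compare-and-swap instruction is itself serializing, so no extra
 * barriers are needed here (a property of the architecture, not of this
 * header).
 */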
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __atomic_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

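/*
 * The 64-bit operations mirror the 32-bit ones. The (long *) casts bridge
 * the s64 counter of atomic64_t to the long-based __atomic64_*()
 * primitives; both types are 64 bits wide on s390.
 */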
#define ATOMIC64_INIT(i) { (i) }

static inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return __atomic64_read(v);
}
#define arch_atomic64_read arch_atomic64_read

static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__atomic64_set(v, i);
}
#define arch_atomic64_set arch_atomic64_set

static inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}
#define arch_atomic64_add_return arch_atomic64_add_return

static inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

static inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	__atomic64_add(i, (long *)&v->counter);
}
#define arch_atomic64_add arch_atomic64_add

#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))

static inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return __atomic64_cmpxchg((long *)&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

#define ATOMIC64_OPS(op)						\
static inline void arch_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	__atomic64_##op(i, (long *)&v->counter);			\
}									\
static inline s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

#define arch_atomic64_and		arch_atomic64_and
#define arch_atomic64_or		arch_atomic64_or
#define arch_atomic64_xor		arch_atomic64_xor
#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor

#define arch_atomic64_sub_return(_i, _v) arch_atomic64_add_return(-(s64)(_i), _v)
#define arch_atomic64_fetch_sub(_i, _v)	 arch_atomic64_fetch_add(-(s64)(_i), _v)
#define arch_atomic64_sub(_i, _v)	 arch_atomic64_add(-(s64)(_i), _v)

#endif /* __ARCH_S390_ATOMIC__ */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i) { (i) }

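/*
 * atomic_read() and atomic_set() are open-coded as single "l"/"st"
 * instructions; naturally aligned 4-byte loads and stores are atomic on
 * s390, so a plain load or store suffices and no interlocked update is
 * needed.
 */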
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter) + i;
}

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter);
}

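/*
 * When the summand is a compile-time constant that fits a signed byte
 * (-128..127) and the kernel is built for z196 or newer,
 * __atomic_add_const() can be used instead; on such machines this is
 * presumably a single add-immediate-to-storage ("asi") that never needs
 * the old value (an assumption about asm/atomic_ops.h).
 */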
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		__atomic_add_const(i, &v->counter);
		return;
	}
#endif
	__atomic_add(i, &v->counter);
}

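/*
 * The inc/dec and test helpers below all derive from the add/sub
 * primitives above; only the value-returning forms imply full ordering.
 */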
#define atomic_add_negative(_i, _v)	(atomic_add_return(_i, _v) < 0)
#define atomic_inc(_v)			atomic_add(1, _v)
#define atomic_inc_return(_v)		atomic_add_return(1, _v)
#define atomic_inc_and_test(_v)		(atomic_add_return(1, _v) == 0)
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)
#define atomic_sub_and_test(_i, _v)	(atomic_sub_return(_i, _v) == 0)
#define atomic_dec(_v)			atomic_sub(1, _v)
#define atomic_dec_return(_v)		atomic_sub_return(1, _v)
#define atomic_dec_and_test(_v)		(atomic_sub_return(1, _v) == 0)

#define ATOMIC_OPS(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__atomic_##op(i, &v->counter);					\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	return __atomic_##op##_barrier(i, &v->counter);			\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __atomic_cmpxchg(&v->counter, old, new);
}

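/*
 * Add "a" to the counter unless it currently holds "u", using the classic
 * compare-and-swap loop: re-read the counter whenever another CPU changed
 * it between the read and the cmpxchg. Returns the old value, so callers
 * compare the result against "u" to see whether the addition happened.
 */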
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic_cmpxchg(v, c, c + a);
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}

#define ATOMIC64_INIT(i) { (i) }

static inline long atomic64_read(const atomic64_t *v)
{
	long c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, long i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, &v->counter) + i;
}

static inline long atomic64_fetch_add(long i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, &v->counter);
}

static inline void atomic64_add(long i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
		__atomic64_add_const(i, &v->counter);
		return;
	}
#endif
	__atomic64_add(i, &v->counter);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
	return __atomic64_cmpxchg(&v->counter, old, new);
}

#define ATOMIC64_OPS(op)						\
static inline void atomic64_##op(long i, atomic64_t *v)		\
{									\
	__atomic64_##op(i, &v->counter);				\
}									\
static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
{									\
	return __atomic64_##op##_barrier(i, &v->counter);		\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

static inline int atomic64_add_unless(atomic64_t *v, long i, long u)
{
	long c, old;

	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == u))
			break;
		old = atomic64_cmpxchg(v, c, c + i);
		if (likely(old == c))
			break;
		c = old;
	}
	return c != u;
}

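/*
 * Decrement the counter only if the result stays non-negative. Returns
 * the decremented value, or a negative value if the counter was already
 * zero or below and was therefore left untouched.
 */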
static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long c, old, dec;

	c = atomic64_read(v);
	for (;;) {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;
		old = atomic64_cmpxchg((v), c, dec);
		if (likely(old == c))
			break;
		c = old;
	}
	return dec;
}

#define atomic64_add_negative(_i, _v)	(atomic64_add_return(_i, _v) < 0)
#define atomic64_inc(_v)		atomic64_add(1, _v)
#define atomic64_inc_return(_v)		atomic64_add_return(1, _v)
#define atomic64_inc_and_test(_v)	(atomic64_add_return(1, _v) == 0)
#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(long)(_i), _v)
#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(long)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(long)(_i), _v)
#define atomic64_sub_and_test(_i, _v)	(atomic64_sub_return(_i, _v) == 0)
#define atomic64_dec(_v)		atomic64_sub(1, _v)
#define atomic64_dec_return(_v)		atomic64_sub_return(1, _v)
#define atomic64_dec_and_test(_v)	(atomic64_sub_return(1, _v) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1, 0)

#endif /* __ARCH_S390_ATOMIC__ */