/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

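/*
 * The arch_atomic*() helpers below are thin wrappers around the
 * low-level __atomic*() primitives provided by <asm/atomic_ops.h> and
 * the arch_{xchg,cmpxchg,try_cmpxchg}() helpers from <asm/cmpxchg.h>.
 */
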
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	return __atomic_read(v);
}
#define arch_atomic_read arch_atomic_read

static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	__atomic_set(v, i);
}
#define arch_atomic_set arch_atomic_set

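/*
 * __atomic_add_barrier() returns the value of the counter *before* the
 * addition. arch_atomic_add_return() therefore adds the operand once
 * more to yield the new value, while arch_atomic_fetch_add() passes the
 * old value through unchanged.
 */
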
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter) + i;
}
#define arch_atomic_add_return arch_atomic_add_return

static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter);
}
#define arch_atomic_fetch_add arch_atomic_fetch_add

static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	__atomic_add(i, &v->counter);
}
#define arch_atomic_add arch_atomic_add

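/* The sub() variants are implemented as addition of the negated operand. */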
#define arch_atomic_sub(_i, _v)		arch_atomic_add(-(int)(_i), _v)
#define arch_atomic_sub_return(_i, _v)	arch_atomic_add_return(-(int)(_i), _v)
#define arch_atomic_fetch_sub(_i, _v)	arch_atomic_fetch_add(-(int)(_i), _v)

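/*
 * Generate the bitwise and/or/xor helpers: a plain (non-value-returning)
 * form and a fetch_##op form that uses the barrier variant of the
 * primitive and returns the previous value.
 */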
#define ATOMIC_OPS(op)							\
static __always_inline void arch_atomic_##op(int i, atomic_t *v)	\
{									\
	__atomic_##op(i, &v->counter);					\
}									\
static __always_inline int arch_atomic_fetch_##op(int i, atomic_t *v)	\
{									\
	return __atomic_##op##_barrier(i, &v->counter);		\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

#define arch_atomic_and			arch_atomic_and
#define arch_atomic_or			arch_atomic_or
#define arch_atomic_xor			arch_atomic_xor
#define arch_atomic_fetch_and		arch_atomic_fetch_and
#define arch_atomic_fetch_or		arch_atomic_fetch_or
#define arch_atomic_fetch_xor		arch_atomic_fetch_xor

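/*
 * xchg/cmpxchg/try_cmpxchg operate directly on the counter word and
 * delegate to the generic arch_xchg()/arch_cmpxchg()/arch_try_cmpxchg()
 * helpers from <asm/cmpxchg.h>.
 */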
static __always_inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic_xchg arch_atomic_xchg

static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_cmpxchg arch_atomic_cmpxchg

static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
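
/*
 * Illustrative only (not part of this header): a typical update loop
 * built on try_cmpxchg(), which returns false and refreshes *old on a
 * failed compare:
 *
 *	int old = arch_atomic_read(v), new;
 *
 *	do {
 *		new = old + 1;
 *	} while (!arch_atomic_try_cmpxchg(v, &old, new));
 */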

#define ATOMIC64_INIT(i) { (i) }

static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	return __atomic64_read(v);
}
#define arch_atomic64_read arch_atomic64_read

static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	__atomic64_set(v, i);
}
#define arch_atomic64_set arch_atomic64_set

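/*
 * The 64-bit helpers mirror the 32-bit ones above: the _barrier
 * primitive returns the previous counter value, so add_return() adds
 * the operand again and fetch_add() returns it as is.
 */
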
static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}
#define arch_atomic64_add_return arch_atomic64_add_return

static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter);
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	__atomic64_add(i, (long *)&v->counter);
}
#define arch_atomic64_add arch_atomic64_add

static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 new)
{
	return arch_xchg(&v->counter, new);
}
#define arch_atomic64_xchg arch_atomic64_xchg

static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return arch_try_cmpxchg(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg

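/* 64-bit counterparts of the ATOMIC_OPS() generated helpers above. */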
#define ATOMIC64_OPS(op)						\
static __always_inline void arch_atomic64_##op(s64 i, atomic64_t *v)	\
{									\
	__atomic64_##op(i, (long *)&v->counter);			\
}									\
static __always_inline long arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
{									\
	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

#define arch_atomic64_and		arch_atomic64_and
#define arch_atomic64_or		arch_atomic64_or
#define arch_atomic64_xor		arch_atomic64_xor
#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor

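/* As above, 64-bit subtraction is addition of the negated operand. */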
#define arch_atomic64_sub_return(_i, _v) arch_atomic64_add_return(-(s64)(_i), _v)
#define arch_atomic64_fetch_sub(_i, _v)	 arch_atomic64_fetch_add(-(s64)(_i), _v)
#define arch_atomic64_sub(_i, _v)	 arch_atomic64_add(-(s64)(_i), _v)

#endif /* __ARCH_S390_ATOMIC__ */