/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low level functions for atomic operations
 *
 * Copyright IBM Corp. 1999, 2016
 */

#ifndef __ARCH_S390_ATOMIC_OPS__
#define __ARCH_S390_ATOMIC_OPS__

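/*
 * Plain load/store accessors for the atomic counters: l/st operate on the
 * 32-bit counter, lg/stg on the 64-bit one.
 */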
static inline int __atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		" l %0,%1\n"
		: "=d" (c) : "R" (v->counter));
	return c;
}

static inline void __atomic_set(atomic_t *v, int i)
{
	asm volatile(
		" st %1,%0\n"
		: "=R" (v->counter) : "d" (i));
}

static inline s64 __atomic64_read(const atomic64_t *v)
{
	s64 c;

	asm volatile(
		" lg %0,%1\n"
		: "=d" (c) : "RT" (v->counter));
	return c;
}

static inline void __atomic64_set(atomic64_t *v, s64 i)
{
	asm volatile(
		" stg %1,%0\n"
		: "=RT" (v->counter) : "d" (i));
}

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

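/*
 * With the z196 interlocked-access facility the load-and-{add,and,or,xor}
 * instructions update the operand in storage atomically and return the old
 * value; the _barrier variants append "bcr 14,0" to add a memory barrier
 * after the operation.
 */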
#define __ATOMIC_OP(op_name, op_type, op_string, op_barrier) \
static inline op_type op_name(op_type val, op_type *ptr) \
{ \
	op_type old; \
	\
	asm volatile( \
		op_string " %[old],%[val],%[ptr]\n" \
		op_barrier \
		: [old] "=d" (old), [ptr] "+QS" (*ptr) \
		: [val] "d" (val) : "cc", "memory"); \
	return old; \
} \

#define __ATOMIC_OPS(op_name, op_type, op_string) \
	__ATOMIC_OP(op_name, op_type, op_string, "\n") \
	__ATOMIC_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")

__ATOMIC_OPS(__atomic_add, int, "laa")
__ATOMIC_OPS(__atomic_and, int, "lan")
__ATOMIC_OPS(__atomic_or, int, "lao")
__ATOMIC_OPS(__atomic_xor, int, "lax")

__ATOMIC_OPS(__atomic64_add, long, "laag")
__ATOMIC_OPS(__atomic64_and, long, "lang")
__ATOMIC_OPS(__atomic64_or, long, "laog")
__ATOMIC_OPS(__atomic64_xor, long, "laxg")

#undef __ATOMIC_OPS
#undef __ATOMIC_OP

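/*
 * asi/agsi add a signed immediate to the int/long in storage; with the
 * interlocked-access facility the update is performed atomically, and no
 * old value is returned.
 */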
#define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier) \
static __always_inline void op_name(op_type val, op_type *ptr) \
{ \
	asm volatile( \
		op_string " %[ptr],%[val]\n" \
		op_barrier \
		: [ptr] "+QS" (*ptr) : [val] "i" (val) : "cc", "memory");\
}

#define __ATOMIC_CONST_OPS(op_name, op_type, op_string) \
	__ATOMIC_CONST_OP(op_name, op_type, op_string, "\n") \
	__ATOMIC_CONST_OP(op_name##_barrier, op_type, op_string, "bcr 14,0\n")

__ATOMIC_CONST_OPS(__atomic_add_const, int, "asi")
__ATOMIC_CONST_OPS(__atomic64_add_const, long, "agsi")

#undef __ATOMIC_CONST_OPS
#undef __ATOMIC_CONST_OP

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

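/*
 * Without the z196 facilities the operations are implemented with a
 * compare-and-swap (cs/csg) retry loop: compute the new value, try to swap
 * it in, and retry ("jl 0b") as long as the compare fails.
 */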
#define __ATOMIC_OP(op_name, op_string) \
static inline int op_name(int val, int *ptr) \
{ \
	int old, new; \
	\
	asm volatile( \
		"0: lr %[new],%[old]\n" \
		op_string " %[new],%[val]\n" \
		" cs %[old],%[new],%[ptr]\n" \
		" jl 0b" \
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+Q" (*ptr)\
		: [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
	return old; \
}

#define __ATOMIC_OPS(op_name, op_string) \
	__ATOMIC_OP(op_name, op_string) \
	__ATOMIC_OP(op_name##_barrier, op_string)

__ATOMIC_OPS(__atomic_add, "ar")
__ATOMIC_OPS(__atomic_and, "nr")
__ATOMIC_OPS(__atomic_or, "or")
__ATOMIC_OPS(__atomic_xor, "xr")

#undef __ATOMIC_OPS

#define __ATOMIC64_OP(op_name, op_string) \
static inline long op_name(long val, long *ptr) \
{ \
	long old, new; \
	\
	asm volatile( \
		"0: lgr %[new],%[old]\n" \
		op_string " %[new],%[val]\n" \
		" csg %[old],%[new],%[ptr]\n" \
		" jl 0b" \
		: [old] "=d" (old), [new] "=&d" (new), [ptr] "+QS" (*ptr)\
		: [val] "d" (val), "0" (*ptr) : "cc", "memory"); \
	return old; \
}

#define __ATOMIC64_OPS(op_name, op_string) \
	__ATOMIC64_OP(op_name, op_string) \
	__ATOMIC64_OP(op_name##_barrier, op_string)

__ATOMIC64_OPS(__atomic64_add, "agr")
__ATOMIC64_OPS(__atomic64_and, "ngr")
__ATOMIC64_OPS(__atomic64_or, "ogr")
__ATOMIC64_OPS(__atomic64_xor, "xgr")

#undef __ATOMIC64_OPS

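/*
 * Without asi/agsi the *_add_const helpers simply map to the generic add
 * operations; the cs/csg loop already serializes, so the _barrier forms
 * need no extra barrier.
 */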
#define __atomic_add_const(val, ptr) __atomic_add(val, ptr)
#define __atomic_add_const_barrier(val, ptr) __atomic_add(val, ptr)
#define __atomic64_add_const(val, ptr) __atomic64_add(val, ptr)
#define __atomic64_add_const_barrier(val, ptr) __atomic64_add(val, ptr)

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

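/*
 * Compare-and-swap helpers: cs/csg atomically replace *ptr with new if it
 * still contains old. The plain variants return the value found in *ptr,
 * the _bool variants return whether the swap succeeded.
 */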
static inline int __atomic_cmpxchg(int *ptr, int old, int new)
{
	asm volatile(
		" cs %[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+Q" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return old;
}

static inline bool __atomic_cmpxchg_bool(int *ptr, int old, int new)
{
	int old_expected = old;

	asm volatile(
		" cs %[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+Q" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return old == old_expected;
}

static inline long __atomic64_cmpxchg(long *ptr, long old, long new)
{
	asm volatile(
		" csg %[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+QS" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return old;
}

static inline bool __atomic64_cmpxchg_bool(long *ptr, long old, long new)
{
	long old_expected = old;

	asm volatile(
		" csg %[old],%[new],%[ptr]"
		: [old] "+d" (old), [ptr] "+QS" (*ptr)
		: [new] "d" (new)
		: "cc", "memory");
	return old == old_expected;
}

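/*
 * Usage sketch (hypothetical caller, not part of this header): the helpers
 * operate directly on the counter word of an atomic_t, e.g.
 *
 *	atomic_t v = ATOMIC_INIT(0);
 *
 *	__atomic_add(1, &v.counter);
 *	if (__atomic_cmpxchg_bool(&v.counter, 1, 5))
 *		handle_success();
 */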
#endif /* __ARCH_S390_ATOMIC_OPS__ */