/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/asm-const.h>
#include <asm/asm-compat.h>

/*
 * *_return_relaxed and {cmp}xchg_relaxed end with a "bne-" instruction,
 * so an isync is enough as an acquire barrier on platforms without
 * lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
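
/*
 * The generic wrappers in <linux/atomic.h> build the acquire/release and
 * fully ordered variants from the _relaxed ops plus the fences above.
 * Illustrative sketch only (the real wrappers are generated):
 *
 *	static inline int arch_atomic_add_return_acquire(int a, atomic_t *v)
 *	{
 *		int ret = arch_atomic_add_return_relaxed(a, v);
 *		__atomic_acquire_fence();
 *		return ret;
 *	}
 */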

static __inline__ int arch_atomic_read(const atomic_t *v)
{
	int t;

	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("lwz %0,0(%1)" : "=r"(t) : "b"(&v->counter));
	else
		__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m<>"(v->counter));

	return t;
}

static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("stw %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
	else
		__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
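
/*
 * In the non-prefixed forms, "%U1"/"%X1" (GCC powerpc operand modifiers)
 * emit the update-form or indexed-form mnemonic suffix when the compiler
 * picks such an address for the "m<>" operand, so the instruction always
 * matches the selected addressing mode.
 */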

#define ATOMIC_OP(op, asm_op, suffix, sign, ...)			\
static __inline__ void arch_atomic_##op(int a, atomic_t *v)		\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op "%I2" suffix " %0,%0,%2\n"				\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
}

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ...)	\
static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op "%I2" suffix " %0,%0,%2\n"				\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ...)		\
static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op "%I3" suffix " %1,%0,%3\n"				\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r"#sign (a), "r" (&v->counter)				\
	: "cc", ##__VA_ARGS__);						\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op, suffix, sign, ...)			\
	ATOMIC_OP(op, asm_op, suffix, sign, ##__VA_ARGS__)		\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)

ATOMIC_OPS(add, add, "c", I, "xer")
ATOMIC_OPS(sub, sub, "c", I, "xer")
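
/*
 * For reference, a rough expansion of ATOMIC_OPS(add, ...) above: "%I2"
 * makes the compiler emit the "i" immediate suffix when operand 2 is a
 * 16-bit signed constant (constraint "rI"), i.e. addic instead of addc;
 * both forms set the carry bit, hence the extra "xer" clobber:
 *
 *	static __inline__ void arch_atomic_add(int a, atomic_t *v)
 *	{
 *		int t;
 *
 *		__asm__ __volatile__(
 *	"1:	lwarx	%0,0,%3		# atomic_add\n"
 *	"	add%I2c %0,%0,%2\n"
 *	"	stwcx.	%0,0,%3\n"
 *	"	bne-	1b\n"
 *		: "=&r" (t), "+m" (v->counter)
 *		: "rI" (a), "r" (&v->counter)
 *		: "cc", "xer");
 *	}
 */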

#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed

#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op, suffix, sign)				\
	ATOMIC_OP(op, asm_op, suffix, sign)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign)

ATOMIC_OPS(and, and, ".", K)
ATOMIC_OPS(or, or, "", K)
ATOMIC_OPS(xor, xor, "", K)
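
/*
 * The suffix/sign pairs differ for the bitwise ops: "andi." is the only
 * immediate form of AND (record form, hence the "." suffix; CR0 is
 * already covered by the "cc" clobber), while ori/xori take a plain
 * immediate.  "K" limits constants to unsigned 16-bit values.
 */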

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n"
"	cmpw	0,%0,%3\n"
"	beq	2f\n"
"	add%I2c	%0,%0,%2\n"
"	stwcx.	%0,0,%1\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"	sub%I2c	%0,%0,%2\n"
"2:"
	: "=&r" (t)
	: "r" (&v->counter), "rI" (a), "r" (u)
	: "cc", "memory", "xer");

	return t;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
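
/*
 * Typical use is a refcount that must not be revived from zero
 * (hypothetical caller):
 *
 *	if (arch_atomic_fetch_add_unless(&obj->refs, 1, 0) == 0)
 *		return NULL;	// object already dead
 */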

/*
 * Atomically test *v and decrement if it is greater than 0.
 * Returns the old value of *v minus 1, even when *v was not
 * actually decremented.
 */
static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n"
"	cmpwi	%0,1\n"
"	addi	%0,%0,-1\n"
"	blt-	2f\n"
"	stwcx.	%0,0,%1\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
"\n"
"2:"
	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
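
/*
 * Can be used to implement a counting semaphore's trylock
 * (hypothetical caller):
 *
 *	if (arch_atomic_dec_if_positive(&sem->count) < 0)
 *		return -EAGAIN;	// no resources left, counter untouched
 */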

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 t;

	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("ld %0,0(%1)" : "=r"(t) : "b"(&v->counter));
	else
		__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : DS_FORM_CONSTRAINT (v->counter));

	return t;
}

static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{
	/* -mprefixed can generate offsets beyond range, fall back hack */
	if (IS_ENABLED(CONFIG_PPC_KERNEL_PREFIXED))
		__asm__ __volatile__("std %1,0(%2)" : "=m"(v->counter) : "r"(i), "b"(&v->counter));
	else
		__asm__ __volatile__("std%U0%X0 %1,%0" : "=" DS_FORM_CONSTRAINT (v->counter) : "r"(i));
}
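
/*
 * Unlike lwz/stw above, ld/std only accept DS-form displacements
 * (multiples of four), which plain "m<>" does not guarantee;
 * DS_FORM_CONSTRAINT (from <asm/asm-compat.h>) restricts the compiler
 * to addresses those instructions can actually encode.
 */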

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v)	\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline s64							\
arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline s64							\
arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)		\
{									\
	s64 res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
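
/*
 * Note the operand order: "subf rt,ra,rb" computes rb - ra, so with the
 * "%0,%2,%0" template above, sub ends up as t = t - a, matching the
 * 32-bit ops.
 */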

#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed

#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void arch_atomic64_inc(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define arch_atomic64_inc arch_atomic64_inc

static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void arch_atomic64_dec(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define arch_atomic64_dec arch_atomic64_dec

static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
#define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * Returns the old value of *v minus 1, even when *v was not
 * actually decremented.
 */
static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n"
"	addic.	%0,%0,-1\n"
"	blt-	2f\n"
"	stdcx.	%0,0,%1\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
"\n"
"2:"
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n"
"	cmpd	0,%0,%3\n"
"	beq	2f\n"
"	add	%0,%2,%0\n"
"	stdcx.	%0,0,%1\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0\n"
"2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	s64 t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n"
"	cmpdi	0,%0,0\n"
"	beq-	2f\n"
"	addic	%1,%0,1\n"
"	stdcx.	%1,0,%2\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"\n"
"2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
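
/*
 * A second register (t2) holds the incremented value so the original
 * value (t1) survives for the return test.  Typical use (hypothetical
 * caller):
 *
 *	if (!arch_atomic64_inc_not_zero(&obj->refs))
 *		return NULL;	// object was already at zero
 */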

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */