/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/sync.h>

#define ATOMIC_OPS(pfx, type)						\
static __always_inline type arch_##pfx##_read(const pfx##_t *v)	\
{									\
	return READ_ONCE(v->counter);					\
}									\
									\
static __always_inline void arch_##pfx##_set(pfx##_t *v, type i)	\
{									\
	WRITE_ONCE(v->counter, i);					\
}									\
									\
static __always_inline type						\
arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n)			\
{									\
	return arch_cmpxchg(&v->counter, o, n);				\
}									\
									\
static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n)	\
{									\
	return arch_xchg(&v->counter, n);				\
}

ATOMIC_OPS(atomic, int)

#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i)	{ (i) }
ATOMIC_OPS(atomic64, s64)
#endif
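
/*
 * Hypothetical usage sketch: the ATOMIC_OPS(atomic, int) expansion above
 * yields plain-C accessors.  In-tree callers normally reach them through the
 * generic atomic_*() wrappers; the calls below (and the example() name) are
 * made up purely to show the names the macro generates.
 *
 *	void example(atomic_t *v)
 *	{
 *		int old;
 *
 *		arch_atomic_set(v, 2);			// WRITE_ONCE(v->counter, 2)
 *		old = arch_atomic_read(v);		// READ_ONCE(v->counter)
 *		old = arch_atomic_cmpxchg(v, 2, 3);	// returns the previous value
 *		old = arch_atomic_xchg(v, 1);		// unconditional exchange
 *	}
 */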

#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v)		\
{									\
	type temp;							\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return;							\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%0, %1		# " #pfx "_" #op "	\n"	\
	"	" #asm_op " %0, %2				\n"	\
	"	" #sc "	%0, %1					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	.set	pop					\n"	\
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)		\
	: "Ir" (i) : __LLSC_CLOBBER);					\
}

#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v)		\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_" #op "_return\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type							\
arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)			\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_fetch_" #op "\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __stringify(SC_BEQZ) "	%0, 1b			\n"	\
	"	.set	pop					\n"	\
	"	move	%0, %1					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define arch_atomic_add_return_relaxed	arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed	arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed	arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed	arch_atomic_fetch_sub_relaxed
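
/*
 * Hypothetical usage sketch: with the arguments used above,
 * ATOMIC_OPS(atomic, add, ...) emits arch_atomic_add(),
 * arch_atomic_add_return_relaxed() and arch_atomic_fetch_add_relaxed().
 * The example_add() caller below is made up to show what each generated
 * function returns; the _relaxed variants carry no ordering of their own,
 * and the fully ordered forms are built from them by the generic atomic
 * headers.
 *
 *	void example_add(atomic_t *v)
 *	{
 *		int newval, oldval;
 *
 *		arch_atomic_add(5, v);				// no return value
 *		newval = arch_atomic_add_return_relaxed(5, v);	// value after the add
 *		oldval = arch_atomic_fetch_add_relaxed(5, v);	// value before the add
 *	}
 */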

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
# define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
# define arch_atomic64_fetch_add_relaxed	arch_atomic64_fetch_add_relaxed
# define arch_atomic64_fetch_sub_relaxed	arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define arch_atomic_fetch_and_relaxed	arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed	arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed	arch_atomic_fetch_xor_relaxed
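
/*
 * Hypothetical usage sketch: the bitwise ops only come in void and fetch_*
 * forms (the second ATOMIC_OPS definition above omits ATOMIC_OP_RETURN).
 * The EXAMPLE_FLAG bit and example_try_claim() helper below are made up to
 * show arch_atomic_fetch_or_relaxed() used as a simple test-and-set of a
 * flag word:
 *
 *	#define EXAMPLE_FLAG	0x1			// hypothetical flag bit
 *
 *	bool example_try_claim(atomic_t *flags)
 *	{
 *		int old = arch_atomic_fetch_or_relaxed(EXAMPLE_FLAG, flags);
 *
 *		return !(old & EXAMPLE_FLAG);		// true if we set it first
 *	}
 */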

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define arch_atomic64_fetch_and_relaxed	arch_atomic64_fetch_and_relaxed
# define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
# define arch_atomic64_fetch_xor_relaxed	arch_atomic64_fetch_xor_relaxed
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc)				\
static __inline__ type arch_##pfx##_sub_if_positive(type i, pfx##_t * v)	\
{									\
	type temp, result;						\
									\
	smp_mb__before_atomic();					\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result -= i;						\
		if (result >= 0)					\
			v->counter = result;				\
		raw_local_irq_restore(flags);				\
		smp_mb__after_atomic();					\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# atomic_sub_if_positive\n"	\
	"	.set	pop					\n"	\
	"	" #op "	%0, %1, %3				\n"	\
	"	move	%1, %0					\n"	\
	"	bltz	%0, 2f					\n"	\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" #sc "	%1, %2					\n"	\
	"	" __stringify(SC_BEQZ) "	%1, 1b		\n"	\
	"2:	" __SYNC(full, loongson3_war) "			\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i)							\
	: __LLSC_CLOBBER);						\
									\
	/*								\
	 * In the Loongson3 workaround case we already have a		\
	 * completion barrier at 2: above, which is needed due to the	\
	 * bltz that can branch to code outside of the LL/SC loop. As	\
	 * such, we don't need to emit another barrier here.		\
	 */								\
	if (__SYNC_loongson3_war == 0)					\
		smp_mb__after_atomic();					\
									\
	return result;							\
}

ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define arch_atomic_dec_if_positive(v)	arch_atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define arch_atomic64_dec_if_positive(v)	arch_atomic64_sub_if_positive(1, v)
#endif

#undef ATOMIC_SIP_OP
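
/*
 * Hypothetical usage sketch: arch_atomic_sub_if_positive() returns the old
 * value minus @i whether or not the store happened, so a negative return
 * means the subtraction was refused.  The example_try_dec() helper below is
 * made up to show a semaphore-style "take one unit" built on the generated
 * arch_atomic_dec_if_positive():
 *
 *	bool example_try_dec(atomic_t *count)
 *	{
 *		return arch_atomic_dec_if_positive(count) >= 0;
 *	}
 */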

#endif /* _ASM_ATOMIC_H */
/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/war.h>

#define ATOMIC_INIT(i)		{ (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		(*(volatile int *)&(v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i)	((v)->counter = (i))
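
/*
 * Hypothetical usage sketch: in this older version of the header,
 * atomic_read() and atomic_set() are plain volatile accesses with no
 * ordering guarantees of their own.  The example_users counter and
 * example_init() below are made up for illustration only:
 *
 *	static atomic_t example_users = ATOMIC_INIT(0);
 *
 *	void example_init(void)
 *	{
 *		atomic_set(&example_users, 1);
 *		pr_info("users: %d\n", atomic_read(&example_users));
 *	}
 */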

/*
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t * v)
{
	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		int temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	ll	%0, %1		# atomic_add		\n"
		"	addu	%0, %2					\n"
		"	sc	%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (v->counter)
		: "Ir" (i));
	} else if (kernel_uses_llsc) {
		int temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	ll	%0, %1		# atomic_add	\n"
			"	addu	%0, %2				\n"
			"	sc	%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (v->counter)
			: "Ir" (i));
		} while (unlikely(!temp));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		v->counter += i;
		raw_local_irq_restore(flags);
	}
}
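
/*
 * Illustrative sketch, not taken from the original file: both LL/SC paths
 * above implement the same retry loop.  In pseudo-C, with the made-up
 * helpers load_linked()/store_conditional() standing in for the ll/sc
 * instructions (store_conditional() succeeds only if nothing else wrote the
 * location since the linked load), the loop is roughly:
 *
 *	do {
 *		temp = load_linked(&v->counter);
 *		temp += i;
 *	} while (!store_conditional(&v->counter, temp));
 *
 * The R10000_LLSC_WAR variant keeps the retry branch inside one asm block
 * using beqzl, as the affected R10000 parts require, while the generic
 * variant lets the C do { } while () loop drive the retry.
 */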

/*
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t * v)
{
	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		int temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	ll	%0, %1		# atomic_sub		\n"
		"	subu	%0, %2					\n"
		"	sc	%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (v->counter)
		: "Ir" (i));
	} else if (kernel_uses_llsc) {
		int temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	ll	%0, %1		# atomic_sub	\n"
			"	subu	%0, %2				\n"
			"	sc	%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (v->counter)
			: "Ir" (i));
		} while (unlikely(!temp));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		v->counter -= i;
		raw_local_irq_restore(flags);
	}
}

/*
 * Same as above, but return the result value
 */
static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	int result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		int temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	ll	%1, %2		# atomic_add_return	\n"
		"	addu	%0, %1, %3				\n"
		"	sc	%0, %2					\n"
		"	beqzl	%0, 1b					\n"
		"	addu	%0, %1, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
		: "Ir" (i));
	} else if (kernel_uses_llsc) {
		int temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	ll	%1, %2	# atomic_add_return	\n"
			"	addu	%0, %1, %3			\n"
			"	sc	%0, %2				\n"
			"	.set	mips0				\n"
			: "=&r" (result), "=&r" (temp), "+m" (v->counter)
			: "Ir" (i));
		} while (unlikely(!result));

		result = temp + i;
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result += i;
		v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}
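
/*
 * Hypothetical usage sketch: atomic_add_return() is bracketed by
 * smp_mb__before_llsc()/smp_llsc_mb(), so it is a full barrier as well as an
 * arithmetic op.  The example_next_id counter and example_alloc_id() below
 * are made up to show a simple monotonic ID allocator built on it:
 *
 *	static atomic_t example_next_id = ATOMIC_INIT(0);
 *
 *	int example_alloc_id(void)
 *	{
 *		return atomic_add_return(1, &example_next_id);
 *	}
 */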

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	int result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		int temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	ll	%1, %2		# atomic_sub_return	\n"
		"	subu	%0, %1, %3				\n"
		"	sc	%0, %2					\n"
		"	beqzl	%0, 1b					\n"
		"	subu	%0, %1, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");

		result = temp - i;
	} else if (kernel_uses_llsc) {
		int temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	ll	%1, %2	# atomic_sub_return	\n"
			"	subu	%0, %1, %3			\n"
			"	sc	%0, %2				\n"
			"	.set	mips0				\n"
			: "=&r" (result), "=&r" (temp), "+m" (v->counter)
			: "Ir" (i));
		} while (unlikely(!result));

		result = temp - i;
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
	int result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		int temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
		"	subu	%0, %1, %3				\n"
		"	bltz	%0, 1f					\n"
		"	sc	%0, %2					\n"
		"	.set	noreorder				\n"
		"	beqzl	%0, 1b					\n"
		"	 subu	%0, %1, %3				\n"
		"	.set	reorder					\n"
		"1:							\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (kernel_uses_llsc) {
		int temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
		"	subu	%0, %1, %3				\n"
		"	bltz	%0, 1f					\n"
		"	sc	%0, %2					\n"
		"	.set	noreorder				\n"
		"	beqz	%0, 1b					\n"
		"	 subu	%0, %1, %3				\n"
		"	.set	reorder					\n"
		"1:							\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
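
/*
 * Hypothetical usage sketch: because __atomic_add_unless() returns the value
 * @v held before any addition, a caller can build an "increment unless zero"
 * helper on top of it by testing that old value.  example_inc_not_zero() is
 * made up for illustration; it mirrors how atomic64_inc_not_zero() is
 * composed from atomic64_add_unless() further down, except that the 64-bit
 * variant already returns the comparison result rather than the old value.
 *
 *	static inline bool example_inc_not_zero(atomic_t *v)
 *	{
 *		return __atomic_add_unless(v, 1, 0) != 0;
 *	}
 */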

#define atomic_dec_return(v) atomic_sub_return(1, (v))
#define atomic_inc_return(v) atomic_add_return(1, (v))

/*
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

/*
 * atomic_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)

/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1, (v))

/*
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1, (v))

/*
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
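
/*
 * Hypothetical usage sketch: the test macros above are the building blocks
 * for reference counting.  struct example_obj and example_put() below are
 * made up to show a "put" helper that frees an object when the last
 * reference is dropped; atomic_dec_and_test() expands to
 * atomic_sub_return(1, v) == 0, so the decrement is fully ordered and only
 * one caller can observe the count hitting zero.
 *
 *	struct example_obj {
 *		atomic_t refcount;
 *		// ... payload ...
 *	};
 *
 *	void example_put(struct example_obj *obj)
 *	{
 *		if (atomic_dec_and_test(&obj->refcount))
 *			kfree(obj);
 *	}
 */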

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i)	{ (i) }

/*
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define atomic64_read(v)	(*(volatile long *)&(v)->counter)

/*
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 */
#define atomic64_set(v, i)	((v)->counter = (i))

/*
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	lld	%0, %1		# atomic64_add		\n"
		"	daddu	%0, %2					\n"
		"	scd	%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (v->counter)
		: "Ir" (i));
	} else if (kernel_uses_llsc) {
		long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	lld	%0, %1		# atomic64_add	\n"
			"	daddu	%0, %2				\n"
			"	scd	%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (v->counter)
			: "Ir" (i));
		} while (unlikely(!temp));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		v->counter += i;
		raw_local_irq_restore(flags);
	}
}

/*
 * atomic64_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	lld	%0, %1		# atomic64_sub		\n"
		"	dsubu	%0, %2					\n"
		"	scd	%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "+m" (v->counter)
		: "Ir" (i));
	} else if (kernel_uses_llsc) {
		long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	lld	%0, %1		# atomic64_sub	\n"
			"	dsubu	%0, %2				\n"
			"	scd	%0, %1				\n"
			"	.set	mips0				\n"
			: "=&r" (temp), "+m" (v->counter)
			: "Ir" (i));
		} while (unlikely(!temp));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		v->counter -= i;
		raw_local_irq_restore(flags);
	}
}

/*
 * Same as above, but return the result value
 */
static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	long result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	lld	%1, %2		# atomic64_add_return	\n"
		"	daddu	%0, %1, %3				\n"
		"	scd	%0, %2					\n"
		"	beqzl	%0, 1b					\n"
		"	daddu	%0, %1, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
		: "Ir" (i));
	} else if (kernel_uses_llsc) {
		long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	lld	%1, %2	# atomic64_add_return	\n"
			"	daddu	%0, %1, %3			\n"
			"	scd	%0, %2				\n"
			"	.set	mips0				\n"
			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
			: "Ir" (i), "m" (v->counter)
			: "memory");
		} while (unlikely(!result));

		result = temp + i;
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result += i;
		v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	long result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	lld	%1, %2		# atomic64_sub_return	\n"
		"	dsubu	%0, %1, %3				\n"
		"	scd	%0, %2					\n"
		"	beqzl	%0, 1b					\n"
		"	dsubu	%0, %1, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (kernel_uses_llsc) {
		long temp;

		do {
			__asm__ __volatile__(
			"	.set	arch=r4000			\n"
			"	lld	%1, %2	# atomic64_sub_return	\n"
			"	dsubu	%0, %1, %3			\n"
			"	scd	%0, %2				\n"
			"	.set	mips0				\n"
			: "=&r" (result), "=&r" (temp), "=m" (v->counter)
			: "Ir" (i), "m" (v->counter)
			: "memory");
		} while (unlikely(!result));

		result = temp - i;
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

/*
 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
	long result;

	smp_mb__before_llsc();

	if (kernel_uses_llsc && R10000_LLSC_WAR) {
		long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3				\n"
		"	bltz	%0, 1f					\n"
		"	scd	%0, %2					\n"
		"	.set	noreorder				\n"
		"	beqzl	%0, 1b					\n"
		"	 dsubu	%0, %1, %3				\n"
		"	.set	reorder					\n"
		"1:							\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (kernel_uses_llsc) {
		long temp;

		__asm__ __volatile__(
		"	.set	arch=r4000				\n"
		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3				\n"
		"	bltz	%0, 1f					\n"
		"	scd	%0, %2					\n"
		"	.set	noreorder				\n"
		"	beqz	%0, 1b					\n"
		"	 dsubu	%0, %1, %3				\n"
		"	.set	reorder					\n"
		"1:							\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "+m" (v->counter)
		: "Ir" (i));
	} else {
		unsigned long flags;

		raw_local_irq_save(flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		raw_local_irq_restore(flags);
	}

	smp_llsc_mb();

	return result;
}

#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))

/**
 * atomic64_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns true if the addition was performed.
 */
static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
#define atomic64_inc_return(v) atomic64_add_return(1, (v))

/*
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

/*
 * atomic64_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)

/*
 * atomic64_inc - increment atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1.
 */
#define atomic64_inc(v) atomic64_add(1, (v))

/*
 * atomic64_dec - decrement atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic64_dec(v) atomic64_sub(1, (v))

/*
 * atomic64_add_negative - add and test if negative
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)

#endif /* CONFIG_64BIT */

/*
 * atomic*_return operations are serializing but not the non-*_return
 * versions.
 */
#define smp_mb__before_atomic_dec()	smp_mb__before_llsc()
#define smp_mb__after_atomic_dec()	smp_llsc_mb()
#define smp_mb__before_atomic_inc()	smp_mb__before_llsc()
#define smp_mb__after_atomic_inc()	smp_llsc_mb()
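
/*
 * Hypothetical usage sketch: because plain atomic_dec() above is unordered,
 * a producer that publishes data before dropping a counter has to add the
 * barrier explicitly.  example_publish() and its parameters are made up for
 * illustration; the *_return and *_and_test forms already imply full
 * barriers and do not need these macros.
 *
 *	void example_publish(int *ready, atomic_t *pending)
 *	{
 *		*ready = 1;			// make the update visible first
 *		smp_mb__before_atomic_dec();
 *		atomic_dec(pending);		// then drop the pending count
 *	}
 */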

#endif /* _ASM_ATOMIC_H */