/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/sync.h>

#define ATOMIC_OPS(pfx, type) \
static __always_inline type arch_##pfx##_read(const pfx##_t *v) \
{ \
	return READ_ONCE(v->counter); \
} \
\
static __always_inline void arch_##pfx##_set(pfx##_t *v, type i) \
{ \
	WRITE_ONCE(v->counter, i); \
} \
\
static __always_inline type \
arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n) \
{ \
	return arch_cmpxchg(&v->counter, o, n); \
} \
\
static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n) \
{ \
	return arch_xchg(&v->counter, n); \
}

ATOMIC_OPS(atomic, int)

#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i) { (i) }
ATOMIC_OPS(atomic64, s64)
#endif
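
/*
 * For reference (just the shape of the expansion, not generated verbatim):
 * the ATOMIC_OPS() instantiations above produce the trivial accessors,
 * roughly
 *
 *	int  arch_atomic_read(const atomic_t *v);
 *	void arch_atomic_set(atomic_t *v, int i);
 *	int  arch_atomic_cmpxchg(atomic_t *v, int o, int n);
 *	int  arch_atomic_xchg(atomic_t *v, int n);
 *
 * plus the corresponding s64-typed arch_atomic64_*() variants on 64-bit
 * kernels.
 */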

#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v) \
{ \
	type temp; \
\
	if (!kernel_uses_llsc) { \
		unsigned long flags; \
\
		raw_local_irq_save(flags); \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
		return; \
	} \
\
	__asm__ __volatile__( \
	" .set push \n" \
	" .set " MIPS_ISA_LEVEL " \n" \
	" " __SYNC(full, loongson3_war) " \n" \
	"1: " #ll " %0, %1 # " #pfx "_" #op " \n" \
	" " #asm_op " %0, %2 \n" \
	" " #sc " %0, %1 \n" \
	"\t" __stringify(SC_BEQZ) " %0, 1b \n" \
	" .set pop \n" \
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
	: "Ir" (i) : __LLSC_CLOBBER); \
}
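
/*
 * The LL/SC path above is the classic MIPS retry loop: load-linked the
 * counter, apply the operation, then store-conditional; the store writes 0
 * back into its register if the reservation was lost (e.g. another CPU
 * wrote the line), in which case we branch back and try again.  As a
 * rough, simplified sketch of what ATOMIC_OP(atomic, add, int, +=, addu,
 * ll, sc) expands to, assuming SC_BEQZ resolves to a plain beqz and with
 * the .set/sync directives omitted:
 *
 *	static __inline__ void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		int temp;
 *
 *		__asm__ __volatile__(
 *		"1:	ll	%0, %1	\n"	// temp = v->counter (linked load)
 *		"	addu	%0, %2	\n"	// temp += i
 *		"	sc	%0, %1	\n"	// try store; temp = 1 on success, 0 on failure
 *		"	beqz	%0, 1b	\n"	// lost the reservation: reload and retry
 *		: "=&r" (temp), "+m" (v->counter)
 *		: "Ir" (i));
 *	}
 *
 * The real macro additionally uses GCC_OFF_SMALL_ASM() for the memory
 * operand and __LLSC_CLOBBER, which the sketch leaves out.
 */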

#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
static __inline__ type \
arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
{ \
	type temp, result; \
\
	if (!kernel_uses_llsc) { \
		unsigned long flags; \
\
		raw_local_irq_save(flags); \
		result = v->counter; \
		result c_op i; \
		v->counter = result; \
		raw_local_irq_restore(flags); \
		return result; \
	} \
\
	__asm__ __volatile__( \
	" .set push \n" \
	" .set " MIPS_ISA_LEVEL " \n" \
	" " __SYNC(full, loongson3_war) " \n" \
	"1: " #ll " %1, %2 # " #pfx "_" #op "_return\n" \
	" " #asm_op " %0, %1, %3 \n" \
	" " #sc " %0, %2 \n" \
	"\t" __stringify(SC_BEQZ) " %0, 1b \n" \
	" " #asm_op " %0, %1, %3 \n" \
	" .set pop \n" \
	: "=&r" (result), "=&r" (temp), \
	  "+" GCC_OFF_SMALL_ASM() (v->counter) \
	: "Ir" (i) : __LLSC_CLOBBER); \
\
	return result; \
}

#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \
static __inline__ type \
arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
{ \
	type temp, result; \
\
	if (!kernel_uses_llsc) { \
		unsigned long flags; \
\
		raw_local_irq_save(flags); \
		result = v->counter; \
		v->counter c_op i; \
		raw_local_irq_restore(flags); \
		return result; \
	} \
\
	__asm__ __volatile__( \
	" .set push \n" \
	" .set " MIPS_ISA_LEVEL " \n" \
	" " __SYNC(full, loongson3_war) " \n" \
	"1: " #ll " %1, %2 # " #pfx "_fetch_" #op "\n" \
	" " #asm_op " %0, %1, %3 \n" \
	" " #sc " %0, %2 \n" \
	"\t" __stringify(SC_BEQZ) " %0, 1b \n" \
	" .set pop \n" \
	" move %0, %1 \n" \
	: "=&r" (result), "=&r" (temp), \
	  "+" GCC_OFF_SMALL_ASM() (v->counter) \
	: "Ir" (i) : __LLSC_CLOBBER); \
\
	return result; \
}
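
/*
 * Note the trailing "move %0, %1" in ATOMIC_FETCH_OP above: the value
 * obtained by the ll/lld (v->counter before the operation) is copied back
 * into the result register, so the fetch_##op variants return the old
 * value, whereas the ##op##_return variants recompute and return the new
 * one.
 */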

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
	ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
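
/*
 * Each ATOMIC_OPS(atomic, add, ...) line therefore generates three
 * functions: arch_atomic_add(), arch_atomic_add_return_relaxed() and
 * arch_atomic_fetch_add_relaxed() (and likewise for sub).  Only the
 * _relaxed forms are implemented here; the #defines above advertise them
 * to the generic atomic layer in <linux/atomic.h>, which is expected to
 * build the fully ordered, acquire and release variants on top of them by
 * adding the appropriate barriers.
 */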

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
# define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
# define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
# define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc) \
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
# define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
# define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \
static __inline__ type arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
{ \
	type temp, result; \
\
	smp_mb__before_atomic(); \
\
	if (!kernel_uses_llsc) { \
		unsigned long flags; \
\
		raw_local_irq_save(flags); \
		result = v->counter; \
		result -= i; \
		if (result >= 0) \
			v->counter = result; \
		raw_local_irq_restore(flags); \
		smp_mb__after_atomic(); \
		return result; \
	} \
\
	__asm__ __volatile__( \
	" .set push \n" \
	" .set " MIPS_ISA_LEVEL " \n" \
	" " __SYNC(full, loongson3_war) " \n" \
	"1: " #ll " %1, %2 # atomic_sub_if_positive\n" \
	" .set pop \n" \
	" " #op " %0, %1, %3 \n" \
	" move %1, %0 \n" \
	" bltz %0, 2f \n" \
	" .set push \n" \
	" .set " MIPS_ISA_LEVEL " \n" \
	" " #sc " %1, %2 \n" \
	" " __stringify(SC_BEQZ) " %1, 1b \n" \
	"2: " __SYNC(full, loongson3_war) " \n" \
	" .set pop \n" \
	: "=&r" (result), "=&r" (temp), \
	  "+" GCC_OFF_SMALL_ASM() (v->counter) \
	: "Ir" (i) \
	: __LLSC_CLOBBER); \
\
	/* \
	 * In the Loongson3 workaround case we already have a \
	 * completion barrier at 2: above, which is needed due to the \
	 * bltz that can branch to code outside of the LL/SC loop. As \
	 * such, we don't need to emit another barrier here. \
	 */ \
	if (__SYNC_loongson3_war == 0) \
		smp_mb__after_atomic(); \
\
	return result; \
}

ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v)

#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v)
#endif
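
/*
 * Illustrative use of the conditional decrement (the structure and field
 * names here are made up purely for the example; only
 * atomic_dec_if_positive() itself is real): the call returns the new
 * value, which is negative iff the counter was already zero or below and
 * was therefore left unchanged.
 *
 *	if (atomic_dec_if_positive(&pool->free_slots) < 0)
 *		return -EBUSY;
 */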

#undef ATOMIC_SIP_OP

#endif /* _ASM_ATOMIC_H */