/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_ATOMIC_H
#define __ASM_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>

#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#define ATOMIC_INIT(i)	{ (i) }

#ifdef __KERNEL__

/*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#define atomic_set(v,i)	(((v)->counter) = (i))
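
/*
 * Note: atomic_read() and atomic_set() are plain (volatile) accesses and
 * carry no ordering guarantees on their own; callers that need ordering
 * are expected to pair them with explicit barriers.
 */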

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */
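
/*
 * The ll/sc sequences below all follow the same operand pattern:
 *   %0 / %w0 - the value loaded by ldxr and written back (result),
 *   %w1      - the status flag written by stxr/stlxr (0 on success),
 *   %2       - the memory location, i.e. v->counter ("+Q" constraint),
 *   %3 / %w3 - the caller-supplied operand i ("Ir": register or immediate).
 * cbnz on the status flag retries the sequence if the exclusive store
 * failed.
 */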
static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_add_return\n"
"1:	ldxr	%w0, %2\n"
"	add	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}
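
/*
 * Note: atomic_add_return() (and atomic_sub_return() below) use a
 * store-release (stlxr) followed by an explicit smp_mb(), so they act as
 * full barriers, whereas the void atomic_add()/atomic_sub() variants use
 * plain stxr and provide no ordering. The 64-bit counterparts later in
 * this file follow the same pattern.
 */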

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	asm volatile("// atomic_sub_return\n"
"1:	ldxr	%w0, %2\n"
"	sub	%w0, %w0, %w3\n"
"	stlxr	%w1, %w0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long tmp;
	int oldval;

	smp_mb();

	asm volatile("// atomic_cmpxchg\n"
"1:	ldxr	%w1, %2\n"
"	cmp	%w1, %w3\n"
"	b.ne	2f\n"
"	stxr	%w0, %w4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}
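
/*
 * Note: atomic_cmpxchg() brackets the ll/sc loop with smp_mb() on both
 * sides rather than using acquire/release instructions, so it is fully
 * ordered whether or not the comparison succeeds. It returns the value
 * found in ptr->counter; the exchange happened iff that value equals old.
 */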

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
		c = old;
	return c;
}
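
/*
 * __atomic_add_unless() adds a to v unless the counter already holds u,
 * retrying the cmpxchg until it either observes u or succeeds. It returns
 * the value the counter held before any addition, which the generic
 * atomic_add_unless() wrapper in <linux/atomic.h> compares against u.
 */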

#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)	(atomic_add_return(1, v))
#define atomic_dec_return(v)	(atomic_sub_return(1, v))
#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
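
/*
 * Illustrative use only (not part of this header): a typical
 * reference-count drop built on these primitives, where free_obj() is a
 * hypothetical destructor. atomic_dec_and_test() is fully ordered here
 * because it is built on atomic_sub_return():
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_obj(obj);
 */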

/*
 * 64-bit atomic operations.
 */
#define ATOMIC64_INIT(i) { (i) }

#define atomic64_read(v)	(*(volatile long long *)&(v)->counter)
#define atomic64_set(v,i)	(((v)->counter) = (i))

static inline void atomic64_add(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add\n"
"1:	ldxr	%0, %2\n"
"	add	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline long atomic64_add_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_add_return\n"
"1:	ldxr	%0, %2\n"
"	add	%0, %0, %3\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

static inline void atomic64_sub(u64 i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub\n"
"1:	ldxr	%0, %2\n"
"	sub	%0, %0, %3\n"
"	stxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i));
}

static inline long atomic64_sub_return(long i, atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_sub_return\n"
"1:	ldxr	%0, %2\n"
"	sub	%0, %0, %3\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
	: "memory");

	smp_mb();
	return result;
}

static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
{
	long oldval;
	unsigned long res;

	smp_mb();

	asm volatile("// atomic64_cmpxchg\n"
"1:	ldxr	%1, %2\n"
"	cmp	%1, %3\n"
"	b.ne	2f\n"
"	stxr	%w0, %4, %2\n"
"	cbnz	%w0, 1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
	: "cc");

	smp_mb();
	return oldval;
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline long atomic64_dec_if_positive(atomic64_t *v)
{
	long result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.mi	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
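
/*
 * Note: atomic64_dec_if_positive() only stores the decremented value when
 * the result is non-negative (b.mi skips the store), and only executes the
 * dmb ish barrier on that successful path. The return value is the
 * decremented counter, which is negative when no store took place.
 */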

static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;

	c = atomic64_read(v);
	while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
		c = old;

	return c != u;
}

#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
#define atomic64_inc(v)			atomic64_add(1LL, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
#define atomic64_dec(v)			atomic64_sub(1LL, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)

#endif
#endif