#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally. This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects. There are two work-arounds:
 * 1. Disable interrupts and emulate the atomic swap
 * 2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();
	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@ __xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@ __xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
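
/*
 * Editorial usage sketch (not from the original source): xchg()
 * stores the new value and returns the previous contents in one
 * atomic step, with full barrier semantics provided by the smp_mb()
 * pair above.  Hypothetical names; claim_pending() drains a word of
 * pending flags, leaving zero behind:
 *
 *	static unsigned long pending_mask;
 *
 *	static unsigned long claim_pending(void)
 *	{
 *		return xchg(&pending_mask, 0UL);
 *	}
 */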

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
						     (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	prefetchw((const void *)ptr);

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq	%0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),		\
					  (unsigned long)(o),	\
					  (unsigned long)(n),	\
					  sizeof(*(ptr))))
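
/*
 * Editorial usage sketch (not from the original source): the
 * canonical cmpxchg() retry loop.  cmpxchg() returns the value
 * actually found at *ptr, so the update succeeded iff the return
 * value equals the expected old value.  Hypothetical names:
 *
 *	static unsigned int refcount;
 *
 *	static void ref_get(void)
 *	{
 *		unsigned int old;
 *
 *		do {
 *			old = refcount;
 *		} while (cmpxchg(&refcount, old, old + 1) != old);
 *	}
 */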

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	prefetchw(ptr);

	__asm__ __volatile__(
"1:	ldrexd		%1, %H1, [%3]\n"
"	teq		%1, %4\n"
"	teqeq		%H1, %H4\n"
"	bne		2f\n"
"	strexd		%0, %5, %H5, [%3]\n"
"	teq		%0, #0\n"
"	bne		1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
	: "r" (ptr), "r" (old), "r" (new)
	: "cc");

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg_local(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),		\
					     (unsigned long)(o),\
					     (unsigned long)(n),\
					     sizeof(*(ptr))))
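
/*
 * Editorial note (not from the original source): cmpxchg_local() is
 * atomic only with respect to the current CPU, so it suits per-CPU
 * data that is never touched cross-CPU.  Hypothetical sketch,
 * assuming preemption is already disabled by the caller:
 *
 *	static DEFINE_PER_CPU(unsigned long, evt_count);
 *
 *	static void count_event(void)
 *	{
 *		unsigned long *p = this_cpu_ptr(&evt_count);
 *		unsigned long old;
 *
 *		do {
 *			old = *p;
 *		} while (cmpxchg_local(p, old, old + 1) != old);
 *	}
 */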

#define cmpxchg64(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					    (unsigned long long)(o),	\
					    (unsigned long long)(n)))

#define cmpxchg64_relaxed(ptr, o, n) \
	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					 (unsigned long long)(o),	\
					 (unsigned long long)(n)))

#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
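
/*
 * Editorial usage sketch (not from the original source): cmpxchg64()
 * gives a lock-free 64-bit compare-and-swap on 32-bit ARM via the
 * ldrexd/strexd sequence above; the operand must be naturally
 * (64-bit) aligned for ldrexd.  Hypothetical names:
 *
 *	static u64 last_stamp;
 *
 *	static void advance_stamp(u64 new)
 *	{
 *		u64 old;
 *
 *		do {
 *			old = last_stamp;
 *		} while (cmpxchg64(&last_stamp, old, new) != old);
 *	}
 */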

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif	/* __ASM_ARM_CMPXCHG_H */