/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>
#include <linux/cmpxchg-emu.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally. This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects. There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform the atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long
__arch_xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
	case 1:
		asm volatile("@ __xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 2:
		asm volatile("@ __xchg2\n"
		"1:	ldrexh	%0, [%3]\n"
		"	strexh	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	case 4:
		asm volatile("@ __xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@ __xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		/* Cause a link-time error, the xchg() size is not supported */
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}

#define arch_xchg_relaxed(ptr, x) ({					\
	(__typeof__(*(ptr)))__arch_xchg((unsigned long)(x), (ptr),	\
					sizeof(*(ptr)));		\
})
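
/*
 * Illustrative usage sketch only (hypothetical helper, not part of this
 * header's API): arch_xchg_relaxed() unconditionally stores the new value
 * and returns whatever was there before, with no implied memory barriers.
 * Kernel code normally reaches it through the generic xchg()/xchg_relaxed()
 * wrappers in <linux/atomic.h>.
 */
static inline unsigned long __example_xchg_slot(unsigned long *slot,
						unsigned long msg)
{
	/* Atomically publish msg and return the previous contents of *slot. */
	return arch_xchg_relaxed(slot, msg);
}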

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

#define arch_xchg arch_xchg_relaxed

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define arch_cmpxchg_local(ptr, o, n) ({				\
	(__typeof(*ptr))__generic_cmpxchg_local((ptr),			\
						(unsigned long)(o),	\
						(unsigned long)(n),	\
						sizeof(*(ptr)));	\
})

#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))

#include <asm-generic/cmpxchg.h>

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	prefetchw((const void *)ptr);

	switch (size) {
#ifdef CONFIG_CPU_V6	/* ARCH == ARMv6 */
	case 1:
		oldval = cmpxchg_emu_u8((volatile u8 *)ptr, old, new);
		break;
#else /* min ARCH > ARMv6 */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

#define arch_cmpxchg_relaxed(ptr,o,n) ({				\
	(__typeof__(*(ptr)))__cmpxchg((ptr),				\
				      (unsigned long)(o),		\
				      (unsigned long)(n),		\
				      sizeof(*(ptr)));			\
})
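
/*
 * Illustrative usage sketch only (hypothetical helper, not kernel API): the
 * usual compare-and-swap retry loop on top of arch_cmpxchg_relaxed(). The
 * macro returns the value observed in memory; the update took effect only
 * if that value equals the expected one.
 */
static inline void __example_atomic_or(unsigned long *word, unsigned long bits)
{
	unsigned long old, seen;

	seen = *word;
	do {
		old = seen;
		/* Try to install old | bits; re-read and retry on failure. */
		seen = arch_cmpxchg_relaxed(word, old, old | bits);
	} while (seen != old);
}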

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __generic_cmpxchg_local(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define arch_cmpxchg_local(ptr, o, n) ({				\
	(__typeof(*ptr))__cmpxchg_local((ptr),				\
					(unsigned long)(o),		\
					(unsigned long)(n),		\
					sizeof(*(ptr)));		\
})
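
/*
 * Illustrative usage sketch only (hypothetical helper): arch_cmpxchg_local()
 * is atomic only with respect to the current CPU, so it suits data that no
 * other processor touches, e.g. claiming a per-CPU slot against a local
 * interrupt handler.
 */
static inline int __example_claim_local_slot(unsigned long *slot)
{
	/* Succeeds (returns 1) only if the slot was free (zero) beforehand. */
	return arch_cmpxchg_local(slot, 0UL, 1UL) == 0UL;
}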

static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	prefetchw(ptr);

	__asm__ __volatile__(
"1:	ldrexd	%1, %H1, [%3]\n"
"	teq	%1, %4\n"
"	teqeq	%H1, %H4\n"
"	bne	2f\n"
"	strexd	%0, %5, %H5, [%3]\n"
"	teq	%0, #0\n"
"	bne	1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
	: "r" (ptr), "r" (old), "r" (new)
	: "cc");

	return oldval;
}

#define arch_cmpxchg64_relaxed(ptr, o, n) ({				\
	(__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					(unsigned long long)(o),	\
					(unsigned long long)(n));	\
})

#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))
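
/*
 * Illustrative usage sketch only (hypothetical helper): a 64-bit add built
 * from arch_cmpxchg64_relaxed(). The ldrexd/strexd pair used above requires
 * the 64-bit variable to be naturally (8-byte) aligned.
 */
static inline void __example_add64(unsigned long long *counter,
				   unsigned long long delta)
{
	unsigned long long old, seen;

	seen = *counter;
	do {
		old = seen;
		/* Retry if another CPU updated the counter in the meantime. */
		seen = arch_cmpxchg64_relaxed(counter, old, old + delta);
	} while (seen != old);
}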

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASM_ARM_CMPXCHG_H */