arch/arm/include/asm/cmpxchg.h (Linux v3.15); a usage sketch follows the listing:

#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	smp_mb();
	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}

#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	prefetchw((const void *)ptr);

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
					  (unsigned long)(o),		\
					  (unsigned long)(n),		\
					  sizeof(*(ptr))))

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	prefetchw(ptr);

	__asm__ __volatile__(
"1:	ldrexd		%1, %H1, [%3]\n"
"	teq		%1, %4\n"
"	teqeq		%H1, %H4\n"
"	bne		2f\n"
"	strexd		%0, %5, %H5, [%3]\n"
"	teq		%0, #0\n"
"	bne		1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
	: "r" (ptr), "r" (old), "r" (new)
	: "cc");

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
				       (unsigned long)(o),		\
				       (unsigned long)(n),		\
				       sizeof(*(ptr))))

#define cmpxchg64(ptr, o, n)						\
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					(unsigned long long)(o),	\
					(unsigned long long)(n)))

#define cmpxchg64_relaxed(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					(unsigned long long)(o),	\
					(unsigned long long)(n)))

#define cmpxchg64_local(ptr, o, n)	cmpxchg64_relaxed((ptr), (o), (n))

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASM_ARM_CMPXCHG_H */
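The v3.15 API above is consumed through the xchg() and cmpxchg() macros. As a minimal usage sketch, the userspace C program below mimics the two idioms this header exists for; the kernel macros themselves are not callable from userspace, so GCC's __atomic builtins stand in for them here (on ARMv6+ the compiler lowers these builtins to the same ldrex/strex retry loops shown in the listing). All names (lock_word, counter, the *_demo functions) are invented for the demo.

/*
 * Userspace sketch only, not kernel code: GCC __atomic builtins
 * stand in for xchg()/cmpxchg(). All names here are invented.
 */
#include <stdio.h>

static unsigned long lock_word;	/* 0 = unlocked, 1 = locked */
static unsigned long counter;

static void lock_demo(void)
{
	/* xchg idiom: atomically swap in 1 until the old value was 0. */
	while (__atomic_exchange_n(&lock_word, 1UL, __ATOMIC_SEQ_CST))
		;	/* spin */
}

static void unlock_demo(void)
{
	__atomic_store_n(&lock_word, 0UL, __ATOMIC_RELEASE);
}

static void add_demo(unsigned long delta)
{
	unsigned long old, new;

	/* cmpxchg idiom: recompute and retry until no one raced us. */
	do {
		old = __atomic_load_n(&counter, __ATOMIC_RELAXED);
		new = old + delta;
	} while (!__atomic_compare_exchange_n(&counter, &old, new, 0,
					      __ATOMIC_SEQ_CST,
					      __ATOMIC_RELAXED));
}

int main(void)
{
	lock_demo();
	add_demo(3);
	unlock_demo();
	printf("counter = %lu\n", counter);	/* prints: counter = 3 */
	return 0;
}

Because __xchg() and __cmpxchg_mb() issue smp_mb() on both sides of the operation, xchg() and cmpxchg() are fully ordered in this version; __ATOMIC_SEQ_CST plays that role in the sketch.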

arch/arm/include/asm/cmpxchg.h (Linux v6.9.4); a sketch of the barrier change follows the listing:

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_ARM_CMPXCHG_H
#define __ASM_ARM_CMPXCHG_H

#include <linux/irqflags.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>

#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif

static inline unsigned long
__arch_xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	prefetchw((const void *)ptr);

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
#ifndef CONFIG_CPU_V6 /* MIN ARCH >= V6K */
	case 1:
		asm volatile("@	__xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 2:
		asm volatile("@	__xchg2\n"
		"1:	ldrexh	%0, [%3]\n"
		"	strexh	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	case 4:
		asm volatile("@	__xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		raw_local_irq_restore(flags);
		break;

	case 4:
		raw_local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		raw_local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@	__xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@	__xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		/* Cause a link-time error, the xchg() size is not supported */
		__bad_xchg(ptr, size), ret = 0;
		break;
	}

	return ret;
}

#define arch_xchg_relaxed(ptr, x) ({					\
	(__typeof__(*(ptr)))__arch_xchg((unsigned long)(x), (ptr),	\
					sizeof(*(ptr)));		\
})

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6
/* min ARCH < ARMv6 */

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

#define arch_xchg arch_xchg_relaxed

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define arch_cmpxchg_local(ptr, o, n) ({				\
	(__typeof(*ptr))__generic_cmpxchg_local((ptr),			\
					        (unsigned long)(o),	\
					        (unsigned long)(n),	\
					        sizeof(*(ptr)));	\
})

#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))

#include <asm-generic/cmpxchg.h>

#else	/* min ARCH >= ARMv6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only supports 32-bit operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	prefetchw((const void *)ptr);

	switch (size) {
#ifndef CONFIG_CPU_V6	/* min ARCH >= ARMv6K */
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg2\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

#define arch_cmpxchg_relaxed(ptr,o,n) ({				\
	(__typeof__(*(ptr)))__cmpxchg((ptr),				\
				      (unsigned long)(o),		\
				      (unsigned long)(n),		\
				      sizeof(*(ptr)));			\
})

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifdef CONFIG_CPU_V6	/* min ARCH == ARMv6 */
	case 1:
	case 2:
		ret = __generic_cmpxchg_local(ptr, old, new, size);
		break;
#endif
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define arch_cmpxchg_local(ptr, o, n) ({				\
	(__typeof(*ptr))__cmpxchg_local((ptr),				\
				        (unsigned long)(o),		\
				        (unsigned long)(n),		\
				        sizeof(*(ptr)));		\
})

static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	unsigned long long oldval;
	unsigned long res;

	prefetchw(ptr);

	__asm__ __volatile__(
"1:	ldrexd		%1, %H1, [%3]\n"
"	teq		%1, %4\n"
"	teqeq		%H1, %H4\n"
"	bne		2f\n"
"	strexd		%0, %5, %H5, [%3]\n"
"	teq		%0, #0\n"
"	bne		1b\n"
"2:"
	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
	: "r" (ptr), "r" (old), "r" (new)
	: "cc");

	return oldval;
}

#define arch_cmpxchg64_relaxed(ptr, o, n) ({				\
	(__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					(unsigned long long)(o),	\
					(unsigned long long)(n));	\
})

#define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))

#endif	/* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASM_ARM_CMPXCHG_H */
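
The main structural change between the two versions is that v6.9.4 drops the hard-coded smp_mb() pairs (__cmpxchg_mb() and __cmpxchg64_mb() are gone) and exports only relaxed operations such as arch_xchg_relaxed() and arch_cmpxchg_relaxed(); the generic atomics layer then derives the fully ordered forms by bracketing the relaxed call with barriers. The userspace sketch below (GCC __atomic builtins again; all names invented) shows that construction, under the assumption that a full fence on each side is a fair model of what the generic fallback does.

/*
 * Hedged sketch, not the kernel's actual fallback code: a fully
 * ordered cmpxchg built from a relaxed one plus explicit fences,
 * mirroring how the generic layer wraps arch_cmpxchg_relaxed().
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long value;

/* Analogue of arch_cmpxchg_relaxed(): atomicity, no extra ordering. */
static unsigned long cmpxchg_relaxed_demo(unsigned long *ptr,
					  unsigned long old,
					  unsigned long new)
{
	__atomic_compare_exchange_n(ptr, &old, new, false,
				    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	return old;	/* rewritten to the observed value on failure */
}

/* Analogue of the generic fallback that builds the ordered form. */
static unsigned long cmpxchg_demo(unsigned long *ptr,
				  unsigned long old, unsigned long new)
{
	unsigned long ret;

	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* like smp_mb() */
	ret = cmpxchg_relaxed_demo(ptr, old, new);
	__atomic_thread_fence(__ATOMIC_SEQ_CST);	/* like smp_mb() */
	return ret;
}

int main(void)
{
	value = 5;
	printf("old=%lu new=%lu\n", cmpxchg_demo(&value, 5, 9), value);
	return 0;	/* prints: old=5 new=9 */
}

Returning old works for both outcomes because __atomic_compare_exchange_n() updates it to the observed value when the comparison fails, which matches cmpxchg()'s convention of always returning the previous value at ptr.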