arch/powerpc/include/asm/cmpxchg.h (Linux v6.2)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _ASM_POWERPC_CMPXCHG_H_
  3#define _ASM_POWERPC_CMPXCHG_H_
  4
  5#ifdef __KERNEL__
  6#include <linux/compiler.h>
  7#include <asm/synch.h>
  8#include <linux/bug.h>
  9
 10#ifdef __BIG_ENDIAN
 11#define BITOFF_CAL(size, off)	((sizeof(u32) - size - off) * BITS_PER_BYTE)
 12#else
 13#define BITOFF_CAL(size, off)	(off * BITS_PER_BYTE)
 14#endif
 15
 16#define XCHG_GEN(type, sfx, cl)				\
 17static inline u32 __xchg_##type##sfx(volatile void *p, u32 val)	\
 18{								\
 19	unsigned int prev, prev_mask, tmp, bitoff, off;		\
 20								\
 21	off = (unsigned long)p % sizeof(u32);			\
 22	bitoff = BITOFF_CAL(sizeof(type), off);			\
 23	p -= off;						\
 24	val <<= bitoff;						\
 25	prev_mask = (u32)(type)-1 << bitoff;			\
 26								\
 27	__asm__ __volatile__(					\
 28"1:	lwarx   %0,0,%3\n"					\
 29"	andc	%1,%0,%5\n"					\
 30"	or	%1,%1,%4\n"					\
 31"	stwcx.	%1,0,%3\n"					\
 32"	bne-	1b\n"						\
 33	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)		\
 34	: "r" (p), "r" (val), "r" (prev_mask)			\
 35	: "cc", cl);						\
 36								\
 37	return prev >> bitoff;					\
 38}
 39
 40#define CMPXCHG_GEN(type, sfx, br, br2, cl)			\
 41static inline							\
 42u32 __cmpxchg_##type##sfx(volatile void *p, u32 old, u32 new)	\
 43{								\
 44	unsigned int prev, prev_mask, tmp, bitoff, off;		\
 45								\
 46	off = (unsigned long)p % sizeof(u32);			\
 47	bitoff = BITOFF_CAL(sizeof(type), off);			\
 48	p -= off;						\
 49	old <<= bitoff;						\
 50	new <<= bitoff;						\
 51	prev_mask = (u32)(type)-1 << bitoff;			\
 52								\
 53	__asm__ __volatile__(					\
 54	br							\
 55"1:	lwarx   %0,0,%3\n"					\
 56"	and	%1,%0,%6\n"					\
 57"	cmpw	0,%1,%4\n"					\
 58"	bne-	2f\n"						\
 59"	andc	%1,%0,%6\n"					\
 60"	or	%1,%1,%5\n"					\
 61"	stwcx.  %1,0,%3\n"					\
 62"	bne-    1b\n"						\
 63	br2							\
 64	"\n"							\
 65"2:"								\
 66	: "=&r" (prev), "=&r" (tmp), "+m" (*(u32*)p)		\
 67	: "r" (p), "r" (old), "r" (new), "r" (prev_mask)	\
 68	: "cc", cl);						\
 69								\
 70	return prev >> bitoff;					\
 71}
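
/*
 * [Editor's note, not part of the kernel header] A minimal sketch of the
 * masking trick that XCHG_GEN/CMPXCHG_GEN rely on: without lbarx/lharx, a
 * u8 or u16 exchange is emulated by reserving the aligned 32-bit word that
 * contains it (lwarx/stwcx.) and rewriting only that sub-field. The plain C
 * below shows the little-endian case of BITOFF_CAL; the names are
 * illustrative, and the real code does the rewrite inside the ll/sc retry
 * loop rather than with an ordinary load and store.
 */
static inline u32 emulated_xchg_u8_sketch(u32 *word, unsigned int byte_off, u8 val)
{
	unsigned int bitoff = byte_off * 8;	/* BITOFF_CAL(), little-endian case */
	u32 mask = (u32)0xff << bitoff;		/* prev_mask in the macros above */
	u32 old = *word;			/* lwarx in the real code */

	/* Clear the byte's lane, then OR in the shifted new value (stwcx.). */
	*word = (old & ~mask) | ((u32)val << bitoff);

	/* The macros return prev >> bitoff and let the caller truncate. */
	return (old >> bitoff) & 0xff;
}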
 72
 73/*
 74 * Atomic exchange
 75 *
 76 * Changes the memory location '*p' to be val and returns
 77 * the previous value stored there.
 78 */
 79
 80#ifndef CONFIG_PPC_HAS_LBARX_LHARX
 81XCHG_GEN(u8, _local, "memory");
 82XCHG_GEN(u8, _relaxed, "cc");
 83XCHG_GEN(u16, _local, "memory");
 84XCHG_GEN(u16, _relaxed, "cc");
 85#else
 86static __always_inline unsigned long
 87__xchg_u8_local(volatile void *p, unsigned long val)
 88{
 89	unsigned long prev;
 90
 91	__asm__ __volatile__(
 92"1:	lbarx	%0,0,%2		# __xchg_u8_local\n"
 93"	stbcx.	%3,0,%2 \n"
 94"	bne-	1b"
 95	: "=&r" (prev), "+m" (*(volatile unsigned char *)p)
 96	: "r" (p), "r" (val)
 97	: "cc", "memory");
 98
 99	return prev;
100}
101
102static __always_inline unsigned long
103__xchg_u8_relaxed(u8 *p, unsigned long val)
104{
105	unsigned long prev;
106
107	__asm__ __volatile__(
108"1:	lbarx	%0,0,%2		# __xchg_u8_relaxed\n"
109"	stbcx.	%3,0,%2\n"
110"	bne-	1b"
111	: "=&r" (prev), "+m" (*p)
112	: "r" (p), "r" (val)
113	: "cc");
114
115	return prev;
116}
117
118static __always_inline unsigned long
119__xchg_u16_local(volatile void *p, unsigned long val)
120{
121	unsigned long prev;
122
123	__asm__ __volatile__(
124"1:	lharx	%0,0,%2		# __xchg_u16_local\n"
125"	sthcx.	%3,0,%2\n"
126"	bne-	1b"
127	: "=&r" (prev), "+m" (*(volatile unsigned short *)p)
128	: "r" (p), "r" (val)
129	: "cc", "memory");
130
131	return prev;
132}
133
134static __always_inline unsigned long
135__xchg_u16_relaxed(u16 *p, unsigned long val)
136{
137	unsigned long prev;
138
139	__asm__ __volatile__(
140"1:	lharx	%0,0,%2		# __xchg_u16_relaxed\n"
141"	sthcx.	%3,0,%2\n"
142"	bne-	1b"
143	: "=&r" (prev), "+m" (*p)
144	: "r" (p), "r" (val)
145	: "cc");
146
147	return prev;
148}
149#endif
150
151static __always_inline unsigned long
152__xchg_u32_local(volatile void *p, unsigned long val)
153{
154	unsigned long prev;
155
156	__asm__ __volatile__(
157"1:	lwarx	%0,0,%2 \n"
158"	stwcx.	%3,0,%2 \n\
159	bne-	1b"
160	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
161	: "r" (p), "r" (val)
162	: "cc", "memory");
163
164	return prev;
165}
166
167static __always_inline unsigned long
168__xchg_u32_relaxed(u32 *p, unsigned long val)
169{
170	unsigned long prev;
171
172	__asm__ __volatile__(
173"1:	lwarx	%0,0,%2\n"
174"	stwcx.	%3,0,%2\n"
175"	bne-	1b"
176	: "=&r" (prev), "+m" (*p)
177	: "r" (p), "r" (val)
178	: "cc");
179
180	return prev;
181}
182
183#ifdef CONFIG_PPC64
184static __always_inline unsigned long
185__xchg_u64_local(volatile void *p, unsigned long val)
186{
187	unsigned long prev;
188
189	__asm__ __volatile__(
190"1:	ldarx	%0,0,%2 \n"
191"	stdcx.	%3,0,%2 \n\
192	bne-	1b"
193	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
194	: "r" (p), "r" (val)
195	: "cc", "memory");
196
197	return prev;
198}
199
200static __always_inline unsigned long
201__xchg_u64_relaxed(u64 *p, unsigned long val)
202{
203	unsigned long prev;
204
205	__asm__ __volatile__(
206"1:	ldarx	%0,0,%2\n"
207"	stdcx.	%3,0,%2\n"
208"	bne-	1b"
209	: "=&r" (prev), "+m" (*p)
210	: "r" (p), "r" (val)
211	: "cc");
212
213	return prev;
214}
215#endif
216
217static __always_inline unsigned long
218__xchg_local(void *ptr, unsigned long x, unsigned int size)
219{
220	switch (size) {
221	case 1:
222		return __xchg_u8_local(ptr, x);
223	case 2:
224		return __xchg_u16_local(ptr, x);
225	case 4:
226		return __xchg_u32_local(ptr, x);
227#ifdef CONFIG_PPC64
228	case 8:
229		return __xchg_u64_local(ptr, x);
230#endif
231	}
232	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg");
233	return x;
234}
235
236static __always_inline unsigned long
237__xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
238{
239	switch (size) {
240	case 1:
241		return __xchg_u8_relaxed(ptr, x);
242	case 2:
243		return __xchg_u16_relaxed(ptr, x);
244	case 4:
245		return __xchg_u32_relaxed(ptr, x);
246#ifdef CONFIG_PPC64
247	case 8:
248		return __xchg_u64_relaxed(ptr, x);
249#endif
250	}
251	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
252	return x;
253}
254#define arch_xchg_local(ptr,x)						     \
255  ({									     \
256     __typeof__(*(ptr)) _x_ = (x);					     \
257     (__typeof__(*(ptr))) __xchg_local((ptr),				     \
258     		(unsigned long)_x_, sizeof(*(ptr))); 			     \
259  })
260
261#define arch_xchg_relaxed(ptr, x)					\
262({									\
263	__typeof__(*(ptr)) _x_ = (x);					\
264	(__typeof__(*(ptr))) __xchg_relaxed((ptr),			\
265			(unsigned long)_x_, sizeof(*(ptr)));		\
266})
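
/*
 * [Editor's note, not part of the kernel header] A hypothetical caller of the
 * macros above, just to make the exchange semantics concrete: one call both
 * installs the new value and reports what was stored before. The _local
 * variant provides no cross-CPU ordering, so this sketch assumes per-CPU
 * data; the function and variable names are made up.
 */
static inline int claim_slot(int *slot)
{
	/* Atomically store 1 and return the previous value; 0 means we won. */
	return arch_xchg_local(slot, 1);
}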
267
268/*
269 * Compare and exchange - if *p == old, set it to new,
270 * and return the old value of *p.
271 */
272#ifndef CONFIG_PPC_HAS_LBARX_LHARX
273CMPXCHG_GEN(u8, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
274CMPXCHG_GEN(u8, _local, , , "memory");
275CMPXCHG_GEN(u8, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
276CMPXCHG_GEN(u8, _relaxed, , , "cc");
277CMPXCHG_GEN(u16, , PPC_ATOMIC_ENTRY_BARRIER, PPC_ATOMIC_EXIT_BARRIER, "memory");
278CMPXCHG_GEN(u16, _local, , , "memory");
279CMPXCHG_GEN(u16, _acquire, , PPC_ACQUIRE_BARRIER, "memory");
280CMPXCHG_GEN(u16, _relaxed, , , "cc");
281#else
282static __always_inline unsigned long
283__cmpxchg_u8(volatile unsigned char *p, unsigned long old, unsigned long new)
284{
285	unsigned int prev;
286
287	__asm__ __volatile__ (
288	PPC_ATOMIC_ENTRY_BARRIER
289"1:	lbarx	%0,0,%2		# __cmpxchg_u8\n"
290"	cmpw	0,%0,%3\n"
291"	bne-	2f\n"
292"	stbcx.	%4,0,%2\n"
293"	bne-	1b"
294	PPC_ATOMIC_EXIT_BARRIER
295	"\n\
2962:"
297	: "=&r" (prev), "+m" (*p)
298	: "r" (p), "r" (old), "r" (new)
299	: "cc", "memory");
300
301	return prev;
302}
303
304static __always_inline unsigned long
305__cmpxchg_u8_local(volatile unsigned char *p, unsigned long old,
306			unsigned long new)
307{
308	unsigned int prev;
309
310	__asm__ __volatile__ (
311"1:	lbarx	%0,0,%2		# __cmpxchg_u8_local\n"
312"	cmpw	0,%0,%3\n"
313"	bne-	2f\n"
314"	stbcx.	%4,0,%2\n"
315"	bne-	1b\n"
316"2:"
317	: "=&r" (prev), "+m" (*p)
318	: "r" (p), "r" (old), "r" (new)
319	: "cc", "memory");
320
321	return prev;
322}
323
324static __always_inline unsigned long
325__cmpxchg_u8_relaxed(u8 *p, unsigned long old, unsigned long new)
326{
327	unsigned long prev;
328
329	__asm__ __volatile__ (
330"1:	lbarx	%0,0,%2		# __cmpxchg_u8_relaxed\n"
331"	cmpw	0,%0,%3\n"
332"	bne-	2f\n"
333"	stbcx.	%4,0,%2\n"
334"	bne-	1b\n"
335"2:"
336	: "=&r" (prev), "+m" (*p)
337	: "r" (p), "r" (old), "r" (new)
338	: "cc");
339
340	return prev;
341}
342
343static __always_inline unsigned long
344__cmpxchg_u8_acquire(u8 *p, unsigned long old, unsigned long new)
345{
346	unsigned long prev;
347
348	__asm__ __volatile__ (
349"1:	lbarx	%0,0,%2		# __cmpxchg_u8_acquire\n"
350"	cmpw	0,%0,%3\n"
351"	bne-	2f\n"
352"	stbcx.	%4,0,%2\n"
353"	bne-	1b\n"
354	PPC_ACQUIRE_BARRIER
355"2:"
356	: "=&r" (prev), "+m" (*p)
357	: "r" (p), "r" (old), "r" (new)
358	: "cc", "memory");
359
360	return prev;
361}
362
363static __always_inline unsigned long
364__cmpxchg_u16(volatile unsigned short *p, unsigned long old, unsigned long new)
365{
366	unsigned int prev;
367
368	__asm__ __volatile__ (
369	PPC_ATOMIC_ENTRY_BARRIER
370"1:	lharx	%0,0,%2		# __cmpxchg_u16\n"
371"	cmpw	0,%0,%3\n"
372"	bne-	2f\n"
373"	sthcx.	%4,0,%2\n"
374"	bne-	1b\n"
375	PPC_ATOMIC_EXIT_BARRIER
376"2:"
377	: "=&r" (prev), "+m" (*p)
378	: "r" (p), "r" (old), "r" (new)
379	: "cc", "memory");
380
381	return prev;
382}
383
384static __always_inline unsigned long
385__cmpxchg_u16_local(volatile unsigned short *p, unsigned long old,
386			unsigned long new)
387{
388	unsigned int prev;
389
390	__asm__ __volatile__ (
391"1:	lharx	%0,0,%2		# __cmpxchg_u16_local\n"
392"	cmpw	0,%0,%3\n"
393"	bne-	2f\n"
394"	sthcx.	%4,0,%2\n"
395"	bne-	1b"
396"2:"
397	: "=&r" (prev), "+m" (*p)
398	: "r" (p), "r" (old), "r" (new)
399	: "cc", "memory");
400
401	return prev;
402}
403
404static __always_inline unsigned long
405__cmpxchg_u16_relaxed(u16 *p, unsigned long old, unsigned long new)
406{
407	unsigned long prev;
408
409	__asm__ __volatile__ (
410"1:	lharx	%0,0,%2		# __cmpxchg_u16_relaxed\n"
411"	cmpw	0,%0,%3\n"
412"	bne-	2f\n"
413"	sthcx.	%4,0,%2\n"
414"	bne-	1b\n"
415"2:"
416	: "=&r" (prev), "+m" (*p)
417	: "r" (p), "r" (old), "r" (new)
418	: "cc");
419
420	return prev;
421}
422
423static __always_inline unsigned long
424__cmpxchg_u16_acquire(u16 *p, unsigned long old, unsigned long new)
425{
426	unsigned long prev;
427
428	__asm__ __volatile__ (
429"1:	lharx	%0,0,%2		# __cmpxchg_u16_acquire\n"
430"	cmpw	0,%0,%3\n"
431"	bne-	2f\n"
432"	sthcx.	%4,0,%2\n"
433"	bne-	1b\n"
434	PPC_ACQUIRE_BARRIER
435"2:"
436	: "=&r" (prev), "+m" (*p)
437	: "r" (p), "r" (old), "r" (new)
438	: "cc", "memory");
439
440	return prev;
441}
442#endif
443
444static __always_inline unsigned long
445__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
446{
447	unsigned int prev;
448
449	__asm__ __volatile__ (
450	PPC_ATOMIC_ENTRY_BARRIER
451"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
452	cmpw	0,%0,%3\n\
453	bne-	2f\n"
454"	stwcx.	%4,0,%2\n\
455	bne-	1b"
456	PPC_ATOMIC_EXIT_BARRIER
457	"\n\
4582:"
459	: "=&r" (prev), "+m" (*p)
460	: "r" (p), "r" (old), "r" (new)
461	: "cc", "memory");
462
463	return prev;
464}
465
466static __always_inline unsigned long
467__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
468			unsigned long new)
469{
470	unsigned int prev;
471
472	__asm__ __volatile__ (
473"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
474	cmpw	0,%0,%3\n\
475	bne-	2f\n"
476"	stwcx.	%4,0,%2\n\
477	bne-	1b"
478	"\n\
4792:"
480	: "=&r" (prev), "+m" (*p)
481	: "r" (p), "r" (old), "r" (new)
482	: "cc", "memory");
483
484	return prev;
485}
486
487static __always_inline unsigned long
488__cmpxchg_u32_relaxed(u32 *p, unsigned long old, unsigned long new)
489{
490	unsigned long prev;
491
492	__asm__ __volatile__ (
493"1:	lwarx	%0,0,%2		# __cmpxchg_u32_relaxed\n"
494"	cmpw	0,%0,%3\n"
495"	bne-	2f\n"
496"	stwcx.	%4,0,%2\n"
497"	bne-	1b\n"
498"2:"
499	: "=&r" (prev), "+m" (*p)
500	: "r" (p), "r" (old), "r" (new)
501	: "cc");
502
503	return prev;
504}
505
506/*
507 * The cmpxchg family provides no ordering guarantee when the compare part
508 * fails, so we can avoid superfluous barriers by implementing cmpxchg() and
509 * cmpxchg_acquire() in assembly. We do not do the same for cmpxchg_release(),
510 * because that would put a barrier in the middle of an ll/sc loop, which is
511 * probably a bad idea. For example, it might make the conditional store more
512 * likely to fail.
513 */
514static __always_inline unsigned long
515__cmpxchg_u32_acquire(u32 *p, unsigned long old, unsigned long new)
516{
517	unsigned long prev;
518
519	__asm__ __volatile__ (
520"1:	lwarx	%0,0,%2		# __cmpxchg_u32_acquire\n"
521"	cmpw	0,%0,%3\n"
522"	bne-	2f\n"
523"	stwcx.	%4,0,%2\n"
524"	bne-	1b\n"
525	PPC_ACQUIRE_BARRIER
526	"\n"
527"2:"
528	: "=&r" (prev), "+m" (*p)
529	: "r" (p), "r" (old), "r" (new)
530	: "cc", "memory");
531
532	return prev;
533}
534
535#ifdef CONFIG_PPC64
536static __always_inline unsigned long
537__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
538{
539	unsigned long prev;
540
541	__asm__ __volatile__ (
542	PPC_ATOMIC_ENTRY_BARRIER
543"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
544	cmpd	0,%0,%3\n\
545	bne-	2f\n\
546	stdcx.	%4,0,%2\n\
547	bne-	1b"
548	PPC_ATOMIC_EXIT_BARRIER
549	"\n\
5502:"
551	: "=&r" (prev), "+m" (*p)
552	: "r" (p), "r" (old), "r" (new)
553	: "cc", "memory");
554
555	return prev;
556}
557
558static __always_inline unsigned long
559__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
560			unsigned long new)
561{
562	unsigned long prev;
563
564	__asm__ __volatile__ (
565"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
566	cmpd	0,%0,%3\n\
567	bne-	2f\n\
568	stdcx.	%4,0,%2\n\
569	bne-	1b"
570	"\n\
5712:"
572	: "=&r" (prev), "+m" (*p)
573	: "r" (p), "r" (old), "r" (new)
574	: "cc", "memory");
575
576	return prev;
577}
578
579static __always_inline unsigned long
580__cmpxchg_u64_relaxed(u64 *p, unsigned long old, unsigned long new)
581{
582	unsigned long prev;
583
584	__asm__ __volatile__ (
585"1:	ldarx	%0,0,%2		# __cmpxchg_u64_relaxed\n"
586"	cmpd	0,%0,%3\n"
587"	bne-	2f\n"
588"	stdcx.	%4,0,%2\n"
589"	bne-	1b\n"
590"2:"
591	: "=&r" (prev), "+m" (*p)
592	: "r" (p), "r" (old), "r" (new)
593	: "cc");
594
595	return prev;
596}
597
598static __always_inline unsigned long
599__cmpxchg_u64_acquire(u64 *p, unsigned long old, unsigned long new)
600{
601	unsigned long prev;
602
603	__asm__ __volatile__ (
604"1:	ldarx	%0,0,%2		# __cmpxchg_u64_acquire\n"
605"	cmpd	0,%0,%3\n"
606"	bne-	2f\n"
607"	stdcx.	%4,0,%2\n"
608"	bne-	1b\n"
609	PPC_ACQUIRE_BARRIER
610	"\n"
611"2:"
612	: "=&r" (prev), "+m" (*p)
613	: "r" (p), "r" (old), "r" (new)
614	: "cc", "memory");
615
616	return prev;
617}
618#endif
619
620static __always_inline unsigned long
621__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
622	  unsigned int size)
623{
624	switch (size) {
625	case 1:
626		return __cmpxchg_u8(ptr, old, new);
627	case 2:
628		return __cmpxchg_u16(ptr, old, new);
629	case 4:
630		return __cmpxchg_u32(ptr, old, new);
631#ifdef CONFIG_PPC64
632	case 8:
633		return __cmpxchg_u64(ptr, old, new);
634#endif
635	}
636	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg");
637	return old;
638}
639
640static __always_inline unsigned long
641__cmpxchg_local(void *ptr, unsigned long old, unsigned long new,
642	  unsigned int size)
643{
644	switch (size) {
645	case 1:
646		return __cmpxchg_u8_local(ptr, old, new);
647	case 2:
648		return __cmpxchg_u16_local(ptr, old, new);
649	case 4:
650		return __cmpxchg_u32_local(ptr, old, new);
651#ifdef CONFIG_PPC64
652	case 8:
653		return __cmpxchg_u64_local(ptr, old, new);
654#endif
655	}
656	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_local");
657	return old;
658}
659
660static __always_inline unsigned long
661__cmpxchg_relaxed(void *ptr, unsigned long old, unsigned long new,
662		  unsigned int size)
663{
664	switch (size) {
665	case 1:
666		return __cmpxchg_u8_relaxed(ptr, old, new);
667	case 2:
668		return __cmpxchg_u16_relaxed(ptr, old, new);
669	case 4:
670		return __cmpxchg_u32_relaxed(ptr, old, new);
671#ifdef CONFIG_PPC64
672	case 8:
673		return __cmpxchg_u64_relaxed(ptr, old, new);
674#endif
675	}
676	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_relaxed");
677	return old;
678}
679
680static __always_inline unsigned long
681__cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
682		  unsigned int size)
683{
684	switch (size) {
685	case 1:
686		return __cmpxchg_u8_acquire(ptr, old, new);
687	case 2:
688		return __cmpxchg_u16_acquire(ptr, old, new);
689	case 4:
690		return __cmpxchg_u32_acquire(ptr, old, new);
691#ifdef CONFIG_PPC64
692	case 8:
693		return __cmpxchg_u64_acquire(ptr, old, new);
694#endif
695	}
696	BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
697	return old;
698}
699#define arch_cmpxchg(ptr, o, n)						 \
700  ({									 \
701     __typeof__(*(ptr)) _o_ = (o);					 \
702     __typeof__(*(ptr)) _n_ = (n);					 \
703     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
704				    (unsigned long)_n_, sizeof(*(ptr))); \
705  })
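
/*
 * [Editor's note, not part of the kernel header] The usual consumption
 * pattern for arch_cmpxchg() is a read/compute/retry loop: the update only
 * lands if nobody modified the value in between, and a failed attempt hands
 * back the value actually seen so the caller can retry from it.
 * atomic_max_u32() is a made-up example.
 */
static inline void atomic_max_u32(u32 *p, u32 new)
{
	u32 old = READ_ONCE(*p);

	while (old < new) {
		u32 seen = arch_cmpxchg(p, old, new);

		if (seen == old)	/* our update won */
			break;
		old = seen;		/* *p changed under us, retry */
	}
}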
706
707
708#define arch_cmpxchg_local(ptr, o, n)					 \
709  ({									 \
710     __typeof__(*(ptr)) _o_ = (o);					 \
711     __typeof__(*(ptr)) _n_ = (n);					 \
712     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
713				    (unsigned long)_n_, sizeof(*(ptr))); \
714  })
715
716#define arch_cmpxchg_relaxed(ptr, o, n)					\
717({									\
718	__typeof__(*(ptr)) _o_ = (o);					\
719	__typeof__(*(ptr)) _n_ = (n);					\
720	(__typeof__(*(ptr))) __cmpxchg_relaxed((ptr),			\
721			(unsigned long)_o_, (unsigned long)_n_,		\
722			sizeof(*(ptr)));				\
723})
724
725#define arch_cmpxchg_acquire(ptr, o, n)					\
726({									\
727	__typeof__(*(ptr)) _o_ = (o);					\
728	__typeof__(*(ptr)) _n_ = (n);					\
729	(__typeof__(*(ptr))) __cmpxchg_acquire((ptr),			\
730			(unsigned long)_o_, (unsigned long)_n_,		\
731			sizeof(*(ptr)));				\
732})
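
/*
 * [Editor's note, not part of the kernel header] A sketch of why acquire
 * ordering on the success path is usually all a caller needs, per the
 * comment above __cmpxchg_u32_acquire(): in a trylock, the critical section
 * must not be reordered before a *successful* compare-and-exchange, while a
 * failed attempt publishes nothing. The name is hypothetical.
 */
static inline int example_trylock(unsigned int *lock)
{
	/* 0 = unlocked, 1 = held; returns nonzero if the lock was taken. */
	return arch_cmpxchg_acquire(lock, 0, 1) == 0;
}
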
733#ifdef CONFIG_PPC64
734#define arch_cmpxchg64(ptr, o, n)					\
735  ({									\
736	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
737	arch_cmpxchg((ptr), (o), (n));					\
738  })
739#define arch_cmpxchg64_local(ptr, o, n)					\
740  ({									\
741	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
742	arch_cmpxchg_local((ptr), (o), (n));				\
743  })
744#define arch_cmpxchg64_relaxed(ptr, o, n)				\
745({									\
746	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
747	arch_cmpxchg_relaxed((ptr), (o), (n));				\
748})
749#define arch_cmpxchg64_acquire(ptr, o, n)				\
750({									\
751	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
752	arch_cmpxchg_acquire((ptr), (o), (n));				\
753})
754#else
755#include <asm-generic/cmpxchg-local.h>
756#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
757#endif
758
759#endif /* __KERNEL__ */
760#endif /* _ASM_POWERPC_CMPXCHG_H_ */
arch/powerpc/include/asm/cmpxchg.h (Linux v3.5.6)
  1#ifndef _ASM_POWERPC_CMPXCHG_H_
  2#define _ASM_POWERPC_CMPXCHG_H_
  3
  4#ifdef __KERNEL__
  5#include <linux/compiler.h>
  6#include <asm/synch.h>
  7#include <asm/asm-compat.h>
  8
  9/*
 10 * Atomic exchange
 11 *
 12 * Changes the memory location '*ptr' to be val and returns
 13 * the previous value stored there.
 14 */
 15static __always_inline unsigned long
 16__xchg_u32(volatile void *p, unsigned long val)
 17{
 18	unsigned long prev;
 19
 20	__asm__ __volatile__(
 21	PPC_RELEASE_BARRIER
 22"1:	lwarx	%0,0,%2 \n"
 23	PPC405_ERR77(0,%2)
 24"	stwcx.	%3,0,%2 \n\
 25	bne-	1b"
 26	PPC_ACQUIRE_BARRIER
 27	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
 28	: "r" (p), "r" (val)
 29	: "cc", "memory");
 30
 31	return prev;
 32}
 33
 34/*
 35 * Atomic exchange
 36 *
 37 * Changes the memory location '*ptr' to be val and returns
 38 * the previous value stored there.
 39 */
 40static __always_inline unsigned long
 41__xchg_u32_local(volatile void *p, unsigned long val)
 42{
 43	unsigned long prev;
 44
 45	__asm__ __volatile__(
 46"1:	lwarx	%0,0,%2 \n"
 47	PPC405_ERR77(0,%2)
 48"	stwcx.	%3,0,%2 \n\
 49	bne-	1b"
 50	: "=&r" (prev), "+m" (*(volatile unsigned int *)p)
 51	: "r" (p), "r" (val)
 52	: "cc", "memory");
 53
 54	return prev;
 55}
 56
 57#ifdef CONFIG_PPC64
 58static __always_inline unsigned long
 59__xchg_u64(volatile void *p, unsigned long val)
 60{
 61	unsigned long prev;
 62
 63	__asm__ __volatile__(
 64	PPC_RELEASE_BARRIER
 65"1:	ldarx	%0,0,%2 \n"
 66	PPC405_ERR77(0,%2)
 67"	stdcx.	%3,0,%2 \n\
 68	bne-	1b"
 69	PPC_ACQUIRE_BARRIER
 70	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
 71	: "r" (p), "r" (val)
 72	: "cc", "memory");
 73
 74	return prev;
 75}
 76
 77static __always_inline unsigned long
 78__xchg_u64_local(volatile void *p, unsigned long val)
 79{
 80	unsigned long prev;
 81
 82	__asm__ __volatile__(
 83"1:	ldarx	%0,0,%2 \n"
 84	PPC405_ERR77(0,%2)
 85"	stdcx.	%3,0,%2 \n\
 86	bne-	1b"
 87	: "=&r" (prev), "+m" (*(volatile unsigned long *)p)
 88	: "r" (p), "r" (val)
 89	: "cc", "memory");
 90
 91	return prev;
 92}
 93#endif
 94
 95/*
 96 * This function doesn't exist, so you'll get a linker error
 97 * if something tries to do an invalid xchg().
 98 */
 99extern void __xchg_called_with_bad_pointer(void);
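
/*
 * [Editor's note, not part of the kernel header] The same pattern in
 * miniature: declare a function that is never defined anywhere and call it
 * only from branches that must be impossible. When the size is a
 * compile-time constant, the supported cases are resolved and the dead call
 * is eliminated; an unsupported size leaves the call in place and the build
 * fails at link time instead of silently misbehaving. Names are made up.
 */
extern void __bad_sized_access(void);	/* deliberately never defined */

static __always_inline unsigned long
read_sized(volatile void *p, unsigned int size)
{
	switch (size) {
	case 1:
		return *(volatile unsigned char *)p;
	case 4:
		return *(volatile unsigned int *)p;
	}
	__bad_sized_access();		/* unreachable for supported sizes */
	return 0;
}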
100
101static __always_inline unsigned long
102__xchg(volatile void *ptr, unsigned long x, unsigned int size)
103{
104	switch (size) {
105	case 4:
106		return __xchg_u32(ptr, x);
107#ifdef CONFIG_PPC64
108	case 8:
109		return __xchg_u64(ptr, x);
110#endif
111	}
112	__xchg_called_with_bad_pointer();
113	return x;
114}
115
116static __always_inline unsigned long
117__xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
118{
119	switch (size) {
120	case 4:
121		return __xchg_u32_local(ptr, x);
122#ifdef CONFIG_PPC64
123	case 8:
124		return __xchg_u64_local(ptr, x);
125#endif
126	}
127	__xchg_called_with_bad_pointer();
128	return x;
129}
130#define xchg(ptr,x)							     \
131  ({									     \
132     __typeof__(*(ptr)) _x_ = (x);					     \
133     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
134  })
135
136#define xchg_local(ptr,x)						     \
137  ({									     \
138     __typeof__(*(ptr)) _x_ = (x);					     \
139     (__typeof__(*(ptr))) __xchg_local((ptr),				     \
140     		(unsigned long)_x_, sizeof(*(ptr))); 			     \
141  })
142
143/*
144 * Compare and exchange - if *p == old, set it to new,
145 * and return the old value of *p.
146 */
147#define __HAVE_ARCH_CMPXCHG	1
148
149static __always_inline unsigned long
150__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
151{
152	unsigned int prev;
153
154	__asm__ __volatile__ (
155	PPC_RELEASE_BARRIER
156"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
157	cmpw	0,%0,%3\n\
158	bne-	2f\n"
159	PPC405_ERR77(0,%2)
160"	stwcx.	%4,0,%2\n\
161	bne-	1b"
162	PPC_ACQUIRE_BARRIER
163	"\n\
1642:"
165	: "=&r" (prev), "+m" (*p)
166	: "r" (p), "r" (old), "r" (new)
167	: "cc", "memory");
168
169	return prev;
170}
171
172static __always_inline unsigned long
173__cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
174			unsigned long new)
175{
176	unsigned int prev;
177
178	__asm__ __volatile__ (
179"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
180	cmpw	0,%0,%3\n\
181	bne-	2f\n"
182	PPC405_ERR77(0,%2)
183"	stwcx.	%4,0,%2\n\
184	bne-	1b"
185	"\n\
1862:"
187	: "=&r" (prev), "+m" (*p)
188	: "r" (p), "r" (old), "r" (new)
189	: "cc", "memory");
190
191	return prev;
192}
193
194#ifdef CONFIG_PPC64
195static __always_inline unsigned long
196__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
197{
198	unsigned long prev;
199
200	__asm__ __volatile__ (
201	PPC_RELEASE_BARRIER
202"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
203	cmpd	0,%0,%3\n\
204	bne-	2f\n\
205	stdcx.	%4,0,%2\n\
206	bne-	1b"
207	PPC_ACQUIRE_BARRIER
208	"\n\
2092:"
210	: "=&r" (prev), "+m" (*p)
211	: "r" (p), "r" (old), "r" (new)
212	: "cc", "memory");
213
214	return prev;
215}
216
217static __always_inline unsigned long
218__cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
219			unsigned long new)
220{
221	unsigned long prev;
222
223	__asm__ __volatile__ (
224"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
225	cmpd	0,%0,%3\n\
226	bne-	2f\n\
227	stdcx.	%4,0,%2\n\
228	bne-	1b"
229	"\n\
2302:"
231	: "=&r" (prev), "+m" (*p)
232	: "r" (p), "r" (old), "r" (new)
233	: "cc", "memory");
234
235	return prev;
236}
237#endif
238
239/* This function doesn't exist, so you'll get a linker error
240   if something tries to do an invalid cmpxchg().  */
241extern void __cmpxchg_called_with_bad_pointer(void);
242
243static __always_inline unsigned long
244__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
245	  unsigned int size)
246{
247	switch (size) {
248	case 4:
249		return __cmpxchg_u32(ptr, old, new);
250#ifdef CONFIG_PPC64
251	case 8:
252		return __cmpxchg_u64(ptr, old, new);
253#endif
254	}
255	__cmpxchg_called_with_bad_pointer();
256	return old;
257}
258
259static __always_inline unsigned long
260__cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
261	  unsigned int size)
262{
263	switch (size) {
264	case 4:
265		return __cmpxchg_u32_local(ptr, old, new);
266#ifdef CONFIG_PPC64
267	case 8:
268		return __cmpxchg_u64_local(ptr, old, new);
269#endif
270	}
271	__cmpxchg_called_with_bad_pointer();
272	return old;
273}
274
275#define cmpxchg(ptr, o, n)						 \
276  ({									 \
277     __typeof__(*(ptr)) _o_ = (o);					 \
278     __typeof__(*(ptr)) _n_ = (n);					 \
279     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
280				    (unsigned long)_n_, sizeof(*(ptr))); \
281  })
282
283
284#define cmpxchg_local(ptr, o, n)					 \
285  ({									 \
286     __typeof__(*(ptr)) _o_ = (o);					 \
287     __typeof__(*(ptr)) _n_ = (n);					 \
288     (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_,	 \
289				    (unsigned long)_n_, sizeof(*(ptr))); \
290  })
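
/*
 * [Editor's note, not part of the kernel header] In this version cmpxchg()
 * is ordered on both sides (PPC_RELEASE_BARRIER before and
 * PPC_ACQUIRE_BARRIER after the ll/sc loop), while cmpxchg_local() emits no
 * barriers and is meant for data that other CPUs never touch concurrently,
 * such as per-CPU statistics. A hypothetical sketch with made-up names:
 */
static inline void percpu_counter_cap(unsigned int *counter, unsigned int cap)
{
	unsigned int old = *counter;

	/* Bump a per-CPU counter by one, but never past 'cap'. */
	while (old < cap) {
		unsigned int seen = cmpxchg_local(counter, old, old + 1);

		if (seen == old)
			break;
		old = seen;	/* changed from interrupt context, retry */
	}
}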
291
292#ifdef CONFIG_PPC64
293#define cmpxchg64(ptr, o, n)						\
294  ({									\
295	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
296	cmpxchg((ptr), (o), (n));					\
297  })
298#define cmpxchg64_local(ptr, o, n)					\
299  ({									\
300	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
301	cmpxchg_local((ptr), (o), (n));					\
302  })
303#else
304#include <asm-generic/cmpxchg-local.h>
305#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
306#endif
307
308#endif /* __KERNEL__ */
309#endif /* _ASM_POWERPC_CMPXCHG_H_ */