v5.4 (arch/sparc/include/asm/cmpxchg_64.h)
/* SPDX-License-Identifier: GPL-2.0 */
/* 64-bit atomic xchg() and cmpxchg() definitions.
 *
 * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
 */

#ifndef __ARCH_SPARC64_CMPXCHG__
#define __ARCH_SPARC64_CMPXCHG__

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("cas [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}
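
/*
 * Editor's note (not part of the original file): SPARC's "cas [m], old, new"
 * compares the 32-bit word at [m] with register "old"; if they match, the
 * word is replaced with register "new".  Either way, the value that was in
 * memory ends up in the "new" register, which is why the function simply
 * returns new.  A rough portable sketch of the same contract, using the
 * GCC/Clang __atomic builtin:
 *
 *	int expected = old;
 *	__atomic_compare_exchange_n(m, &expected, new,
 *				    0, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
 *	return expected;	// the value previously observed in memory
 */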

static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	lduw		[%4], %2\n"
"	cas		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%icc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}
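
/*
 * Editor's note (not in the original file): this loop builds an
 * unconditional exchange out of cas.  tmp1 (%1) keeps a backup of the
 * caller's value, tmp2 (%2) holds the word loaded by lduw, and cas only
 * stores if memory still contains tmp2.  If another CPU modified the word
 * between the load and the cas, the compare fails and the annulled
 * branch's delay slot ("mov %1, %0") restores the caller's value before
 * retrying.  xchg64() below is the same pattern with ldx/casx and the
 * 64-bit %xcc condition codes.
 */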

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	ldx		[%4], %2\n"
"	casx		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%xcc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

#define xchg(ptr,x)							\
({	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)));	\
	__ret;								\
})
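
/*
 * Hedged usage sketch (illustrative, not part of the original file):
 * xchg() accepts 2-, 4- and 8-byte objects and returns the previous value:
 *
 *	unsigned long pending = ...;
 *	unsigned long old = xchg(&pending, 0UL);
 *	// 'old' holds what was there; 'pending' is now 0, atomically
 */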

void __xchg_called_with_bad_pointer(void);

/*
 * Use 4 byte cas instruction to achieve 2 byte xchg. Main logic
 * here is to get the bit shift of the byte we are interested in.
 * The XOR is handy for reversing the bits for big-endian byte order.
 */
static inline unsigned long
xchg16(__volatile__ unsigned short *m, unsigned short val)
{
	unsigned long maddr = (unsigned long)m;
	int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
	unsigned int mask = 0xffff << bit_shift;
	unsigned int *ptr = (unsigned int *) (maddr & ~2);
	unsigned int old32, new32, load32;

	/* Read the old value */
	load32 = *ptr;

	do {
		old32 = load32;
		new32 = (load32 & (~mask)) | val << bit_shift;
		load32 = __cmpxchg_u32(ptr, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> bit_shift;
}
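
/*
 * Worked example (editor's addition): for a halfword at address 0x...2,
 * m & 2 == 2, so bit_shift == (2 ^ 2) << 3 == 0 and the halfword lives in
 * bits 15:0 of the aligned 32-bit word; at address 0x...0, bit_shift ==
 * (0 ^ 2) << 3 == 16, i.e. bits 31:16.  The XOR flips the offset because
 * sparc64 is big-endian: the halfword at the lower address is the more
 * significant half of the word.
 */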

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{
	switch (size) {
	case 2:
		return xchg16(ptr, x);
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
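
/*
 * Editor's note: there is deliberately no case 1 above.  For any size
 * other than 2, 4 or 8 the call to the undefined
 * __xchg_called_with_bad_pointer() survives dead-code elimination, so a
 * bad xchg() is caught at link time instead of miscompiling silently.
 */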

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("casx [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/*
 * Use 4 byte cas instruction to achieve 1 byte cmpxchg. Main logic
 * here is to get the bit shift of the byte we are interested in.
 * The XOR is handy for reversing the bits for big-endian byte order.
 */
static inline unsigned long
__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
{
	unsigned long maddr = (unsigned long)m;
	int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
	unsigned int mask = 0xff << bit_shift;
	unsigned int *ptr = (unsigned int *) (maddr & ~3);
	unsigned int old32, new32, load;
	unsigned int load32 = *ptr;

	do {
		new32 = (load32 & ~mask) | (new << bit_shift);
		old32 = (load32 & ~mask) | (old << bit_shift);
		load32 = __cmpxchg_u32(ptr, old32, new32);
		if (load32 == old32)
			return old;
		load = (load32 & mask) >> bit_shift;
	} while (load == old);

	return load;
}
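
/*
 * Editor's note on the loop exits (not in the original file): when the
 * 32-bit cas succeeds (load32 == old32), the target byte matched 'old'
 * and has been replaced, so 'old' is returned to signal success.  When
 * the cas fails but the extracted byte still equals 'old', some
 * neighbouring byte in the word must have changed; load32 now holds the
 * word actually in memory, so the loop simply retries.  Only when the
 * byte itself differs does the function return the conflicting value,
 * which the caller sees as an ordinary cmpxchg failure.
 */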

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8(ptr, old, new);
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
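
/*
 * Hedged usage sketch (illustrative, not part of the original file): the
 * classic compare-and-swap retry loop for a lock-free increment:
 *
 *	long seen = READ_ONCE(*counter), old;
 *	do {
 *		old = seen;
 *		seen = cmpxchg(counter, old, old + 1);
 *	} while (seen != old);
 */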

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 4:
	case 8:	return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })
#define cmpxchg64(ptr, o, n)	cmpxchg64_local((ptr), (o), (n))
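
/*
 * Editor's note (hedged): aliasing cmpxchg64() to cmpxchg64_local() is
 * sound here because the 8-byte "local" path above just calls
 * __cmpxchg(), and casx is fully SMP-atomic on sparc64 -- for 4- and
 * 8-byte operands the local and global variants are the same operation.
 */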

#endif /* __ARCH_SPARC64_CMPXCHG__ */
v6.8 (arch/sparc/include/asm/cmpxchg_64.h)

The v6.8 copy of this header is essentially the same code. The visible
differences are renames: the public macros gained an arch_ prefix
(arch_xchg, arch_cmpxchg, arch_cmpxchg_local, arch_cmpxchg64_local,
arch_cmpxchg64), __xchg() became __arch_xchg() and is now marked
__always_inline, and the generic local-cmpxchg fallback is spelled
__generic_cmpxchg_local() instead of __cmpxchg_local_generic().
/* SPDX-License-Identifier: GPL-2.0 */
/* 64-bit atomic xchg() and cmpxchg() definitions.
 *
 * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
 */

#ifndef __ARCH_SPARC64_CMPXCHG__
#define __ARCH_SPARC64_CMPXCHG__

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("cas [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	lduw		[%4], %2\n"
"	cas		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%icc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"
"1:	ldx		[%4], %2\n"
"	casx		[%4], %2, %0\n"
"	cmp		%2, %0\n"
"	bne,a,pn	%%xcc, 1b\n"
"	 mov		%1, %0\n"
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");
	return val;
}

#define arch_xchg(ptr,x)						\
({	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__arch_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)));	\
	__ret;								\
})
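
/*
 * Editor's note (hedged): the macro is now spelled arch_xchg().  The
 * un-prefixed xchg() that callers use is expected to come from the
 * common atomics layer (include/linux/atomic/atomic-instrumented.h),
 * which wraps arch_xchg() with KASAN/KCSAN instrumentation.
 */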

void __xchg_called_with_bad_pointer(void);

/*
 * Use 4 byte cas instruction to achieve 2 byte xchg. Main logic
 * here is to get the bit shift of the byte we are interested in.
 * The XOR is handy for reversing the bits for big-endian byte order.
 */
static inline unsigned long
xchg16(__volatile__ unsigned short *m, unsigned short val)
{
	unsigned long maddr = (unsigned long)m;
	int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
	unsigned int mask = 0xffff << bit_shift;
	unsigned int *ptr = (unsigned int *) (maddr & ~2);
	unsigned int old32, new32, load32;

	/* Read the old value */
	load32 = *ptr;

	do {
		old32 = load32;
		new32 = (load32 & (~mask)) | val << bit_shift;
		load32 = __cmpxchg_u32(ptr, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> bit_shift;
}

static __always_inline unsigned long
__arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch (size) {
	case 2:
		return xchg16(ptr, x);
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
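
/*
 * Editor's note (hedged): __arch_xchg() is now __always_inline,
 * presumably to guarantee that the switch on the constant 'size' is
 * folded and the reference to the undefined
 * __xchg_called_with_bad_pointer() is always eliminated for valid sizes.
 */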

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("casx [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/*
 * Use 4 byte cas instruction to achieve 1 byte cmpxchg. Main logic
 * here is to get the bit shift of the byte we are interested in.
 * The XOR is handy for reversing the bits for big-endian byte order.
 */
static inline unsigned long
__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
{
	unsigned long maddr = (unsigned long)m;
	int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
	unsigned int mask = 0xff << bit_shift;
	unsigned int *ptr = (unsigned int *) (maddr & ~3);
	unsigned int old32, new32, load;
	unsigned int load32 = *ptr;

	do {
		new32 = (load32 & ~mask) | (new << bit_shift);
		old32 = (load32 & ~mask) | (old << bit_shift);
		load32 = __cmpxchg_u32(ptr, old32, new32);
		if (load32 == old32)
			return old;
		load = (load32 & mask) >> bit_shift;
	} while (load == old);

	return load;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8(ptr, old, new);
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define arch_cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */

static inline unsigned long __cmpxchg_local(volatile void *ptr,
				      unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 4:
	case 8:	return __cmpxchg(ptr, old, new, size);
	default:
		return __generic_cmpxchg_local(ptr, old, new, size);
	}

	return old;
}

#define arch_cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
			(unsigned long)(n), sizeof(*(ptr))))
#define arch_cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	arch_cmpxchg_local((ptr), (o), (n));				\
  })
#define arch_cmpxchg64(ptr, o, n)	arch_cmpxchg64_local((ptr), (o), (n))

#endif /* __ARCH_SPARC64_CMPXCHG__ */