Linux Audio

Check our new training course

Loading...
v3.15
  1/*
  2 * SMP/VPE-safe functions to access "registers" (see note).
  3 *
  4 * NOTES:
  5 * - These macros use ll/sc instructions, so it is your responsibility to
  6 * ensure these are available on your platform before including this file.
  7 * - The MIPS32 spec states that ll/sc results are undefined for uncached
  8 * accesses. This means they can't be used on HW registers accessed
  9 * through kseg1. Code which requires these macros for this purpose must
 10 * front-end the registers with cached memory "registers" and have a single
 11 * thread update the actual HW registers.
 12 * - A maximum of 2k of code can be inserted between ll and sc. Every
 13 * memory access between the instructions will increase the chance of
 14 * sc failing and having to loop.
 15 * - When using custom_read_reg32/custom_write_reg32 only perform the
 16 * necessary logical operations on the register value in between these
 17 * two calls. All other logic should be performed before the first call.
 18  * - There is a bug on the R10000 chips which has a workaround. If you
 19 * are affected by this bug, make sure to define the symbol 'R10000_LLSC_WAR'
 20 * to be non-zero.  If you are using this header from within linux, you may
 21 * include <asm/war.h> before including this file to have this defined
 22 * appropriately for you.
 23 *
 24 * Copyright 2005-2007 PMC-Sierra, Inc.
 25 *
 26 *  This program is free software; you can redistribute  it and/or modify it
 27 *  under  the terms of  the GNU General  Public License as published by the
 28 *  Free Software Foundation;  either version 2 of the  License, or (at your
 29 *  option) any later version.
 30 *
 31 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
 32 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
 33 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
 34 *  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
 35 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 36 *  LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF USE,
 37 *  DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 38 *  THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
 39 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 40 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 41 *
 42 *  You should have received a copy of the  GNU General Public License along
 43 *  with this program; if not, write  to the Free Software Foundation, Inc., 675
 44 *  Mass Ave, Cambridge, MA 02139, USA.
 45 */
 46
 47#ifndef __ASM_REGOPS_H__
 48#define __ASM_REGOPS_H__
 49
 50#include <linux/types.h>
 51
 
 52#include <asm/war.h>
 53
 54#ifndef R10000_LLSC_WAR
 55#define R10000_LLSC_WAR 0
 56#endif
 57
 58#if R10000_LLSC_WAR == 1
 59#define __beqz	"beqzl	"
 60#else
 61#define __beqz	"beqz	"
 62#endif
 63
 64#ifndef _LINUX_TYPES_H
 65typedef unsigned int u32;
 66#endif
 67
 68/*
 69 * Sets all the masked bits to the corresponding value bits
 70 */
 71static inline void set_value_reg32(volatile u32 *const addr,
 72					u32 const mask,
 73					u32 const value)
 74{
 75	u32 temp;
 76
 77	__asm__ __volatile__(
 78	"	.set	push				\n"
 79	"	.set	arch=r4000			\n"
 80	"1:	ll	%0, %1	# set_value_reg32	\n"
 81	"	and	%0, %2				\n"
 82	"	or	%0, %3				\n"
 83	"	sc	%0, %1				\n"
 84	"	"__beqz"%0, 1b				\n"
 85	"	nop					\n"
 86	"	.set	pop				\n"
 87	: "=&r" (temp), "=m" (*addr)
 88	: "ir" (~mask), "ir" (value), "m" (*addr));
 89}
 90
 91/*
 92 * Sets all the masked bits to '1'
 93 */
 94static inline void set_reg32(volatile u32 *const addr,
 95				u32 const mask)
 96{
 97	u32 temp;
 98
 99	__asm__ __volatile__(
100	"	.set	push				\n"
101	"	.set	arch=r4000			\n"
102	"1:	ll	%0, %1		# set_reg32	\n"
103	"	or	%0, %2				\n"
104	"	sc	%0, %1				\n"
105	"	"__beqz"%0, 1b				\n"
106	"	nop					\n"
107	"	.set	pop				\n"
108	: "=&r" (temp), "=m" (*addr)
109	: "ir" (mask), "m" (*addr));
110}
111
112/*
113 * Sets all the masked bits to '0'
114 */
115static inline void clear_reg32(volatile u32 *const addr,
116				u32 const mask)
117{
118	u32 temp;
119
120	__asm__ __volatile__(
121	"	.set	push				\n"
122	"	.set	arch=r4000			\n"
123	"1:	ll	%0, %1		# clear_reg32	\n"
124	"	and	%0, %2				\n"
125	"	sc	%0, %1				\n"
126	"	"__beqz"%0, 1b				\n"
127	"	nop					\n"
128	"	.set	pop				\n"
129	: "=&r" (temp), "=m" (*addr)
130	: "ir" (~mask), "m" (*addr));
131}
132
133/*
134 * Toggles all masked bits from '0' to '1' and '1' to '0'
135 */
136static inline void toggle_reg32(volatile u32 *const addr,
137				u32 const mask)
138{
139	u32 temp;
140
141	__asm__ __volatile__(
142	"	.set	push				\n"
143	"	.set	arch=r4000			\n"
144	"1:	ll	%0, %1		# toggle_reg32	\n"
145	"	xor	%0, %2				\n"
146	"	sc	%0, %1				\n"
147	"	"__beqz"%0, 1b				\n"
148	"	nop					\n"
149	"	.set	pop				\n"
150	: "=&r" (temp), "=m" (*addr)
151	: "ir" (mask), "m" (*addr));
152}
153
154/*
155 * Read all masked bits others are returned as '0'
156 */
157static inline u32 read_reg32(volatile u32 *const addr,
158				u32 const mask)
159{
160	u32 temp;
161
162	__asm__ __volatile__(
163	"	.set	push				\n"
164	"	.set	noreorder			\n"
165	"	lw	%0, %1		# read		\n"
166	"	and	%0, %2		# mask		\n"
167	"	.set	pop				\n"
168	: "=&r" (temp)
169	: "m" (*addr), "ir" (mask));
170
171	return temp;
172}
173
174/*
175 * blocking_read_reg32 - Read address with blocking load
176 *
177 * Uncached writes need to be read back to ensure they reach RAM.
178 * The returned value must be 'used' to prevent from becoming a
179 * non-blocking load.
180 */
181static inline u32 blocking_read_reg32(volatile u32 *const addr)
182{
183	u32 temp;
184
185	__asm__ __volatile__(
186	"	.set	push				\n"
187	"	.set	noreorder			\n"
188	"	lw	%0, %1		# read		\n"
189	"	move	%0, %0		# block		\n"
190	"	.set	pop				\n"
191	: "=&r" (temp)
192	: "m" (*addr));
193
194	return temp;
195}
196
197/*
198 * For special strange cases only:
199 *
200 * If you need custom processing within a ll/sc loop, use the following macros
201 * VERY CAREFULLY:
202 *
203 *   u32 tmp;				<-- Define a variable to hold the data
204 *
205 *   custom_read_reg32(address, tmp);	<-- Reads the address and put the value
206 *						in the 'tmp' variable given
207 *
208 *	From here on out, you are (basically) atomic, so don't do anything too
209 *	fancy!
210 *	Also, this code may loop if the end of this block fails to write
211 *	everything back safely due do the other CPU, so do NOT do anything
212 *	with side-effects!
213 *
214 *   custom_write_reg32(address, tmp);	<-- Writes back 'tmp' safely.
215 */
216#define custom_read_reg32(address, tmp)				\
217	__asm__ __volatile__(					\
218	"	.set	push				\n"	\
219	"	.set	arch=r4000			\n"	\
220	"1:	ll	%0, %1	#custom_read_reg32	\n"	\
221	"	.set	pop				\n"	\
222	: "=r" (tmp), "=m" (*address)				\
223	: "m" (*address))
224
225#define custom_write_reg32(address, tmp)			\
226	__asm__ __volatile__(					\
227	"	.set	push				\n"	\
228	"	.set	arch=r4000			\n"	\
229	"	sc	%0, %1	#custom_write_reg32	\n"	\
230	"	"__beqz"%0, 1b				\n"	\
231	"	nop					\n"	\
232	"	.set	pop				\n"	\
233	: "=&r" (tmp), "=m" (*address)				\
234	: "0" (tmp), "m" (*address))
235
236#endif	/* __ASM_REGOPS_H__ */
v4.10.11
  1/*
  2 * SMP/VPE-safe functions to access "registers" (see note).
  3 *
  4 * NOTES:
  5 * - These macros use ll/sc instructions, so it is your responsibility to
  6 * ensure these are available on your platform before including this file.
  7 * - The MIPS32 spec states that ll/sc results are undefined for uncached
  8 * accesses. This means they can't be used on HW registers accessed
  9 * through kseg1. Code which requires these macros for this purpose must
 10 * front-end the registers with cached memory "registers" and have a single
 11 * thread update the actual HW registers.
 12 * - A maximum of 2k of code can be inserted between ll and sc. Every
 13 * memory access between the instructions will increase the chance of
 14 * sc failing and having to loop.
 15 * - When using custom_read_reg32/custom_write_reg32 only perform the
 16 * necessary logical operations on the register value in between these
 17 * two calls. All other logic should be performed before the first call.
 18  * - There is a bug on the R10000 chips which has a workaround. If you
 19 * are affected by this bug, make sure to define the symbol 'R10000_LLSC_WAR'
 20 * to be non-zero.  If you are using this header from within linux, you may
 21 * include <asm/war.h> before including this file to have this defined
 22 * appropriately for you.
 23 *
 24 * Copyright 2005-2007 PMC-Sierra, Inc.
 25 *
 26 *  This program is free software; you can redistribute  it and/or modify it
 27 *  under  the terms of  the GNU General  Public License as published by the
 28 *  Free Software Foundation;  either version 2 of the  License, or (at your
 29 *  option) any later version.
 30 *
 31 *  THIS  SOFTWARE  IS PROVIDED   ``AS  IS'' AND   ANY  EXPRESS OR IMPLIED
 32 *  WARRANTIES,   INCLUDING, BUT NOT  LIMITED  TO, THE IMPLIED WARRANTIES OF
 33 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
 34 *  EVENT  SHALL   THE AUTHOR  BE    LIABLE FOR ANY   DIRECT, INDIRECT,
 35 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 36 *  LIMITED   TO, PROCUREMENT OF  SUBSTITUTE GOODS  OR SERVICES; LOSS OF USE,
 37 *  DATA,  OR PROFITS; OR  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 38 *  THEORY OF LIABILITY, WHETHER IN  CONTRACT, STRICT LIABILITY, OR TORT
 39 *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 40 *  THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 41 *
 42 *  You should have received a copy of the  GNU General Public License along
 43 *  with this program; if not, write  to the Free Software Foundation, Inc., 675
 44 *  Mass Ave, Cambridge, MA 02139, USA.
 45 */
 46
 47#ifndef __ASM_REGOPS_H__
 48#define __ASM_REGOPS_H__
 49
 50#include <linux/types.h>
 51
 52#include <asm/compiler.h>
 53#include <asm/war.h>
 54
 55#ifndef R10000_LLSC_WAR
 56#define R10000_LLSC_WAR 0
 57#endif
 58
 59#if R10000_LLSC_WAR == 1
 60#define __beqz	"beqzl	"
 61#else
 62#define __beqz	"beqz	"
 63#endif
 64
 65#ifndef _LINUX_TYPES_H
 66typedef unsigned int u32;
 67#endif
 68
 69/*
 70 * Sets all the masked bits to the corresponding value bits
 71 */
 72static inline void set_value_reg32(volatile u32 *const addr,
 73					u32 const mask,
 74					u32 const value)
 75{
 76	u32 temp;
 77
 78	__asm__ __volatile__(
 79	"	.set	push				\n"
 80	"	.set	arch=r4000			\n"
 81	"1:	ll	%0, %1	# set_value_reg32	\n"
 82	"	and	%0, %2				\n"
 83	"	or	%0, %3				\n"
 84	"	sc	%0, %1				\n"
 85	"	"__beqz"%0, 1b				\n"
 86	"	nop					\n"
 87	"	.set	pop				\n"
 88	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
 89	: "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr));
 90}
 91
 92/*
 93 * Sets all the masked bits to '1'
 94 */
 95static inline void set_reg32(volatile u32 *const addr,
 96				u32 const mask)
 97{
 98	u32 temp;
 99
100	__asm__ __volatile__(
101	"	.set	push				\n"
102	"	.set	arch=r4000			\n"
103	"1:	ll	%0, %1		# set_reg32	\n"
104	"	or	%0, %2				\n"
105	"	sc	%0, %1				\n"
106	"	"__beqz"%0, 1b				\n"
107	"	nop					\n"
108	"	.set	pop				\n"
109	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
110	: "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
111}
112
113/*
114 * Sets all the masked bits to '0'
115 */
116static inline void clear_reg32(volatile u32 *const addr,
117				u32 const mask)
118{
119	u32 temp;
120
121	__asm__ __volatile__(
122	"	.set	push				\n"
123	"	.set	arch=r4000			\n"
124	"1:	ll	%0, %1		# clear_reg32	\n"
125	"	and	%0, %2				\n"
126	"	sc	%0, %1				\n"
127	"	"__beqz"%0, 1b				\n"
128	"	nop					\n"
129	"	.set	pop				\n"
130	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
131	: "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr));
132}
133
134/*
135 * Toggles all masked bits from '0' to '1' and '1' to '0'
136 */
137static inline void toggle_reg32(volatile u32 *const addr,
138				u32 const mask)
139{
140	u32 temp;
141
142	__asm__ __volatile__(
143	"	.set	push				\n"
144	"	.set	arch=r4000			\n"
145	"1:	ll	%0, %1		# toggle_reg32	\n"
146	"	xor	%0, %2				\n"
147	"	sc	%0, %1				\n"
148	"	"__beqz"%0, 1b				\n"
149	"	nop					\n"
150	"	.set	pop				\n"
151	: "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
152	: "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
153}
154
155/*
156 * Read all masked bits others are returned as '0'
157 */
158static inline u32 read_reg32(volatile u32 *const addr,
159				u32 const mask)
160{
161	u32 temp;
162
163	__asm__ __volatile__(
164	"	.set	push				\n"
165	"	.set	noreorder			\n"
166	"	lw	%0, %1		# read		\n"
167	"	and	%0, %2		# mask		\n"
168	"	.set	pop				\n"
169	: "=&r" (temp)
170	: "m" (*addr), "ir" (mask));
171
172	return temp;
173}
174
175/*
176 * blocking_read_reg32 - Read address with blocking load
177 *
178 * Uncached writes need to be read back to ensure they reach RAM.
179 * The returned value must be 'used' to prevent from becoming a
180 * non-blocking load.
181 */
182static inline u32 blocking_read_reg32(volatile u32 *const addr)
183{
184	u32 temp;
185
186	__asm__ __volatile__(
187	"	.set	push				\n"
188	"	.set	noreorder			\n"
189	"	lw	%0, %1		# read		\n"
190	"	move	%0, %0		# block		\n"
191	"	.set	pop				\n"
192	: "=&r" (temp)
193	: "m" (*addr));
194
195	return temp;
196}
197
198/*
199 * For special strange cases only:
200 *
201 * If you need custom processing within a ll/sc loop, use the following macros
202 * VERY CAREFULLY:
203 *
204 *   u32 tmp;				<-- Define a variable to hold the data
205 *
206 *   custom_read_reg32(address, tmp);	<-- Reads the address and put the value
207 *						in the 'tmp' variable given
208 *
209 *	From here on out, you are (basically) atomic, so don't do anything too
210 *	fancy!
211 *	Also, this code may loop if the end of this block fails to write
212 *	everything back safely due do the other CPU, so do NOT do anything
213 *	with side-effects!
214 *
215 *   custom_write_reg32(address, tmp);	<-- Writes back 'tmp' safely.
216 */
217#define custom_read_reg32(address, tmp)				\
218	__asm__ __volatile__(					\
219	"	.set	push				\n"	\
220	"	.set	arch=r4000			\n"	\
221	"1:	ll	%0, %1	#custom_read_reg32	\n"	\
222	"	.set	pop				\n"	\
223	: "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)		\
224	: GCC_OFF_SMALL_ASM() (*address))
225
226#define custom_write_reg32(address, tmp)			\
227	__asm__ __volatile__(					\
228	"	.set	push				\n"	\
229	"	.set	arch=r4000			\n"	\
230	"	sc	%0, %1	#custom_write_reg32	\n"	\
231	"	"__beqz"%0, 1b				\n"	\
232	"	nop					\n"	\
233	"	.set	pop				\n"	\
234	: "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)		\
235	: "0" (tmp), GCC_OFF_SMALL_ASM() (*address))
236
237#endif	/* __ASM_REGOPS_H__ */