v3.1
 
/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define pull            lsr
#define push            lsl
#define get_byte_0      lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0      lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define pull            lsl
#define push            lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3      lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3      lsl #0
#endif
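
/*
 * Illustrative sketch, not part of the original header: how the helpers
 * above are typically used when assembling one aligned word from two
 * misaligned source words in a copy loop (register choice is arbitrary):
 *
 *	mov	r3, r4, pull #8			@ remaining bytes of first word
 *	orr	r3, r3, r5, push #24		@ merge low byte(s) of next word
 *
 * and extracting the byte at memory offset 1, independent of endianness:
 *
 *	mov	ip, r2, get_byte_1
 *	and	ip, ip, #0xff
 */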

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
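
/*
 * Illustrative sketch, not part of the original header: a copy loop can
 * prefetch ahead with PLD() and still assemble for pre-v5 CPUs, where
 * the macro expands to nothing:
 *
 *	PLD(	pld	[r1, #32]	)	@ hint: prefetch a line ahead
 */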

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
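
/*
 * Illustrative sketch, not part of the original header: a memcpy-style
 * routine can wrap its destination-alignment preamble in CALGN() so it
 * is only emitted for CPUs (e.g. Feroceon) where it pays off; the label
 * name below is hypothetical:
 *
 *	CALGN(	ands	ip, r0, #31	)	@ bytes into current cache line
 *	CALGN(	bne	.Lalign_dest	)	@ align destination first
 */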

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

	.macro asm_trace_hardirqs_off
#if defined(CONFIG_TRACE_IRQFLAGS)
	stmdb   sp!, {r0-r3, ip, lr}
	bl	trace_hardirqs_off
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro asm_trace_hardirqs_on_cond, cond
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	stmdb   sp!, {r0-r3, ip, lr}
	bl\cond	trace_hardirqs_on
	ldmia	sp!, {r0-r3, ip, lr}
#endif
	.endm

	.macro asm_trace_hardirqs_on
	asm_trace_hardirqs_on_cond al
	.endm

	.macro disable_irq
	disable_irq_notrace
	asm_trace_hardirqs_off
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
	msr	cpsr_c, \oldcpsr
	.endm

	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on_cond eq
	restore_irqs_notrace \oldcpsr
	.endm
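
/*
 * Illustrative sketch, not part of the original header: a typical
 * IRQ-safe critical section using the macros above (r9 is an arbitrary
 * scratch register for the saved state):
 *
 *	save_and_disable_irqs r9
 *	@ ... code that must not be interrupted ...
 *	restore_irqs r9
 */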

#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)					\
	.equ	up_b_offset, label - 9998b			;\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + up_b_offset					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb)
	.else
	ALT_SMP(W(dmb))
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm

#ifdef CONFIG_THUMB2_KERNEL
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif
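
/*
 * Illustrative sketch, not part of the original header: early boot code
 * typically forces SVC mode with IRQs and FIQs masked before setting up
 * the MMU (r9 is an arbitrary scratch register):
 *
 *	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
 */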

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm

#endif /* __ASM_ASSEMBLER_H__ */
v5.14.15
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/assembler.h
 *
 *  Copyright (C) 1996-2000 Russell King
 *
 *  This file contains arm architecture specific defines
 *  for the different processors.
 *
 *  Do not include any C declarations in this file - it is included by
 *  assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>
#include <asm/uaccess-asm.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull          lsr
#define lspush          lsl
#define get_byte_0      lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0      lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull          lsl
#define lspush          lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3      lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3      lsl #0
#endif

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif
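
/*
 * Illustrative sketch, not part of the original header: ARM_BE8() wraps
 * fixups needed only when the kernel runs BE8, e.g. byte-reversing an
 * opcode word loaded from (little-endian) code memory:
 *
 *	ldr	r0, [r4]		@ load opcode
 * ARM_BE8(rev	r0, r0)			@ instructions stay LE under BE8
 */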

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this a worthwhile thing to do when the cache is not
 * set to write-allocate (this would need further testing on XScale when WA
 * is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif

#define IMM12_MASK 0xfff

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

	.macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * actually the registers should be pushed and pop'd conditionally, but
	 * after bl the flags are certainly clobbered
	 */
	.if \save
	stmdb   sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr

/*
 * Get current thread_info.
 */
	.macro	get_thread_info, rd
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp			)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
	.endm
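
/*
 * Illustrative sketch, not part of the original header: thread_info
 * sits at the base of the kernel stack here, so any kernel-mode code
 * can reach its fields (r9 is an arbitrary scratch register):
 *
 *	get_thread_info r9
 *	ldr	r9, [r9, #TI_FLAGS]	@ current thread's flags
 */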

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif

#define USERL(l, x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,l;			\
	.popsection

#define USER(x...)	USERL(9001f, x)

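/*
 * Illustrative sketch, not part of the original header: USER() marks a
 * single user-space access so that a fault is redirected to the local
 * 9001 fixup label supplied by the surrounding routine:
 *
 * USER(	ldrt	r3, [r0]	)	@ may fault
 *	...
 * 9001:	mov	r0, #-EFAULT		@ reached only on a fault
 *	ret	lr
 */
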
#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b - .					;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b - .					;\
	W(b)	. + (label - 9998b)					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
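
/*
 * Illustrative sketch, not part of the original header: a TLB flush
 * helper built once for SMP can be patched for UP at boot, assuming an
 * SMP_ON_UP kernel:
 *
 *	ALT_SMP(mcr	p15, 0, r0, c8, c3, 1)	@ TLB invalidate by MVA, IS
 *	ALT_UP(mcr	p15, 0, r0, c8, c7, 1)	@ local TLB invalidate by MVA
 */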

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm
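
/*
 * Illustrative sketch, not part of the original header: flush the
 * pipeline after updating a system control register so the new setting
 * takes effect for the following instructions:
 *
 *	mcr	p15, 0, r0, c1, c0, 0	@ write SCTLR
 *	instr_sync
 */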

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
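
/*
 * Illustrative sketch, not part of the original header: the mode
 * argument selects the instruction-set encoding of the barrier, so an
 * ARM-encoded helper would use:
 *
 *	smp_dmb	arm
 */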

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert to be in svc mode during boot. For v7-M
	 * this is done in __v7m_setup, so setmode can be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * you cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg , \reg , #MODE_MASK
	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
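
/*
 * Illustrative sketch, not part of the original header: boot code that
 * may have been entered in HYP mode drops to SVC with interrupts masked
 * before doing anything else (r9 is an arbitrary scratch register):
 *
 *	safe_svcmode_maskall r9
 */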

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\t\cond\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\()b\t\cond \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\t\cond \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
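
/*
 * Illustrative sketch, not part of the original header: store one word
 * to user space with post-increment, faulting to the default local
 * 9001 fixup label:
 *
 *	strusr	r3, r0, 4		@ user store of r3 to [r0], r0 += 4
 */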

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
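
/*
 * Illustrative sketch, not part of the original header: declare a typed
 * and sized string object that other code can reference by name:
 *
 *	string	cpu_elf_name, "v7"
 */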

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm
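
/*
 * Illustrative sketch, not part of the original header: returns use the
 * ret macro so pre-v6 builds get "mov pc, lr" while later ones get the
 * interworking-safe "bx lr":
 *
 *	ret	lr
 */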

	.macro	bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:	.inst	0xde02
#else
1:	.inst	0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
	.pushsection .rodata.str, "aMS", %progbits, 1
2:	.asciz	"\msg"
	.popsection
	.pushsection __bug_table, "aw"
	.align	2
	.word	1b, 2b
	.hword	\line
	.popsection
#endif
	.endm
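
/*
 * Illustrative sketch, not part of the original header: trap with a
 * message that the BUG() machinery can report; __LINE__ is filled in by
 * the preprocessor:
 *
 *	bug	"unexpected state", __LINE__
 */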

#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)				\
	.pushsection "_kprobe_blacklist", "aw" ;	\
	.balign 4 ;					\
	.long entry;					\
	.popsection
#else
#define _ASM_NOKPROBE(entry)
#endif

	.macro		__adldst_l, op, reg, sym, tmp, c
	.if		__LINUX_ARM_ARCH__ < 7
	ldr\c		\tmp, .La\@
	.subsection	1
	.align		2
.La\@:	.long		\sym - .Lpc\@
	.previous
	.else
	.ifnb		\c
 THUMB(	ittt		\c			)
	.endif
	movw\c		\tmp, #:lower16:\sym - .Lpc\@
	movt\c		\tmp, #:upper16:\sym - .Lpc\@
	.endif

#ifndef CONFIG_THUMB2_KERNEL
	.set		.Lpc\@, . + 8			// PC bias
	.ifc		\op, add
	add\c		\reg, \tmp, pc
	.else
	\op\c		\reg, [pc, \tmp]
	.endif
#else
.Lb\@:	add\c		\tmp, \tmp, pc
	/*
	 * In Thumb-2 builds, the PC bias depends on whether we are currently
	 * emitting into a .arm or a .thumb section. The size of the add opcode
	 * above will be 2 bytes when emitting in Thumb mode and 4 bytes when
	 * emitting in ARM mode, so let's use this to account for the bias.
	 */
	.set		.Lpc\@, . + (. - .Lb\@)

	.ifnc		\op, add
	\op\c		\reg, [\tmp]
	.endif
#endif
	.endm

	/*
	 * mov_l - move a constant value or [relocated] address into a register
	 */
	.macro		mov_l, dst:req, imm:req
	.if		__LINUX_ARM_ARCH__ < 7
	ldr		\dst, =\imm
	.else
	movw		\dst, #:lower16:\imm
	movt		\dst, #:upper16:\imm
	.endif
	.endm
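
/*
 * Illustrative sketch, not part of the original header: load an
 * arbitrary 32-bit constant without relying on a nearby literal pool on
 * ARMv7+ (the value here is just an example):
 *
 *	mov_l	r0, 0x12345678
 */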

	/*
	 * adr_l - adr pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		adr_l, dst:req, sym:req, cond
	__adldst_l	add, \dst, \sym, \dst, \cond
	.endm

	/*
	 * ldr_l - ldr <literal> pseudo-op with unlimited range
	 *
	 * @dst: destination register
	 * @sym: name of the symbol
	 * @cond: conditional opcode suffix
	 */
	.macro		ldr_l, dst:req, sym:req, cond
	__adldst_l	ldr, \dst, \sym, \dst, \cond
	.endm

	/*
	 * str_l - str <literal> pseudo-op with unlimited range
	 *
	 * @src: source register
	 * @sym: name of the symbol
	 * @tmp: mandatory scratch register
	 * @cond: conditional opcode suffix
	 */
	.macro		str_l, src:req, sym:req, tmp:req, cond
	__adldst_l	str, \src, \sym, \tmp, \cond
	.endm
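
/*
 * Illustrative sketch, not part of the original header: unlike plain
 * adr/ldr/str with a literal, the *_l pseudo-ops have no range limit
 * and stay position independent; the symbol name below is hypothetical:
 *
 *	adr_l	r0, some_local_symbol
 *	ldr_l	r1, some_local_symbol
 *	str_l	r2, some_local_symbol, r3	@ r3 is a scratch register
 */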

	/*
	 * rev_l - byte-swap a 32-bit value
	 *
	 * @val: source/destination register
	 * @tmp: scratch register
	 */
	.macro		rev_l, val:req, tmp:req
	.if		__LINUX_ARM_ARCH__ < 6
	eor		\tmp, \val, \val, ror #16
	bic		\tmp, \tmp, #0x00ff0000
	mov		\val, \val, ror #8
	eor		\val, \val, \tmp, lsr #8
	.else
	rev		\val, \val
	.endif
	.endm
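
/*
 * Illustrative sketch, not part of the original header: byte-swap a
 * value read from a big-endian data structure; the scratch register is
 * only clobbered on pre-v6 builds:
 *
 *	rev_l	r0, r3
 */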

#endif /* __ASM_ASSEMBLER_H__ */