/*
 * arch/arm/include/asm/assembler.h
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains ARM architecture-specific defines
 * for the different processors.
 *
 * Do not include any C declarations in this file - it is included by
 * assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
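
/*
 * A minimal usage sketch (not part of the original file): extract byte 1
 * of r0 and merge it into byte slot 1 of r3, independent of endianness:
 *
 *	mov	r2, r0, get_byte_1	@ shift byte 1 down to bits 7:0
 *	and	r2, r2, #255		@ isolate it
 *	orr	r3, r3, r2, put_byte_1	@ shift it back up into place
 */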

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...) code
#else
#define ARM_BE8(code...)
#endif
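
/*
 * Hedged usage sketch: byte-swap a little-endian value only when the
 * kernel runs BE8 (rev requires ARMv6+), compiling to nothing otherwise:
 *
 *	ldr	r0, [r1]		@ little-endian data
 * ARM_BE8(rev	r0, r0)			@ swap on a BE8 kernel only
 */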

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
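
/*
 * Hedged usage sketch, as in the copy loops: prefetch ahead of the read
 * pointer, compiling to nothing before ARMv5:
 *
 *	PLD(	pld	[r1, #32]	)
 */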

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be a worthwhile thing to do when the cache
 * is not set to write-allocate (this would need further testing on
 * XScale when WA is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

	.macro	asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if	\save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if	\save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro	asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * Ideally the registers would be pushed and popped conditionally,
	 * but after the bl the flags are certainly clobbered, so the save
	 * and restore must be unconditional.
	 */
	.if	\save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if	\save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro	disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro	enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro	restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm
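
/*
 * Hedged usage sketch: bracket a critical section, using any free
 * register (r4 here) to hold the saved state:
 *
 *	save_and_disable_irqs r4
 *	@ ... critical section ...
 *	restore_irqs r4
 */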

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr

/*
 * Get current thread_info by rounding sp down to the base of the
 * THREAD_SIZE-aligned kernel stack.
 */
	.macro	get_thread_info, rd
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp					)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
	.endm
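
/*
 * Hedged usage sketch:
 *	get_thread_info	r9	@ r9 = current thread_info base
 */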

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif

#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection

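/*
 * Hedged usage sketch: a faulting user access branches to the local
 * fixup label 9001, which the surrounding code must provide:
 *
 * USER(	ldrt	r0, [r1]	)	@ may fault
 *	...
 * 9001:	@ fixup, reached via the exception table on a fault
 */
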
#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)						\
	.equ	up_b_offset, label - 9998b			;\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + up_b_offset					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
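
/*
 * Hedged usage sketch (mirroring smp_dmb below): emit a barrier on SMP
 * kernels and let a UP boot patch it down to a nop:
 *
 *	ALT_SMP(dmb	ish)
 *	ALT_UP(nop)
 */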

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert that the CPU is in SVC mode during boot.
	 * For v7-M this is done in __v7m_setup, so setmode can be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts.  reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time;
 * you cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg, cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg, \reg, #MODE_MASK
	orr	\reg, \reg, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg, \reg, #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
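
/*
 * Hedged usage sketch: load one word from user memory at r1 into r0,
 * post-incrementing r1 and branching to 9001f on a fault:
 *
 *	ldrusr	r0, r1, 4
 */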

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
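
/*
 * Hedged usage sketch, as the CPU name strings in the processor support
 * files do:
 *
 *	string	cpu_elf_name, "v7"
 *
 * declares a sized .asciz object named cpu_elf_name.
 */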

/*
 * Range-check a user access of \size bytes at \addr against \limit,
 * branching to \bad on failure.  The sbcccs only executes when the adds
 * did not wrap, so the carry flag ends up set both on wrap-around and
 * when \addr + \size - 1 lies above \limit.  With CONFIG_CPU_USE_DOMAINS
 * the hardware domain checks cover this instead.
 */
	.macro	check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1
	sbcccs	\tmp, \tmp, \limit
	bcs	\bad
#endif
	.endm

	.macro	uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_DISABLE
	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro	uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_ENABLE
	mcr	p15, 0, \tmp, c3, c0, 0
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro	uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	mrc	p15, 0, \tmp, c3, c0, 0
	str	\tmp, [sp, #SVC_DACR]
#endif
	.endm

	.macro	uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	ldr	r0, [sp, #SVC_DACR]
	mcr	p15, 0, r0, c3, c0, 0
#endif
	.endm
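
/*
 * Hedged usage sketch: open a user-access window around explicit user
 * accessors, with r3 as scratch:
 *
 *	uaccess_enable r3
 * USER(	ldrt	r0, [r1]	)
 *	uaccess_disable r3
 */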

/*
 * Conditional function-return macros: return through bx when \reg is lr
 * on ARMv6+ (letting cores with a return stack predict the return), and
 * through mov pc otherwise.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm

#endif /* __ASM_ASSEMBLER_H__ */