/*
 * arch/arm/include/asm/assembler.h
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This file contains arm architecture specific defines
 * for the different processors.
 *
 * Do not include any C declarations in this file - it is included by
 * assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
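/*
 * Illustrative usage (not part of the original file): these macros are
 * barrel-shifter operands, so they attach to a data-processing
 * instruction.  The registers below are arbitrary scratch registers
 * chosen for the example:
 *
 *	mov	r3, r4, get_byte_1	@ shift byte 1 of r4 into bits 0-7
 *	and	r3, r3, #255		@ isolate it
 *	orr	r6, r6, r3, put_byte_2	@ merge it back in as byte 2 of r6
 */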

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code
#else
#define ARM_BE8(code...)
#endif

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
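/*
 * Illustrative usage (assumed, in the style of the copy loops that use
 * this): the prefetch is only emitted when the architecture supports it:
 *
 *	PLD(	pld	[r1, #32]	)	@ prefetch the next cache line
 */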

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be a worthwhile thing to do when the
 * cache is not set to write-allocate (this would need further testing
 * on XScale when WA is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
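/*
 * Illustrative usage (assumed): the destination-alignment instructions
 * are wrapped so they cost nothing on CPUs other than Feroceon, e.g.
 *
 *	CALGN(	ands	ip, r0, #31	)	@ destination misalignment
 *	CALGN(	rsb	ip, ip, #32	)	@ bytes to the next line
 */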

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

	.macro asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if	\save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if	\save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * Strictly, the register save/restore should be conditional as
	 * well, but the bl clobbers the flags anyway, so the condition
	 * could not be reused after the call; unconditional save/restore
	 * is safe.
	 */
	.if	\save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if	\save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
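/*
 * Illustrative usage (not from the original file): bracket a short
 * critical section, with IRQ tracing handled automatically:
 *
 *	disable_irq
 *	@ ... code that must not be interrupted ...
 *	enable_irq
 */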
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
	mrs	\oldcpsr, cpsr
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm
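/*
 * Illustrative usage (assumed; r4 is an arbitrary scratch register):
 *
 *	save_and_disable_irqs r4	@ r4 := old CPSR, IRQs masked
 *	@ ... critical section ...
 *	restore_irqs r4			@ put the I bit back as it was
 */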

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr
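/*
 * Illustrative usage (assumed; some_helper is a hypothetical routine):
 * on Thumb-2 kernels an address used for an indirect branch or return
 * needs the Thumb bit set, which is what badr adds over a plain adr:
 *
 *	badr	lr, 1f			@ return address, Thumb bit correct
 *	b	some_helper
 * 1:
 */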

/*
 * Get current thread_info.
 */
	.macro	get_thread_info, rd
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp			)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
	.endm
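/*
 * Illustrative usage (assumed): thread_info lives at the bottom of the
 * kernel stack, so rounding sp down recovers it.  r9 here is an
 * arbitrary register choice:
 *
 *	get_thread_info r9
 *	ldr	r0, [r9, #TI_FLAGS]	@ e.g. fetch the thread flags
 */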

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif
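/*
 * Illustrative usage (assumed; r4/r5 are arbitrary scratch registers).
 * The macros compile away entirely without CONFIG_PREEMPT_COUNT:
 *
 *	get_thread_info r4
 *	inc_preempt_count r4, r5
 *	@ ... non-preemptible work ...
 *	dec_preempt_count r4, r5
 */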

#define USER(x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,9001f;			\
	.popsection
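/*
 * Illustrative usage (assumed): the wrapped instruction gets an
 * exception table entry whose fixup is the local label 9001, which the
 * surrounding code must provide (EFAULT assumes <asm/errno.h>):
 *
 * USER(	strt	r1, [r0]	)	@ may fault on the user address
 *	ret	lr
 * 9001:	mov	r0, #-EFAULT		@ reached only via the fixup
 *	ret	lr
 */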

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)						\
	.equ	up_b_offset, label - 9998b			;\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + up_b_offset					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
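/*
 * Illustrative usage (assumed): emit the SMP form inline and record a
 * UP replacement, which boot code patches in when running on one CPU:
 *
 *	ALT_SMP(dmb	ish)
 *	ALT_UP(nop)
 */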

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
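/*
 * Illustrative usage (assumed): \mode tells the macro whether the
 * surrounding code assembles as ARM or Thumb, so it can pick the
 * correctly sized encoding:
 *
 *	smp_dmb	arm
 */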

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert that the CPU is in SVC mode during
	 * boot.  For v7-M this is done in __v7m_setup, so setmode can be
	 * empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif
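/*
 * Illustrative usage (assumed, mirroring typical boot code; r9 is an
 * arbitrary scratch register):
 *
 *	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9	@ SVC, IRQ/FIQ off
 */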

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts.  reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time.
 * You cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg, cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg, \reg, #MODE_MASK
	orr	\reg, \reg, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg, \reg, #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm
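/*
 * Illustrative usage (assumed; r9 is an arbitrary scratch register that
 * the macro is free to clobber):
 *
 *	safe_svcmode_maskall r9		@ drop from HYP (if there) to SVC
 */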

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\cond\()\t\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\cond\()b\()\t \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\cond\()\t \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
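/*
 * Illustrative usage (assumed): copy one word from a user pointer in
 * r1, with the fault fixup at the default local label 9001, which the
 * surrounding code must provide:
 *
 *	ldrusr	r3, r1, 4		@ r3 = *r1++, as a user access
 *	str	r3, [r0], #4
 *	...
 * 9001:	@ fixup code for a faulting user access
 */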

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
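/*
 * Illustrative usage (assumed):
 *
 *	string	cpu_elf_name, "v7"
 */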

	.macro	check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1
	sbcccs	\tmp, \tmp, \limit
	bcs	\bad
#endif
	.endm
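/*
 * Illustrative usage (assumed, in the style of getuser.S): r0 holds the
 * user address, r1 the access limit, r2 is scratch:
 *
 *	check_uaccess r0, 4, r1, r2, __get_user_bad
 */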

	.macro	uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_DISABLE
	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro	uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_ENABLE
	mcr	p15, 0, \tmp, c3, c0, 0
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro	uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	mrc	p15, 0, \tmp, c3, c0, 0
	str	\tmp, [sp, #S_FRAME_SIZE]
#endif
	.endm

	.macro	uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	ldr	r0, [sp, #S_FRAME_SIZE]
	mcr	p15, 0, r0, c3, c0, 0
#endif
	.endm
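/*
 * Illustrative usage (assumed): open a user-access window only around
 * the user load/store, then close it again.  These are no-ops without
 * CONFIG_CPU_SW_DOMAIN_PAN.  r1 here is an arbitrary scratch register:
 *
 *	uaccess_enable r1
 *	ldrt	r2, [r0]		@ user-space load
 *	uaccess_disable r1
 */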

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm
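/*
 * Illustrative usage (assumed): a function return that uses bx where
 * interworking exists and falls back to a plain mov pc on older CPUs:
 *
 *	ret	lr
 */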

#endif /* __ASM_ASSEMBLER_H__ */