/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/assembler.h
 *
 * Copyright (C) 1996-2000 Russell King
 *
 * This file contains arm architecture specific defines
 * for the different processors.
 *
 * Do not include any C declarations in this file - it is included by
 * assembler source.
 */
#ifndef __ASM_ASSEMBLER_H__
#define __ASM_ASSEMBLER_H__

#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#include <asm/ptrace.h>
#include <asm/domain.h>
#include <asm/opcodes-virt.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#define IOMEM(x)	(x)

/*
 * Endian independent macros for shifting bytes within registers.
 */
#ifndef __ARMEB__
#define lspull		lsr
#define lspush		lsl
#define get_byte_0	lsl #0
#define get_byte_1	lsr #8
#define get_byte_2	lsr #16
#define get_byte_3	lsr #24
#define put_byte_0	lsl #0
#define put_byte_1	lsl #8
#define put_byte_2	lsl #16
#define put_byte_3	lsl #24
#else
#define lspull		lsl
#define lspush		lsr
#define get_byte_0	lsr #24
#define get_byte_1	lsr #16
#define get_byte_2	lsr #8
#define get_byte_3	lsl #0
#define put_byte_0	lsl #24
#define put_byte_1	lsl #16
#define put_byte_2	lsl #8
#define put_byte_3	lsl #0
#endif
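
/*
 * Illustrative use (a sketch, registers arbitrary): extract byte 1 of
 * r2 into r0 in an endian-independent way:
 *	mov	r0, r2, get_byte_1
 *	and	r0, r0, #255
 */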

/* Select code for any configuration running in BE8 mode */
#ifdef CONFIG_CPU_ENDIAN_BE8
#define ARM_BE8(code...)	code
#else
#define ARM_BE8(code...)
#endif
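
/*
 * Illustrative use: byte-swap data loaded with a little-endian layout
 * when the kernel runs BE8 (instructions little-endian, data
 * big-endian); compiled out otherwise:
 * ARM_BE8(rev	r0, r0)
 */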

/*
 * Data preload for architectures that support it
 */
#if __LINUX_ARM_ARCH__ >= 5
#define PLD(code...)	code
#else
#define PLD(code...)
#endif
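
/*
 * Illustrative use, as in the copy loops: prefetch ahead of the read
 * pointer; compiled out on pre-v5 cores:
 * PLD(	pld	[r1, #0]	)
 */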

/*
 * This can be used to enable code to cacheline align the destination
 * pointer when bulk writing to memory.  Experiments on StrongARM and
 * XScale didn't show this to be a worthwhile thing to do when the
 * cache is not set to write-allocate (this would need further testing
 * on XScale when WA is used).
 *
 * On Feroceon there is much to gain however, regardless of cache mode.
 */
#ifdef CONFIG_CPU_FEROCEON
#define CALGN(code...) code
#else
#define CALGN(code...)
#endif
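
/*
 * Illustrative use (a sketch): destination-alignment prologue that is
 * assembled only on Feroceon builds:
 * CALGN(	ands	ip, r0, #31	)
 * CALGN(	rsb	ip, ip, #32	)
 */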

#define IMM12_MASK	0xfff

/*
 * Enable and disable interrupts
 */
#if __LINUX_ARM_ARCH__ >= 6
	.macro	disable_irq_notrace
	cpsid	i
	.endm

	.macro	enable_irq_notrace
	cpsie	i
	.endm
#else
	.macro	disable_irq_notrace
	msr	cpsr_c, #PSR_I_BIT | SVC_MODE
	.endm

	.macro	enable_irq_notrace
	msr	cpsr_c, #SVC_MODE
	.endm
#endif

	.macro	asm_trace_hardirqs_off, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	.if \save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl	trace_hardirqs_off
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro	asm_trace_hardirqs_on, cond=al, save=1
#if defined(CONFIG_TRACE_IRQFLAGS)
	/*
	 * Actually the registers should be pushed and popped conditionally,
	 * but after a bl the flags are certainly clobbered.
	 */
	.if \save
	stmdb	sp!, {r0-r3, ip, lr}
	.endif
	bl\cond	trace_hardirqs_on
	.if \save
	ldmia	sp!, {r0-r3, ip, lr}
	.endif
#endif
	.endm

	.macro	disable_irq, save=1
	disable_irq_notrace
	asm_trace_hardirqs_off \save
	.endm

	.macro	enable_irq
	asm_trace_hardirqs_on
	enable_irq_notrace
	.endm
/*
 * Save the current IRQ state and disable IRQs.  Note that this macro
 * assumes FIQs are enabled, and that the processor is in SVC mode.
 */
	.macro	save_and_disable_irqs, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq
	.endm

	.macro	save_and_disable_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	mrs	\oldcpsr, primask
#else
	mrs	\oldcpsr, cpsr
#endif
	disable_irq_notrace
	.endm

/*
 * Restore interrupt state previously stored in a register.  We don't
 * guarantee that this will preserve the flags.
 */
	.macro	restore_irqs_notrace, oldcpsr
#ifdef CONFIG_CPU_V7M
	msr	primask, \oldcpsr
#else
	msr	cpsr_c, \oldcpsr
#endif
	.endm

	.macro	restore_irqs, oldcpsr
	tst	\oldcpsr, #PSR_I_BIT
	asm_trace_hardirqs_on cond=eq
	restore_irqs_notrace \oldcpsr
	.endm
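
/*
 * Illustrative pairing (a sketch, register arbitrary):
 *	save_and_disable_irqs r9	@ r9 := old IRQ state, IRQs masked
 *	...critical section...
 *	restore_irqs r9			@ previous IRQ state restored
 */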

/*
 * Assembly version of "adr rd, BSYM(sym)".  This should only be used to
 * reference local symbols in the same assembly file which are to be
 * resolved by the assembler.  Other usage is undefined.
 */
	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	badr\c, rd, sym
#ifdef CONFIG_THUMB2_KERNEL
	adr\c	\rd, \sym + 1
#else
	adr\c	\rd, \sym
#endif
	.endm
	.endr
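
/*
 * Illustrative use: load the address of a local label so that an
 * exception return lands in the correct instruction set (the Thumb bit
 * is set on Thumb-2 kernels), as safe_svcmode_maskall does below:
 *	badr	lr, 2f
 */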

/*
 * Get current thread_info.  The kernel stack is THREAD_SIZE aligned
 * with thread_info at its base, so this simply rounds sp down to the
 * bottom of the current stack.
 */
	.macro	get_thread_info, rd
 ARM(	mov	\rd, sp, lsr #THREAD_SIZE_ORDER + PAGE_SHIFT	)
 THUMB(	mov	\rd, sp			)
 THUMB(	lsr	\rd, \rd, #THREAD_SIZE_ORDER + PAGE_SHIFT	)
	mov	\rd, \rd, lsl #THREAD_SIZE_ORDER + PAGE_SHIFT
	.endm

/*
 * Increment/decrement the preempt count.
 */
#ifdef CONFIG_PREEMPT_COUNT
	.macro	inc_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	add	\tmp, \tmp, #1			@ increment it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count, ti, tmp
	ldr	\tmp, [\ti, #TI_PREEMPT]	@ get preempt count
	sub	\tmp, \tmp, #1			@ decrement it
	str	\tmp, [\ti, #TI_PREEMPT]
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	get_thread_info \ti
	dec_preempt_count \ti, \tmp
	.endm
#else
	.macro	inc_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count, ti, tmp
	.endm

	.macro	dec_preempt_count_ti, ti, tmp
	.endm
#endif

#define USERL(l, x...)				\
9999:	x;					\
	.pushsection __ex_table,"a";		\
	.align	3;				\
	.long	9999b,l;			\
	.popsection

#define USER(x...)	USERL(9001f, x)
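
/*
 * Illustrative use (a sketch; .Lfault is a hypothetical local handler):
 * a fault in the user access at the 9999: label branches to the fixup
 * instead of oopsing:
 * USERL(.Lfault, ldrt r0, [r1])
 */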

#ifdef CONFIG_SMP
#define ALT_SMP(instr...)					\
9998:	instr
/*
 * Note: if you get assembler errors from ALT_UP() when building with
 * CONFIG_THUMB2_KERNEL, you almost certainly need to use
 * ALT_SMP( W(instr) ... )
 */
#define ALT_UP(instr...)					\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
9997:	instr							;\
	.if . - 9997b == 2					;\
		nop						;\
	.endif							;\
	.if . - 9997b != 4					;\
		.error "ALT_UP() content must assemble to exactly 4 bytes";\
	.endif							;\
	.popsection
#define ALT_UP_B(label)						\
	.equ	up_b_offset, label - 9998b			;\
	.pushsection ".alt.smp.init", "a"			;\
	.long	9998b						;\
	W(b)	. + up_b_offset					;\
	.popsection
#else
#define ALT_SMP(instr...)
#define ALT_UP(instr...) instr
#define ALT_UP_B(label) b label
#endif
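
/*
 * Illustrative pairing, as smp_dmb below does: the UP replacement is
 * patched over the SMP instruction at boot when an SMP kernel runs on
 * a uniprocessor machine:
 *	ALT_SMP(dmb	ish)
 *	ALT_UP(nop)
 */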

/*
 * Instruction barrier
 */
	.macro	instr_sync
#if __LINUX_ARM_ARCH__ >= 7
	isb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c5, 4
#endif
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	.ifeqs "\mode","arm"
	ALT_SMP(dmb	ish)
	.else
	ALT_SMP(W(dmb)	ish)
	.endif
#elif __LINUX_ARM_ARCH__ == 6
	ALT_SMP(mcr	p15, 0, r0, c7, c10, 5)	@ dmb
#else
#error Incompatible SMP platform
#endif
	.ifeqs "\mode","arm"
	ALT_UP(nop)
	.else
	ALT_UP(W(nop))
	.endif
#endif
	.endm
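
/*
 * Illustrative use: pass \mode so the right encoding is emitted when
 * the surrounding code is always assembled as ARM (e.g. the kuser
 * helpers):
 *	smp_dmb	arm
 */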

#if defined(CONFIG_CPU_V7M)
	/*
	 * setmode is used to assert that the CPU is in SVC mode during boot.
	 * For v7-M this is done in __v7m_setup, so setmode can be empty here.
	 */
	.macro	setmode, mode, reg
	.endm
#elif defined(CONFIG_THUMB2_KERNEL)
	.macro	setmode, mode, reg
	mov	\reg, #\mode
	msr	cpsr_c, \reg
	.endm
#else
	.macro	setmode, mode, reg
	msr	cpsr_c, #\mode
	.endm
#endif

/*
 * Helper macro to enter SVC mode cleanly and mask interrupts.  reg is
 * a scratch register for the macro to overwrite.
 *
 * This macro is intended for forcing the CPU into SVC mode at boot time;
 * you cannot return to the original mode.
 */
.macro safe_svcmode_maskall reg:req
#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg, cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
	bic	\reg, \reg, #MODE_MASK
	orr	\reg, \reg, #PSR_I_BIT | PSR_F_BIT | SVC_MODE
THUMB(	orr	\reg, \reg, #PSR_T_BIT	)
	bne	1f
	orr	\reg, \reg, #PSR_A_BIT
	badr	lr, 2f
	msr	spsr_cxsf, \reg
	__MSR_ELR_HYP(14)
	__ERET
1:	msr	cpsr_c, \reg
2:
#else
/*
 * workaround for possibly broken pre-v6 hardware
 * (akita, Sharp Zaurus C-1000, PXA270-based)
 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, \reg
#endif
.endm

/*
 * STRT/LDRT access macros with ARM and Thumb-2 variants
 */
#ifdef CONFIG_THUMB2_KERNEL

	.macro	usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
9999:
	.if	\inc == 1
	\instr\()b\t\cond\().w \reg, [\ptr, #\off]
	.elseif	\inc == 4
	\instr\t\cond\().w \reg, [\ptr, #\off]
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endm

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort
	@ explicit IT instruction needed because of the label
	@ introduced by the USER macro
	.ifnc	\cond,al
	.if	\rept == 1
	itt	\cond
	.elseif	\rept == 2
	ittt	\cond
	.else
	.error	"Unsupported rept macro argument"
	.endif
	.endif

	@ Slightly optimised to avoid incrementing the pointer twice
	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
	.if	\rept == 2
	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
	.endif

	add\cond \ptr, #\rept * \inc
	.endm

#else	/* !CONFIG_THUMB2_KERNEL */

	.macro	usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
	.rept	\rept
9999:
	.if	\inc == 1
	\instr\()b\t\cond \reg, [\ptr], #\inc
	.elseif	\inc == 4
	\instr\t\cond \reg, [\ptr], #\inc
	.else
	.error	"Unsupported inc macro argument"
	.endif

	.pushsection __ex_table,"a"
	.align	3
	.long	9999b, \abort
	.popsection
	.endr
	.endm

#endif	/* CONFIG_THUMB2_KERNEL */

	.macro	strusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	str, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm

	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
	.endm
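
/*
 * Illustrative use (a sketch; the caller must define the default 9001:
 * fixup label): load one word from user space, post-incrementing the
 * pointer:
 *	ldrusr	r3, r0, 4
 */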

/* Utility macro for declaring string literals */
	.macro	string name:req, string
	.type \name , #object
\name:
	.asciz "\string"
	.size \name , . - \name
	.endm
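
/*
 * Illustrative use: declare a sized, NUL-terminated string object:
 *	string	cpu_elf_name, "v7"
 */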

	.macro	csdb
#ifdef CONFIG_THUMB2_KERNEL
	.inst.w	0xf3af8014
#else
	.inst	0xe320f014
#endif
	.endm
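
/*
 * Note on check_uaccess below: the adds sets C if addr + size - 1 wraps
 * past the top of the address space; otherwise sbcscc borrow-compares
 * the last byte of the range against \limit.  Either failure leaves C
 * set, so bcs branches to \bad.  The movcs/csdb pair matters only under
 * speculation: if the bcs is mispredicted as not taken, the pointer is
 * zeroed before it can be dereferenced down the speculative path.
 */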

	.macro	check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
#ifndef CONFIG_CPU_USE_DOMAINS
	adds	\tmp, \addr, #\size - 1
	sbcscc	\tmp, \tmp, \limit
	bcs	\bad
#ifdef CONFIG_CPU_SPECTRE
	movcs	\addr, #0
	csdb
#endif
#endif
	.endm

	.macro	uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
#ifdef CONFIG_CPU_SPECTRE
	sub	\tmp, \limit, #1
	subs	\tmp, \tmp, \addr	@ tmp = limit - 1 - addr
	addhs	\tmp, \tmp, #1		@ if (tmp >= 0) {
	subshs	\tmp, \tmp, \size	@ tmp = limit - (addr + size) }
	movlo	\addr, #0		@ if (tmp < 0) addr = NULL
	csdb
#endif
	.endm

	.macro	uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_DISABLE
	mcr	p15, 0, \tmp, c3, c0, 0		@ Set domain register
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro	uaccess_enable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Whenever we re-enter userspace, the domains should always be
	 * set appropriately.
	 */
	mov	\tmp, #DACR_UACCESS_ENABLE
	mcr	p15, 0, \tmp, c3, c0, 0
	.if	\isb
	instr_sync
	.endif
#endif
	.endm

	.macro	uaccess_save, tmp
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	mrc	p15, 0, \tmp, c3, c0, 0
	str	\tmp, [sp, #SVC_DACR]
#endif
	.endm

	.macro	uaccess_restore
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	ldr	r0, [sp, #SVC_DACR]
	mcr	p15, 0, r0, c3, c0, 0
#endif
	.endm
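
/*
 * Illustrative pairing (a sketch): open a user-access window around
 * ldrt/strt accesses when CONFIG_CPU_SW_DOMAIN_PAN is in effect:
 *	uaccess_enable	r3
 *	...ldrt/strt user accesses...
 *	uaccess_disable	r3
 */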

	.irp	c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
	.macro	ret\c, reg
#if __LINUX_ARM_ARCH__ < 6
	mov\c	pc, \reg
#else
	.ifeqs	"\reg", "lr"
	bx\c	\reg
	.else
	mov\c	pc, \reg
	.endif
#endif
	.endm
	.endr

	.macro	ret.w, reg
	ret	\reg
#ifdef CONFIG_THUMB2_KERNEL
	nop
#endif
	.endm

	.macro	bug, msg, line
#ifdef CONFIG_THUMB2_KERNEL
1:	.inst	0xde02
#else
1:	.inst	0xe7f001f2
#endif
#ifdef CONFIG_DEBUG_BUGVERBOSE
	.pushsection .rodata.str, "aMS", %progbits, 1
2:	.asciz	"\msg"
	.popsection
	.pushsection __bug_table, "aw"
	.align	2
	.word	1b, 2b
	.hword	\line
	.popsection
#endif
	.endm

#ifdef CONFIG_KPROBES
#define _ASM_NOKPROBE(entry)				\
	.pushsection "_kprobe_blacklist", "aw" ;	\
	.balign 4 ;					\
	.long entry;					\
	.popsection
#else
#define _ASM_NOKPROBE(entry)
#endif
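
/*
 * Illustrative use: keep an exception-path entry point from being
 * kprobed by adding it to the blacklist (__und_fault is one such user
 * elsewhere in the tree):
 *	_ASM_NOKPROBE(__und_fault)
 */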

#endif /* __ASM_ASSEMBLER_H__ */