v5.4
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <asm-generic/export.h>

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	.macro save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro disable_daif
	msr	daifset, #0xf
	.endm

	.macro enable_daif
	msr	daifclr, #0xf
	.endm

	.macro	restore_daif, flags:req
	msr	daif, \flags
	.endm

	/* Only for use with an AArch64 pstate value: PSR_D_BIT differs on AArch32 */
	.macro	inherit_daif, pstate:req, tmp:req
	and	\tmp, \pstate, #(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
	msr	daif, \tmp
	.endm

	/* IRQ is the lowest priority flag, unconditionally unmask the rest. */
	.macro enable_da_f
	msr	daifclr, #(8 | 4 | 1)
	.endm

/*
 * Save/restore interrupts.
 */
	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #2
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb, opt
	dmb	\opt
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro  esb
#ifdef CONFIG_ARM64_RAS_EXTN
	hint    #16
#else
	nop
#endif
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Emit an entry into the exception table
 */
	.macro		_asm_extable, from, to
	.pushsection	__ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
	.endm

#define USER(l, x...)				\
9999:	x;					\
	_asm_extable	9999b, l
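
/*
 * Usage sketch (illustrative, not part of the original header): run a
 * user-space access under an exception-table fixup, jumping to a local
 * label on fault:
 *
 *	USER(9f, ldtr	x0, [x1])	// unprivileged load from user memory
 *	...
 * 9:	// fault handling path
 */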

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
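
/*
 * Usage sketch (illustrative): combine two 32-bit halves held in x1
 * (low word) and x2 (high word). On big endian kernels the macro
 * definition above swaps its last two parameters, so the same call
 * site still builds the right value:
 *
 *	regs_to_64	x0, x1, x2	// x0 = (x2 << 32) | x1 on LE
 */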

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
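
	/*
	 * Usage sketch (illustrative; some_flag and some_var are
	 * hypothetical symbols):
	 *
	 *	adr_l	x0, some_var		// x0 = &some_var
	 *	ldr_l	w1, some_flag, x2	// w1 = some_flag, x2 is scratch
	 *	str_l	x3, some_var, x4	// some_var = x3, x4 is scratch
	 */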

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\tmp, tpidr_el1
alternative_else
	mrs	\tmp, tpidr_el2
alternative_endif
	ldr	\dst, [\dst, \tmp]
	.endm

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * mmid - get context id from mm pointer (mm->context.id)
 */
	.macro	mmid, rd, rn
	ldr	\rd, [\rn, #MM_CONTEXT_ID]
	.endm
/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
	.endm


/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_set_t1sz - update TCR.T1SZ
 */
	.macro	tcr_set_t1sz, valreg, t1sz
	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm
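
/*
 * Usage sketch (illustrative): clamp the IPS field of a TCR_EL1 value
 * held in x10 while setting up translation; TCR_IPS_SHIFT is assumed to
 * come from pgtable-hwdef.h:
 *
 *	tcr_compute_pa_size	x10, #TCR_IPS_SHIFT, x5, x6
 */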

/*
 * Macro to perform a data cache maintenance for the interval
 * [kaddr, kaddr + size)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	kaddr:		starting virtual address of the region
 * 	size:		size of the region
 * 	Corrupts:	kaddr, size, tmp1, tmp2
 */
	.macro __dcache_op_workaround_clean_cache, op, kaddr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.endm

	.macro dcache_by_line_op op, domain, kaddr, size, tmp1, tmp2
	dcache_line_size \tmp1, \tmp2
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \kaddr
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \kaddr	// dc cvap
	.else
	.ifc	\op, cvadp
	sys	3, c7, c13, 1, \kaddr	// dc cvadp
	.else
	dc	\op, \kaddr
	.endif
	.endif
	.endif
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
	dsb	\domain
	.endm
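
/*
 * Usage sketch (illustrative): clean and invalidate a buffer at x0 of
 * x1 bytes to the PoC; kaddr/size (x0/x1) and both temporaries are
 * corrupted:
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */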

/*
 * Macro to perform an instruction cache maintenance for the interval
 * [start, end)
 *
 * 	start, end:	virtual addresses describing the region
 *	label:		A label to branch to on user fault.
 * 	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, label
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
9997:
USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	9997b
	dsb	ish
	isb
	.endm

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1
	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
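
/*
 * Usage sketch (illustrative): src and dest are expected to be page
 * aligned; both pointers advance by PAGE_SIZE and the eight temporaries
 * are corrupted:
 *
 *	copy_page	x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */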

/*
 * Annotate a function as position independent, i.e., safe to be called before
 * the kernel virtual mapping is activated.
 */
#define ENDPIPROC(x)			\
	.globl	__pi_##x;		\
	.type	__pi_##x, %function;	\
	.set	__pi_##x, x;		\
	.size	__pi_##x, . - x;	\
	ENDPROC(x)

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#ifdef CONFIG_KASAN
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif

	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
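
	/*
	 * Usage sketch (illustrative): the expansion size follows from
	 * the .if tests above:
	 *
	 *	mov_q	x0, 0x12345678		// 2 insns: movz + movk
	 *	mov_q	x1, 0xffff800000000000	// 3 insns: sign-extended top half
	 */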

/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
 * orr is used as it can cover the immediate value (and is idempotent).
 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
 * 	ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	cbnz	\tmp, .Lskipoffs_\@
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
.Lskipoffs_\@ :
#endif
	.endm

/*
 * Perform the reverse of offset_ttbr1.
 * bic is used as it can cover the immediate value and, in future, won't need
 * to be nop'ed out when dealing with 52-bit kernel VAs.
 */
	.macro	restore_ttbr1, ttbr
#ifdef CONFIG_ARM64_VA_BITS_52
	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
#endif
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 * 	phys:	physical address, preserved
 * 	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

	.macro	pte_to_phys, phys, pte
#ifdef CONFIG_ARM64_PA_BITS_52
	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
	bfxil	\phys, \pte, #16, #32
	lsl	\phys, \phys, #16
#else
	and	\phys, \pte, #PTE_ADDR_MASK
#endif
	.endm

/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an errata on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm

/**
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from a value of 1
 * to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm

	/*
	 * frame_pop  - Pop the callee saved registers from the stack that were
	 *              pushed in the most recent call to frame_push, as well
	 *              as x29/x30 and any extra stack space that may have been
	 *              allocated.
	 */
	.macro		frame_pop
	__frame		ld
	.endm

	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm
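
	/*
	 * Usage sketch (illustrative): preserve x19/x20 plus 16 bytes of
	 * local storage across calls made from assembly:
	 *
	 *	frame_push	2, 16
	 *	...			// x19/x20 usable, locals on the stack
	 *	frame_pop		// restores x19/x20 and x29/x30
	 *	ret
	 */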

/*
 * Check whether to yield to another runnable task from kernel mode NEON code
 * (which runs with preemption disabled).
 *
 * if_will_cond_yield_neon
 *        // pre-yield patchup code
 * do_cond_yield_neon
 *        // post-yield patchup code
 * endif_yield_neon    <label>
 *
 * where <label> is optional, and marks the point where execution will resume
 * after a yield has been performed. If omitted, execution resumes right after
 * the endif_yield_neon invocation. Note that the entire sequence, including
 * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT
 * is not defined.
 *
 * As a convenience, in the case where no patchup code is required, the above
 * sequence may be abbreviated to
 *
 * cond_yield_neon <label>
 *
 * Note that the patchup code does not support assembler directives that change
 * the output section; any use of such directives is undefined.
 *
 * The yield itself consists of the following:
 * - Check whether the preempt count is exactly 1 and a reschedule is also
 *   needed. If so, calling of preempt_enable() in kernel_neon_end() will
 *   trigger a reschedule. If it is not the case, yielding is pointless.
 * - Disable and re-enable kernel mode NEON, and branch to the yield fixup
 *   code.
 *
 * This macro sequence may clobber all CPU state that is not guaranteed by the
 * AAPCS to be preserved across an ordinary function call.
 */

	.macro		cond_yield_neon, lbl
	if_will_cond_yield_neon
	do_cond_yield_neon
	endif_yield_neon	\lbl
	.endm

	.macro		if_will_cond_yield_neon
#ifdef CONFIG_PREEMPT
	get_current_task	x0
	ldr		x0, [x0, #TSK_TI_PREEMPT]
	sub		x0, x0, #PREEMPT_DISABLE_OFFSET
	cbz		x0, .Lyield_\@
	/* fall through to endif_yield_neon */
	.subsection	1
.Lyield_\@ :
#else
	.section	".discard.cond_yield_neon", "ax"
#endif
	.endm

	.macro		do_cond_yield_neon
	bl		kernel_neon_end
	bl		kernel_neon_begin
	.endm

	.macro		endif_yield_neon, lbl
	.ifnb		\lbl
	b		\lbl
	.else
	b		.Lyield_out_\@
	.endif
	.previous
.Lyield_out_\@ :
	.endm
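
	/*
	 * Usage sketch (illustrative): a NEON loop that offers to
	 * reschedule once per iteration. Per the clobber rule above,
	 * loop state is kept in callee-saved registers; v0 is reloaded
	 * at the resume label:
	 *
	 * 0:	ld1	{v0.16b}, [x19], #16	// pointer in callee-saved reg
	 *	...				// process one block
	 *	cond_yield_neon	0b		// may yield; resumes at 0b
	 *	subs	w20, w20, #1		// counter in callee-saved reg
	 *	b.ne	0b
	 */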
#endif	/* __ASM_ASSEMBLER_H */
v6.8
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASSEMBLY__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <linux/export.h>

#include <asm/alternative.h>
#include <asm/asm-bug.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	/*
	 * Provide a wxN alias for each wN register so that we can paste an xN
	 * reference after a 'w' to obtain the 32-bit version.
	 */
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
	wx\n	.req	w\n
	.endr
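
	/*
	 * Usage sketch (illustrative): if a macro parameter \reg expands
	 * to "x7", then "w\reg" expands to "wx7", which the .irp block
	 * above aliases to w7:
	 *
	 *	mov	w\reg, #0	// writes w7, zeroing the top half of x7
	 */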

	.macro disable_daif
	msr	daifset, #0xf
	.endm

	.macro enable_daif
	msr	daifclr, #0xf
	.endm

/*
 * Save/restore interrupts.
 */
	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #3
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm

	.macro	enable_dbg
	msr	daifclr, #8
	.endm

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
	isb	// Synchronise with enable_dbg
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #DBG_MDSCR_SS
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro  esb
#ifdef CONFIG_ARM64_RAS_EXTN
	hint    #16
#else
	nop
#endif
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Clear Branch History instruction
 */
	.macro clearbhb
	hint	#22
	.endm

/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm

	/*
	 * @dst: destination register
	 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
	.macro	get_this_cpu_offset, dst
	mrs	\dst, tpidr_el2
	.endm
#else
	.macro	get_this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\dst, tpidr_el1
alternative_else
	mrs	\dst, tpidr_el2
alternative_endif
	.endm

	.macro	set_this_cpu_offset, src
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	msr	tpidr_el1, \src
alternative_else
	msr	tpidr_el2, \src
alternative_endif
	.endm
#endif

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
	get_this_cpu_offset \tmp
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
	get_this_cpu_offset \tmp
	ldr	\dst, [\dst, \tmp]
	.endm

/*
 * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
 */
	.macro	vma_vm_mm, rd, rn
	ldr	\rd, [\rn, #VMA_VM_MM]
	.endm

/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
#ifndef __KVM_NVHE_HYPERVISOR__
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
#else
alternative_if_not ARM64_KVM_PROTECTED_MODE
	ASM_BUG()
alternative_else_nop_endif
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_compute_final_ctr_el0
	movz	\reg, #0
	movk	\reg, #0, lsl #16
	movk	\reg, #0, lsl #32
	movk	\reg, #0, lsl #48
alternative_cb_end
#endif
	.endm


/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * tcr_set_t1sz - update TCR.T1SZ
 */
	.macro	tcr_set_t1sz, valreg, t1sz
	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
	.endm

/*
 * idmap_get_t0sz - get the T0SZ value needed to cover the ID map
 *
 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
 * this number conveniently equals the number of leading zeroes in
 * the physical address of _end.
 */
	.macro	idmap_get_t0sz, reg
	adrp	\reg, _end
	orr	\reg, \reg, #(1 << VA_BITS_MIN) - 1
	clz	\reg, \reg
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm

	.macro __dcache_op_workaround_clean_cache, op, addr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \addr
alternative_else
	dc	civac, \addr
alternative_endif
	.endm

/*
 * Macro to perform a data cache maintenance for the interval
 * [start, end) with dcache line size explicitly provided.
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	start:          starting virtual address of the region
 * 	end:            end virtual address of the region
 *	linesz:		dcache line size
 * 	fixup:		optional label to branch to on user fault
 * 	Corrupts:       start, end, tmp
 */
	.macro dcache_by_myline_op op, domain, start, end, linesz, tmp, fixup
	sub	\tmp, \linesz, #1
	bic	\start, \start, \tmp
.Ldcache_op\@:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \start
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \start
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \start	// dc cvap
	.else
	.ifc	\op, cvadp
	sys	3, c7, c13, 1, \start	// dc cvadp
	.else
	dc	\op, \start
	.endif
	.endif
	.endif
	.endif
	add	\start, \start, \linesz
	cmp	\start, \end
	b.lo	.Ldcache_op\@
	dsb	\domain

	_cond_uaccess_extable .Ldcache_op\@, \fixup
	.endm

/*
 * Macro to perform a data cache maintenance for the interval
 * [start, end)
 *
 * 	op:		operation passed to dc instruction
 * 	domain:		domain used in dsb instruction
 * 	start:          starting virtual address of the region
 * 	end:            end virtual address of the region
 * 	fixup:		optional label to branch to on user fault
 * 	Corrupts:       start, end, tmp1, tmp2
 */
	.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
	dcache_line_size \tmp1, \tmp2
	dcache_by_myline_op \op, \domain, \start, \end, \tmp1, \tmp2, \fixup
	.endm
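
/*
 * Usage sketch (illustrative): clean and invalidate [x0, x1) to the PoC;
 * the trailing fixup argument is optional and omitted here:
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */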

/*
 * Macro to perform an instruction cache maintenance for the interval
 * [start, end)
 *
 * 	start, end:	virtual addresses describing the region
 *	fixup:		optional label to branch to on user fault
 * 	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
.Licache_op\@:
	ic	ivau, \tmp2			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	.Licache_op\@
	dsb	ish
	isb

	_cond_uaccess_extable .Licache_op\@, \fixup
	.endm

/*
 * load_ttbr1 - install @pgtbl as a TTBR1 page table
 * pgtbl preserved
 * tmp1/tmp2 clobbered, either may overlap with pgtbl
 */
	.macro		load_ttbr1, pgtbl, tmp1, tmp2
	phys_to_ttbr	\tmp1, \pgtbl
	offset_ttbr1	\tmp1, \tmp2
	msr		ttbr1_el1, \tmp1
	isb
	.endm

/*
 * To prevent the possibility of old and new partial table walks being visible
 * in the tlb, switch the ttbr to a zero page when we invalidate the old
 * records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i
 * Even switching to our copied tables will cause a changed output address at
 * each stage of the walk.
 */
	.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
	phys_to_ttbr \tmp, \zero_page
	msr	ttbr1_el1, \tmp
	isb
	tlbi	vmalle1
	dsb	nsh
	load_ttbr1 \page_table, \tmp, \tmp2
	.endm

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1
	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	\tmpreg, #1			// Skip if no PMU present
	b.lt	9000f
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present
 */
	.macro	reset_amuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64pfr0_el1	// Check ID_AA64PFR0_EL1
	ubfx	\tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
	cbz	\tmpreg, .Lskip_\@		// Skip if no AMU present
	msr_s	SYS_AMUSERENR_EL0, xzr		// Disable AMU access from EL0
.Lskip_\@:
	.endm
/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif

	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm

/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm

/*
 * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
 * orr is used as it can cover the immediate value (and is idempotent).
 * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
 * 	ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr, tmp
#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_EL1_VARange_SHIFT)
	cbnz	\tmp, .Lskipoffs_\@
	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
.Lskipoffs_\@ :
#endif
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 * 	phys:	physical address, preserved
 * 	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	/*
	 * We assume \phys is 64K aligned and this is guaranteed by only
	 * supporting this configuration with 64K pages.
	 */
	orr	\pte, \phys, \phys, lsr #36
	and	\pte, \pte, #PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #PTE_ADDR_MASK
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\phys, \phys, \phys, lsl #PTE_ADDR_HIGH_SHIFT
	and	\phys, \phys, GENMASK_ULL(PHYS_MASK_SHIFT - 1, PAGE_SHIFT)
#endif
	.endm

/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an errata on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm

/**
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from a value of 1
 * to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm

	/*
	 * frame_pop  - Pop the callee saved registers from the stack that were
	 *              pushed in the most recent call to frame_push, as well
	 *              as x29/x30 and any extra stack space that may have been
	 *              allocated.
	 */
	.macro		frame_pop
	__frame		ld
	.endm

	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm

/*
 * Set SCTLR_ELx to the @reg value, and invalidate the local icache
 * in the process. This is called when setting the MMU on.
 */
.macro set_sctlr, sreg, reg
	msr	\sreg, \reg
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
.endm

.macro set_sctlr_el1, reg
	set_sctlr sctlr_el1, \reg
.endm

.macro set_sctlr_el2, reg
	set_sctlr sctlr_el2, \reg
.endm

	/*
	 * Check whether asm code should yield as soon as it is able. This is
	 * the case if we are currently running in task context, and the
	 * TIF_NEED_RESCHED flag is set. (Note that the TIF_NEED_RESCHED flag
	 * is stored negated in the top word of the thread_info::preempt_count
	 * field)
	 */
	.macro		cond_yield, lbl:req, tmp:req, tmp2
#ifdef CONFIG_PREEMPT_VOLUNTARY
	get_current_task \tmp
	ldr		\tmp, [\tmp, #TSK_TI_PREEMPT]
	/*
	 * If we are serving a softirq, there is no point in yielding: the
	 * softirq will not be preempted no matter what we do, so we should
	 * run to completion as quickly as we can. The preempt_count field will
	 * have BIT(SOFTIRQ_SHIFT) set in this case, so the zero check will
	 * catch this case too.
	 */
	cbz		\tmp, \lbl
#endif
	.endm
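
	/*
	 * Usage sketch (illustrative): a long-running loop that bails out
	 * to a save/return path when a reschedule is due:
	 *
	 * 0:	...				// one unit of work
	 *	cond_yield	3f, x8, x9	// x8/x9 are scratch
	 *	b	0b
	 * 3:	// store partial state and return; the caller re-invokes us
	 */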

/*
 * Branch Target Identifier (BTI)
 */
	.macro  bti, targets
	.equ	.L__bti_targets_c, 34
	.equ	.L__bti_targets_j, 36
	.equ	.L__bti_targets_jc,38
	hint	#.L__bti_targets_\targets
	.endm
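
/*
 * Usage sketch (illustrative): the targets argument selects the BTI
 * variant via the hint encodings above (#34/#36/#38):
 *
 *	bti	c		// landing pad for indirect calls (BLR)
 *	bti	j		// landing pad for indirect jumps (BR)
 *	bti	jc		// landing pad for both
 */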

/*
 * This macro emits a program property note section identifying
 * architecture features which require special handling, mainly for
 * use in assembly files included in the VDSO.
 */

#define NT_GNU_PROPERTY_TYPE_0  5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND      0xc0000000

#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI      (1U << 0)
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC      (1U << 1)

#ifdef CONFIG_ARM64_BTI_KERNEL
#define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT		\
		((GNU_PROPERTY_AARCH64_FEATURE_1_BTI |	\
		  GNU_PROPERTY_AARCH64_FEATURE_1_PAC))
#endif

#ifdef GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
.macro emit_aarch64_feature_1_and, feat=GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
	.pushsection .note.gnu.property, "a"
	.align  3
	.long   2f - 1f
	.long   6f - 3f
	.long   NT_GNU_PROPERTY_TYPE_0
1:      .string "GNU"
2:
	.align  3
3:      .long   GNU_PROPERTY_AARCH64_FEATURE_1_AND
	.long   5f - 4f
4:
	/*
	 * This is described with an array of char in the Linux API
	 * spec but the text and all other usage (including binutils,
	 * clang and GCC) treat this as a 32 bit value so no swizzling
	 * is required for big endian.
	 */
	.long   \feat
5:
	.align  3
6:
	.popsection
.endm

#else
.macro emit_aarch64_feature_1_and, feat=0
.endm

#endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
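
/*
 * Usage sketch (illustrative): an assembly source built into the VDSO
 * invokes the macro once, typically at the end of the file, so the
 * object carries the BTI/PAC property note:
 *
 *	emit_aarch64_feature_1_and
 */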

	.macro __mitigate_spectre_bhb_loop      tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_iter
	mov	\tmp, #32		// Patched to correct the immediate
alternative_cb_end
.Lspectre_bhb_loop\@:
	b	. + 4
	subs	\tmp, \tmp, #1
	b.ne	.Lspectre_bhb_loop\@
	sb
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	.macro mitigate_spectre_bhb_loop	tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_mitigation_enable
	b	.L_spectre_bhb_loop_done\@	// Patched to NOP
alternative_cb_end
	__mitigate_spectre_bhb_loop	\tmp
.L_spectre_bhb_loop_done\@:
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	/* Saves/restores x0-x3 on the stack */
	.macro __mitigate_spectre_bhb_fw
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	stp	x0, x1, [sp, #-16]!
	stp	x2, x3, [sp, #-16]!
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	.macro mitigate_spectre_bhb_clear_insn
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_clearbhb
	/* Patched to NOP when not supported */
	clearbhb
	isb
alternative_cb_end
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm
#endif	/* __ASM_ASSEMBLER_H */