   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 * Low-level exception handling code
   4 *
   5 * Copyright (C) 2012 ARM Ltd.
   6 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
   7 *		Will Deacon <will.deacon@arm.com>
   8 */
   9
  10#include <linux/arm-smccc.h>
  11#include <linux/init.h>
  12#include <linux/linkage.h>
  13
  14#include <asm/alternative.h>
  15#include <asm/assembler.h>
  16#include <asm/asm-offsets.h>
  17#include <asm/asm_pointer_auth.h>
  18#include <asm/bug.h>
  19#include <asm/cpufeature.h>
  20#include <asm/errno.h>
  21#include <asm/esr.h>
  22#include <asm/irq.h>
  23#include <asm/memory.h>
  24#include <asm/mmu.h>
  25#include <asm/processor.h>
  26#include <asm/ptrace.h>
  27#include <asm/scs.h>
  28#include <asm/thread_info.h>
  29#include <asm/asm-uaccess.h>
  30#include <asm/unistd.h>
  31
  32	.macro	clear_gp_regs
  33	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
  34	mov	x\n, xzr
  35	.endr
  36	.endm
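As a quick illustration of the macro above: its expansion is simply thirty moves, one per general-purpose register, with x30 (lr) and sp intentionally left for the surrounding entry code to handle.

	mov	x0, xzr			// clear_gp_regs, expanded
	mov	x1, xzr
	//	... x2 through x28 likewise ...
	mov	x29, xzr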
  37
  38	.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
  39	.align 7
  40.Lventry_start\@:
  41	.if	\el == 0
  42	/*
  43	 * This must be the first instruction of the EL0 vector entries. It is
  44	 * skipped by the trampoline vectors, to trigger the cleanup.
  45	 */
  46	b	.Lskip_tramp_vectors_cleanup\@
  47	.if	\regsize == 64
  48	mrs	x30, tpidrro_el0
  49	msr	tpidrro_el0, xzr
  50	.else
  51	mov	x30, xzr
  52	.endif
  53.Lskip_tramp_vectors_cleanup\@:
  54	.endif
  55
  56	sub	sp, sp, #PT_REGS_SIZE
  57#ifdef CONFIG_VMAP_STACK
  58	/*
  59	 * Test whether the SP has overflowed, without corrupting a GPR.
  60	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
  61	 * should always be zero.
  62	 */
  63	add	sp, sp, x0			// sp' = sp + x0
  64	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
  65	tbnz	x0, #THREAD_SHIFT, 0f
  66	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
  67	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
  68	b	el\el\ht\()_\regsize\()_\label
  69
  700:
  71	/*
  72	 * Either we've just detected an overflow, or we've taken an exception
  73	 * while on the overflow stack. Either way, we won't return to
  74	 * userspace, and can clobber EL0 registers to free up GPRs.
  75	 */
  76
  77	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
  78	msr	tpidr_el0, x0
  79
  80	/* Recover the original x0 value and stash it in tpidrro_el0 */
  81	sub	x0, sp, x0
  82	msr	tpidrro_el0, x0
  83
  84	/* Switch to the overflow stack */
  85	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
  86
  87	/*
  88	 * Check whether we were already on the overflow stack. This may happen
  89	 * after panic() re-enables interrupts.
  90	 */
  91	mrs	x0, tpidr_el0			// sp of interrupted context
  92	sub	x0, sp, x0			// delta with top of overflow stack
  93	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
  94	b.ne	__bad_stack			// no? -> bad stack pointer
  95
  96	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
  97	sub	sp, sp, x0
  98	mrs	x0, tpidrro_el0
  99#endif
 100	b	el\el\ht\()_\regsize\()_\label
 101.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
 102	.endm
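Two details of kernel_ventry are easy to miss. First, the concatenated branch target: `kernel_ventry 0, t, 64, sync`, for instance, branches to el0t_64_sync. Second, the CONFIG_VMAP_STACK probe detects a stack overflow without needing a scratch GPR by temporarily parking values in sp; a worked trace of the non-overflow path, assuming the stack-alignment invariant described in the comment:

	sub	sp, sp, #PT_REGS_SIZE	// sp = candidate SP
	add	sp, sp, x0		// sp = candidate + x0
	sub	x0, sp, x0		// x0 = candidate (x0 still recoverable from sp)
	tbnz	x0, #THREAD_SHIFT, 0f	// bit set => candidate ran off the stack
	sub	x0, sp, x0		// x0 = (candidate + x0) - candidate = original x0
	sub	sp, sp, x0		// sp = candidate SP; nothing was clobbered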
 103
 104	.macro	tramp_alias, dst, sym
 105	.set	.Lalias\@, TRAMP_VALIAS + \sym - .entry.tramp.text
 106	movz	\dst, :abs_g2_s:.Lalias\@
 107	movk	\dst, :abs_g1_nc:.Lalias\@
 108	movk	\dst, :abs_g0_nc:.Lalias\@
 109	.endm
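tramp_alias materializes an absolute address inside the trampoline alias mapping: the alias of \sym is its offset from the start of .entry.tramp.text added to TRAMP_VALIAS, and because that alias lives near the top of the VA space (upper 16 bits all ones), the :abs_g2_s:/:abs_g1_nc:/:abs_g0_nc: movz/movk triple can build the full 64-bit value in three instructions. For example, the later `tramp_alias x29, tramp_exit` amounts to:

	.set	.Lalias, TRAMP_VALIAS + tramp_exit - .entry.tramp.text
	movz	x29, :abs_g2_s:.Lalias		// bits [47:32]; the signed g2 form lets [63:48] come out as 0xffff
	movk	x29, :abs_g1_nc:.Lalias		// bits [31:16]
	movk	x29, :abs_g0_nc:.Lalias		// bits [15:0]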
 110
 111	/*
 112	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
 113	 * them if required.
 114	 */
 115	.macro	apply_ssbd, state, tmp1, tmp2
 116alternative_cb	ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable
 117	b	.L__asm_ssbd_skip\@		// Patched to NOP
 118alternative_cb_end
 119	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
 120	cbz	\tmp2,	.L__asm_ssbd_skip\@
 121	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
 122	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
 123	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
 124	mov	w1, #\state
 125alternative_cb	ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
 126	nop					// Patched to SMC/HVC #0
 127alternative_cb_end
 128.L__asm_ssbd_skip\@:
 129	.endm
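Putting the two callbacks above together: on a CPU that needs the Spectre-v4 firmware mitigation, spectre_v4_patch_fw_mitigation_enable turns the leading skip-branch into a NOP and smccc_patch_fw_mitigation_conduit turns the trailing NOP into the firmware conduit, so the kernel_entry invocation `apply_ssbd 1, x22, x23` behaves roughly as follows (a sketch; the SMC conduit is shown, HVC is the other possibility):

	ldr_this_cpu	x23, arm64_ssbd_callback_required, x22
	cbz	x23, .L__asm_ssbd_skip		// firmware call not needed on this CPU
	ldr	x23, [tsk, #TSK_TI_FLAGS]
	tbnz	x23, #TIF_SSBD, .L__asm_ssbd_skip // per-task SSBD policy: no dynamic toggle
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #1				// state: enable the mitigation on kernel entry
	smc	#0				// patched-in conduit (may equally be hvc #0)
.L__asm_ssbd_skip: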
 130
 131	/* Check for MTE asynchronous tag check faults */
 132	.macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
 133#ifdef CONFIG_ARM64_MTE
 134	.arch_extension lse
 135alternative_if_not ARM64_MTE
 136	b	1f
 137alternative_else_nop_endif
 138	/*
 139	 * Asynchronous tag check faults are only possible in ASYNC (2) or
 140	 * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
 141	 * set, so skip the check if it is unset.
 142	 */
 143	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
 144	mrs_s	\tmp, SYS_TFSRE0_EL1
 145	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
 146	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
 147	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
 148	add	\ti_flags, tsk, #TSK_TI_FLAGS
 149	stset	\tmp, [\ti_flags]
 1501:
 151#endif
 152	.endm
 153
 154	/* Clear the MTE asynchronous tag check faults */
 155	.macro clear_mte_async_tcf thread_sctlr
 156#ifdef CONFIG_ARM64_MTE
 157alternative_if ARM64_MTE
 158	/* See comment in check_mte_async_tcf above. */
 159	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
 160	dsb	ish
 161	msr_s	SYS_TFSRE0_EL1, xzr
 1621:
 163alternative_else_nop_endif
 164#endif
 165	.endm
 166
 167	.macro mte_set_gcr, mte_ctrl, tmp
 168#ifdef CONFIG_ARM64_MTE
 169	ubfx	\tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
 170	orr	\tmp, \tmp, #SYS_GCR_EL1_RRND
 171	msr_s	SYS_GCR_EL1, \tmp
 172#endif
 173	.endm
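In terms of the GCR_EL1 layout (Exclude mask in bits [15:0], RRND at bit 16), the write above amounts to the following, with the 16-bit user exclusion mask taken from the task's mte_ctrl word:

	//	exclude = (mte_ctrl >> MTE_CTRL_GCR_USER_EXCL_SHIFT) & 0xffff
	//	GCR_EL1 = exclude | SYS_GCR_EL1_RRND
	// i.e. IRG draws allocation tags from outside the excluded set, using the
	// CPU's implementation-defined random generator rather than a fixed seed.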
 174
 175	.macro mte_set_kernel_gcr, tmp, tmp2
 176#ifdef CONFIG_KASAN_HW_TAGS
 177alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
 178	b	1f
 179alternative_cb_end
 180	mov	\tmp, KERNEL_GCR_EL1
 181	msr_s	SYS_GCR_EL1, \tmp
 1821:
 183#endif
 184	.endm
 185
 186	.macro mte_set_user_gcr, tsk, tmp, tmp2
 187#ifdef CONFIG_KASAN_HW_TAGS
 188alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
 189	b	1f
 190alternative_cb_end
 191	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]
 192
 193	mte_set_gcr \tmp, \tmp2
 1941:
 195#endif
 196	.endm
 197
 198	.macro	kernel_entry, el, regsize = 64
 199	.if	\el == 0
 200	alternative_insn nop, SET_PSTATE_DIT(1), ARM64_HAS_DIT
 201	.endif
 202	.if	\regsize == 32
 203	mov	w0, w0				// zero upper 32 bits of x0
 204	.endif
 205	stp	x0, x1, [sp, #16 * 0]
 206	stp	x2, x3, [sp, #16 * 1]
 207	stp	x4, x5, [sp, #16 * 2]
 208	stp	x6, x7, [sp, #16 * 3]
 209	stp	x8, x9, [sp, #16 * 4]
 210	stp	x10, x11, [sp, #16 * 5]
 211	stp	x12, x13, [sp, #16 * 6]
 212	stp	x14, x15, [sp, #16 * 7]
 213	stp	x16, x17, [sp, #16 * 8]
 214	stp	x18, x19, [sp, #16 * 9]
 215	stp	x20, x21, [sp, #16 * 10]
 216	stp	x22, x23, [sp, #16 * 11]
 217	stp	x24, x25, [sp, #16 * 12]
 218	stp	x26, x27, [sp, #16 * 13]
 219	stp	x28, x29, [sp, #16 * 14]
 220
 221	.if	\el == 0
 222	clear_gp_regs
 223	mrs	x21, sp_el0
 224	ldr_this_cpu	tsk, __entry_task, x20
 225	msr	sp_el0, tsk
 226
 227	/*
 228	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
 229	 * when scheduling.
 230	 */
 231	ldr	x19, [tsk, #TSK_TI_FLAGS]
 232	disable_step_tsk x19, x20
 233
 234	/* Check for asynchronous tag check faults in user space */
 235	ldr	x0, [tsk, THREAD_SCTLR_USER]
 236	check_mte_async_tcf x22, x23, x0
 237
 238#ifdef CONFIG_ARM64_PTR_AUTH
 239alternative_if ARM64_HAS_ADDRESS_AUTH
 240	/*
 241	 * Enable IA for in-kernel PAC if the task had it disabled. Although
 242	 * this could be implemented with an unconditional MRS which would avoid
 243	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
 244	 *
 245	 * Install the kernel IA key only if IA was enabled in the task. If IA
 246	 * was disabled on kernel exit then we would have left the kernel IA
 247	 * installed so there is no need to install it again.
 248	 */
 249	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
 250	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
 251	b	2f
 2521:
 253	mrs	x0, sctlr_el1
 254	orr	x0, x0, SCTLR_ELx_ENIA
 255	msr	sctlr_el1, x0
 2562:
 257alternative_else_nop_endif
 258#endif
 259
 260	apply_ssbd 1, x22, x23
 261
 262	mte_set_kernel_gcr x22, x23
 263
 264	/*
 265	 * Any non-self-synchronizing system register updates required for
 266	 * kernel entry should be placed before this point.
 267	 */
 268alternative_if ARM64_MTE
 269	isb
 270	b	1f
 271alternative_else_nop_endif
 272alternative_if ARM64_HAS_ADDRESS_AUTH
 273	isb
 274alternative_else_nop_endif
 2751:
 276
 277	scs_load_current
 278	.else
 279	add	x21, sp, #PT_REGS_SIZE
 280	get_current_task tsk
 281	.endif /* \el == 0 */
 282	mrs	x22, elr_el1
 283	mrs	x23, spsr_el1
 284	stp	lr, x21, [sp, #S_LR]
 285
 286	/*
 287	 * For exceptions from EL0, create a final frame record.
 288	 * For exceptions from EL1, create a synthetic frame record so the
 289	 * interrupted code shows up in the backtrace.
 290	 */
 291	.if \el == 0
 292	stp	xzr, xzr, [sp, #S_STACKFRAME]
 293	.else
 294	stp	x29, x22, [sp, #S_STACKFRAME]
 295	.endif
 296	add	x29, sp, #S_STACKFRAME
 297
 298#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 299alternative_if_not ARM64_HAS_PAN
 300	bl	__swpan_entry_el\el
 301alternative_else_nop_endif
 302#endif
 303
 304	stp	x22, x23, [sp, #S_PC]
 305
 306	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
 307	.if	\el == 0
 308	mov	w21, #NO_SYSCALL
 309	str	w21, [sp, #S_SYSCALLNO]
 310	.endif
 311
 312#ifdef CONFIG_ARM64_PSEUDO_NMI
 313alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
 314	b	.Lskip_pmr_save\@
 315alternative_else_nop_endif
 316
 317	mrs_s	x20, SYS_ICC_PMR_EL1
 318	str	x20, [sp, #S_PMR_SAVE]
 319	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
 320	msr_s	SYS_ICC_PMR_EL1, x20
 321
 322.Lskip_pmr_save\@:
 323#endif
 324
 325	/*
 326	 * Registers that may be useful after this macro is invoked:
 327	 *
 328	 * x20 - ICC_PMR_EL1
 329	 * x21 - aborted SP
 330	 * x22 - aborted PC
 331	 * x23 - aborted PSTATE
 332	 */
 333	.endm
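Taken together, the stores above populate the on-stack struct pt_regs that the C handlers receive. A sketch of the layout implied by the offsets used (the S_* constants are generated by asm-offsets):

	// [sp + 16*0 .. 16*14]	x0-x29 (regs[0..29], the stp pairs above)
	// [sp + S_LR]		lr (x30), followed by the aborted SP (x21)
	// [sp + S_PC]		ELR_EL1 (x22), followed by SPSR_EL1 (x23)
	// [sp + S_SYSCALLNO]	NO_SYSCALL, until el0_svc overwrites it
	// [sp + S_PMR_SAVE]	ICC_PMR_EL1, when pseudo-NMI priority masking is in use
	// [sp + S_STACKFRAME]	frame record: {0, 0} for EL0, {x29, ELR} for EL1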
 334
 335	.macro	kernel_exit, el
 336	.if	\el != 0
 337	disable_daif
 338	.endif
 339
 340#ifdef CONFIG_ARM64_PSEUDO_NMI
 341alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
 342	b	.Lskip_pmr_restore\@
 343alternative_else_nop_endif
 344
 345	ldr	x20, [sp, #S_PMR_SAVE]
 346	msr_s	SYS_ICC_PMR_EL1, x20
 347
 348	/* Ensure priority change is seen by redistributor */
 349alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC
 350	dsb	sy
 351alternative_else_nop_endif
 352
 353.Lskip_pmr_restore\@:
 354#endif
 355
 356	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 357
 358#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 359alternative_if_not ARM64_HAS_PAN
 360	bl	__swpan_exit_el\el
 361alternative_else_nop_endif
 362#endif
 363
 364	.if	\el == 0
 365	ldr	x23, [sp, #S_SP]		// load return stack pointer
 366	msr	sp_el0, x23
 367	tst	x22, #PSR_MODE32_BIT		// native task?
 368	b.eq	3f
 369
 370#ifdef CONFIG_ARM64_ERRATUM_845719
 371alternative_if ARM64_WORKAROUND_845719
 372#ifdef CONFIG_PID_IN_CONTEXTIDR
 373	mrs	x29, contextidr_el1
 374	msr	contextidr_el1, x29
 375#else
 376	msr contextidr_el1, xzr
 377#endif
 378alternative_else_nop_endif
 379#endif
 3803:
 381	scs_save tsk
 382
 383	/* Ignore asynchronous tag check faults in the uaccess routines */
 384	ldr	x0, [tsk, THREAD_SCTLR_USER]
 385	clear_mte_async_tcf x0
 386
 387#ifdef CONFIG_ARM64_PTR_AUTH
 388alternative_if ARM64_HAS_ADDRESS_AUTH
 389	/*
 390	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
 391	 * alternatively install the user's IA. All other per-task keys and
 392	 * SCTLR bits were updated on task switch.
 393	 *
 394	 * No kernel C function calls after this.
 395	 */
 396	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
 397	__ptrauth_keys_install_user tsk, x0, x1, x2
 398	b	2f
 3991:
 400	mrs	x0, sctlr_el1
 401	bic	x0, x0, SCTLR_ELx_ENIA
 402	msr	sctlr_el1, x0
 4032:
 404alternative_else_nop_endif
 405#endif
 406
 407	mte_set_user_gcr tsk, x0, x1
 408
 409	apply_ssbd 0, x0, x1
 410	.endif
 411
 412	msr	elr_el1, x21			// set up the return data
 413	msr	spsr_el1, x22
 414	ldp	x0, x1, [sp, #16 * 0]
 415	ldp	x2, x3, [sp, #16 * 1]
 416	ldp	x4, x5, [sp, #16 * 2]
 417	ldp	x6, x7, [sp, #16 * 3]
 418	ldp	x8, x9, [sp, #16 * 4]
 419	ldp	x10, x11, [sp, #16 * 5]
 420	ldp	x12, x13, [sp, #16 * 6]
 421	ldp	x14, x15, [sp, #16 * 7]
 422	ldp	x16, x17, [sp, #16 * 8]
 423	ldp	x18, x19, [sp, #16 * 9]
 424	ldp	x20, x21, [sp, #16 * 10]
 425	ldp	x22, x23, [sp, #16 * 11]
 426	ldp	x24, x25, [sp, #16 * 12]
 427	ldp	x26, x27, [sp, #16 * 13]
 428	ldp	x28, x29, [sp, #16 * 14]
 429
 430	.if	\el == 0
 431#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 432	alternative_insn "b .L_skip_tramp_exit_\@", nop, ARM64_UNMAP_KERNEL_AT_EL0
 433
 434	msr	far_el1, x29
 435
 436	ldr_this_cpu	x30, this_cpu_vector, x29
 437	tramp_alias	x29, tramp_exit
 438	msr		vbar_el1, x30		// install vector table
 439	ldr		lr, [sp, #S_LR]		// restore x30
 440	add		sp, sp, #PT_REGS_SIZE	// restore sp
 441	br		x29
 442
 443.L_skip_tramp_exit_\@:
 444#endif
 445	.endif
 446
 447	ldr	lr, [sp, #S_LR]
 448	add	sp, sp, #PT_REGS_SIZE		// restore sp
 449
 450	.if \el == 0
 451	/* This must be after the last explicit memory access */
 452alternative_if ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
 453	tlbi	vale1, xzr
 454	dsb	nsh
 455alternative_else_nop_endif
 456	.else
 457	/* Ensure any device/NC reads complete */
 458	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
 459	.endif
 460
 461	eret
 462	sb
 463	.endm
 464
 465#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 466	/*
 467	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
 468	 * EL0, there is no need to check the state of TTBR0_EL1 since
 469	 * accesses are always enabled.
 470	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
 471	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
 472	 * user mappings.
 473	 */
 474SYM_CODE_START_LOCAL(__swpan_entry_el1)
 475	mrs	x21, ttbr0_el1
 476	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
 477	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
 478	b.eq	1f				// TTBR0 access already disabled
 479	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
 480SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
 481	__uaccess_ttbr0_disable x21
 4821:	ret
 483SYM_CODE_END(__swpan_entry_el1)
 484
 485	/*
 486	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
 487	 * PAN bit checking.
 488	 */
 489SYM_CODE_START_LOCAL(__swpan_exit_el1)
 490	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
 491	__uaccess_ttbr0_enable x0, x1
 4921:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
 493	ret
 494SYM_CODE_END(__swpan_exit_el1)
 495
 496SYM_CODE_START_LOCAL(__swpan_exit_el0)
 497	__uaccess_ttbr0_enable x0, x1
 498	/*
 499	 * Enable errata workarounds only if returning to user. The only
 500	 * workaround currently required for TTBR0_EL1 changes is for the
 501	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
 502	 * corruption).
 503	 */
 504	b	post_ttbr_update_workaround
 505SYM_CODE_END(__swpan_exit_el0)
 506#endif
 507
 508/* GPRs used by entry code */
 509tsk	.req	x28		// current thread_info
 510
 511	.text
 512
 513/*
 514 * Exception vectors.
 515 */
 516	.pushsection ".entry.text", "ax"
 517
 518	.align	11
 519SYM_CODE_START(vectors)
 520	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
 521	kernel_ventry	1, t, 64, irq		// IRQ EL1t
 522	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
 523	kernel_ventry	1, t, 64, error		// Error EL1t
 524
 525	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
 526	kernel_ventry	1, h, 64, irq		// IRQ EL1h
 527	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
 528	kernel_ventry	1, h, 64, error		// Error EL1h
 529
 530	kernel_ventry	0, t, 64, sync		// Synchronous 64-bit EL0
 531	kernel_ventry	0, t, 64, irq		// IRQ 64-bit EL0
 532	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
 533	kernel_ventry	0, t, 64, error		// Error 64-bit EL0
 534
 535	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
 536	kernel_ventry	0, t, 32, irq		// IRQ 32-bit EL0
 537	kernel_ventry	0, t, 32, fiq		// FIQ 32-bit EL0
 538	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
 539SYM_CODE_END(vectors)
 540
 541#ifdef CONFIG_VMAP_STACK
 542SYM_CODE_START_LOCAL(__bad_stack)
 543	/*
 544	 * We detected an overflow in kernel_ventry, which switched to the
 545	 * overflow stack. Stash the exception regs, and head to our overflow
 546	 * handler.
 547	 */
 548
 549	/* Restore the original x0 value */
 550	mrs	x0, tpidrro_el0
 551
 552	/*
 553	 * Store the original GPRs to the new stack. The original SP (minus
 554	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
 555	 */
 556	sub	sp, sp, #PT_REGS_SIZE
 557	kernel_entry 1
 558	mrs	x0, tpidr_el0
 559	add	x0, x0, #PT_REGS_SIZE
 560	str	x0, [sp, #S_SP]
 561
 562	/* Stash the regs for handle_bad_stack */
 563	mov	x0, sp
 564
 565	/* Time to die */
 566	bl	handle_bad_stack
 567	ASM_BUG()
 568SYM_CODE_END(__bad_stack)
 569#endif /* CONFIG_VMAP_STACK */
 570
 571
 572	.macro entry_handler el:req, ht:req, regsize:req, label:req
 573SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
 574	kernel_entry \el, \regsize
 575	mov	x0, sp
 576	bl	el\el\ht\()_\regsize\()_\label\()_handler
 577	.if \el == 0
 578	b	ret_to_user
 579	.else
 580	b	ret_to_kernel
 581	.endif
 582SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
 583	.endm
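As an example of the generator above, `entry_handler 0, t, 64, sync` expands to the stub below, so every vector funnels a struct pt_regs pointer into its C handler and then takes the common return path:

SYM_CODE_START_LOCAL(el0t_64_sync)
	kernel_entry 0, 64
	mov	x0, sp				// struct pt_regs *
	bl	el0t_64_sync_handler
	b	ret_to_user
SYM_CODE_END(el0t_64_sync)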
 584
 585/*
 586 * Early exception handlers
 587 */
 588	entry_handler	1, t, 64, sync
 589	entry_handler	1, t, 64, irq
 590	entry_handler	1, t, 64, fiq
 591	entry_handler	1, t, 64, error
 592
 593	entry_handler	1, h, 64, sync
 594	entry_handler	1, h, 64, irq
 595	entry_handler	1, h, 64, fiq
 596	entry_handler	1, h, 64, error
 597
 598	entry_handler	0, t, 64, sync
 599	entry_handler	0, t, 64, irq
 600	entry_handler	0, t, 64, fiq
 601	entry_handler	0, t, 64, error
 602
 603	entry_handler	0, t, 32, sync
 604	entry_handler	0, t, 32, irq
 605	entry_handler	0, t, 32, fiq
 606	entry_handler	0, t, 32, error
 607
 608SYM_CODE_START_LOCAL(ret_to_kernel)
 609	kernel_exit 1
 610SYM_CODE_END(ret_to_kernel)
 611
 612SYM_CODE_START_LOCAL(ret_to_user)
 613	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
 614	enable_step_tsk x19, x2
 615#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 616	bl	stackleak_erase_on_task_stack
 617#endif
 618	kernel_exit 0
 619SYM_CODE_END(ret_to_user)
 620
 621	.popsection				// .entry.text
 622
 623	// Move from tramp_pg_dir to swapper_pg_dir
 624	.macro tramp_map_kernel, tmp
 625	mrs	\tmp, ttbr1_el1
 626	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
 627	bic	\tmp, \tmp, #USER_ASID_FLAG
 628	msr	ttbr1_el1, \tmp
 629#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
 630alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
 631	/* ASID already in \tmp[63:48] */
 632	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
 633	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
 634	/* 2MB boundary containing the vectors, so we nobble the walk cache */
 635	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
 636	isb
 637	tlbi	vae1, \tmp
 638	dsb	nsh
 639alternative_else_nop_endif
 640#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
 641	.endm
 642
 643	// Move from swapper_pg_dir to tramp_pg_dir
 644	.macro tramp_unmap_kernel, tmp
 645	mrs	\tmp, ttbr1_el1
 646	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
 647	orr	\tmp, \tmp, #USER_ASID_FLAG
 648	msr	ttbr1_el1, \tmp
 649	/*
 650	 * We avoid running the post_ttbr_update_workaround here because
 651	 * it's only needed by Cavium ThunderX, which requires KPTI to be
 652	 * disabled.
 653	 */
 654	.endm
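Both macros above rely on TTBR1_EL1 carrying the page-table base and the ASID in a single register: tramp_pg_dir and swapper_pg_dir sit TRAMP_SWAPPER_OFFSET apart, and USER_ASID_FLAG is the low bit of the ASID field, so one add/sub plus one bit operation switches both with a single MSR and no window where they disagree. Schematically:

	// exit  (tramp_unmap_kernel): ttbr1 = (ttbr1 - TRAMP_SWAPPER_OFFSET) | USER_ASID_FLAG
	//                             -> tramp_pg_dir, user ASID
	// entry (tramp_map_kernel):   ttbr1 = (ttbr1 + TRAMP_SWAPPER_OFFSET) & ~USER_ASID_FLAG
	//                             -> swapper_pg_dir, kernel ASID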
 655
 656	.macro		tramp_data_read_var	dst, var
 657#ifdef CONFIG_RELOCATABLE
 658	ldr		\dst, .L__tramp_data_\var
 659	.ifndef		.L__tramp_data_\var
 660	.pushsection	".entry.tramp.rodata", "a", %progbits
 661	.align		3
 662.L__tramp_data_\var:
 663	.quad		\var
 664	.popsection
 665	.endif
 666#else
 667	/*
 668	 * As !RELOCATABLE implies !RANDOMIZE_BASE the address is always a
 669	 * compile time constant (and hence not secret and not worth hiding).
 670	 *
 671	 * As statically allocated kernel code and data always live in the top
 672	 * 47 bits of the address space we can sign-extend bit 47 and avoid an
 673	 * instruction to load the upper 16 bits (which must be 0xFFFF).
 674	 */
 675	movz		\dst, :abs_g2_s:\var
 676	movk		\dst, :abs_g1_nc:\var
 677	movk		\dst, :abs_g0_nc:\var
 678#endif
 679	.endm
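For CONFIG_RELOCATABLE kernels, the first use of the macro above for a given variable also drops a literal into .entry.tramp.rodata, which is part of the always-mapped trampoline image; `tramp_data_read_var x30, vectors` therefore becomes a PC-relative literal load of the (boot-time relocated) address, roughly:

	ldr	x30, .L__tramp_data_vectors	// literal load, reachable via the trampoline mapping

	.pushsection	".entry.tramp.rodata", "a", %progbits
	.align		3
.L__tramp_data_vectors:
	.quad		vectors			// fixed up by the boot-time relocation pass
	.popsection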
 680
 681#define BHB_MITIGATION_NONE	0
 682#define BHB_MITIGATION_LOOP	1
 683#define BHB_MITIGATION_FW	2
 684#define BHB_MITIGATION_INSN	3
 685
 686	.macro tramp_ventry, vector_start, regsize, kpti, bhb
 687	.align	7
 6881:
 689	.if	\regsize == 64
 690	msr	tpidrro_el0, x30	// Restored in kernel_ventry
 691	.endif
 692
 693	.if	\bhb == BHB_MITIGATION_LOOP
 694	/*
 695	 * This sequence must appear before the first indirect branch, i.e. the
 696	 * ret out of tramp_ventry. It appears here because x30 is free.
 697	 */
 698	__mitigate_spectre_bhb_loop	x30
 699	.endif // \bhb == BHB_MITIGATION_LOOP
 700
 701	.if	\bhb == BHB_MITIGATION_INSN
 702	clearbhb
 703	isb
 704	.endif // \bhb == BHB_MITIGATION_INSN
 705
 706	.if	\kpti == 1
 707	/*
 708	 * Defend against branch aliasing attacks by pushing a dummy
 709	 * entry onto the return stack and using a RET instruction to
 710	 * enter the full-fat kernel vectors.
 711	 */
 712	bl	2f
 713	b	.
 7142:
 715	tramp_map_kernel	x30
 716alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
 717	tramp_data_read_var	x30, vectors
 718alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
 719	prfm	plil1strm, [x30, #(1b - \vector_start)]
 720alternative_else_nop_endif
 721
 722	msr	vbar_el1, x30
 723	isb
 724	.else
 725	adr_l	x30, vectors
 726	.endif // \kpti == 1
 727
 728	.if	\bhb == BHB_MITIGATION_FW
 729	/*
 730	 * The firmware sequence must appear before the first indirect branch,
 731	 * i.e. the ret out of tramp_ventry. But it also needs the stack to be
 732	 * mapped to save/restore the registers the SMC clobbers.
 733	 */
 734	__mitigate_spectre_bhb_fw
 735	.endif // \bhb == BHB_MITIGATION_FW
 736
 737	add	x30, x30, #(1b - \vector_start + 4)
 738	ret
 739.org 1b + 128	// Did we overflow the ventry slot?
 740	.endm
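The `+ 4` in the final add is the counterpart of the comment at the top of kernel_ventry: the computed target is the matching slot of the real vector table advanced past its first instruction (the `b .Lskip_tramp_vectors_cleanup`), so an entry that came through the trampoline does run the x30/tpidrro_el0 cleanup that a direct vector entry skips.

	// The ret above resumes at:
	//	<real vectors> + <offset of this slot within the table> + 4
	// i.e. at the cleanup sequence inside the EL0 kernel_ventry, not at its
	// leading skip-branch.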
 741
 742	.macro	generate_tramp_vector,	kpti, bhb
 743.Lvector_start\@:
 744	.space	0x400
 745
 746	.rept	4
 747	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
 748	.endr
 749	.rept	4
 750	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
 751	.endr
 752	.endm
 753
 754#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 755/*
 756 * Exception vectors trampoline.
 757 * The order must match __bp_harden_el1_vectors and the
 758 * arm64_bp_harden_el1_vectors enum.
 759 */
 760	.pushsection ".entry.tramp.text", "ax"
 761	.align	11
 762SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors)
 763#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
 764	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
 765	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
 766	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
 767#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
 768	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
 769SYM_CODE_END(tramp_vectors)
 770
 771SYM_CODE_START_LOCAL(tramp_exit)
 772	tramp_unmap_kernel	x29
 773	mrs		x29, far_el1		// restore x29
 774	eret
 775	sb
 776SYM_CODE_END(tramp_exit)
 777	.popsection				// .entry.tramp.text
 778#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 779
 780/*
 781 * Exception vectors for spectre mitigations on entry from EL1 when
 782 * kpti is not in use.
 783 */
 784	.macro generate_el1_vector, bhb
 785.Lvector_start\@:
 786	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
 787	kernel_ventry	1, t, 64, irq		// IRQ EL1t
 788	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
 789	kernel_ventry	1, t, 64, error		// Error EL1t
 790
 791	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
 792	kernel_ventry	1, h, 64, irq		// IRQ EL1h
 793	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
 794	kernel_ventry	1, h, 64, error		// Error EL1h
 795
 796	.rept	4
 797	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
 798	.endr
 799	.rept 4
 800	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
 801	.endr
 802	.endm
 803
 804/* The order must match tramp_vectors and the arm64_bp_harden_el1_vectors enum. */
 805	.pushsection ".entry.text", "ax"
 806	.align	11
 807SYM_CODE_START(__bp_harden_el1_vectors)
 808#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
 809	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
 810	generate_el1_vector	bhb=BHB_MITIGATION_FW
 811	generate_el1_vector	bhb=BHB_MITIGATION_INSN
 812#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
 813SYM_CODE_END(__bp_harden_el1_vectors)
 814	.popsection
 815
 816
 817/*
 818 * Register switch for AArch64. The callee-saved registers need to be saved
 819 * and restored. On entry:
 820 *   x0 = previous task_struct (must be preserved across the switch)
 821 *   x1 = next task_struct
 822 * Previous and next are guaranteed not to be the same.
 823 *
 824 */
 825SYM_FUNC_START(cpu_switch_to)
 826	mov	x10, #THREAD_CPU_CONTEXT
 827	add	x8, x0, x10
 828	mov	x9, sp
 829	stp	x19, x20, [x8], #16		// store callee-saved registers
 830	stp	x21, x22, [x8], #16
 831	stp	x23, x24, [x8], #16
 832	stp	x25, x26, [x8], #16
 833	stp	x27, x28, [x8], #16
 834	stp	x29, x9, [x8], #16
 835	str	lr, [x8]
 836	add	x8, x1, x10
 837	ldp	x19, x20, [x8], #16		// restore callee-saved registers
 838	ldp	x21, x22, [x8], #16
 839	ldp	x23, x24, [x8], #16
 840	ldp	x25, x26, [x8], #16
 841	ldp	x27, x28, [x8], #16
 842	ldp	x29, x9, [x8], #16
 843	ldr	lr, [x8]
 844	mov	sp, x9
 845	msr	sp_el0, x1
 846	ptrauth_keys_install_kernel x1, x8, x9, x10
 847	scs_save x0
 848	scs_load_current
 849	ret
 850SYM_FUNC_END(cpu_switch_to)
 851NOKPROBE(cpu_switch_to)
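The offsets walked by x8 above mirror struct cpu_context (reached through THREAD_CPU_CONTEXT): the callee-saved registers followed by fp, sp and pc. Only this small set needs to be switched explicitly; everything else is either caller-saved under the C calling convention or already sitting in the task's pt_regs.

	// tsk + THREAD_CPU_CONTEXT, as stored/loaded above:
	//	x19 x20 x21 x22 x23 x24 x25 x26 x27 x28
	//	fp (x29), sp (x9), pc (lr of cpu_switch_to)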
 852
 853/*
 854 * This is how we return from a fork.
 855 */
 856SYM_CODE_START(ret_from_fork)
 857	bl	schedule_tail
 858	cbz	x19, 1f				// not a kernel thread
 859	mov	x0, x20
 860	blr	x19
 8611:	get_current_task tsk
 862	mov	x0, sp
 863	bl	asm_exit_to_user_mode
 864	b	ret_to_user
 865SYM_CODE_END(ret_from_fork)
 866NOKPROBE(ret_from_fork)
 867
 868/*
 869 * void call_on_irq_stack(struct pt_regs *regs,
 870 * 		          void (*func)(struct pt_regs *));
 871 *
 872 * Calls func(regs) using this CPU's irq stack and shadow irq stack.
 873 */
 874SYM_FUNC_START(call_on_irq_stack)
 875#ifdef CONFIG_SHADOW_CALL_STACK
 876	get_current_task x16
 877	scs_save x16
 878	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
 879#endif
 880
 881	/* Create a frame record to save our LR and SP (implicit in FP) */
 882	stp	x29, x30, [sp, #-16]!
 883	mov	x29, sp
 884
 885	ldr_this_cpu x16, irq_stack_ptr, x17
 886
 887	/* Move to the new stack and call the function there */
 888	add	sp, x16, #IRQ_STACK_SIZE
 889	blr	x1
 890
 891	/*
 892	 * Restore the SP from the FP, and restore the FP and LR from the frame
 893	 * record.
 894	 */
 895	mov	sp, x29
 896	ldp	x29, x30, [sp], #16
 897	scs_load_current
 898	ret
 899SYM_FUNC_END(call_on_irq_stack)
 900NOKPROBE(call_on_irq_stack)
 901
 902#ifdef CONFIG_ARM_SDE_INTERFACE
 903
 904#include <asm/sdei.h>
 905#include <uapi/linux/arm_sdei.h>
 906
 907.macro sdei_handler_exit exit_mode
 908	/* On success, this call never returns... */
 909	cmp	\exit_mode, #SDEI_EXIT_SMC
 910	b.ne	99f
 911	smc	#0
 912	b	.
 91399:	hvc	#0
 914	b	.
 915.endm
 916
 917#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 918/*
 919 * The regular SDEI entry point may have been unmapped along with the rest of
 920 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 921 * argument accessible.
 922 *
 923 * This clobbers x4; __sdei_handler() will restore it from the firmware's
 924 * copy.
 925 */
 926.pushsection ".entry.tramp.text", "ax"
 927SYM_CODE_START(__sdei_asm_entry_trampoline)
 928	mrs	x4, ttbr1_el1
 929	tbz	x4, #USER_ASID_BIT, 1f
 930
 931	tramp_map_kernel tmp=x4
 932	isb
 933	mov	x4, xzr
 934
 935	/*
 936	 * Remember whether to unmap the kernel on exit.
 937	 */
 9381:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
 939	tramp_data_read_var     x4, __sdei_asm_handler
 940	br	x4
 941SYM_CODE_END(__sdei_asm_entry_trampoline)
 942NOKPROBE(__sdei_asm_entry_trampoline)
 943
 944/*
 945 * Make the exit call and restore the original ttbr1_el1
 946 *
 947 * x0 & x1: setup for the exit API call
 948 * x2: exit_mode
 949 * x4: struct sdei_registered_event argument from registration time.
 950 */
 951SYM_CODE_START(__sdei_asm_exit_trampoline)
 952	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
 953	cbnz	x4, 1f
 954
 955	tramp_unmap_kernel	tmp=x4
 956
 9571:	sdei_handler_exit exit_mode=x2
 958SYM_CODE_END(__sdei_asm_exit_trampoline)
 959NOKPROBE(__sdei_asm_exit_trampoline)
 960.popsection		// .entry.tramp.text
 961#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 962
 963/*
 964 * Software Delegated Exception entry point.
 965 *
 966 * x0: Event number
 967 * x1: struct sdei_registered_event argument from registration time.
 968 * x2: interrupted PC
 969 * x3: interrupted PSTATE
 970 * x4: maybe clobbered by the trampoline
 971 *
 972 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
 973 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 974 * want them.
 975 */
 976SYM_CODE_START(__sdei_asm_handler)
 977	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
 978	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
 979	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
 980	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
 981	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
 982	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
 983	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
 984	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
 985	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
 986	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
 987	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
 988	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
 989	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
 990	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
 991	mov	x4, sp
 992	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
 993
 994	mov	x19, x1
 995
 996	/* Store the registered-event for crash_smp_send_stop() */
 997	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
 998	cbnz	w4, 1f
 999	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
1000	b	2f
10011:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
10022:	str	x19, [x5]
1003
1004#ifdef CONFIG_VMAP_STACK
1005	/*
1006	 * entry.S may have been using sp as a scratch register; find whether
1007	 * this is a normal or critical event and switch to the appropriate
1008	 * stack for this CPU.
1009	 */
1010	cbnz	w4, 1f
1011	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1012	b	2f
10131:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
10142:	mov	x6, #SDEI_STACK_SIZE
1015	add	x5, x5, x6
1016	mov	sp, x5
1017#endif
1018
1019#ifdef CONFIG_SHADOW_CALL_STACK
1020	/* Use a separate shadow call stack for normal and critical events */
1021	cbnz	w4, 3f
1022	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
1023	b	4f
10243:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
10254:
1026#endif
1027
1028	/*
1029	 * We may have interrupted userspace, or a guest, or exit-from or
1030	 * return-to either of these. We can't trust sp_el0; restore it.
1031	 */
1032	mrs	x28, sp_el0
1033	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1034	msr	sp_el0, x0
1035
1036	/* If we interrupted the kernel, point to the previous stack/frame. */
1037	and     x0, x3, #0xc
1038	mrs     x1, CurrentEL
1039	cmp     x0, x1
1040	csel	x29, x29, xzr, eq	// fp, or zero
1041	csel	x4, x2, xzr, eq		// elr, or zero
1042
1043	stp	x29, x4, [sp, #-16]!
1044	mov	x29, sp
1045
1046	add	x0, x19, #SDEI_EVENT_INTREGS
1047	mov	x1, x19
1048	bl	__sdei_handler
1049
1050	msr	sp_el0, x28
1051	/* restore regs >x17 that we clobbered */
1052	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1053	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1054	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1055	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1056	mov	sp, x1
1057
1058	mov	x1, x0			// address to complete_and_resume
1059	/* x0 = (x0 <= SDEI_EV_FAILED) ?
1060	 * EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME
1061	 */
1062	cmp	x0, #SDEI_EV_FAILED
1063	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1064	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1065	csel	x0, x2, x3, ls
1066
1067	ldr_l	x2, sdei_exit_mode
1068
1069	/* Clear the registered-event seen by crash_smp_send_stop() */
1070	ldrb	w3, [x4, #SDEI_EVENT_PRIORITY]
1071	cbnz	w3, 1f
1072	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
1073	b	2f
10741:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
10752:	str	xzr, [x5]
1076
1077alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1078	sdei_handler_exit exit_mode=x2
1079alternative_else_nop_endif
1080
1081#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1082	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
1083	br	x5
1084#endif
1085SYM_CODE_END(__sdei_asm_handler)
1086NOKPROBE(__sdei_asm_handler)
1087
1088SYM_CODE_START(__sdei_handler_abort)
1089	mov_q	x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1090	adr	x1, 1f
1091	ldr_l	x2, sdei_exit_mode
1092	sdei_handler_exit exit_mode=x2
1093	// exit the handler and jump to the next instruction.
1094	// Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx.
10951:	ret
1096SYM_CODE_END(__sdei_handler_abort)
1097NOKPROBE(__sdei_handler_abort)
1098#endif /* CONFIG_ARM_SDE_INTERFACE */