v5.4 (arch/arm64/kernel/entry.S)
   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 * Low-level exception handling code
   4 *
   5 * Copyright (C) 2012 ARM Ltd.
   6 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
   7 *		Will Deacon <will.deacon@arm.com>
   8 */
   9
  10#include <linux/arm-smccc.h>
  11#include <linux/init.h>
  12#include <linux/linkage.h>
  13
  14#include <asm/alternative.h>
  15#include <asm/assembler.h>
  16#include <asm/asm-offsets.h>
  17#include <asm/cpufeature.h>
  18#include <asm/errno.h>
  19#include <asm/esr.h>
  20#include <asm/irq.h>
  21#include <asm/memory.h>
  22#include <asm/mmu.h>
  23#include <asm/processor.h>
  24#include <asm/ptrace.h>
  25#include <asm/thread_info.h>
  26#include <asm/asm-uaccess.h>
  27#include <asm/unistd.h>
  28
  29/*
  30 * Context tracking subsystem.  Used to instrument transitions
  31 * between user and kernel mode.
  32 */
  33	.macro ct_user_exit_irqoff
  34#ifdef CONFIG_CONTEXT_TRACKING
  35	bl	enter_from_user_mode
  36#endif
  37	.endm
  38
  39	.macro ct_user_enter
  40#ifdef CONFIG_CONTEXT_TRACKING
  41	bl	context_tracking_user_enter
  42#endif
  43	.endm
  44
  45	.macro	clear_gp_regs
  46	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
  47	mov	x\n, xzr
  48	.endr
  49	.endm
  50
  51/*
  52 * Bad Abort numbers
  53 *-----------------
  54 */
  55#define BAD_SYNC	0
  56#define BAD_IRQ		1
  57#define BAD_FIQ		2
  58#define BAD_ERROR	3
  59
  60	.macro kernel_ventry, el, label, regsize = 64
  61	.align 7
  62#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  63alternative_if ARM64_UNMAP_KERNEL_AT_EL0
  64	.if	\el == 0
  65	.if	\regsize == 64
  66	mrs	x30, tpidrro_el0
  67	msr	tpidrro_el0, xzr
  68	.else
  69	mov	x30, xzr
  70	.endif
  71	.endif
  72alternative_else_nop_endif
  73#endif
  74
  75	sub	sp, sp, #S_FRAME_SIZE
  76#ifdef CONFIG_VMAP_STACK
  77	/*
  78	 * Test whether the SP has overflowed, without corrupting a GPR.
  79	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
  80	 */
  81	add	sp, sp, x0			// sp' = sp + x0
  82	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
  83	tbnz	x0, #THREAD_SHIFT, 0f
  84	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
  85	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
  86	b	el\()\el\()_\label
  87
  880:
  89	/*
  90	 * Either we've just detected an overflow, or we've taken an exception
  91	 * while on the overflow stack. Either way, we won't return to
  92	 * userspace, and can clobber EL0 registers to free up GPRs.
  93	 */
  94
  95	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
  96	msr	tpidr_el0, x0
  97
  98	/* Recover the original x0 value and stash it in tpidrro_el0 */
  99	sub	x0, sp, x0
 100	msr	tpidrro_el0, x0
 101
 102	/* Switch to the overflow stack */
 103	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
 104
 105	/*
 106	 * Check whether we were already on the overflow stack. This may happen
 107	 * after panic() re-enables interrupts.
 108	 */
 109	mrs	x0, tpidr_el0			// sp of interrupted context
 110	sub	x0, sp, x0			// delta with top of overflow stack
 111	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
 112	b.ne	__bad_stack			// no? -> bad stack pointer
 113
 114	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
 115	sub	sp, sp, x0
 116	mrs	x0, tpidrro_el0
 117#endif
 118	b	el\()\el\()_\label
 119	.endm
 120
 121	.macro tramp_alias, dst, sym
 122	mov_q	\dst, TRAMP_VALIAS
 123	add	\dst, \dst, #(\sym - .entry.tramp.text)
 124	.endm
 125
 126	// This macro corrupts x0-x3. It is the caller's duty
 127	// to save/restore them if required.
 128	.macro	apply_ssbd, state, tmp1, tmp2
 129#ifdef CONFIG_ARM64_SSBD
 130alternative_cb	arm64_enable_wa2_handling
 131	b	.L__asm_ssbd_skip\@
 132alternative_cb_end
 133	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
 134	cbz	\tmp2,	.L__asm_ssbd_skip\@
 135	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
 136	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
 137	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
 138	mov	w1, #\state
 139alternative_cb	arm64_update_smccc_conduit
 140	nop					// Patched to SMC/HVC #0
 141alternative_cb_end
 142.L__asm_ssbd_skip\@:
 143#endif
 144	.endm
 145
 146	.macro	kernel_entry, el, regsize = 64
 147	.if	\regsize == 32
 148	mov	w0, w0				// zero upper 32 bits of x0
 149	.endif
 150	stp	x0, x1, [sp, #16 * 0]
 151	stp	x2, x3, [sp, #16 * 1]
 152	stp	x4, x5, [sp, #16 * 2]
 153	stp	x6, x7, [sp, #16 * 3]
 154	stp	x8, x9, [sp, #16 * 4]
 155	stp	x10, x11, [sp, #16 * 5]
 156	stp	x12, x13, [sp, #16 * 6]
 157	stp	x14, x15, [sp, #16 * 7]
 158	stp	x16, x17, [sp, #16 * 8]
 159	stp	x18, x19, [sp, #16 * 9]
 160	stp	x20, x21, [sp, #16 * 10]
 161	stp	x22, x23, [sp, #16 * 11]
 162	stp	x24, x25, [sp, #16 * 12]
 163	stp	x26, x27, [sp, #16 * 13]
 164	stp	x28, x29, [sp, #16 * 14]
 165
 166	.if	\el == 0
 167	clear_gp_regs
 168	mrs	x21, sp_el0
 169	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
 170	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
 171	disable_step_tsk x19, x20		// exceptions when scheduling.
 172
 173	apply_ssbd 1, x22, x23
 174
 175	.else
 176	add	x21, sp, #S_FRAME_SIZE
 177	get_current_task tsk
 178	/* Save the task's original addr_limit and set USER_DS */
 179	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 180	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
 181	mov	x20, #USER_DS
 182	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 183	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
 184	.endif /* \el == 0 */
 185	mrs	x22, elr_el1
 186	mrs	x23, spsr_el1
 187	stp	lr, x21, [sp, #S_LR]
 188
 189	/*
 190	 * In order to be able to dump the contents of struct pt_regs at the
 191	 * time the exception was taken (in case we attempt to walk the call
 192	 * stack later), chain it together with the stack frames.
 193	 */
 194	.if \el == 0
 195	stp	xzr, xzr, [sp, #S_STACKFRAME]
 196	.else
 197	stp	x29, x22, [sp, #S_STACKFRAME]
 198	.endif
 199	add	x29, sp, #S_STACKFRAME
 200
 201#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 202	/*
 203	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
 204	 * EL0, there is no need to check the state of TTBR0_EL1 since
 205	 * accesses are always enabled.
 206	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
 207	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
 208	 * user mappings.
 209	 */
 210alternative_if ARM64_HAS_PAN
 211	b	1f				// skip TTBR0 PAN
 212alternative_else_nop_endif
 213
 214	.if	\el != 0
 215	mrs	x21, ttbr0_el1
 216	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
 217	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
 218	b.eq	1f				// TTBR0 access already disabled
 219	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
 220	.endif
 221
 222	__uaccess_ttbr0_disable x21
 2231:
 224#endif
 225
 226	stp	x22, x23, [sp, #S_PC]
 227
 228	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
 229	.if	\el == 0
 230	mov	w21, #NO_SYSCALL
 231	str	w21, [sp, #S_SYSCALLNO]
 232	.endif
 233
 234	/*
 235	 * Set sp_el0 to current thread_info.
 236	 */
 237	.if	\el == 0
 238	msr	sp_el0, tsk
 239	.endif
 240
 241	/* Save pmr */
 242alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 243	mrs_s	x20, SYS_ICC_PMR_EL1
 244	str	x20, [sp, #S_PMR_SAVE]
 245alternative_else_nop_endif
 246
 247	/*
 248	 * Registers that may be useful after this macro is invoked:
 249	 *
 250	 * x20 - ICC_PMR_EL1
 251	 * x21 - aborted SP
 252	 * x22 - aborted PC
 253	 * x23 - aborted PSTATE
 254	*/
 255	.endm
 256
 257	.macro	kernel_exit, el
 258	.if	\el != 0
 259	disable_daif
 260
 261	/* Restore the task's original addr_limit. */
 262	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
 263	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
 264
 265	/* No need to restore UAO, it will be restored from SPSR_EL1 */
 266	.endif
 267
 268	/* Restore pmr */
 269alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 270	ldr	x20, [sp, #S_PMR_SAVE]
 271	msr_s	SYS_ICC_PMR_EL1, x20
 272	/* Ensure priority change is seen by redistributor */
 273	dsb	sy
 274alternative_else_nop_endif
 275
 276	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 277	.if	\el == 0
 278	ct_user_enter
 279	.endif
 280
 281#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 282	/*
 283	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
 284	 * PAN bit checking.
 285	 */
 286alternative_if ARM64_HAS_PAN
 287	b	2f				// skip TTBR0 PAN
 288alternative_else_nop_endif
 289
 290	.if	\el != 0
 291	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
 292	.endif
 293
 294	__uaccess_ttbr0_enable x0, x1
 295
 296	.if	\el == 0
 297	/*
 298	 * Enable errata workarounds only if returning to user. The only
 299	 * workaround currently required for TTBR0_EL1 changes are for the
 300	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
 301	 * corruption).
 302	 */
 303	bl	post_ttbr_update_workaround
 304	.endif
 3051:
 306	.if	\el != 0
 307	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
 308	.endif
 3092:
 310#endif
 311
 312	.if	\el == 0
 313	ldr	x23, [sp, #S_SP]		// load return stack pointer
 314	msr	sp_el0, x23
 315	tst	x22, #PSR_MODE32_BIT		// native task?
 316	b.eq	3f
 317
 318#ifdef CONFIG_ARM64_ERRATUM_845719
 319alternative_if ARM64_WORKAROUND_845719
 320#ifdef CONFIG_PID_IN_CONTEXTIDR
 321	mrs	x29, contextidr_el1
 322	msr	contextidr_el1, x29
 323#else
 324	msr contextidr_el1, xzr
 325#endif
 326alternative_else_nop_endif
 327#endif
 3283:
 329#ifdef CONFIG_ARM64_ERRATUM_1418040
 330alternative_if_not ARM64_WORKAROUND_1418040
 331	b	4f
 332alternative_else_nop_endif
 333	/*
 334	 * if (x22.mode32 == cntkctl_el1.el0vcten)
 335	 *     cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
 336	 */
 337	mrs	x1, cntkctl_el1
 338	eon	x0, x1, x22, lsr #3
 339	tbz	x0, #1, 4f
 340	eor	x1, x1, #2	// ARCH_TIMER_USR_VCT_ACCESS_EN
 341	msr	cntkctl_el1, x1
 3424:
 343#endif
 344	apply_ssbd 0, x0, x1
 345	.endif
 346
 347	msr	elr_el1, x21			// set up the return data
 348	msr	spsr_el1, x22
 349	ldp	x0, x1, [sp, #16 * 0]
 350	ldp	x2, x3, [sp, #16 * 1]
 351	ldp	x4, x5, [sp, #16 * 2]
 352	ldp	x6, x7, [sp, #16 * 3]
 353	ldp	x8, x9, [sp, #16 * 4]
 354	ldp	x10, x11, [sp, #16 * 5]
 355	ldp	x12, x13, [sp, #16 * 6]
 356	ldp	x14, x15, [sp, #16 * 7]
 357	ldp	x16, x17, [sp, #16 * 8]
 358	ldp	x18, x19, [sp, #16 * 9]
 359	ldp	x20, x21, [sp, #16 * 10]
 360	ldp	x22, x23, [sp, #16 * 11]
 361	ldp	x24, x25, [sp, #16 * 12]
 362	ldp	x26, x27, [sp, #16 * 13]
 363	ldp	x28, x29, [sp, #16 * 14]
 364	ldr	lr, [sp, #S_LR]
 365	add	sp, sp, #S_FRAME_SIZE		// restore sp
 366
 367	.if	\el == 0
 368alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 369#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 370	bne	5f
 371	msr	far_el1, x30
 372	tramp_alias	x30, tramp_exit_native
 373	br	x30
 3745:
 375	tramp_alias	x30, tramp_exit_compat
 376	br	x30
 377#endif
 378	.else
 379	eret
 380	.endif
 381	sb
 382	.endm
 383
 384	.macro	irq_stack_entry
 385	mov	x19, sp			// preserve the original sp
 386
 387	/*
 388	 * Compare sp with the base of the task stack.
 389	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
 390	 * and should switch to the irq stack.
 391	 */
 392	ldr	x25, [tsk, TSK_STACK]
 393	eor	x25, x25, x19
 394	and	x25, x25, #~(THREAD_SIZE - 1)
 395	cbnz	x25, 9998f
 396
 397	ldr_this_cpu x25, irq_stack_ptr, x26
 398	mov	x26, #IRQ_STACK_SIZE
 399	add	x26, x25, x26
 400
 401	/* switch to the irq stack */
 402	mov	sp, x26
 4039998:
 404	.endm
 405
 406	/*
 407	 * x19 should be preserved between irq_stack_entry and
 408	 * irq_stack_exit.
 409	 */
 410	.macro	irq_stack_exit
 411	mov	sp, x19
 412	.endm
 413
 414/* GPRs used by entry code */
 415tsk	.req	x28		// current thread_info
 416
 417/*
 418 * Interrupt handling.
 419 */
 420	.macro	irq_handler
 421	ldr_l	x1, handle_arch_irq
 422	mov	x0, sp
 423	irq_stack_entry
 424	blr	x1
 425	irq_stack_exit
 426	.endm
 427
 428#ifdef CONFIG_ARM64_PSEUDO_NMI
 429	/*
 430	 * Set res to 0 if irqs were unmasked in interrupted context.
 431	 * Otherwise set res to non-0 value.
 432	 */
 433	.macro	test_irqs_unmasked res:req, pmr:req
 434alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 435	sub	\res, \pmr, #GIC_PRIO_IRQON
 436alternative_else
 437	mov	\res, xzr
 438alternative_endif
 439	.endm
 440#endif
 441
 442	.macro	gic_prio_kentry_setup, tmp:req
 443#ifdef CONFIG_ARM64_PSEUDO_NMI
 444	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 445	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
 446	msr_s	SYS_ICC_PMR_EL1, \tmp
 447	alternative_else_nop_endif
 448#endif
 449	.endm
 450
 451	.macro	gic_prio_irq_setup, pmr:req, tmp:req
 452#ifdef CONFIG_ARM64_PSEUDO_NMI
 453	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 454	orr	\tmp, \pmr, #GIC_PRIO_PSR_I_SET
 455	msr_s	SYS_ICC_PMR_EL1, \tmp
 456	alternative_else_nop_endif
 457#endif
 458	.endm
 459
 460	.text
 461
 462/*
 463 * Exception vectors.
 464 */
 465	.pushsection ".entry.text", "ax"
 466
 467	.align	11
 468ENTRY(vectors)
 469	kernel_ventry	1, sync_invalid			// Synchronous EL1t
 470	kernel_ventry	1, irq_invalid			// IRQ EL1t
 471	kernel_ventry	1, fiq_invalid			// FIQ EL1t
 472	kernel_ventry	1, error_invalid		// Error EL1t
 473
 474	kernel_ventry	1, sync				// Synchronous EL1h
 475	kernel_ventry	1, irq				// IRQ EL1h
 476	kernel_ventry	1, fiq_invalid			// FIQ EL1h
 477	kernel_ventry	1, error			// Error EL1h
 478
 479	kernel_ventry	0, sync				// Synchronous 64-bit EL0
 480	kernel_ventry	0, irq				// IRQ 64-bit EL0
 481	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
 482	kernel_ventry	0, error			// Error 64-bit EL0
 483
 484#ifdef CONFIG_COMPAT
 485	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
 486	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
 487	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
 488	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
 489#else
 490	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
 491	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
 492	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
 493	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
 494#endif
 495END(vectors)
 496
 497#ifdef CONFIG_VMAP_STACK
 498	/*
 499	 * We detected an overflow in kernel_ventry, which switched to the
 500	 * overflow stack. Stash the exception regs, and head to our overflow
 501	 * handler.
 502	 */
 503__bad_stack:
 504	/* Restore the original x0 value */
 505	mrs	x0, tpidrro_el0
 506
 507	/*
  508	 * Store the original GPRs to the new stack. The original SP (minus
 509	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
 510	 */
 511	sub	sp, sp, #S_FRAME_SIZE
 512	kernel_entry 1
 513	mrs	x0, tpidr_el0
 514	add	x0, x0, #S_FRAME_SIZE
 515	str	x0, [sp, #S_SP]
 516
 517	/* Stash the regs for handle_bad_stack */
 518	mov	x0, sp
 519
 520	/* Time to die */
 521	bl	handle_bad_stack
 522	ASM_BUG()
 523#endif /* CONFIG_VMAP_STACK */
 524
 525/*
 526 * Invalid mode handlers
 527 */
 528	.macro	inv_entry, el, reason, regsize = 64
 529	kernel_entry \el, \regsize
 530	mov	x0, sp
 531	mov	x1, #\reason
 532	mrs	x2, esr_el1
 533	bl	bad_mode
 534	ASM_BUG()
 535	.endm
 536
 537el0_sync_invalid:
 538	inv_entry 0, BAD_SYNC
 539ENDPROC(el0_sync_invalid)
 540
 541el0_irq_invalid:
 542	inv_entry 0, BAD_IRQ
 543ENDPROC(el0_irq_invalid)
 544
 545el0_fiq_invalid:
 546	inv_entry 0, BAD_FIQ
 547ENDPROC(el0_fiq_invalid)
 548
 549el0_error_invalid:
 550	inv_entry 0, BAD_ERROR
 551ENDPROC(el0_error_invalid)
 552
 553#ifdef CONFIG_COMPAT
 554el0_fiq_invalid_compat:
 555	inv_entry 0, BAD_FIQ, 32
 556ENDPROC(el0_fiq_invalid_compat)
 557#endif
 558
 559el1_sync_invalid:
 560	inv_entry 1, BAD_SYNC
 561ENDPROC(el1_sync_invalid)
 562
 563el1_irq_invalid:
 564	inv_entry 1, BAD_IRQ
 565ENDPROC(el1_irq_invalid)
 566
 567el1_fiq_invalid:
 568	inv_entry 1, BAD_FIQ
 569ENDPROC(el1_fiq_invalid)
 570
 571el1_error_invalid:
 572	inv_entry 1, BAD_ERROR
 573ENDPROC(el1_error_invalid)
 574
 575/*
 576 * EL1 mode handlers.
 577 */
 578	.align	6
 579el1_sync:
 580	kernel_entry 1
 581	mrs	x1, esr_el1			// read the syndrome register
 582	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
 583	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
 584	b.eq	el1_da
 585	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
 586	b.eq	el1_ia
 587	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
 588	b.eq	el1_undef
 589	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
 590	b.eq	el1_pc
 591	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
 592	b.eq	el1_undef
 593	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
 594	b.ge	el1_dbg
 595	b	el1_inv
 596
 597el1_ia:
 598	/*
 599	 * Fall through to the Data abort case
 600	 */
 601el1_da:
 602	/*
 603	 * Data abort handling
 604	 */
 605	mrs	x3, far_el1
 606	inherit_daif	pstate=x23, tmp=x2
 607	untagged_addr	x0, x3
 608	mov	x2, sp				// struct pt_regs
 609	bl	do_mem_abort
 610
 611	kernel_exit 1
 612el1_pc:
 613	/*
 614	 * PC alignment exception handling. We don't handle SP alignment faults,
 615	 * since we will have hit a recursive exception when trying to push the
 616	 * initial pt_regs.
 617	 */
 618	mrs	x0, far_el1
 619	inherit_daif	pstate=x23, tmp=x2
 620	mov	x2, sp
 621	bl	do_sp_pc_abort
 622	ASM_BUG()
 623el1_undef:
 624	/*
 625	 * Undefined instruction
 626	 */
 627	inherit_daif	pstate=x23, tmp=x2
 628	mov	x0, sp
 629	bl	do_undefinstr
 630	kernel_exit 1
 631el1_dbg:
 632	/*
 633	 * Debug exception handling
 634	 */
 635	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
 636	cinc	x24, x24, eq			// set bit '0'
 637	tbz	x24, #0, el1_inv		// EL1 only
 638	gic_prio_kentry_setup tmp=x3
 639	mrs	x0, far_el1
 640	mov	x2, sp				// struct pt_regs
 641	bl	do_debug_exception
 642	kernel_exit 1
 643el1_inv:
 644	// TODO: add support for undefined instructions in kernel mode
 645	inherit_daif	pstate=x23, tmp=x2
 646	mov	x0, sp
 647	mov	x2, x1
 648	mov	x1, #BAD_SYNC
 649	bl	bad_mode
 650	ASM_BUG()
 651ENDPROC(el1_sync)
 652
 653	.align	6
 654el1_irq:
 655	kernel_entry 1
 656	gic_prio_irq_setup pmr=x20, tmp=x1
 657	enable_da_f
 658
 659#ifdef CONFIG_ARM64_PSEUDO_NMI
 660	test_irqs_unmasked	res=x0, pmr=x20
 661	cbz	x0, 1f
 662	bl	asm_nmi_enter
 6631:
 664#endif
 665
 666#ifdef CONFIG_TRACE_IRQFLAGS
 667	bl	trace_hardirqs_off
 668#endif
 669
 670	irq_handler
 671
 672#ifdef CONFIG_PREEMPT
 673	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
 674alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 675	/*
 676	 * DA_F were cleared at start of handling. If anything is set in DAIF,
 677	 * we come back from an NMI, so skip preemption
 678	 */
 679	mrs	x0, daif
 680	orr	x24, x24, x0
 681alternative_else_nop_endif
 682	cbnz	x24, 1f				// preempt count != 0 || NMI return path
 683	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
 6841:
 685#endif
 686
 687#ifdef CONFIG_ARM64_PSEUDO_NMI
 688	/*
 689	 * When using IRQ priority masking, we can get spurious interrupts while
 690	 * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
 691	 * section with interrupts disabled. Skip tracing in those cases.
 692	 */
 693	test_irqs_unmasked	res=x0, pmr=x20
 694	cbz	x0, 1f
 695	bl	asm_nmi_exit
 6961:
 697#endif
 698
 699#ifdef CONFIG_TRACE_IRQFLAGS
 700#ifdef CONFIG_ARM64_PSEUDO_NMI
 701	test_irqs_unmasked	res=x0, pmr=x20
 702	cbnz	x0, 1f
 703#endif
 704	bl	trace_hardirqs_on
 7051:
 706#endif
 707
 708	kernel_exit 1
 709ENDPROC(el1_irq)
 710
 711/*
 712 * EL0 mode handlers.
 713 */
 714	.align	6
 715el0_sync:
 716	kernel_entry 0
 717	mrs	x25, esr_el1			// read the syndrome register
 718	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
 719	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
 720	b.eq	el0_svc
 721	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
 722	b.eq	el0_da
 723	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
 724	b.eq	el0_ia
 725	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
 726	b.eq	el0_fpsimd_acc
 727	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
 728	b.eq	el0_sve_acc
 729	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
 730	b.eq	el0_fpsimd_exc
 731	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
 732	ccmp	x24, #ESR_ELx_EC_WFx, #4, ne
 733	b.eq	el0_sys
 734	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
 735	b.eq	el0_sp
 736	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
 737	b.eq	el0_pc
 738	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
 739	b.eq	el0_undef
 740	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
 741	b.ge	el0_dbg
 742	b	el0_inv
 743
 744#ifdef CONFIG_COMPAT
 745	.align	6
 746el0_sync_compat:
 747	kernel_entry 0, 32
 748	mrs	x25, esr_el1			// read the syndrome register
 749	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
 750	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
 751	b.eq	el0_svc_compat
 752	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
 753	b.eq	el0_da
 754	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
 755	b.eq	el0_ia
 756	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
 757	b.eq	el0_fpsimd_acc
 758	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
 759	b.eq	el0_fpsimd_exc
 760	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
 761	b.eq	el0_pc
 762	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
 763	b.eq	el0_undef
 764	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
 765	b.eq	el0_cp15
 766	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
 767	b.eq	el0_cp15
 768	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
 769	b.eq	el0_undef
 770	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
 771	b.eq	el0_undef
 772	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
 773	b.eq	el0_undef
 774	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
 775	b.ge	el0_dbg
 776	b	el0_inv
 777el0_svc_compat:
 778	gic_prio_kentry_setup tmp=x1
 779	mov	x0, sp
 780	bl	el0_svc_compat_handler
 781	b	ret_to_user
 782
 783	.align	6
 784el0_irq_compat:
 785	kernel_entry 0, 32
 786	b	el0_irq_naked
 787
 788el0_error_compat:
 789	kernel_entry 0, 32
 790	b	el0_error_naked
 791
 792el0_cp15:
 793	/*
 794	 * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
 795	 */
 796	ct_user_exit_irqoff
 797	enable_daif
 798	mov	x0, x25
 799	mov	x1, sp
 800	bl	do_cp15instr
 801	b	ret_to_user
 802#endif
 803
 804el0_da:
 805	/*
 806	 * Data abort handling
 807	 */
 808	mrs	x26, far_el1
 809	ct_user_exit_irqoff
 810	enable_daif
 811	untagged_addr	x0, x26
 812	mov	x1, x25
 813	mov	x2, sp
 814	bl	do_mem_abort
 815	b	ret_to_user
 816el0_ia:
 817	/*
 818	 * Instruction abort handling
 819	 */
 820	mrs	x26, far_el1
 821	gic_prio_kentry_setup tmp=x0
 822	ct_user_exit_irqoff
 823	enable_da_f
 824#ifdef CONFIG_TRACE_IRQFLAGS
 825	bl	trace_hardirqs_off
 826#endif
 827	mov	x0, x26
 828	mov	x1, x25
 829	mov	x2, sp
 830	bl	do_el0_ia_bp_hardening
 831	b	ret_to_user
 832el0_fpsimd_acc:
 833	/*
 834	 * Floating Point or Advanced SIMD access
 835	 */
 836	ct_user_exit_irqoff
 837	enable_daif
 838	mov	x0, x25
 839	mov	x1, sp
 840	bl	do_fpsimd_acc
 841	b	ret_to_user
 842el0_sve_acc:
 843	/*
 844	 * Scalable Vector Extension access
 845	 */
 846	ct_user_exit_irqoff
 847	enable_daif
 848	mov	x0, x25
 849	mov	x1, sp
 850	bl	do_sve_acc
 851	b	ret_to_user
 852el0_fpsimd_exc:
 853	/*
 854	 * Floating Point, Advanced SIMD or SVE exception
 855	 */
 856	ct_user_exit_irqoff
 857	enable_daif
 858	mov	x0, x25
 859	mov	x1, sp
 860	bl	do_fpsimd_exc
 861	b	ret_to_user
 862el0_sp:
 863	ldr	x26, [sp, #S_SP]
 864	b	el0_sp_pc
 865el0_pc:
 866	mrs	x26, far_el1
 867el0_sp_pc:
 868	/*
 869	 * Stack or PC alignment exception handling
 870	 */
 871	gic_prio_kentry_setup tmp=x0
 872	ct_user_exit_irqoff
 873	enable_da_f
 874#ifdef CONFIG_TRACE_IRQFLAGS
 875	bl	trace_hardirqs_off
 876#endif
 877	mov	x0, x26
 878	mov	x1, x25
 879	mov	x2, sp
 880	bl	do_sp_pc_abort
 881	b	ret_to_user
 882el0_undef:
 883	/*
 884	 * Undefined instruction
 885	 */
 886	ct_user_exit_irqoff
 887	enable_daif
 888	mov	x0, sp
 889	bl	do_undefinstr
 890	b	ret_to_user
 891el0_sys:
 892	/*
 893	 * System instructions, for trapped cache maintenance instructions
 894	 */
 895	ct_user_exit_irqoff
 896	enable_daif
 897	mov	x0, x25
 898	mov	x1, sp
 899	bl	do_sysinstr
 900	b	ret_to_user
 901el0_dbg:
 902	/*
 903	 * Debug exception handling
 904	 */
 905	tbnz	x24, #0, el0_inv		// EL0 only
 906	mrs	x24, far_el1
 907	gic_prio_kentry_setup tmp=x3
 908	ct_user_exit_irqoff
 909	mov	x0, x24
 910	mov	x1, x25
 911	mov	x2, sp
 912	bl	do_debug_exception
 913	enable_da_f
 914	b	ret_to_user
 915el0_inv:
 916	ct_user_exit_irqoff
 917	enable_daif
 918	mov	x0, sp
 919	mov	x1, #BAD_SYNC
 920	mov	x2, x25
 921	bl	bad_el0_sync
 922	b	ret_to_user
 923ENDPROC(el0_sync)
 924
 925	.align	6
 926el0_irq:
 927	kernel_entry 0
 928el0_irq_naked:
 929	gic_prio_irq_setup pmr=x20, tmp=x0
 930	ct_user_exit_irqoff
 931	enable_da_f
 932
 933#ifdef CONFIG_TRACE_IRQFLAGS
 934	bl	trace_hardirqs_off
 935#endif
 936
 937#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 938	tbz	x22, #55, 1f
 939	bl	do_el0_irq_bp_hardening
 9401:
 941#endif
 942	irq_handler
 943
 944#ifdef CONFIG_TRACE_IRQFLAGS
 945	bl	trace_hardirqs_on
 946#endif
 947	b	ret_to_user
 948ENDPROC(el0_irq)
 949
 950el1_error:
 951	kernel_entry 1
 952	mrs	x1, esr_el1
 953	gic_prio_kentry_setup tmp=x2
 954	enable_dbg
 955	mov	x0, sp
 956	bl	do_serror
 957	kernel_exit 1
 958ENDPROC(el1_error)
 959
 960el0_error:
 961	kernel_entry 0
 962el0_error_naked:
 963	mrs	x25, esr_el1
 964	gic_prio_kentry_setup tmp=x2
 965	ct_user_exit_irqoff
 966	enable_dbg
 967	mov	x0, sp
 968	mov	x1, x25
 969	bl	do_serror
 970	enable_da_f
 971	b	ret_to_user
 972ENDPROC(el0_error)
 973
 974/*
 975 * Ok, we need to do extra processing, enter the slow path.
 976 */
 977work_pending:
 978	mov	x0, sp				// 'regs'
 979	bl	do_notify_resume
 980#ifdef CONFIG_TRACE_IRQFLAGS
 981	bl	trace_hardirqs_on		// enabled while in userspace
 982#endif
 983	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
 984	b	finish_ret_to_user
 985/*
 986 * "slow" syscall return path.
 987 */
 988ret_to_user:
 989	disable_daif
 990	gic_prio_kentry_setup tmp=x3
 991	ldr	x1, [tsk, #TSK_TI_FLAGS]
 992	and	x2, x1, #_TIF_WORK_MASK
 993	cbnz	x2, work_pending
 994finish_ret_to_user:
 995	enable_step_tsk x1, x2
 996#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 997	bl	stackleak_erase
 998#endif
 999	kernel_exit 0
1000ENDPROC(ret_to_user)
1001
1002/*
1003 * SVC handler.
1004 */
1005	.align	6
1006el0_svc:
1007	gic_prio_kentry_setup tmp=x1
1008	mov	x0, sp
1009	bl	el0_svc_handler
1010	b	ret_to_user
1011ENDPROC(el0_svc)
1012
1013	.popsection				// .entry.text
1014
1015#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1016/*
1017 * Exception vectors trampoline.
1018 */
1019	.pushsection ".entry.tramp.text", "ax"
1020
1021	.macro tramp_map_kernel, tmp
1022	mrs	\tmp, ttbr1_el1
1023	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
1024	bic	\tmp, \tmp, #USER_ASID_FLAG
1025	msr	ttbr1_el1, \tmp
1026#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
1027alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
1028	/* ASID already in \tmp[63:48] */
1029	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
1030	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
1031	/* 2MB boundary containing the vectors, so we nobble the walk cache */
1032	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
1033	isb
1034	tlbi	vae1, \tmp
1035	dsb	nsh
1036alternative_else_nop_endif
1037#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
1038	.endm
1039
1040	.macro tramp_unmap_kernel, tmp
1041	mrs	\tmp, ttbr1_el1
1042	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
1043	orr	\tmp, \tmp, #USER_ASID_FLAG
1044	msr	ttbr1_el1, \tmp
1045	/*
1046	 * We avoid running the post_ttbr_update_workaround here because
1047	 * it's only needed by Cavium ThunderX, which requires KPTI to be
1048	 * disabled.
1049	 */
1050	.endm
1051
1052	.macro tramp_ventry, regsize = 64
1053	.align	7
10541:
1055	.if	\regsize == 64
1056	msr	tpidrro_el0, x30	// Restored in kernel_ventry
1057	.endif
1058	/*
1059	 * Defend against branch aliasing attacks by pushing a dummy
1060	 * entry onto the return stack and using a RET instruction to
1061	 * enter the full-fat kernel vectors.
1062	 */
1063	bl	2f
1064	b	.
10652:
1066	tramp_map_kernel	x30
1067#ifdef CONFIG_RANDOMIZE_BASE
1068	adr	x30, tramp_vectors + PAGE_SIZE
1069alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
1070	ldr	x30, [x30]
1071#else
1072	ldr	x30, =vectors
1073#endif
1074alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
1075	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
1076alternative_else_nop_endif
1077	msr	vbar_el1, x30
1078	add	x30, x30, #(1b - tramp_vectors)
1079	isb
1080	ret
1081	.endm
1082
1083	.macro tramp_exit, regsize = 64
1084	adr	x30, tramp_vectors
1085	msr	vbar_el1, x30
1086	tramp_unmap_kernel	x30
1087	.if	\regsize == 64
1088	mrs	x30, far_el1
1089	.endif
1090	eret
1091	sb
1092	.endm
1093
1094	.align	11
1095ENTRY(tramp_vectors)
1096	.space	0x400
1097
1098	tramp_ventry
1099	tramp_ventry
1100	tramp_ventry
1101	tramp_ventry
1102
1103	tramp_ventry	32
1104	tramp_ventry	32
1105	tramp_ventry	32
1106	tramp_ventry	32
1107END(tramp_vectors)
1108
1109ENTRY(tramp_exit_native)
1110	tramp_exit
1111END(tramp_exit_native)
1112
1113ENTRY(tramp_exit_compat)
1114	tramp_exit	32
1115END(tramp_exit_compat)
1116
1117	.ltorg
1118	.popsection				// .entry.tramp.text
1119#ifdef CONFIG_RANDOMIZE_BASE
1120	.pushsection ".rodata", "a"
1121	.align PAGE_SHIFT
1122	.globl	__entry_tramp_data_start
1123__entry_tramp_data_start:
1124	.quad	vectors
1125	.popsection				// .rodata
1126#endif /* CONFIG_RANDOMIZE_BASE */
1127#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1128
1129/*
1130 * Register switch for AArch64. The callee-saved registers need to be saved
1131 * and restored. On entry:
1132 *   x0 = previous task_struct (must be preserved across the switch)
1133 *   x1 = next task_struct
1134 * Previous and next are guaranteed not to be the same.
1135 *
1136 */
1137ENTRY(cpu_switch_to)
1138	mov	x10, #THREAD_CPU_CONTEXT
1139	add	x8, x0, x10
1140	mov	x9, sp
1141	stp	x19, x20, [x8], #16		// store callee-saved registers
1142	stp	x21, x22, [x8], #16
1143	stp	x23, x24, [x8], #16
1144	stp	x25, x26, [x8], #16
1145	stp	x27, x28, [x8], #16
1146	stp	x29, x9, [x8], #16
1147	str	lr, [x8]
1148	add	x8, x1, x10
1149	ldp	x19, x20, [x8], #16		// restore callee-saved registers
1150	ldp	x21, x22, [x8], #16
1151	ldp	x23, x24, [x8], #16
1152	ldp	x25, x26, [x8], #16
1153	ldp	x27, x28, [x8], #16
1154	ldp	x29, x9, [x8], #16
1155	ldr	lr, [x8]
1156	mov	sp, x9
1157	msr	sp_el0, x1
1158	ret
1159ENDPROC(cpu_switch_to)
1160NOKPROBE(cpu_switch_to)
1161
1162/*
1163 * This is how we return from a fork.
1164 */
1165ENTRY(ret_from_fork)
1166	bl	schedule_tail
1167	cbz	x19, 1f				// not a kernel thread
1168	mov	x0, x20
1169	blr	x19
11701:	get_current_task tsk
1171	b	ret_to_user
1172ENDPROC(ret_from_fork)
1173NOKPROBE(ret_from_fork)
1174
1175#ifdef CONFIG_ARM_SDE_INTERFACE
1176
1177#include <asm/sdei.h>
1178#include <uapi/linux/arm_sdei.h>
1179
1180.macro sdei_handler_exit exit_mode
1181	/* On success, this call never returns... */
1182	cmp	\exit_mode, #SDEI_EXIT_SMC
1183	b.ne	99f
1184	smc	#0
1185	b	.
118699:	hvc	#0
1187	b	.
1188.endm
1189
1190#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1191/*
1192 * The regular SDEI entry point may have been unmapped along with the rest of
1193 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
1194 * argument accessible.
1195 *
1196 * This clobbers x4, __sdei_handler() will restore this from firmware's
1197 * copy.
1198 */
1199.ltorg
1200.pushsection ".entry.tramp.text", "ax"
1201ENTRY(__sdei_asm_entry_trampoline)
1202	mrs	x4, ttbr1_el1
1203	tbz	x4, #USER_ASID_BIT, 1f
1204
1205	tramp_map_kernel tmp=x4
1206	isb
1207	mov	x4, xzr
1208
1209	/*
1210	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
1211	 * the kernel on exit.
1212	 */
12131:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1214
1215#ifdef CONFIG_RANDOMIZE_BASE
1216	adr	x4, tramp_vectors + PAGE_SIZE
1217	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
1218	ldr	x4, [x4]
1219#else
1220	ldr	x4, =__sdei_asm_handler
1221#endif
1222	br	x4
1223ENDPROC(__sdei_asm_entry_trampoline)
1224NOKPROBE(__sdei_asm_entry_trampoline)
1225
1226/*
1227 * Make the exit call and restore the original ttbr1_el1
1228 *
1229 * x0 & x1: setup for the exit API call
1230 * x2: exit_mode
1231 * x4: struct sdei_registered_event argument from registration time.
1232 */
1233ENTRY(__sdei_asm_exit_trampoline)
1234	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1235	cbnz	x4, 1f
1236
1237	tramp_unmap_kernel	tmp=x4
1238
12391:	sdei_handler_exit exit_mode=x2
1240ENDPROC(__sdei_asm_exit_trampoline)
1241NOKPROBE(__sdei_asm_exit_trampoline)
1242	.ltorg
1243.popsection		// .entry.tramp.text
1244#ifdef CONFIG_RANDOMIZE_BASE
1245.pushsection ".rodata", "a"
1246__sdei_asm_trampoline_next_handler:
1247	.quad	__sdei_asm_handler
1248.popsection		// .rodata
1249#endif /* CONFIG_RANDOMIZE_BASE */
1250#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1251
1252/*
1253 * Software Delegated Exception entry point.
1254 *
1255 * x0: Event number
1256 * x1: struct sdei_registered_event argument from registration time.
1257 * x2: interrupted PC
1258 * x3: interrupted PSTATE
1259 * x4: maybe clobbered by the trampoline
1260 *
1261 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
1262 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
1263 * want them.
1264 */
1265ENTRY(__sdei_asm_handler)
1266	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
1267	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
1268	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
1269	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
1270	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
1271	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
1272	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
1273	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
1274	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1275	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1276	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1277	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1278	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1279	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1280	mov	x4, sp
1281	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1282
1283	mov	x19, x1
1284
1285#ifdef CONFIG_VMAP_STACK
1286	/*
1287	 * entry.S may have been using sp as a scratch register, find whether
1288	 * this is a normal or critical event and switch to the appropriate
1289	 * stack for this CPU.
1290	 */
1291	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1292	cbnz	w4, 1f
1293	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1294	b	2f
12951:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
12962:	mov	x6, #SDEI_STACK_SIZE
1297	add	x5, x5, x6
1298	mov	sp, x5
1299#endif
1300
1301	/*
1302	 * We may have interrupted userspace, or a guest, or exit-from or
1303	 * return-to either of these. We can't trust sp_el0, restore it.
1304	 */
1305	mrs	x28, sp_el0
1306	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1307	msr	sp_el0, x0
1308
1309	/* If we interrupted the kernel point to the previous stack/frame. */
1310	and     x0, x3, #0xc
1311	mrs     x1, CurrentEL
1312	cmp     x0, x1
1313	csel	x29, x29, xzr, eq	// fp, or zero
1314	csel	x4, x2, xzr, eq		// elr, or zero
1315
1316	stp	x29, x4, [sp, #-16]!
1317	mov	x29, sp
1318
1319	add	x0, x19, #SDEI_EVENT_INTREGS
1320	mov	x1, x19
1321	bl	__sdei_handler
1322
1323	msr	sp_el0, x28
1324	/* restore regs >x17 that we clobbered */
1325	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1326	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1327	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1328	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1329	mov	sp, x1
1330
1331	mov	x1, x0			// address to complete_and_resume
1332	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
1333	cmp	x0, #1
1334	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1335	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1336	csel	x0, x2, x3, ls
1337
1338	ldr_l	x2, sdei_exit_mode
1339
1340alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1341	sdei_handler_exit exit_mode=x2
1342alternative_else_nop_endif
1343
1344#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1345	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
1346	br	x5
1347#endif
1348ENDPROC(__sdei_asm_handler)
1349NOKPROBE(__sdei_asm_handler)
1350#endif /* CONFIG_ARM_SDE_INTERFACE */
v6.2 (arch/arm64/kernel/entry.S)
   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 * Low-level exception handling code
   4 *
   5 * Copyright (C) 2012 ARM Ltd.
   6 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
   7 *		Will Deacon <will.deacon@arm.com>
   8 */
   9
  10#include <linux/arm-smccc.h>
  11#include <linux/init.h>
  12#include <linux/linkage.h>
  13
  14#include <asm/alternative.h>
  15#include <asm/assembler.h>
  16#include <asm/asm-offsets.h>
  17#include <asm/asm_pointer_auth.h>
  18#include <asm/bug.h>
  19#include <asm/cpufeature.h>
  20#include <asm/errno.h>
  21#include <asm/esr.h>
  22#include <asm/irq.h>
  23#include <asm/memory.h>
  24#include <asm/mmu.h>
  25#include <asm/processor.h>
  26#include <asm/ptrace.h>
  27#include <asm/scs.h>
  28#include <asm/thread_info.h>
  29#include <asm/asm-uaccess.h>
  30#include <asm/unistd.h>
  31
  32	.macro	clear_gp_regs
  33	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
  34	mov	x\n, xzr
  35	.endr
  36	.endm
  37
  38	.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
  39	.align 7
  40.Lventry_start\@:
  41	.if	\el == 0
  42	/*
  43	 * This must be the first instruction of the EL0 vector entries. It is
  44	 * skipped by the trampoline vectors, to trigger the cleanup.
  45	 */
  46	b	.Lskip_tramp_vectors_cleanup\@
  47	.if	\regsize == 64
  48	mrs	x30, tpidrro_el0
  49	msr	tpidrro_el0, xzr
  50	.else
  51	mov	x30, xzr
  52	.endif
  53.Lskip_tramp_vectors_cleanup\@:
  54	.endif
  55
  56	sub	sp, sp, #PT_REGS_SIZE
  57#ifdef CONFIG_VMAP_STACK
  58	/*
  59	 * Test whether the SP has overflowed, without corrupting a GPR.
  60	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
  61	 * should always be zero.
  62	 */
  63	add	sp, sp, x0			// sp' = sp + x0
  64	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
  65	tbnz	x0, #THREAD_SHIFT, 0f
  66	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
  67	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
  68	b	el\el\ht\()_\regsize\()_\label
  69
  700:
  71	/*
  72	 * Either we've just detected an overflow, or we've taken an exception
  73	 * while on the overflow stack. Either way, we won't return to
  74	 * userspace, and can clobber EL0 registers to free up GPRs.
  75	 */
  76
  77	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
  78	msr	tpidr_el0, x0
  79
  80	/* Recover the original x0 value and stash it in tpidrro_el0 */
  81	sub	x0, sp, x0
  82	msr	tpidrro_el0, x0
  83
  84	/* Switch to the overflow stack */
  85	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
  86
  87	/*
  88	 * Check whether we were already on the overflow stack. This may happen
  89	 * after panic() re-enables interrupts.
  90	 */
  91	mrs	x0, tpidr_el0			// sp of interrupted context
  92	sub	x0, sp, x0			// delta with top of overflow stack
  93	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
  94	b.ne	__bad_stack			// no? -> bad stack pointer
  95
  96	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
  97	sub	sp, sp, x0
  98	mrs	x0, tpidrro_el0
  99#endif
 100	b	el\el\ht\()_\regsize\()_\label
 101.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
 102	.endm
 103
 104	.macro tramp_alias, dst, sym, tmp
 105	mov_q	\dst, TRAMP_VALIAS
 106	adr_l	\tmp, \sym
 107	add	\dst, \dst, \tmp
 108	adr_l	\tmp, .entry.tramp.text
 109	sub	\dst, \dst, \tmp
 110	.endm
 111
 112	/*
 113	 * This macro corrupts x0-x3. It is the caller's duty  to save/restore
 114	 * them if required.
 115	 */
 116	.macro	apply_ssbd, state, tmp1, tmp2
 117alternative_cb	ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable
 118	b	.L__asm_ssbd_skip\@		// Patched to NOP
 119alternative_cb_end
 120	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
 121	cbz	\tmp2,	.L__asm_ssbd_skip\@
 122	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
 123	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
 124	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
 125	mov	w1, #\state
 126alternative_cb	ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
 127	nop					// Patched to SMC/HVC #0
 128alternative_cb_end
 129.L__asm_ssbd_skip\@:
 130	.endm
 131
 132	/* Check for MTE asynchronous tag check faults */
 133	.macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
 134#ifdef CONFIG_ARM64_MTE
 135	.arch_extension lse
 136alternative_if_not ARM64_MTE
 137	b	1f
 138alternative_else_nop_endif
 139	/*
 140	 * Asynchronous tag check faults are only possible in ASYNC (2) or
 141	 * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
 142	 * set, so skip the check if it is unset.
 143	 */
 144	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
 145	mrs_s	\tmp, SYS_TFSRE0_EL1
 146	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
 147	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
 148	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
 149	add	\ti_flags, tsk, #TSK_TI_FLAGS
 150	stset	\tmp, [\ti_flags]
 1511:
 152#endif
 153	.endm
 154
 155	/* Clear the MTE asynchronous tag check faults */
 156	.macro clear_mte_async_tcf thread_sctlr
 157#ifdef CONFIG_ARM64_MTE
 158alternative_if ARM64_MTE
 159	/* See comment in check_mte_async_tcf above. */
 160	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
 161	dsb	ish
 162	msr_s	SYS_TFSRE0_EL1, xzr
 1631:
 164alternative_else_nop_endif
 165#endif
 166	.endm
 167
 168	.macro mte_set_gcr, mte_ctrl, tmp
 169#ifdef CONFIG_ARM64_MTE
 170	ubfx	\tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
 171	orr	\tmp, \tmp, #SYS_GCR_EL1_RRND
 172	msr_s	SYS_GCR_EL1, \tmp
 173#endif
 174	.endm
 175
 176	.macro mte_set_kernel_gcr, tmp, tmp2
 177#ifdef CONFIG_KASAN_HW_TAGS
 178alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
 179	b	1f
 180alternative_cb_end
 181	mov	\tmp, KERNEL_GCR_EL1
 182	msr_s	SYS_GCR_EL1, \tmp
 1831:
 184#endif
 185	.endm
 186
 187	.macro mte_set_user_gcr, tsk, tmp, tmp2
 188#ifdef CONFIG_KASAN_HW_TAGS
 189alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
 190	b	1f
 191alternative_cb_end
 192	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]
 193
 194	mte_set_gcr \tmp, \tmp2
 1951:
 196#endif
 197	.endm
 198
 199	.macro	kernel_entry, el, regsize = 64
 200	.if	\el == 0
 201	alternative_insn nop, SET_PSTATE_DIT(1), ARM64_HAS_DIT
 202	.endif
 203	.if	\regsize == 32
 204	mov	w0, w0				// zero upper 32 bits of x0
 205	.endif
 206	stp	x0, x1, [sp, #16 * 0]
 207	stp	x2, x3, [sp, #16 * 1]
 208	stp	x4, x5, [sp, #16 * 2]
 209	stp	x6, x7, [sp, #16 * 3]
 210	stp	x8, x9, [sp, #16 * 4]
 211	stp	x10, x11, [sp, #16 * 5]
 212	stp	x12, x13, [sp, #16 * 6]
 213	stp	x14, x15, [sp, #16 * 7]
 214	stp	x16, x17, [sp, #16 * 8]
 215	stp	x18, x19, [sp, #16 * 9]
 216	stp	x20, x21, [sp, #16 * 10]
 217	stp	x22, x23, [sp, #16 * 11]
 218	stp	x24, x25, [sp, #16 * 12]
 219	stp	x26, x27, [sp, #16 * 13]
 220	stp	x28, x29, [sp, #16 * 14]
 221
 222	.if	\el == 0
 223	clear_gp_regs
 224	mrs	x21, sp_el0
 225	ldr_this_cpu	tsk, __entry_task, x20
 226	msr	sp_el0, tsk
 227
 228	/*
 229	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
 230	 * when scheduling.
 231	 */
 232	ldr	x19, [tsk, #TSK_TI_FLAGS]
 233	disable_step_tsk x19, x20
 234
 235	/* Check for asynchronous tag check faults in user space */
 236	ldr	x0, [tsk, THREAD_SCTLR_USER]
 237	check_mte_async_tcf x22, x23, x0
 238
 239#ifdef CONFIG_ARM64_PTR_AUTH
 240alternative_if ARM64_HAS_ADDRESS_AUTH
 241	/*
 242	 * Enable IA for in-kernel PAC if the task had it disabled. Although
 243	 * this could be implemented with an unconditional MRS which would avoid
 244	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
 245	 *
 246	 * Install the kernel IA key only if IA was enabled in the task. If IA
 247	 * was disabled on kernel exit then we would have left the kernel IA
 248	 * installed so there is no need to install it again.
 249	 */
 250	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
 251	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
 252	b	2f
 2531:
 254	mrs	x0, sctlr_el1
 255	orr	x0, x0, SCTLR_ELx_ENIA
 256	msr	sctlr_el1, x0
 2572:
 258alternative_else_nop_endif
 259#endif
 260
 261	apply_ssbd 1, x22, x23
 262
 263	mte_set_kernel_gcr x22, x23
 264
 265	/*
 266	 * Any non-self-synchronizing system register updates required for
 267	 * kernel entry should be placed before this point.
 268	 */
 269alternative_if ARM64_MTE
 270	isb
 271	b	1f
 272alternative_else_nop_endif
 273alternative_if ARM64_HAS_ADDRESS_AUTH
 274	isb
 275alternative_else_nop_endif
 2761:
 277
 278	scs_load tsk
 279	.else
 280	add	x21, sp, #PT_REGS_SIZE
 281	get_current_task tsk
 282	.endif /* \el == 0 */
 283	mrs	x22, elr_el1
 284	mrs	x23, spsr_el1
 285	stp	lr, x21, [sp, #S_LR]
 286
 287	/*
 288	 * For exceptions from EL0, create a final frame record.
 289	 * For exceptions from EL1, create a synthetic frame record so the
 290	 * interrupted code shows up in the backtrace.
 291	 */
 292	.if \el == 0
 293	stp	xzr, xzr, [sp, #S_STACKFRAME]
 294	.else
 295	stp	x29, x22, [sp, #S_STACKFRAME]
 296	.endif
 297	add	x29, sp, #S_STACKFRAME
 298
 299#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 300alternative_if_not ARM64_HAS_PAN
 301	bl	__swpan_entry_el\el
 302alternative_else_nop_endif
 303#endif
 304
 305	stp	x22, x23, [sp, #S_PC]
 306
 307	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
 308	.if	\el == 0
 309	mov	w21, #NO_SYSCALL
 310	str	w21, [sp, #S_SYSCALLNO]
 311	.endif
 312
 313#ifdef CONFIG_ARM64_PSEUDO_NMI
 314	/* Save pmr */
 315alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 316	mrs_s	x20, SYS_ICC_PMR_EL1
 317	str	x20, [sp, #S_PMR_SAVE]
 318	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
 319	msr_s	SYS_ICC_PMR_EL1, x20
 320alternative_else_nop_endif
 321#endif
 322
 323	/*
 324	 * Registers that may be useful after this macro is invoked:
 325	 *
 326	 * x20 - ICC_PMR_EL1
 327	 * x21 - aborted SP
 328	 * x22 - aborted PC
 329	 * x23 - aborted PSTATE
 330	*/
 331	.endm
 332
 333	.macro	kernel_exit, el
 334	.if	\el != 0
 335	disable_daif
 336	.endif
 337
 338#ifdef CONFIG_ARM64_PSEUDO_NMI
 339	/* Restore pmr */
 340alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 341	ldr	x20, [sp, #S_PMR_SAVE]
 342	msr_s	SYS_ICC_PMR_EL1, x20
 343	mrs_s	x21, SYS_ICC_CTLR_EL1
 344	tbz	x21, #6, .L__skip_pmr_sync\@	// Check for ICC_CTLR_EL1.PMHE
 345	dsb	sy				// Ensure priority change is seen by redistributor
 346.L__skip_pmr_sync\@:
 347alternative_else_nop_endif
 348#endif
 349
 350	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 351
 352#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 353alternative_if_not ARM64_HAS_PAN
 354	bl	__swpan_exit_el\el
 355alternative_else_nop_endif
 356#endif
 357
 358	.if	\el == 0
 359	ldr	x23, [sp, #S_SP]		// load return stack pointer
 360	msr	sp_el0, x23
 361	tst	x22, #PSR_MODE32_BIT		// native task?
 362	b.eq	3f
 363
 364#ifdef CONFIG_ARM64_ERRATUM_845719
 365alternative_if ARM64_WORKAROUND_845719
 366#ifdef CONFIG_PID_IN_CONTEXTIDR
 367	mrs	x29, contextidr_el1
 368	msr	contextidr_el1, x29
 369#else
 370	msr contextidr_el1, xzr
 371#endif
 372alternative_else_nop_endif
 373#endif
 3743:
 375	scs_save tsk
 376
 377	/* Ignore asynchronous tag check faults in the uaccess routines */
 378	ldr	x0, [tsk, THREAD_SCTLR_USER]
 379	clear_mte_async_tcf x0
 380
 381#ifdef CONFIG_ARM64_PTR_AUTH
 382alternative_if ARM64_HAS_ADDRESS_AUTH
 383	/*
 384	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
 385	 * alternatively install the user's IA. All other per-task keys and
 386	 * SCTLR bits were updated on task switch.
 387	 *
 388	 * No kernel C function calls after this.
 389	 */
 390	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
 391	__ptrauth_keys_install_user tsk, x0, x1, x2
 392	b	2f
 3931:
 394	mrs	x0, sctlr_el1
 395	bic	x0, x0, SCTLR_ELx_ENIA
 396	msr	sctlr_el1, x0
 3972:
 398alternative_else_nop_endif
 399#endif
 400
 401	mte_set_user_gcr tsk, x0, x1
 402
 403	apply_ssbd 0, x0, x1
 404	.endif
 405
 406	msr	elr_el1, x21			// set up the return data
 407	msr	spsr_el1, x22
 408	ldp	x0, x1, [sp, #16 * 0]
 409	ldp	x2, x3, [sp, #16 * 1]
 410	ldp	x4, x5, [sp, #16 * 2]
 411	ldp	x6, x7, [sp, #16 * 3]
 412	ldp	x8, x9, [sp, #16 * 4]
 413	ldp	x10, x11, [sp, #16 * 5]
 414	ldp	x12, x13, [sp, #16 * 6]
 415	ldp	x14, x15, [sp, #16 * 7]
 416	ldp	x16, x17, [sp, #16 * 8]
 417	ldp	x18, x19, [sp, #16 * 9]
 418	ldp	x20, x21, [sp, #16 * 10]
 419	ldp	x22, x23, [sp, #16 * 11]
 420	ldp	x24, x25, [sp, #16 * 12]
 421	ldp	x26, x27, [sp, #16 * 13]
 422	ldp	x28, x29, [sp, #16 * 14]
 423
 424	.if	\el == 0
 425alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
 426	ldr	lr, [sp, #S_LR]
 427	add	sp, sp, #PT_REGS_SIZE		// restore sp
 428	eret
 429alternative_else_nop_endif
 430#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 431	bne	4f
 432	msr	far_el1, x29
 433	tramp_alias	x30, tramp_exit_native, x29
 434	br	x30
 4354:
 436	tramp_alias	x30, tramp_exit_compat, x29
 437	br	x30
 438#endif
 439	.else
 440	ldr	lr, [sp, #S_LR]
 441	add	sp, sp, #PT_REGS_SIZE		// restore sp
 442
 443	/* Ensure any device/NC reads complete */
 444	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412
 445
 446	eret
 447	.endif
 448	sb
 449	.endm
 450
 451#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 452	/*
 453	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
 454	 * EL0, there is no need to check the state of TTBR0_EL1 since
 455	 * accesses are always enabled.
 456	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
 457	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
 458	 * user mappings.
 459	 */
 460SYM_CODE_START_LOCAL(__swpan_entry_el1)
 461	mrs	x21, ttbr0_el1
 462	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
 463	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
 464	b.eq	1f				// TTBR0 access already disabled
 465	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
 466SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
 467	__uaccess_ttbr0_disable x21
 4681:	ret
 469SYM_CODE_END(__swpan_entry_el1)
 470
 471	/*
 472	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
 473	 * PAN bit checking.
 474	 */
 475SYM_CODE_START_LOCAL(__swpan_exit_el1)
 476	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
 477	__uaccess_ttbr0_enable x0, x1
 4781:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
 479	ret
 480SYM_CODE_END(__swpan_exit_el1)
 481
 482SYM_CODE_START_LOCAL(__swpan_exit_el0)
 483	__uaccess_ttbr0_enable x0, x1
 484	/*
 485	 * Enable errata workarounds only if returning to user. The only
 486	 * workaround currently required for TTBR0_EL1 changes are for the
 487	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
 488	 * corruption).
 489	 */
 490	b	post_ttbr_update_workaround
 491SYM_CODE_END(__swpan_exit_el0)
 492#endif
 493
 494/* GPRs used by entry code */
 495tsk	.req	x28		// current thread_info
 496
 497	.text
 498
 499/*
 500 * Exception vectors.
 501 */
 502	.pushsection ".entry.text", "ax"
 503
 504	.align	11
 505SYM_CODE_START(vectors)
 506	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
 507	kernel_ventry	1, t, 64, irq		// IRQ EL1t
 508	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
 509	kernel_ventry	1, t, 64, error		// Error EL1t
 510
 511	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
 512	kernel_ventry	1, h, 64, irq		// IRQ EL1h
 513	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
 514	kernel_ventry	1, h, 64, error		// Error EL1h
 515
 516	kernel_ventry	0, t, 64, sync		// Synchronous 64-bit EL0
 517	kernel_ventry	0, t, 64, irq		// IRQ 64-bit EL0
 518	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
 519	kernel_ventry	0, t, 64, error		// Error 64-bit EL0
 520
 521	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
 522	kernel_ventry	0, t, 32, irq		// IRQ 32-bit EL0
 523	kernel_ventry	0, t, 32, fiq		// FIQ 32-bit EL0
 524	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
 525SYM_CODE_END(vectors)
 526
 527#ifdef CONFIG_VMAP_STACK
 528SYM_CODE_START_LOCAL(__bad_stack)
 529	/*
 530	 * We detected an overflow in kernel_ventry, which switched to the
 531	 * overflow stack. Stash the exception regs, and head to our overflow
 532	 * handler.
 533	 */
 534
 535	/* Restore the original x0 value */
 536	mrs	x0, tpidrro_el0
 537
 538	/*
  539	 * Store the original GPRs to the new stack. The original SP (minus
 540	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
 541	 */
 542	sub	sp, sp, #PT_REGS_SIZE
 543	kernel_entry 1
 544	mrs	x0, tpidr_el0
 545	add	x0, x0, #PT_REGS_SIZE
 546	str	x0, [sp, #S_SP]
 547
 548	/* Stash the regs for handle_bad_stack */
 549	mov	x0, sp
 550
 551	/* Time to die */
 552	bl	handle_bad_stack
 553	ASM_BUG()
 554SYM_CODE_END(__bad_stack)
 555#endif /* CONFIG_VMAP_STACK */
 556
 557
 558	.macro entry_handler el:req, ht:req, regsize:req, label:req
 559SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
 560	kernel_entry \el, \regsize
 561	mov	x0, sp
 562	bl	el\el\ht\()_\regsize\()_\label\()_handler
 563	.if \el == 0
 564	b	ret_to_user
 565	.else
 566	b	ret_to_kernel
 567	.endif
 568SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
 569	.endm
 570
 571/*
 572 * Early exception handlers
 573 */
 574	entry_handler	1, t, 64, sync
 575	entry_handler	1, t, 64, irq
 576	entry_handler	1, t, 64, fiq
 577	entry_handler	1, t, 64, error
 578
 579	entry_handler	1, h, 64, sync
 580	entry_handler	1, h, 64, irq
 581	entry_handler	1, h, 64, fiq
 582	entry_handler	1, h, 64, error
 583
 584	entry_handler	0, t, 64, sync
 585	entry_handler	0, t, 64, irq
 586	entry_handler	0, t, 64, fiq
 587	entry_handler	0, t, 64, error
 588
 589	entry_handler	0, t, 32, sync
 590	entry_handler	0, t, 32, irq
 591	entry_handler	0, t, 32, fiq
 592	entry_handler	0, t, 32, error
 593
 594SYM_CODE_START_LOCAL(ret_to_kernel)
 595	kernel_exit 1
 596SYM_CODE_END(ret_to_kernel)
 597
 598SYM_CODE_START_LOCAL(ret_to_user)
 599	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
 600	enable_step_tsk x19, x2
 601#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 602	bl	stackleak_erase_on_task_stack
 603#endif
 604	kernel_exit 0
 605SYM_CODE_END(ret_to_user)
 606
 607	.popsection				// .entry.text
 608
 609	// Move from tramp_pg_dir to swapper_pg_dir
 610	.macro tramp_map_kernel, tmp
 611	mrs	\tmp, ttbr1_el1
 612	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
 613	bic	\tmp, \tmp, #USER_ASID_FLAG
 614	msr	ttbr1_el1, \tmp
 615#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
 616alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
 617	/* ASID already in \tmp[63:48] */
 618	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
 619	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
 620	/* 2MB boundary containing the vectors, so we nobble the walk cache */
 621	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
 622	isb
 623	tlbi	vae1, \tmp
 624	dsb	nsh
 625alternative_else_nop_endif
 626#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
 627	.endm
 628
 629	// Move from swapper_pg_dir to tramp_pg_dir
 630	.macro tramp_unmap_kernel, tmp
 631	mrs	\tmp, ttbr1_el1
 632	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
 633	orr	\tmp, \tmp, #USER_ASID_FLAG
 634	msr	ttbr1_el1, \tmp
 635	/*
 636	 * We avoid running the post_ttbr_update_workaround here because
 637	 * it's only needed by Cavium ThunderX, which requires KPTI to be
 638	 * disabled.
 639	 */
 640	.endm
 641
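/*
 * Load the runtime address of \var. With CONFIG_RELOCATABLE it is read from
 * a pointer emitted (once per variable) into .entry.tramp.rodata, which is
 * mapped alongside the trampoline text; otherwise it is built directly from
 * the link-time constant with movz/movk.
 */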
 642	.macro		tramp_data_read_var	dst, var
 643#ifdef CONFIG_RELOCATABLE
 644	ldr		\dst, .L__tramp_data_\var
 645	.ifndef		.L__tramp_data_\var
 646	.pushsection	".entry.tramp.rodata", "a", %progbits
 647	.align		3
 648.L__tramp_data_\var:
 649	.quad		\var
 650	.popsection
 651	.endif
 652#else
 653	/*
 654	 * As !RELOCATABLE implies !RANDOMIZE_BASE the address is always a
 655	 * compile time constant (and hence not secret and not worth hiding).
 656	 *
 657	 * As statically allocated kernel code and data always live in the top
 658	 * 47 bits of the address space we can sign-extend bit 47 and avoid an
 659	 * instruction to load the upper 16 bits (which must be 0xFFFF).
 660	 */
 661	movz		\dst, :abs_g2_s:\var
 662	movk		\dst, :abs_g1_nc:\var
 663	movk		\dst, :abs_g0_nc:\var
 664#endif
 665	.endm
 666
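/*
 * Spectre-BHB mitigation to embed in a vector variant: nothing, the
 * branch-loop workaround, a firmware (SMCCC) call, or the CLRBHB instruction.
 */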
 667#define BHB_MITIGATION_NONE	0
 668#define BHB_MITIGATION_LOOP	1
 669#define BHB_MITIGATION_FW	2
 670#define BHB_MITIGATION_INSN	3
 671
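/*
 * One trampoline vector slot. For 64-bit tasks x30 is stashed in tpidrro_el0
 * (recovered by kernel_ventry) so it is free as scratch here. The selected
 * Spectre-BHB mitigation is applied and, for the kpti case, the kernel
 * mapping is restored and VBAR_EL1 pointed at the real vectors; we then
 * "return" into the matching slot of those vectors rather than branching to
 * a literal kernel address.
 */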
 672	.macro tramp_ventry, vector_start, regsize, kpti, bhb
 673	.align	7
 6741:
 675	.if	\regsize == 64
 676	msr	tpidrro_el0, x30	// Restored in kernel_ventry
 677	.endif
 678
 679	.if	\bhb == BHB_MITIGATION_LOOP
 680	/*
 681	 * This sequence must appear before the first indirect branch, i.e. the
 682	 * ret out of tramp_ventry. It appears here because x30 is free.
 683	 */
 684	__mitigate_spectre_bhb_loop	x30
 685	.endif // \bhb == BHB_MITIGATION_LOOP
 686
 687	.if	\bhb == BHB_MITIGATION_INSN
 688	clearbhb
 689	isb
 690	.endif // \bhb == BHB_MITIGATION_INSN
 691
 692	.if	\kpti == 1
 693	/*
 694	 * Defend against branch aliasing attacks by pushing a dummy
 695	 * entry onto the return stack and using a RET instruction to
 696	 * enter the full-fat kernel vectors.
 697	 */
 698	bl	2f
 699	b	.
 7002:
 701	tramp_map_kernel	x30
 702alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
 703	tramp_data_read_var	x30, vectors
 704alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
 705	prfm	plil1strm, [x30, #(1b - \vector_start)]
 706alternative_else_nop_endif
 707
 708	msr	vbar_el1, x30
 709	isb
 710	.else
 711	adr_l	x30, vectors
 712	.endif // \kpti == 1
 713
 714	.if	\bhb == BHB_MITIGATION_FW
 715	/*
 716	 * The firmware sequence must appear before the first indirect branch,
 717	 * i.e. the ret out of tramp_ventry, but it also needs the stack to be
 718	 * mapped to save/restore the registers the SMC clobbers.
 719	 */
 720	__mitigate_spectre_bhb_fw
 721	.endif // \bhb == BHB_MITIGATION_FW
 722
 723	add	x30, x30, #(1b - \vector_start + 4)
 724	ret
 725.org 1b + 128	// Did we overflow the ventry slot?
 726	.endm
 727
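/*
 * Exit via the trampoline: point VBAR_EL1 back at this CPU's (possibly
 * BHB-mitigated) vectors, restore lr from the saved pt_regs, switch TTBR1
 * back to tramp_pg_dir, recover x29 from FAR_EL1 for 64-bit tasks (x29 is
 * used as scratch here), then pop the frame and eret.
 */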
 728	.macro tramp_exit, regsize = 64
 729	tramp_data_read_var	x30, this_cpu_vector
 730	get_this_cpu_offset x29
 731	ldr	x30, [x30, x29]
 732
 733	msr	vbar_el1, x30
 734	ldr	lr, [sp, #S_LR]
 735	tramp_unmap_kernel	x29
 736	.if	\regsize == 64
 737	mrs	x29, far_el1
 738	.endif
 739	add	sp, sp, #PT_REGS_SIZE		// restore sp
 740	eret
 741	sb
 742	.endm
 743
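/*
 * Lay out one 2K trampoline vector table: the first 0x400 bytes (the
 * current-EL slots, never entered via the trampoline) are left empty,
 * followed by four 64-bit and four 32-bit lower-EL slots of 128 bytes each.
 */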
 744	.macro	generate_tramp_vector,	kpti, bhb
 745.Lvector_start\@:
 746	.space	0x400
 747
 748	.rept	4
 749	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
 750	.endr
 751	.rept	4
 752	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
 753	.endr
 754	.endm
 755
 756#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 757/*
 758 * Exception vectors trampoline.
 759 * The order must match __bp_harden_el1_vectors and the
 760 * arm64_bp_harden_el1_vectors enum.
 761 */
 762	.pushsection ".entry.tramp.text", "ax"
 763	.align	11
 764SYM_CODE_START_NOALIGN(tramp_vectors)
 765#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
 766	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
 767	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
 768	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
 769#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
 770	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
 771SYM_CODE_END(tramp_vectors)
 772
 773SYM_CODE_START(tramp_exit_native)
 774	tramp_exit
 775SYM_CODE_END(tramp_exit_native)
 776
 777SYM_CODE_START(tramp_exit_compat)
 778	tramp_exit	32
 779SYM_CODE_END(tramp_exit_compat)
 780	.popsection				// .entry.tramp.text
 781#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 782
 783/*
 784 * Exception vectors for spectre mitigations on entry from EL1 when
 785 * kpti is not in use.
 786 */
 787	.macro generate_el1_vector, bhb
 788.Lvector_start\@:
 789	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
 790	kernel_ventry	1, t, 64, irq		// IRQ EL1t
 791	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
 792	kernel_ventry	1, t, 64, error		// Error EL1t
 793
 794	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
 795	kernel_ventry	1, h, 64, irq		// IRQ EL1h
 796	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
 797	kernel_ventry	1, h, 64, error		// Error EL1h
 798
 799	.rept	4
 800	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
 801	.endr
 802	.rept 4
 803	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
 804	.endr
 805	.endm
 806
 807/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
 808	.pushsection ".entry.text", "ax"
 809	.align	11
 810SYM_CODE_START(__bp_harden_el1_vectors)
 811#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
 812	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
 813	generate_el1_vector	bhb=BHB_MITIGATION_FW
 814	generate_el1_vector	bhb=BHB_MITIGATION_INSN
 815#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
 816SYM_CODE_END(__bp_harden_el1_vectors)
 817	.popsection
 818
 819
 820/*
 821 * Register switch for AArch64. The callee-saved registers need to be saved
 822 * and restored. On entry:
 823 *   x0 = previous task_struct (must be preserved across the switch)
 824 *   x1 = next task_struct
 825 * Previous and next are guaranteed not to be the same.
 826 *
 827 */
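/*
 * THREAD_CPU_CONTEXT is the offset of thread.cpu_context in task_struct, and
 * sp_el0 is updated because the kernel keeps the 'current' task pointer there.
 */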
 828SYM_FUNC_START(cpu_switch_to)
 829	mov	x10, #THREAD_CPU_CONTEXT
 830	add	x8, x0, x10
 831	mov	x9, sp
 832	stp	x19, x20, [x8], #16		// store callee-saved registers
 833	stp	x21, x22, [x8], #16
 834	stp	x23, x24, [x8], #16
 835	stp	x25, x26, [x8], #16
 836	stp	x27, x28, [x8], #16
 837	stp	x29, x9, [x8], #16
 838	str	lr, [x8]
 839	add	x8, x1, x10
 840	ldp	x19, x20, [x8], #16		// restore callee-saved registers
 841	ldp	x21, x22, [x8], #16
 842	ldp	x23, x24, [x8], #16
 843	ldp	x25, x26, [x8], #16
 844	ldp	x27, x28, [x8], #16
 845	ldp	x29, x9, [x8], #16
 846	ldr	lr, [x8]
 847	mov	sp, x9
 848	msr	sp_el0, x1
 849	ptrauth_keys_install_kernel x1, x8, x9, x10
 850	scs_save x0
 851	scs_load x1
 852	ret
 853SYM_FUNC_END(cpu_switch_to)
 854NOKPROBE(cpu_switch_to)
 855
 856/*
 857 * This is how we return from a fork.
 858 */
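/*
 * copy_thread() sets up x19/x20 in the new task's cpu_context: x19 is the
 * kernel-thread entry point (NULL for a user task, hence the cbz) and x20
 * its argument.
 */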
 859SYM_CODE_START(ret_from_fork)
 860	bl	schedule_tail
 861	cbz	x19, 1f				// not a kernel thread
 862	mov	x0, x20
 863	blr	x19
 8641:	get_current_task tsk
 865	mov	x0, sp
 866	bl	asm_exit_to_user_mode
 867	b	ret_to_user
 868SYM_CODE_END(ret_from_fork)
 869NOKPROBE(ret_from_fork)
 870
 871/*
 872 * void call_on_irq_stack(struct pt_regs *regs,
 873 * 		          void (*func)(struct pt_regs *));
 874 *
 875 * Calls func(regs) using this CPU's irq stack and shadow irq stack.
 876 */
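/*
 * The frame record created below lets the stack unwinder step from the IRQ
 * stack back to the task stack of the interrupted context.
 */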
 877SYM_FUNC_START(call_on_irq_stack)
 878#ifdef CONFIG_SHADOW_CALL_STACK
 879	stp	scs_sp, xzr, [sp, #-16]!
 880	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
 881#endif
 882	/* Create a frame record to save our LR and SP (implicit in FP) */
 883	stp	x29, x30, [sp, #-16]!
 884	mov	x29, sp
 885
 886	ldr_this_cpu x16, irq_stack_ptr, x17
 887	mov	x15, #IRQ_STACK_SIZE
 888	add	x16, x16, x15
 889
 890	/* Move to the new stack and call the function there */
 891	mov	sp, x16
 892	blr	x1
 893
 894	/*
 895	 * Restore the SP from the FP, and restore the FP and LR from the frame
 896	 * record.
 897	 */
 898	mov	sp, x29
 899	ldp	x29, x30, [sp], #16
 900#ifdef CONFIG_SHADOW_CALL_STACK
 901	ldp	scs_sp, xzr, [sp], #16
 902#endif
 903	ret
 904SYM_FUNC_END(call_on_irq_stack)
 905NOKPROBE(call_on_irq_stack)
 906
 907#ifdef CONFIG_ARM_SDE_INTERFACE
 908
 909#include <asm/sdei.h>
 910#include <uapi/linux/arm_sdei.h>
 911
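/*
 * Complete the SDEI event using whichever conduit firmware expects: SMC if
 * \exit_mode is SDEI_EXIT_SMC, otherwise HVC.
 */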
 912.macro sdei_handler_exit exit_mode
 913	/* On success, this call never returns... */
 914	cmp	\exit_mode, #SDEI_EXIT_SMC
 915	b.ne	99f
 916	smc	#0
 917	b	.
 91899:	hvc	#0
 919	b	.
 920.endm
 921
 922#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 923/*
 924 * The regular SDEI entry point may have been unmapped along with the rest of
 925 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 926 * argument accessible.
 927 *
 928 * This clobbers x4, __sdei_handler() will restore this from firmware's
 929 * copy.
 930 */
 931.pushsection ".entry.tramp.text", "ax"
 932SYM_CODE_START(__sdei_asm_entry_trampoline)
 933	mrs	x4, ttbr1_el1
 934	tbz	x4, #USER_ASID_BIT, 1f
 935
 936	tramp_map_kernel tmp=x4
 937	isb
 938	mov	x4, xzr
 939
 940	/*
 941	 * Remember whether to unmap the kernel on exit.
 942	 */
 9431:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
 944	tramp_data_read_var     x4, __sdei_asm_handler
 945	br	x4
 946SYM_CODE_END(__sdei_asm_entry_trampoline)
 947NOKPROBE(__sdei_asm_entry_trampoline)
 948
 949/*
 950 * Make the exit call and restore the original ttbr1_el1
 951 *
 952 * x0 & x1: setup for the exit API call
 953 * x2: exit_mode
 954 * x4: struct sdei_registered_event argument from registration time.
 955 */
 956SYM_CODE_START(__sdei_asm_exit_trampoline)
 957	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
 958	cbnz	x4, 1f
 959
 960	tramp_unmap_kernel	tmp=x4
 961
 9621:	sdei_handler_exit exit_mode=x2
 963SYM_CODE_END(__sdei_asm_exit_trampoline)
 964NOKPROBE(__sdei_asm_exit_trampoline)
 965.popsection		// .entry.tramp.text
 966#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
 967
 968/*
 969 * Software Delegated Exception entry point.
 970 *
 971 * x0: Event number
 972 * x1: struct sdei_registered_event argument from registration time.
 973 * x2: interrupted PC
 974 * x3: interrupted PSTATE
 975 * x4: maybe clobbered by the trampoline
 976 *
 977 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
 978 * follow SMCCC. We save (or retrieve) all the registers as the handler may
 979 * want them.
 980 */
 981SYM_CODE_START(__sdei_asm_handler)
 982	stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
 983	stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
 984	stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
 985	stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
 986	stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
 987	stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
 988	stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
 989	stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
 990	stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
 991	stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
 992	stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
 993	stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
 994	stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
 995	stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
 996	mov	x4, sp
 997	stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
 998
 999	mov	x19, x1
1000
1001#if defined(CONFIG_VMAP_STACK) || defined(CONFIG_SHADOW_CALL_STACK)
1002	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
1003#endif
1004
1005#ifdef CONFIG_VMAP_STACK
1006	/*
1007	 * entry.S may have been using sp as a scratch register; find whether
1008	 * this is a normal or critical event and switch to the appropriate
1009	 * stack for this CPU.
1010	 */
1011	cbnz	w4, 1f
1012	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1013	b	2f
10141:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
10152:	mov	x6, #SDEI_STACK_SIZE
1016	add	x5, x5, x6
1017	mov	sp, x5
1018#endif
1019
1020#ifdef CONFIG_SHADOW_CALL_STACK
1021	/* Use a separate shadow call stack for normal and critical events */
1022	cbnz	w4, 3f
1023	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
1024	b	4f
10253:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
10264:
1027#endif
1028
1029	/*
1030	 * We may have interrupted userspace, or a guest, or exit-from or
1031	 * return-to either of these. We can't trust sp_el0; restore it.
1032	 */
1033	mrs	x28, sp_el0
1034	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
1035	msr	sp_el0, x0
1036
1037	/* If we interrupted the kernel, point to the previous stack/frame. */
1038	and     x0, x3, #0xc
1039	mrs     x1, CurrentEL
1040	cmp     x0, x1
1041	csel	x29, x29, xzr, eq	// fp, or zero
1042	csel	x4, x2, xzr, eq		// elr, or zero
1043
1044	stp	x29, x4, [sp, #-16]!
1045	mov	x29, sp
1046
1047	add	x0, x19, #SDEI_EVENT_INTREGS
1048	mov	x1, x19
1049	bl	__sdei_handler
1050
1051	msr	sp_el0, x28
1052	/* restore regs >x17 that we clobbered */
1053	mov	x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1054	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1055	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1056	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1057	mov	sp, x1
1058
1059	mov	x1, x0			// address to complete_and_resume
1060	/*
1061	 * x0 = (x0 <= SDEI_EV_FAILED) ? EVENT_COMPLETE : EVENT_COMPLETE_AND_RESUME
1062	 */
1063	cmp	x0, #SDEI_EV_FAILED
1064	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1065	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1066	csel	x0, x2, x3, ls
1067
1068	ldr_l	x2, sdei_exit_mode
1069
1070alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1071	sdei_handler_exit exit_mode=x2
1072alternative_else_nop_endif
1073
1074#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1075	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline, tmp=x3
1076	br	x5
1077#endif
1078SYM_CODE_END(__sdei_asm_handler)
1079NOKPROBE(__sdei_asm_handler)
1080#endif /* CONFIG_ARM_SDE_INTERFACE */