   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 * This file contains the 64-bit "server" PowerPC variant
   4 * of the low level exception handling including exception
   5 * vectors, exception return, part of the slb and stab
   6 * handling and other fixed offset specific things.
   7 *
   8 * This file is meant to be #included from head_64.S due to
   9 * position dependent assembly.
  10 *
  11 * Most of this originates from head_64.S and thus has the same
  12 * copyright history.
  13 *
  14 */
  15
  16#include <asm/hw_irq.h>
  17#include <asm/exception-64s.h>
  18#include <asm/ptrace.h>
  19#include <asm/cpuidle.h>
  20#include <asm/head-64.h>
  21#include <asm/feature-fixups.h>
  22#include <asm/kup.h>
  23
  24/* PACA save area offsets (exgen, exmc, etc) */
  25#define EX_R9		0
  26#define EX_R10		8
  27#define EX_R11		16
  28#define EX_R12		24
  29#define EX_R13		32
  30#define EX_DAR		40
  31#define EX_DSISR	48
  32#define EX_CCR		52
  33#define EX_CFAR		56
  34#define EX_PPR		64
  35#define EX_CTR		72
  36.if EX_SIZE != 10
  37	.error "EX_SIZE is wrong"
  38.endif
  39
  40/*
  41 * Following are fixed section helper macros.
  42 *
  43 * EXC_REAL_BEGIN/END  - real, unrelocated exception vectors
  44 * EXC_VIRT_BEGIN/END  - virt (AIL), unrelocated exception vectors
  45 * TRAMP_REAL_BEGIN    - real, unrelocated helpers (virt may call these)
  46 * TRAMP_VIRT_BEGIN    - virt, unreloc helpers (in practice, real can use)
  47 * EXC_COMMON          - After switching to virtual, relocated mode.
  48 */
  49
  50#define EXC_REAL_BEGIN(name, start, size)			\
  51	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)
  52
  53#define EXC_REAL_END(name, start, size)				\
  54	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)
  55
  56#define EXC_VIRT_BEGIN(name, start, size)			\
  57	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)
  58
  59#define EXC_VIRT_END(name, start, size)				\
  60	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)
  61
  62#define EXC_COMMON_BEGIN(name)					\
  63	USE_TEXT_SECTION();					\
  64	.balign IFETCH_ALIGN_BYTES;				\
  65	.global name;						\
  66	_ASM_NOKPROBE_SYMBOL(name);				\
  67	DEFINE_FIXED_SYMBOL(name);				\
  68name:
  69
  70#define TRAMP_REAL_BEGIN(name)					\
  71	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)
  72
  73#define TRAMP_VIRT_BEGIN(name)					\
  74	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)
  75
  76#define EXC_REAL_NONE(start, size)				\
  77	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
  78	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)
  79
  80#define EXC_VIRT_NONE(start, size)				\
  81	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
  82	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)
  83
  84/*
  85 * We're short on space and time in the exception prolog, so we can't
  86 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
  87 * Instead we get the base of the kernel from paca->kernelbase and or in the low
  88 * part of label. This requires that the label be within 64KB of kernelbase, and
  89 * that kernelbase be 64K aligned.
  90 */
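/*
 * For example, with paca->kernelbase = 0xc000000000000000 and a label at
 * absolute offset 0x6100, this yields 0xc000000000006100. The 64K limits
 * exist because ori takes only a 16-bit immediate, and kernelbase must
 * have its low 16 bits clear so the OR behaves like an add.
 */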
  91#define LOAD_HANDLER(reg, label)					\
  92	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
  93	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)
  94
  95#define __LOAD_HANDLER(reg, label)					\
  96	ld	reg,PACAKBASE(r13);					\
  97	ori	reg,reg,(ABS_ADDR(label))@l
  98
  99/*
 100 * Branches from unrelocated code (e.g., interrupts) to labels outside
 101 * head-y require >64K offsets.
 102 */
 103#define __LOAD_FAR_HANDLER(reg, label)					\
 104	ld	reg,PACAKBASE(r13);					\
 105	ori	reg,reg,(ABS_ADDR(label))@l;				\
 106	addis	reg,reg,(ABS_ADDR(label))@h
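/*
 * Note the low half is OR-ed in before addis adds the high half, so the
 * plain @h (rather than @ha) relocation is correct here: ori does not
 * sign-extend its immediate, so no carry adjustment is needed.
 */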
 107
 108/*
 109 * Branch to label using its 0xC000 address. This results in instruction
 110 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
 111 * on using mtmsr rather than rfid.
 112 *
 113 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
 114 * load KBASE for a slight optimisation.
 115 */
 116#define BRANCH_TO_C000(reg, label)					\
 117	__LOAD_FAR_HANDLER(reg, label);					\
 118	mtctr	reg;							\
 119	bctr
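/*
 * For example, the system reset idle wakeup path below uses
 * BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss)) so that the idle exit
 * code can turn relocation on with mtmsrd rather than requiring rfid.
 */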
 120
 121/*
 122 * Interrupt code generation macros
 123 */
 124#define IVEC		.L_IVEC_\name\()	/* Interrupt vector address */
 125#define IHSRR		.L_IHSRR_\name\()	/* Sets SRR or HSRR registers */
 126#define IHSRR_IF_HVMODE	.L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
 127#define IAREA		.L_IAREA_\name\()	/* PACA save area */
 128#define IVIRT		.L_IVIRT_\name\()	/* Has virt mode entry point */
 129#define IISIDE		.L_IISIDE_\name\()	/* Uses SRR0/1 not DAR/DSISR */
 130#define IDAR		.L_IDAR_\name\()	/* Uses DAR (or SRR0) */
 131#define IDSISR		.L_IDSISR_\name\()	/* Uses DSISR (or SRR1) */
 132#define ISET_RI		.L_ISET_RI_\name\()	/* Run common code w/ MSR[RI]=1 */
 133#define IBRANCH_TO_COMMON	.L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
 134#define IREALMODE_COMMON	.L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
 135#define IMASK		.L_IMASK_\name\()	/* IRQ soft-mask bit */
 136#define IKVM_SKIP	.L_IKVM_SKIP_\name\()	/* Generate KVM skip handler */
 137#define IKVM_REAL	.L_IKVM_REAL_\name\()	/* Real entry tests KVM */
 138#define __IKVM_REAL(name)	.L_IKVM_REAL_ ## name
 139#define IKVM_VIRT	.L_IKVM_VIRT_\name\()	/* Virt entry tests KVM */
 140#define ISTACK		.L_ISTACK_\name\()	/* Set regular kernel stack */
 141#define __ISTACK(name)	.L_ISTACK_ ## name
 142#define IRECONCILE	.L_IRECONCILE_\name\()	/* Do RECONCILE_IRQ_STATE */
 143#define IKUAP		.L_IKUAP_\name\()	/* Do KUAP lock */
 144
 145#define INT_DEFINE_BEGIN(n)						\
 146.macro int_define_ ## n name
 147
 148#define INT_DEFINE_END(n)						\
 149.endm ;									\
 150int_define_ ## n n ;							\
 151do_define_int n
 152
 153.macro do_define_int name
 154	.ifndef IVEC
 155		.error "IVEC not defined"
 156	.endif
 157	.ifndef IHSRR
 158		IHSRR=0
 159	.endif
 160	.ifndef IHSRR_IF_HVMODE
 161		IHSRR_IF_HVMODE=0
 162	.endif
 163	.ifndef IAREA
 164		IAREA=PACA_EXGEN
 165	.endif
 166	.ifndef IVIRT
 167		IVIRT=1
 168	.endif
 169	.ifndef IISIDE
 170		IISIDE=0
 171	.endif
 172	.ifndef IDAR
 173		IDAR=0
 174	.endif
 175	.ifndef IDSISR
 176		IDSISR=0
 177	.endif
 178	.ifndef ISET_RI
 179		ISET_RI=1
 180	.endif
 181	.ifndef IBRANCH_TO_COMMON
 182		IBRANCH_TO_COMMON=1
 183	.endif
 184	.ifndef IREALMODE_COMMON
 185		IREALMODE_COMMON=0
 186	.else
 187		.if ! IBRANCH_TO_COMMON
 188			.error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
 189		.endif
 190	.endif
 191	.ifndef IMASK
 192		IMASK=0
 193	.endif
 194	.ifndef IKVM_SKIP
 195		IKVM_SKIP=0
 196	.endif
 197	.ifndef IKVM_REAL
 198		IKVM_REAL=0
 199	.endif
 200	.ifndef IKVM_VIRT
 201		IKVM_VIRT=0
 202	.endif
 203	.ifndef ISTACK
 204		ISTACK=1
 205	.endif
 206	.ifndef IRECONCILE
 207		IRECONCILE=1
 208	.endif
 209	.ifndef IKUAP
 210		IKUAP=1
 211	.endif
 212.endm
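/*
 * A typical use of the above (see data_access further down) looks like:
 *
 *	INT_DEFINE_BEGIN(data_access)
 *		IVEC=0x300
 *		IDAR=1
 *		IDSISR=1
 *	INT_DEFINE_END(data_access)
 *
 * do_define_int then supplies defaults for everything left unset
 * (PACA_EXGEN save area, regular kernel stack, reconcile, KUAP lock, etc).
 */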
 213
 214#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 215#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 216/*
 217 * All interrupts which set HSRR registers, as well as SRESET and MCE and
 218 * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
 219 * so they all generally need to test whether they were taken in guest context.
 220 *
 221 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
 222 * taken with MSR[HV]=0.
 223 *
 224 * Interrupts which set SRR registers (with the above exceptions) do not
 225 * elevate to MSR[HV]=1 mode, though most can be taken when running with
 226 * MSR[HV]=1  (e.g., bare metal kernel and userspace). So these interrupts do
 227 * not need to test whether a guest is running because they get delivered to
 228 * the guest directly, including nested HV KVM guests.
 229 *
 230 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
 231 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
 232 * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
 233 * delivered to the real-mode entry point, therefore such interrupts only test
 234 * KVM in their real mode handlers, and only when PR KVM is possible.
 235 *
 236 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
 237 * delivered in real-mode when the MMU is in hash mode because the MMU
 238 * registers are not set appropriately to translate host addresses. In nested
 239 * radix mode these can be delivered in virt-mode as the host translations are
 240 * used implicitly (see: effective LPID, effective PID).
 241 */
 242
 243/*
 244 * If an interrupt is taken while a guest is running, it is immediately routed
  245 * to KVM to handle. If both HV and PR KVM are possible, KVM interrupts go first
 246 * to kvmppc_interrupt_hv, which handles the PR guest case.
 247 */
 248#define kvmppc_interrupt kvmppc_interrupt_hv
 249#else
 250#define kvmppc_interrupt kvmppc_interrupt_pr
 251#endif
 252
 253.macro KVMTEST name
 254	lbz	r10,HSTATE_IN_GUEST(r13)
 255	cmpwi	r10,0
 256	bne	\name\()_kvm
 257.endm
 258
 259.macro GEN_KVM name
 260	.balign IFETCH_ALIGN_BYTES
 261\name\()_kvm:
 262
 263	.if IKVM_SKIP
 264	cmpwi	r10,KVM_GUEST_MODE_SKIP
 265	beq	89f
 266	.else
 267BEGIN_FTR_SECTION
 268	ld	r10,IAREA+EX_CFAR(r13)
 269	std	r10,HSTATE_CFAR(r13)
 270END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 271	.endif
 272
 273	ld	r10,IAREA+EX_CTR(r13)
 274	mtctr	r10
 275BEGIN_FTR_SECTION
 276	ld	r10,IAREA+EX_PPR(r13)
 277	std	r10,HSTATE_PPR(r13)
 278END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 279	ld	r11,IAREA+EX_R11(r13)
 280	ld	r12,IAREA+EX_R12(r13)
 281	std	r12,HSTATE_SCRATCH0(r13)
 282	sldi	r12,r9,32
 283	ld	r9,IAREA+EX_R9(r13)
 284	ld	r10,IAREA+EX_R10(r13)
 285	/* HSRR variants have the 0x2 bit added to their trap number */
 286	.if IHSRR_IF_HVMODE
 287	BEGIN_FTR_SECTION
 288	ori	r12,r12,(IVEC + 0x2)
 289	FTR_SECTION_ELSE
 290	ori	r12,r12,(IVEC)
 291	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 292	.elseif IHSRR
  293	ori	r12,r12,(IVEC + 0x2)
 294	.else
 295	ori	r12,r12,(IVEC)
 296	.endif
 297	b	kvmppc_interrupt
 298
 299	.if IKVM_SKIP
 30089:	mtocrf	0x80,r9
 301	ld	r10,IAREA+EX_CTR(r13)
 302	mtctr	r10
 303	ld	r9,IAREA+EX_R9(r13)
 304	ld	r10,IAREA+EX_R10(r13)
 305	ld	r11,IAREA+EX_R11(r13)
 306	ld	r12,IAREA+EX_R12(r13)
 307	.if IHSRR_IF_HVMODE
 308	BEGIN_FTR_SECTION
 309	b	kvmppc_skip_Hinterrupt
 310	FTR_SECTION_ELSE
 311	b	kvmppc_skip_interrupt
 312	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 313	.elseif IHSRR
 314	b	kvmppc_skip_Hinterrupt
 315	.else
 316	b	kvmppc_skip_interrupt
 317	.endif
 318	.endif
 319.endm
 320
 321#else
 322.macro KVMTEST name
 323.endm
 324.macro GEN_KVM name
 325.endm
 326#endif
 327
 328/*
 329 * This is the BOOK3S interrupt entry code macro.
 330 *
 331 * This can result in one of several things happening:
 332 * - Branch to the _common handler, relocated, in virtual mode.
 333 *   These are normal interrupts (synchronous and asynchronous) handled by
 334 *   the kernel.
 335 * - Branch to KVM, relocated but real mode interrupts remain in real mode.
 336 *   These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by
 337 *   / intended for host or guest kernel, but KVM must always be involved
 338 *   because the machine state is set for guest execution.
 339 * - Branch to the masked handler, unrelocated.
 340 *   These occur when maskable asynchronous interrupts are taken with the
 341 *   irq_soft_mask set.
 342 * - Branch to an "early" handler in real mode but relocated.
 343 *   This is done if early=1. MCE and HMI use these to handle errors in real
 344 *   mode.
 345 * - Fall through and continue executing in real, unrelocated mode.
 346 *   This is done if early=2.
 347 */
 348
 349.macro GEN_BRANCH_TO_COMMON name, virt
 350	.if IREALMODE_COMMON
 351	LOAD_HANDLER(r10, \name\()_common)
 352	mtctr	r10
 353	bctr
 354	.else
 355	.if \virt
 356#ifndef CONFIG_RELOCATABLE
 357	b	\name\()_common_virt
 358#else
 359	LOAD_HANDLER(r10, \name\()_common_virt)
 360	mtctr	r10
 361	bctr
 362#endif
 363	.else
 364	LOAD_HANDLER(r10, \name\()_common_real)
 365	mtctr	r10
 366	bctr
 367	.endif
 368	.endif
 369.endm
 370
 371.macro GEN_INT_ENTRY name, virt, ool=0
 372	SET_SCRATCH0(r13)			/* save r13 */
 373	GET_PACA(r13)
 374	std	r9,IAREA+EX_R9(r13)		/* save r9 */
 375BEGIN_FTR_SECTION
 376	mfspr	r9,SPRN_PPR
 377END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 378	HMT_MEDIUM
 379	std	r10,IAREA+EX_R10(r13)		/* save r10 - r12 */
 380BEGIN_FTR_SECTION
 381	mfspr	r10,SPRN_CFAR
 382END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 383	.if \ool
 384	.if !\virt
 385	b	tramp_real_\name
 386	.pushsection .text
 387	TRAMP_REAL_BEGIN(tramp_real_\name)
 388	.else
 389	b	tramp_virt_\name
 390	.pushsection .text
 391	TRAMP_VIRT_BEGIN(tramp_virt_\name)
 392	.endif
 393	.endif
 394
 395BEGIN_FTR_SECTION
 396	std	r9,IAREA+EX_PPR(r13)
 397END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 398BEGIN_FTR_SECTION
 399	std	r10,IAREA+EX_CFAR(r13)
 400END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 401	INTERRUPT_TO_KERNEL
 402	mfctr	r10
 403	std	r10,IAREA+EX_CTR(r13)
 404	mfcr	r9
 405	std	r11,IAREA+EX_R11(r13)
 406	std	r12,IAREA+EX_R12(r13)
 407
 408	/*
 409	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
 410	 * because a d-side MCE will clobber those registers so is
 411	 * not recoverable if they are live.
 412	 */
 413	GET_SCRATCH0(r10)
 414	std	r10,IAREA+EX_R13(r13)
 415	.if IDAR && !IISIDE
 416	.if IHSRR
 417	mfspr	r10,SPRN_HDAR
 418	.else
 419	mfspr	r10,SPRN_DAR
 420	.endif
 421	std	r10,IAREA+EX_DAR(r13)
 422	.endif
 423	.if IDSISR && !IISIDE
 424	.if IHSRR
 425	mfspr	r10,SPRN_HDSISR
 426	.else
 427	mfspr	r10,SPRN_DSISR
 428	.endif
 429	stw	r10,IAREA+EX_DSISR(r13)
 430	.endif
 431
 432	.if IHSRR_IF_HVMODE
 433	BEGIN_FTR_SECTION
 434	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
 435	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
 436	FTR_SECTION_ELSE
 437	mfspr	r11,SPRN_SRR0		/* save SRR0 */
 438	mfspr	r12,SPRN_SRR1		/* and SRR1 */
 439	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 440	.elseif IHSRR
 441	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
 442	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
 443	.else
 444	mfspr	r11,SPRN_SRR0		/* save SRR0 */
 445	mfspr	r12,SPRN_SRR1		/* and SRR1 */
 446	.endif
 447
 448	.if IBRANCH_TO_COMMON
 449	GEN_BRANCH_TO_COMMON \name \virt
 450	.endif
 451
 452	.if \ool
 453	.popsection
 454	.endif
 455.endm
 456
 457/*
 458 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
 459 * entry, except in the case of the real-mode handlers which require
 460 * __GEN_REALMODE_COMMON_ENTRY.
 461 *
 462 * This switches to virtual mode and sets MSR[RI].
 463 */
 464.macro __GEN_COMMON_ENTRY name
 465DEFINE_FIXED_SYMBOL(\name\()_common_real)
 466\name\()_common_real:
 467	.if IKVM_REAL
 468		KVMTEST \name
 469	.endif
 470
 471	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
 472	/* MSR[RI] is clear iff using SRR regs */
  473	.if IHSRR_IF_HVMODE
 474	BEGIN_FTR_SECTION
 475	xori	r10,r10,MSR_RI
 476	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
 477	.elseif ! IHSRR
 478	xori	r10,r10,MSR_RI
 479	.endif
 480	mtmsrd	r10
 481
 482	.if IVIRT
 483	.if IKVM_VIRT
 484	b	1f /* skip the virt test coming from real */
 485	.endif
 486
 487	.balign IFETCH_ALIGN_BYTES
 488DEFINE_FIXED_SYMBOL(\name\()_common_virt)
 489\name\()_common_virt:
 490	.if IKVM_VIRT
 491		KVMTEST \name
 4921:
 493	.endif
 494	.endif /* IVIRT */
 495.endm
 496
 497/*
 498 * Don't switch to virt mode. Used for early MCE and HMI handlers that
 499 * want to run in real mode.
 500 */
 501.macro __GEN_REALMODE_COMMON_ENTRY name
 502DEFINE_FIXED_SYMBOL(\name\()_common_real)
 503\name\()_common_real:
 504	.if IKVM_REAL
 505		KVMTEST \name
 506	.endif
 507.endm
 508
 509.macro __GEN_COMMON_BODY name
 510	.if IMASK
 511		.if ! ISTACK
 512		.error "No support for masked interrupt to use custom stack"
 513		.endif
 514
 515		/* If coming from user, skip soft-mask tests. */
 516		andi.	r10,r12,MSR_PR
 517		bne	2f
 518
 519		/* Kernel code running below __end_interrupts is implicitly
 520		 * soft-masked */
 521		LOAD_HANDLER(r10, __end_interrupts)
 522		cmpld	r11,r10
 523		li	r10,IMASK
 524		blt-	1f
 525
 526		/* Test the soft mask state against our interrupt's bit */
 527		lbz	r10,PACAIRQSOFTMASK(r13)
 5281:		andi.	r10,r10,IMASK
 529		/* Associate vector numbers with bits in paca->irq_happened */
 530		.if IVEC == 0x500 || IVEC == 0xea0
 531		li	r10,PACA_IRQ_EE
 532		.elseif IVEC == 0x900
 533		li	r10,PACA_IRQ_DEC
 534		.elseif IVEC == 0xa00 || IVEC == 0xe80
 535		li	r10,PACA_IRQ_DBELL
 536		.elseif IVEC == 0xe60
 537		li	r10,PACA_IRQ_HMI
 538		.elseif IVEC == 0xf00
 539		li	r10,PACA_IRQ_PMI
 540		.else
 541		.abort "Bad maskable vector"
 542		.endif
 543
 544		.if IHSRR_IF_HVMODE
 545		BEGIN_FTR_SECTION
 546		bne	masked_Hinterrupt
 547		FTR_SECTION_ELSE
 548		bne	masked_interrupt
 549		ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 550		.elseif IHSRR
 551		bne	masked_Hinterrupt
 552		.else
 553		bne	masked_interrupt
 554		.endif
 555	.endif
 556
 557	.if ISTACK
 558	andi.	r10,r12,MSR_PR		/* See if coming from user	*/
 5592:	mr	r10,r1			/* Save r1			*/
 560	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack	*/
 561	beq-	100f
 562	ld	r1,PACAKSAVE(r13)	/* kernel stack to use		*/
 563100:	tdgei	r1,-INT_FRAME_SIZE	/* trap if r1 is in userspace	*/
 564	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
 565	.endif
 566
 567	std	r9,_CCR(r1)		/* save CR in stackframe	*/
 568	std	r11,_NIP(r1)		/* save SRR0 in stackframe	*/
 569	std	r12,_MSR(r1)		/* save SRR1 in stackframe	*/
 570	std	r10,0(r1)		/* make stack chain pointer	*/
 571	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
 572	std	r10,GPR1(r1)		/* save r1 in stackframe	*/
 573
 574	.if ISET_RI
 575	li	r10,MSR_RI
 576	mtmsrd	r10,1			/* Set MSR_RI */
 577	.endif
 578
 579	.if ISTACK
 580	.if IKUAP
 581	kuap_save_amr_and_lock r9, r10, cr1, cr0
 582	.endif
 583	beq	101f			/* if from kernel mode		*/
 584	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10)
 585BEGIN_FTR_SECTION
 586	ld	r9,IAREA+EX_PPR(r13)	/* Read PPR from paca		*/
 587	std	r9,_PPR(r1)
 588END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 589101:
 590	.else
 591	.if IKUAP
 592	kuap_save_amr_and_lock r9, r10, cr1
 593	.endif
 594	.endif
 595
 596	/* Save original regs values from save area to stack frame. */
 597	ld	r9,IAREA+EX_R9(r13)	/* move r9, r10 to stackframe	*/
 598	ld	r10,IAREA+EX_R10(r13)
 599	std	r9,GPR9(r1)
 600	std	r10,GPR10(r1)
 601	ld	r9,IAREA+EX_R11(r13)	/* move r11 - r13 to stackframe	*/
 602	ld	r10,IAREA+EX_R12(r13)
 603	ld	r11,IAREA+EX_R13(r13)
 604	std	r9,GPR11(r1)
 605	std	r10,GPR12(r1)
 606	std	r11,GPR13(r1)
 607
 608	SAVE_NVGPRS(r1)
 609
 610	.if IDAR
 611	.if IISIDE
 612	ld	r10,_NIP(r1)
 613	.else
 614	ld	r10,IAREA+EX_DAR(r13)
 615	.endif
 616	std	r10,_DAR(r1)
 617	.endif
 618
 619	.if IDSISR
 620	.if IISIDE
 621	ld	r10,_MSR(r1)
 622	lis	r11,DSISR_SRR1_MATCH_64S@h
 623	and	r10,r10,r11
 624	.else
 625	lwz	r10,IAREA+EX_DSISR(r13)
 626	.endif
 627	std	r10,_DSISR(r1)
 628	.endif
 629
 630BEGIN_FTR_SECTION
 631	ld	r10,IAREA+EX_CFAR(r13)
 632	std	r10,ORIG_GPR3(r1)
 633END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 634	ld	r10,IAREA+EX_CTR(r13)
 635	std	r10,_CTR(r1)
 636	std	r2,GPR2(r1)		/* save r2 in stackframe	*/
 637	SAVE_4GPRS(3, r1)		/* save r3 - r6 in stackframe   */
 638	SAVE_2GPRS(7, r1)		/* save r7, r8 in stackframe	*/
 639	mflr	r9			/* Get LR, later save to stack	*/
 640	ld	r2,PACATOC(r13)		/* get kernel TOC into r2	*/
 641	std	r9,_LINK(r1)
 642	lbz	r10,PACAIRQSOFTMASK(r13)
 643	mfspr	r11,SPRN_XER		/* save XER in stackframe	*/
 644	std	r10,SOFTE(r1)
 645	std	r11,_XER(r1)
 646	li	r9,IVEC
 647	std	r9,_TRAP(r1)		/* set trap number		*/
 648	li	r10,0
 649	ld	r11,exception_marker@toc(r2)
 650	std	r10,RESULT(r1)		/* clear regs->result		*/
 651	std	r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame	*/
 652
 653	.if ISTACK
 654	ACCOUNT_STOLEN_TIME
 655	.endif
 656
 657	.if IRECONCILE
 658	RECONCILE_IRQ_STATE(r10, r11)
 659	.endif
 660.endm
 661
 662/*
 663 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 664 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 665 * SRR1, and relocation is on.
 666 *
 667 * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
 668 * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
 669 */
 670.macro GEN_COMMON name
 671	__GEN_COMMON_ENTRY \name
 672	__GEN_COMMON_BODY \name
 673.endm
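/*
 * The typical shape of a handler built from these pieces (see e.g. the
 * alignment and hardware interrupt handlers below) is:
 *
 *	EXC_COMMON_BEGIN(name_common)
 *		GEN_COMMON name
 *		addi	r3,r1,STACK_FRAME_OVERHEAD
 *		bl	name_exception
 *		b	interrupt_return
 *
 * where "name" is whatever was given to INT_DEFINE_BEGIN/END.
 */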
 674
 675/*
 676 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 677 * standard exception.
 678 */
 679.macro EXCEPTION_RESTORE_REGS hsrr=0
 680	/* Move original SRR0 and SRR1 into the respective regs */
 681	ld	r9,_MSR(r1)
 682	.if \hsrr
 683	mtspr	SPRN_HSRR1,r9
 684	.else
 685	mtspr	SPRN_SRR1,r9
 686	.endif
 687	ld	r9,_NIP(r1)
 688	.if \hsrr
 689	mtspr	SPRN_HSRR0,r9
 690	.else
 691	mtspr	SPRN_SRR0,r9
 692	.endif
 693	ld	r9,_CTR(r1)
 694	mtctr	r9
 695	ld	r9,_XER(r1)
 696	mtxer	r9
 697	ld	r9,_LINK(r1)
 698	mtlr	r9
 699	ld	r9,_CCR(r1)
 700	mtcr	r9
 701	REST_8GPRS(2, r1)
 702	REST_4GPRS(10, r1)
 703	REST_GPR(0, r1)
 704	/* restore original r1. */
 705	ld	r1,GPR1(r1)
 706.endm
 707
 708#define RUNLATCH_ON				\
 709BEGIN_FTR_SECTION				\
 710	ld	r3, PACA_THREAD_INFO(r13);	\
 711	ld	r4,TI_LOCAL_FLAGS(r3);		\
 712	andi.	r0,r4,_TLF_RUNLATCH;		\
 713	beql	ppc64_runlatch_on_trampoline;	\
 714END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
 715
 716/*
 717 * When the idle code in power4_idle puts the CPU into NAP mode,
 718 * it has to do so in a loop, and relies on the external interrupt
 719 * and decrementer interrupt entry code to get it out of the loop.
 720 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
 721 * to signal that it is in the loop and needs help to get out.
 722 */
 723#ifdef CONFIG_PPC_970_NAP
 724#define FINISH_NAP				\
 725BEGIN_FTR_SECTION				\
 726	ld	r11, PACA_THREAD_INFO(r13);	\
 727	ld	r9,TI_LOCAL_FLAGS(r11);		\
 728	andi.	r10,r9,_TLF_NAPPING;		\
 729	bnel	power4_fixup_nap;		\
 730END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
 731#else
 732#define FINISH_NAP
 733#endif
 734
 735/*
 736 * There are a few constraints to be concerned with.
 737 * - Real mode exceptions code/data must be located at their physical location.
 738 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 739 * - Fixed location code must not call directly beyond the __end_interrupts
 740 *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
 741 *   must be used.
 742 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
 743 *   virtual 0xc00...
 744 * - Conditional branch targets must be within +/-32K of caller.
 745 *
 746 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 747 * therefore don't have to run in physically located code or rfid to
 748 * virtual mode kernel code. However on relocatable kernels they do have
 749 * to branch to KERNELBASE offset because the rest of the kernel (outside
 750 * the exception vectors) may be located elsewhere.
 751 *
 752 * Virtual exceptions correspond with physical, except their entry points
 753 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 754 * offset applied. Virtual exceptions are enabled with the Alternate
 755 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 756 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 757 * cause exceptions to be delivered in real mode.
 758 *
 759 * The scv instructions are a special case. They get a 0x3000 offset applied.
 760 * scv exceptions have unique reentrancy properties, see below.
 761 *
 762 * It's impossible to receive interrupts below 0x300 via AIL.
 763 *
 764 * KVM: None of the virtual exceptions are from the guest. Anything that
 765 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 766 *
 767 *
 768 * We layout physical memory as follows:
 769 * 0x0000 - 0x00ff : Secondary processor spin code
 770 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 771 * 0x1900 - 0x2fff : Real mode trampolines
 772 * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 773 * 0x5900 - 0x6fff : Relon mode trampolines
 774 * 0x7000 - 0x7fff : FWNMI data area
 775 * 0x8000 -   .... : Common interrupt handlers, remaining early
 776 *                   setup code, rest of kernel.
 777 *
 778 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 779 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 780 * vectors there.
 781 */
 782OPEN_FIXED_SECTION(real_vectors,        0x0100, 0x1900)
 783OPEN_FIXED_SECTION(real_trampolines,    0x1900, 0x3000)
 784OPEN_FIXED_SECTION(virt_vectors,        0x3000, 0x5900)
 785OPEN_FIXED_SECTION(virt_trampolines,    0x5900, 0x7000)
 786
 787#ifdef CONFIG_PPC_POWERNV
 788	.globl start_real_trampolines
 789	.globl end_real_trampolines
 790	.globl start_virt_trampolines
 791	.globl end_virt_trampolines
 792#endif
 793
 794#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
 795/*
 796 * Data area reserved for FWNMI option.
 797 * This address (0x7000) is fixed by the RPA.
 798 * pseries and powernv need to keep the whole page from
 799 * 0x7000 to 0x8000 free for use by the firmware
 800 */
 801ZERO_FIXED_SECTION(fwnmi_page,          0x7000, 0x8000)
 802OPEN_TEXT_SECTION(0x8000)
 803#else
 804OPEN_TEXT_SECTION(0x7000)
 805#endif
 806
 807USE_FIXED_SECTION(real_vectors)
 808
 809/*
 810 * This is the start of the interrupt handlers for pSeries
 811 * This code runs with relocation off.
 812 * Code from here to __end_interrupts gets copied down to real
 813 * address 0x100 when we are running a relocatable kernel.
 814 * Therefore any relative branches in this section must only
 815 * branch to labels in this section.
 816 */
 817	.globl __start_interrupts
 818__start_interrupts:
 819
 820/**
 821 * Interrupt 0x3000 - System Call Vectored Interrupt (syscall).
 822 * This is a synchronous interrupt invoked with the "scv" instruction. The
 823 * system call does not alter the HV bit, so it is directed to the OS.
 824 *
 825 * Handling:
 826 * scv instructions enter the kernel without changing EE, RI, ME, or HV.
 827 * In particular, this means we can take a maskable interrupt at any point
 828 * in the scv handler, which is unlike any other interrupt. This is solved
 829 * by treating the instruction addresses below __end_interrupts as being
 830 * soft-masked.
 831 *
 832 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
 833 * ensure scv is never executed with relocation off, which means AIL-0
 834 * should never happen.
 835 *
  836 * Before leaving the below __end_interrupts text, at least one of the following
 837 * must be true:
 838 * - MSR[PR]=1 (i.e., return to userspace)
 839 * - MSR_EE|MSR_RI is set (no reentrant exceptions)
 840 * - Standard kernel environment is set up (stack, paca, etc)
 841 *
 842 * Call convention:
 843 *
 844 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
 845 */
 846EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
 847	/* SCV 0 */
 848	mr	r9,r13
 849	GET_PACA(r13)
 850	mflr	r11
 851	mfctr	r12
 852	li	r10,IRQS_ALL_DISABLED
 853	stb	r10,PACAIRQSOFTMASK(r13)
 854#ifdef CONFIG_RELOCATABLE
 855	b	system_call_vectored_tramp
 856#else
 857	b	system_call_vectored_common
 858#endif
 859	nop
 860
 861	/* SCV 1 - 127 */
 862	.rept	127
 863	mr	r9,r13
 864	GET_PACA(r13)
 865	mflr	r11
 866	mfctr	r12
 867	li	r10,IRQS_ALL_DISABLED
 868	stb	r10,PACAIRQSOFTMASK(r13)
 869	li	r0,-1 /* cause failure */
 870#ifdef CONFIG_RELOCATABLE
 871	b	system_call_vectored_sigill_tramp
 872#else
 873	b	system_call_vectored_sigill
 874#endif
 875	.endr
 876EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)
 877
 878#ifdef CONFIG_RELOCATABLE
 879TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
 880	__LOAD_HANDLER(r10, system_call_vectored_common)
 881	mtctr	r10
 882	bctr
 883
 884TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
 885	__LOAD_HANDLER(r10, system_call_vectored_sigill)
 886	mtctr	r10
 887	bctr
 888#endif
 889
 890
 891/* No virt vectors corresponding with 0x0..0x100 */
 892EXC_VIRT_NONE(0x4000, 0x100)
 893
 894
 895/**
 896 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
 897 * This is a non-maskable, asynchronous interrupt always taken in real-mode.
 898 * It is caused by:
 899 * - Wake from power-saving state, on powernv.
 900 * - An NMI from another CPU, triggered by firmware or hypercall.
 901 * - As crash/debug signal injected from BMC, firmware or hypervisor.
 902 *
 903 * Handling:
 904 * Power-save wakeup is the only performance critical path, so this is
  905 * determined as quickly as possible first. In this case volatile registers
 906 * can be discarded and SPRs like CFAR don't need to be read.
 907 *
 908 * If not a powersave wakeup, then it's run as a regular interrupt, however
 909 * it uses its own stack and PACA save area to preserve the regular kernel
 910 * environment for debugging.
 911 *
 912 * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
 913 * or SCRATCH0 is in use, etc. may cause a crash. It's also not entirely
 914 * correct to switch to virtual mode to run the regular interrupt handler
 915 * because it might be interrupted when the MMU is in a bad state (e.g., SLB
 916 * is clear).
 917 *
 918 * FWNMI:
 919 * PAPR specifies a "fwnmi" facility which sends the sreset to a different
 920 * entry point with a different register set up. Some hypervisors will
 921 * send the sreset to 0x100 in the guest if it is not fwnmi capable.
 922 *
 923 * KVM:
 924 * Unlike most SRR interrupts, this may be taken by the host while executing
 925 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
 926 * mode and then raise the sreset.
 927 */
 928INT_DEFINE_BEGIN(system_reset)
 929	IVEC=0x100
 930	IAREA=PACA_EXNMI
 931	IVIRT=0 /* no virt entry point */
 932	/*
 933	 * MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
 934	 * being used, so a nested NMI exception would corrupt it.
 935	 */
 936	ISET_RI=0
 937	ISTACK=0
 938	IRECONCILE=0
 939	IKVM_REAL=1
 940INT_DEFINE_END(system_reset)
 941
 942EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
 943#ifdef CONFIG_PPC_P7_NAP
 944	/*
 945	 * If running native on arch 2.06 or later, check if we are waking up
 946	 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
 947	 * bits 46:47. A non-0 value indicates that we are coming from a power
 948	 * saving state. The idle wakeup handler initially runs in real mode,
 949	 * but we branch to the 0xc000... address so we can turn on relocation
 950	 * with mtmsrd later, after SPRs are restored.
 951	 *
 952	 * Careful to minimise cost for the fast path (idle wakeup) while
 953	 * also avoiding clobbering CFAR for the debug path (non-idle).
 954	 *
 955	 * For the idle wake case volatile registers can be clobbered, which
 956	 * is why we use those initially. If it turns out to not be an idle
 957	 * wake, carefully put everything back the way it was, so we can use
 958	 * common exception macros to handle it.
 959	 */
 960BEGIN_FTR_SECTION
 961	SET_SCRATCH0(r13)
 962	GET_PACA(r13)
 963	std	r3,PACA_EXNMI+0*8(r13)
 964	std	r4,PACA_EXNMI+1*8(r13)
 965	std	r5,PACA_EXNMI+2*8(r13)
 966	mfspr	r3,SPRN_SRR1
 967	mfocrf	r4,0x80
 968	rlwinm.	r5,r3,47-31,30,31
 969	bne+	system_reset_idle_wake
 970	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
 971	mtocrf	0x80,r4
 972	ld	r3,PACA_EXNMI+0*8(r13)
 973	ld	r4,PACA_EXNMI+1*8(r13)
 974	ld	r5,PACA_EXNMI+2*8(r13)
 975	GET_SCRATCH0(r13)
 976END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 977#endif
 978
 979	GEN_INT_ENTRY system_reset, virt=0
 980	/*
 981	 * In theory, we should not enable relocation here if it was disabled
 982	 * in SRR1, because the MMU may not be configured to support it (e.g.,
 983	 * SLB may have been cleared). In practice, there should only be a few
 984	 * small windows where that's the case, and sreset is considered to
 985	 * be dangerous anyway.
 986	 */
 987EXC_REAL_END(system_reset, 0x100, 0x100)
 988EXC_VIRT_NONE(0x4100, 0x100)
 989
 990#ifdef CONFIG_PPC_P7_NAP
 991TRAMP_REAL_BEGIN(system_reset_idle_wake)
 992	/* We are waking up from idle, so may clobber any volatile register */
 993	cmpwi	cr1,r5,2
 994	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
 995	BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
 996#endif
 997
 998#ifdef CONFIG_PPC_PSERIES
 999/*
1000 * Vectors for the FWNMI option.  Share common code.
1001 */
1002TRAMP_REAL_BEGIN(system_reset_fwnmi)
1003	/* XXX: fwnmi guest could run a nested/PR guest, so why no test?  */
1004	__IKVM_REAL(system_reset)=0
1005	GEN_INT_ENTRY system_reset, virt=0
1006
1007#endif /* CONFIG_PPC_PSERIES */
1008
1009EXC_COMMON_BEGIN(system_reset_common)
1010	__GEN_COMMON_ENTRY system_reset
1011	/*
1012	 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
1013	 * to recover, but nested NMI will notice in_nmi and not recover
1014	 * because of the use of the NMI stack. in_nmi reentrancy is tested in
1015	 * system_reset_exception.
1016	 */
1017	lhz	r10,PACA_IN_NMI(r13)
1018	addi	r10,r10,1
1019	sth	r10,PACA_IN_NMI(r13)
1020	li	r10,MSR_RI
1021	mtmsrd 	r10,1
1022
1023	mr	r10,r1
1024	ld	r1,PACA_NMI_EMERG_SP(r13)
1025	subi	r1,r1,INT_FRAME_SIZE
1026	__GEN_COMMON_BODY system_reset
1027	/*
1028	 * Set IRQS_ALL_DISABLED unconditionally so irqs_disabled() does
1029	 * the right thing. We do not want to reconcile because that goes
1030	 * through irq tracing which we don't want in NMI.
1031	 *
1032	 * Save PACAIRQHAPPENED to RESULT (otherwise unused), and set HARD_DIS
1033	 * as we are running with MSR[EE]=0.
1034	 */
1035	li	r10,IRQS_ALL_DISABLED
1036	stb	r10,PACAIRQSOFTMASK(r13)
1037	lbz	r10,PACAIRQHAPPENED(r13)
1038	std	r10,RESULT(r1)
1039	ori	r10,r10,PACA_IRQ_HARD_DIS
1040	stb	r10,PACAIRQHAPPENED(r13)
1041
1042	addi	r3,r1,STACK_FRAME_OVERHEAD
1043	bl	system_reset_exception
1044
1045	/* Clear MSR_RI before setting SRR0 and SRR1. */
1046	li	r9,0
1047	mtmsrd	r9,1
1048
1049	/*
1050	 * MSR_RI is clear, now we can decrement paca->in_nmi.
1051	 */
1052	lhz	r10,PACA_IN_NMI(r13)
1053	subi	r10,r10,1
1054	sth	r10,PACA_IN_NMI(r13)
1055
1056	/*
1057	 * Restore soft mask settings.
1058	 */
1059	ld	r10,RESULT(r1)
1060	stb	r10,PACAIRQHAPPENED(r13)
1061	ld	r10,SOFTE(r1)
1062	stb	r10,PACAIRQSOFTMASK(r13)
1063
1064	kuap_restore_amr r9, r10
1065	EXCEPTION_RESTORE_REGS
1066	RFI_TO_USER_OR_KERNEL
1067
1068	GEN_KVM system_reset
1069
1070
1071/**
1072 * Interrupt 0x200 - Machine Check Interrupt (MCE).
1073 * This is a non-maskable interrupt always taken in real-mode. It can be
1074 * synchronous or asynchronous, caused by hardware or software, and it may be
1075 * taken in a power-saving state.
1076 *
1077 * Handling:
1078 * Similarly to system reset, this uses its own stack and PACA save area,
1079 * the difference is re-entrancy is allowed on the machine check stack.
1080 *
1081 * machine_check_early is run in real mode, and carefully decodes the
1082 * machine check and tries to handle it (e.g., flush the SLB if there was an
1083 * error detected there), determines if it was recoverable and logs the
1084 * event.
1085 *
1086 * This early code does not "reconcile" irq soft-mask state like SRESET or
1087 * regular interrupts do, so irqs_disabled() among other things may not work
1088 * properly (irq disable/enable already doesn't work because irq tracing can
1089 * not work in real mode).
1090 *
1091 * Then, depending on the execution context when the interrupt is taken, there
1092 * are 3 main actions:
1093 * - Executing in kernel mode. The event is queued with irq_work, which means
1094 *   it is handled when it is next safe to do so (i.e., the kernel has enabled
1095 *   interrupts), which could be immediately when the interrupt returns. This
1096 *   avoids nasty issues like switching to virtual mode when the MMU is in a
1097 *   bad state, or when executing OPAL code. (SRESET is exposed to such issues,
1098 *   but it has different priorities). Check to see if the CPU was in power
1099 *   save, and return via the wake up code if it was.
1100 *
1101 * - Executing in user mode. machine_check_exception is run like a normal
1102 *   interrupt handler, which processes the data generated by the early handler.
1103 *
1104 * - Executing in guest mode. The interrupt is run with its KVM test, and
1105 *   branches to KVM to deal with. KVM may queue the event for the host
1106 *   to report later.
1107 *
1108 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
1109 * or SCRATCH0 is in use, it may cause a crash.
1110 *
1111 * KVM:
1112 * See SRESET.
1113 */
1114INT_DEFINE_BEGIN(machine_check_early)
1115	IVEC=0x200
1116	IAREA=PACA_EXMC
1117	IVIRT=0 /* no virt entry point */
1118	IREALMODE_COMMON=1
1119	/*
1120	 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
1121	 * nested machine check corrupts it. machine_check_common enables
1122	 * MSR_RI.
1123	 */
1124	ISET_RI=0
1125	ISTACK=0
1126	IDAR=1
1127	IDSISR=1
1128	IRECONCILE=0
1129	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
1130INT_DEFINE_END(machine_check_early)
1131
1132INT_DEFINE_BEGIN(machine_check)
1133	IVEC=0x200
1134	IAREA=PACA_EXMC
1135	IVIRT=0 /* no virt entry point */
1136	ISET_RI=0
1137	IDAR=1
1138	IDSISR=1
1139	IKVM_SKIP=1
1140	IKVM_REAL=1
1141INT_DEFINE_END(machine_check)
1142
1143EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
1144	GEN_INT_ENTRY machine_check_early, virt=0
1145EXC_REAL_END(machine_check, 0x200, 0x100)
1146EXC_VIRT_NONE(0x4200, 0x100)
1147
1148#ifdef CONFIG_PPC_PSERIES
1149TRAMP_REAL_BEGIN(machine_check_fwnmi)
1150	/* See comment at machine_check exception, don't turn on RI */
1151	GEN_INT_ENTRY machine_check_early, virt=0
1152#endif
1153
1154#define MACHINE_CHECK_HANDLER_WINDUP			\
1155	/* Clear MSR_RI before setting SRR0 and SRR1. */\
1156	li	r9,0;					\
1157	mtmsrd	r9,1;		/* Clear MSR_RI */	\
1158	/* Decrement paca->in_mce now RI is clear. */	\
1159	lhz	r12,PACA_IN_MCE(r13);			\
1160	subi	r12,r12,1;				\
1161	sth	r12,PACA_IN_MCE(r13);			\
1162	EXCEPTION_RESTORE_REGS
1163
1164EXC_COMMON_BEGIN(machine_check_early_common)
1165	__GEN_REALMODE_COMMON_ENTRY machine_check_early
1166
1167	/*
1168	 * Switch to mc_emergency stack and handle re-entrancy (we limit
 1169	 * the nested MCE up to level 4 to avoid stack overflow).
1170	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
1171	 *
1172	 * We use paca->in_mce to check whether this is the first entry or
1173	 * nested machine check. We increment paca->in_mce to track nested
1174	 * machine checks.
1175	 *
1176	 * If this is the first entry then set stack pointer to
1177	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
1178	 * stack frame on mc_emergency stack.
1179	 *
1180	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
1181	 * checkstop if we get another machine check exception before we do
1182	 * rfid with MSR_ME=1.
1183	 *
1184	 * This interrupt can wake directly from idle. If that is the case,
1185	 * the machine check is handled then the idle wakeup code is called
1186	 * to restore state.
1187	 */
1188	lhz	r10,PACA_IN_MCE(r13)
1189	cmpwi	r10,0			/* Are we in nested machine check */
1190	cmpwi	cr1,r10,MAX_MCE_DEPTH	/* Are we at maximum nesting */
1191	addi	r10,r10,1		/* increment paca->in_mce */
1192	sth	r10,PACA_IN_MCE(r13)
1193
1194	mr	r10,r1			/* Save r1 */
1195	bne	1f
1196	/* First machine check entry */
1197	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
11981:	/* Limit nested MCE to level 4 to avoid stack overflow */
1199	bgt	cr1,unrecoverable_mce	/* Check if we hit limit of 4 */
1200	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
1201
1202	__GEN_COMMON_BODY machine_check_early
1203
1204BEGIN_FTR_SECTION
1205	bl	enable_machine_check
1206END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
1207	li	r10,MSR_RI
1208	mtmsrd	r10,1
1209
1210	/*
1211	 * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see
1212	 * system_reset_common)
1213	 */
1214	li	r10,IRQS_ALL_DISABLED
1215	stb	r10,PACAIRQSOFTMASK(r13)
1216	lbz	r10,PACAIRQHAPPENED(r13)
1217	std	r10,RESULT(r1)
1218	ori	r10,r10,PACA_IRQ_HARD_DIS
1219	stb	r10,PACAIRQHAPPENED(r13)
1220
1221	addi	r3,r1,STACK_FRAME_OVERHEAD
1222	bl	machine_check_early
1223	std	r3,RESULT(r1)	/* Save result */
1224	ld	r12,_MSR(r1)
1225
1226	/*
1227	 * Restore soft mask settings.
1228	 */
1229	ld	r10,RESULT(r1)
1230	stb	r10,PACAIRQHAPPENED(r13)
1231	ld	r10,SOFTE(r1)
1232	stb	r10,PACAIRQSOFTMASK(r13)
1233
1234#ifdef CONFIG_PPC_P7_NAP
1235	/*
1236	 * Check if thread was in power saving mode. We come here when any
1237	 * of the following is true:
1238	 * a. thread wasn't in power saving mode
1239	 * b. thread was in power saving mode with no state loss,
1240	 *    supervisor state loss or hypervisor state loss.
1241	 *
1242	 * Go back to nap/sleep/winkle mode again if (b) is true.
1243	 */
1244BEGIN_FTR_SECTION
1245	rlwinm.	r11,r12,47-31,30,31
1246	bne	machine_check_idle_common
1247END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
1248#endif
1249
1250#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1251	/*
1252	 * Check if we are coming from guest. If yes, then run the normal
1253	 * exception handler which will take the
1254	 * machine_check_kvm->kvmppc_interrupt branch to deliver the MC event
1255	 * to guest.
1256	 */
1257	lbz	r11,HSTATE_IN_GUEST(r13)
1258	cmpwi	r11,0			/* Check if coming from guest */
1259	bne	mce_deliver		/* continue if we are. */
1260#endif
1261
1262	/*
1263	 * Check if we are coming from userspace. If yes, then run the normal
1264	 * exception handler which will deliver the MC event to this kernel.
1265	 */
1266	andi.	r11,r12,MSR_PR		/* See if coming from user. */
1267	bne	mce_deliver		/* continue in V mode if we are. */
1268
1269	/*
1270	 * At this point we are coming from kernel context.
1271	 * Queue up the MCE event and return from the interrupt.
1272	 * But before that, check if this is an un-recoverable exception.
1273	 * If yes, then stay on emergency stack and panic.
1274	 */
1275	andi.	r11,r12,MSR_RI
1276	beq	unrecoverable_mce
1277
1278	/*
1279	 * Check if we have successfully handled/recovered from error, if not
1280	 * then stay on emergency stack and panic.
1281	 */
1282	ld	r3,RESULT(r1)	/* Load result */
1283	cmpdi	r3,0		/* see if we handled MCE successfully */
1284	beq	unrecoverable_mce /* if !handled then panic */
1285
1286	/*
1287	 * Return from MC interrupt.
1288	 * Queue up the MCE event so that we can log it later, while
1289	 * returning from kernel or opal call.
1290	 */
1291	bl	machine_check_queue_event
1292	MACHINE_CHECK_HANDLER_WINDUP
1293	RFI_TO_KERNEL
1294
1295mce_deliver:
1296	/*
1297	 * This is a host user or guest MCE. Restore all registers, then
1298	 * run the "late" handler. For host user, this will run the
1299	 * machine_check_exception handler in virtual mode like a normal
1300	 * interrupt handler. For guest, this will trigger the KVM test
1301	 * and branch to the KVM interrupt similarly to other interrupts.
1302	 */
1303BEGIN_FTR_SECTION
1304	ld	r10,ORIG_GPR3(r1)
1305	mtspr	SPRN_CFAR,r10
1306END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1307	MACHINE_CHECK_HANDLER_WINDUP
1308	GEN_INT_ENTRY machine_check, virt=0
1309
1310EXC_COMMON_BEGIN(machine_check_common)
1311	/*
1312	 * Machine check is different because we use a different
1313	 * save area: PACA_EXMC instead of PACA_EXGEN.
1314	 */
1315	GEN_COMMON machine_check
1316
1317	FINISH_NAP
1318	/* Enable MSR_RI when finished with PACA_EXMC */
1319	li	r10,MSR_RI
1320	mtmsrd 	r10,1
1321	addi	r3,r1,STACK_FRAME_OVERHEAD
1322	bl	machine_check_exception
1323	b	interrupt_return
1324
1325	GEN_KVM machine_check
1326
1327
1328#ifdef CONFIG_PPC_P7_NAP
1329/*
1330 * This is an idle wakeup. Low level machine check has already been
1331 * done. Queue the event then call the idle code to do the wake up.
1332 */
1333EXC_COMMON_BEGIN(machine_check_idle_common)
1334	bl	machine_check_queue_event
1335
1336	/*
1337	 * GPR-loss wakeups are relatively straightforward, because the
1338	 * idle sleep code has saved all non-volatile registers on its
1339	 * own stack, and r1 in PACAR1.
1340	 *
1341	 * For no-loss wakeups the r1 and lr registers used by the
1342	 * early machine check handler have to be restored first. r2 is
1343	 * the kernel TOC, so no need to restore it.
1344	 *
1345	 * Then decrement MCE nesting after finishing with the stack.
1346	 */
1347	ld	r3,_MSR(r1)
1348	ld	r4,_LINK(r1)
1349	ld	r1,GPR1(r1)
1350
1351	lhz	r11,PACA_IN_MCE(r13)
1352	subi	r11,r11,1
1353	sth	r11,PACA_IN_MCE(r13)
1354
1355	mtlr	r4
1356	rlwinm	r10,r3,47-31,30,31
1357	cmpwi	cr1,r10,2
1358	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
1359	b	idle_return_gpr_loss
1360#endif
1361
1362EXC_COMMON_BEGIN(unrecoverable_mce)
1363	/*
1364	 * We are going down. But there are chances that we might get hit by
1365	 * another MCE during panic path and we may run into unstable state
1366	 * with no way out. Hence, turn ME bit off while going down, so that
1367	 * when another MCE is hit during panic path, system will checkstop
1368	 * and hypervisor will get restarted cleanly by SP.
1369	 */
1370BEGIN_FTR_SECTION
1371	li	r10,0 /* clear MSR_RI */
1372	mtmsrd	r10,1
1373	bl	disable_machine_check
1374END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
1375	ld	r10,PACAKMSR(r13)
1376	li	r3,MSR_ME
1377	andc	r10,r10,r3
1378	mtmsrd	r10
1379
1380	lhz	r12,PACA_IN_MCE(r13)
1381	subi	r12,r12,1
1382	sth	r12,PACA_IN_MCE(r13)
1383
1384	/* Invoke machine_check_exception to print MCE event and panic. */
1385	addi	r3,r1,STACK_FRAME_OVERHEAD
1386	bl	machine_check_exception
1387
1388	/*
1389	 * We will not reach here. Even if we did, there is no way out.
1390	 * Call unrecoverable_exception and die.
1391	 */
1392	addi	r3,r1,STACK_FRAME_OVERHEAD
1393	bl	unrecoverable_exception
1394	b	.
1395
1396
1397/**
1398 * Interrupt 0x300 - Data Storage Interrupt (DSI).
1399 * This is a synchronous interrupt generated due to a data access exception,
 1400 * e.g., a load or store which does not have a valid page table entry with
1401 * permissions. DAWR matches also fault here, as do RC updates, and minor misc
1402 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
1403 *
1404 * Handling:
1405 * - Hash MMU
1406 *   Go to do_hash_page first to see if the HPT can be filled from an entry in
1407 *   the Linux page table. Hash faults can hit in kernel mode in a fairly
1408 *   arbitrary state (e.g., interrupts disabled, locks held) when accessing
1409 *   "non-bolted" regions, e.g., vmalloc space. However these should always be
1410 *   backed by Linux page tables.
1411 *
1412 *   If none is found, do a Linux page fault. Linux page faults can happen in
1413 *   kernel mode due to user copy operations of course.
1414 *
1415 * - Radix MMU
1416 *   The hardware loads from the Linux page table directly, so a fault goes
1417 *   immediately to Linux page fault.
1418 *
1419 * Conditions like DAWR match are handled on the way in to Linux page fault.
1420 */
1421INT_DEFINE_BEGIN(data_access)
1422	IVEC=0x300
1423	IDAR=1
1424	IDSISR=1
1425#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1426	IKVM_SKIP=1
1427	IKVM_REAL=1
1428#endif
1429INT_DEFINE_END(data_access)
1430
1431EXC_REAL_BEGIN(data_access, 0x300, 0x80)
1432	GEN_INT_ENTRY data_access, virt=0
1433EXC_REAL_END(data_access, 0x300, 0x80)
1434EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
1435	GEN_INT_ENTRY data_access, virt=1
1436EXC_VIRT_END(data_access, 0x4300, 0x80)
1437EXC_COMMON_BEGIN(data_access_common)
1438	GEN_COMMON data_access
1439	ld	r4,_DAR(r1)
1440	ld	r5,_DSISR(r1)
1441BEGIN_MMU_FTR_SECTION
1442	ld	r6,_MSR(r1)
1443	li	r3,0x300
1444	b	do_hash_page		/* Try to handle as hpte fault */
1445MMU_FTR_SECTION_ELSE
1446	b	handle_page_fault
1447ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1448
1449	GEN_KVM data_access
1450
1451
1452/**
1453 * Interrupt 0x380 - Data Segment Interrupt (DSLB).
1454 * This is a synchronous interrupt in response to an MMU fault missing SLB
1455 * entry for HPT, or an address outside RPT translation range.
1456 *
1457 * Handling:
1458 * - HPT:
1459 *   This refills the SLB, or reports an access fault similarly to a bad page
1460 *   fault. When coming from user-mode, the SLB handler may access any kernel
1461 *   data, though it may itself take a DSLB. When coming from kernel mode,
1462 *   recursive faults must be avoided so access is restricted to the kernel
1463 *   image text/data, kernel stack, and any data allocated below
1464 *   ppc64_bolted_size (first segment). The kernel handler must avoid stomping
1465 *   on user-handler data structures.
1466 *
1467 * A dedicated save area EXSLB is used (XXX: but it actually need not be
1468 * these days, we could use EXGEN).
1469 */
1470INT_DEFINE_BEGIN(data_access_slb)
1471	IVEC=0x380
1472	IAREA=PACA_EXSLB
1473	IRECONCILE=0
1474	IDAR=1
1475#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1476	IKVM_SKIP=1
1477	IKVM_REAL=1
1478#endif
1479INT_DEFINE_END(data_access_slb)
1480
1481EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
1482	GEN_INT_ENTRY data_access_slb, virt=0
1483EXC_REAL_END(data_access_slb, 0x380, 0x80)
1484EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
1485	GEN_INT_ENTRY data_access_slb, virt=1
1486EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
1487EXC_COMMON_BEGIN(data_access_slb_common)
1488	GEN_COMMON data_access_slb
1489	ld	r4,_DAR(r1)
1490	addi	r3,r1,STACK_FRAME_OVERHEAD
1491BEGIN_MMU_FTR_SECTION
1492	/* HPT case, do SLB fault */
1493	bl	do_slb_fault
1494	cmpdi	r3,0
1495	bne-	1f
1496	b	fast_interrupt_return
14971:	/* Error case */
1498MMU_FTR_SECTION_ELSE
1499	/* Radix case, access is outside page table range */
1500	li	r3,-EFAULT
1501ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1502	std	r3,RESULT(r1)
1503	RECONCILE_IRQ_STATE(r10, r11)
1504	ld	r4,_DAR(r1)
1505	ld	r5,RESULT(r1)
1506	addi	r3,r1,STACK_FRAME_OVERHEAD
1507	bl	do_bad_slb_fault
1508	b	interrupt_return
1509
1510	GEN_KVM data_access_slb
1511
1512
1513/**
1514 * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
1515 * This is a synchronous interrupt in response to an MMU fault due to an
1516 * instruction fetch.
1517 *
1518 * Handling:
1519 * Similar to DSI, though in response to fetch. The faulting address is found
1520 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
1521 */
1522INT_DEFINE_BEGIN(instruction_access)
1523	IVEC=0x400
1524	IISIDE=1
1525	IDAR=1
1526	IDSISR=1
1527#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1528	IKVM_REAL=1
1529#endif
1530INT_DEFINE_END(instruction_access)
1531
1532EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
1533	GEN_INT_ENTRY instruction_access, virt=0
1534EXC_REAL_END(instruction_access, 0x400, 0x80)
1535EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
1536	GEN_INT_ENTRY instruction_access, virt=1
1537EXC_VIRT_END(instruction_access, 0x4400, 0x80)
1538EXC_COMMON_BEGIN(instruction_access_common)
1539	GEN_COMMON instruction_access
1540	ld	r4,_DAR(r1)
1541	ld	r5,_DSISR(r1)
1542BEGIN_MMU_FTR_SECTION
1543	ld      r6,_MSR(r1)
1544	li	r3,0x400
1545	b	do_hash_page		/* Try to handle as hpte fault */
1546MMU_FTR_SECTION_ELSE
1547	b	handle_page_fault
1548ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1549
1550	GEN_KVM instruction_access
1551
1552
1553/**
1554 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
1555 * This is a synchronous interrupt in response to an MMU fault due to an
1556 * instruction fetch.
1557 *
1558 * Handling:
1559 * Similar to DSLB, though in response to fetch. The faulting address is found
1560 * in SRR0 (rather than DAR).
1561 */
1562INT_DEFINE_BEGIN(instruction_access_slb)
1563	IVEC=0x480
1564	IAREA=PACA_EXSLB
1565	IRECONCILE=0
1566	IISIDE=1
1567	IDAR=1
1568#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1569	IKVM_REAL=1
1570#endif
1571INT_DEFINE_END(instruction_access_slb)
1572
1573EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
1574	GEN_INT_ENTRY instruction_access_slb, virt=0
1575EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
1576EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
1577	GEN_INT_ENTRY instruction_access_slb, virt=1
1578EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
1579EXC_COMMON_BEGIN(instruction_access_slb_common)
1580	GEN_COMMON instruction_access_slb
1581	ld	r4,_DAR(r1)
1582	addi	r3,r1,STACK_FRAME_OVERHEAD
1583BEGIN_MMU_FTR_SECTION
1584	/* HPT case, do SLB fault */
1585	bl	do_slb_fault
1586	cmpdi	r3,0
1587	bne-	1f
1588	b	fast_interrupt_return
15891:	/* Error case */
1590MMU_FTR_SECTION_ELSE
1591	/* Radix case, access is outside page table range */
1592	li	r3,-EFAULT
1593ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1594	std	r3,RESULT(r1)
1595	RECONCILE_IRQ_STATE(r10, r11)
1596	ld	r4,_DAR(r1)
1597	ld	r5,RESULT(r1)
1598	addi	r3,r1,STACK_FRAME_OVERHEAD
1599	bl	do_bad_slb_fault
1600	b	interrupt_return
1601
1602	GEN_KVM instruction_access_slb
1603
1604
1605/**
1606 * Interrupt 0x500 - External Interrupt.
1607 * This is an asynchronous maskable interrupt in response to an "external
1608 * exception" from the interrupt controller or hypervisor (e.g., device
1609 * interrupt). It is maskable in hardware by clearing MSR[EE], and
1610 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
1611 *
1612 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
1613 * interrupts are delivered with HSRR registers, guests use SRRs, which
 1614 * requires IHSRR_IF_HVMODE.
1615 *
1616 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
1617 * external interrupts are delivered as Hypervisor Virtualization Interrupts
1618 * rather than External Interrupts.
1619 *
1620 * Handling:
1621 * This calls into Linux IRQ handler. NVGPRs are not saved to reduce overhead,
1622 * because registers at the time of the interrupt are not so important as it is
1623 * asynchronous.
1624 *
1625 * If soft masked, the masked handler will note the pending interrupt for
1626 * replay, and clear MSR[EE] in the interrupted context.
1627 */
1628INT_DEFINE_BEGIN(hardware_interrupt)
1629	IVEC=0x500
1630	IHSRR_IF_HVMODE=1
1631	IMASK=IRQS_DISABLED
1632	IKVM_REAL=1
1633	IKVM_VIRT=1
1634INT_DEFINE_END(hardware_interrupt)
1635
1636EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
1637	GEN_INT_ENTRY hardware_interrupt, virt=0
1638EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
1639EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
1640	GEN_INT_ENTRY hardware_interrupt, virt=1
1641EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
1642EXC_COMMON_BEGIN(hardware_interrupt_common)
1643	GEN_COMMON hardware_interrupt
1644	FINISH_NAP
1645	RUNLATCH_ON
1646	addi	r3,r1,STACK_FRAME_OVERHEAD
1647	bl	do_IRQ
1648	b	interrupt_return
1649
1650	GEN_KVM hardware_interrupt
1651
1652
1653/**
1654 * Interrupt 0x600 - Alignment Interrupt
1655 * This is a synchronous interrupt in response to data alignment fault.
1656 */
1657INT_DEFINE_BEGIN(alignment)
1658	IVEC=0x600
1659	IDAR=1
1660	IDSISR=1
1661#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1662	IKVM_REAL=1
1663#endif
1664INT_DEFINE_END(alignment)
1665
1666EXC_REAL_BEGIN(alignment, 0x600, 0x100)
1667	GEN_INT_ENTRY alignment, virt=0
1668EXC_REAL_END(alignment, 0x600, 0x100)
1669EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
1670	GEN_INT_ENTRY alignment, virt=1
1671EXC_VIRT_END(alignment, 0x4600, 0x100)
1672EXC_COMMON_BEGIN(alignment_common)
1673	GEN_COMMON alignment
1674	addi	r3,r1,STACK_FRAME_OVERHEAD
1675	bl	alignment_exception
1676	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
1677	b	interrupt_return
1678
1679	GEN_KVM alignment
1680
1681
1682/**
1683 * Interrupt 0x700 - Program Interrupt (program check).
1684 * This is a synchronous interrupt in response to various instruction faults:
1685 * traps, privilege errors, TM errors, floating point exceptions.
1686 *
1687 * Handling:
1688 * This interrupt may use the "emergency stack" in some cases when being taken
1689 * from kernel context, which complicates handling.
1690 */
1691INT_DEFINE_BEGIN(program_check)
1692	IVEC=0x700
1693#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1694	IKVM_REAL=1
1695#endif
1696INT_DEFINE_END(program_check)
1697
1698EXC_REAL_BEGIN(program_check, 0x700, 0x100)
1699	GEN_INT_ENTRY program_check, virt=0
1700EXC_REAL_END(program_check, 0x700, 0x100)
1701EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
1702	GEN_INT_ENTRY program_check, virt=1
1703EXC_VIRT_END(program_check, 0x4700, 0x100)
 
1704EXC_COMMON_BEGIN(program_check_common)
1705	__GEN_COMMON_ENTRY program_check
1706
1707	/*
1708	 * It's possible to receive a TM Bad Thing type program check with
1709	 * userspace register values (in particular r1), but with SRR1 reporting
1710	 * that we came from the kernel. Normally that would confuse the bad
1711	 * stack logic, and we would report a bad kernel stack pointer. Instead
1712	 * we switch to the emergency stack if we're taking a TM Bad Thing from
1713	 * the kernel.
1714	 */
1715
1716	andi.	r10,r12,MSR_PR
1717	bne	2f			/* If userspace, go normal path */
1718
1719	andis.	r10,r12,(SRR1_PROGTM)@h
1720	bne	1f			/* If TM, emergency		*/
1721
1722	cmpdi	r1,-INT_FRAME_SIZE	/* check if r1 is in userspace	*/
1723	blt	2f			/* normal path if not		*/
1724
1725	/* Use the emergency stack					*/
17261:	andi.	r10,r12,MSR_PR		/* Set CR0 correctly for label	*/
1727					/* 3 in EXCEPTION_PROLOG_COMMON	*/
1728	mr	r10,r1			/* Save r1			*/
1729	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
1730	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
1731	__ISTACK(program_check)=0
1732	__GEN_COMMON_BODY program_check
1733	b 3f
17342:
1735	__ISTACK(program_check)=1
1736	__GEN_COMMON_BODY program_check
17373:
 
1738	addi	r3,r1,STACK_FRAME_OVERHEAD
1739	bl	program_check_exception
1740	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
1741	b	interrupt_return
1742
1743	GEN_KVM program_check
1744
1745
1746/*
1747 * Interrupt 0x800 - Floating-Point Unavailable Interrupt.
1748 * This is a synchronous interrupt in response to executing an fp instruction
1749 * with MSR[FP]=0.
1750 *
1751 * Handling:
1752 * This will load FP registers and enable the FP bit if coming from userspace,
1753 * otherwise report a bad kernel use of FP.
1754 */
1755INT_DEFINE_BEGIN(fp_unavailable)
1756	IVEC=0x800
1757	IRECONCILE=0
1758#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1759	IKVM_REAL=1
1760#endif
1761INT_DEFINE_END(fp_unavailable)
1762
1763EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
1764	GEN_INT_ENTRY fp_unavailable, virt=0
1765EXC_REAL_END(fp_unavailable, 0x800, 0x100)
1766EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100)
1767	GEN_INT_ENTRY fp_unavailable, virt=1
1768EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
 
1769EXC_COMMON_BEGIN(fp_unavailable_common)
1770	GEN_COMMON fp_unavailable
1771	bne	1f			/* if from user, just load it up */
 
1772	RECONCILE_IRQ_STATE(r10, r11)
1773	addi	r3,r1,STACK_FRAME_OVERHEAD
1774	bl	kernel_fp_unavailable_exception
17750:	trap
1776	EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
17771:
1778#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1779BEGIN_FTR_SECTION
1780	/* Test if the 2 TM state bits are zero. If non-zero (i.e., userspace was
1781	 * in a transaction), go do the TM handling.
1782	 */
1783	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
1784	bne-	2f
1785END_FTR_SECTION_IFSET(CPU_FTR_TM)
1786#endif
1787	bl	load_up_fpu
1788	b	fast_interrupt_return
1789#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
17902:	/* User process was in a transaction */
 
1791	RECONCILE_IRQ_STATE(r10, r11)
1792	addi	r3,r1,STACK_FRAME_OVERHEAD
1793	bl	fp_unavailable_tm
1794	b	interrupt_return
1795#endif
1796
1797	GEN_KVM fp_unavailable
1798
1799
1800/**
1801 * Interrupt 0x900 - Decrementer Interrupt.
1802 * This is an asynchronous interrupt in response to a decrementer exception
1803 * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing
1804 * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e.,
1805 * local_irq_disable()).
1806 *
1807 * Handling:
1808 * This calls into Linux timer handler. NVGPRs are not saved (see 0x500).
1809 *
1810 * If soft masked, the masked handler will note the pending interrupt for
1811 * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled
1812 * in the interrupted context.
1813 * If PPC_WATCHDOG is configured, the soft masked handler will actually set
1814 * things back up to run soft_nmi_interrupt as a regular interrupt handler
1815 * on the emergency stack.
1816 */
1817INT_DEFINE_BEGIN(decrementer)
1818	IVEC=0x900
1819	IMASK=IRQS_DISABLED
1820#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1821	IKVM_REAL=1
1822#endif
1823INT_DEFINE_END(decrementer)
1824
1825EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
1826	GEN_INT_ENTRY decrementer, virt=0
1827EXC_REAL_END(decrementer, 0x900, 0x80)
1828EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
1829	GEN_INT_ENTRY decrementer, virt=1
1830EXC_VIRT_END(decrementer, 0x4900, 0x80)
1831EXC_COMMON_BEGIN(decrementer_common)
1832	GEN_COMMON decrementer
1833	FINISH_NAP
1834	RUNLATCH_ON
1835	addi	r3,r1,STACK_FRAME_OVERHEAD
1836	bl	timer_interrupt
1837	b	interrupt_return
1838
1839	GEN_KVM decrementer
1840
1841
1842/**
1843 * Interrupt 0x980 - Hypervisor Decrementer Interrupt.
1844 * This is an asynchronous interrupt, similar to 0x900 but for the HDEC
1845 * register.
1846 *
1847 * Handling:
1848 * Linux does not use this outside KVM where it's used to keep a host timer
1849 * while the guest is given control of DEC. It should normally be caught by
1850 * the KVM test and routed there.
1851 */
1852INT_DEFINE_BEGIN(hdecrementer)
1853	IVEC=0x980
1854	IHSRR=1
1855	ISTACK=0
1856	IRECONCILE=0
1857	IKVM_REAL=1
1858	IKVM_VIRT=1
1859INT_DEFINE_END(hdecrementer)
1860
1861EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80)
1862	GEN_INT_ENTRY hdecrementer, virt=0
1863EXC_REAL_END(hdecrementer, 0x980, 0x80)
1864EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80)
1865	GEN_INT_ENTRY hdecrementer, virt=1
1866EXC_VIRT_END(hdecrementer, 0x4980, 0x80)
1867EXC_COMMON_BEGIN(hdecrementer_common)
1868	__GEN_COMMON_ENTRY hdecrementer
1869	/*
1870	 * Hypervisor decrementer interrupts not caught by the KVM test
1871	 * shouldn't occur but are sometimes left pending on exit from a KVM
1872	 * guest.  We don't need to do anything to clear them, as they are
1873	 * edge-triggered.
1874	 *
1875	 * Be careful to avoid touching the kernel stack.
1876	 */
1877	ld	r10,PACA_EXGEN+EX_CTR(r13)
1878	mtctr	r10
1879	mtcrf	0x80,r9
1880	ld	r9,PACA_EXGEN+EX_R9(r13)
1881	ld	r10,PACA_EXGEN+EX_R10(r13)
1882	ld	r11,PACA_EXGEN+EX_R11(r13)
1883	ld	r12,PACA_EXGEN+EX_R12(r13)
1884	ld	r13,PACA_EXGEN+EX_R13(r13)
1885	HRFI_TO_KERNEL
1886
1887	GEN_KVM hdecrementer
1888
1889
1890/**
1891 * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
1892 * This is an asynchronous interrupt in response to a msgsndp doorbell.
1893 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
1894 * IRQS_DISABLED mask (i.e., local_irq_disable()).
1895 *
1896 * Handling:
1897 * Guests may use this for IPIs between threads in a core if the
1898 * hypervisor supports it. NVGPRs are not saved (see 0x500).
1899 *
1900 * If soft masked, the masked handler will note the pending interrupt for
1901 * replay, leaving MSR[EE] enabled in the interrupted context because the
1902 * doorbells are edge triggered.
1903 */
1904INT_DEFINE_BEGIN(doorbell_super)
1905	IVEC=0xa00
1906	IMASK=IRQS_DISABLED
1907#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1908	IKVM_REAL=1
1909#endif
1910INT_DEFINE_END(doorbell_super)
1911
1912EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
1913	GEN_INT_ENTRY doorbell_super, virt=0
1914EXC_REAL_END(doorbell_super, 0xa00, 0x100)
1915EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
1916	GEN_INT_ENTRY doorbell_super, virt=1
1917EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
1918EXC_COMMON_BEGIN(doorbell_super_common)
1919	GEN_COMMON doorbell_super
1920	FINISH_NAP
1921	RUNLATCH_ON
1922	addi	r3,r1,STACK_FRAME_OVERHEAD
1923#ifdef CONFIG_PPC_DOORBELL
1924	bl	doorbell_exception
1925#else
1926	bl	unknown_exception
1927#endif
1928	b	interrupt_return
1929
1930	GEN_KVM doorbell_super
1931
1932
1933EXC_REAL_NONE(0xb00, 0x100)
1934EXC_VIRT_NONE(0x4b00, 0x100)
1935
1936/**
1937 * Interrupt 0xc00 - System Call Interrupt (syscall, hcall).
1938 * This is a synchronous interrupt invoked with the "sc" instruction. The
1939 * system call is invoked with "sc 0" and does not alter the HV bit, so it
1940 * is directed to the currently running OS. The hypercall is invoked with
1941 * "sc 1" and it sets HV=1, so it elevates to hypervisor.
1942 *
1943 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
1944 * 0x4c00 virtual mode.
1945 *
1946 * Handling:
1947 * If the KVM test fires then it was due to a hypercall and is accordingly
1948 * routed to KVM. Otherwise this executes a normal Linux system call.
1949 *
1950 * Call convention:
1951 *
1952 * syscall and hypercalls register conventions are documented in
1953 * Documentation/powerpc/syscall64-abi.rst and
1954 * Documentation/powerpc/papr_hcalls.rst respectively.
1955 *
1956 * The intersection of volatile registers that don't contain possible
1957 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
1958 * without saving, though xer is not a good choice, as hardware may
1959 * interpret some of its bits, so changing them may be costly.
1960 */
1961INT_DEFINE_BEGIN(system_call)
1962	IVEC=0xc00
1963	IKVM_REAL=1
1964	IKVM_VIRT=1
1965INT_DEFINE_END(system_call)
1966
1967.macro SYSTEM_CALL virt
1968#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1969	/*
1970	 * There is a little bit of juggling to get syscall and hcall
1971	 * working well. Save r13 in ctr to avoid using SPRG scratch
1972	 * register.
1973	 *
1974	 * Userspace syscalls have already saved the PPR, hcalls must save
1975	 * it before setting HMT_MEDIUM.
1976	 */
1977	mtctr	r13
1978	GET_PACA(r13)
1979	std	r10,PACA_EXGEN+EX_R10(r13)
1980	INTERRUPT_TO_KERNEL
1981	KVMTEST system_call /* uses r10, branch to system_call_kvm */
1982	mfctr	r9
1983#else
1984	mr	r9,r13
1985	GET_PACA(r13)
1986	INTERRUPT_TO_KERNEL
1987#endif
1988
1989#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
1990BEGIN_FTR_SECTION
1991	cmpdi	r0,0x1ebe
1992	beq-	1f
1993END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
1994#endif
1995
1996	/* We reach here with PACA in r13, r13 in r9. */
1997	mfspr	r11,SPRN_SRR0
1998	mfspr	r12,SPRN_SRR1
1999
2000	HMT_MEDIUM
2001
2002	.if ! \virt
2003	__LOAD_HANDLER(r10, system_call_common)
2004	mtspr	SPRN_SRR0,r10
2005	ld	r10,PACAKMSR(r13)
2006	mtspr	SPRN_SRR1,r10
2007	RFI_TO_KERNEL
2008	b	.	/* prevent speculative execution */
2009	.else
2010	li	r10,MSR_RI
2011	mtmsrd 	r10,1			/* Set RI (EE=0) */
2012#ifdef CONFIG_RELOCATABLE
2013	__LOAD_HANDLER(r10, system_call_common)
2014	mtctr	r10
2015	bctr
2016#else
2017	b	system_call_common
2018#endif
2019	.endif
2020
2021#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
2022	/* Fast LE/BE switch system call */
20231:	mfspr	r12,SPRN_SRR1
2024	xori	r12,r12,MSR_LE
2025	mtspr	SPRN_SRR1,r12
2026	mr	r13,r9
2027	RFI_TO_USER	/* return to userspace */
2028	b	.	/* prevent speculative execution */
2029#endif
2030.endm
2031
2032EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
2033	SYSTEM_CALL 0
2034EXC_REAL_END(system_call, 0xc00, 0x100)
2035EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
2036	SYSTEM_CALL 1
2037EXC_VIRT_END(system_call, 0x4c00, 0x100)
2038
2039#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2040TRAMP_REAL_BEGIN(system_call_kvm)
2041	/*
2042	 * This is a hcall, so register convention is as above, with these
2043	 * differences:
2044	 * r13 = PACA
2045	 * ctr = orig r13
2046	 * orig r10 saved in PACA
2047	 */
 
2048	 /*
2049	  * Save the PPR (on systems that support it) before changing to
2050	  * HMT_MEDIUM. That allows the KVM code to save that value into the
2051	  * guest state (it is the guest's PPR value).
2052	  */
2053BEGIN_FTR_SECTION
2054	mfspr	r10,SPRN_PPR
2055	std	r10,HSTATE_PPR(r13)
2056END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
2057	HMT_MEDIUM
 
2058	mfctr	r10
2059	SET_SCRATCH0(r10)
2060	mfcr	r10
2061	std	r12,HSTATE_SCRATCH0(r13)
2062	sldi	r12,r10,32
2063	ori	r12,r12,0xc00
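	/* r12 now holds the CR value in its upper 32 bits and the 0xc00 trap
	 * number in its low bits, the layout kvmppc_interrupt expects. */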
2064#ifdef CONFIG_RELOCATABLE
2065	/*
2066	 * Requires __LOAD_FAR_HANDLER because kvmppc_interrupt lives
2067	 * outside the head section.
2068	 */
2069	__LOAD_FAR_HANDLER(r10, kvmppc_interrupt)
2070	mtctr   r10
2071	ld	r10,PACA_EXGEN+EX_R10(r13)
2072	bctr
2073#else
2074	ld	r10,PACA_EXGEN+EX_R10(r13)
2075	b       kvmppc_interrupt
2076#endif
2077#endif
2078
2079
2080/**
2081 * Interrupt 0xd00 - Trace Interrupt.
2082 * This is a synchronous interrupt in response to instruction step or
2083 * breakpoint faults.
2084 */
2085INT_DEFINE_BEGIN(single_step)
2086	IVEC=0xd00
2087#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2088	IKVM_REAL=1
2089#endif
2090INT_DEFINE_END(single_step)
2091
2092EXC_REAL_BEGIN(single_step, 0xd00, 0x100)
2093	GEN_INT_ENTRY single_step, virt=0
2094EXC_REAL_END(single_step, 0xd00, 0x100)
2095EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
2096	GEN_INT_ENTRY single_step, virt=1
2097EXC_VIRT_END(single_step, 0x4d00, 0x100)
2098EXC_COMMON_BEGIN(single_step_common)
2099	GEN_COMMON single_step
2100	addi	r3,r1,STACK_FRAME_OVERHEAD
2101	bl	single_step_exception
2102	b	interrupt_return
2103
2104	GEN_KVM single_step
2105
2106
2107/**
2108 * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
2109 * This is a synchronous interrupt in response to an MMU fault caused by a
2110 * guest data access.
2111 *
2112 * Handling:
2113 * This should always get routed to KVM. In radix MMU mode, this is caused
2114 * by a guest nested radix access that can't be performed due to the
2115 * partition scope page table. In hash mode, this can be caused by guests
2116 * running with translation disabled (virtual real mode) or with VPM enabled.
2117 * KVM will update the page table structures or disallow the access.
2118 */
2119INT_DEFINE_BEGIN(h_data_storage)
2120	IVEC=0xe00
2121	IHSRR=1
2122	IDAR=1
2123	IDSISR=1
2124	IKVM_SKIP=1
2125	IKVM_REAL=1
2126	IKVM_VIRT=1
2127INT_DEFINE_END(h_data_storage)
2128
2129EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20)
2130	GEN_INT_ENTRY h_data_storage, virt=0, ool=1
2131EXC_REAL_END(h_data_storage, 0xe00, 0x20)
2132EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
2133	GEN_INT_ENTRY h_data_storage, virt=1, ool=1
2134EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
 
2135EXC_COMMON_BEGIN(h_data_storage_common)
2136	GEN_COMMON h_data_storage
 
2137	addi    r3,r1,STACK_FRAME_OVERHEAD
2138BEGIN_MMU_FTR_SECTION
2139	ld	r4,_DAR(r1)
2140	li	r5,SIGSEGV
2141	bl      bad_page_fault
2142MMU_FTR_SECTION_ELSE
2143	bl      unknown_exception
2144ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
2145	b       interrupt_return
2146
2147	GEN_KVM h_data_storage
2148
2149
2150/**
2151 * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
2152 * This is a synchronous interrupt in response to an MMU fault caused by a
2153 * guest instruction fetch, similar to HDSI.
2154 */
2155INT_DEFINE_BEGIN(h_instr_storage)
2156	IVEC=0xe20
2157	IHSRR=1
2158	IKVM_REAL=1
2159	IKVM_VIRT=1
2160INT_DEFINE_END(h_instr_storage)
2161
2162EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20)
2163	GEN_INT_ENTRY h_instr_storage, virt=0, ool=1
2164EXC_REAL_END(h_instr_storage, 0xe20, 0x20)
2165EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
2166	GEN_INT_ENTRY h_instr_storage, virt=1, ool=1
2167EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
2168EXC_COMMON_BEGIN(h_instr_storage_common)
2169	GEN_COMMON h_instr_storage
2170	addi	r3,r1,STACK_FRAME_OVERHEAD
2171	bl	unknown_exception
2172	b	interrupt_return
2173
2174	GEN_KVM h_instr_storage
2175
2176
2177/**
2178 * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
2179 */
2180INT_DEFINE_BEGIN(emulation_assist)
2181	IVEC=0xe40
2182	IHSRR=1
2183	IKVM_REAL=1
2184	IKVM_VIRT=1
2185INT_DEFINE_END(emulation_assist)
2186
2187EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
2188	GEN_INT_ENTRY emulation_assist, virt=0, ool=1
2189EXC_REAL_END(emulation_assist, 0xe40, 0x20)
2190EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
2191	GEN_INT_ENTRY emulation_assist, virt=1, ool=1
2192EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
2193EXC_COMMON_BEGIN(emulation_assist_common)
2194	GEN_COMMON emulation_assist
2195	addi	r3,r1,STACK_FRAME_OVERHEAD
2196	bl	emulation_assist_interrupt
2197	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2198	b	interrupt_return
2199
2200	GEN_KVM emulation_assist
2201
2202
2203/**
2204 * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
2205 * This is an asynchronous interrupt caused by a Hypervisor Maintenance
2206 * Exception. It is always taken in real mode but uses HSRR registers
2207 * unlike SRESET and MCE.
2208 *
2209 * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
2210 * with IRQS_DISABLED mask (i.e., local_irq_disable()).
2211 *
2212 * Handling:
2213 * This is a special case: it is handled similarly to machine checks, with an
2214 * initial real mode handler that is not soft-masked, which attempts to fix the
2215 * problem, followed by a regular handler which is soft-maskable and reports
2216 * the problem.
2217 *
2218 * The emergency stack is used for the early real mode handler.
2219 *
2220 * XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
2221 * either use soft-masking for the MCE, or use irq_work for the HMI.
2222 *
2223 * KVM:
2224 * Unlike MCE, this calls into KVM without calling the real mode handler
2225 * first.
2226 */
2227INT_DEFINE_BEGIN(hmi_exception_early)
2228	IVEC=0xe60
2229	IHSRR=1
2230	IREALMODE_COMMON=1
2231	ISTACK=0
2232	IRECONCILE=0
2233	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
2234	IKVM_REAL=1
2235INT_DEFINE_END(hmi_exception_early)
2236
2237INT_DEFINE_BEGIN(hmi_exception)
2238	IVEC=0xe60
2239	IHSRR=1
2240	IMASK=IRQS_DISABLED
2241	IKVM_REAL=1
2242INT_DEFINE_END(hmi_exception)
2243
2244EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
2245	GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1
2246EXC_REAL_END(hmi_exception, 0xe60, 0x20)
2247EXC_VIRT_NONE(0x4e60, 0x20)
2248
2249EXC_COMMON_BEGIN(hmi_exception_early_common)
2250	__GEN_REALMODE_COMMON_ENTRY hmi_exception_early
2251
 
2252	mr	r10,r1			/* Save r1 */
2253	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack for realmode */
2254	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
2255
2256	__GEN_COMMON_BODY hmi_exception_early
 
2257
2258	addi	r3,r1,STACK_FRAME_OVERHEAD
2259	bl	hmi_exception_realmode
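	/* A non-zero return from hmi_exception_realmode requests the virtual
	 * mode handler (1: below); zero returns straight from real mode. */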
2260	cmpdi	cr0,r3,0
2261	bne	1f
2262
2263	EXCEPTION_RESTORE_REGS hsrr=1
2264	HRFI_TO_USER_OR_KERNEL
2265
22661:
2267	/*
2268	 * Go to virtual mode and pull the HMI event information from
2269	 * firmware.
2270	 */
2271	EXCEPTION_RESTORE_REGS hsrr=1
2272	GEN_INT_ENTRY hmi_exception, virt=0
2273
2274	GEN_KVM hmi_exception_early
2275
2276EXC_COMMON_BEGIN(hmi_exception_common)
2277	GEN_COMMON hmi_exception
2278	FINISH_NAP
2279	RUNLATCH_ON
 
2280	addi	r3,r1,STACK_FRAME_OVERHEAD
2281	bl	handle_hmi_exception
2282	b	interrupt_return
2283
2284	GEN_KVM hmi_exception
2285
2286
2287/**
2288 * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
2289 * This is an asynchronous interrupt in response to a msgsnd doorbell.
2290 * Similar to the 0xa00 doorbell but for host rather than guest.
2291 */
2292INT_DEFINE_BEGIN(h_doorbell)
2293	IVEC=0xe80
2294	IHSRR=1
2295	IMASK=IRQS_DISABLED
2296	IKVM_REAL=1
2297	IKVM_VIRT=1
2298INT_DEFINE_END(h_doorbell)
2299
2300EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
2301	GEN_INT_ENTRY h_doorbell, virt=0, ool=1
2302EXC_REAL_END(h_doorbell, 0xe80, 0x20)
2303EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
2304	GEN_INT_ENTRY h_doorbell, virt=1, ool=1
2305EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
2306EXC_COMMON_BEGIN(h_doorbell_common)
2307	GEN_COMMON h_doorbell
2308	FINISH_NAP
2309	RUNLATCH_ON
2310	addi	r3,r1,STACK_FRAME_OVERHEAD
2311#ifdef CONFIG_PPC_DOORBELL
2312	bl	doorbell_exception
2313#else
2314	bl	unknown_exception
2315#endif
2316	b	interrupt_return
2317
2318	GEN_KVM h_doorbell
2319
2320
2321/**
2322 * Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
2323 * This is an asynchronous interrupt in response to an "external exception".
2324 * Similar to 0x500 but for host only.
2325 */
2326INT_DEFINE_BEGIN(h_virt_irq)
2327	IVEC=0xea0
2328	IHSRR=1
2329	IMASK=IRQS_DISABLED
2330	IKVM_REAL=1
2331	IKVM_VIRT=1
2332INT_DEFINE_END(h_virt_irq)
2333
2334EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
2335	GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
2336EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
2337EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
2338	GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
2339EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
2340EXC_COMMON_BEGIN(h_virt_irq_common)
2341	GEN_COMMON h_virt_irq
2342	FINISH_NAP
2343	RUNLATCH_ON
2344	addi	r3,r1,STACK_FRAME_OVERHEAD
2345	bl	do_IRQ
2346	b	interrupt_return
2347
2348	GEN_KVM h_virt_irq
2349
2350
2351EXC_REAL_NONE(0xec0, 0x20)
2352EXC_VIRT_NONE(0x4ec0, 0x20)
2353EXC_REAL_NONE(0xee0, 0x20)
2354EXC_VIRT_NONE(0x4ee0, 0x20)
2355
2356
2357/*
2358 * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
2359 * This is an asynchronous interrupt in response to a PMU exception.
2360 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
2361 * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
2362 *
2363 * Handling:
2364 * This calls into the perf subsystem.
2365 *
2366 * Like the watchdog soft-nmi, it appears as an NMI interrupt to Linux, in that
2367 * it runs under local_irq_disable. However it may be soft-masked in
2368 * powerpc-specific code.
2369 *
2370 * If soft masked, the masked handler will note the pending interrupt for
2371 * replay, and clear MSR[EE] in the interrupted context.
2372 */
2373INT_DEFINE_BEGIN(performance_monitor)
2374	IVEC=0xf00
2375	IMASK=IRQS_PMI_DISABLED
2376#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2377	IKVM_REAL=1
2378#endif
2379INT_DEFINE_END(performance_monitor)
2380
2381EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
2382	GEN_INT_ENTRY performance_monitor, virt=0, ool=1
2383EXC_REAL_END(performance_monitor, 0xf00, 0x20)
2384EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
2385	GEN_INT_ENTRY performance_monitor, virt=1, ool=1
2386EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
2387EXC_COMMON_BEGIN(performance_monitor_common)
2388	GEN_COMMON performance_monitor
2389	FINISH_NAP
2390	RUNLATCH_ON
2391	addi	r3,r1,STACK_FRAME_OVERHEAD
2392	bl	performance_monitor_exception
2393	b	interrupt_return
2394
2395	GEN_KVM performance_monitor
2396
2397
2398/**
2399 * Interrupt 0xf20 - Vector Unavailable Interrupt.
2400 * This is a synchronous interrupt in response to
2401 * executing a vector (or altivec) instruction with MSR[VEC]=0.
2402 * Similar to FP unavailable.
2403 */
2404INT_DEFINE_BEGIN(altivec_unavailable)
2405	IVEC=0xf20
2406	IRECONCILE=0
2407#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2408	IKVM_REAL=1
2409#endif
2410INT_DEFINE_END(altivec_unavailable)
2411
2412EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
2413	GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1
2414EXC_REAL_END(altivec_unavailable, 0xf20, 0x20)
2415EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20)
2416	GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1
2417EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20)
 
2418EXC_COMMON_BEGIN(altivec_unavailable_common)
2419	GEN_COMMON altivec_unavailable
2420#ifdef CONFIG_ALTIVEC
2421BEGIN_FTR_SECTION
2422	beq	1f
2423#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2424  BEGIN_FTR_SECTION_NESTED(69)
2425	/* Test if the 2 TM state bits are zero. If non-zero (i.e., userspace was
2426	 * in a transaction), go do the TM handling.
2427	 */
2428	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
2429	bne-	2f
2430  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
2431#endif
2432	bl	load_up_altivec
2433	b	fast_interrupt_return
2434#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
24352:	/* User process was in a transaction */
 
2436	RECONCILE_IRQ_STATE(r10, r11)
2437	addi	r3,r1,STACK_FRAME_OVERHEAD
2438	bl	altivec_unavailable_tm
2439	b	interrupt_return
2440#endif
24411:
2442END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2443#endif
 
2444	RECONCILE_IRQ_STATE(r10, r11)
2445	addi	r3,r1,STACK_FRAME_OVERHEAD
2446	bl	altivec_unavailable_exception
2447	b	interrupt_return
2448
2449	GEN_KVM altivec_unavailable
2450
2451
2452/**
2453 * Interrupt 0xf40 - VSX Unavailable Interrupt.
2454 * This is a synchronous interrupt in response to
2455 * executing a VSX instruction with MSR[VSX]=0.
2456 * Similar to FP unavailable.
2457 */
2458INT_DEFINE_BEGIN(vsx_unavailable)
2459	IVEC=0xf40
2460	IRECONCILE=0
2461#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2462	IKVM_REAL=1
2463#endif
2464INT_DEFINE_END(vsx_unavailable)
2465
2466EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
2467	GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1
2468EXC_REAL_END(vsx_unavailable, 0xf40, 0x20)
2469EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20)
2470	GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1
2471EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20)
 
2472EXC_COMMON_BEGIN(vsx_unavailable_common)
2473	GEN_COMMON vsx_unavailable
2474#ifdef CONFIG_VSX
2475BEGIN_FTR_SECTION
2476	beq	1f
2477#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2478  BEGIN_FTR_SECTION_NESTED(69)
2479	/* Test if the 2 TM state bits are zero. If non-zero (i.e., userspace was
2480	 * in a transaction), go do the TM handling.
2481	 */
2482	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
2483	bne-	2f
2484  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
2485#endif
2486	b	load_up_vsx
2487#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
24882:	/* User process was in a transaction */
 
2489	RECONCILE_IRQ_STATE(r10, r11)
2490	addi	r3,r1,STACK_FRAME_OVERHEAD
2491	bl	vsx_unavailable_tm
2492	b	interrupt_return
2493#endif
24941:
2495END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2496#endif
 
2497	RECONCILE_IRQ_STATE(r10, r11)
2498	addi	r3,r1,STACK_FRAME_OVERHEAD
2499	bl	vsx_unavailable_exception
2500	b	interrupt_return
2501
2502	GEN_KVM vsx_unavailable
2503
2504
2505/**
2506 * Interrupt 0xf60 - Facility Unavailable Interrupt.
2507 * This is a synchronous interrupt in response to
2508 * executing an instruction without access to the facility that can be
2509 * resolved by the OS (e.g., FSCR, MSR).
2510 * Similar to FP unavailable.
2511 */
2512INT_DEFINE_BEGIN(facility_unavailable)
2513	IVEC=0xf60
2514#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2515	IKVM_REAL=1
2516#endif
2517INT_DEFINE_END(facility_unavailable)
2518
2519EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
2520	GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
2521EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
2522EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
2523	GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
2524EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
2525EXC_COMMON_BEGIN(facility_unavailable_common)
2526	GEN_COMMON facility_unavailable
2527	addi	r3,r1,STACK_FRAME_OVERHEAD
2528	bl	facility_unavailable_exception
2529	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2530	b	interrupt_return
2531
2532	GEN_KVM facility_unavailable
2533
2534
2535/**
2536 * Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
2537 * This is a synchronous interrupt in response to
2538 * executing an instruction without access to the facility that can only
2539 * be resolved in HV mode (e.g., HFSCR).
2540 * Similar to FP unavailable.
2541 */
2542INT_DEFINE_BEGIN(h_facility_unavailable)
2543	IVEC=0xf80
2544	IHSRR=1
2545	IKVM_REAL=1
2546	IKVM_VIRT=1
2547INT_DEFINE_END(h_facility_unavailable)
2548
2549EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
2550	GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
2551EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
2552EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
2553	GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
2554EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
2555EXC_COMMON_BEGIN(h_facility_unavailable_common)
2556	GEN_COMMON h_facility_unavailable
2557	addi	r3,r1,STACK_FRAME_OVERHEAD
2558	bl	facility_unavailable_exception
2559	REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
2560	b	interrupt_return
2561
2562	GEN_KVM h_facility_unavailable
2563
2564
2565EXC_REAL_NONE(0xfa0, 0x20)
2566EXC_VIRT_NONE(0x4fa0, 0x20)
2567EXC_REAL_NONE(0xfc0, 0x20)
2568EXC_VIRT_NONE(0x4fc0, 0x20)
2569EXC_REAL_NONE(0xfe0, 0x20)
2570EXC_VIRT_NONE(0x4fe0, 0x20)
2571
2572EXC_REAL_NONE(0x1000, 0x100)
2573EXC_VIRT_NONE(0x5000, 0x100)
2574EXC_REAL_NONE(0x1100, 0x100)
2575EXC_VIRT_NONE(0x5100, 0x100)
2576
2577#ifdef CONFIG_CBE_RAS
2578INT_DEFINE_BEGIN(cbe_system_error)
2579	IVEC=0x1200
2580	IHSRR=1
2581	IKVM_SKIP=1
2582	IKVM_REAL=1
2583INT_DEFINE_END(cbe_system_error)
2584
2585EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
2586	GEN_INT_ENTRY cbe_system_error, virt=0
2587EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
2588EXC_VIRT_NONE(0x5200, 0x100)
2589EXC_COMMON_BEGIN(cbe_system_error_common)
2590	GEN_COMMON cbe_system_error
2591	addi	r3,r1,STACK_FRAME_OVERHEAD
2592	bl	cbe_system_error_exception
2593	b	interrupt_return
2594
2595	GEN_KVM cbe_system_error
2596
2597#else /* CONFIG_CBE_RAS */
2598EXC_REAL_NONE(0x1200, 0x100)
2599EXC_VIRT_NONE(0x5200, 0x100)
2600#endif
2601
2602
2603INT_DEFINE_BEGIN(instruction_breakpoint)
2604	IVEC=0x1300
2605#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2606	IKVM_SKIP=1
2607	IKVM_REAL=1
2608#endif
2609INT_DEFINE_END(instruction_breakpoint)
2610
2611EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100)
2612	GEN_INT_ENTRY instruction_breakpoint, virt=0
2613EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100)
2614EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
2615	GEN_INT_ENTRY instruction_breakpoint, virt=1
2616EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
2617EXC_COMMON_BEGIN(instruction_breakpoint_common)
2618	GEN_COMMON instruction_breakpoint
2619	addi	r3,r1,STACK_FRAME_OVERHEAD
2620	bl	instruction_breakpoint_exception
2621	b	interrupt_return
2622
2623	GEN_KVM instruction_breakpoint
2624
2625
2626EXC_REAL_NONE(0x1400, 0x100)
2627EXC_VIRT_NONE(0x5400, 0x100)
2628
2629/**
2630 * Interrupt 0x1500 - Soft Patch Interrupt
2631 *
2632 * Handling:
2633 * This is an implementation specific interrupt which can be used for a
2634 * range of exceptions.
2635 *
2636 * This interrupt handler is unique in that it runs the denormal assist
2637 * code even for guests (and even in guest context) without going to KVM,
2638 * for speed. POWER9 does not raise denorm exceptions, so this special case
2639 * could eventually be phased out.
2640 */
2641INT_DEFINE_BEGIN(denorm_exception)
2642	IVEC=0x1500
2643	IHSRR=1
2644	IBRANCH_TO_COMMON=0
2645	IKVM_REAL=1
2646INT_DEFINE_END(denorm_exception)
2647
2648EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100)
2649	GEN_INT_ENTRY denorm_exception, virt=0
2650#ifdef CONFIG_PPC_DENORMALISATION
2651	andis.	r10,r12,(HSRR1_DENORM)@h /* denorm? */
2652	bne+	denorm_assist
2653#endif
2654	GEN_BRANCH_TO_COMMON denorm_exception, virt=0
2655EXC_REAL_END(denorm_exception, 0x1500, 0x100)
 
 
2656#ifdef CONFIG_PPC_DENORMALISATION
2657EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
2658	GEN_INT_ENTRY denorm_exception, virt=1
2659	andis.	r10,r12,(HSRR1_DENORM)@h /* denorm? */
2660	bne+	denorm_assist
2661	GEN_BRANCH_TO_COMMON denorm_exception, virt=1
2662EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
2663#else
2664EXC_VIRT_NONE(0x5500, 0x100)
2665#endif
2666
 
 
2667#ifdef CONFIG_PPC_DENORMALISATION
2668TRAMP_REAL_BEGIN(denorm_assist)
2669BEGIN_FTR_SECTION
2670/*
2671 * To denormalise we need to move a copy of the register to itself.
2672 * For POWER6 do that here for all FP regs.
2673 */
2674	mfmsr	r10
2675	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
2676	xori	r10,r10,(MSR_FE0|MSR_FE1)
2677	mtmsrd	r10
2678	sync
2679
2680	.Lreg=0
2681	.rept 32
2682	fmr	.Lreg,.Lreg
2683	.Lreg=.Lreg+1
2684	.endr
2685
2686FTR_SECTION_ELSE
2687/*
2688 * To denormalise we need to move a copy of the register to itself.
2689 * For POWER7 do that here for the first 32 VSX registers only.
2690 */
2691	mfmsr	r10
2692	oris	r10,r10,MSR_VSX@h
2693	mtmsrd	r10
2694	sync
2695
2696	.Lreg=0
2697	.rept 32
2698	XVCPSGNDP(.Lreg,.Lreg,.Lreg)
2699	.Lreg=.Lreg+1
2700	.endr
2701
2702ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
2703
2704BEGIN_FTR_SECTION
2705	b	denorm_done
2706END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
2707/*
2708 * To denormalise we need to move a copy of the register to itself.
2709 * For POWER8 we need to do that for all 64 VSX registers
2710 */
2711	.Lreg=32
2712	.rept 32
2713	XVCPSGNDP(.Lreg,.Lreg,.Lreg)
2714	.Lreg=.Lreg+1
2715	.endr
2716
2717denorm_done:
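	/* Wind HSRR0 back by one instruction so the instruction that raised
	 * the denormal exception is re-executed now that the registers have
	 * been denormalised. */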
2718	mfspr	r11,SPRN_HSRR0
2719	subi	r11,r11,4
2720	mtspr	SPRN_HSRR0,r11
2721	mtcrf	0x80,r9
2722	ld	r9,PACA_EXGEN+EX_R9(r13)
2723BEGIN_FTR_SECTION
2724	ld	r10,PACA_EXGEN+EX_PPR(r13)
2725	mtspr	SPRN_PPR,r10
2726END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
2727BEGIN_FTR_SECTION
2728	ld	r10,PACA_EXGEN+EX_CFAR(r13)
2729	mtspr	SPRN_CFAR,r10
2730END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
2731	ld	r10,PACA_EXGEN+EX_R10(r13)
2732	ld	r11,PACA_EXGEN+EX_R11(r13)
2733	ld	r12,PACA_EXGEN+EX_R12(r13)
2734	ld	r13,PACA_EXGEN+EX_R13(r13)
2735	HRFI_TO_UNKNOWN
2736	b	.
2737#endif
2738
2739EXC_COMMON_BEGIN(denorm_exception_common)
2740	GEN_COMMON denorm_exception
2741	addi	r3,r1,STACK_FRAME_OVERHEAD
2742	bl	unknown_exception
2743	b	interrupt_return
2744
2745	GEN_KVM denorm_exception
2746
2747
2748#ifdef CONFIG_CBE_RAS
2749INT_DEFINE_BEGIN(cbe_maintenance)
2750	IVEC=0x1600
2751	IHSRR=1
2752	IKVM_SKIP=1
2753	IKVM_REAL=1
2754INT_DEFINE_END(cbe_maintenance)
2755
2756EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100)
2757	GEN_INT_ENTRY cbe_maintenance, virt=0
2758EXC_REAL_END(cbe_maintenance, 0x1600, 0x100)
2759EXC_VIRT_NONE(0x5600, 0x100)
2760EXC_COMMON_BEGIN(cbe_maintenance_common)
2761	GEN_COMMON cbe_maintenance
2762	addi	r3,r1,STACK_FRAME_OVERHEAD
2763	bl	cbe_maintenance_exception
2764	b	interrupt_return
2765
2766	GEN_KVM cbe_maintenance
2767
2768#else /* CONFIG_CBE_RAS */
2769EXC_REAL_NONE(0x1600, 0x100)
2770EXC_VIRT_NONE(0x5600, 0x100)
2771#endif
2772
2773
2774INT_DEFINE_BEGIN(altivec_assist)
2775	IVEC=0x1700
2776#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2777	IKVM_REAL=1
2778#endif
2779INT_DEFINE_END(altivec_assist)
2780
2781EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100)
2782	GEN_INT_ENTRY altivec_assist, virt=0
2783EXC_REAL_END(altivec_assist, 0x1700, 0x100)
2784EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100)
2785	GEN_INT_ENTRY altivec_assist, virt=1
2786EXC_VIRT_END(altivec_assist, 0x5700, 0x100)
2787EXC_COMMON_BEGIN(altivec_assist_common)
2788	GEN_COMMON altivec_assist
2789	addi	r3,r1,STACK_FRAME_OVERHEAD
2790#ifdef CONFIG_ALTIVEC
2791	bl	altivec_assist_exception
2792	REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2793#else
2794	bl	unknown_exception
2795#endif
2796	b	interrupt_return
2797
2798	GEN_KVM altivec_assist
2799
2800
2801#ifdef CONFIG_CBE_RAS
2802INT_DEFINE_BEGIN(cbe_thermal)
2803	IVEC=0x1800
2804	IHSRR=1
2805	IKVM_SKIP=1
2806	IKVM_REAL=1
2807INT_DEFINE_END(cbe_thermal)
2808
2809EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100)
2810	GEN_INT_ENTRY cbe_thermal, virt=0
2811EXC_REAL_END(cbe_thermal, 0x1800, 0x100)
2812EXC_VIRT_NONE(0x5800, 0x100)
2813EXC_COMMON_BEGIN(cbe_thermal_common)
2814	GEN_COMMON cbe_thermal
2815	addi	r3,r1,STACK_FRAME_OVERHEAD
2816	bl	cbe_thermal_exception
2817	b	interrupt_return
2818
2819	GEN_KVM cbe_thermal
2820
2821#else /* CONFIG_CBE_RAS */
2822EXC_REAL_NONE(0x1800, 0x100)
2823EXC_VIRT_NONE(0x5800, 0x100)
2824#endif
2825
2826
2827#ifdef CONFIG_PPC_WATCHDOG
2828
2829INT_DEFINE_BEGIN(soft_nmi)
2830	IVEC=0x900
2831	ISTACK=0
2832	IRECONCILE=0	/* Soft-NMI may fire under local_irq_disable */
2833INT_DEFINE_END(soft_nmi)
2834
2835/*
2836 * Branch to soft_nmi_interrupt using the emergency stack. The emergency
2837 * stack is one that is usable by maskable interrupts so long as MSR_EE
2838 * remains off. It is used for recovery when something has corrupted the
2839 * normal kernel stack, for example. The "soft NMI" must not use the process
2840 * stack because we want irq disabled sections to avoid touching the stack
2841 * at all (other than PMU interrupts), so use the emergency stack for this,
2842 * and run it entirely with interrupts hard disabled.
2843 */
2844EXC_COMMON_BEGIN(soft_nmi_common)
2845	mfspr	r11,SPRN_SRR0
2846	mr	r10,r1
2847	ld	r1,PACAEMERGSP(r13)
2848	subi	r1,r1,INT_FRAME_SIZE
2849	__GEN_COMMON_BODY soft_nmi
2850
2851	/*
2852	 * Set IRQS_ALL_DISABLED and save PACAIRQHAPPENED (see
2853	 * system_reset_common)
2854	 */
2855	li	r10,IRQS_ALL_DISABLED
2856	stb	r10,PACAIRQSOFTMASK(r13)
2857	lbz	r10,PACAIRQHAPPENED(r13)
2858	std	r10,RESULT(r1)
2859	ori	r10,r10,PACA_IRQ_HARD_DIS
2860	stb	r10,PACAIRQHAPPENED(r13)
2861
2862	addi	r3,r1,STACK_FRAME_OVERHEAD
2863	bl	soft_nmi_interrupt
 
2864
2865	/* Clear MSR_RI before setting SRR0 and SRR1. */
2866	li	r9,0
2867	mtmsrd	r9,1
2868
2869	/*
2870	 * Restore soft mask settings.
2871	 */
2872	ld	r10,RESULT(r1)
2873	stb	r10,PACAIRQHAPPENED(r13)
2874	ld	r10,SOFTE(r1)
2875	stb	r10,PACAIRQSOFTMASK(r13)
2876
2877	kuap_restore_amr r9, r10
2878	EXCEPTION_RESTORE_REGS hsrr=0
2879	RFI_TO_KERNEL
2880
2881#endif /* CONFIG_PPC_WATCHDOG */
2882
2883/*
2884 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
2885 * - If it was a decrementer interrupt, we bump the dec to max and return.
2886 * - If it was a doorbell we return immediately since doorbells are edge
2887 *   triggered and won't automatically refire.
2888 * - If it was a HMI we return immediately since we handled it in realmode
2889 *   and it won't refire.
2890 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
2891 * This is called with r10 containing the value to OR to the paca field.
2892 */
2893.macro MASKED_INTERRUPT hsrr=0
2894	.if \hsrr
2895masked_Hinterrupt:
2896	.else
2897masked_interrupt:
2898	.endif
 
2899	lbz	r11,PACAIRQHAPPENED(r13)
2900	or	r11,r11,r10
2901	stb	r11,PACAIRQHAPPENED(r13)
2902	cmpwi	r10,PACA_IRQ_DEC
2903	bne	1f
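	/* Decrementer: load the maximum positive value (0x7fffffff) into DEC
	 * so it does not fire again before the interrupt is replayed. */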
2904	lis	r10,0x7fff
2905	ori	r10,r10,0xffff
2906	mtspr	SPRN_DEC,r10
2907#ifdef CONFIG_PPC_WATCHDOG
2908	b	soft_nmi_common
2909#else
2910	b	2f
2911#endif
29121:	andi.	r10,r10,PACA_IRQ_MUST_HARD_MASK
2913	beq	2f
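	/* Must be hard masked: clear MSR[EE] in the saved (H)SRR1 so the
	 * interrupted context resumes with external interrupts disabled. */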
2914	xori	r12,r12,MSR_EE	/* clear MSR_EE */
2915	.if \hsrr
2916	mtspr	SPRN_HSRR1,r12
 
 
2917	.else
2918	mtspr	SPRN_SRR1,r12
 
 
2919	.endif
2920	ori	r11,r11,PACA_IRQ_HARD_DIS
2921	stb	r11,PACAIRQHAPPENED(r13)
29222:	/* done */
2923	ld	r10,PACA_EXGEN+EX_CTR(r13)
2924	mtctr	r10
2925	mtcrf	0x80,r9
2926	std	r1,PACAR1(r13)
2927	ld	r9,PACA_EXGEN+EX_R9(r13)
2928	ld	r10,PACA_EXGEN+EX_R10(r13)
2929	ld	r11,PACA_EXGEN+EX_R11(r13)
2930	ld	r12,PACA_EXGEN+EX_R12(r13)
2931	ld	r13,PACA_EXGEN+EX_R13(r13)
2932	/* May return to masked low address where r13 is not set up */
2933	.if \hsrr
2934	HRFI_TO_KERNEL
2935	.else
2936	RFI_TO_KERNEL
2937	.endif
2938	b	.
 
2939.endm
2940
2941TRAMP_REAL_BEGIN(stf_barrier_fallback)
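	/*
	 * Software fallback for the store forwarding barrier: store and reload
	 * r9/r10 around a sync, a special no-op form (ori 31,31,0), and a
	 * chain of taken branches.
	 */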
2942	std	r9,PACA_EXRFI+EX_R9(r13)
2943	std	r10,PACA_EXRFI+EX_R10(r13)
2944	sync
2945	ld	r9,PACA_EXRFI+EX_R9(r13)
2946	ld	r10,PACA_EXRFI+EX_R10(r13)
2947	ori	31,31,0
2948	.rept 14
2949	b	1f
29501:
2951	.endr
2952	blr
2953
2954TRAMP_REAL_BEGIN(rfi_flush_fallback)
2955	SET_SCRATCH0(r13);
2956	GET_PACA(r13);
2957	std	r1,PACA_EXRFI+EX_R12(r13)
2958	ld	r1,PACAKSAVE(r13)
2959	std	r9,PACA_EXRFI+EX_R9(r13)
2960	std	r10,PACA_EXRFI+EX_R10(r13)
2961	std	r11,PACA_EXRFI+EX_R11(r13)
2962	mfctr	r9
2963	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
2964	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
2965	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
2966	mtctr	r11
2967	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
2968
2969	/* order ld/st prior to dcbt stop all streams with flushing */
2970	sync
2971
2972	/*
2973	 * The load addresses are at staggered offsets within cachelines,
2974	 * which suits some pipelines better (on others it should not
2975	 * hurt).
2976	 */
29771:
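	/*
	 * Each pass loads from eight addresses 0x88 bytes apart (one cache
	 * line plus 8 bytes), then advances r10 by eight cache lines (0x400).
	 */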
2978	ld	r11,(0x80 + 8)*0(r10)
2979	ld	r11,(0x80 + 8)*1(r10)
2980	ld	r11,(0x80 + 8)*2(r10)
2981	ld	r11,(0x80 + 8)*3(r10)
2982	ld	r11,(0x80 + 8)*4(r10)
2983	ld	r11,(0x80 + 8)*5(r10)
2984	ld	r11,(0x80 + 8)*6(r10)
2985	ld	r11,(0x80 + 8)*7(r10)
2986	addi	r10,r10,0x80*8
2987	bdnz	1b
2988
2989	mtctr	r9
2990	ld	r9,PACA_EXRFI+EX_R9(r13)
2991	ld	r10,PACA_EXRFI+EX_R10(r13)
2992	ld	r11,PACA_EXRFI+EX_R11(r13)
2993	ld	r1,PACA_EXRFI+EX_R12(r13)
2994	GET_SCRATCH0(r13);
2995	rfid
2996
2997TRAMP_REAL_BEGIN(hrfi_flush_fallback)
2998	SET_SCRATCH0(r13);
2999	GET_PACA(r13);
3000	std	r1,PACA_EXRFI+EX_R12(r13)
3001	ld	r1,PACAKSAVE(r13)
3002	std	r9,PACA_EXRFI+EX_R9(r13)
3003	std	r10,PACA_EXRFI+EX_R10(r13)
3004	std	r11,PACA_EXRFI+EX_R11(r13)
3005	mfctr	r9
3006	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
3007	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
3008	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
3009	mtctr	r11
3010	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
3011
3012	/* order ld/st prior to dcbt stop all streams with flushing */
3013	sync
3014
3015	/*
3016	 * The load addresses are at staggered offsets within cachelines,
3017	 * which suits some pipelines better (on others it should not
3018	 * hurt).
3019	 */
30201:
3021	ld	r11,(0x80 + 8)*0(r10)
3022	ld	r11,(0x80 + 8)*1(r10)
3023	ld	r11,(0x80 + 8)*2(r10)
3024	ld	r11,(0x80 + 8)*3(r10)
3025	ld	r11,(0x80 + 8)*4(r10)
3026	ld	r11,(0x80 + 8)*5(r10)
3027	ld	r11,(0x80 + 8)*6(r10)
3028	ld	r11,(0x80 + 8)*7(r10)
3029	addi	r10,r10,0x80*8
3030	bdnz	1b
3031
3032	mtctr	r9
3033	ld	r9,PACA_EXRFI+EX_R9(r13)
3034	ld	r10,PACA_EXRFI+EX_R10(r13)
3035	ld	r11,PACA_EXRFI+EX_R11(r13)
3036	ld	r1,PACA_EXRFI+EX_R12(r13)
3037	GET_SCRATCH0(r13);
3038	hrfid
3039
3040TRAMP_REAL_BEGIN(rfscv_flush_fallback)
3041	/* system call volatile */
3042	mr	r7,r13
3043	GET_PACA(r13);
3044	mr	r8,r1
3045	ld	r1,PACAKSAVE(r13)
3046	mfctr	r9
3047	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
3048	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
3049	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
3050	mtctr	r11
3051	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
3052
3053	/* order ld/st prior to dcbt stop all streams with flushing */
3054	sync
3055
3056	/*
3057	 * The load addresses are at staggered offsets within cachelines,
3058	 * which suits some pipelines better (on others it should not
3059	 * hurt).
3060	 */
30611:
3062	ld	r11,(0x80 + 8)*0(r10)
3063	ld	r11,(0x80 + 8)*1(r10)
3064	ld	r11,(0x80 + 8)*2(r10)
3065	ld	r11,(0x80 + 8)*3(r10)
3066	ld	r11,(0x80 + 8)*4(r10)
3067	ld	r11,(0x80 + 8)*5(r10)
3068	ld	r11,(0x80 + 8)*6(r10)
3069	ld	r11,(0x80 + 8)*7(r10)
3070	addi	r10,r10,0x80*8
3071	bdnz	1b
3072
3073	mtctr	r9
3074	li	r9,0
3075	li	r10,0
3076	li	r11,0
3077	mr	r1,r8
3078	mr	r13,r7
3079	RFSCV
3080
3081USE_TEXT_SECTION()
3082	MASKED_INTERRUPT
3083	MASKED_INTERRUPT hsrr=1
3084
3085#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
3086kvmppc_skip_interrupt:
3087	/*
3088	 * Here all GPRs are unchanged from when the interrupt happened
3089	 * except for r13, which is saved in SPRG_SCRATCH0.
3090	 */
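	/* Skip the faulting instruction by advancing SRR0 by one instruction. */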
3091	mfspr	r13, SPRN_SRR0
3092	addi	r13, r13, 4
3093	mtspr	SPRN_SRR0, r13
3094	GET_SCRATCH0(r13)
3095	RFI_TO_KERNEL
3096	b	.
3097
3098kvmppc_skip_Hinterrupt:
3099	/*
3100	 * Here all GPRs are unchanged from when the interrupt happened
3101	 * except for r13, which is saved in SPRG_SCRATCH0.
3102	 */
3103	mfspr	r13, SPRN_HSRR0
3104	addi	r13, r13, 4
3105	mtspr	SPRN_HSRR0, r13
3106	GET_SCRATCH0(r13)
3107	HRFI_TO_KERNEL
3108	b	.
3109#endif
3110
3111	/*
3112	 * Relocation-on interrupts: A subset of the interrupts can be delivered
3113	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
3114	 * it.  Addresses are the same as the original interrupt addresses, but
3115	 * offset by 0xc000000000004000.
3116	 * It's impossible to receive interrupts below 0x300 via this mechanism.
3117	 * KVM: None of these traps are from the guest; anything that escalated
3118	 * to HV=1 from HV=0 is delivered via real mode handlers.
3119	 */
3120
3121	/*
3122	 * This uses the standard macro, since the original 0x300 vector
3123	 * only has extra guff for STAB-based processors -- which never
3124	 * come here.
3125	 */
3126
3127EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
3128	b	__ppc64_runlatch_on
3129
3130USE_FIXED_SECTION(virt_trampolines)
3131	/*
3132	 * All code below __end_interrupts is treated as soft-masked. If
3133	 * any code runs here with MSR[EE]=1, it must then cope with a pending
3134	 * soft interrupt being raised (i.e., by ensuring it is replayed).
3135	 *
3136	 * The __end_interrupts marker must be past the out-of-line (OOL)
3137	 * handlers, so that they are copied to real address 0x100 when running
3138	 * a relocatable kernel. This ensures they can be reached from the short
3139	 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
3140	 * directly, without using LOAD_HANDLER().
3141	 */
3142	.align	7
3143	.globl	__end_interrupts
3144__end_interrupts:
3145DEFINE_FIXED_SYMBOL(__end_interrupts)
3146
3147#ifdef CONFIG_PPC_970_NAP
3148	/*
3149	 * Called by exception entry code if _TLF_NAPPING was set, this clears
3150	 * the NAPPING flag, and redirects the exception exit to
3151	 * power4_fixup_nap_return.
3152	 */
3153	.globl power4_fixup_nap
3154EXC_COMMON_BEGIN(power4_fixup_nap)
3155	andc	r9,r9,r10
3156	std	r9,TI_LOCAL_FLAGS(r11)
3157	LOAD_REG_ADDR(r10, power4_idle_nap_return)
3158	std	r10,_NIP(r1)
3159	blr
3160
3161power4_idle_nap_return:
3162	blr
3163#endif
3164
3165CLOSE_FIXED_SECTION(real_vectors);
3166CLOSE_FIXED_SECTION(real_trampolines);
3167CLOSE_FIXED_SECTION(virt_vectors);
3168CLOSE_FIXED_SECTION(virt_trampolines);
3169
3170USE_TEXT_SECTION()
3171
3172/* MSR[RI] should be clear because this uses SRR[01] */
3173enable_machine_check:
3174	mflr	r0
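	/* bcl 20,31,$+4 is the position-independent "get current address"
	 * idiom: it sets LR to the address of the following instruction. */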
3175	bcl	20,31,$+4
31760:	mflr	r3
3177	addi	r3,r3,(1f - 0b)
3178	mtspr	SPRN_SRR0,r3
3179	mfmsr	r3
3180	ori	r3,r3,MSR_ME
3181	mtspr	SPRN_SRR1,r3
3182	RFI_TO_KERNEL
31831:	mtlr	r0
3184	blr
3185
3186/* MSR[RI] should be clear because this uses SRR[01] */
3187disable_machine_check:
3188	mflr	r0
3189	bcl	20,31,$+4
31900:	mflr	r3
3191	addi	r3,r3,(1f - 0b)
3192	mtspr	SPRN_SRR0,r3
3193	mfmsr	r3
3194	li	r4,MSR_ME
3195	andc	r3,r3,r4
3196	mtspr	SPRN_SRR1,r3
3197	RFI_TO_KERNEL
31981:	mtlr	r0
3199	blr
3200
3201/*
3202 * Hash table stuff
3203 */
3204	.balign	IFETCH_ALIGN_BYTES
3205do_hash_page:
3206#ifdef CONFIG_PPC_BOOK3S_64
3207	lis	r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h
3208	ori	r0,r0,DSISR_BAD_FAULT_64S@l
3209	and.	r0,r5,r0		/* weird error? */
3210	bne-	handle_page_fault	/* if not, try to insert a HPTE */
3211
3212	/*
3213	 * If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
3214	 * don't call hash_page, just fail the fault. This is required to
3215	 * prevent re-entrancy problems in the hash code, namely perf
3216	 * interrupts hitting while something holds H_PAGE_BUSY, and taking a
3217	 * hash fault. See the comment in hash_preload().
3218	 */
3219	ld	r11, PACA_THREAD_INFO(r13)
3220	lwz	r0,TI_PREEMPT(r11)
3221	andis.	r0,r0,NMI_MASK@h
3222	bne	77f
3223
3224	/*
3225	 * r3 contains the trap number
3226	 * r4 contains the faulting address
3227	 * r5 contains dsisr
3228	 * r6 msr
3229	 *
3230	 * at return r3 = 0 for success, 1 for page fault, negative for error
3231	 */
3232	bl	__hash_page		/* build HPTE if possible */
3233	cmpdi	r3,0			/* see if __hash_page succeeded */
3234
3235	/* Success */
3236	beq	interrupt_return	/* Return from exception on success */
3237
3238	/* Error */
3239	blt-	13f
3240
3241	/* Reload DAR/DSISR into r4/r5 for the DABR check below */
3242	ld	r4,_DAR(r1)
3243	ld      r5,_DSISR(r1)
3244#endif /* CONFIG_PPC_BOOK3S_64 */
3245
3246/* Here we have a page fault that hash_page can't handle. */
3247handle_page_fault:
324811:	andis.  r0,r5,DSISR_DABRMATCH@h
3249	bne-    handle_dabr_fault
3250	addi	r3,r1,STACK_FRAME_OVERHEAD
3251	bl	do_page_fault
3252	cmpdi	r3,0
3253	beq+	interrupt_return
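	/* Non-zero return from do_page_fault is the signal to raise; pass it
	 * to bad_page_fault along with the faulting address. */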
 
3254	mr	r5,r3
3255	addi	r3,r1,STACK_FRAME_OVERHEAD
3256	ld	r4,_DAR(r1)
3257	bl	bad_page_fault
3258	b	interrupt_return
3259
3260/* We have a data breakpoint exception - handle it */
3261handle_dabr_fault:
 
3262	ld      r4,_DAR(r1)
3263	ld      r5,_DSISR(r1)
3264	addi    r3,r1,STACK_FRAME_OVERHEAD
3265	bl      do_break
3266	/*
3267	 * do_break() may have changed the NV GPRs while handling a breakpoint.
3268	 * If so, we need to restore them with their updated values.
3269	 */
3270	REST_NVGPRS(r1)
3271	b       interrupt_return
3272
3273
3274#ifdef CONFIG_PPC_BOOK3S_64
3275/* We have a page fault that hash_page could handle but HV refused
3276 * the PTE insertion
3277 */
327813:	mr	r5,r3
 
3279	addi	r3,r1,STACK_FRAME_OVERHEAD
3280	ld	r4,_DAR(r1)
3281	bl	low_hash_fault
3282	b	interrupt_return
3283#endif
3284
3285/*
3286 * We come here as a result of a DSI at a point where we don't want
3287 * to call hash_page, such as when we are accessing memory (possibly
3288 * user memory) inside a PMU interrupt that occurred while interrupts
3289 * were soft-disabled.  We want to invoke the exception handler for
3290 * the access, or panic if there isn't a handler.
3291 */
329277:	addi	r3,r1,STACK_FRAME_OVERHEAD
 
3293	li	r5,SIGSEGV
3294	bl	bad_page_fault
3295	b	interrupt_return
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
v5.4
   1/* SPDX-License-Identifier: GPL-2.0 */
   2/*
   3 * This file contains the 64-bit "server" PowerPC variant
   4 * of the low level exception handling including exception
   5 * vectors, exception return, part of the slb and stab
   6 * handling and other fixed offset specific things.
   7 *
   8 * This file is meant to be #included from head_64.S due to
   9 * position dependent assembly.
  10 *
  11 * Most of this originates from head_64.S and thus has the same
  12 * copyright history.
  13 *
  14 */
  15
  16#include <asm/hw_irq.h>
  17#include <asm/exception-64s.h>
  18#include <asm/ptrace.h>
  19#include <asm/cpuidle.h>
  20#include <asm/head-64.h>
  21#include <asm/feature-fixups.h>
  22#include <asm/kup.h>
  23
  24/* PACA save area offsets (exgen, exmc, etc) */
  25#define EX_R9		0
  26#define EX_R10		8
  27#define EX_R11		16
  28#define EX_R12		24
  29#define EX_R13		32
  30#define EX_DAR		40
  31#define EX_DSISR	48
  32#define EX_CCR		52
  33#define EX_CFAR		56
  34#define EX_PPR		64
  35#if defined(CONFIG_RELOCATABLE)
  36#define EX_CTR		72
  37.if EX_SIZE != 10
  38	.error "EX_SIZE is wrong"
  39.endif
  40#else
  41.if EX_SIZE != 9
  42	.error "EX_SIZE is wrong"
  43.endif
  44#endif
  45
  46/*
  47 * Following are fixed section helper macros.
  48 *
  49 * EXC_REAL_BEGIN/END  - real, unrelocated exception vectors
  50 * EXC_VIRT_BEGIN/END  - virt (AIL), unrelocated exception vectors
  51 * TRAMP_REAL_BEGIN    - real, unrelocated helpers (virt may call these)
  52 * TRAMP_VIRT_BEGIN    - virt, unreloc helpers (in practice, real can use)
  53 * TRAMP_KVM_BEGIN     - KVM handlers, these are put into real, unrelocated
  54 * EXC_COMMON          - After switching to virtual, relocated mode.
  55 */
  56
  57#define EXC_REAL_BEGIN(name, start, size)			\
  58	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)
  59
  60#define EXC_REAL_END(name, start, size)				\
  61	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)
  62
  63#define EXC_VIRT_BEGIN(name, start, size)			\
  64	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)
  65
  66#define EXC_VIRT_END(name, start, size)				\
  67	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)
  68
  69#define EXC_COMMON_BEGIN(name)					\
  70	USE_TEXT_SECTION();					\
  71	.balign IFETCH_ALIGN_BYTES;				\
  72	.global name;						\
  73	_ASM_NOKPROBE_SYMBOL(name);				\
  74	DEFINE_FIXED_SYMBOL(name);				\
  75name:
  76
  77#define TRAMP_REAL_BEGIN(name)					\
  78	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)
  79
  80#define TRAMP_VIRT_BEGIN(name)					\
  81	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)
  82
  83#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
  84#define TRAMP_KVM_BEGIN(name)					\
  85	TRAMP_VIRT_BEGIN(name)
  86#else
  87#define TRAMP_KVM_BEGIN(name)
  88#endif
  89
  90#define EXC_REAL_NONE(start, size)				\
  91	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
  92	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)
  93
  94#define EXC_VIRT_NONE(start, size)				\
  95	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
  96	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)
  97
  98/*
  99 * We're short on space and time in the exception prolog, so we can't
 100 * use the normal LOAD_REG_IMMEDIATE macro to load the address of label.
 101 * Instead we get the base of the kernel from paca->kernelbase and or in the low
 102 * part of label. This requires that the label be within 64KB of kernelbase, and
 103 * that kernelbase be 64K aligned.
 104 */
 105#define LOAD_HANDLER(reg, label)					\
 106	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
 107	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)
 108
 109#define __LOAD_HANDLER(reg, label)					\
 110	ld	reg,PACAKBASE(r13);					\
 111	ori	reg,reg,(ABS_ADDR(label))@l
 112
 113/*
 114 * Branches from unrelocated code (e.g., interrupts) to labels outside
 115 * head-y require >64K offsets.
 116 */
 117#define __LOAD_FAR_HANDLER(reg, label)					\
 118	ld	reg,PACAKBASE(r13);					\
 119	ori	reg,reg,(ABS_ADDR(label))@l;				\
 120	addis	reg,reg,(ABS_ADDR(label))@h
 121
 122/* Exception register prefixes */
 123#define EXC_HV_OR_STD	2 /* depends on HVMODE */
 124#define EXC_HV		1
 125#define EXC_STD		0
 126
 127#if defined(CONFIG_RELOCATABLE)
 128/*
 129 * If we support interrupts with relocation on AND we're a relocatable kernel,
 130 * we need to use CTR to get to the 2nd level handler.  So, save/restore it
 131 * when required.
 132 */
 133#define SAVE_CTR(reg, area)	mfctr	reg ; 	std	reg,area+EX_CTR(r13)
 134#define GET_CTR(reg, area) 			ld	reg,area+EX_CTR(r13)
 135#define RESTORE_CTR(reg, area)	ld	reg,area+EX_CTR(r13) ; mtctr reg
 136#else
 137/* ...else CTR is unused and in register. */
 138#define SAVE_CTR(reg, area)
 139#define GET_CTR(reg, area) 	mfctr	reg
 140#define RESTORE_CTR(reg, area)
 141#endif
 142
 143/*
 144 * PPR save/restore macros used in exceptions-64s.S
 145 * Used for P7 or later processors
 146 */
 147#define SAVE_PPR(area, ra)						\
 148BEGIN_FTR_SECTION_NESTED(940)						\
 149	ld	ra,area+EX_PPR(r13);	/* Read PPR from paca */	\
 150	std	ra,_PPR(r1);						\
 151END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,940)
 152
 153#define RESTORE_PPR_PACA(area, ra)					\
 154BEGIN_FTR_SECTION_NESTED(941)						\
 155	ld	ra,area+EX_PPR(r13);					\
 156	mtspr	SPRN_PPR,ra;						\
 157END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,941)
 158
 159/*
 160 * Get an SPR into a register if the CPU has the given feature
 161 */
 162#define OPT_GET_SPR(ra, spr, ftr)					\
 163BEGIN_FTR_SECTION_NESTED(943)						\
 164	mfspr	ra,spr;							\
 165END_FTR_SECTION_NESTED(ftr,ftr,943)
 166
 167/*
 168 * Set an SPR from a register if the CPU has the given feature
 169 */
 170#define OPT_SET_SPR(ra, spr, ftr)					\
 171BEGIN_FTR_SECTION_NESTED(943)						\
 172	mtspr	spr,ra;							\
 173END_FTR_SECTION_NESTED(ftr,ftr,943)
 174
 175/*
 176 * Save a register to the PACA if the CPU has the given feature
 177 */
 178#define OPT_SAVE_REG_TO_PACA(offset, ra, ftr)				\
 179BEGIN_FTR_SECTION_NESTED(943)						\
 180	std	ra,offset(r13);						\
 181END_FTR_SECTION_NESTED(ftr,ftr,943)
 182
 183/*
 184 * Branch to label using its 0xC000 address. This results in instruction
 185 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
 186 * on using mtmsr rather than rfid.
 187 *
 188 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
 189 * load KBASE for a slight optimisation.
 190 */
 191#define BRANCH_TO_C000(reg, label)					\
 192	__LOAD_FAR_HANDLER(reg, label);					\
 193	mtctr	reg;							\
 194	bctr
 195
 196.macro INT_KVM_HANDLER name, vec, hsrr, area, skip
 197	TRAMP_KVM_BEGIN(\name\()_kvm)
 198	KVM_HANDLER \vec, \hsrr, \area, \skip
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 199.endm
 200
 201#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 202#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 203/*
 204 * If hv is possible, interrupts come into to the hv version
 205 * of the kvmppc_interrupt code, which then jumps to the PR handler,
 206 * kvmppc_interrupt_pr, if the guest is a PR guest.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 207 */
 208#define kvmppc_interrupt kvmppc_interrupt_hv
 209#else
 210#define kvmppc_interrupt kvmppc_interrupt_pr
 211#endif
 212
 213.macro KVMTEST name, hsrr, n
 214	lbz	r10,HSTATE_IN_GUEST(r13)
 215	cmpwi	r10,0
 216	bne	\name\()_kvm
 217.endm
 218
 219.macro KVM_HANDLER vec, hsrr, area, skip
 220	.if \skip
 
 
 
 221	cmpwi	r10,KVM_GUEST_MODE_SKIP
 222	beq	89f
 223	.else
 224BEGIN_FTR_SECTION_NESTED(947)
 225	ld	r10,\area+EX_CFAR(r13)
 226	std	r10,HSTATE_CFAR(r13)
 227END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947)
 228	.endif
 229
 230BEGIN_FTR_SECTION_NESTED(948)
 231	ld	r10,\area+EX_PPR(r13)
 
 
 232	std	r10,HSTATE_PPR(r13)
 233END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948)
 234	ld	r10,\area+EX_R10(r13)
 
 235	std	r12,HSTATE_SCRATCH0(r13)
 236	sldi	r12,r9,32
 
 
 237	/* HSRR variants have the 0x2 bit added to their trap number */
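	/* e.g. a vector 0x980 (hdecrementer) HV interrupt is encoded as trap 0x982 */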
 238	.if \hsrr == EXC_HV_OR_STD
 239	BEGIN_FTR_SECTION
 240	ori	r12,r12,(\vec + 0x2)
 241	FTR_SECTION_ELSE
 242	ori	r12,r12,(\vec)
 243	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 244	.elseif \hsrr
 245	ori	r12,r12,(\vec + 0x2)
 246	.else
 247	ori	r12,r12,(\vec)
 248	.endif
 249
 250#ifdef CONFIG_RELOCATABLE
 251	/*
 252	 * KVM requires __LOAD_FAR_HANDLER because kvmppc_interrupt lives
 253	 * outside the head section. CONFIG_RELOCATABLE KVM expects CTR
 254	 * to be saved in HSTATE_SCRATCH1.
 255	 */
 256	mfctr	r9
 257	std	r9,HSTATE_SCRATCH1(r13)
 258	__LOAD_FAR_HANDLER(r9, kvmppc_interrupt)
 259	mtctr	r9
 260	ld	r9,\area+EX_R9(r13)
 261	bctr
 262#else
 263	ld	r9,\area+EX_R9(r13)
 264	b	kvmppc_interrupt
 265#endif
 266
 267
 268	.if \skip
 26989:	mtocrf	0x80,r9
 270	ld	r9,\area+EX_R9(r13)
 271	ld	r10,\area+EX_R10(r13)
 272	.if \hsrr == EXC_HV_OR_STD
 
 
 
 
 273	BEGIN_FTR_SECTION
 274	b	kvmppc_skip_Hinterrupt
 275	FTR_SECTION_ELSE
 276	b	kvmppc_skip_interrupt
 277	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 278	.elseif \hsrr
 279	b	kvmppc_skip_Hinterrupt
 280	.else
 281	b	kvmppc_skip_interrupt
 282	.endif
 283	.endif
 284.endm
 285
 286#else
 287.macro KVMTEST name, hsrr, n
 288.endm
 289.macro KVM_HANDLER vec, hsrr, area, skip
 290.endm
 291#endif
 292
 293.macro INT_SAVE_SRR_AND_JUMP label, hsrr, set_ri
 294	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
 295	.if ! \set_ri
 296	xori	r10,r10,MSR_RI		/* Clear MSR_RI */
 297	.endif
 298	.if \hsrr == EXC_HV_OR_STD
 299	BEGIN_FTR_SECTION
 300	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
 301	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
 302	mtspr	SPRN_HSRR1,r10
 303	FTR_SECTION_ELSE
 304	mfspr	r11,SPRN_SRR0		/* save SRR0 */
 305	mfspr	r12,SPRN_SRR1		/* and SRR1 */
 306	mtspr	SPRN_SRR1,r10
 307	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 308	.elseif \hsrr
 309	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
 310	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
 311	mtspr	SPRN_HSRR1,r10
 312	.else
 313	mfspr	r11,SPRN_SRR0		/* save SRR0 */
 314	mfspr	r12,SPRN_SRR1		/* and SRR1 */
 315	mtspr	SPRN_SRR1,r10
 316	.endif
 317	LOAD_HANDLER(r10, \label\())
 318	.if \hsrr == EXC_HV_OR_STD
 319	BEGIN_FTR_SECTION
 320	mtspr	SPRN_HSRR0,r10
 321	HRFI_TO_KERNEL
 322	FTR_SECTION_ELSE
 323	mtspr	SPRN_SRR0,r10
 324	RFI_TO_KERNEL
 325	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 326	.elseif \hsrr
 327	mtspr	SPRN_HSRR0,r10
 328	HRFI_TO_KERNEL
 329	.else
 330	mtspr	SPRN_SRR0,r10
 331	RFI_TO_KERNEL
 332	.endif
 333	b	.	/* prevent speculative execution */
 334.endm
 335
 336/* INT_SAVE_SRR_AND_JUMP works for real or virt, this is faster but virt only */
 337.macro INT_VIRT_SAVE_SRR_AND_JUMP label, hsrr
 338#ifdef CONFIG_RELOCATABLE
 339	.if \hsrr == EXC_HV_OR_STD
 340	BEGIN_FTR_SECTION
 341	mfspr	r11,SPRN_HSRR0	/* save HSRR0 */
 342	FTR_SECTION_ELSE
 343	mfspr	r11,SPRN_SRR0	/* save SRR0 */
 344	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 345	.elseif \hsrr
 346	mfspr	r11,SPRN_HSRR0	/* save HSRR0 */
 347	.else
 348	mfspr	r11,SPRN_SRR0	/* save SRR0 */
 349	.endif
 350	LOAD_HANDLER(r12, \label\())
 351	mtctr	r12
 352	.if \hsrr == EXC_HV_OR_STD
 353	BEGIN_FTR_SECTION
 354	mfspr	r12,SPRN_HSRR1	/* and HSRR1 */
 355	FTR_SECTION_ELSE
 356	mfspr	r12,SPRN_SRR1	/* and SRR1 */
 357	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 358	.elseif \hsrr
 359	mfspr	r12,SPRN_HSRR1	/* and HSRR1 */
 360	.else
 361	mfspr	r12,SPRN_SRR1	/* and SRR1 */
 362	.endif
 363	li	r10,MSR_RI
 364	mtmsrd 	r10,1		/* Set RI (EE=0) */
 365	bctr
 366#else
 367	.if \hsrr == EXC_HV_OR_STD
 368	BEGIN_FTR_SECTION
 369	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
 370	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
 371	FTR_SECTION_ELSE
 372	mfspr	r11,SPRN_SRR0		/* save SRR0 */
 373	mfspr	r12,SPRN_SRR1		/* and SRR1 */
 374	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 375	.elseif \hsrr
 376	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
 377	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
 378	.else
 379	mfspr	r11,SPRN_SRR0		/* save SRR0 */
 380	mfspr	r12,SPRN_SRR1		/* and SRR1 */
 381	.endif
 382	li	r10,MSR_RI
 383	mtmsrd 	r10,1			/* Set RI (EE=0) */
 384	b	\label
 385#endif
 386.endm
 387
 388/*
 389 * This is the BOOK3S interrupt entry code macro.
 390 *
 391 * This can result in one of several things happening:
 392 * - Branch to the _common handler, relocated, in virtual mode.
 393 *   These are normal interrupts (synchronous and asynchronous) handled by
 394 *   the kernel.
 395 * - Branch to KVM. This is relocated, but real mode interrupts remain in real mode.
 396 *   These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by,
 397 *   or intended for, the host or guest kernel, but KVM must always be involved
 398 *   because the machine state is set for guest execution.
 399 * - Branch to the masked handler, unrelocated.
 400 *   These occur when maskable asynchronous interrupts are taken with the
 401 *   irq_soft_mask set.
 402 * - Branch to an "early" handler in real mode but relocated.
 403 *   This is done if early=1. MCE and HMI use these to handle errors in real
 404 *   mode.
 405 * - Fall through and continue executing in real, unrelocated mode.
 406 *   This is done if early=2.
 407 */
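/*
 * Summary of the INT_HANDLER arguments (derived from the macro body below):
 *
 * name    - base name for the generated labels (tramp_real_<name>,
 *           <name>_common, <name>_early_common, <name>_kvm).
 * vec     - vector / trap number; also selects the irq_happened bit for
 *           maskable interrupts.
 * ool     - out of line: keep only the first few instructions in the fixed
 *           vector and branch to a trampoline for the rest of this prolog.
 * early   - 1: branch to <name>_early_common at its 0xc000 address, still
 *           in real mode (MCE and HMI). 2: fall through in real mode.
 * virt    - virtual (AIL) flavour of the vector; reaches the common handler
 *           with mtmsrd rather than rfid.
 * hsrr    - EXC_STD, EXC_HV or EXC_HV_OR_STD; selects SRR vs HSRR usage.
 * area    - paca save area to use (PACA_EXGEN unless stated otherwise).
 * ri      - if 0, do not set MSR_RI for the common handler.
 * dar/dsisr - save (H)DAR / (H)DSISR into the save area.
 * bitmask - soft-mask test; branch to masked_(H)interrupt when masked.
 * kvm     - run KVMTEST and divert to the KVM handler if in a guest.
 */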
 408.macro INT_HANDLER name, vec, ool=0, early=0, virt=0, hsrr=0, area=PACA_EXGEN, ri=1, dar=0, dsisr=0, bitmask=0, kvm=0
 409	SET_SCRATCH0(r13)			/* save r13 */
 410	GET_PACA(r13)
 411	std	r9,\area\()+EX_R9(r13)		/* save r9 */
 412	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR)
 
 
 413	HMT_MEDIUM
 414	std	r10,\area\()+EX_R10(r13)	/* save r10 - r12 */
 415	OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR)
 
 
 416	.if \ool
 417	.if !\virt
 418	b	tramp_real_\name
 419	.pushsection .text
 420	TRAMP_REAL_BEGIN(tramp_real_\name)
 421	.else
 422	b	tramp_virt_\name
 423	.pushsection .text
 424	TRAMP_VIRT_BEGIN(tramp_virt_\name)
 425	.endif
 426	.endif
 427
 428	OPT_SAVE_REG_TO_PACA(\area\()+EX_PPR, r9, CPU_FTR_HAS_PPR)
 429	OPT_SAVE_REG_TO_PACA(\area\()+EX_CFAR, r10, CPU_FTR_CFAR)
 
 
 
 
 430	INTERRUPT_TO_KERNEL
 431	SAVE_CTR(r10, \area\())
 
 432	mfcr	r9
 433	.if \kvm
 434		KVMTEST \name \hsrr \vec
 435	.endif
 436	.if \bitmask
 437		lbz	r10,PACAIRQSOFTMASK(r13)
 438		andi.	r10,r10,\bitmask
 439		/* Associate vector numbers with bits in paca->irq_happened */
 440		.if \vec == 0x500 || \vec == 0xea0
 441		li	r10,PACA_IRQ_EE
 442		.elseif \vec == 0x900
 443		li	r10,PACA_IRQ_DEC
 444		.elseif \vec == 0xa00 || \vec == 0xe80
 445		li	r10,PACA_IRQ_DBELL
 446		.elseif \vec == 0xe60
 447		li	r10,PACA_IRQ_HMI
 448		.elseif \vec == 0xf00
 449		li	r10,PACA_IRQ_PMI
 450		.else
 451		.abort "Bad maskable vector"
 452		.endif
 453
 454		.if \hsrr == EXC_HV_OR_STD
 455		BEGIN_FTR_SECTION
 456		bne	masked_Hinterrupt
 457		FTR_SECTION_ELSE
 458		bne	masked_interrupt
 459		ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 460		.elseif \hsrr
 461		bne	masked_Hinterrupt
 462		.else
 463		bne	masked_interrupt
 464		.endif
 465	.endif
 466
 467	std	r11,\area\()+EX_R11(r13)
 468	std	r12,\area\()+EX_R12(r13)
 469
 470	/*
 471	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
 472	 * because a d-side MCE will clobber those registers, so it is
 473	 * not recoverable if they are live.
 474	 */
 475	GET_SCRATCH0(r10)
 476	std	r10,\area\()+EX_R13(r13)
 477	.if \dar
 478	.if \hsrr
 479	mfspr	r10,SPRN_HDAR
 480	.else
 481	mfspr	r10,SPRN_DAR
 482	.endif
 483	std	r10,\area\()+EX_DAR(r13)
 484	.endif
 485	.if \dsisr
 486	.if \hsrr
 487	mfspr	r10,SPRN_HDSISR
 488	.else
 489	mfspr	r10,SPRN_DSISR
 490	.endif
 491	stw	r10,\area\()+EX_DSISR(r13)
 492	.endif
 493
 494	.if \early == 2
 495	/* nothing more */
 496	.elseif \early
 497	mfctr	r10			/* save ctr, even for !RELOCATABLE */
 498	BRANCH_TO_C000(r11, \name\()_early_common)
 499	.elseif !\virt
 500	INT_SAVE_SRR_AND_JUMP \name\()_common, \hsrr, \ri
 
 
 
 
 501	.else
 502	INT_VIRT_SAVE_SRR_AND_JUMP \name\()_common, \hsrr
 
 503	.endif
 
 
 
 
 
 504	.if \ool
 505	.popsection
 506	.endif
 507.endm
 508
 509/*
 510 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 511 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 512 * SRR1, and relocation is on.
 513 *
 514 * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
 515 * PPR save and CPU accounting are not done for the !stack case (XXX why not?)
 516 */
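/*
 * Summary of the INT_COMMON arguments (derived from the macro body below):
 *
 * vec       - trap number; stored as (vec)+1 in the frame, the low bit
 *             conventionally marking "non-volatile GPRs not saved yet".
 * area      - paca save area used by the prolog; r9-r13, CTR, CFAR and
 *             DAR/DSISR are copied from here into the stack frame.
 * stack     - 1: allocate/switch to the kernel stack and do PPR save and
 *             CPU accounting. 0: r1 already set by the caller, old r1 in r10.
 * kaup      - save and lock the AMR (kuap_save_amr_and_lock).
 * reconcile - call RECONCILE_IRQ_STATE.
 * dar       - 1: copy EX_DAR to _DAR. 2: store the saved NIP in _DAR.
 * dsisr     - 1: copy EX_DSISR to _DSISR. 2: derive _DSISR from the
 *             DSISR_SRR1_MATCH_64S bits of the saved SRR1.
 */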
 517.macro INT_COMMON vec, area, stack, kaup, reconcile, dar, dsisr
 518	.if \stack
 519	andi.	r10,r12,MSR_PR		/* See if coming from user	*/
 520	mr	r10,r1			/* Save r1			*/
 521	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack	*/
 522	beq-	100f
 523	ld	r1,PACAKSAVE(r13)	/* kernel stack to use		*/
 524100:	tdgei	r1,-INT_FRAME_SIZE	/* trap if r1 is in userspace	*/
 525	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
 526	.endif
 527
 528	std	r9,_CCR(r1)		/* save CR in stackframe	*/
 529	std	r11,_NIP(r1)		/* save SRR0 in stackframe	*/
 530	std	r12,_MSR(r1)		/* save SRR1 in stackframe	*/
 531	std	r10,0(r1)		/* make stack chain pointer	*/
 532	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
 533	std	r10,GPR1(r1)		/* save r1 in stackframe	*/
 534
 535	.if \stack
 536	.if \kaup
 
 
 
 
 
 537	kuap_save_amr_and_lock r9, r10, cr1, cr0
 538	.endif
 539	beq	101f			/* if from kernel mode		*/
 540	ACCOUNT_CPU_USER_ENTRY(r13, r9, r10)
 541	SAVE_PPR(\area, r9)
 
 
 
 542101:
 543	.else
 544	.if \kaup
 545	kuap_save_amr_and_lock r9, r10, cr1
 546	.endif
 547	.endif
 548
 549	/* Save original regs values from save area to stack frame. */
 550	ld	r9,\area+EX_R9(r13)	/* move r9, r10 to stackframe	*/
 551	ld	r10,\area+EX_R10(r13)
 552	std	r9,GPR9(r1)
 553	std	r10,GPR10(r1)
 554	ld	r9,\area+EX_R11(r13)	/* move r11 - r13 to stackframe	*/
 555	ld	r10,\area+EX_R12(r13)
 556	ld	r11,\area+EX_R13(r13)
 557	std	r9,GPR11(r1)
 558	std	r10,GPR12(r1)
 559	std	r11,GPR13(r1)
 560	.if \dar
 561	.if \dar == 2
 
 
 
 562	ld	r10,_NIP(r1)
 563	.else
 564	ld	r10,\area+EX_DAR(r13)
 565	.endif
 566	std	r10,_DAR(r1)
 567	.endif
 568	.if \dsisr
 569	.if \dsisr == 2
 
 570	ld	r10,_MSR(r1)
 571	lis	r11,DSISR_SRR1_MATCH_64S@h
 572	and	r10,r10,r11
 573	.else
 574	lwz	r10,\area+EX_DSISR(r13)
 575	.endif
 576	std	r10,_DSISR(r1)
 577	.endif
 578BEGIN_FTR_SECTION_NESTED(66)
 579	ld	r10,\area+EX_CFAR(r13)
 
 580	std	r10,ORIG_GPR3(r1)
 581END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66)
 582	GET_CTR(r10, \area)
 583	std	r10,_CTR(r1)
 584	std	r2,GPR2(r1)		/* save r2 in stackframe	*/
 585	SAVE_4GPRS(3, r1)		/* save r3 - r6 in stackframe   */
 586	SAVE_2GPRS(7, r1)		/* save r7, r8 in stackframe	*/
 587	mflr	r9			/* Get LR, later save to stack	*/
 588	ld	r2,PACATOC(r13)		/* get kernel TOC into r2	*/
 589	std	r9,_LINK(r1)
 590	lbz	r10,PACAIRQSOFTMASK(r13)
 591	mfspr	r11,SPRN_XER		/* save XER in stackframe	*/
 592	std	r10,SOFTE(r1)
 593	std	r11,_XER(r1)
 594	li	r9,(\vec)+1
 595	std	r9,_TRAP(r1)		/* set trap number		*/
 596	li	r10,0
 597	ld	r11,exception_marker@toc(r2)
 598	std	r10,RESULT(r1)		/* clear regs->result		*/
 599	std	r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame	*/
 600
 601	.if \stack
 602	ACCOUNT_STOLEN_TIME
 603	.endif
 604
 605	.if \reconcile
 606	RECONCILE_IRQ_STATE(r10, r11)
 607	.endif
 608.endm
 609
 610/*
 611 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 612 * standard exception.
 613 */
 614.macro EXCEPTION_RESTORE_REGS hsrr
 615	/* Move original SRR0 and SRR1 into the respective regs */
 616	ld	r9,_MSR(r1)
 617	.if \hsrr == EXC_HV_OR_STD
 618	.error "EXC_HV_OR_STD Not implemented for EXCEPTION_RESTORE_REGS"
 619	.endif
 620	.if \hsrr
 621	mtspr	SPRN_HSRR1,r9
 622	.else
 623	mtspr	SPRN_SRR1,r9
 624	.endif
 625	ld	r9,_NIP(r1)
 626	.if \hsrr
 627	mtspr	SPRN_HSRR0,r9
 628	.else
 629	mtspr	SPRN_SRR0,r9
 630	.endif
 631	ld	r9,_CTR(r1)
 632	mtctr	r9
 633	ld	r9,_XER(r1)
 634	mtxer	r9
 635	ld	r9,_LINK(r1)
 636	mtlr	r9
 637	ld	r9,_CCR(r1)
 638	mtcr	r9
 639	REST_8GPRS(2, r1)
 640	REST_4GPRS(10, r1)
 641	REST_GPR(0, r1)
 642	/* restore original r1. */
 643	ld	r1,GPR1(r1)
 644.endm
 645
 646#define RUNLATCH_ON				\
 647BEGIN_FTR_SECTION				\
 648	ld	r3, PACA_THREAD_INFO(r13);	\
 649	ld	r4,TI_LOCAL_FLAGS(r3);		\
 650	andi.	r0,r4,_TLF_RUNLATCH;		\
 651	beql	ppc64_runlatch_on_trampoline;	\
 652END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
 653
 654/*
 655 * When the idle code in power4_idle puts the CPU into NAP mode,
 656 * it has to do so in a loop, and relies on the external interrupt
 657 * and decrementer interrupt entry code to get it out of the loop.
 658 * It sets the _TLF_NAPPING bit in current_thread_info()->local_flags
 659 * to signal that it is in the loop and needs help to get out.
 660 */
 661#ifdef CONFIG_PPC_970_NAP
 662#define FINISH_NAP				\
 663BEGIN_FTR_SECTION				\
 664	ld	r11, PACA_THREAD_INFO(r13);	\
 665	ld	r9,TI_LOCAL_FLAGS(r11);		\
 666	andi.	r10,r9,_TLF_NAPPING;		\
 667	bnel	power4_fixup_nap;		\
 668END_FTR_SECTION_IFSET(CPU_FTR_CAN_NAP)
 669#else
 670#define FINISH_NAP
 671#endif
 672
 673#define EXC_COMMON(name, realvec, hdlr)					\
 674	EXC_COMMON_BEGIN(name);						\
 675	INT_COMMON realvec, PACA_EXGEN, 1, 1, 1, 0, 0 ;			\
 676	bl	save_nvgprs;						\
 677	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
 678	bl	hdlr;							\
 679	b	ret_from_except
 680
 681/*
 682 * Like EXC_COMMON, but for exceptions that can occur in the idle task and
 683 * therefore need the special idle handling (finish nap and runlatch)
 684 */
 685#define EXC_COMMON_ASYNC(name, realvec, hdlr)				\
 686	EXC_COMMON_BEGIN(name);						\
 687	INT_COMMON realvec, PACA_EXGEN, 1, 1, 1, 0, 0 ;			\
 688	FINISH_NAP;							\
 689	RUNLATCH_ON;							\
 690	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
 691	bl	hdlr;							\
 692	b	ret_from_except_lite
 693
 694
 695/*
 696 * There are a few constraints to be concerned with.
 697 * - Real mode exceptions code/data must be located at their physical location.
 698 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 699 * - Fixed location code must not call directly beyond the __end_interrupts
 700 *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
 701 *   must be used.
 702 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
 703 *   virtual 0xc00...
 704 * - Conditional branch targets must be within +/-32K of caller.
 705 *
 706 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 707 * therefore don't have to run in physically located code or rfid to
 708 * virtual mode kernel code. However on relocatable kernels they do have
 709 * to branch to KERNELBASE offset because the rest of the kernel (outside
 710 * the exception vectors) may be located elsewhere.
 711 *
 712 * Virtual exceptions correspond with physical, except their entry points
 713 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 714 * offset applied. Virtual exceptions are enabled with the Alternate
 715 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 716 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 717 * cause exceptions to be delivered in real mode.
 718 *
 
 
 
 719 * It's impossible to receive interrupts below 0x300 via AIL.
 720 *
 721 * KVM: None of the virtual exceptions are from the guest. Anything that
 722 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 723 *
 724 *
 725 * We layout physical memory as follows:
 726 * 0x0000 - 0x00ff : Secondary processor spin code
 727 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 728 * 0x1900 - 0x3fff : Real mode trampolines
 729 * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 730 * 0x5900 - 0x6fff : Relon mode trampolines
 731 * 0x7000 - 0x7fff : FWNMI data area
 732 * 0x8000 -   .... : Common interrupt handlers, remaining early
 733 *                   setup code, rest of kernel.
 734 *
 735 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 736 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 737 * vectors there.
 738 */
 739OPEN_FIXED_SECTION(real_vectors,        0x0100, 0x1900)
 740OPEN_FIXED_SECTION(real_trampolines,    0x1900, 0x4000)
 741OPEN_FIXED_SECTION(virt_vectors,        0x4000, 0x5900)
 742OPEN_FIXED_SECTION(virt_trampolines,    0x5900, 0x7000)
 743
 744#ifdef CONFIG_PPC_POWERNV
 745	.globl start_real_trampolines
 746	.globl end_real_trampolines
 747	.globl start_virt_trampolines
 748	.globl end_virt_trampolines
 749#endif
 750
 751#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
 752/*
 753 * Data area reserved for FWNMI option.
 754 * This address (0x7000) is fixed by the RPA.
 755 * pseries and powernv need to keep the whole page from
 756 * 0x7000 to 0x8000 free for use by the firmware
 757 */
 758ZERO_FIXED_SECTION(fwnmi_page,          0x7000, 0x8000)
 759OPEN_TEXT_SECTION(0x8000)
 760#else
 761OPEN_TEXT_SECTION(0x7000)
 762#endif
 763
 764USE_FIXED_SECTION(real_vectors)
 765
 766/*
 767 * This is the start of the interrupt handlers for pSeries
 768 * This code runs with relocation off.
 769 * Code from here to __end_interrupts gets copied down to real
 770 * address 0x100 when we are running a relocatable kernel.
 771 * Therefore any relative branches in this section must only
 772 * branch to labels in this section.
 773 */
 774	.globl __start_interrupts
 775__start_interrupts:
 776
 777/* No virt vectors corresponding with 0x0..0x100 */
 778EXC_VIRT_NONE(0x4000, 0x100)
 779
 780
 781EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
 782#ifdef CONFIG_PPC_P7_NAP
 783	/*
 784	 * If running native on arch 2.06 or later, check if we are waking up
 785	 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
 786	 * bits 46:47. A non-0 value indicates that we are coming from a power
 787	 * saving state. The idle wakeup handler initially runs in real mode,
 788	 * but we branch to the 0xc000... address so we can turn on relocation
 789	 * with mtmsrd later, after SPRs are restored.
 790	 *
 791	 * Careful to minimise cost for the fast path (idle wakeup) while
 792	 * also avoiding clobbering CFAR for the debug path (non-idle).
 793	 *
 794	 * For the idle wake case volatile registers can be clobbered, which
 795	 * is why we use those initially. If it turns out to not be an idle
 796	 * wake, carefully put everything back the way it was, so we can use
 797	 * common exception macros to handle it.
 798	 */
 799BEGIN_FTR_SECTION
 800	SET_SCRATCH0(r13)
 801	GET_PACA(r13)
 802	std	r3,PACA_EXNMI+0*8(r13)
 803	std	r4,PACA_EXNMI+1*8(r13)
 804	std	r5,PACA_EXNMI+2*8(r13)
 805	mfspr	r3,SPRN_SRR1
 806	mfocrf	r4,0x80
 807	rlwinm.	r5,r3,47-31,30,31
 808	bne+	system_reset_idle_wake
 809	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
 810	mtocrf	0x80,r4
 811	ld	r3,PACA_EXNMI+0*8(r13)
 812	ld	r4,PACA_EXNMI+1*8(r13)
 813	ld	r5,PACA_EXNMI+2*8(r13)
 814	GET_SCRATCH0(r13)
 815END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 816#endif
 817
 818	INT_HANDLER system_reset, 0x100, area=PACA_EXNMI, ri=0, kvm=1
 819	/*
 820	 * MSR_RI is not enabled, because PACA_EXNMI and the NMI stack are
 821	 * being used, so a nested NMI exception would corrupt them.
 822	 *
 823	 * In theory, we should not enable relocation here if it was disabled
 824	 * in SRR1, because the MMU may not be configured to support it (e.g.,
 825	 * SLB may have been cleared). In practice, there should only be a few
 826	 * small windows where that's the case, and sreset is considered to
 827	 * be dangerous anyway.
 828	 */
 829EXC_REAL_END(system_reset, 0x100, 0x100)
 830EXC_VIRT_NONE(0x4100, 0x100)
 831INT_KVM_HANDLER system_reset 0x100, EXC_STD, PACA_EXNMI, 0
 832
 833#ifdef CONFIG_PPC_P7_NAP
 834TRAMP_REAL_BEGIN(system_reset_idle_wake)
 835	/* We are waking up from idle, so may clobber any volatile register */
 836	cmpwi	cr1,r5,2
 837	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
 838	BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
 839#endif
 840
 841#ifdef CONFIG_PPC_PSERIES
 842/*
 843 * Vectors for the FWNMI option.  Share common code.
 844 */
 845TRAMP_REAL_BEGIN(system_reset_fwnmi)
 846	/* See comment at system_reset exception, don't turn on RI */
 847	INT_HANDLER system_reset, 0x100, area=PACA_EXNMI, ri=0
 
 848
 849#endif /* CONFIG_PPC_PSERIES */
 850
 851EXC_COMMON_BEGIN(system_reset_common)
 
 852	/*
 853	 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
 854	 * to recover, but nested NMI will notice in_nmi and not recover
 855	 * because of the use of the NMI stack. in_nmi reentrancy is tested in
 856	 * system_reset_exception.
 857	 */
 858	lhz	r10,PACA_IN_NMI(r13)
 859	addi	r10,r10,1
 860	sth	r10,PACA_IN_NMI(r13)
 861	li	r10,MSR_RI
 862	mtmsrd 	r10,1
 863
 864	mr	r10,r1
 865	ld	r1,PACA_NMI_EMERG_SP(r13)
 866	subi	r1,r1,INT_FRAME_SIZE
 867	INT_COMMON 0x100, PACA_EXNMI, 0, 1, 0, 0, 0
 868	bl	save_nvgprs
 869	/*
 870	 * Set IRQS_ALL_DISABLED unconditionally so arch_irqs_disabled does
 871	 * the right thing. We do not want to reconcile because that goes
 872	 * through irq tracing which we don't want in NMI.
 873	 *
 874	 * Save PACAIRQHAPPENED because some code will do a hard disable
 875	 * (e.g., xmon). So we want to restore this back to where it was
 876	 * when we return. DAR is unused in the stack, so save it there.
 877	 */
 878	li	r10,IRQS_ALL_DISABLED
 879	stb	r10,PACAIRQSOFTMASK(r13)
 880	lbz	r10,PACAIRQHAPPENED(r13)
 881	std	r10,_DAR(r1)
 
 
 882
 883	addi	r3,r1,STACK_FRAME_OVERHEAD
 884	bl	system_reset_exception
 885
 886	/* Clear MSR_RI before setting SRR0 and SRR1. */
 887	li	r9,0
 888	mtmsrd	r9,1
 889
 890	/*
 891	 * MSR_RI is clear, now we can decrement paca->in_nmi.
 892	 */
 893	lhz	r10,PACA_IN_NMI(r13)
 894	subi	r10,r10,1
 895	sth	r10,PACA_IN_NMI(r13)
 896
 897	/*
 898	 * Restore soft mask settings.
 899	 */
 900	ld	r10,_DAR(r1)
 901	stb	r10,PACAIRQHAPPENED(r13)
 902	ld	r10,SOFTE(r1)
 903	stb	r10,PACAIRQSOFTMASK(r13)
 904
 905	EXCEPTION_RESTORE_REGS EXC_STD
 
 906	RFI_TO_USER_OR_KERNEL
 907
 
 908
 909EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
 910	INT_HANDLER machine_check, 0x200, early=1, area=PACA_EXMC, dar=1, dsisr=1
 911	/*
 912	 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
 913	 * nested machine check corrupts it. machine_check_common enables
 914	 * MSR_RI.
 915	 */
 916EXC_REAL_END(machine_check, 0x200, 0x100)
 917EXC_VIRT_NONE(0x4200, 0x100)
 918
 919#ifdef CONFIG_PPC_PSERIES
 920TRAMP_REAL_BEGIN(machine_check_fwnmi)
 921	/* See comment at machine_check exception, don't turn on RI */
 922	INT_HANDLER machine_check, 0x200, early=1, area=PACA_EXMC, dar=1, dsisr=1
 923#endif
 924
 925INT_KVM_HANDLER machine_check 0x200, EXC_STD, PACA_EXMC, 1
 926
 927#define MACHINE_CHECK_HANDLER_WINDUP			\
 928	/* Clear MSR_RI before setting SRR0 and SRR1. */\
 929	li	r9,0;					\
 930	mtmsrd	r9,1;		/* Clear MSR_RI */	\
 931	/* Decrement paca->in_mce now RI is clear. */	\
 932	lhz	r12,PACA_IN_MCE(r13);			\
 933	subi	r12,r12,1;				\
 934	sth	r12,PACA_IN_MCE(r13);			\
 935	EXCEPTION_RESTORE_REGS EXC_STD
 936
 937EXC_COMMON_BEGIN(machine_check_early_common)
 938	mtctr	r10			/* Restore ctr */
 939	mfspr	r11,SPRN_SRR0
 940	mfspr	r12,SPRN_SRR1
 941
 942	/*
 943	 * Switch to mc_emergency stack and handle re-entrancy (we limit
 944	 * the nested MCE up to level 4 to avoid stack overflow).
 945	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
 946	 *
 947	 * We use paca->in_mce to check whether this is the first entry or
 948	 * nested machine check. We increment paca->in_mce to track nested
 949	 * machine checks.
 950	 *
 951	 * If this is the first entry then set stack pointer to
 952	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
 953	 * stack frame on mc_emergency stack.
 954	 *
 955	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
 956	 * checkstop if we get another machine check exception before we do
 957	 * rfid with MSR_ME=1.
 958	 *
 959	 * This interrupt can wake directly from idle. If that is the case,
 960	 * the machine check is handled then the idle wakeup code is called
 961	 * to restore state.
 962	 */
 963	lhz	r10,PACA_IN_MCE(r13)
 964	cmpwi	r10,0			/* Are we in nested machine check */
 965	cmpwi	cr1,r10,MAX_MCE_DEPTH	/* Are we at maximum nesting */
 966	addi	r10,r10,1		/* increment paca->in_mce */
 967	sth	r10,PACA_IN_MCE(r13)
 968
 969	mr	r10,r1			/* Save r1 */
 970	bne	1f
 971	/* First machine check entry */
 972	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
 9731:	/* Limit nested MCE to level 4 to avoid stack overflow */
 974	bgt	cr1,unrecoverable_mce	/* Check if we hit limit of 4 */
 975	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
 976
 977	/* We don't touch AMR here, we never go to virtual mode */
 978	INT_COMMON 0x200, PACA_EXMC, 0, 0, 0, 1, 1
 979
 980BEGIN_FTR_SECTION
 981	bl	enable_machine_check
 982END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 983	li	r10,MSR_RI
 984	mtmsrd	r10,1
 985
 986	bl	save_nvgprs
 987	addi	r3,r1,STACK_FRAME_OVERHEAD
 988	bl	machine_check_early
 989	std	r3,RESULT(r1)	/* Save result */
 990	ld	r12,_MSR(r1)
 991
 992#ifdef CONFIG_PPC_P7_NAP
 993	/*
 994	 * Check if thread was in power saving mode. We come here when any
 995	 * of the following is true:
 996	 * a. thread wasn't in power saving mode
 997	 * b. thread was in power saving mode with no state loss,
 998	 *    supervisor state loss or hypervisor state loss.
 999	 *
1000	 * Go back to nap/sleep/winkle mode again if (b) is true.
1001	 */
1002BEGIN_FTR_SECTION
1003	rlwinm.	r11,r12,47-31,30,31
1004	bne	machine_check_idle_common
1005END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
1006#endif
1007
1008#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1009	/*
1010	 * Check if we are coming from guest. If yes, then run the normal
1011	 * exception handler which will take the
1012	 * machine_check_kvm->kvmppc_interrupt branch to deliver the MC event
1013	 * to guest.
1014	 */
1015	lbz	r11,HSTATE_IN_GUEST(r13)
1016	cmpwi	r11,0			/* Check if coming from guest */
1017	bne	mce_deliver		/* continue if we are. */
1018#endif
1019
1020	/*
1021	 * Check if we are coming from userspace. If yes, then run the normal
1022	 * exception handler which will deliver the MC event to this kernel.
1023	 */
1024	andi.	r11,r12,MSR_PR		/* See if coming from user. */
1025	bne	mce_deliver		/* continue in V mode if we are. */
1026
1027	/*
1028	 * At this point we are coming from kernel context.
1029	 * Queue up the MCE event and return from the interrupt.
1030	 * But before that, check if this is an un-recoverable exception.
1031	 * If yes, then stay on emergency stack and panic.
1032	 */
1033	andi.	r11,r12,MSR_RI
1034	beq	unrecoverable_mce
1035
1036	/*
1037	 * Check if we have successfully handled/recovered from error, if not
1038	 * then stay on emergency stack and panic.
1039	 */
1040	ld	r3,RESULT(r1)	/* Load result */
1041	cmpdi	r3,0		/* see if we handled MCE successfully */
1042	beq	unrecoverable_mce /* if !handled then panic */
1043
1044	/*
1045	 * Return from MC interrupt.
1046	 * Queue up the MCE event so that we can log it later, while
1047	 * returning from kernel or opal call.
1048	 */
1049	bl	machine_check_queue_event
1050	MACHINE_CHECK_HANDLER_WINDUP
1051	RFI_TO_KERNEL
1052
1053mce_deliver:
1054	/*
1055	 * This is a host user or guest MCE. Restore all registers, then
1056	 * run the "late" handler. For host user, this will run the
1057	 * machine_check_exception handler in virtual mode like a normal
1058	 * interrupt handler. For guest, this will trigger the KVM test
1059	 * and branch to the KVM interrupt similarly to other interrupts.
1060	 */
1061BEGIN_FTR_SECTION
1062	ld	r10,ORIG_GPR3(r1)
1063	mtspr	SPRN_CFAR,r10
1064END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1065	MACHINE_CHECK_HANDLER_WINDUP
1066	/* See comment at machine_check exception, don't turn on RI */
1067	INT_HANDLER machine_check, 0x200, area=PACA_EXMC, ri=0, dar=1, dsisr=1, kvm=1
1068
1069EXC_COMMON_BEGIN(machine_check_common)
1070	/*
1071	 * Machine check is different because we use a different
1072	 * save area: PACA_EXMC instead of PACA_EXGEN.
1073	 */
1074	INT_COMMON 0x200, PACA_EXMC, 1, 1, 1, 1, 1
 
1075	FINISH_NAP
1076	/* Enable MSR_RI when finished with PACA_EXMC */
1077	li	r10,MSR_RI
1078	mtmsrd 	r10,1
1079	bl	save_nvgprs
1080	addi	r3,r1,STACK_FRAME_OVERHEAD
1081	bl	machine_check_exception
1082	b	ret_from_except
 
 
 
1083
1084#ifdef CONFIG_PPC_P7_NAP
1085/*
1086 * This is an idle wakeup. Low level machine check has already been
1087 * done. Queue the event then call the idle code to do the wake up.
1088 */
1089EXC_COMMON_BEGIN(machine_check_idle_common)
1090	bl	machine_check_queue_event
1091
1092	/*
1093	 * We have not used any non-volatile GPRs here, and as a rule
1094	 * most exception code including machine check does not.
1095	 * Therefore PACA_NAPSTATELOST does not need to be set. Idle
1096	 * wakeup will restore volatile registers.
1097	 *
1098	 * Load the original SRR1 into r3 for pnv_powersave_wakeup_mce.
 
 
1099	 *
1100	 * Then decrement MCE nesting after finishing with the stack.
1101	 */
1102	ld	r3,_MSR(r1)
1103	ld	r4,_LINK(r1)
 
1104
1105	lhz	r11,PACA_IN_MCE(r13)
1106	subi	r11,r11,1
1107	sth	r11,PACA_IN_MCE(r13)
1108
1109	mtlr	r4
1110	rlwinm	r10,r3,47-31,30,31
1111	cmpwi	cr1,r10,2
1112	bltlr	cr1	/* no state loss, return to idle caller */
1113	b	idle_return_gpr_loss
1114#endif
1115
1116EXC_COMMON_BEGIN(unrecoverable_mce)
1117	/*
1118	 * We are going down. But there is a chance that we might get hit by
1119	 * another MCE during the panic path and run into an unstable state
1120	 * with no way out. Hence, turn the ME bit off while going down, so that
1121	 * if another MCE is hit during the panic path, the system will checkstop
1122	 * and the hypervisor will get restarted cleanly by the SP.
1123	 */
1124BEGIN_FTR_SECTION
1125	li	r10,0 /* clear MSR_RI */
1126	mtmsrd	r10,1
1127	bl	disable_machine_check
1128END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
1129	ld	r10,PACAKMSR(r13)
1130	li	r3,MSR_ME
1131	andc	r10,r10,r3
1132	mtmsrd	r10
1133
 
 
 
 
1134	/* Invoke machine_check_exception to print MCE event and panic. */
1135	addi	r3,r1,STACK_FRAME_OVERHEAD
1136	bl	machine_check_exception
1137
1138	/*
1139	 * We will not reach here. Even if we did, there is no way out.
1140	 * Call unrecoverable_exception and die.
1141	 */
1142	addi	r3,r1,STACK_FRAME_OVERHEAD
1143	bl	unrecoverable_exception
1144	b	.
1145
1146
1147EXC_REAL_BEGIN(data_access, 0x300, 0x80)
1148	INT_HANDLER data_access, 0x300, ool=1, dar=1, dsisr=1, kvm=1
1149EXC_REAL_END(data_access, 0x300, 0x80)
1150EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
1151	INT_HANDLER data_access, 0x300, virt=1, dar=1, dsisr=1
1152EXC_VIRT_END(data_access, 0x4300, 0x80)
1153INT_KVM_HANDLER data_access, 0x300, EXC_STD, PACA_EXGEN, 1
1154EXC_COMMON_BEGIN(data_access_common)
1155	/*
1156	 * Here r13 points to the paca, r9 contains the saved CR,
1157	 * SRR0 and SRR1 are saved in r11 and r12,
1158	 * r9 - r13 are saved in paca->exgen.
1159	 * EX_DAR and EX_DSISR have saved DAR/DSISR
1160	 */
1161	INT_COMMON 0x300, PACA_EXGEN, 1, 1, 1, 1, 1
1162	ld	r4,_DAR(r1)
1163	ld	r5,_DSISR(r1)
1164BEGIN_MMU_FTR_SECTION
1165	ld	r6,_MSR(r1)
1166	li	r3,0x300
1167	b	do_hash_page		/* Try to handle as hpte fault */
1168MMU_FTR_SECTION_ELSE
1169	b	handle_page_fault
1170ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1171
1172
1173EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
1174	INT_HANDLER data_access_slb, 0x380, ool=1, area=PACA_EXSLB, dar=1, kvm=1
1175EXC_REAL_END(data_access_slb, 0x380, 0x80)
1176EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
1177	INT_HANDLER data_access_slb, 0x380, virt=1, area=PACA_EXSLB, dar=1
1178EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
1179INT_KVM_HANDLER data_access_slb, 0x380, EXC_STD, PACA_EXSLB, 1
1180EXC_COMMON_BEGIN(data_access_slb_common)
1181	INT_COMMON 0x380, PACA_EXSLB, 1, 1, 0, 1, 0
1182	ld	r4,_DAR(r1)
1183	addi	r3,r1,STACK_FRAME_OVERHEAD
1184BEGIN_MMU_FTR_SECTION
1185	/* HPT case, do SLB fault */
1186	bl	do_slb_fault
1187	cmpdi	r3,0
1188	bne-	1f
1189	b	fast_exception_return
11901:	/* Error case */
1191MMU_FTR_SECTION_ELSE
1192	/* Radix case, access is outside page table range */
1193	li	r3,-EFAULT
1194ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1195	std	r3,RESULT(r1)
1196	bl	save_nvgprs
1197	RECONCILE_IRQ_STATE(r10, r11)
1198	ld	r4,_DAR(r1)
1199	ld	r5,RESULT(r1)
1200	addi	r3,r1,STACK_FRAME_OVERHEAD
1201	bl	do_bad_slb_fault
1202	b	ret_from_except
 
 
 
1203
1204
1205EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
1206	INT_HANDLER instruction_access, 0x400, kvm=1
1207EXC_REAL_END(instruction_access, 0x400, 0x80)
1208EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
1209	INT_HANDLER instruction_access, 0x400, virt=1
1210EXC_VIRT_END(instruction_access, 0x4400, 0x80)
1211INT_KVM_HANDLER instruction_access, 0x400, EXC_STD, PACA_EXGEN, 0
1212EXC_COMMON_BEGIN(instruction_access_common)
1213	INT_COMMON 0x400, PACA_EXGEN, 1, 1, 1, 2, 2
1214	ld	r4,_DAR(r1)
1215	ld	r5,_DSISR(r1)
1216BEGIN_MMU_FTR_SECTION
1217	ld      r6,_MSR(r1)
1218	li	r3,0x400
1219	b	do_hash_page		/* Try to handle as hpte fault */
1220MMU_FTR_SECTION_ELSE
1221	b	handle_page_fault
1222ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1223
1224
1225EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
1226	INT_HANDLER instruction_access_slb, 0x480, area=PACA_EXSLB, kvm=1
1227EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
1228EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
1229	INT_HANDLER instruction_access_slb, 0x480, virt=1, area=PACA_EXSLB
1230EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
1231INT_KVM_HANDLER instruction_access_slb, 0x480, EXC_STD, PACA_EXSLB, 0
1232EXC_COMMON_BEGIN(instruction_access_slb_common)
1233	INT_COMMON 0x480, PACA_EXSLB, 1, 1, 0, 2, 0
1234	ld	r4,_DAR(r1)
1235	addi	r3,r1,STACK_FRAME_OVERHEAD
1236BEGIN_MMU_FTR_SECTION
1237	/* HPT case, do SLB fault */
1238	bl	do_slb_fault
1239	cmpdi	r3,0
1240	bne-	1f
1241	b	fast_exception_return
12421:	/* Error case */
1243MMU_FTR_SECTION_ELSE
1244	/* Radix case, access is outside page table range */
1245	li	r3,-EFAULT
1246ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
1247	std	r3,RESULT(r1)
1248	bl	save_nvgprs
1249	RECONCILE_IRQ_STATE(r10, r11)
1250	ld	r4,_DAR(r1)
1251	ld	r5,RESULT(r1)
1252	addi	r3,r1,STACK_FRAME_OVERHEAD
1253	bl	do_bad_slb_fault
1254	b	ret_from_except
1255
1256EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
1257	INT_HANDLER hardware_interrupt, 0x500, hsrr=EXC_HV_OR_STD, bitmask=IRQS_DISABLED, kvm=1
1258EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
1259EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
1260	INT_HANDLER hardware_interrupt, 0x500, virt=1, hsrr=EXC_HV_OR_STD, bitmask=IRQS_DISABLED, kvm=1
1261EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
1262INT_KVM_HANDLER hardware_interrupt, 0x500, EXC_HV_OR_STD, PACA_EXGEN, 0
1263EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)
 
 
 
 
 
 
 
1264
1265
1266EXC_REAL_BEGIN(alignment, 0x600, 0x100)
1267	INT_HANDLER alignment, 0x600, dar=1, dsisr=1, kvm=1
1268EXC_REAL_END(alignment, 0x600, 0x100)
1269EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
1270	INT_HANDLER alignment, 0x600, virt=1, dar=1, dsisr=1
1271EXC_VIRT_END(alignment, 0x4600, 0x100)
1272INT_KVM_HANDLER alignment, 0x600, EXC_STD, PACA_EXGEN, 0
1273EXC_COMMON_BEGIN(alignment_common)
1274	INT_COMMON 0x600, PACA_EXGEN, 1, 1, 1, 1, 1
1275	bl	save_nvgprs
1276	addi	r3,r1,STACK_FRAME_OVERHEAD
1277	bl	alignment_exception
1278	b	ret_from_except
 
 
 
1279
1280
1281EXC_REAL_BEGIN(program_check, 0x700, 0x100)
1282	INT_HANDLER program_check, 0x700, kvm=1
1283EXC_REAL_END(program_check, 0x700, 0x100)
1284EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
1285	INT_HANDLER program_check, 0x700, virt=1
1286EXC_VIRT_END(program_check, 0x4700, 0x100)
1287INT_KVM_HANDLER program_check, 0x700, EXC_STD, PACA_EXGEN, 0
1288EXC_COMMON_BEGIN(program_check_common)
 
 
1289	/*
1290	 * It's possible to receive a TM Bad Thing type program check with
1291	 * userspace register values (in particular r1), but with SRR1 reporting
1292	 * that we came from the kernel. Normally that would confuse the bad
1293	 * stack logic, and we would report a bad kernel stack pointer. Instead
1294	 * we switch to the emergency stack if we're taking a TM Bad Thing from
1295	 * the kernel.
1296	 */
1297
1298	andi.	r10,r12,MSR_PR
1299	bne	2f			/* If userspace, go normal path */
1300
1301	andis.	r10,r12,(SRR1_PROGTM)@h
1302	bne	1f			/* If TM, emergency		*/
1303
1304	cmpdi	r1,-INT_FRAME_SIZE	/* check if r1 is in userspace	*/
1305	blt	2f			/* normal path if not		*/
1306
1307	/* Use the emergency stack					*/
13081:	andi.	r10,r12,MSR_PR		/* Set CR0 correctly for label	*/
1309					/* 3 in EXCEPTION_PROLOG_COMMON	*/
1310	mr	r10,r1			/* Save r1			*/
1311	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack		*/
1312	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
1313	INT_COMMON 0x700, PACA_EXGEN, 0, 1, 1, 0, 0
 
1314	b 3f
13152:
1316	INT_COMMON 0x700, PACA_EXGEN, 1, 1, 1, 0, 0
 
13173:
1318	bl	save_nvgprs
1319	addi	r3,r1,STACK_FRAME_OVERHEAD
1320	bl	program_check_exception
1321	b	ret_from_except
 
 
 
 
1322
1323
1324EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
1325	INT_HANDLER fp_unavailable, 0x800, kvm=1
1326EXC_REAL_END(fp_unavailable, 0x800, 0x100)
1327EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100)
1328	INT_HANDLER fp_unavailable, 0x800, virt=1
1329EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
1330INT_KVM_HANDLER fp_unavailable, 0x800, EXC_STD, PACA_EXGEN, 0
1331EXC_COMMON_BEGIN(fp_unavailable_common)
1332	INT_COMMON 0x800, PACA_EXGEN, 1, 1, 0, 0, 0
1333	bne	1f			/* if from user, just load it up */
1334	bl	save_nvgprs
1335	RECONCILE_IRQ_STATE(r10, r11)
1336	addi	r3,r1,STACK_FRAME_OVERHEAD
1337	bl	kernel_fp_unavailable_exception
13380:	trap
1339	EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
13401:
1341#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1342BEGIN_FTR_SECTION
1343	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1344	 * transaction), go do TM stuff
1345	 */
1346	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
1347	bne-	2f
1348END_FTR_SECTION_IFSET(CPU_FTR_TM)
1349#endif
1350	bl	load_up_fpu
1351	b	fast_exception_return
1352#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
13532:	/* User process was in a transaction */
1354	bl	save_nvgprs
1355	RECONCILE_IRQ_STATE(r10, r11)
1356	addi	r3,r1,STACK_FRAME_OVERHEAD
1357	bl	fp_unavailable_tm
1358	b	ret_from_except
1359#endif
1360
1361
1362EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
1363	INT_HANDLER decrementer, 0x900, ool=1, bitmask=IRQS_DISABLED, kvm=1
1364EXC_REAL_END(decrementer, 0x900, 0x80)
1365EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
1366	INT_HANDLER decrementer, 0x900, virt=1, bitmask=IRQS_DISABLED
1367EXC_VIRT_END(decrementer, 0x4900, 0x80)
1368INT_KVM_HANDLER decrementer, 0x900, EXC_STD, PACA_EXGEN, 0
1369EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
 
 
 
 
 
1370
1371
1372EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80)
1373	INT_HANDLER hdecrementer, 0x980, hsrr=EXC_HV, kvm=1
1374EXC_REAL_END(hdecrementer, 0x980, 0x80)
1375EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80)
1376	INT_HANDLER hdecrementer, 0x980, virt=1, hsrr=EXC_HV, kvm=1
1377EXC_VIRT_END(hdecrementer, 0x4980, 0x80)
1378INT_KVM_HANDLER hdecrementer, 0x980, EXC_HV, PACA_EXGEN, 0
1379EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)
1380
1381
1382EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
1383	INT_HANDLER doorbell_super, 0xa00, bitmask=IRQS_DISABLED, kvm=1
1384EXC_REAL_END(doorbell_super, 0xa00, 0x100)
1385EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
1386	INT_HANDLER doorbell_super, 0xa00, virt=1, bitmask=IRQS_DISABLED
1387EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
1388INT_KVM_HANDLER doorbell_super, 0xa00, EXC_STD, PACA_EXGEN, 0
 
 
 
 
1389#ifdef CONFIG_PPC_DOORBELL
1390EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
1391#else
1392EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
1393#endif
 
 
 
1394
1395
1396EXC_REAL_NONE(0xb00, 0x100)
1397EXC_VIRT_NONE(0x4b00, 0x100)
1398
1399/*
1400 * system call / hypercall (0xc00, 0x4c00)
1401 *
1402 * The system call exception is invoked with "sc 0" and does not alter HV bit.
1403 *
1404 * The hypercall is invoked with "sc 1" and sets HV=1.
1405 *
1406 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
1407 * 0x4c00 virtual mode.
1408 *
 
 
 
 
1409 * Call convention:
1410 *
1411 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
1412 *
1413 * For hypercalls, the register convention is as follows:
1414 * r0 volatile
1415 * r1-2 nonvolatile
1416 * r3 volatile parameter and return value for status
1417 * r4-r10 volatile input and output value
1418 * r11 volatile hypercall number and output value
1419 * r12 volatile input and output value
1420 * r13-r31 nonvolatile
1421 * LR nonvolatile
1422 * CTR volatile
1423 * XER volatile
1424 * CR0-1 CR5-7 volatile
1425 * CR2-4 nonvolatile
1426 * Other registers nonvolatile
1427 *
1428 * The intersection of volatile registers that don't contain possible
1429 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
1430 * without saving, though xer is not a good idea to use, as hardware may
1431 * interpret some bits, so it may be costly to change them.
1432 */
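/*
 * For example (following the ABI document referenced above): with "sc 0"
 * the system call number arrives in r0 and the arguments in r3-r8, which
 * is why the fast endian switch below only needs to compare r0 against
 * 0x1ebe.
 */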
 
 
 
 
 
 
1433.macro SYSTEM_CALL virt
1434#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1435	/*
1436	 * There is a little bit of juggling to get syscall and hcall
1437	 * working well. Save r13 in ctr to avoid using SPRG scratch
1438	 * register.
1439	 *
1440	 * Userspace syscalls have already saved the PPR, hcalls must save
1441	 * it before setting HMT_MEDIUM.
1442	 */
1443	mtctr	r13
1444	GET_PACA(r13)
1445	std	r10,PACA_EXGEN+EX_R10(r13)
1446	INTERRUPT_TO_KERNEL
1447	KVMTEST system_call EXC_STD 0xc00 /* uses r10, branch to system_call_kvm */
1448	mfctr	r9
1449#else
1450	mr	r9,r13
1451	GET_PACA(r13)
1452	INTERRUPT_TO_KERNEL
1453#endif
1454
1455#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
1456BEGIN_FTR_SECTION
1457	cmpdi	r0,0x1ebe
1458	beq-	1f
1459END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
1460#endif
1461
1462	/* We reach here with PACA in r13, r13 in r9. */
1463	mfspr	r11,SPRN_SRR0
1464	mfspr	r12,SPRN_SRR1
1465
1466	HMT_MEDIUM
1467
1468	.if ! \virt
1469	__LOAD_HANDLER(r10, system_call_common)
1470	mtspr	SPRN_SRR0,r10
1471	ld	r10,PACAKMSR(r13)
1472	mtspr	SPRN_SRR1,r10
1473	RFI_TO_KERNEL
1474	b	.	/* prevent speculative execution */
1475	.else
1476	li	r10,MSR_RI
1477	mtmsrd 	r10,1			/* Set RI (EE=0) */
1478#ifdef CONFIG_RELOCATABLE
1479	__LOAD_HANDLER(r10, system_call_common)
1480	mtctr	r10
1481	bctr
1482#else
1483	b	system_call_common
1484#endif
1485	.endif
1486
1487#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
1488	/* Fast LE/BE switch system call */
14891:	mfspr	r12,SPRN_SRR1
1490	xori	r12,r12,MSR_LE
1491	mtspr	SPRN_SRR1,r12
1492	mr	r13,r9
1493	RFI_TO_USER	/* return to userspace */
1494	b	.	/* prevent speculative execution */
1495#endif
1496.endm
1497
1498EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
1499	SYSTEM_CALL 0
1500EXC_REAL_END(system_call, 0xc00, 0x100)
1501EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
1502	SYSTEM_CALL 1
1503EXC_VIRT_END(system_call, 0x4c00, 0x100)
1504
1505#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 
1506	/*
1507	 * This is a hcall, so register convention is as above, with these
1508	 * differences:
1509	 * r13 = PACA
1510	 * ctr = orig r13
1511	 * orig r10 saved in PACA
1512	 */
1513TRAMP_KVM_BEGIN(system_call_kvm)
1514	 /*
1515	  * Save the PPR (on systems that support it) before changing to
1516	  * HMT_MEDIUM. That allows the KVM code to save that value into the
1517	  * guest state (it is the guest's PPR value).
1518	  */
1519	OPT_GET_SPR(r10, SPRN_PPR, CPU_FTR_HAS_PPR)
 
 
 
1520	HMT_MEDIUM
1521	OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r10, CPU_FTR_HAS_PPR)
1522	mfctr	r10
1523	SET_SCRATCH0(r10)
1524	std	r9,PACA_EXGEN+EX_R9(r13)
1525	mfcr	r9
1526	KVM_HANDLER 0xc00, EXC_STD, PACA_EXGEN, 0
1527#endif
1528
1529
1530EXC_REAL_BEGIN(single_step, 0xd00, 0x100)
1531	INT_HANDLER single_step, 0xd00, kvm=1
1532EXC_REAL_END(single_step, 0xd00, 0x100)
1533EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
1534	INT_HANDLER single_step, 0xd00, virt=1
1535EXC_VIRT_END(single_step, 0x4d00, 0x100)
1536INT_KVM_HANDLER single_step, 0xd00, EXC_STD, PACA_EXGEN, 0
1537EXC_COMMON(single_step_common, 0xd00, single_step_exception)
 
 
 
 
 
1538
1539
1540EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20)
1541	INT_HANDLER h_data_storage, 0xe00, ool=1, hsrr=EXC_HV, dar=1, dsisr=1, kvm=1
1542EXC_REAL_END(h_data_storage, 0xe00, 0x20)
1543EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
1544	INT_HANDLER h_data_storage, 0xe00, ool=1, virt=1, hsrr=EXC_HV, dar=1, dsisr=1, kvm=1
1545EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
1546INT_KVM_HANDLER h_data_storage, 0xe00, EXC_HV, PACA_EXGEN, 1
1547EXC_COMMON_BEGIN(h_data_storage_common)
1548	INT_COMMON 0xe00, PACA_EXGEN, 1, 1, 1, 1, 1
1549	bl      save_nvgprs
1550	addi    r3,r1,STACK_FRAME_OVERHEAD
1551BEGIN_MMU_FTR_SECTION
1552	ld	r4,_DAR(r1)
1553	li	r5,SIGSEGV
1554	bl      bad_page_fault
1555MMU_FTR_SECTION_ELSE
1556	bl      unknown_exception
1557ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
1558	b       ret_from_except
 
 
1559
1560
1561EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20)
1562	INT_HANDLER h_instr_storage, 0xe20, ool=1, hsrr=EXC_HV, kvm=1
1563EXC_REAL_END(h_instr_storage, 0xe20, 0x20)
1564EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
1565	INT_HANDLER h_instr_storage, 0xe20, ool=1, virt=1, hsrr=EXC_HV, kvm=1
1566EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
1567INT_KVM_HANDLER h_instr_storage, 0xe20, EXC_HV, PACA_EXGEN, 0
1568EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)
 
 
 
 
 
1569
1570
1571EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
1572	INT_HANDLER emulation_assist, 0xe40, ool=1, hsrr=EXC_HV, kvm=1
1573EXC_REAL_END(emulation_assist, 0xe40, 0x20)
1574EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
1575	INT_HANDLER emulation_assist, 0xe40, ool=1, virt=1, hsrr=EXC_HV, kvm=1
1576EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
1577INT_KVM_HANDLER emulation_assist, 0xe40, EXC_HV, PACA_EXGEN, 0
1578EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)
1579
1580
1581/*
1582 * hmi_exception trampoline is a special case. It jumps to hmi_exception_early
1583 * first, and then eventually from there to the trampoline to get into virtual
1584 * mode.
1585 */
1586EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
1587	INT_HANDLER hmi_exception, 0xe60, ool=1, early=1, hsrr=EXC_HV, ri=0, kvm=1
1588EXC_REAL_END(hmi_exception, 0xe60, 0x20)
1589EXC_VIRT_NONE(0x4e60, 0x20)
1590INT_KVM_HANDLER hmi_exception, 0xe60, EXC_HV, PACA_EXGEN, 0
1591EXC_COMMON_BEGIN(hmi_exception_early_common)
1592	mtctr	r10			/* Restore ctr */
1593	mfspr	r11,SPRN_HSRR0		/* Save HSRR0 */
1594	mfspr	r12,SPRN_HSRR1		/* Save HSRR1 */
1595	mr	r10,r1			/* Save r1 */
1596	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack for realmode */
1597	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame		*/
1598
1599	/* We don't touch AMR here, we never go to virtual mode */
1600	INT_COMMON 0xe60, PACA_EXGEN, 0, 0, 0, 0, 0
1601
1602	addi	r3,r1,STACK_FRAME_OVERHEAD
1603	bl	hmi_exception_realmode
1604	cmpdi	cr0,r3,0
1605	bne	1f
1606
1607	EXCEPTION_RESTORE_REGS EXC_HV
1608	HRFI_TO_USER_OR_KERNEL
1609
16101:
1611	/*
1612	 * Go to virtual mode and pull the HMI event information from
1613	 * firmware.
1614	 */
1615	EXCEPTION_RESTORE_REGS EXC_HV
1616	INT_HANDLER hmi_exception, 0xe60, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
 
 
1617
1618EXC_COMMON_BEGIN(hmi_exception_common)
1619	INT_COMMON 0xe60, PACA_EXGEN, 1, 1, 1, 0, 0
1620	FINISH_NAP
1621	RUNLATCH_ON
1622	bl	save_nvgprs
1623	addi	r3,r1,STACK_FRAME_OVERHEAD
1624	bl	handle_hmi_exception
1625	b	ret_from_except
 
 
 
1626
1627
1628EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
1629	INT_HANDLER h_doorbell, 0xe80, ool=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
1630EXC_REAL_END(h_doorbell, 0xe80, 0x20)
1631EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
1632	INT_HANDLER h_doorbell, 0xe80, ool=1, virt=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
1633EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
1634INT_KVM_HANDLER h_doorbell, 0xe80, EXC_HV, PACA_EXGEN, 0
 
 
 
 
1635#ifdef CONFIG_PPC_DOORBELL
1636EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
1637#else
1638EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
1639#endif
 
 
 
1640
1641
1642EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
1643	INT_HANDLER h_virt_irq, 0xea0, ool=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
1644EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
1645EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
1646	INT_HANDLER h_virt_irq, 0xea0, ool=1, virt=1, hsrr=EXC_HV, bitmask=IRQS_DISABLED, kvm=1
1647EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
1648INT_KVM_HANDLER h_virt_irq, 0xea0, EXC_HV, PACA_EXGEN, 0
1649EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)
 
 
 
 
 
 
 
1650
1651
1652EXC_REAL_NONE(0xec0, 0x20)
1653EXC_VIRT_NONE(0x4ec0, 0x20)
1654EXC_REAL_NONE(0xee0, 0x20)
1655EXC_VIRT_NONE(0x4ee0, 0x20)
1656
1657
1658EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
1659	INT_HANDLER performance_monitor, 0xf00, ool=1, bitmask=IRQS_PMI_DISABLED, kvm=1
1660EXC_REAL_END(performance_monitor, 0xf00, 0x20)
1661EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
1662	INT_HANDLER performance_monitor, 0xf00, ool=1, virt=1, bitmask=IRQS_PMI_DISABLED
1663EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
1664INT_KVM_HANDLER performance_monitor, 0xf00, EXC_STD, PACA_EXGEN, 0
1665EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)
 
 
 
 
 
 
 
 
1666
1667
1668EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
1669	INT_HANDLER altivec_unavailable, 0xf20, ool=1, kvm=1
1670EXC_REAL_END(altivec_unavailable, 0xf20, 0x20)
1671EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20)
1672	INT_HANDLER altivec_unavailable, 0xf20, ool=1, virt=1
1673EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20)
1674INT_KVM_HANDLER altivec_unavailable, 0xf20, EXC_STD, PACA_EXGEN, 0
1675EXC_COMMON_BEGIN(altivec_unavailable_common)
1676	INT_COMMON 0xf20, PACA_EXGEN, 1, 1, 0, 0, 0
1677#ifdef CONFIG_ALTIVEC
1678BEGIN_FTR_SECTION
1679	beq	1f
1680#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1681  BEGIN_FTR_SECTION_NESTED(69)
1682	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1683	 * transaction), go do TM stuff
1684	 */
1685	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
1686	bne-	2f
1687  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1688#endif
1689	bl	load_up_altivec
1690	b	fast_exception_return
1691#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
16922:	/* User process was in a transaction */
1693	bl	save_nvgprs
1694	RECONCILE_IRQ_STATE(r10, r11)
1695	addi	r3,r1,STACK_FRAME_OVERHEAD
1696	bl	altivec_unavailable_tm
1697	b	ret_from_except
1698#endif
16991:
1700END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1701#endif
1702	bl	save_nvgprs
1703	RECONCILE_IRQ_STATE(r10, r11)
1704	addi	r3,r1,STACK_FRAME_OVERHEAD
1705	bl	altivec_unavailable_exception
1706	b	ret_from_except
 
 
1707
1708
1709EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
1710	INT_HANDLER vsx_unavailable, 0xf40, ool=1, kvm=1
1711EXC_REAL_END(vsx_unavailable, 0xf40, 0x20)
1712EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20)
1713	INT_HANDLER vsx_unavailable, 0xf40, ool=1, virt=1
1714EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20)
1715INT_KVM_HANDLER vsx_unavailable, 0xf40, EXC_STD, PACA_EXGEN, 0
1716EXC_COMMON_BEGIN(vsx_unavailable_common)
1717	INT_COMMON 0xf40, PACA_EXGEN, 1, 1, 0, 0, 0
1718#ifdef CONFIG_VSX
1719BEGIN_FTR_SECTION
1720	beq	1f
1721#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1722  BEGIN_FTR_SECTION_NESTED(69)
1723	/* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
1724	 * transaction), go do TM stuff
1725	 */
1726	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
1727	bne-	2f
1728  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
1729#endif
1730	b	load_up_vsx
1731#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
17322:	/* User process was in a transaction */
1733	bl	save_nvgprs
1734	RECONCILE_IRQ_STATE(r10, r11)
1735	addi	r3,r1,STACK_FRAME_OVERHEAD
1736	bl	vsx_unavailable_tm
1737	b	ret_from_except
1738#endif
17391:
1740END_FTR_SECTION_IFSET(CPU_FTR_VSX)
1741#endif
1742	bl	save_nvgprs
1743	RECONCILE_IRQ_STATE(r10, r11)
1744	addi	r3,r1,STACK_FRAME_OVERHEAD
1745	bl	vsx_unavailable_exception
1746	b	ret_from_except
 
 
 
1747
1748
1749EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
1750	INT_HANDLER facility_unavailable, 0xf60, ool=1, kvm=1
1751EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
1752EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
1753	INT_HANDLER facility_unavailable, 0xf60, ool=1, virt=1
1754EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
1755INT_KVM_HANDLER facility_unavailable, 0xf60, EXC_STD, PACA_EXGEN, 0
1756EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)
1757
1758
1759EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
1760	INT_HANDLER h_facility_unavailable, 0xf80, ool=1, hsrr=EXC_HV, kvm=1
1761EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
1762EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
1763	INT_HANDLER h_facility_unavailable, 0xf80, ool=1, virt=1, hsrr=EXC_HV, kvm=1
1764EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
1765INT_KVM_HANDLER h_facility_unavailable, 0xf80, EXC_HV, PACA_EXGEN, 0
1766EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)
 
 
 
 
 
 
1767
1768
1769EXC_REAL_NONE(0xfa0, 0x20)
1770EXC_VIRT_NONE(0x4fa0, 0x20)
1771EXC_REAL_NONE(0xfc0, 0x20)
1772EXC_VIRT_NONE(0x4fc0, 0x20)
1773EXC_REAL_NONE(0xfe0, 0x20)
1774EXC_VIRT_NONE(0x4fe0, 0x20)
1775
1776EXC_REAL_NONE(0x1000, 0x100)
1777EXC_VIRT_NONE(0x5000, 0x100)
1778EXC_REAL_NONE(0x1100, 0x100)
1779EXC_VIRT_NONE(0x5100, 0x100)
1780
1781#ifdef CONFIG_CBE_RAS
 
 
 
 
 
 
 
1782EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
1783	INT_HANDLER cbe_system_error, 0x1200, ool=1, hsrr=EXC_HV, kvm=1
1784EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
1785EXC_VIRT_NONE(0x5200, 0x100)
1786INT_KVM_HANDLER cbe_system_error, 0x1200, EXC_HV, PACA_EXGEN, 1
1787EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
 
 
 
 
 
 
1788#else /* CONFIG_CBE_RAS */
1789EXC_REAL_NONE(0x1200, 0x100)
1790EXC_VIRT_NONE(0x5200, 0x100)
1791#endif
1792
1793
 
 
 
 
 
 
 
 
1794EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100)
1795	INT_HANDLER instruction_breakpoint, 0x1300, kvm=1
1796EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100)
1797EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
1798	INT_HANDLER instruction_breakpoint, 0x1300, virt=1
1799EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
1800INT_KVM_HANDLER instruction_breakpoint, 0x1300, EXC_STD, PACA_EXGEN, 1
1801EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)
 
 
 
 
 
1802
1803
1804EXC_REAL_NONE(0x1400, 0x100)
1805EXC_VIRT_NONE(0x5400, 0x100)
1806
1807EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)
1808	INT_HANDLER denorm_exception_hv, 0x1500, early=2, hsrr=EXC_HV
1809#ifdef CONFIG_PPC_DENORMALISATION
1810	mfspr	r10,SPRN_HSRR1
1811	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
1812	bne+	denorm_assist
1813#endif
1814	KVMTEST denorm_exception_hv, EXC_HV 0x1500
1815	INT_SAVE_SRR_AND_JUMP denorm_common, EXC_HV, 1
1816EXC_REAL_END(denorm_exception_hv, 0x1500, 0x100)
1817
1818#ifdef CONFIG_PPC_DENORMALISATION
1819EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
1820	INT_HANDLER denorm_exception, 0x1500, 0, 2, 1, EXC_HV, PACA_EXGEN, 1, 0, 0, 0, 0
1821	mfspr	r10,SPRN_HSRR1
1822	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
1823	bne+	denorm_assist
1824	INT_VIRT_SAVE_SRR_AND_JUMP denorm_common, EXC_HV
1825EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
1826#else
1827EXC_VIRT_NONE(0x5500, 0x100)
1828#endif
1829
1830INT_KVM_HANDLER denorm_exception_hv, 0x1500, EXC_HV, PACA_EXGEN, 0
1831
1832#ifdef CONFIG_PPC_DENORMALISATION
1833TRAMP_REAL_BEGIN(denorm_assist)
1834BEGIN_FTR_SECTION
1835/*
1836 * To denormalise we need to move a copy of the register to itself.
1837 * For POWER6 do that here for all FP regs.
1838 */
1839	mfmsr	r10
1840	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
1841	xori	r10,r10,(MSR_FE0|MSR_FE1)
1842	mtmsrd	r10
1843	sync
1844
1845	.Lreg=0
1846	.rept 32
1847	fmr	.Lreg,.Lreg
1848	.Lreg=.Lreg+1
1849	.endr
1850
1851FTR_SECTION_ELSE
1852/*
1853 * To denormalise we need to move a copy of the register to itself.
1854 * For POWER7 do that here for the first 32 VSX registers only.
1855 */
1856	mfmsr	r10
1857	oris	r10,r10,MSR_VSX@h
1858	mtmsrd	r10
1859	sync
1860
1861	.Lreg=0
1862	.rept 32
1863	XVCPSGNDP(.Lreg,.Lreg,.Lreg)
1864	.Lreg=.Lreg+1
1865	.endr
1866
1867ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
1868
1869BEGIN_FTR_SECTION
1870	b	denorm_done
1871END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1872/*
1873 * To denormalise we need to move a copy of the register to itself.
1874 * For POWER8 we need to do that for all 64 VSX registers
1875 */
1876	.Lreg=32
1877	.rept 32
1878	XVCPSGNDP(.Lreg,.Lreg,.Lreg)
1879	.Lreg=.Lreg+1
1880	.endr
1881
1882denorm_done:
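	/*
	 * Restore the state saved by the prolog and wind HSRR0 back one
	 * instruction, so that the hrfid below re-executes the instruction
	 * that took the soft-patch interrupt now that the registers have been
	 * run through the copy-to-self sequences above. HRFI_TO_UNKNOWN is
	 * used because the interrupted context is not known at this point.
	 */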
1883	mfspr	r11,SPRN_HSRR0
1884	subi	r11,r11,4
1885	mtspr	SPRN_HSRR0,r11
1886	mtcrf	0x80,r9
1887	ld	r9,PACA_EXGEN+EX_R9(r13)
1888	RESTORE_PPR_PACA(PACA_EXGEN, r10)
1889BEGIN_FTR_SECTION
1890	ld	r10,PACA_EXGEN+EX_CFAR(r13)
1891	mtspr	SPRN_CFAR,r10
1892END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1893	ld	r10,PACA_EXGEN+EX_R10(r13)
1894	ld	r11,PACA_EXGEN+EX_R11(r13)
1895	ld	r12,PACA_EXGEN+EX_R12(r13)
1896	ld	r13,PACA_EXGEN+EX_R13(r13)
1897	HRFI_TO_UNKNOWN
1898	b	.
1899#endif
1900
1901EXC_COMMON(denorm_common, 0x1500, unknown_exception)
1902
1903
1904#ifdef CONFIG_CBE_RAS
1905EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100)
1906	INT_HANDLER cbe_maintenance, 0x1600, ool=1, hsrr=EXC_HV, kvm=1
1907EXC_REAL_END(cbe_maintenance, 0x1600, 0x100)
1908EXC_VIRT_NONE(0x5600, 0x100)
1909INT_KVM_HANDLER cbe_maintenance, 0x1600, EXC_HV, PACA_EXGEN, 1
1910EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
1911#else /* CONFIG_CBE_RAS */
1912EXC_REAL_NONE(0x1600, 0x100)
1913EXC_VIRT_NONE(0x5600, 0x100)
1914#endif
1915
1916
1917EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100)
1918	INT_HANDLER altivec_assist, 0x1700, kvm=1
1919EXC_REAL_END(altivec_assist, 0x1700, 0x100)
1920EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100)
1921	INT_HANDLER altivec_assist, 0x1700, virt=1
1922EXC_VIRT_END(altivec_assist, 0x5700, 0x100)
1923INT_KVM_HANDLER altivec_assist, 0x1700, EXC_STD, PACA_EXGEN, 0
1924#ifdef CONFIG_ALTIVEC
1925EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
1926#else
1927EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
1928#endif
1929
1930
1931#ifdef CONFIG_CBE_RAS
1932EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100)
1933	INT_HANDLER cbe_thermal, 0x1800, ool=1, hsrr=EXC_HV, kvm=1
1934EXC_REAL_END(cbe_thermal, 0x1800, 0x100)
1935EXC_VIRT_NONE(0x5800, 0x100)
1936INT_KVM_HANDLER cbe_thermal, 0x1800, EXC_HV, PACA_EXGEN, 1
1937EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
1938#else /* CONFIG_CBE_RAS */
1939EXC_REAL_NONE(0x1800, 0x100)
1940EXC_VIRT_NONE(0x5800, 0x100)
1941#endif
1942
1943
1944#ifdef CONFIG_PPC_WATCHDOG
1945
1946#define MASKED_DEC_HANDLER_LABEL 3f
1947
1948#define MASKED_DEC_HANDLER(_H)				\
19493: /* soft-nmi */					\
1950	std	r12,PACA_EXGEN+EX_R12(r13);		\
1951	GET_SCRATCH0(r10);				\
1952	std	r10,PACA_EXGEN+EX_R13(r13);		\
1953	INT_SAVE_SRR_AND_JUMP soft_nmi_common, _H, 1
1954
1955/*
1956 * Branch to soft_nmi_interrupt using the emergency stack. The emergency
1957 * stack is one that is usable by maskable interrupts so long as MSR_EE
1958 * remains off. It is used for recovery when something has corrupted the
1959 * normal kernel stack, for example. The "soft NMI" must not use the process
1960 * stack because we want irq disabled sections to avoid touching the stack
1961 * at all (other than PMU interrupts), so use the emergency stack for this,
1962 * and run it entirely with interrupts hard disabled.
1963 */
1964EXC_COMMON_BEGIN(soft_nmi_common)
1965	mr	r10,r1
1966	ld	r1,PACAEMERGSP(r13)
1967	subi	r1,r1,INT_FRAME_SIZE
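	/*
	 * r1 now points at an INT_FRAME_SIZE frame carved out of this CPU's
	 * emergency stack (the interrupted r1 is kept in r10); INT_COMMON
	 * below saves the interrupted register state into that frame as for
	 * any other interrupt.
	 */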
1968	INT_COMMON 0x900, PACA_EXGEN, 0, 1, 1, 0, 0
1969	bl	save_nvgprs
1970	addi	r3,r1,STACK_FRAME_OVERHEAD
1971	bl	soft_nmi_interrupt
1972	b	ret_from_except
1973
1974#else /* CONFIG_PPC_WATCHDOG */
1975#define MASKED_DEC_HANDLER_LABEL 2f /* normal return */
1976#define MASKED_DEC_HANDLER(_H)
1977#endif /* CONFIG_PPC_WATCHDOG */
1978
1979/*
1980 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
1981 * - If it was a decrementer interrupt, we bump the DEC to max and return.
1982 * - If it was a doorbell we return immediately since doorbells are edge
1983 *   triggered and won't automatically refire.
1984 * - If it was an HMI we return immediately since we handled it in real mode
1985 *   and it won't refire.
1986 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
1987 * This is called with r10 containing the value to OR to the paca field.
1988 */
1989.macro MASKED_INTERRUPT hsrr
1990	.if \hsrr
1991masked_Hinterrupt:
1992	.else
1993masked_interrupt:
1994	.endif
1995	std	r11,PACA_EXGEN+EX_R11(r13)
1996	lbz	r11,PACAIRQHAPPENED(r13)
1997	or	r11,r11,r10
1998	stb	r11,PACAIRQHAPPENED(r13)
1999	cmpwi	r10,PACA_IRQ_DEC
2000	bne	1f
2001	lis	r10,0x7fff
2002	ori	r10,r10,0xffff
2003	mtspr	SPRN_DEC,r10
2004	b	MASKED_DEC_HANDLER_LABEL
20051:	andi.	r10,r10,PACA_IRQ_MUST_HARD_MASK
2006	beq	2f
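	/*
	 * A source that must be hard masked: clear MSR_EE in the saved
	 * SRR1/HSRR1 image so it cannot fire again as soon as we return,
	 * and record the hard disable in irq_happened.
	 */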
2007	.if \hsrr
2008	mfspr	r10,SPRN_HSRR1
2009	xori	r10,r10,MSR_EE	/* clear MSR_EE */
2010	mtspr	SPRN_HSRR1,r10
2011	.else
2012	mfspr	r10,SPRN_SRR1
2013	xori	r10,r10,MSR_EE	/* clear MSR_EE */
2014	mtspr	SPRN_SRR1,r10
2015	.endif
2016	ori	r11,r11,PACA_IRQ_HARD_DIS
2017	stb	r11,PACAIRQHAPPENED(r13)
20182:	/* done */
2019	mtcrf	0x80,r9
2020	std	r1,PACAR1(r13)
2021	ld	r9,PACA_EXGEN+EX_R9(r13)
2022	ld	r10,PACA_EXGEN+EX_R10(r13)
2023	ld	r11,PACA_EXGEN+EX_R11(r13)
2024	/* returns to kernel where r13 must be set up, so don't restore it */
2025	.if \hsrr
2026	HRFI_TO_KERNEL
2027	.else
2028	RFI_TO_KERNEL
2029	.endif
2030	b	.
2031	MASKED_DEC_HANDLER(\hsrr\())
2032.endm
2033
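/*
 * Fallback sequence for the store-forwarding (STF) barrier mitigation,
 * branched to from the patched barrier sites when no lighter-weight barrier
 * instruction is available. The stores, sync and dependent re-loads of the
 * PACA scratch slots, together with the run of taken branches, are intended
 * to act as a speculation barrier; the exact sequence is implementation-tuned.
 */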
2034TRAMP_REAL_BEGIN(stf_barrier_fallback)
2035	std	r9,PACA_EXRFI+EX_R9(r13)
2036	std	r10,PACA_EXRFI+EX_R10(r13)
2037	sync
2038	ld	r9,PACA_EXRFI+EX_R9(r13)
2039	ld	r10,PACA_EXRFI+EX_R10(r13)
2040	ori	31,31,0
2041	.rept 14
2042	b	1f
20431:
2044	.endr
2045	blr
2046
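/*
 * Software fallback for the L1 data cache flush performed before an rfid
 * return to a less privileged context when no hardware flush assist is
 * available: displace the L1D by stepping loads through this CPU's fallback
 * flush area (PACA_L1D_FLUSH_SIZE bytes), then return.
 */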
2047TRAMP_REAL_BEGIN(rfi_flush_fallback)
2048	SET_SCRATCH0(r13);
2049	GET_PACA(r13);
2050	std	r1,PACA_EXRFI+EX_R12(r13)
2051	ld	r1,PACAKSAVE(r13)
2052	std	r9,PACA_EXRFI+EX_R9(r13)
2053	std	r10,PACA_EXRFI+EX_R10(r13)
2054	std	r11,PACA_EXRFI+EX_R11(r13)
2055	mfctr	r9
2056	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
2057	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
2058	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
2059	mtctr	r11
2060	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
2061
2062	/* order ld/st prior to dcbt stop all streams with flushing */
2063	sync
2064
2065	/*
2066	 * The load addresses are at staggered offsets within cachelines,
2067	 * which suits some pipelines better (on others it should not
2068	 * hurt).
2069	 */
20701:
2071	ld	r11,(0x80 + 8)*0(r10)
2072	ld	r11,(0x80 + 8)*1(r10)
2073	ld	r11,(0x80 + 8)*2(r10)
2074	ld	r11,(0x80 + 8)*3(r10)
2075	ld	r11,(0x80 + 8)*4(r10)
2076	ld	r11,(0x80 + 8)*5(r10)
2077	ld	r11,(0x80 + 8)*6(r10)
2078	ld	r11,(0x80 + 8)*7(r10)
2079	addi	r10,r10,0x80*8
2080	bdnz	1b
2081
2082	mtctr	r9
2083	ld	r9,PACA_EXRFI+EX_R9(r13)
2084	ld	r10,PACA_EXRFI+EX_R10(r13)
2085	ld	r11,PACA_EXRFI+EX_R11(r13)
2086	ld	r1,PACA_EXRFI+EX_R12(r13)
2087	GET_SCRATCH0(r13);
2088	rfid
2089
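/*
 * Same L1D displacement flush as rfi_flush_fallback above, but for return
 * paths that use HSRR0/HSRR1 and hrfid.
 */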
2090TRAMP_REAL_BEGIN(hrfi_flush_fallback)
2091	SET_SCRATCH0(r13);
2092	GET_PACA(r13);
2093	std	r1,PACA_EXRFI+EX_R12(r13)
2094	ld	r1,PACAKSAVE(r13)
2095	std	r9,PACA_EXRFI+EX_R9(r13)
2096	std	r10,PACA_EXRFI+EX_R10(r13)
2097	std	r11,PACA_EXRFI+EX_R11(r13)
2098	mfctr	r9
2099	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
2100	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
2101	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
2102	mtctr	r11
2103	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
2104
2105	/* order ld/st prior to dcbt stop all streams with flushing */
2106	sync
2107
2108	/*
2109	 * The load addresses are at staggered offsets within cachelines,
2110	 * which suits some pipelines better (on others it should not
2111	 * hurt).
2112	 */
21131:
2114	ld	r11,(0x80 + 8)*0(r10)
2115	ld	r11,(0x80 + 8)*1(r10)
2116	ld	r11,(0x80 + 8)*2(r10)
2117	ld	r11,(0x80 + 8)*3(r10)
2118	ld	r11,(0x80 + 8)*4(r10)
2119	ld	r11,(0x80 + 8)*5(r10)
2120	ld	r11,(0x80 + 8)*6(r10)
2121	ld	r11,(0x80 + 8)*7(r10)
2122	addi	r10,r10,0x80*8
2123	bdnz	1b
2124
2125	mtctr	r9
2126	ld	r9,PACA_EXRFI+EX_R9(r13)
2127	ld	r10,PACA_EXRFI+EX_R10(r13)
2128	ld	r11,PACA_EXRFI+EX_R11(r13)
2129	ld	r1,PACA_EXRFI+EX_R12(r13)
2130	GET_SCRATCH0(r13);
2131	hrfid
2132
2133/*
2134 * Real mode exceptions actually use this too, but alternate
2135 * instruction code patches (which end up in the common .text area)
2136 * cannot reach these if they are put there.
2137 */
2138USE_FIXED_SECTION(virt_trampolines)
2139	MASKED_INTERRUPT EXC_STD
2140	MASKED_INTERRUPT EXC_HV
2141
2142#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2143TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
2144	/*
2145	 * Here all GPRs are unchanged from when the interrupt happened
2146	 * except for r13, which is saved in SPRG_SCRATCH0.
2147	 */
2148	mfspr	r13, SPRN_SRR0
2149	addi	r13, r13, 4
2150	mtspr	SPRN_SRR0, r13
2151	GET_SCRATCH0(r13)
2152	RFI_TO_KERNEL
2153	b	.
2154
2155TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
2156	/*
2157	 * Here all GPRs are unchanged from when the interrupt happened
2158	 * except for r13, which is saved in SPRG_SCRATCH0.
2159	 */
2160	mfspr	r13, SPRN_HSRR0
2161	addi	r13, r13, 4
2162	mtspr	SPRN_HSRR0, r13
2163	GET_SCRATCH0(r13)
2164	HRFI_TO_KERNEL
2165	b	.
2166#endif
2167
2168/*
2169 * Ensure that any handlers that get invoked from the exception prologs
2170 * above are below the first 64KB (0x10000) of the kernel image because
2171 * the prologs assemble the addresses of these handlers using the
2172 * LOAD_HANDLER macro, which uses an ori instruction.
2173 */
2174
2175/*** Common interrupt handlers ***/
2176
2177
2178	/*
2179	 * Relocation-on interrupts: A subset of the interrupts can be delivered
2180	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
2181	 * them.  Addresses are the same as the original interrupt addresses, but
2182	 * offset by 0xc000000000004000.
2183	 * It's impossible to receive interrupts below 0x300 via this mechanism.
2184	 * KVM: None of these traps are from the guest; anything that escalated
2185	 * to HV=1 from HV=0 is delivered via real mode handlers.
2186	 */
2187
2188	/*
2189	 * This uses the standard macro, since the original 0x300 vector
2190	 * only has extra guff for STAB-based processors -- which never
2191	 * come here.
2192	 */
2193
2194EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
2195	b	__ppc64_runlatch_on
2196
2197USE_FIXED_SECTION(virt_trampolines)
2198	/*
2199	 * The __end_interrupts marker must be past the out-of-line (OOL)
2200	 * handlers, so that they are copied to real address 0x100 when running
2201	 * a relocatable kernel. This ensures they can be reached from the short
2202	 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
2203	 * directly, without using LOAD_HANDLER().
2204	 */
2205	.align	7
2206	.globl	__end_interrupts
2207__end_interrupts:
2208DEFINE_FIXED_SYMBOL(__end_interrupts)
2209
2210#ifdef CONFIG_PPC_970_NAP
2211EXC_COMMON_BEGIN(power4_fixup_nap)
2212	andc	r9,r9,r10
2213	std	r9,TI_LOCAL_FLAGS(r11)
2214	ld	r10,_LINK(r1)		/* make idle task do the */
2215	std	r10,_NIP(r1)		/* equivalent of a blr */
2216	blr
2217#endif
2218
2219CLOSE_FIXED_SECTION(real_vectors);
2220CLOSE_FIXED_SECTION(real_trampolines);
2221CLOSE_FIXED_SECTION(virt_vectors);
2222CLOSE_FIXED_SECTION(virt_trampolines);
2223
2224USE_TEXT_SECTION()
2225
2226/* MSR[RI] should be clear because this uses SRR[01] */
2227enable_machine_check:
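	/*
	 * bcl 20,31,$+4 is the usual branch-to-next-instruction idiom for
	 * reading the current address into LR. MSR[ME] cannot be changed
	 * with mtmsrd, so set up SRR0/SRR1 and return through rfid to the
	 * 1: label with ME set.
	 */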
2228	mflr	r0
2229	bcl	20,31,$+4
22300:	mflr	r3
2231	addi	r3,r3,(1f - 0b)
2232	mtspr	SPRN_SRR0,r3
2233	mfmsr	r3
2234	ori	r3,r3,MSR_ME
2235	mtspr	SPRN_SRR1,r3
2236	RFI_TO_KERNEL
22371:	mtlr	r0
2238	blr
2239
2240/* MSR[RI] should be clear because this uses SRR[01] */
2241disable_machine_check:
2242	mflr	r0
2243	bcl	20,31,$+4
22440:	mflr	r3
2245	addi	r3,r3,(1f - 0b)
2246	mtspr	SPRN_SRR0,r3
2247	mfmsr	r3
2248	li	r4,MSR_ME
2249	andc	r3,r3,r4
2250	mtspr	SPRN_SRR1,r3
2251	RFI_TO_KERNEL
22521:	mtlr	r0
2253	blr
2254
2255/*
2256 * Hash table stuff
2257 */
2258	.balign	IFETCH_ALIGN_BYTES
2259do_hash_page:
2260#ifdef CONFIG_PPC_BOOK3S_64
2261	lis	r0,(DSISR_BAD_FAULT_64S | DSISR_DABRMATCH | DSISR_KEYFAULT)@h
2262	ori	r0,r0,DSISR_BAD_FAULT_64S@l
2263	and.	r0,r5,r0		/* weird error? */
2264	bne-	handle_page_fault	/* if not, try to insert a HPTE */
2265	ld	r11, PACA_THREAD_INFO(r13)
2266	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
2267	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
2268	bne	77f			/* then don't call hash_page now */
2269
2270	/*
2271	 * r3 contains the trap number
2272	 * r4 contains the faulting address
2273	 * r5 contains dsisr
2274	 * r6 contains the msr
2275	 *
2276	 * on return, r3 = 0 for success, 1 for page fault, negative for error
2277	 */
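	/*
	 * Illustration only (assumed from the register usage above and the
	 * ELF calling convention): this is roughly equivalent to calling
	 *   long __hash_page(unsigned long trap, unsigned long ea,
	 *                    unsigned long dsisr, unsigned long msr);
	 */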
2278	bl	__hash_page		/* build HPTE if possible */
2279	cmpdi	r3,0			/* see if __hash_page succeeded */
2280
2281	/* Success */
2282	beq	fast_exc_return_irq	/* Return from exception on success */
2283
2284	/* Error */
2285	blt-	13f
2286
2287	/* Reload DAR/DSISR into r4/r5 for the DABR check below */
2288	ld	r4,_DAR(r1)
2289	ld      r5,_DSISR(r1)
2290#endif /* CONFIG_PPC_BOOK3S_64 */
2291
2292/* Here we have a page fault that hash_page can't handle. */
2293handle_page_fault:
229411:	andis.  r0,r5,DSISR_DABRMATCH@h
2295	bne-    handle_dabr_fault
2296	addi	r3,r1,STACK_FRAME_OVERHEAD
2297	bl	do_page_fault
2298	cmpdi	r3,0
2299	beq+	ret_from_except_lite
2300	bl	save_nvgprs
2301	mr	r5,r3
2302	addi	r3,r1,STACK_FRAME_OVERHEAD
2303	ld	r4,_DAR(r1)
2304	bl	bad_page_fault
2305	b	ret_from_except
2306
2307/* We have a data breakpoint exception - handle it */
2308handle_dabr_fault:
2309	bl	save_nvgprs
2310	ld      r4,_DAR(r1)
2311	ld      r5,_DSISR(r1)
2312	addi    r3,r1,STACK_FRAME_OVERHEAD
2313	bl      do_break
2314	/*
2315	 * do_break() may have changed the NV GPRS while handling a breakpoint.
2316	 * If so, we need to restore them with their updated values. Don't use
2317	 * ret_from_except_lite here.
2318	 */
2319	b       ret_from_except
2320
2321
2322#ifdef CONFIG_PPC_BOOK3S_64
2323/* We have a page fault that hash_page could handle but HV refused
2324 * the PTE insertion
2325 */
232613:	bl	save_nvgprs
2327	mr	r5,r3
2328	addi	r3,r1,STACK_FRAME_OVERHEAD
2329	ld	r4,_DAR(r1)
2330	bl	low_hash_fault
2331	b	ret_from_except
2332#endif
2333
2334/*
2335 * We come here as a result of a DSI at a point where we don't want
2336 * to call hash_page, such as when we are accessing memory (possibly
2337 * user memory) inside a PMU interrupt that occurred while interrupts
2338 * were soft-disabled.  We want to invoke the exception handler for
2339 * the access, or panic if there isn't a handler.
2340 */
234177:	bl	save_nvgprs
2342	addi	r3,r1,STACK_FRAME_OVERHEAD
2343	li	r5,SIGSEGV
2344	bl	bad_page_fault
2345	b	ret_from_except
2346
2347/*
2348 * When a doorbell is triggered from system reset wakeup, the message is
2349 * not cleared, so it would fire again when EE is enabled.
2350 *
2351 * When coming from local_irq_enable, there may be the same problem if
2352 * we were hard disabled.
2353 *
2354 * Execute msgclr to clear the pending doorbell message before handling it.
2355 */
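/*
 * The shift by (63-36) places the doorbell message type in the RB bit
 * position (IBM bit 36 and up) that msgclr/msgclrp expect.
 */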
2356h_doorbell_common_msgclr:
2357	LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
2358	PPC_MSGCLR(3)
2359	b 	h_doorbell_common
2360
2361doorbell_super_common_msgclr:
2362	LOAD_REG_IMMEDIATE(r3, PPC_DBELL_MSGTYPE << (63-36))
2363	PPC_MSGCLRP(3)
2364	b 	doorbell_super_common
2365
2366/*
2367 * Called from arch_local_irq_enable when an interrupt needs
2368 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
2369 * which kind of interrupt. MSR:EE is already off. We generate a
2370 * stack frame as if a real interrupt had happened.
2371 *
2372 * Note: While MSR:EE is off, we need to make sure that _MSR
2373 * in the generated frame has EE set to 1 or the exception
2374 * handler will not properly re-enable interrupts.
2375 *
2376 * Note that we don't specify LR as the NIP (return address) for
2377 * the interrupt because that would unbalance the return branch
2378 * predictor.
2379 */
2380_GLOBAL(__replay_interrupt)
2381	/* We are going to jump to the exception common code which
2382	 * will retrieve various register values from the PACA that
2383	 * we don't give a damn about, so we don't bother storing them.
2384	 */
2385	mfmsr	r12
2386	LOAD_REG_ADDR(r11, replay_interrupt_return)
2387	mfcr	r9
2388	ori	r12,r12,MSR_EE
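	/*
	 * r9/r11/r12 carry the CR, NIP and MSR that the common code records
	 * in the replayed frame: NIP is replay_interrupt_return (not LR, see
	 * above) and the MSR has EE forced on so the handler re-enables
	 * interrupts on the way out.
	 */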
2389	cmpwi	r3,0x900
2390	beq	decrementer_common
2391	cmpwi	r3,0x500
2392BEGIN_FTR_SECTION
2393	beq	h_virt_irq_common
2394FTR_SECTION_ELSE
2395	beq	hardware_interrupt_common
2396ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_300)
2397	cmpwi	r3,0xf00
2398	beq	performance_monitor_common
2399BEGIN_FTR_SECTION
2400	cmpwi	r3,0xa00
2401	beq	h_doorbell_common_msgclr
2402	cmpwi	r3,0xe60
2403	beq	hmi_exception_common
2404FTR_SECTION_ELSE
2405	cmpwi	r3,0xa00
2406	beq	doorbell_super_common_msgclr
2407ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
2408replay_interrupt_return:
2409	blr
2410
2411_ASM_NOKPROBE_SYMBOL(__replay_interrupt)