/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position-dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

#include <asm/hw_irq.h>
#include <asm/exception-64s.h>
#include <asm/ptrace.h>
#include <asm/cpuidle.h>
#include <asm/head-64.h>
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * Following are fixed section helper macros.
 *
 * EXC_REAL_BEGIN/END - real, unrelocated exception vectors
 * EXC_VIRT_BEGIN/END - virt (AIL), unrelocated exception vectors
 * TRAMP_REAL_BEGIN - real, unrelocated helpers (virt may call these)
 * TRAMP_VIRT_BEGIN - virt, unrelocated helpers (in practice, real can use)
 * EXC_COMMON - After switching to virtual, relocated mode.
 */
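
/*
 * For example (illustrative; the real definitions appear further below in
 * this file), the real-mode data access vector at 0x300 with size 0x80 is
 * bracketed as:
 *
 *	EXC_REAL_BEGIN(data_access, 0x300, 0x80)
 *		GEN_INT_ENTRY data_access, virt=0
 *	EXC_REAL_END(data_access, 0x300, 0x80)
 */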

#define EXC_REAL_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_REAL_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##name, start, size)

#define EXC_VIRT_BEGIN(name, start, size)			\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_VIRT_END(name, start, size)				\
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##name, start, size)

#define EXC_COMMON_BEGIN(name)					\
	USE_TEXT_SECTION();					\
	.balign IFETCH_ALIGN_BYTES;				\
	.global name;						\
	_ASM_NOKPROBE_SYMBOL(name);				\
	DEFINE_FIXED_SYMBOL(name);				\
name:

#define TRAMP_REAL_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(real_trampolines, name)

#define TRAMP_VIRT_BEGIN(name)					\
	FIXED_SECTION_ENTRY_BEGIN(virt_trampolines, name)

#define EXC_REAL_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(real_vectors, exc_real_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(real_vectors, exc_real_##start##_##unused, start, size)

#define EXC_VIRT_NONE(start, size)				\
	FIXED_SECTION_ENTRY_BEGIN_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size); \
	FIXED_SECTION_ENTRY_END_LOCATION(virt_vectors, exc_virt_##start##_##unused, start, size)

/*
 * We're short on space and time in the exception prolog, so we can't
 * use the normal LOAD_REG_IMMEDIATE macro to load the address of a label.
 * Instead we get the base of the kernel from paca->kernelbase and or in the
 * low part of the label. This requires that the label be within 64KB of
 * kernelbase, and that kernelbase be 64K aligned.
 */
#define LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);	/* get high part of &label */	\
	ori	reg,reg,FIXED_SYMBOL_ABS_ADDR(label)

#define __LOAD_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l
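
/*
 * Illustrative expansion (the offset here is hypothetical): if
 * paca->kernelbase is 0xc000000000000000 and the handler label sits 0x8120
 * bytes above it, LOAD_HANDLER(r10, label) performs
 *	ld	r10,PACAKBASE(r13)	# r10 = 0xc000000000000000
 *	ori	r10,r10,0x8120		# r10 = 0xc000000000008120
 * which is only correct because the label is within 64KB of kernelbase and
 * kernelbase is 64K aligned, so the low 16 bits can simply be OR'd in.
 */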

/*
 * Branches from unrelocated code (e.g., interrupts) to labels outside
 * head-y require >64K offsets.
 */
#define __LOAD_FAR_HANDLER(reg, label)					\
	ld	reg,PACAKBASE(r13);					\
	ori	reg,reg,(ABS_ADDR(label))@l;				\
	addis	reg,reg,(ABS_ADDR(label))@h

/*
 * Branch to label using its 0xC000 address. This results in an instruction
 * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
 * on using mtmsr rather than rfid.
 *
 * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
 * load KBASE for a slight optimisation.
 */
#define BRANCH_TO_C000(reg, label)					\
	__LOAD_FAR_HANDLER(reg, label);					\
	mtctr	reg;							\
	bctr
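
/*
 * Example use (from the system reset idle wakeup path below):
 *	BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
 */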

/*
 * Interrupt code generation macros
 */
#define IVEC		.L_IVEC_\name\()	/* Interrupt vector address */
#define IHSRR		.L_IHSRR_\name\()	/* Sets SRR or HSRR registers */
#define IHSRR_IF_HVMODE	.L_IHSRR_IF_HVMODE_\name\() /* HSRR if HV else SRR */
#define IAREA		.L_IAREA_\name\()	/* PACA save area */
#define IVIRT		.L_IVIRT_\name\()	/* Has virt mode entry point */
#define IISIDE		.L_IISIDE_\name\()	/* Uses SRR0/1 not DAR/DSISR */
#define IDAR		.L_IDAR_\name\()	/* Uses DAR (or SRR0) */
#define IDSISR		.L_IDSISR_\name\()	/* Uses DSISR (or SRR1) */
#define ISET_RI		.L_ISET_RI_\name\()	/* Run common code w/ MSR[RI]=1 */
#define IBRANCH_TO_COMMON	.L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON	.L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK		.L_IMASK_\name\()	/* IRQ soft-mask bit */
#define IKVM_REAL	.L_IKVM_REAL_\name\()	/* Real entry tests KVM */
#define __IKVM_REAL(name)	.L_IKVM_REAL_ ## name
#define IKVM_VIRT	.L_IKVM_VIRT_\name\()	/* Virt entry tests KVM */
#define ISTACK		.L_ISTACK_\name\()	/* Set regular kernel stack */
#define __ISTACK(name)	.L_ISTACK_ ## name
#define IKUAP		.L_IKUAP_\name\()	/* Do KUAP lock */

#define INT_DEFINE_BEGIN(n)					\
.macro int_define_ ## n name

#define INT_DEFINE_END(n)					\
.endm ;								\
int_define_ ## n n ;						\
do_define_int n

.macro do_define_int name
	.ifndef IVEC
		.error "IVEC not defined"
	.endif
	.ifndef IHSRR
		IHSRR=0
	.endif
	.ifndef IHSRR_IF_HVMODE
		IHSRR_IF_HVMODE=0
	.endif
	.ifndef IAREA
		IAREA=PACA_EXGEN
	.endif
	.ifndef IVIRT
		IVIRT=1
	.endif
	.ifndef IISIDE
		IISIDE=0
	.endif
	.ifndef IDAR
		IDAR=0
	.endif
	.ifndef IDSISR
		IDSISR=0
	.endif
	.ifndef ISET_RI
		ISET_RI=1
	.endif
	.ifndef IBRANCH_TO_COMMON
		IBRANCH_TO_COMMON=1
	.endif
	.ifndef IREALMODE_COMMON
		IREALMODE_COMMON=0
	.else
		.if ! IBRANCH_TO_COMMON
			.error "IREALMODE_COMMON=1 but IBRANCH_TO_COMMON=0"
		.endif
	.endif
	.ifndef IMASK
		IMASK=0
	.endif
	.ifndef IKVM_REAL
		IKVM_REAL=0
	.endif
	.ifndef IKVM_VIRT
		IKVM_VIRT=0
	.endif
	.ifndef ISTACK
		ISTACK=1
	.endif
	.ifndef IKUAP
		IKUAP=1
	.endif
.endm
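
/*
 * A minimal illustration (hypothetical vector; the real definitions appear
 * throughout this file). Flags not listed fall back to the defaults
 * assigned by do_define_int above:
 *
 *	INT_DEFINE_BEGIN(example)
 *		IVEC=0x700
 *		IKVM_REAL=1
 *	INT_DEFINE_END(example)
 */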

/*
 * All interrupts which set HSRR registers, as well as SRESET and MCE and
 * syscall when invoked with "sc 1" switch to MSR[HV]=1 (HVMODE) to be taken,
 * so they all generally need to test whether they were taken in guest context.
 *
 * Note: SRESET and MCE may also be sent to the guest by the hypervisor, and be
 * taken with MSR[HV]=0.
 *
 * Interrupts which set SRR registers (with the above exceptions) do not
 * elevate to MSR[HV]=1 mode, though most can be taken when running with
 * MSR[HV]=1 (e.g., bare metal kernel and userspace). So these interrupts do
 * not need to test whether a guest is running because they get delivered to
 * the guest directly, including nested HV KVM guests.
 *
 * The exception is PR KVM, where the guest runs with MSR[PR]=1 and the host
 * runs with MSR[HV]=0, so the host takes all interrupts on behalf of the
 * guest. PR KVM runs with LPCR[AIL]=0 which causes interrupts to always be
 * delivered to the real-mode entry point, therefore such interrupts only test
 * KVM in their real mode handlers, and only when PR KVM is possible.
 *
 * Interrupts that are taken in MSR[HV]=0 and escalate to MSR[HV]=1 are always
 * delivered in real-mode when the MMU is in hash mode because the MMU
 * registers are not set appropriately to translate host addresses. In nested
 * radix mode these can be delivered in virt-mode as the host translations are
 * used implicitly (see: effective LPID, effective PID).
 */

/*
 * If an interrupt is taken while a guest is running, it is immediately routed
 * to KVM to handle.
 */

.macro KVMTEST name handler
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	lbz	r10,HSTATE_IN_GUEST(r13)
	cmpwi	r10,0
	/* HSRR variants have the 0x2 bit added to their trap number */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	li	r10,(IVEC + 0x2)
	FTR_SECTION_ELSE
	li	r10,(IVEC)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	li	r10,(IVEC + 0x2)
	.else
	li	r10,(IVEC)
	.endif
	bne	\handler
#endif
.endm
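
/*
 * Example trap-number encoding (illustrative): an HSRR interrupt such as
 * HMI (IVEC=0xe60, IHSRR=1) passes trap number 0xe62 to the KVM handler,
 * per the 0x2 bit convention noted above; an SRR interrupt such as DSI
 * (IVEC=0x300) passes 0x300 unchanged.
 */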

/*
 * This is the BOOK3S interrupt entry code macro.
 *
 * This can result in one of several things happening:
 * - Branch to the _common handler, relocated, in virtual mode.
 *   These are normal interrupts (synchronous and asynchronous) handled by
 *   the kernel.
 * - Branch to KVM, relocated but real mode interrupts remain in real mode.
 *   These occur when HSTATE_IN_GUEST is set. The interrupt may be caused by
 *   / intended for host or guest kernel, but KVM must always be involved
 *   because the machine state is set for guest execution.
 * - Branch to the masked handler, unrelocated.
 *   These occur when maskable asynchronous interrupts are taken with the
 *   irq_soft_mask set.
 * - Branch to an "early" handler in real mode but relocated.
 *   This is done if early=1. MCE and HMI use these to handle errors in real
 *   mode.
 * - Fall through and continue executing in real, unrelocated mode.
 *   This is done if early=2.
 */

.macro GEN_BRANCH_TO_COMMON name, virt
	.if IREALMODE_COMMON
	LOAD_HANDLER(r10, \name\()_common)
	mtctr	r10
	bctr
	.else
	.if \virt
#ifndef CONFIG_RELOCATABLE
	b	\name\()_common_virt
#else
	LOAD_HANDLER(r10, \name\()_common_virt)
	mtctr	r10
	bctr
#endif
	.else
	LOAD_HANDLER(r10, \name\()_common_real)
	mtctr	r10
	bctr
	.endif
	.endif
.endm

.macro GEN_INT_ENTRY name, virt, ool=0
	SET_SCRATCH0(r13)			/* save r13 */
	GET_PACA(r13)
	std	r9,IAREA+EX_R9(r13)		/* save r9 */
BEGIN_FTR_SECTION
	mfspr	r9,SPRN_PPR
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	HMT_MEDIUM
	std	r10,IAREA+EX_R10(r13)		/* save r10 - r12 */
BEGIN_FTR_SECTION
	mfspr	r10,SPRN_CFAR
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	.if \ool
	.if !\virt
	b	tramp_real_\name
	.pushsection .text
	TRAMP_REAL_BEGIN(tramp_real_\name)
	.else
	b	tramp_virt_\name
	.pushsection .text
	TRAMP_VIRT_BEGIN(tramp_virt_\name)
	.endif
	.endif

BEGIN_FTR_SECTION
	std	r9,IAREA+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
BEGIN_FTR_SECTION
	std	r10,IAREA+EX_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	INTERRUPT_TO_KERNEL
	mfctr	r10
	std	r10,IAREA+EX_CTR(r13)
	mfcr	r9
	std	r11,IAREA+EX_R11(r13)
	std	r12,IAREA+EX_R12(r13)

	/*
	 * DAR/DSISR, SCRATCH0 must be read before setting MSR[RI],
	 * because a d-side MCE will clobber those registers so is
	 * not recoverable if they are live.
	 */
	GET_SCRATCH0(r10)
	std	r10,IAREA+EX_R13(r13)
	.if IDAR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDAR
	.else
	mfspr	r10,SPRN_DAR
	.endif
	std	r10,IAREA+EX_DAR(r13)
	.endif
	.if IDSISR && !IISIDE
	.if IHSRR
	mfspr	r10,SPRN_HDSISR
	.else
	mfspr	r10,SPRN_DSISR
	.endif
	stw	r10,IAREA+EX_DSISR(r13)
	.endif

	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	FTR_SECTION_ELSE
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	mfspr	r12,SPRN_HSRR1		/* and HSRR1 */
	.else
	mfspr	r11,SPRN_SRR0		/* save SRR0 */
	mfspr	r12,SPRN_SRR1		/* and SRR1 */
	.endif

	.if IBRANCH_TO_COMMON
	GEN_BRANCH_TO_COMMON \name \virt
	.endif

	.if \ool
	.popsection
	.endif
.endm

/*
 * __GEN_COMMON_ENTRY is required to receive the branch from interrupt
 * entry, except in the case of the real-mode handlers which require
 * __GEN_REALMODE_COMMON_ENTRY.
 *
 * This switches to virtual mode and sets MSR[RI].
 */
.macro __GEN_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif

	ld	r10,PACAKMSR(r13)	/* get MSR value for kernel */
	/* MSR[RI] is clear iff using SRR regs */
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	xori	r10,r10,MSR_RI
	END_FTR_SECTION_IFCLR(CPU_FTR_HVMODE)
	.elseif ! IHSRR
	xori	r10,r10,MSR_RI
	.endif
	mtmsrd	r10

	.if IVIRT
	.if IKVM_VIRT
	b	1f /* skip the virt test coming from real */
	.endif

	.balign IFETCH_ALIGN_BYTES
DEFINE_FIXED_SYMBOL(\name\()_common_virt)
\name\()_common_virt:
	.if IKVM_VIRT
		KVMTEST \name kvm_interrupt
1:
	.endif
	.endif /* IVIRT */
.endm

/*
 * Don't switch to virt mode. Used for early MCE and HMI handlers that
 * want to run in real mode.
 */
.macro __GEN_REALMODE_COMMON_ENTRY name
DEFINE_FIXED_SYMBOL(\name\()_common_real)
\name\()_common_real:
	.if IKVM_REAL
		KVMTEST \name kvm_interrupt
	.endif
.endm

.macro __GEN_COMMON_BODY name
	.if IMASK
		.if ! ISTACK
		.error "No support for masked interrupt to use custom stack"
		.endif

		/* If coming from user, skip soft-mask tests. */
		andi.	r10,r12,MSR_PR
		bne	3f

		/*
		 * Kernel code running below __end_soft_masked may be
		 * implicitly soft-masked if it is within the regions
		 * in the soft mask table.
		 */
		LOAD_HANDLER(r10, __end_soft_masked)
		cmpld	r11,r10
		bge+	1f

		/* SEARCH_SOFT_MASK_TABLE clobbers r9,r10,r12 */
		mtctr	r12
		stw	r9,PACA_EXGEN+EX_CCR(r13)
		SEARCH_SOFT_MASK_TABLE
		cmpdi	r12,0
		mfctr	r12		/* Restore r12 to SRR1 */
		lwz	r9,PACA_EXGEN+EX_CCR(r13)
		beq	1f		/* Not in soft-mask table */
		li	r10,IMASK
		b	2f		/* In soft-mask table, always mask */

		/* Test the soft mask state against our interrupt's bit */
1:		lbz	r10,PACAIRQSOFTMASK(r13)
2:		andi.	r10,r10,IMASK
		/* Associate vector numbers with bits in paca->irq_happened */
		.if IVEC == 0x500 || IVEC == 0xea0
		li	r10,PACA_IRQ_EE
		.elseif IVEC == 0x900
		li	r10,PACA_IRQ_DEC
		.elseif IVEC == 0xa00 || IVEC == 0xe80
		li	r10,PACA_IRQ_DBELL
		.elseif IVEC == 0xe60
		li	r10,PACA_IRQ_HMI
		.elseif IVEC == 0xf00
		li	r10,PACA_IRQ_PMI
		.else
		.abort "Bad maskable vector"
		.endif

		.if IHSRR_IF_HVMODE
		BEGIN_FTR_SECTION
		bne	masked_Hinterrupt
		FTR_SECTION_ELSE
		bne	masked_interrupt
		ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
		.elseif IHSRR
		bne	masked_Hinterrupt
		.else
		bne	masked_interrupt
		.endif
	.endif

	.if ISTACK
	andi.	r10,r12,MSR_PR		/* See if coming from user	*/
3:	mr	r10,r1			/* Save r1			*/
	subi	r1,r1,INT_FRAME_SIZE	/* alloc frame on kernel stack	*/
	beq-	100f
	ld	r1,PACAKSAVE(r13)	/* kernel stack to use		*/
100:	tdgei	r1,-INT_FRAME_SIZE	/* trap if r1 is in userspace	*/
	EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,0
	.endif

	std	r9,_CCR(r1)		/* save CR in stackframe	*/
	std	r11,_NIP(r1)		/* save SRR0 in stackframe	*/
	std	r12,_MSR(r1)		/* save SRR1 in stackframe	*/
	std	r10,0(r1)		/* make stack chain pointer	*/
	std	r0,GPR0(r1)		/* save r0 in stackframe	*/
	std	r10,GPR1(r1)		/* save r1 in stackframe	*/

	/* Mark our [H]SRRs valid for return */
	li	r10,1
	.if IHSRR_IF_HVMODE
	BEGIN_FTR_SECTION
	stb	r10,PACAHSRR_VALID(r13)
	FTR_SECTION_ELSE
	stb	r10,PACASRR_VALID(r13)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
	.elseif IHSRR
	stb	r10,PACAHSRR_VALID(r13)
	.else
	stb	r10,PACASRR_VALID(r13)
	.endif

	.if ISET_RI
	li	r10,MSR_RI
	mtmsrd	r10,1			/* Set MSR_RI */
	.endif

	.if ISTACK
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1, cr0
	.endif
	beq	101f			/* if from kernel mode		*/
BEGIN_FTR_SECTION
	ld	r9,IAREA+EX_PPR(r13)	/* Read PPR from paca		*/
	std	r9,_PPR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
101:
	.else
	.if IKUAP
	kuap_save_amr_and_lock r9, r10, cr1
	.endif
	.endif

	/* Save original regs values from save area to stack frame. */
	ld	r9,IAREA+EX_R9(r13)	/* move r9, r10 to stackframe	*/
	ld	r10,IAREA+EX_R10(r13)
	std	r9,GPR9(r1)
	std	r10,GPR10(r1)
	ld	r9,IAREA+EX_R11(r13)	/* move r11 - r13 to stackframe	*/
	ld	r10,IAREA+EX_R12(r13)
	ld	r11,IAREA+EX_R13(r13)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)

	SAVE_NVGPRS(r1)

	.if IDAR
	.if IISIDE
	ld	r10,_NIP(r1)
	.else
	ld	r10,IAREA+EX_DAR(r13)
	.endif
	std	r10,_DAR(r1)
	.endif

	.if IDSISR
	.if IISIDE
	ld	r10,_MSR(r1)
	lis	r11,DSISR_SRR1_MATCH_64S@h
	and	r10,r10,r11
	.else
	lwz	r10,IAREA+EX_DSISR(r13)
	.endif
	std	r10,_DSISR(r1)
	.endif

BEGIN_FTR_SECTION
	ld	r10,IAREA+EX_CFAR(r13)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,IAREA+EX_CTR(r13)
	std	r10,_CTR(r1)
	std	r2,GPR2(r1)		/* save r2 in stackframe	*/
	SAVE_4GPRS(3, r1)		/* save r3 - r6 in stackframe	*/
	SAVE_2GPRS(7, r1)		/* save r7, r8 in stackframe	*/
	mflr	r9			/* Get LR, later save to stack	*/
	ld	r2,PACATOC(r13)		/* get kernel TOC into r2	*/
	std	r9,_LINK(r1)
	lbz	r10,PACAIRQSOFTMASK(r13)
	mfspr	r11,SPRN_XER		/* save XER in stackframe	*/
	std	r10,SOFTE(r1)
	std	r11,_XER(r1)
	li	r9,IVEC
	std	r9,_TRAP(r1)		/* set trap number		*/
	li	r10,0
	ld	r11,exception_marker@toc(r2)
	std	r10,RESULT(r1)		/* clear regs->result		*/
	std	r11,STACK_FRAME_OVERHEAD-16(r1) /* mark the frame	*/
.endm

/*
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 *
 * If stack=0, then the stack is already set in r1, and r1 is saved in r10.
 * PPR save and CPU accounting is not done for the !stack case (XXX why not?)
 */
.macro GEN_COMMON name
	__GEN_COMMON_ENTRY \name
	__GEN_COMMON_BODY \name
.endm

.macro SEARCH_RESTART_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	ld	r2,PACATOC(r13)
	LOAD_REG_ADDR(r9, __start___restart_table)
	LOAD_REG_ADDR(r10, __stop___restart_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___restart_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___restart_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	ld	r12,16(r9)
	b	303f
301:
	addi	r9,r9,24
	b	300b
302:
	li	r12,0
303:
.endm
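
/*
 * Restart table entries are three doublewords, as can be inferred from the
 * 0/8/16 byte loads and the 24-byte stride above: [start, end, fixup]. A
 * C-style sketch of the search (illustrative only; r11 holds the
 * interrupted NIP and the result lands in r12):
 *
 *	for (e = __start___restart_table; e != __stop___restart_table; e++)
 *		if (nip >= e->start && nip < e->end)
 *			return e->fixup;
 *	return 0;
 */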

.macro SEARCH_SOFT_MASK_TABLE
#ifdef CONFIG_RELOCATABLE
	mr	r12,r2
	ld	r2,PACATOC(r13)
	LOAD_REG_ADDR(r9, __start___soft_mask_table)
	LOAD_REG_ADDR(r10, __stop___soft_mask_table)
	mr	r2,r12
#else
	LOAD_REG_IMMEDIATE_SYM(r9, r12, __start___soft_mask_table)
	LOAD_REG_IMMEDIATE_SYM(r10, r12, __stop___soft_mask_table)
#endif
300:
	cmpd	r9,r10
	beq	302f
	ld	r12,0(r9)
	cmpld	r11,r12
	blt	301f
	ld	r12,8(r9)
	cmpld	r11,r12
	bge	301f
	li	r12,1
	b	303f
301:
	addi	r9,r9,16
	b	300b
302:
	li	r12,0
303:
.endm
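
/*
 * Soft-mask table entries are two doublewords, [start, end), with a
 * 16-byte stride. The search mirrors the restart table sketch above but
 * there is no fixup address: r12 is set to 1 if r11 (the interrupted NIP)
 * falls within any range, else 0.
 */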

/*
 * Restore all registers including H/SRR0/1 saved in a stack frame of a
 * standard exception.
 */
.macro EXCEPTION_RESTORE_REGS hsrr=0
	/* Move original SRR0 and SRR1 into the respective regs */
	ld	r9,_MSR(r1)
	li	r10,0
	.if \hsrr
	mtspr	SPRN_HSRR1,r9
	stb	r10,PACAHSRR_VALID(r13)
	.else
	mtspr	SPRN_SRR1,r9
	stb	r10,PACASRR_VALID(r13)
	.endif
	ld	r9,_NIP(r1)
	.if \hsrr
	mtspr	SPRN_HSRR0,r9
	.else
	mtspr	SPRN_SRR0,r9
	.endif
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	ld	r9,_CCR(r1)
	mtcr	r9
	REST_8GPRS(2, r1)
	REST_4GPRS(10, r1)
	REST_GPR(0, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)
.endm

/*
 * There are a few constraints to be concerned with.
 * - Real mode exceptions code/data must be located at their physical location.
 * - Virtual mode exceptions must be mapped at their 0xc000... location.
 * - Fixed location code must not call directly beyond the __end_interrupts
 *   area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
 *   must be used.
 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
 *   virtual 0xc00...
 * - Conditional branch targets must be within +/-32K of caller.
 *
 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
 * therefore don't have to run in physically located code or rfid to
 * virtual mode kernel code. However on relocatable kernels they do have
 * to branch to KERNELBASE offset because the rest of the kernel (outside
 * the exception vectors) may be located elsewhere.
 *
 * Virtual exceptions correspond with physical, except their entry points
 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
 * offset applied. Virtual exceptions are enabled with the Alternate
 * Interrupt Location (AIL) bit set in the LPCR. However this does not
 * guarantee they will be delivered virtually. Some conditions (see the ISA)
 * cause exceptions to be delivered in real mode.
 *
 * The scv instructions are a special case. They get a 0x3000 offset applied.
 * scv exceptions have unique reentrancy properties, see below.
 *
 * It's impossible to receive interrupts below 0x300 via AIL.
 *
 * KVM: None of the virtual exceptions are from the guest. Anything that
 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
 *
 *
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
 * 0x1900 - 0x2fff : Real mode trampolines
 * 0x3000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
 * 0x5900 - 0x6fff : Relon mode trampolines
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -   .... : Common interrupt handlers, remaining early
 *                   setup code, rest of kernel.
 *
 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
 * vectors there.
 */
OPEN_FIXED_SECTION(real_vectors,	0x0100, 0x1900)
OPEN_FIXED_SECTION(real_trampolines,	0x1900, 0x3000)
OPEN_FIXED_SECTION(virt_vectors,	0x3000, 0x5900)
OPEN_FIXED_SECTION(virt_trampolines,	0x5900, 0x7000)

#ifdef CONFIG_PPC_POWERNV
	.globl start_real_trampolines
	.globl end_real_trampolines
	.globl start_virt_trampolines
	.globl end_virt_trampolines
#endif

#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 * pseries and powernv need to keep the whole page from
 * 0x7000 to 0x8000 free for use by the firmware
 */
ZERO_FIXED_SECTION(fwnmi_page,		0x7000, 0x8000)
OPEN_TEXT_SECTION(0x8000)
#else
OPEN_TEXT_SECTION(0x7000)
#endif

USE_FIXED_SECTION(real_vectors)

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
	.globl __start_interrupts
__start_interrupts:

/**
 * Interrupt 0x3000 - System Call Vectored Interrupt (syscall).
 * This is a synchronous interrupt invoked with the "scv" instruction. The
 * system call does not alter the HV bit, so it is directed to the OS.
 *
 * Handling:
 * scv instructions enter the kernel without changing EE, RI, ME, or HV.
 * In particular, this means we can take a maskable interrupt at any point
 * in the scv handler, which is unlike any other interrupt. This is solved
 * by treating the instruction addresses in the handler as being soft-masked,
 * by adding a SOFT_MASK_TABLE entry for them.
 *
 * AIL-0 mode scv exceptions go to 0x17000-0x17fff, but we set AIL-3 and
 * ensure scv is never executed with relocation off, which means AIL-0
 * should never happen.
 *
 * Before leaving the following inside-__end_soft_masked text, at least one
 * of the following must be true:
 * - MSR[PR]=1 (i.e., return to userspace)
 * - MSR_EE|MSR_RI is clear (no reentrant exceptions)
 * - Standard kernel environment is set up (stack, paca, etc.)
 *
 * Call convention:
 *
 * syscall register convention is in Documentation/powerpc/syscall64-abi.rst
 */
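/*
 * Illustrative userspace invocation (a sketch; the syscall number shown is
 * an assumption for the example, and the authoritative register convention
 * is in syscall64-abi.rst):
 *
 *	li	r0,20		# e.g., __NR_getpid
 *	scv	0		# enters the kernel at 0x3000
 *	# on return, r3 holds the result or a negative errno; unlike
 *	# "sc", there is no CR0[SO] error indication to check
 */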
EXC_VIRT_BEGIN(system_call_vectored, 0x3000, 0x1000)
	/* SCV 0 */
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_tramp
#else
	b	system_call_vectored_common
#endif
	nop

	/* SCV 1 - 127 */
	.rept	127
	mr	r9,r13
	GET_PACA(r13)
	mflr	r11
	mfctr	r12
	li	r10,IRQS_ALL_DISABLED
	stb	r10,PACAIRQSOFTMASK(r13)
	li	r0,-1 /* cause failure */
#ifdef CONFIG_RELOCATABLE
	b	system_call_vectored_sigill_tramp
#else
	b	system_call_vectored_sigill
#endif
	.endr
EXC_VIRT_END(system_call_vectored, 0x3000, 0x1000)

// Treat scv vectors as soft-masked, see comment above.
// Use absolute values rather than labels here, so they don't get relocated,
// because this code runs unrelocated.
SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)

#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_common)
	mtctr	r10
	bctr

TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
	__LOAD_HANDLER(r10, system_call_vectored_sigill)
	mtctr	r10
	bctr
#endif


/* No virt vectors corresponding with 0x0..0x100 */
EXC_VIRT_NONE(0x4000, 0x100)


/**
 * Interrupt 0x100 - System Reset Interrupt (SRESET aka NMI).
 * This is a non-maskable, asynchronous interrupt always taken in real-mode.
 * It is caused by:
 * - Wake from power-saving state, on powernv.
 * - An NMI from another CPU, triggered by firmware or hypercall.
 * - As crash/debug signal injected from BMC, firmware or hypervisor.
 *
 * Handling:
 * Power-save wakeup is the only performance critical path, so this is
 * determined as quickly as possible first. In this case volatile registers
 * can be discarded and SPRs like CFAR don't need to be read.
 *
 * If not a powersave wakeup, then it's run as a regular interrupt, however
 * it uses its own stack and PACA save area to preserve the regular kernel
 * environment for debugging.
 *
 * This interrupt is not maskable, so triggering it when MSR[RI] is clear,
 * or SCRATCH0 is in use, etc., may cause a crash. It's also not entirely
 * correct to switch to virtual mode to run the regular interrupt handler
 * because it might be interrupted when the MMU is in a bad state (e.g., SLB
 * is clear).
 *
 * FWNMI:
 * PAPR specifies a "fwnmi" facility which sends the sreset to a different
 * entry point with a different register set up. Some hypervisors will
 * send the sreset to 0x100 in the guest if it is not fwnmi capable.
 *
 * KVM:
 * Unlike most SRR interrupts, this may be taken by the host while executing
 * in a guest, so a KVM test is required. KVM will pull the CPU out of guest
 * mode and then raise the sreset.
 */
INT_DEFINE_BEGIN(system_reset)
	IVEC=0x100
	IAREA=PACA_EXNMI
	IVIRT=0 /* no virt entry point */
	/*
	 * MSR_RI is not enabled, because the PACA_EXNMI save area and NMI
	 * stack are being used, so a nested NMI exception would corrupt them.
	 */
	ISET_RI=0
	ISTACK=0
	IKVM_REAL=1
INT_DEFINE_END(system_reset)

EXC_REAL_BEGIN(system_reset, 0x100, 0x100)
#ifdef CONFIG_PPC_P7_NAP
	/*
	 * If running native on arch 2.06 or later, check if we are waking up
	 * from nap/sleep/winkle, and branch to idle handler. This tests SRR1
	 * bits 46:47. A non-0 value indicates that we are coming from a power
	 * saving state. The idle wakeup handler initially runs in real mode,
	 * but we branch to the 0xc000... address so we can turn on relocation
	 * with mtmsrd later, after SPRs are restored.
	 *
	 * Careful to minimise cost for the fast path (idle wakeup) while
	 * also avoiding clobbering CFAR for the debug path (non-idle).
	 *
	 * For the idle wake case volatile registers can be clobbered, which
	 * is why we use those initially. If it turns out to not be an idle
	 * wake, carefully put everything back the way it was, so we can use
	 * common exception macros to handle it.
	 */
BEGIN_FTR_SECTION
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r3,PACA_EXNMI+0*8(r13)
	std	r4,PACA_EXNMI+1*8(r13)
	std	r5,PACA_EXNMI+2*8(r13)
	mfspr	r3,SPRN_SRR1
	mfocrf	r4,0x80
	rlwinm.	r5,r3,47-31,30,31
	bne+	system_reset_idle_wake
	/* Not powersave wakeup. Restore regs for regular interrupt handler. */
	mtocrf	0x80,r4
	ld	r3,PACA_EXNMI+0*8(r13)
	ld	r4,PACA_EXNMI+1*8(r13)
	ld	r5,PACA_EXNMI+2*8(r13)
	GET_SCRATCH0(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

	GEN_INT_ENTRY system_reset, virt=0
	/*
	 * In theory, we should not enable relocation here if it was disabled
	 * in SRR1, because the MMU may not be configured to support it (e.g.,
	 * SLB may have been cleared). In practice, there should only be a few
	 * small windows where that's the case, and sreset is considered to
	 * be dangerous anyway.
	 */
EXC_REAL_END(system_reset, 0x100, 0x100)
EXC_VIRT_NONE(0x4100, 0x100)

#ifdef CONFIG_PPC_P7_NAP
TRAMP_REAL_BEGIN(system_reset_idle_wake)
	/* We are waking up from idle, so may clobber any volatile register */
	cmpwi	cr1,r5,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
#endif

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
TRAMP_REAL_BEGIN(system_reset_fwnmi)
	GEN_INT_ENTRY system_reset, virt=0

#endif /* CONFIG_PPC_PSERIES */

EXC_COMMON_BEGIN(system_reset_common)
	__GEN_COMMON_ENTRY system_reset
	/*
	 * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
	 * to recover, but nested NMI will notice in_nmi and not recover
	 * because of the use of the NMI stack. in_nmi reentrancy is tested in
	 * system_reset_exception.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	addi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)
	li	r10,MSR_RI
	mtmsrd	r10,1

	mr	r10,r1
	ld	r1,PACA_NMI_EMERG_SP(r13)
	subi	r1,r1,INT_FRAME_SIZE
	__GEN_COMMON_BODY system_reset

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	system_reset_exception

	/* Clear MSR_RI before setting SRR0 and SRR1. */
	li	r9,0
	mtmsrd	r9,1

	/*
	 * MSR_RI is clear, now we can decrement paca->in_nmi.
	 */
	lhz	r10,PACA_IN_NMI(r13)
	subi	r10,r10,1
	sth	r10,PACA_IN_NMI(r13)

	kuap_kernel_restore r9, r10
	EXCEPTION_RESTORE_REGS
	RFI_TO_USER_OR_KERNEL


/**
 * Interrupt 0x200 - Machine Check Interrupt (MCE).
 * This is a non-maskable interrupt always taken in real-mode. It can be
 * synchronous or asynchronous, caused by hardware or software, and it may be
 * taken in a power-saving state.
 *
 * Handling:
 * Similarly to system reset, this uses its own stack and PACA save area;
 * the difference is that re-entrancy is allowed on the machine check stack.
 *
 * machine_check_early is run in real mode, and carefully decodes the
 * machine check and tries to handle it (e.g., flush the SLB if there was an
 * error detected there), determines if it was recoverable and logs the
 * event.
 *
 * This early code does not "reconcile" irq soft-mask state like SRESET or
 * regular interrupts do, so irqs_disabled() among other things may not work
 * properly (irq disable/enable already doesn't work because irq tracing can
 * not work in real mode).
 *
 * Then, depending on the execution context when the interrupt is taken, there
 * are 3 main actions:
 * - Executing in kernel mode. The event is queued with irq_work, which means
 *   it is handled when it is next safe to do so (i.e., the kernel has enabled
 *   interrupts), which could be immediately when the interrupt returns. This
 *   avoids nasty issues like switching to virtual mode when the MMU is in a
 *   bad state, or when executing OPAL code. (SRESET is exposed to such issues,
 *   but it has different priorities). Check to see if the CPU was in power
 *   save, and return via the wake up code if it was.
 *
 * - Executing in user mode. machine_check_exception is run like a normal
 *   interrupt handler, which processes the data generated by the early
 *   handler.
 *
 * - Executing in guest mode. The interrupt is run with its KVM test, and
 *   branches to KVM to deal with. KVM may queue the event for the host
 *   to report later.
 *
 * This interrupt is not maskable, so if it triggers when MSR[RI] is clear,
 * or SCRATCH0 is in use, it may cause a crash.
 *
 * KVM:
 * See SRESET.
 */
INT_DEFINE_BEGIN(machine_check_early)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	IREALMODE_COMMON=1
	/*
	 * MSR_RI is not enabled, because PACA_EXMC is being used, so a
	 * nested machine check corrupts it. machine_check_common enables
	 * MSR_RI.
	 */
	ISET_RI=0
	ISTACK=0
	IDAR=1
	IDSISR=1
	IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
INT_DEFINE_END(machine_check_early)

INT_DEFINE_BEGIN(machine_check)
	IVEC=0x200
	IAREA=PACA_EXMC
	IVIRT=0 /* no virt entry point */
	ISET_RI=0
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(machine_check)

EXC_REAL_BEGIN(machine_check, 0x200, 0x100)
	GEN_INT_ENTRY machine_check_early, virt=0
EXC_REAL_END(machine_check, 0x200, 0x100)
EXC_VIRT_NONE(0x4200, 0x100)

#ifdef CONFIG_PPC_PSERIES
TRAMP_REAL_BEGIN(machine_check_fwnmi)
	/* See comment at machine_check exception, don't turn on RI */
	GEN_INT_ENTRY machine_check_early, virt=0
#endif

#define MACHINE_CHECK_HANDLER_WINDUP			\
	/* Clear MSR_RI before setting SRR0 and SRR1. */\
	li	r9,0;					\
	mtmsrd	r9,1;		/* Clear MSR_RI */	\
	/* Decrement paca->in_mce now RI is clear. */	\
	lhz	r12,PACA_IN_MCE(r13);			\
	subi	r12,r12,1;				\
	sth	r12,PACA_IN_MCE(r13);			\
	EXCEPTION_RESTORE_REGS

EXC_COMMON_BEGIN(machine_check_early_common)
	__GEN_REALMODE_COMMON_ENTRY machine_check_early

	/*
	 * Switch to mc_emergency stack and handle re-entrancy (we limit
	 * the nested MCE up to level 4 to avoid stack overflow).
	 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
	 *
	 * We use paca->in_mce to check whether this is the first entry or
	 * nested machine check. We increment paca->in_mce to track nested
	 * machine checks.
	 *
	 * If this is the first entry then set stack pointer to
	 * paca->mc_emergency_sp, otherwise r1 is already pointing to
	 * stack frame on mc_emergency stack.
	 *
	 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
	 * checkstop if we get another machine check exception before we do
	 * rfid with MSR_ME=1.
	 *
	 * This interrupt can wake directly from idle. If that is the case,
	 * the machine check is handled then the idle wakeup code is called
	 * to restore state.
	 */
	lhz	r10,PACA_IN_MCE(r13)
	cmpwi	r10,0			/* Are we in nested machine check */
	cmpwi	cr1,r10,MAX_MCE_DEPTH	/* Are we at maximum nesting */
	addi	r10,r10,1		/* increment paca->in_mce */
	sth	r10,PACA_IN_MCE(r13)

	mr	r10,r1			/* Save r1 */
	bne	1f
	/* First machine check entry */
	ld	r1,PACAMCEMERGSP(r13)	/* Use MC emergency stack */
1:	/* Limit nested MCE to level 4 to avoid stack overflow */
	bgt	cr1,unrecoverable_mce	/* Check if we hit limit of 4 */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */

	__GEN_COMMON_BODY machine_check_early

BEGIN_FTR_SECTION
	bl	enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	li	r10,MSR_RI
	mtmsrd	r10,1

	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_early
	std	r3,RESULT(r1)	/* Save result */
	ld	r12,_MSR(r1)

#ifdef CONFIG_PPC_P7_NAP
	/*
	 * Check if thread was in power saving mode. We come here when any
	 * of the following is true:
	 * a. thread wasn't in power saving mode
	 * b. thread was in power saving mode with no state loss,
	 *    supervisor state loss or hypervisor state loss.
	 *
	 * Go back to nap/sleep/winkle mode again if (b) is true.
	 */
BEGIN_FTR_SECTION
	rlwinm.	r11,r12,47-31,30,31
	bne	machine_check_idle_common
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
#endif

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	/*
	 * Check if we are coming from guest. If yes, then run the normal
	 * exception handler which will take the
	 * machine_check_kvm->kvm_interrupt branch to deliver the MC event
	 * to guest.
	 */
	lbz	r11,HSTATE_IN_GUEST(r13)
	cmpwi	r11,0			/* Check if coming from guest */
	bne	mce_deliver		/* continue if we are. */
#endif

	/*
	 * Check if we are coming from userspace. If yes, then run the normal
	 * exception handler which will deliver the MC event to this kernel.
	 */
	andi.	r11,r12,MSR_PR		/* See if coming from user. */
	bne	mce_deliver		/* continue in V mode if we are. */

	/*
	 * At this point we are coming from kernel context.
	 * Queue up the MCE event and return from the interrupt.
	 * But before that, check if this is an un-recoverable exception.
	 * If yes, then stay on emergency stack and panic.
	 */
	andi.	r11,r12,MSR_RI
	beq	unrecoverable_mce

	/*
	 * Check if we have successfully handled/recovered from error, if not
	 * then stay on emergency stack and panic.
	 */
	ld	r3,RESULT(r1)	/* Load result */
	cmpdi	r3,0		/* see if we handled MCE successfully */
	beq	unrecoverable_mce /* if !handled then panic */

	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from kernel or opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	RFI_TO_KERNEL

mce_deliver:
	/*
	 * This is a host user or guest MCE. Restore all registers, then
	 * run the "late" handler. For host user, this will run the
	 * machine_check_exception handler in virtual mode like a normal
	 * interrupt handler. For guest, this will trigger the KVM test
	 * and branch to the KVM interrupt similarly to other interrupts.
	 */
BEGIN_FTR_SECTION
	ld	r10,ORIG_GPR3(r1)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	MACHINE_CHECK_HANDLER_WINDUP
	GEN_INT_ENTRY machine_check, virt=0

EXC_COMMON_BEGIN(machine_check_common)
	/*
	 * Machine check is different because we use a different
	 * save area: PACA_EXMC instead of PACA_EXGEN.
	 */
	GEN_COMMON machine_check

	/* Enable MSR_RI when finished with PACA_EXMC */
	li	r10,MSR_RI
	mtmsrd	r10,1
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception_async
	b	interrupt_return_srr


#ifdef CONFIG_PPC_P7_NAP
/*
 * This is an idle wakeup. Low level machine check has already been
 * done. Queue the event then call the idle code to do the wake up.
 */
EXC_COMMON_BEGIN(machine_check_idle_common)
	bl	machine_check_queue_event

	/*
	 * GPR-loss wakeups are relatively straightforward, because the
	 * idle sleep code has saved all non-volatile registers on its
	 * own stack, and r1 in PACAR1.
	 *
	 * For no-loss wakeups the r1 and lr registers used by the
	 * early machine check handler have to be restored first. r2 is
	 * the kernel TOC, so no need to restore it.
	 *
	 * Then decrement MCE nesting after finishing with the stack.
	 */
	ld	r3,_MSR(r1)
	ld	r4,_LINK(r1)
	ld	r1,GPR1(r1)

	lhz	r11,PACA_IN_MCE(r13)
	subi	r11,r11,1
	sth	r11,PACA_IN_MCE(r13)

	mtlr	r4
	rlwinm	r10,r3,47-31,30,31
	cmpwi	cr1,r10,2
	bltlr	cr1	/* no state loss, return to idle caller with r3=SRR1 */
	b	idle_return_gpr_loss
#endif

EXC_COMMON_BEGIN(unrecoverable_mce)
	/*
	 * We are going down. But there are chances that we might get hit by
	 * another MCE during panic path and we may run into unstable state
	 * with no way out. Hence, turn ME bit off while going down, so that
	 * when another MCE is hit during panic path, system will checkstop
	 * and hypervisor will get restarted cleanly by SP.
	 */
BEGIN_FTR_SECTION
	li	r10,0 /* clear MSR_RI */
	mtmsrd	r10,1
	bl	disable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
	ld	r10,PACAKMSR(r13)
	li	r3,MSR_ME
	andc	r10,r10,r3
	mtmsrd	r10

	lhz	r12,PACA_IN_MCE(r13)
	subi	r12,r12,1
	sth	r12,PACA_IN_MCE(r13)

	/*
	 * Invoke machine_check_exception to print MCE event and panic.
	 * This is the NMI version of the handler because we are called from
	 * the early handler which is a true NMI.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception

	/*
	 * We will not reach here. Even if we did, there is no way out.
	 * Call unrecoverable_exception and die.
	 */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	.


/**
 * Interrupt 0x300 - Data Storage Interrupt (DSI).
 * This is a synchronous interrupt generated due to a data access exception,
 * e.g., a load or store which does not have a valid page table entry with
 * permissions. DAWR matches also fault here, as do RC updates, and minor misc
 * errors e.g., copy/paste, AMO, certain invalid CI accesses, etc.
 *
 * Handling:
 * - Hash MMU
 *   Go to do_hash_fault, which attempts to fill the HPT from an entry in the
 *   Linux page table. Hash faults can hit in kernel mode in a fairly
 *   arbitrary state (e.g., interrupts disabled, locks held) when accessing
 *   "non-bolted" regions, e.g., vmalloc space. However these should always be
 *   backed by Linux page table entries.
 *
 *   If no entry is found the Linux page fault handler is invoked (by
 *   do_hash_fault). Linux page faults can happen in kernel mode due to user
 *   copy operations of course.
 *
 *   KVM: The KVM HDSI handler may perform a load with MSR[DR]=1 in guest
 *   MMU context, which may cause a DSI in the host, which must go to the
 *   KVM handler. MSR[IR] is not enabled, so the real-mode handler will
 *   always be used regardless of AIL setting.
 *
 * - Radix MMU
 *   The hardware loads from the Linux page table directly, so a fault goes
 *   immediately to Linux page fault.
 *
 * Conditions like DAWR match are handled on the way in to Linux page fault.
 */
INT_DEFINE_BEGIN(data_access)
	IVEC=0x300
	IDAR=1
	IDSISR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access)

EXC_REAL_BEGIN(data_access, 0x300, 0x80)
	GEN_INT_ENTRY data_access, virt=0
EXC_REAL_END(data_access, 0x300, 0x80)
EXC_VIRT_BEGIN(data_access, 0x4300, 0x80)
	GEN_INT_ENTRY data_access, virt=1
EXC_VIRT_END(data_access, 0x4300, 0x80)
EXC_COMMON_BEGIN(data_access_common)
	GEN_COMMON data_access
	ld	r4,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	1f
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
	b	interrupt_return_srr

1:	bl	do_break
	/*
	 * do_break() may have changed the NV GPRS while handling a breakpoint.
	 * If so, we need to restore them with their updated values.
	 */
	REST_NVGPRS(r1)
	b	interrupt_return_srr


/**
 * Interrupt 0x380 - Data Segment Interrupt (DSLB).
 * This is a synchronous interrupt in response to an MMU fault missing SLB
 * entry for HPT, or an address outside RPT translation range.
 *
 * Handling:
 * - HPT:
 *   This refills the SLB, or reports an access fault similarly to a bad page
 *   fault. When coming from user-mode, the SLB handler may access any kernel
 *   data, though it may itself take a DSLB. When coming from kernel mode,
 *   recursive faults must be avoided so access is restricted to the kernel
 *   image text/data, kernel stack, and any data allocated below
 *   ppc64_bolted_size (first segment). The kernel handler must avoid stomping
 *   on user-handler data structures.
 *
 * KVM: Same as 0x300, DSLB must test for KVM guest.
 */
INT_DEFINE_BEGIN(data_access_slb)
	IVEC=0x380
	IDAR=1
	IKVM_REAL=1
INT_DEFINE_END(data_access_slb)

EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=0
EXC_REAL_END(data_access_slb, 0x380, 0x80)
EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
	GEN_INT_ENTRY data_access_slb, virt=1
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
EXC_COMMON_BEGIN(data_access_slb_common)
	GEN_COMMON data_access_slb
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_slb_fault
	b	interrupt_return_srr


/**
 * Interrupt 0x400 - Instruction Storage Interrupt (ISI).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSI, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR), and status in SRR1 (rather than DSISR).
 */
INT_DEFINE_BEGIN(instruction_access)
	IVEC=0x400
	IISIDE=1
	IDAR=1
	IDSISR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access)

EXC_REAL_BEGIN(instruction_access, 0x400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=0
EXC_REAL_END(instruction_access, 0x400, 0x80)
EXC_VIRT_BEGIN(instruction_access, 0x4400, 0x80)
	GEN_INT_ENTRY instruction_access, virt=1
EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
	GEN_COMMON instruction_access
	addi	r3,r1,STACK_FRAME_OVERHEAD
BEGIN_MMU_FTR_SECTION
	bl	do_hash_fault
MMU_FTR_SECTION_ELSE
	bl	do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
	b	interrupt_return_srr


/**
 * Interrupt 0x480 - Instruction Segment Interrupt (ISLB).
 * This is a synchronous interrupt in response to an MMU fault due to an
 * instruction fetch.
 *
 * Handling:
 * Similar to DSLB, though in response to fetch. The faulting address is found
 * in SRR0 (rather than DAR).
 */
INT_DEFINE_BEGIN(instruction_access_slb)
	IVEC=0x480
	IISIDE=1
	IDAR=1
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	IKVM_REAL=1
#endif
INT_DEFINE_END(instruction_access_slb)

EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=0
EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
	GEN_INT_ENTRY instruction_access_slb, virt=1
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
EXC_COMMON_BEGIN(instruction_access_slb_common)
	GEN_COMMON instruction_access_slb
BEGIN_MMU_FTR_SECTION
	/* HPT case, do SLB fault */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_slb_fault
	cmpdi	r3,0
	bne-	1f
	b	fast_interrupt_return_srr
1:	/* Error case */
MMU_FTR_SECTION_ELSE
	/* Radix case, access is outside page table range */
	li	r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
	std	r3,RESULT(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_bad_slb_fault
	b	interrupt_return_srr


/**
 * Interrupt 0x500 - External Interrupt.
 * This is an asynchronous maskable interrupt in response to an "external
 * exception" from the interrupt controller or hypervisor (e.g., device
 * interrupt). It is maskable in hardware by clearing MSR[EE], and
 * soft-maskable with IRQS_DISABLED mask (i.e., local_irq_disable()).
 *
 * When running in HV mode, Linux sets up the LPCR[LPES] bit such that
 * interrupts are delivered with HSRR registers, guests use SRRs, which
 * requires IHSRR_IF_HVMODE.
 *
 * On bare metal POWER9 and later, Linux sets the LPCR[HVICE] bit such that
 * external interrupts are delivered as Hypervisor Virtualization Interrupts
 * rather than External Interrupts.
 *
 * Handling:
 * This calls into Linux IRQ handler. NVGPRs are not saved to reduce overhead,
 * because registers at the time of the interrupt are not so important as it
 * is asynchronous.
 *
 * If soft masked, the masked handler will note the pending interrupt for
 * replay, and clear MSR[EE] in the interrupted context.
 */
INT_DEFINE_BEGIN(hardware_interrupt)
	IVEC=0x500
	IHSRR_IF_HVMODE=1
	IMASK=IRQS_DISABLED
	IKVM_REAL=1
	IKVM_VIRT=1
INT_DEFINE_END(hardware_interrupt)

EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=0
EXC_REAL_END(hardware_interrupt, 0x500, 0x100)
EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x100)
	GEN_INT_ENTRY hardware_interrupt, virt=1
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x100)
EXC_COMMON_BEGIN(hardware_interrupt_common)
	GEN_COMMON hardware_interrupt
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_IRQ
	BEGIN_FTR_SECTION
	b	interrupt_return_hsrr
	FTR_SECTION_ELSE
	b	interrupt_return_srr
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)

1564
1565/**
1566 * Interrupt 0x600 - Alignment Interrupt
1567 * This is a synchronous interrupt in response to data alignment fault.
1568 */
1569INT_DEFINE_BEGIN(alignment)
1570 IVEC=0x600
1571 IDAR=1
1572 IDSISR=1
1573#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1574 IKVM_REAL=1
1575#endif
1576INT_DEFINE_END(alignment)
1577
1578EXC_REAL_BEGIN(alignment, 0x600, 0x100)
1579 GEN_INT_ENTRY alignment, virt=0
1580EXC_REAL_END(alignment, 0x600, 0x100)
1581EXC_VIRT_BEGIN(alignment, 0x4600, 0x100)
1582 GEN_INT_ENTRY alignment, virt=1
1583EXC_VIRT_END(alignment, 0x4600, 0x100)
1584EXC_COMMON_BEGIN(alignment_common)
1585 GEN_COMMON alignment
1586 addi r3,r1,STACK_FRAME_OVERHEAD
1587 bl alignment_exception
1588 REST_NVGPRS(r1) /* instruction emulation may change GPRs */
1589 b interrupt_return_srr
1590
1591
1592/**
1593 * Interrupt 0x700 - Program Interrupt (program check).
1594 * This is a synchronous interrupt in response to various instruction faults:
1595 * traps, privilege errors, TM errors, floating point exceptions.
1596 *
1597 * Handling:
1598 * This interrupt may use the "emergency stack" in some cases when being taken
1599 * from kernel context, which complicates handling.
1600 */
1601INT_DEFINE_BEGIN(program_check)
1602 IVEC=0x700
1603#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1604 IKVM_REAL=1
1605#endif
1606INT_DEFINE_END(program_check)
1607
1608EXC_REAL_BEGIN(program_check, 0x700, 0x100)
1609
1610#ifdef CONFIG_CPU_LITTLE_ENDIAN
1611 /*
1612 * There's a short window during boot where although the kernel is
1613 * running little endian, any exceptions will cause the CPU to switch
1614 * back to big endian. For example a WARN() boils down to a trap
1615 * instruction, which will cause a program check, and we end up here but
1616 * with the CPU in big endian mode. The first instruction of the program
1617 * check handler (in GEN_INT_ENTRY below) is an mtsprg, which when
1618 * executed in the wrong endian is an lhzu with a ~3GB displacement from
1619 * r3. The content of r3 is random, so that is a load from some random
1620 * location, and depending on the system can easily lead to a checkstop,
1621 * or an infinitely recursive page fault.
1622 *
1623 * So to handle that case we have a trampoline here that can detect we
1624 * are in the wrong endian and flip us back to the correct endian. We
1625 * can't flip MSR[LE] using mtmsr, so we have to use rfid. That requires
1626 * backing up SRR0/1 as well as a GPR. To do that we use SPRG0/2/3, as
1627 * SPRG1 is already used for the paca. SPRG3 is user readable, but this
1628 * trampoline is only active very early in boot, and SPRG3 will be
1629 * reinitialised in vdso_getcpu_init() before userspace starts.
1630 */
1631BEGIN_FTR_SECTION
1632 tdi 0,0,0x48 // Trap never, or in reverse endian: b . + 8
1633 b 1f // Skip trampoline if endian is correct
1634 .long 0xa643707d // mtsprg 0, r11 Backup r11
1635 .long 0xa6027a7d // mfsrr0 r11
1636 .long 0xa643727d // mtsprg 2, r11 Backup SRR0 in SPRG2
1637 .long 0xa6027b7d // mfsrr1 r11
1638 .long 0xa643737d // mtsprg 3, r11 Backup SRR1 in SPRG3
1639 .long 0xa600607d // mfmsr r11
1640 .long 0x01006b69 // xori r11, r11, 1 Invert MSR[LE]
1641 .long 0xa6037b7d // mtsrr1 r11
1642 .long 0x34076039 // li r11, 0x734
1643 .long 0xa6037a7d // mtsrr0 r11
1644 .long 0x2400004c // rfid
1645 mfsprg r11, 3
1646 mtsrr1 r11 // Restore SRR1
1647 mfsprg r11, 2
1648 mtsrr0 r11 // Restore SRR0
1649 mfsprg r11, 0 // Restore r11
16501:
1651END_FTR_SECTION(0, 1) // nop out after boot
1652#endif /* CONFIG_CPU_LITTLE_ENDIAN */
1653
1654 GEN_INT_ENTRY program_check, virt=0
1655EXC_REAL_END(program_check, 0x700, 0x100)
1656EXC_VIRT_BEGIN(program_check, 0x4700, 0x100)
1657 GEN_INT_ENTRY program_check, virt=1
1658EXC_VIRT_END(program_check, 0x4700, 0x100)
1659EXC_COMMON_BEGIN(program_check_common)
1660 __GEN_COMMON_ENTRY program_check
1661
1662 /*
1663 * It's possible to receive a TM Bad Thing type program check with
1664 * userspace register values (in particular r1), but with SRR1 reporting
1665 * that we came from the kernel. Normally that would confuse the bad
1666 * stack logic, and we would report a bad kernel stack pointer. Instead
1667 * we switch to the emergency stack if we're taking a TM Bad Thing from
1668 * the kernel.
1669 */
1670
1671 andi. r10,r12,MSR_PR
1672 bne .Lnormal_stack /* If userspace, go normal path */
1673
1674 andis. r10,r12,(SRR1_PROGTM)@h
1675 bne .Lemergency_stack /* If TM, emergency */
1676
1677 cmpdi r1,-INT_FRAME_SIZE /* check if r1 is in userspace */
1678 blt .Lnormal_stack /* normal path if not */
1679
1680 /* Use the emergency stack */
1681.Lemergency_stack:
1682 andi. r10,r12,MSR_PR /* Set CR0 correctly for label */
1683 /* 3 in EXCEPTION_PROLOG_COMMON */
1684 mr r10,r1 /* Save r1 */
1685 ld r1,PACAEMERGSP(r13) /* Use emergency stack */
1686 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
1687 __ISTACK(program_check)=0
1688 __GEN_COMMON_BODY program_check
1689 b .Ldo_program_check
1690
1691.Lnormal_stack:
1692 __ISTACK(program_check)=1
1693 __GEN_COMMON_BODY program_check
1694
1695.Ldo_program_check:
1696 addi r3,r1,STACK_FRAME_OVERHEAD
1697 bl program_check_exception
1698 REST_NVGPRS(r1) /* instruction emulation may change GPRs */
1699 b interrupt_return_srr
1700
1701
1702/*
1703 * Interrupt 0x800 - Floating-Point Unavailable Interrupt.
1704 * This is a synchronous interrupt in response to executing an fp instruction
1705 * with MSR[FP]=0.
1706 *
1707 * Handling:
1708 * This will load FP registers and enable the FP bit if coming from userspace,
1709 * otherwise report a bad kernel use of FP.
1710 */
1711INT_DEFINE_BEGIN(fp_unavailable)
1712 IVEC=0x800
1713#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1714 IKVM_REAL=1
1715#endif
1716INT_DEFINE_END(fp_unavailable)
1717
1718EXC_REAL_BEGIN(fp_unavailable, 0x800, 0x100)
1719 GEN_INT_ENTRY fp_unavailable, virt=0
1720EXC_REAL_END(fp_unavailable, 0x800, 0x100)
1721EXC_VIRT_BEGIN(fp_unavailable, 0x4800, 0x100)
1722 GEN_INT_ENTRY fp_unavailable, virt=1
1723EXC_VIRT_END(fp_unavailable, 0x4800, 0x100)
1724EXC_COMMON_BEGIN(fp_unavailable_common)
1725 GEN_COMMON fp_unavailable
1726 bne 1f /* if from user, just load it up */
1727 addi r3,r1,STACK_FRAME_OVERHEAD
1728 bl kernel_fp_unavailable_exception
17290: trap
1730 EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0
17311:
1732#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1733BEGIN_FTR_SECTION
	/*
	 * Test if the 2 TM state bits are zero. If non-zero (i.e., userspace
	 * was in a transaction), go do TM stuff.
	 */
1737 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
1738 bne- 2f
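	/*
	 * The rldicl. above computes r0 = (MSR >> MSR_TS_LG) & 3, rotating
	 * the 2-bit MSR[TS] field down to the least significant bits, so
	 * non-zero means the thread was in transactional or suspended
	 * state.
	 */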
1739END_FTR_SECTION_IFSET(CPU_FTR_TM)
1740#endif
1741 bl load_up_fpu
1742 b fast_interrupt_return_srr
1743#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
17442: /* User process was in a transaction */
1745 addi r3,r1,STACK_FRAME_OVERHEAD
1746 bl fp_unavailable_tm
1747 b interrupt_return_srr
1748#endif
1749
1750
1751/**
1752 * Interrupt 0x900 - Decrementer Interrupt.
1753 * This is an asynchronous interrupt in response to a decrementer exception
1754 * (e.g., DEC has wrapped below zero). It is maskable in hardware by clearing
1755 * MSR[EE], and soft-maskable with IRQS_DISABLED mask (i.e.,
1756 * local_irq_disable()).
1757 *
1758 * Handling:
 * This calls into the Linux timer handler. NVGPRs are not saved (see 0x500).
1760 *
1761 * If soft masked, the masked handler will note the pending interrupt for
1762 * replay, and bump the decrementer to a high value, leaving MSR[EE] enabled
1763 * in the interrupted context.
1764 * If PPC_WATCHDOG is configured, the soft masked handler will actually set
1765 * things back up to run soft_nmi_interrupt as a regular interrupt handler
1766 * on the emergency stack.
1767 */
1768INT_DEFINE_BEGIN(decrementer)
1769 IVEC=0x900
1770 IMASK=IRQS_DISABLED
1771#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1772 IKVM_REAL=1
1773#endif
1774INT_DEFINE_END(decrementer)
1775
1776EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
1777 GEN_INT_ENTRY decrementer, virt=0
1778EXC_REAL_END(decrementer, 0x900, 0x80)
1779EXC_VIRT_BEGIN(decrementer, 0x4900, 0x80)
1780 GEN_INT_ENTRY decrementer, virt=1
1781EXC_VIRT_END(decrementer, 0x4900, 0x80)
1782EXC_COMMON_BEGIN(decrementer_common)
1783 GEN_COMMON decrementer
1784 addi r3,r1,STACK_FRAME_OVERHEAD
1785 bl timer_interrupt
1786 b interrupt_return_srr
1787
1788
1789/**
1790 * Interrupt 0x980 - Hypervisor Decrementer Interrupt.
1791 * This is an asynchronous interrupt, similar to 0x900 but for the HDEC
1792 * register.
1793 *
1794 * Handling:
 * Linux does not use this interrupt outside of KVM, where it is used to
 * keep a host timer while the guest is given control of DEC. It should
 * normally be caught by the KVM test and routed there.
1798 */
1799INT_DEFINE_BEGIN(hdecrementer)
1800 IVEC=0x980
1801 IHSRR=1
1802 ISTACK=0
1803 IKVM_REAL=1
1804 IKVM_VIRT=1
1805INT_DEFINE_END(hdecrementer)
1806
1807EXC_REAL_BEGIN(hdecrementer, 0x980, 0x80)
1808 GEN_INT_ENTRY hdecrementer, virt=0
1809EXC_REAL_END(hdecrementer, 0x980, 0x80)
1810EXC_VIRT_BEGIN(hdecrementer, 0x4980, 0x80)
1811 GEN_INT_ENTRY hdecrementer, virt=1
1812EXC_VIRT_END(hdecrementer, 0x4980, 0x80)
1813EXC_COMMON_BEGIN(hdecrementer_common)
1814 __GEN_COMMON_ENTRY hdecrementer
1815 /*
1816 * Hypervisor decrementer interrupts not caught by the KVM test
1817 * shouldn't occur but are sometimes left pending on exit from a KVM
1818 * guest. We don't need to do anything to clear them, as they are
1819 * edge-triggered.
1820 *
1821 * Be careful to avoid touching the kernel stack.
1822 */
1823 li r10,0
1824 stb r10,PACAHSRR_VALID(r13)
1825 ld r10,PACA_EXGEN+EX_CTR(r13)
1826 mtctr r10
1827 mtcrf 0x80,r9
1828 ld r9,PACA_EXGEN+EX_R9(r13)
1829 ld r10,PACA_EXGEN+EX_R10(r13)
1830 ld r11,PACA_EXGEN+EX_R11(r13)
1831 ld r12,PACA_EXGEN+EX_R12(r13)
1832 ld r13,PACA_EXGEN+EX_R13(r13)
1833 HRFI_TO_KERNEL
1834
1835
1836/**
1837 * Interrupt 0xa00 - Directed Privileged Doorbell Interrupt.
1838 * This is an asynchronous interrupt in response to a msgsndp doorbell.
1839 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
1840 * IRQS_DISABLED mask (i.e., local_irq_disable()).
1841 *
1842 * Handling:
1843 * Guests may use this for IPIs between threads in a core if the
 * hypervisor supports it. NVGPRs are not saved (see 0x500).
1845 *
1846 * If soft masked, the masked handler will note the pending interrupt for
1847 * replay, leaving MSR[EE] enabled in the interrupted context because the
1848 * doorbells are edge triggered.
1849 */
1850INT_DEFINE_BEGIN(doorbell_super)
1851 IVEC=0xa00
1852 IMASK=IRQS_DISABLED
1853#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1854 IKVM_REAL=1
1855#endif
1856INT_DEFINE_END(doorbell_super)
1857
1858EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
1859 GEN_INT_ENTRY doorbell_super, virt=0
1860EXC_REAL_END(doorbell_super, 0xa00, 0x100)
1861EXC_VIRT_BEGIN(doorbell_super, 0x4a00, 0x100)
1862 GEN_INT_ENTRY doorbell_super, virt=1
1863EXC_VIRT_END(doorbell_super, 0x4a00, 0x100)
1864EXC_COMMON_BEGIN(doorbell_super_common)
1865 GEN_COMMON doorbell_super
1866 addi r3,r1,STACK_FRAME_OVERHEAD
1867#ifdef CONFIG_PPC_DOORBELL
1868 bl doorbell_exception
1869#else
1870 bl unknown_async_exception
1871#endif
1872 b interrupt_return_srr
1873
1874
1875EXC_REAL_NONE(0xb00, 0x100)
1876EXC_VIRT_NONE(0x4b00, 0x100)
1877
1878/**
1879 * Interrupt 0xc00 - System Call Interrupt (syscall, hcall).
1880 * This is a synchronous interrupt invoked with the "sc" instruction. The
1881 * system call is invoked with "sc 0" and does not alter the HV bit, so it
1882 * is directed to the currently running OS. The hypercall is invoked with
1883 * "sc 1" and it sets HV=1, so it elevates to hypervisor.
1884 *
1885 * In HPT, sc 1 always goes to 0xc00 real mode. In RADIX, sc 1 can go to
1886 * 0x4c00 virtual mode.
1887 *
1888 * Handling:
1889 * If the KVM test fires then it was due to a hypercall and is accordingly
1890 * routed to KVM. Otherwise this executes a normal Linux system call.
1891 *
1892 * Call convention:
1893 *
1894 * syscall and hypercalls register conventions are documented in
1895 * Documentation/powerpc/syscall64-abi.rst and
1896 * Documentation/powerpc/papr_hcalls.rst respectively.
1897 *
 * The intersection of volatile registers that don't contain possible
 * inputs is: cr0, xer, ctr. We may use these as scratch regs upon entry
 * without saving them. xer is a poor choice, though: hardware may
 * interpret some of its bits, so changing them can be costly.
1902 */
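
/*
 * For reference, a minimal "sc 0" invocation from userspace looks like the
 * sketch below (following syscall64-abi.rst; illustrative only, not part of
 * this file's code path; 4 is the powerpc __NR_write value):
 *
 *	li	r0,4		# r0 = syscall number (__NR_write)
 *	...			# r3-r8 = arguments
 *	sc	0		# take the 0xc00 interrupt
 *	bns+	1f		# cr0.SO clear: r3 is the return value
 *	neg	r3,r3		# cr0.SO set: r3 is a positive errno
 * 1:
 */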
1903INT_DEFINE_BEGIN(system_call)
1904 IVEC=0xc00
1905 IKVM_REAL=1
1906 IKVM_VIRT=1
1907INT_DEFINE_END(system_call)
1908
1909.macro SYSTEM_CALL virt
1910#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1911 /*
1912 * There is a little bit of juggling to get syscall and hcall
1913 * working well. Save r13 in ctr to avoid using SPRG scratch
1914 * register.
1915 *
1916 * Userspace syscalls have already saved the PPR, hcalls must save
1917 * it before setting HMT_MEDIUM.
1918 */
1919 mtctr r13
1920 GET_PACA(r13)
1921 std r10,PACA_EXGEN+EX_R10(r13)
1922 INTERRUPT_TO_KERNEL
1923 KVMTEST system_call kvm_hcall /* uses r10, branch to kvm_hcall */
1924 mfctr r9
1925#else
1926 mr r9,r13
1927 GET_PACA(r13)
1928 INTERRUPT_TO_KERNEL
1929#endif
1930
1931#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
1932BEGIN_FTR_SECTION
1933 cmpdi r0,0x1ebe
1934 beq- 1f
1935END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
1936#endif
1937
1938 /* We reach here with PACA in r13, r13 in r9. */
1939 mfspr r11,SPRN_SRR0
1940 mfspr r12,SPRN_SRR1
1941
1942 HMT_MEDIUM
1943
1944 .if ! \virt
1945 __LOAD_HANDLER(r10, system_call_common_real)
1946 mtctr r10
1947 bctr
1948 .else
1949#ifdef CONFIG_RELOCATABLE
1950 __LOAD_HANDLER(r10, system_call_common)
1951 mtctr r10
1952 bctr
1953#else
1954 b system_call_common
1955#endif
1956 .endif
1957
1958#ifdef CONFIG_PPC_FAST_ENDIAN_SWITCH
1959 /* Fast LE/BE switch system call */
19601: mfspr r12,SPRN_SRR1
1961 xori r12,r12,MSR_LE
1962 mtspr SPRN_SRR1,r12
1963 mr r13,r9
1964 RFI_TO_USER /* return to userspace */
1965 b . /* prevent speculative execution */
1966#endif
1967.endm
1968
1969EXC_REAL_BEGIN(system_call, 0xc00, 0x100)
1970 SYSTEM_CALL 0
1971EXC_REAL_END(system_call, 0xc00, 0x100)
1972EXC_VIRT_BEGIN(system_call, 0x4c00, 0x100)
1973 SYSTEM_CALL 1
1974EXC_VIRT_END(system_call, 0x4c00, 0x100)
1975
1976#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1977TRAMP_REAL_BEGIN(kvm_hcall)
1978 std r9,PACA_EXGEN+EX_R9(r13)
1979 std r11,PACA_EXGEN+EX_R11(r13)
1980 std r12,PACA_EXGEN+EX_R12(r13)
1981 mfcr r9
1982 mfctr r10
1983 std r10,PACA_EXGEN+EX_R13(r13)
1984 li r10,0
1985 std r10,PACA_EXGEN+EX_CFAR(r13)
1986 std r10,PACA_EXGEN+EX_CTR(r13)
1987 /*
1988 * Save the PPR (on systems that support it) before changing to
1989 * HMT_MEDIUM. That allows the KVM code to save that value into the
1990 * guest state (it is the guest's PPR value).
1991 */
1992BEGIN_FTR_SECTION
1993 mfspr r10,SPRN_PPR
1994 std r10,PACA_EXGEN+EX_PPR(r13)
1995END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1996
1997 HMT_MEDIUM
1998
1999#ifdef CONFIG_RELOCATABLE
2000 /*
	 * Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
2002 * outside the head section.
2003 */
2004 __LOAD_FAR_HANDLER(r10, kvmppc_hcall)
2005 mtctr r10
2006 bctr
2007#else
2008 b kvmppc_hcall
2009#endif
2010#endif
2011
2012/**
2013 * Interrupt 0xd00 - Trace Interrupt.
2014 * This is a synchronous interrupt in response to instruction step or
2015 * breakpoint faults.
2016 */
2017INT_DEFINE_BEGIN(single_step)
2018 IVEC=0xd00
2019#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2020 IKVM_REAL=1
2021#endif
2022INT_DEFINE_END(single_step)
2023
2024EXC_REAL_BEGIN(single_step, 0xd00, 0x100)
2025 GEN_INT_ENTRY single_step, virt=0
2026EXC_REAL_END(single_step, 0xd00, 0x100)
2027EXC_VIRT_BEGIN(single_step, 0x4d00, 0x100)
2028 GEN_INT_ENTRY single_step, virt=1
2029EXC_VIRT_END(single_step, 0x4d00, 0x100)
2030EXC_COMMON_BEGIN(single_step_common)
2031 GEN_COMMON single_step
2032 addi r3,r1,STACK_FRAME_OVERHEAD
2033 bl single_step_exception
2034 b interrupt_return_srr
2035
2036
2037/**
2038 * Interrupt 0xe00 - Hypervisor Data Storage Interrupt (HDSI).
2039 * This is a synchronous interrupt in response to an MMU fault caused by a
2040 * guest data access.
2041 *
2042 * Handling:
2043 * This should always get routed to KVM. In radix MMU mode, this is caused
2044 * by a guest nested radix access that can't be performed due to the
2045 * partition scope page table. In hash mode, this can be caused by guests
2046 * running with translation disabled (virtual real mode) or with VPM enabled.
2047 * KVM will update the page table structures or disallow the access.
2048 */
2049INT_DEFINE_BEGIN(h_data_storage)
2050 IVEC=0xe00
2051 IHSRR=1
2052 IDAR=1
2053 IDSISR=1
2054 IKVM_REAL=1
2055 IKVM_VIRT=1
2056INT_DEFINE_END(h_data_storage)
2057
2058EXC_REAL_BEGIN(h_data_storage, 0xe00, 0x20)
2059 GEN_INT_ENTRY h_data_storage, virt=0, ool=1
2060EXC_REAL_END(h_data_storage, 0xe00, 0x20)
2061EXC_VIRT_BEGIN(h_data_storage, 0x4e00, 0x20)
2062 GEN_INT_ENTRY h_data_storage, virt=1, ool=1
2063EXC_VIRT_END(h_data_storage, 0x4e00, 0x20)
2064EXC_COMMON_BEGIN(h_data_storage_common)
2065 GEN_COMMON h_data_storage
2066 addi r3,r1,STACK_FRAME_OVERHEAD
2067BEGIN_MMU_FTR_SECTION
2068 bl do_bad_page_fault_segv
2069MMU_FTR_SECTION_ELSE
2070 bl unknown_exception
2071ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_TYPE_RADIX)
2072 b interrupt_return_hsrr
2073
2074
2075/**
2076 * Interrupt 0xe20 - Hypervisor Instruction Storage Interrupt (HISI).
2077 * This is a synchronous interrupt in response to an MMU fault caused by a
2078 * guest instruction fetch, similar to HDSI.
2079 */
2080INT_DEFINE_BEGIN(h_instr_storage)
2081 IVEC=0xe20
2082 IHSRR=1
2083 IKVM_REAL=1
2084 IKVM_VIRT=1
2085INT_DEFINE_END(h_instr_storage)
2086
2087EXC_REAL_BEGIN(h_instr_storage, 0xe20, 0x20)
2088 GEN_INT_ENTRY h_instr_storage, virt=0, ool=1
2089EXC_REAL_END(h_instr_storage, 0xe20, 0x20)
2090EXC_VIRT_BEGIN(h_instr_storage, 0x4e20, 0x20)
2091 GEN_INT_ENTRY h_instr_storage, virt=1, ool=1
2092EXC_VIRT_END(h_instr_storage, 0x4e20, 0x20)
2093EXC_COMMON_BEGIN(h_instr_storage_common)
2094 GEN_COMMON h_instr_storage
2095 addi r3,r1,STACK_FRAME_OVERHEAD
2096 bl unknown_exception
2097 b interrupt_return_hsrr
2098
2099
2100/**
2101 * Interrupt 0xe40 - Hypervisor Emulation Assistance Interrupt.
2102 */
2103INT_DEFINE_BEGIN(emulation_assist)
2104 IVEC=0xe40
2105 IHSRR=1
2106 IKVM_REAL=1
2107 IKVM_VIRT=1
2108INT_DEFINE_END(emulation_assist)
2109
2110EXC_REAL_BEGIN(emulation_assist, 0xe40, 0x20)
2111 GEN_INT_ENTRY emulation_assist, virt=0, ool=1
2112EXC_REAL_END(emulation_assist, 0xe40, 0x20)
2113EXC_VIRT_BEGIN(emulation_assist, 0x4e40, 0x20)
2114 GEN_INT_ENTRY emulation_assist, virt=1, ool=1
2115EXC_VIRT_END(emulation_assist, 0x4e40, 0x20)
2116EXC_COMMON_BEGIN(emulation_assist_common)
2117 GEN_COMMON emulation_assist
2118 addi r3,r1,STACK_FRAME_OVERHEAD
2119 bl emulation_assist_interrupt
2120 REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2121 b interrupt_return_hsrr
2122
2123
2124/**
2125 * Interrupt 0xe60 - Hypervisor Maintenance Interrupt (HMI).
2126 * This is an asynchronous interrupt caused by a Hypervisor Maintenance
2127 * Exception. It is always taken in real mode but uses HSRR registers
2128 * unlike SRESET and MCE.
2129 *
2130 * It is maskable in hardware by clearing MSR[EE], and partially soft-maskable
2131 * with IRQS_DISABLED mask (i.e., local_irq_disable()).
2132 *
2133 * Handling:
 * This is a special case: it is handled similarly to machine checks, with
 * an initial real mode handler that is not soft-masked and attempts to fix
 * the problem, followed by a regular handler that is soft-maskable and
 * reports the problem.
2138 *
2139 * The emergency stack is used for the early real mode handler.
2140 *
2141 * XXX: unclear why MCE and HMI schemes could not be made common, e.g.,
2142 * either use soft-masking for the MCE, or use irq_work for the HMI.
2143 *
2144 * KVM:
2145 * Unlike MCE, this calls into KVM without calling the real mode handler
2146 * first.
2147 */
2148INT_DEFINE_BEGIN(hmi_exception_early)
2149 IVEC=0xe60
2150 IHSRR=1
2151 IREALMODE_COMMON=1
2152 ISTACK=0
2153 IKUAP=0 /* We don't touch AMR here, we never go to virtual mode */
2154 IKVM_REAL=1
2155INT_DEFINE_END(hmi_exception_early)
2156
2157INT_DEFINE_BEGIN(hmi_exception)
2158 IVEC=0xe60
2159 IHSRR=1
2160 IMASK=IRQS_DISABLED
2161 IKVM_REAL=1
2162INT_DEFINE_END(hmi_exception)
2163
2164EXC_REAL_BEGIN(hmi_exception, 0xe60, 0x20)
2165 GEN_INT_ENTRY hmi_exception_early, virt=0, ool=1
2166EXC_REAL_END(hmi_exception, 0xe60, 0x20)
2167EXC_VIRT_NONE(0x4e60, 0x20)
2168
2169EXC_COMMON_BEGIN(hmi_exception_early_common)
2170 __GEN_REALMODE_COMMON_ENTRY hmi_exception_early
2171
2172 mr r10,r1 /* Save r1 */
2173 ld r1,PACAEMERGSP(r13) /* Use emergency stack for realmode */
2174 subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
2175
2176 __GEN_COMMON_BODY hmi_exception_early
2177
2178 addi r3,r1,STACK_FRAME_OVERHEAD
2179 bl hmi_exception_realmode
2180 cmpdi cr0,r3,0
2181 bne 1f
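	/*
	 * The test above: r3 == 0 means hmi_exception_realmode() handled
	 * the event entirely in real mode, so restore and return straight
	 * to the interrupted context. A non-zero r3 branches to 1f to
	 * re-enter as the regular, soft-maskable hmi_exception handler and
	 * report the event from virtual mode.
	 */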
2182
2183 EXCEPTION_RESTORE_REGS hsrr=1
2184 HRFI_TO_USER_OR_KERNEL
2185
21861:
2187 /*
2188 * Go to virtual mode and pull the HMI event information from
2189 * firmware.
2190 */
2191 EXCEPTION_RESTORE_REGS hsrr=1
2192 GEN_INT_ENTRY hmi_exception, virt=0
2193
2194EXC_COMMON_BEGIN(hmi_exception_common)
2195 GEN_COMMON hmi_exception
2196 addi r3,r1,STACK_FRAME_OVERHEAD
2197 bl handle_hmi_exception
2198 b interrupt_return_hsrr
2199
2200
2201/**
2202 * Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
2203 * This is an asynchronous interrupt in response to a msgsnd doorbell.
2204 * Similar to the 0xa00 doorbell but for host rather than guest.
2205 */
2206INT_DEFINE_BEGIN(h_doorbell)
2207 IVEC=0xe80
2208 IHSRR=1
2209 IMASK=IRQS_DISABLED
2210 IKVM_REAL=1
2211 IKVM_VIRT=1
2212INT_DEFINE_END(h_doorbell)
2213
2214EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
2215 GEN_INT_ENTRY h_doorbell, virt=0, ool=1
2216EXC_REAL_END(h_doorbell, 0xe80, 0x20)
2217EXC_VIRT_BEGIN(h_doorbell, 0x4e80, 0x20)
2218 GEN_INT_ENTRY h_doorbell, virt=1, ool=1
2219EXC_VIRT_END(h_doorbell, 0x4e80, 0x20)
2220EXC_COMMON_BEGIN(h_doorbell_common)
2221 GEN_COMMON h_doorbell
2222 addi r3,r1,STACK_FRAME_OVERHEAD
2223#ifdef CONFIG_PPC_DOORBELL
2224 bl doorbell_exception
2225#else
2226 bl unknown_async_exception
2227#endif
2228 b interrupt_return_hsrr
2229
2230
2231/**
2232 * Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
2233 * This is an asynchronous interrupt in response to an "external exception".
2234 * Similar to 0x500 but for host only.
2235 */
2236INT_DEFINE_BEGIN(h_virt_irq)
2237 IVEC=0xea0
2238 IHSRR=1
2239 IMASK=IRQS_DISABLED
2240 IKVM_REAL=1
2241 IKVM_VIRT=1
2242INT_DEFINE_END(h_virt_irq)
2243
2244EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
2245 GEN_INT_ENTRY h_virt_irq, virt=0, ool=1
2246EXC_REAL_END(h_virt_irq, 0xea0, 0x20)
2247EXC_VIRT_BEGIN(h_virt_irq, 0x4ea0, 0x20)
2248 GEN_INT_ENTRY h_virt_irq, virt=1, ool=1
2249EXC_VIRT_END(h_virt_irq, 0x4ea0, 0x20)
2250EXC_COMMON_BEGIN(h_virt_irq_common)
2251 GEN_COMMON h_virt_irq
2252 addi r3,r1,STACK_FRAME_OVERHEAD
2253 bl do_IRQ
2254 b interrupt_return_hsrr
2255
2256
2257EXC_REAL_NONE(0xec0, 0x20)
2258EXC_VIRT_NONE(0x4ec0, 0x20)
2259EXC_REAL_NONE(0xee0, 0x20)
2260EXC_VIRT_NONE(0x4ee0, 0x20)
2261
2262
2263/*
2264 * Interrupt 0xf00 - Performance Monitor Interrupt (PMI, PMU).
2265 * This is an asynchronous interrupt in response to a PMU exception.
2266 * It is maskable in hardware by clearing MSR[EE], and soft-maskable with
2267 * IRQS_PMI_DISABLED mask (NOTE: NOT local_irq_disable()).
2268 *
2269 * Handling:
2270 * This calls into the perf subsystem.
2271 *
 * Like the watchdog soft-nmi, it appears to Linux as an NMI interrupt, in
 * that it can run while local_irq_disable() is in effect. However, it may
 * be soft-masked in powerpc-specific code.
2275 *
2276 * If soft masked, the masked handler will note the pending interrupt for
2277 * replay, and clear MSR[EE] in the interrupted context.
2278 */
2279INT_DEFINE_BEGIN(performance_monitor)
2280 IVEC=0xf00
2281 IMASK=IRQS_PMI_DISABLED
2282#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2283 IKVM_REAL=1
2284#endif
2285INT_DEFINE_END(performance_monitor)
2286
2287EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
2288 GEN_INT_ENTRY performance_monitor, virt=0, ool=1
2289EXC_REAL_END(performance_monitor, 0xf00, 0x20)
2290EXC_VIRT_BEGIN(performance_monitor, 0x4f00, 0x20)
2291 GEN_INT_ENTRY performance_monitor, virt=1, ool=1
2292EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
2293EXC_COMMON_BEGIN(performance_monitor_common)
2294 GEN_COMMON performance_monitor
2295 addi r3,r1,STACK_FRAME_OVERHEAD
2296 bl performance_monitor_exception
2297 b interrupt_return_srr
2298
2299
2300/**
2301 * Interrupt 0xf20 - Vector Unavailable Interrupt.
2302 * This is a synchronous interrupt in response to
2303 * executing a vector (or altivec) instruction with MSR[VEC]=0.
2304 * Similar to FP unavailable.
2305 */
2306INT_DEFINE_BEGIN(altivec_unavailable)
2307 IVEC=0xf20
2308#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2309 IKVM_REAL=1
2310#endif
2311INT_DEFINE_END(altivec_unavailable)
2312
2313EXC_REAL_BEGIN(altivec_unavailable, 0xf20, 0x20)
2314 GEN_INT_ENTRY altivec_unavailable, virt=0, ool=1
2315EXC_REAL_END(altivec_unavailable, 0xf20, 0x20)
2316EXC_VIRT_BEGIN(altivec_unavailable, 0x4f20, 0x20)
2317 GEN_INT_ENTRY altivec_unavailable, virt=1, ool=1
2318EXC_VIRT_END(altivec_unavailable, 0x4f20, 0x20)
2319EXC_COMMON_BEGIN(altivec_unavailable_common)
2320 GEN_COMMON altivec_unavailable
2321#ifdef CONFIG_ALTIVEC
2322BEGIN_FTR_SECTION
2323 beq 1f
2324#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2325 BEGIN_FTR_SECTION_NESTED(69)
	/*
	 * Test if the 2 TM state bits are zero. If non-zero (i.e., userspace
	 * was in a transaction), go do TM stuff.
	 */
2329 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
2330 bne- 2f
2331 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
2332#endif
2333 bl load_up_altivec
2334 b fast_interrupt_return_srr
2335#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
23362: /* User process was in a transaction */
2337 addi r3,r1,STACK_FRAME_OVERHEAD
2338 bl altivec_unavailable_tm
2339 b interrupt_return_srr
2340#endif
23411:
2342END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2343#endif
2344 addi r3,r1,STACK_FRAME_OVERHEAD
2345 bl altivec_unavailable_exception
2346 b interrupt_return_srr
2347
2348
2349/**
2350 * Interrupt 0xf40 - VSX Unavailable Interrupt.
2351 * This is a synchronous interrupt in response to
2352 * executing a VSX instruction with MSR[VSX]=0.
2353 * Similar to FP unavailable.
2354 */
2355INT_DEFINE_BEGIN(vsx_unavailable)
2356 IVEC=0xf40
2357#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2358 IKVM_REAL=1
2359#endif
2360INT_DEFINE_END(vsx_unavailable)
2361
2362EXC_REAL_BEGIN(vsx_unavailable, 0xf40, 0x20)
2363 GEN_INT_ENTRY vsx_unavailable, virt=0, ool=1
2364EXC_REAL_END(vsx_unavailable, 0xf40, 0x20)
2365EXC_VIRT_BEGIN(vsx_unavailable, 0x4f40, 0x20)
2366 GEN_INT_ENTRY vsx_unavailable, virt=1, ool=1
2367EXC_VIRT_END(vsx_unavailable, 0x4f40, 0x20)
2368EXC_COMMON_BEGIN(vsx_unavailable_common)
2369 GEN_COMMON vsx_unavailable
2370#ifdef CONFIG_VSX
2371BEGIN_FTR_SECTION
2372 beq 1f
2373#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2374 BEGIN_FTR_SECTION_NESTED(69)
	/*
	 * Test if the 2 TM state bits are zero. If non-zero (i.e., userspace
	 * was in a transaction), go do TM stuff.
	 */
2378 rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
2379 bne- 2f
2380 END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
2381#endif
2382 b load_up_vsx
2383#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
23842: /* User process was in a transaction */
2385 addi r3,r1,STACK_FRAME_OVERHEAD
2386 bl vsx_unavailable_tm
2387 b interrupt_return_srr
2388#endif
23891:
2390END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2391#endif
2392 addi r3,r1,STACK_FRAME_OVERHEAD
2393 bl vsx_unavailable_exception
2394 b interrupt_return_srr
2395
2396
2397/**
2398 * Interrupt 0xf60 - Facility Unavailable Interrupt.
2399 * This is a synchronous interrupt in response to
2400 * executing an instruction without access to the facility that can be
2401 * resolved by the OS (e.g., FSCR, MSR).
2402 * Similar to FP unavailable.
2403 */
2404INT_DEFINE_BEGIN(facility_unavailable)
2405 IVEC=0xf60
2406#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2407 IKVM_REAL=1
2408#endif
2409INT_DEFINE_END(facility_unavailable)
2410
2411EXC_REAL_BEGIN(facility_unavailable, 0xf60, 0x20)
2412 GEN_INT_ENTRY facility_unavailable, virt=0, ool=1
2413EXC_REAL_END(facility_unavailable, 0xf60, 0x20)
2414EXC_VIRT_BEGIN(facility_unavailable, 0x4f60, 0x20)
2415 GEN_INT_ENTRY facility_unavailable, virt=1, ool=1
2416EXC_VIRT_END(facility_unavailable, 0x4f60, 0x20)
2417EXC_COMMON_BEGIN(facility_unavailable_common)
2418 GEN_COMMON facility_unavailable
2419 addi r3,r1,STACK_FRAME_OVERHEAD
2420 bl facility_unavailable_exception
2421 REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2422 b interrupt_return_srr
2423
2424
2425/**
 * Interrupt 0xf80 - Hypervisor Facility Unavailable Interrupt.
2427 * This is a synchronous interrupt in response to
2428 * executing an instruction without access to the facility that can only
2429 * be resolved in HV mode (e.g., HFSCR).
2430 * Similar to FP unavailable.
2431 */
2432INT_DEFINE_BEGIN(h_facility_unavailable)
2433 IVEC=0xf80
2434 IHSRR=1
2435 IKVM_REAL=1
2436 IKVM_VIRT=1
2437INT_DEFINE_END(h_facility_unavailable)
2438
2439EXC_REAL_BEGIN(h_facility_unavailable, 0xf80, 0x20)
2440 GEN_INT_ENTRY h_facility_unavailable, virt=0, ool=1
2441EXC_REAL_END(h_facility_unavailable, 0xf80, 0x20)
2442EXC_VIRT_BEGIN(h_facility_unavailable, 0x4f80, 0x20)
2443 GEN_INT_ENTRY h_facility_unavailable, virt=1, ool=1
2444EXC_VIRT_END(h_facility_unavailable, 0x4f80, 0x20)
2445EXC_COMMON_BEGIN(h_facility_unavailable_common)
2446 GEN_COMMON h_facility_unavailable
2447 addi r3,r1,STACK_FRAME_OVERHEAD
2448 bl facility_unavailable_exception
2449 REST_NVGPRS(r1) /* XXX Shouldn't be necessary in practice */
2450 b interrupt_return_hsrr
2451
2452
2453EXC_REAL_NONE(0xfa0, 0x20)
2454EXC_VIRT_NONE(0x4fa0, 0x20)
2455EXC_REAL_NONE(0xfc0, 0x20)
2456EXC_VIRT_NONE(0x4fc0, 0x20)
2457EXC_REAL_NONE(0xfe0, 0x20)
2458EXC_VIRT_NONE(0x4fe0, 0x20)
2459
2460EXC_REAL_NONE(0x1000, 0x100)
2461EXC_VIRT_NONE(0x5000, 0x100)
2462EXC_REAL_NONE(0x1100, 0x100)
2463EXC_VIRT_NONE(0x5100, 0x100)
2464
2465#ifdef CONFIG_CBE_RAS
2466INT_DEFINE_BEGIN(cbe_system_error)
2467 IVEC=0x1200
2468 IHSRR=1
2469INT_DEFINE_END(cbe_system_error)
2470
2471EXC_REAL_BEGIN(cbe_system_error, 0x1200, 0x100)
2472 GEN_INT_ENTRY cbe_system_error, virt=0
2473EXC_REAL_END(cbe_system_error, 0x1200, 0x100)
2474EXC_VIRT_NONE(0x5200, 0x100)
2475EXC_COMMON_BEGIN(cbe_system_error_common)
2476 GEN_COMMON cbe_system_error
2477 addi r3,r1,STACK_FRAME_OVERHEAD
2478 bl cbe_system_error_exception
2479 b interrupt_return_hsrr
2480
2481#else /* CONFIG_CBE_RAS */
2482EXC_REAL_NONE(0x1200, 0x100)
2483EXC_VIRT_NONE(0x5200, 0x100)
2484#endif
2485
2486/**
2487 * Interrupt 0x1300 - Instruction Address Breakpoint Interrupt.
 * This interrupt was removed from the ISA before 2.01, which is the
 * earliest 64-bit BookS ISA supported. However, the G5 / 970 implements it
 * as a non-architected feature available through the support processor
 * interface.
2492 */
2493INT_DEFINE_BEGIN(instruction_breakpoint)
2494 IVEC=0x1300
2495#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2496 IKVM_REAL=1
2497#endif
2498INT_DEFINE_END(instruction_breakpoint)
2499
2500EXC_REAL_BEGIN(instruction_breakpoint, 0x1300, 0x100)
2501 GEN_INT_ENTRY instruction_breakpoint, virt=0
2502EXC_REAL_END(instruction_breakpoint, 0x1300, 0x100)
2503EXC_VIRT_BEGIN(instruction_breakpoint, 0x5300, 0x100)
2504 GEN_INT_ENTRY instruction_breakpoint, virt=1
2505EXC_VIRT_END(instruction_breakpoint, 0x5300, 0x100)
2506EXC_COMMON_BEGIN(instruction_breakpoint_common)
2507 GEN_COMMON instruction_breakpoint
2508 addi r3,r1,STACK_FRAME_OVERHEAD
2509 bl instruction_breakpoint_exception
2510 b interrupt_return_srr
2511
2512
2513EXC_REAL_NONE(0x1400, 0x100)
2514EXC_VIRT_NONE(0x5400, 0x100)
2515
2516/**
2517 * Interrupt 0x1500 - Soft Patch Interrupt
2518 *
2519 * Handling:
 * This is an implementation-specific interrupt which can be used for a
2521 * range of exceptions.
2522 *
2523 * This interrupt handler is unique in that it runs the denormal assist
2524 * code even for guests (and even in guest context) without going to KVM,
2525 * for speed. POWER9 does not raise denorm exceptions, so this special case
2526 * could be phased out in future to reduce special cases.
2527 */
2528INT_DEFINE_BEGIN(denorm_exception)
2529 IVEC=0x1500
2530 IHSRR=1
2531 IBRANCH_TO_COMMON=0
2532 IKVM_REAL=1
2533INT_DEFINE_END(denorm_exception)
2534
2535EXC_REAL_BEGIN(denorm_exception, 0x1500, 0x100)
2536 GEN_INT_ENTRY denorm_exception, virt=0
2537#ifdef CONFIG_PPC_DENORMALISATION
2538 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
2539 bne+ denorm_assist
2540#endif
2541 GEN_BRANCH_TO_COMMON denorm_exception, virt=0
2542EXC_REAL_END(denorm_exception, 0x1500, 0x100)
2543#ifdef CONFIG_PPC_DENORMALISATION
2544EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x100)
2545 GEN_INT_ENTRY denorm_exception, virt=1
2546 andis. r10,r12,(HSRR1_DENORM)@h /* denorm? */
2547 bne+ denorm_assist
2548 GEN_BRANCH_TO_COMMON denorm_exception, virt=1
2549EXC_VIRT_END(denorm_exception, 0x5500, 0x100)
2550#else
2551EXC_VIRT_NONE(0x5500, 0x100)
2552#endif
2553
2554#ifdef CONFIG_PPC_DENORMALISATION
2555TRAMP_REAL_BEGIN(denorm_assist)
2556BEGIN_FTR_SECTION
2557/*
2558 * To denormalise we need to move a copy of the register to itself.
2559 * For POWER6 do that here for all FP regs.
2560 */
2561 mfmsr r10
2562 ori r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
2563 xori r10,r10,(MSR_FE0|MSR_FE1)
2564 mtmsrd r10
2565 sync
2566
2567 .Lreg=0
2568 .rept 32
2569 fmr .Lreg,.Lreg
2570 .Lreg=.Lreg+1
2571 .endr
2572
2573FTR_SECTION_ELSE
2574/*
2575 * To denormalise we need to move a copy of the register to itself.
2576 * For POWER7 do that here for the first 32 VSX registers only.
2577 */
2578 mfmsr r10
2579 oris r10,r10,MSR_VSX@h
2580 mtmsrd r10
2581 sync
2582
2583 .Lreg=0
2584 .rept 32
2585 XVCPSGNDP(.Lreg,.Lreg,.Lreg)
2586 .Lreg=.Lreg+1
2587 .endr
2588
2589ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
2590
2591BEGIN_FTR_SECTION
2592 b denorm_done
2593END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
2594/*
2595 * To denormalise we need to move a copy of the register to itself.
2596 * For POWER8 we need to do that for all 64 VSX registers
2597 */
2598 .Lreg=32
2599 .rept 32
2600 XVCPSGNDP(.Lreg,.Lreg,.Lreg)
2601 .Lreg=.Lreg+1
2602 .endr
2603
2604denorm_done:
2605 mfspr r11,SPRN_HSRR0
2606 subi r11,r11,4
2607 mtspr SPRN_HSRR0,r11
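	/*
	 * HSRR0 has been wound back by one instruction, so returning
	 * re-executes the instruction that took the soft patch interrupt;
	 * it should now succeed, since the registers have been rewritten in
	 * place above. (That HSRR0 points past the trapping instruction is
	 * inferred from this subi; the interrupt is implementation
	 * specific.)
	 */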
2608 mtcrf 0x80,r9
2609 ld r9,PACA_EXGEN+EX_R9(r13)
2610BEGIN_FTR_SECTION
2611 ld r10,PACA_EXGEN+EX_PPR(r13)
2612 mtspr SPRN_PPR,r10
2613END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
2614BEGIN_FTR_SECTION
2615 ld r10,PACA_EXGEN+EX_CFAR(r13)
2616 mtspr SPRN_CFAR,r10
2617END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
2618 li r10,0
2619 stb r10,PACAHSRR_VALID(r13)
2620 ld r10,PACA_EXGEN+EX_R10(r13)
2621 ld r11,PACA_EXGEN+EX_R11(r13)
2622 ld r12,PACA_EXGEN+EX_R12(r13)
2623 ld r13,PACA_EXGEN+EX_R13(r13)
2624 HRFI_TO_UNKNOWN
2625 b .
2626#endif
2627
2628EXC_COMMON_BEGIN(denorm_exception_common)
2629 GEN_COMMON denorm_exception
2630 addi r3,r1,STACK_FRAME_OVERHEAD
2631 bl unknown_exception
2632 b interrupt_return_hsrr
2633
2634
2635#ifdef CONFIG_CBE_RAS
2636INT_DEFINE_BEGIN(cbe_maintenance)
2637 IVEC=0x1600
2638 IHSRR=1
2639INT_DEFINE_END(cbe_maintenance)
2640
2641EXC_REAL_BEGIN(cbe_maintenance, 0x1600, 0x100)
2642 GEN_INT_ENTRY cbe_maintenance, virt=0
2643EXC_REAL_END(cbe_maintenance, 0x1600, 0x100)
2644EXC_VIRT_NONE(0x5600, 0x100)
2645EXC_COMMON_BEGIN(cbe_maintenance_common)
2646 GEN_COMMON cbe_maintenance
2647 addi r3,r1,STACK_FRAME_OVERHEAD
2648 bl cbe_maintenance_exception
2649 b interrupt_return_hsrr
2650
2651#else /* CONFIG_CBE_RAS */
2652EXC_REAL_NONE(0x1600, 0x100)
2653EXC_VIRT_NONE(0x5600, 0x100)
2654#endif
2655
2656
2657INT_DEFINE_BEGIN(altivec_assist)
2658 IVEC=0x1700
2659#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
2660 IKVM_REAL=1
2661#endif
2662INT_DEFINE_END(altivec_assist)
2663
2664EXC_REAL_BEGIN(altivec_assist, 0x1700, 0x100)
2665 GEN_INT_ENTRY altivec_assist, virt=0
2666EXC_REAL_END(altivec_assist, 0x1700, 0x100)
2667EXC_VIRT_BEGIN(altivec_assist, 0x5700, 0x100)
2668 GEN_INT_ENTRY altivec_assist, virt=1
2669EXC_VIRT_END(altivec_assist, 0x5700, 0x100)
2670EXC_COMMON_BEGIN(altivec_assist_common)
2671 GEN_COMMON altivec_assist
2672 addi r3,r1,STACK_FRAME_OVERHEAD
2673#ifdef CONFIG_ALTIVEC
2674 bl altivec_assist_exception
2675 REST_NVGPRS(r1) /* instruction emulation may change GPRs */
2676#else
2677 bl unknown_exception
2678#endif
2679 b interrupt_return_srr
2680
2681
2682#ifdef CONFIG_CBE_RAS
2683INT_DEFINE_BEGIN(cbe_thermal)
2684 IVEC=0x1800
2685 IHSRR=1
2686INT_DEFINE_END(cbe_thermal)
2687
2688EXC_REAL_BEGIN(cbe_thermal, 0x1800, 0x100)
2689 GEN_INT_ENTRY cbe_thermal, virt=0
2690EXC_REAL_END(cbe_thermal, 0x1800, 0x100)
2691EXC_VIRT_NONE(0x5800, 0x100)
2692EXC_COMMON_BEGIN(cbe_thermal_common)
2693 GEN_COMMON cbe_thermal
2694 addi r3,r1,STACK_FRAME_OVERHEAD
2695 bl cbe_thermal_exception
2696 b interrupt_return_hsrr
2697
2698#else /* CONFIG_CBE_RAS */
2699EXC_REAL_NONE(0x1800, 0x100)
2700EXC_VIRT_NONE(0x5800, 0x100)
2701#endif
2702
2703
2704#ifdef CONFIG_PPC_WATCHDOG
2705
2706INT_DEFINE_BEGIN(soft_nmi)
2707 IVEC=0x900
2708 ISTACK=0
2709INT_DEFINE_END(soft_nmi)
2710
2711/*
2712 * Branch to soft_nmi_interrupt using the emergency stack. The emergency
2713 * stack is one that is usable by maskable interrupts so long as MSR_EE
2714 * remains off. It is used for recovery when something has corrupted the
2715 * normal kernel stack, for example. The "soft NMI" must not use the process
2716 * stack because we want irq disabled sections to avoid touching the stack
2717 * at all (other than PMU interrupts), so use the emergency stack for this,
2718 * and run it entirely with interrupts hard disabled.
2719 */
2720EXC_COMMON_BEGIN(soft_nmi_common)
2721 mr r10,r1
2722 ld r1,PACAEMERGSP(r13)
2723 subi r1,r1,INT_FRAME_SIZE
2724 __GEN_COMMON_BODY soft_nmi
2725
2726 addi r3,r1,STACK_FRAME_OVERHEAD
2727 bl soft_nmi_interrupt
2728
2729 /* Clear MSR_RI before setting SRR0 and SRR1. */
2730 li r9,0
2731 mtmsrd r9,1
2732
2733 kuap_kernel_restore r9, r10
2734
2735 EXCEPTION_RESTORE_REGS hsrr=0
2736 RFI_TO_KERNEL
2737
2738#endif /* CONFIG_PPC_WATCHDOG */
2739
2740/*
2741 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - If it was an HMI we return immediately since we handled it in realmode
 *   and it won't refire.
 * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
 * This is called with r10 containing the value to OR into the paca field.
2749 */
2750.macro MASKED_INTERRUPT hsrr=0
2751 .if \hsrr
2752masked_Hinterrupt:
2753 .else
2754masked_interrupt:
2755 .endif
2756 stw r9,PACA_EXGEN+EX_CCR(r13)
2757 lbz r9,PACAIRQHAPPENED(r13)
2758 or r9,r9,r10
2759 stb r9,PACAIRQHAPPENED(r13)
2760
2761 .if ! \hsrr
2762 cmpwi r10,PACA_IRQ_DEC
2763 bne 1f
2764 LOAD_REG_IMMEDIATE(r9, 0x7fffffff)
2765 mtspr SPRN_DEC,r9
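	/*
	 * 0x7fffffff is the largest positive 32-bit DEC value. Assuming the
	 * usual 512MHz timebase, this defers the next decrementer exception
	 * by roughly 4 seconds, far longer than the pending interrupt
	 * should take to be replayed.
	 */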
2766#ifdef CONFIG_PPC_WATCHDOG
2767 lwz r9,PACA_EXGEN+EX_CCR(r13)
2768 b soft_nmi_common
2769#else
2770 b 2f
2771#endif
2772 .endif
2773
27741: andi. r10,r10,PACA_IRQ_MUST_HARD_MASK
2775 beq 2f
2776 xori r12,r12,MSR_EE /* clear MSR_EE */
2777 .if \hsrr
2778 mtspr SPRN_HSRR1,r12
2779 .else
2780 mtspr SPRN_SRR1,r12
2781 .endif
2782 ori r9,r9,PACA_IRQ_HARD_DIS
2783 stb r9,PACAIRQHAPPENED(r13)
27842: /* done */
2785 li r9,0
2786 .if \hsrr
2787 stb r9,PACAHSRR_VALID(r13)
2788 .else
2789 stb r9,PACASRR_VALID(r13)
2790 .endif
2791
2792 SEARCH_RESTART_TABLE
2793 cmpdi r12,0
2794 beq 3f
2795 .if \hsrr
2796 mtspr SPRN_HSRR0,r12
2797 .else
2798 mtspr SPRN_SRR0,r12
2799 .endif
28003:
2801
2802 ld r9,PACA_EXGEN+EX_CTR(r13)
2803 mtctr r9
2804 lwz r9,PACA_EXGEN+EX_CCR(r13)
2805 mtcrf 0x80,r9
2806 std r1,PACAR1(r13)
2807 ld r9,PACA_EXGEN+EX_R9(r13)
2808 ld r10,PACA_EXGEN+EX_R10(r13)
2809 ld r11,PACA_EXGEN+EX_R11(r13)
2810 ld r12,PACA_EXGEN+EX_R12(r13)
2811 ld r13,PACA_EXGEN+EX_R13(r13)
2812 /* May return to masked low address where r13 is not set up */
2813 .if \hsrr
2814 HRFI_TO_KERNEL
2815 .else
2816 RFI_TO_KERNEL
2817 .endif
2818 b .
2819.endm
2820
2821TRAMP_REAL_BEGIN(stf_barrier_fallback)
2822 std r9,PACA_EXRFI+EX_R9(r13)
2823 std r10,PACA_EXRFI+EX_R10(r13)
2824 sync
2825 ld r9,PACA_EXRFI+EX_R9(r13)
2826 ld r10,PACA_EXRFI+EX_R10(r13)
2827 ori 31,31,0
2828 .rept 14
2829 b 1f
28301:
2831 .endr
2832 blr
2833
2834/* Clobbers r10, r11, ctr */
2835.macro L1D_DISPLACEMENT_FLUSH
2836 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
2837 ld r11,PACA_L1D_FLUSH_SIZE(r13)
2838 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
2839 mtctr r11
2840 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
2841
2842 /* order ld/st prior to dcbt stop all streams with flushing */
2843 sync
2844
2845 /*
2846 * The load addresses are at staggered offsets within cachelines,
2847 * which suits some pipelines better (on others it should not
2848 * hurt).
2849 */
28501:
2851 ld r11,(0x80 + 8)*0(r10)
2852 ld r11,(0x80 + 8)*1(r10)
2853 ld r11,(0x80 + 8)*2(r10)
2854 ld r11,(0x80 + 8)*3(r10)
2855 ld r11,(0x80 + 8)*4(r10)
2856 ld r11,(0x80 + 8)*5(r10)
2857 ld r11,(0x80 + 8)*6(r10)
2858 ld r11,(0x80 + 8)*7(r10)
2859 addi r10,r10,0x80*8
2860 bdnz 1b
2861.endm
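
/*
 * Worked example for the loop above, assuming a 32kB L1D (e.g., POWER9):
 * PACA_L1D_FLUSH_SIZE = 32768 and 32768 >> (7 + 3) = 32 iterations. Each
 * iteration loads from 8 consecutive 128-byte lines (at intra-line offsets
 * 0, 8, 16, ... because of the 0x88 stride) and advances r10 by 0x400, so
 * 32 * 8 * 128 bytes = 32kB of displacement data are touched in total.
 */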
2862
2863TRAMP_REAL_BEGIN(entry_flush_fallback)
2864 std r9,PACA_EXRFI+EX_R9(r13)
2865 std r10,PACA_EXRFI+EX_R10(r13)
2866 std r11,PACA_EXRFI+EX_R11(r13)
2867 mfctr r9
2868 L1D_DISPLACEMENT_FLUSH
2869 mtctr r9
2870 ld r9,PACA_EXRFI+EX_R9(r13)
2871 ld r10,PACA_EXRFI+EX_R10(r13)
2872 ld r11,PACA_EXRFI+EX_R11(r13)
2873 blr
2874
2875/*
 * The SCV entry flush happens with interrupts enabled, so interrupts must
 * be hard-disabled here to prevent EXRFI being clobbered by NMIs (e.g.,
 * soft_nmi_common). r10 (containing LR) does not need to be preserved here
 * because scv entry stores 0 for it in pt_regs; CTR can be clobbered for
 * the same reason.
2880 */
2881TRAMP_REAL_BEGIN(scv_entry_flush_fallback)
2882 li r10,0
2883 mtmsrd r10,1
2884 lbz r10,PACAIRQHAPPENED(r13)
2885 ori r10,r10,PACA_IRQ_HARD_DIS
2886 stb r10,PACAIRQHAPPENED(r13)
2887 std r11,PACA_EXRFI+EX_R11(r13)
2888 L1D_DISPLACEMENT_FLUSH
2889 ld r11,PACA_EXRFI+EX_R11(r13)
2890 li r10,MSR_RI
2891 mtmsrd r10,1
2892 blr
2893
2894TRAMP_REAL_BEGIN(rfi_flush_fallback)
2895 SET_SCRATCH0(r13);
2896 GET_PACA(r13);
2897 std r1,PACA_EXRFI+EX_R12(r13)
2898 ld r1,PACAKSAVE(r13)
2899 std r9,PACA_EXRFI+EX_R9(r13)
2900 std r10,PACA_EXRFI+EX_R10(r13)
2901 std r11,PACA_EXRFI+EX_R11(r13)
2902 mfctr r9
2903 L1D_DISPLACEMENT_FLUSH
2904 mtctr r9
2905 ld r9,PACA_EXRFI+EX_R9(r13)
2906 ld r10,PACA_EXRFI+EX_R10(r13)
2907 ld r11,PACA_EXRFI+EX_R11(r13)
2908 ld r1,PACA_EXRFI+EX_R12(r13)
2909 GET_SCRATCH0(r13);
2910 rfid
2911
2912TRAMP_REAL_BEGIN(hrfi_flush_fallback)
2913 SET_SCRATCH0(r13);
2914 GET_PACA(r13);
2915 std r1,PACA_EXRFI+EX_R12(r13)
2916 ld r1,PACAKSAVE(r13)
2917 std r9,PACA_EXRFI+EX_R9(r13)
2918 std r10,PACA_EXRFI+EX_R10(r13)
2919 std r11,PACA_EXRFI+EX_R11(r13)
2920 mfctr r9
2921 L1D_DISPLACEMENT_FLUSH
2922 mtctr r9
2923 ld r9,PACA_EXRFI+EX_R9(r13)
2924 ld r10,PACA_EXRFI+EX_R10(r13)
2925 ld r11,PACA_EXRFI+EX_R11(r13)
2926 ld r1,PACA_EXRFI+EX_R12(r13)
2927 GET_SCRATCH0(r13);
2928 hrfid
2929
2930TRAMP_REAL_BEGIN(rfscv_flush_fallback)
2931 /* system call volatile */
2932 mr r7,r13
2933 GET_PACA(r13);
2934 mr r8,r1
2935 ld r1,PACAKSAVE(r13)
2936 mfctr r9
2937 ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
2938 ld r11,PACA_L1D_FLUSH_SIZE(r13)
2939 srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
2940 mtctr r11
2941 DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
2942
2943 /* order ld/st prior to dcbt stop all streams with flushing */
2944 sync
2945
2946 /*
	 * The load addresses are at staggered offsets within cachelines,
2948 * which suits some pipelines better (on others it should not
2949 * hurt).
2950 */
29511:
2952 ld r11,(0x80 + 8)*0(r10)
2953 ld r11,(0x80 + 8)*1(r10)
2954 ld r11,(0x80 + 8)*2(r10)
2955 ld r11,(0x80 + 8)*3(r10)
2956 ld r11,(0x80 + 8)*4(r10)
2957 ld r11,(0x80 + 8)*5(r10)
2958 ld r11,(0x80 + 8)*6(r10)
2959 ld r11,(0x80 + 8)*7(r10)
2960 addi r10,r10,0x80*8
2961 bdnz 1b
2962
2963 mtctr r9
2964 li r9,0
2965 li r10,0
2966 li r11,0
2967 mr r1,r8
2968 mr r13,r7
2969 RFSCV
2970
2971USE_TEXT_SECTION()
2972
2973#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
2974kvm_interrupt:
2975 /*
	 * The conditional branch in KVMTEST can't reach all the way, so
	 * make a stub here.
2978 */
2979 b kvmppc_interrupt
2980#endif
2981
2982_GLOBAL(do_uaccess_flush)
2983 UACCESS_FLUSH_FIXUP_SECTION
2984 nop
2985 nop
2986 nop
2987 blr
2988 L1D_DISPLACEMENT_FLUSH
2989 blr
2990_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
2991EXPORT_SYMBOL(do_uaccess_flush)
2992
2993
2994MASKED_INTERRUPT
2995MASKED_INTERRUPT hsrr=1
2996
2997 /*
2998 * Relocation-on interrupts: A subset of the interrupts can be delivered
	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
	 * them. Addresses are the same as the original interrupt addresses,
	 * but offset by 0xc000000000004000.
	 * It's impossible to receive interrupts below 0x300 via this mechanism.
	 * KVM: None of these traps are from the guest; anything that escalated
3004 * to HV=1 from HV=0 is delivered via real mode handlers.
3005 */
3006
3007 /*
3008 * This uses the standard macro, since the original 0x300 vector
3009 * only has extra guff for STAB-based processors -- which never
3010 * come here.
3011 */
3012
3013USE_FIXED_SECTION(virt_trampolines)
3014 /*
3015 * All code below __end_soft_masked is treated as soft-masked. If
	 * any code runs here with MSR[EE]=1, it must then cope with a pending
	 * soft interrupt being raised (i.e., by ensuring it is replayed).
3018 *
3019 * The __end_interrupts marker must be past the out-of-line (OOL)
3020 * handlers, so that they are copied to real address 0x100 when running
3021 * a relocatable kernel. This ensures they can be reached from the short
3022 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
3023 * directly, without using LOAD_HANDLER().
3024 */
3025 .align 7
3026 .globl __end_interrupts
3027__end_interrupts:
3028DEFINE_FIXED_SYMBOL(__end_interrupts)
3029
3030CLOSE_FIXED_SECTION(real_vectors);
3031CLOSE_FIXED_SECTION(real_trampolines);
3032CLOSE_FIXED_SECTION(virt_vectors);
3033CLOSE_FIXED_SECTION(virt_trampolines);
3034
3035USE_TEXT_SECTION()
3036
3037/* MSR[RI] should be clear because this uses SRR[01] */
3038enable_machine_check:
3039 mflr r0
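	/*
	 * "bcl 20,31,$+4" is the usual read-own-address idiom: it branches
	 * to the next instruction with LK=1, so LR receives the address of
	 * the instruction after the bcl. This particular form is treated
	 * specially by implementations so it need not disturb the link
	 * stack predictor.
	 */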
3040 bcl 20,31,$+4
30410: mflr r3
3042 addi r3,r3,(1f - 0b)
3043 mtspr SPRN_SRR0,r3
3044 mfmsr r3
3045 ori r3,r3,MSR_ME
3046 mtspr SPRN_SRR1,r3
3047 RFI_TO_KERNEL
30481: mtlr r0
3049 blr
3050
3051/* MSR[RI] should be clear because this uses SRR[01] */
3052disable_machine_check:
3053 mflr r0
3054 bcl 20,31,$+4
30550: mflr r3
3056 addi r3,r3,(1f - 0b)
3057 mtspr SPRN_SRR0,r3
3058 mfmsr r3
3059 li r4,MSR_ME
3060 andc r3,r3,r4
3061 mtspr SPRN_SRR1,r3
3062 RFI_TO_KERNEL
30631: mtlr r0
3064 blr
21/*
22 * There are a few constraints to be concerned with.
23 * - Real mode exceptions code/data must be located at their physical location.
24 * - Virtual mode exceptions must be mapped at their 0xc000... location.
25 * - Fixed location code must not call directly beyond the __end_interrupts
26 * area when built with CONFIG_RELOCATABLE. LOAD_HANDLER / bctr sequence
27 * must be used.
28 * - LOAD_HANDLER targets must be within first 64K of physical 0 /
29 * virtual 0xc00...
30 * - Conditional branch targets must be within +/-32K of caller.
31 *
32 * "Virtual exceptions" run with relocation on (MSR_IR=1, MSR_DR=1), and
33 * therefore don't have to run in physically located code or rfid to
34 * virtual mode kernel code. However on relocatable kernels they do have
35 * to branch to KERNELBASE offset because the rest of the kernel (outside
36 * the exception vectors) may be located elsewhere.
37 *
38 * Virtual exceptions correspond with physical, except their entry points
39 * are offset by 0xc000000000000000 and also tend to get an added 0x4000
40 * offset applied. Virtual exceptions are enabled with the Alternate
41 * Interrupt Location (AIL) bit set in the LPCR. However this does not
42 * guarantee they will be delivered virtually. Some conditions (see the ISA)
43 * cause exceptions to be delivered in real mode.
44 *
45 * It's impossible to receive interrupts below 0x300 via AIL.
46 *
47 * KVM: None of the virtual exceptions are from the guest. Anything that
48 * escalated to HV=1 from HV=0 is delivered via real mode handlers.
49 *
50 *
51 * We layout physical memory as follows:
52 * 0x0000 - 0x00ff : Secondary processor spin code
53 * 0x0100 - 0x18ff : Real mode pSeries interrupt vectors
54 * 0x1900 - 0x3fff : Real mode trampolines
55 * 0x4000 - 0x58ff : Relon (IR=1,DR=1) mode pSeries interrupt vectors
56 * 0x5900 - 0x6fff : Relon mode trampolines
57 * 0x7000 - 0x7fff : FWNMI data area
58 * 0x8000 - .... : Common interrupt handlers, remaining early
59 * setup code, rest of kernel.
60 *
61 * We could reclaim 0x4000-0x42ff for real mode trampolines if the space
62 * is necessary. Until then it's more consistent to explicitly put VIRT_NONE
63 * vectors there.
64 */
65OPEN_FIXED_SECTION(real_vectors, 0x0100, 0x1900)
66OPEN_FIXED_SECTION(real_trampolines, 0x1900, 0x4000)
67OPEN_FIXED_SECTION(virt_vectors, 0x4000, 0x5900)
68OPEN_FIXED_SECTION(virt_trampolines, 0x5900, 0x7000)
69#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
70/*
71 * Data area reserved for FWNMI option.
72 * This address (0x7000) is fixed by the RPA.
73 * pseries and powernv need to keep the whole page from
74 * 0x7000 to 0x8000 free for use by the firmware
75 */
76ZERO_FIXED_SECTION(fwnmi_page, 0x7000, 0x8000)
77OPEN_TEXT_SECTION(0x8000)
78#else
79OPEN_TEXT_SECTION(0x7000)
80#endif
81
82USE_FIXED_SECTION(real_vectors)
83
84/*
85 * This is the start of the interrupt handlers for pSeries
86 * This code runs with relocation off.
87 * Code from here to __end_interrupts gets copied down to real
88 * address 0x100 when we are running a relocatable kernel.
89 * Therefore any relative branches in this section must only
90 * branch to labels in this section.
91 */
92 .globl __start_interrupts
93__start_interrupts:
94
95/* No virt vectors corresponding with 0x0..0x100 */
96EXC_VIRT_NONE(0x4000, 0x4100)
97
98
99#ifdef CONFIG_PPC_P7_NAP
100 /*
101 * If running native on arch 2.06 or later, check if we are waking up
102 * from nap/sleep/winkle, and branch to idle handler.
103 */
104#define IDLETEST(n) \
105 BEGIN_FTR_SECTION ; \
106 mfspr r10,SPRN_SRR1 ; \
107 rlwinm. r10,r10,47-31,30,31 ; \
108 beq- 1f ; \
109 cmpwi cr3,r10,2 ; \
110 BRANCH_TO_COMMON(r10, system_reset_idle_common) ; \
1111: \
112 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
113#else
114#define IDLETEST NOTEST
115#endif
116
117EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
118 SET_SCRATCH0(r13)
119 GET_PACA(r13)
120 clrrdi r13,r13,1 /* Last bit of HSPRG0 is set if waking from winkle */
121 EXCEPTION_PROLOG_PSERIES_PACA(PACA_EXGEN, system_reset_common, EXC_STD,
122 IDLETEST, 0x100)
123
124EXC_REAL_END(system_reset, 0x100, 0x200)
125EXC_VIRT_NONE(0x4100, 0x4200)
126
127#ifdef CONFIG_PPC_P7_NAP
128EXC_COMMON_BEGIN(system_reset_idle_common)
129BEGIN_FTR_SECTION
130 GET_PACA(r13) /* Restore HSPRG0 to get the winkle bit in r13 */
131END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
132 bl pnv_restore_hyp_resource
133
134 li r0,PNV_THREAD_RUNNING
135 stb r0,PACA_THREAD_IDLE_STATE(r13) /* Clear thread state */
136
137#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
138 li r0,KVM_HWTHREAD_IN_KERNEL
139 stb r0,HSTATE_HWTHREAD_STATE(r13)
140 /* Order setting hwthread_state vs. testing hwthread_req */
141 sync
142 lbz r0,HSTATE_HWTHREAD_REQ(r13)
143 cmpwi r0,0
144 beq 1f
145 b kvm_start_guest
1461:
147#endif
148
149 /* Return SRR1 from power7_nap() */
150 mfspr r3,SPRN_SRR1
151 blt cr3,2f
152 b pnv_wakeup_loss
1532: b pnv_wakeup_noloss
154#endif
155
156EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
157
158#ifdef CONFIG_PPC_PSERIES
159/*
160 * Vectors for the FWNMI option. Share common code.
161 */
162TRAMP_REAL_BEGIN(system_reset_fwnmi)
163 SET_SCRATCH0(r13) /* save r13 */
164 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
165 NOTEST, 0x100)
166#endif /* CONFIG_PPC_PSERIES */
167
168
169EXC_REAL_BEGIN(machine_check, 0x200, 0x300)
170 /* This is moved out of line as it can be patched by FW, but
171 * some code path might still want to branch into the original
172 * vector
173 */
174 SET_SCRATCH0(r13) /* save r13 */
175 /*
176 * Running native on arch 2.06 or later, we may wakeup from winkle
177 * inside machine check. If yes, then last bit of HSPRG0 would be set
178 * to 1. Hence clear it unconditionally.
179 */
180 GET_PACA(r13)
181 clrrdi r13,r13,1
182 SET_PACA(r13)
183 EXCEPTION_PROLOG_0(PACA_EXMC)
184BEGIN_FTR_SECTION
185 b machine_check_powernv_early
186FTR_SECTION_ELSE
187 b machine_check_pSeries_0
188ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
189EXC_REAL_END(machine_check, 0x200, 0x300)
190EXC_VIRT_NONE(0x4200, 0x4300)
191TRAMP_REAL_BEGIN(machine_check_powernv_early)
192BEGIN_FTR_SECTION
193 EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
194 /*
195 * Register contents:
196 * R13 = PACA
197 * R9 = CR
198 * Original R9 to R13 is saved on PACA_EXMC
199 *
200 * Switch to mc_emergency stack and handle re-entrancy (we limit
201 * the nested MCE upto level 4 to avoid stack overflow).
202 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
203 *
204 * We use paca->in_mce to check whether this is the first entry or
205 * nested machine check. We increment paca->in_mce to track nested
206 * machine checks.
207 *
208 * If this is the first entry then set stack pointer to
209 * paca->mc_emergency_sp, otherwise r1 is already pointing to
210 * stack frame on mc_emergency stack.
211 *
212 * NOTE: We are here with MSR_ME=0 (off), which means we risk a
213 * checkstop if we get another machine check exception before we do
214 * rfid with MSR_ME=1.
215 */
216 mr r11,r1 /* Save r1 */
217 lhz r10,PACA_IN_MCE(r13)
218 cmpwi r10,0 /* Are we in nested machine check */
219 bne 0f /* Yes, we are. */
220 /* First machine check entry */
221 ld r1,PACAMCEMERGSP(r13) /* Use MC emergency stack */
2220: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
223 addi r10,r10,1 /* increment paca->in_mce */
224 sth r10,PACA_IN_MCE(r13)
225 /* Limit nested MCE to level 4 to avoid stack overflow */
226 cmpwi r10,4
227 bgt 2f /* Check if we hit limit of 4 */
228 std r11,GPR1(r1) /* Save r1 on the stack. */
229 std r11,0(r1) /* make stack chain pointer */
230 mfspr r11,SPRN_SRR0 /* Save SRR0 */
231 std r11,_NIP(r1)
232 mfspr r11,SPRN_SRR1 /* Save SRR1 */
233 std r11,_MSR(r1)
234 mfspr r11,SPRN_DAR /* Save DAR */
235 std r11,_DAR(r1)
236 mfspr r11,SPRN_DSISR /* Save DSISR */
237 std r11,_DSISR(r1)
238 std r9,_CCR(r1) /* Save CR in stackframe */
239 /* Save r9 through r13 from EXMC save area to stack frame. */
240 EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
241 mfmsr r11 /* get MSR value */
242 ori r11,r11,MSR_ME /* turn on ME bit */
243 ori r11,r11,MSR_RI /* turn on RI bit */
244 LOAD_HANDLER(r12, machine_check_handle_early)
2451: mtspr SPRN_SRR0,r12
246 mtspr SPRN_SRR1,r11
247 rfid
248 b . /* prevent speculative execution */
2492:
250 /* Stack overflow. Stay on emergency stack and panic.
251 * Keep the ME bit off while panic-ing, so that if we hit
252 * another machine check we checkstop.
253 */
254 addi r1,r1,INT_FRAME_SIZE /* go back to previous stack frame */
255 ld r11,PACAKMSR(r13)
256 LOAD_HANDLER(r12, unrecover_mce)
257 li r10,MSR_ME
258 andc r11,r11,r10 /* Turn off MSR_ME */
259 b 1b
260 b . /* prevent speculative execution */
261END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
262
263TRAMP_REAL_BEGIN(machine_check_pSeries)
264 .globl machine_check_fwnmi
265machine_check_fwnmi:
266 SET_SCRATCH0(r13) /* save r13 */
267 EXCEPTION_PROLOG_0(PACA_EXMC)
268machine_check_pSeries_0:
269 EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST_PR, 0x200)
270 /*
271 * The following is essentially EXCEPTION_PROLOG_PSERIES_1 with the
272 * difference that MSR_RI is not enabled, because PACA_EXMC is being
273 * used, so nested machine check corrupts it. machine_check_common
274 * enables MSR_RI.
275 */
276 ld r10,PACAKMSR(r13)
277 xori r10,r10,MSR_RI
278 mfspr r11,SPRN_SRR0
279 LOAD_HANDLER(r12, machine_check_common)
280 mtspr SPRN_SRR0,r12
281 mfspr r12,SPRN_SRR1
282 mtspr SPRN_SRR1,r10
283 rfid
284 b . /* prevent speculative execution */
285
286TRAMP_KVM_SKIP(PACA_EXMC, 0x200)
287
288EXC_COMMON_BEGIN(machine_check_common)
289 /*
290 * Machine check is different because we use a different
291 * save area: PACA_EXMC instead of PACA_EXGEN.
292 */
293 mfspr r10,SPRN_DAR
294 std r10,PACA_EXMC+EX_DAR(r13)
295 mfspr r10,SPRN_DSISR
296 stw r10,PACA_EXMC+EX_DSISR(r13)
297 EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
298 FINISH_NAP
299 RECONCILE_IRQ_STATE(r10, r11)
300 ld r3,PACA_EXMC+EX_DAR(r13)
301 lwz r4,PACA_EXMC+EX_DSISR(r13)
302 /* Enable MSR_RI when finished with PACA_EXMC */
303 li r10,MSR_RI
304 mtmsrd r10,1
305 std r3,_DAR(r1)
306 std r4,_DSISR(r1)
307 bl save_nvgprs
308 addi r3,r1,STACK_FRAME_OVERHEAD
309 bl machine_check_exception
310 b ret_from_except
311
312#define MACHINE_CHECK_HANDLER_WINDUP \
313 /* Clear MSR_RI before setting SRR0 and SRR1. */\
314 li r0,MSR_RI; \
315 mfmsr r9; /* get MSR value */ \
316 andc r9,r9,r0; \
317 mtmsrd r9,1; /* Clear MSR_RI */ \
318 /* Move original SRR0 and SRR1 into the respective regs */ \
319 ld r9,_MSR(r1); \
320 mtspr SPRN_SRR1,r9; \
321 ld r3,_NIP(r1); \
322 mtspr SPRN_SRR0,r3; \
323 ld r9,_CTR(r1); \
324 mtctr r9; \
325 ld r9,_XER(r1); \
326 mtxer r9; \
327 ld r9,_LINK(r1); \
328 mtlr r9; \
329 REST_GPR(0, r1); \
330 REST_8GPRS(2, r1); \
331 REST_GPR(10, r1); \
332 ld r11,_CCR(r1); \
333 mtcr r11; \
334 /* Decrement paca->in_mce. */ \
335 lhz r12,PACA_IN_MCE(r13); \
336 subi r12,r12,1; \
337 sth r12,PACA_IN_MCE(r13); \
338 REST_GPR(11, r1); \
339 REST_2GPRS(12, r1); \
340 /* restore original r1. */ \
341 ld r1,GPR1(r1)
342
343 /*
344 * Handle machine check early in real mode. We come here with
345 * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
346 */
347EXC_COMMON_BEGIN(machine_check_handle_early)
348 std r0,GPR0(r1) /* Save r0 */
349 EXCEPTION_PROLOG_COMMON_3(0x200)
350 bl save_nvgprs
351 addi r3,r1,STACK_FRAME_OVERHEAD
352 bl machine_check_early
353 std r3,RESULT(r1) /* Save result */
354 ld r12,_MSR(r1)
355#ifdef CONFIG_PPC_P7_NAP
356 /*
357 * Check if thread was in power saving mode. We come here when any
358 * of the following is true:
359 * a. thread wasn't in power saving mode
360 * b. thread was in power saving mode with no state loss,
361 * supervisor state loss or hypervisor state loss.
362 *
363 * Go back to nap/sleep/winkle mode again if (b) is true.
364 */
365 rlwinm. r11,r12,47-31,30,31 /* Was it in power saving mode? */
366 beq 4f /* No, it wasn;t */
367 /* Thread was in power saving mode. Go back to nap again. */
368 cmpwi r11,2
369 blt 3f
370 /* Supervisor/Hypervisor state loss */
371 li r0,1
372 stb r0,PACA_NAPSTATELOST(r13)
3733: bl machine_check_queue_event
374 MACHINE_CHECK_HANDLER_WINDUP
375 GET_PACA(r13)
376 ld r1,PACAR1(r13)
377 /*
378 * Check what idle state this CPU was in and go back to same mode
379 * again.
380 */
381 lbz r3,PACA_THREAD_IDLE_STATE(r13)
382 cmpwi r3,PNV_THREAD_NAP
383 bgt 10f
384 IDLE_STATE_ENTER_SEQ(PPC_NAP)
385 /* No return */
38610:
387 cmpwi r3,PNV_THREAD_SLEEP
388 bgt 2f
389 IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
390 /* No return */
391
3922:
393 /*
394 * Go back to winkle. Please note that this thread was woken up in
395 * machine check from winkle and have not restored the per-subcore
396 * state. Hence before going back to winkle, set last bit of HSPRG0
397 * to 1. This will make sure that if this thread gets woken up
398 * again at reset vector 0x100 then it will get chance to restore
399 * the subcore state.
400 */
401 ori r13,r13,1
402 SET_PACA(r13)
403 IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
404 /* No return */
4054:
406#endif
407 /*
408 * Check if we are coming from hypervisor userspace. If yes then we
409 * continue in host kernel in V mode to deliver the MC event.
410 */
411 rldicl. r11,r12,4,63 /* See if MC hit while in HV mode. */
412 beq 5f
413 andi. r11,r12,MSR_PR /* See if coming from user. */
414 bne 9f /* continue in V mode if we are. */
415
4165:
417#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
418 /*
419 * We are coming from kernel context. Check if we are coming from
420 * guest. if yes, then we can continue. We will fall through
421 * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
422 */
423 lbz r11,HSTATE_IN_GUEST(r13)
424 cmpwi r11,0 /* Check if coming from guest */
425 bne 9f /* continue if we are. */
426#endif
427 /*
428 * At this point we are not sure about what context we come from.
429 * Queue up the MCE event and return from the interrupt.
430 * But before that, check if this is an un-recoverable exception.
431 * If yes, then stay on emergency stack and panic.
432 */
433 andi. r11,r12,MSR_RI
434 bne 2f
4351: mfspr r11,SPRN_SRR0
436 LOAD_HANDLER(r10,unrecover_mce)
437 mtspr SPRN_SRR0,r10
438 ld r10,PACAKMSR(r13)
439 /*
440 * We are going down. But there are chances that we might get hit by
441 * another MCE during panic path and we may run into unstable state
442 * with no way out. Hence, turn ME bit off while going down, so that
443 * when another MCE is hit during panic path, system will checkstop
444 * and hypervisor will get restarted cleanly by SP.
445 */
	li	r3,MSR_ME
	andc	r10,r10,r3		/* Turn off MSR_ME */
	mtspr	SPRN_SRR1,r10
	rfid
	b	.
2:
	/*
	 * Check if we have successfully handled/recovered from the error;
	 * if not, stay on the emergency stack and panic.
	 */
	ld	r3,RESULT(r1)		/* Load result */
	cmpdi	r3,0			/* see if we handled MCE successfully */
	beq	1b			/* if !handled then panic */
	/*
	 * Return from MC interrupt.
	 * Queue up the MCE event so that we can log it later, while
	 * returning from the kernel or an opal call.
	 */
	bl	machine_check_queue_event
	MACHINE_CHECK_HANDLER_WINDUP
	rfid
9:
	/* Deliver the machine check to the host kernel in V mode. */
	MACHINE_CHECK_HANDLER_WINDUP
	b	machine_check_pSeries

EXC_COMMON_BEGIN(unrecover_mce)
	/* Invoke machine_check_exception to print the MCE event and panic. */
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	machine_check_exception
	/*
	 * We will not reach here. Even if we did, there is no way out. Call
	 * unrecoverable_exception and die.
	 */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b


EXC_REAL(data_access, 0x300, 0x380)
EXC_VIRT(data_access, 0x4300, 0x4380, 0x300)
TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)

EXC_COMMON_BEGIN(data_access_common)
	/*
	 * Here r13 points to the paca, r9 contains the saved CR,
	 * SRR0 and SRR1 are saved in r11 and r12,
	 * r9 - r13 are saved in paca->exgen.
	 */
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	li	r5,0x300
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
	b	do_hash_page		/* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
	b	handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
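	/*
	 * The feature section above amounts to the following C sketch
	 * (illustrative; the real selection is patched in at boot):
	 *
	 *	if (!mmu_has_feature(MMU_FTR_TYPE_RADIX))
	 *		do_hash_page();		// try to insert an HPTE
	 *	else
	 *		handle_page_fault();	// radix: straight to fault path
	 */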


EXC_REAL_BEGIN(data_access_slb, 0x380, 0x400)
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	mfspr	r12,SPRN_SRR1
	crset	4*cr6+eq
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
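	/*
	 * CTR is about to be clobbered by the mtctr/bctr below; the old
	 * value travels in r11, and slb_miss_realmode restores it.
	 */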
	mfctr	r11
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif
EXC_REAL_END(data_access_slb, 0x380, 0x400)

EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x4400)
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_DAR
	mfspr	r12,SPRN_SRR1
	crset	4*cr6+eq
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	/*
	 * We can't just use a direct branch to slb_miss_realmode
	 * because the distance from here to there depends on where
	 * the kernel ends up being put.
	 */
	mfctr	r11
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif
EXC_VIRT_END(data_access_slb, 0x4380, 0x4400)
TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)


EXC_REAL(instruction_access, 0x400, 0x480)
EXC_VIRT(instruction_access, 0x4400, 0x4480, 0x400)
TRAMP_KVM(PACA_EXGEN, 0x400)

EXC_COMMON_BEGIN(instruction_access_common)
	EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r12,_MSR(r1)
	ld	r3,_NIP(r1)
	andis.	r4,r12,0x5820
	li	r5,0x400
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
BEGIN_MMU_FTR_SECTION
	b	do_hash_page		/* Try to handle as hpte fault */
MMU_FTR_SECTION_ELSE
	b	handle_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)


EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x500)
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	mfspr	r12,SPRN_SRR1
	crclr	4*cr6+eq
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	mfctr	r11
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif
EXC_REAL_END(instruction_access_slb, 0x480, 0x500)

EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x4500)
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXSLB)
	EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
	std	r3,PACA_EXSLB+EX_R3(r13)
	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
	mfspr	r12,SPRN_SRR1
	crclr	4*cr6+eq
#ifndef CONFIG_RELOCATABLE
	b	slb_miss_realmode
#else
	mfctr	r11
	LOAD_HANDLER(r10, slb_miss_realmode)
	mtctr	r10
	bctr
#endif
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x4500)
TRAMP_KVM(PACA_EXSLB, 0x480)


/* This handler is used by both 0x380 and 0x480 slb miss interrupts */
EXC_COMMON_BEGIN(slb_miss_realmode)
	/*
	 * r13 points to the PACA, r9 contains the saved CR,
	 * r12 contains the saved SRR1, SRR0 is still ready for return,
	 * r3 has the faulting address,
	 * r9 - r13 are saved in paca->exslb,
	 * r3 is saved in paca->exslb (EX_R3),
	 * cr6.eq is set for a D-SLB miss, clear for an I-SLB miss.
	 * We assume we aren't going to take any exceptions during this
	 * procedure.
	 */
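	/*
	 * Control flow, approximately, as a C sketch (helper names are
	 * illustrative; cr0.eq left set means "bad address"):
	 *
	 *	slb_allocate_realmode(ea);	// skipped on radix
	 *	if (bad_address)		// cr0.eq set
	 *		goto bad_addr_slb;	// label 8: full stack frame
	 *	if (!(srr1 & MSR_RI))
	 *		goto unrecov_slb;	// label 2
	 *	restore_regs();			// then rfid straight back
	 */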
	mflr	r10
#ifdef CONFIG_RELOCATABLE
	mtctr	r11
#endif

	stw	r9,PACA_EXSLB+EX_CCR(r13)	/* save CR in exc. frame */
	std	r10,PACA_EXSLB+EX_LR(r13)	/* save LR */
	std	r3,PACA_EXSLB+EX_DAR(r13)

	crset	4*cr0+eq
#ifdef CONFIG_PPC_STD_MMU_64
BEGIN_MMU_FTR_SECTION
	bl	slb_allocate_realmode
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
#endif

	ld	r10,PACA_EXSLB+EX_LR(r13)
	ld	r3,PACA_EXSLB+EX_R3(r13)
	lwz	r9,PACA_EXSLB+EX_CCR(r13)	/* get saved CR */
	mtlr	r10

	beq	8f		/* if bad address, make full stack frame */

	andi.	r10,r12,MSR_RI	/* check for unrecoverable exception */
	beq-	2f

	/* All done -- return from exception. */

.machine	push
.machine	"power4"
	mtcrf	0x80,r9
	mtcrf	0x02,r9		/* I/D indication is in cr6 */
	mtcrf	0x01,r9		/* slb_allocate uses cr0 and cr7 */
.machine	pop

	RESTORE_PPR_PACA(PACA_EXSLB, r9)
	ld	r9,PACA_EXSLB+EX_R9(r13)
	ld	r10,PACA_EXSLB+EX_R10(r13)
	ld	r11,PACA_EXSLB+EX_R11(r13)
	ld	r12,PACA_EXSLB+EX_R12(r13)
	ld	r13,PACA_EXSLB+EX_R13(r13)
	rfid
	b	.	/* prevent speculative execution */

2:	mfspr	r11,SPRN_SRR0
	LOAD_HANDLER(r10,unrecov_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

8:	mfspr	r11,SPRN_SRR0
	LOAD_HANDLER(r10,bad_addr_slb)
	mtspr	SPRN_SRR0,r10
	ld	r10,PACAKMSR(r13)
	mtspr	SPRN_SRR1,r10
	rfid
	b	.

EXC_COMMON_BEGIN(unrecov_slb)
	EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
	RECONCILE_IRQ_STATE(r10, r11)
	bl	save_nvgprs
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unrecoverable_exception
	b	1b

EXC_COMMON_BEGIN(bad_addr_slb)
	EXCEPTION_PROLOG_COMMON(0x380, PACA_EXSLB)
	RECONCILE_IRQ_STATE(r10, r11)
	ld	r3, PACA_EXSLB+EX_DAR(r13)
	std	r3, _DAR(r1)
	beq	cr6, 2f
	li	r10, 0x480		/* fix trap number for I-SLB miss */
	std	r10, _TRAP(r1)
2:	bl	save_nvgprs
	addi	r3, r1, STACK_FRAME_OVERHEAD
	bl	slb_miss_bad_addr
	b	ret_from_except

EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x600)
	.globl hardware_interrupt_hv;
hardware_interrupt_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
					    EXC_HV, SOFTEN_TEST_HV)
do_kvm_H0x500:
		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
	FTR_SECTION_ELSE
		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt_common,
					    EXC_STD, SOFTEN_TEST_PR)
do_kvm_0x500:
		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
EXC_REAL_END(hardware_interrupt, 0x500, 0x600)

EXC_VIRT_BEGIN(hardware_interrupt, 0x4500, 0x4600)
	.globl hardware_interrupt_relon_hv;
hardware_interrupt_relon_hv:
	BEGIN_FTR_SECTION
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt_common, EXC_HV, SOFTEN_TEST_HV)
	FTR_SECTION_ELSE
		_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt_common, EXC_STD, SOFTEN_TEST_PR)
	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
EXC_VIRT_END(hardware_interrupt, 0x4500, 0x4600)

EXC_COMMON_ASYNC(hardware_interrupt_common, 0x500, do_IRQ)


EXC_REAL(alignment, 0x600, 0x700)
EXC_VIRT(alignment, 0x4600, 0x4700, 0x600)
TRAMP_KVM(PACA_EXGEN, 0x600)
EXC_COMMON_BEGIN(alignment_common)
	mfspr	r10,SPRN_DAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_DSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
	ld	r3,PACA_EXGEN+EX_DAR(r13)
	lwz	r4,PACA_EXGEN+EX_DSISR(r13)
	std	r3,_DAR(r1)
	std	r4,_DSISR(r1)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	alignment_exception
	b	ret_from_except


EXC_REAL(program_check, 0x700, 0x800)
EXC_VIRT(program_check, 0x4700, 0x4800, 0x700)
TRAMP_KVM(PACA_EXGEN, 0x700)
EXC_COMMON_BEGIN(program_check_common)
	EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	program_check_exception
	b	ret_from_except


EXC_REAL(fp_unavailable, 0x800, 0x900)
EXC_VIRT(fp_unavailable, 0x4800, 0x4900, 0x800)
TRAMP_KVM(PACA_EXGEN, 0x800)
EXC_COMMON_BEGIN(fp_unavailable_common)
	EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
	bne	1f			/* if from user, just load it up */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_fp_unavailable_exception
	BUG_OPCODE
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	/*
	 * Test if the two TM state bits are zero. If non-zero (i.e.
	 * userspace was in a transaction), go do the TM handling.
	 */
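	/* In C: if (((msr >> MSR_TS_LG) & 3) != 0) goto tm_path; */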
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
	bl	load_up_fpu
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	fp_unavailable_tm
	b	ret_from_except
#endif


EXC_REAL_MASKABLE(decrementer, 0x900, 0x980)
EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x4980, 0x900)
TRAMP_KVM(PACA_EXGEN, 0x900)
EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)


EXC_REAL_HV(hdecrementer, 0x980, 0xa00)
EXC_VIRT_HV(hdecrementer, 0x4980, 0x4a00, 0x980)
TRAMP_KVM_HV(PACA_EXGEN, 0x980)
EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)


EXC_REAL_MASKABLE(doorbell_super, 0xa00, 0xb00)
EXC_VIRT_MASKABLE(doorbell_super, 0x4a00, 0x4b00, 0xa00)
TRAMP_KVM(PACA_EXGEN, 0xa00)
#ifdef CONFIG_PPC_DOORBELL
EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, doorbell_exception)
#else
EXC_COMMON_ASYNC(doorbell_super_common, 0xa00, unknown_exception)
#endif


EXC_REAL(trap_0b, 0xb00, 0xc00)
EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00)
TRAMP_KVM(PACA_EXGEN, 0xb00)
EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)

#define LOAD_SYSCALL_HANDLER(reg)					\
	__LOAD_HANDLER(reg, system_call_common)

/* Syscall routine is used twice, in reloc-off and reloc-on paths */
#define SYSCALL_PSERIES_1						\
BEGIN_FTR_SECTION							\
	cmpdi	r0,0x1ebe ;						\
	beq-	1f ;							\
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)					\
	mr	r9,r13 ;						\
	GET_PACA(r13) ;							\
	mfspr	r11,SPRN_SRR0 ;						\
0:

#define SYSCALL_PSERIES_2_RFID						\
	mfspr	r12,SPRN_SRR1 ;						\
	LOAD_SYSCALL_HANDLER(r10) ;					\
	mtspr	SPRN_SRR0,r10 ;						\
	ld	r10,PACAKMSR(r13) ;					\
	mtspr	SPRN_SRR1,r10 ;						\
	rfid ;								\
	b	. ;	/* prevent speculative execution */

#define SYSCALL_PSERIES_3						\
	/* Fast LE/BE switch system call */				\
1:	mfspr	r12,SPRN_SRR1 ;						\
	xori	r12,r12,MSR_LE ;					\
	mtspr	SPRN_SRR1,r12 ;						\
	rfid ;		/* return to userspace */			\
	b	. ;	/* prevent speculative execution */
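/*
 * The CPU_FTR_REAL_LE fast path above is, as a C sketch (illustrative
 * names; the 0x1ebe magic comes from the cmpdi in SYSCALL_PSERIES_1):
 *
 *	if (syscall_nr == 0x1ebe) {	// fast endian-switch syscall
 *		srr1 ^= MSR_LE;		// flip the endian bit
 *		return_to_user();	// rfid, no full kernel entry
 *	}
 */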

#if defined(CONFIG_RELOCATABLE)
	/*
	 * We can't branch directly so we do it via the CTR, which
	 * is volatile across system calls.
	 */
#define SYSCALL_PSERIES_2_DIRECT					\
	LOAD_SYSCALL_HANDLER(r12) ;					\
	mtctr	r12 ;							\
	mfspr	r12,SPRN_SRR1 ;						\
	li	r10,MSR_RI ;						\
	mtmsrd	r10,1 ;							\
	bctr ;
#else
	/* We can branch directly */
#define SYSCALL_PSERIES_2_DIRECT					\
	mfspr	r12,SPRN_SRR1 ;						\
	li	r10,MSR_RI ;						\
	mtmsrd	r10,1 ;		/* Set RI (EE=0) */			\
	b	system_call_common ;
#endif

EXC_REAL_BEGIN(system_call, 0xc00, 0xd00)
	/*
	 * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems
	 * that support it) before changing to HMT_MEDIUM. That allows the KVM
	 * code to save that value into the guest state (it is the guest's PPR
	 * value). Otherwise just change to HMT_MEDIUM, as userspace has
	 * already saved the PPR.
	 */
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9,PACA_EXGEN+EX_R9(r13)
	OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);
	HMT_MEDIUM;
	std	r10,PACA_EXGEN+EX_R10(r13)
	OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR);
	mfcr	r9
	KVMTEST_PR(0xc00)
	GET_SCRATCH0(r13)
#else
	HMT_MEDIUM;
#endif
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_RFID
	SYSCALL_PSERIES_3
EXC_REAL_END(system_call, 0xc00, 0xd00)

EXC_VIRT_BEGIN(system_call, 0x4c00, 0x4d00)
	HMT_MEDIUM
	SYSCALL_PSERIES_1
	SYSCALL_PSERIES_2_DIRECT
	SYSCALL_PSERIES_3
EXC_VIRT_END(system_call, 0x4c00, 0x4d00)

TRAMP_KVM(PACA_EXGEN, 0xc00)


EXC_REAL(single_step, 0xd00, 0xe00)
EXC_VIRT(single_step, 0x4d00, 0x4e00, 0xd00)
TRAMP_KVM(PACA_EXGEN, 0xd00)
EXC_COMMON(single_step_common, 0xd00, single_step_exception)

EXC_REAL_OOL_HV(h_data_storage, 0xe00, 0xe20)
EXC_VIRT_NONE(0x4e00, 0x4e20)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0xe00)
EXC_COMMON_BEGIN(h_data_storage_common)
	mfspr	r10,SPRN_HDAR
	std	r10,PACA_EXGEN+EX_DAR(r13)
	mfspr	r10,SPRN_HDSISR
	stw	r10,PACA_EXGEN+EX_DSISR(r13)
	EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	unknown_exception
	b	ret_from_except


EXC_REAL_OOL_HV(h_instr_storage, 0xe20, 0xe40)
EXC_VIRT_NONE(0x4e20, 0x4e40)
TRAMP_KVM_HV(PACA_EXGEN, 0xe20)
EXC_COMMON(h_instr_storage_common, 0xe20, unknown_exception)


EXC_REAL_OOL_HV(emulation_assist, 0xe40, 0xe60)
EXC_VIRT_OOL_HV(emulation_assist, 0x4e40, 0x4e60, 0xe40)
TRAMP_KVM_HV(PACA_EXGEN, 0xe40)
EXC_COMMON(emulation_assist_common, 0xe40, emulation_assist_interrupt)


/*
 * The hmi_exception trampoline is a special case. It jumps to
 * hmi_exception_early first, and then eventually from there to the
 * trampoline to get into virtual mode.
 */
__EXC_REAL_OOL_HV_DIRECT(hmi_exception, 0xe60, 0xe80, hmi_exception_early)
__TRAMP_REAL_REAL_OOL_MASKABLE_HV(hmi_exception, 0xe60)
EXC_VIRT_NONE(0x4e60, 0x4e80)
TRAMP_KVM_HV(PACA_EXGEN, 0xe60)
TRAMP_REAL_BEGIN(hmi_exception_early)
	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_HV, 0xe60)
	mr	r10,r1			/* Save r1 */
	ld	r1,PACAEMERGSP(r13)	/* Use emergency stack */
	subi	r1,r1,INT_FRAME_SIZE	/* alloc stack frame */
	std	r9,_CCR(r1)		/* save CR in stackframe */
	mfspr	r11,SPRN_HSRR0		/* Save HSRR0 */
	std	r11,_NIP(r1)		/* save HSRR0 in stackframe */
	mfspr	r12,SPRN_HSRR1		/* Save HSRR1 */
	std	r12,_MSR(r1)		/* save HSRR1 in stackframe */
	std	r10,0(r1)		/* make stack chain pointer */
	std	r0,GPR0(r1)		/* save r0 in stackframe */
	std	r10,GPR1(r1)		/* save r1 in stackframe */
	EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
	EXCEPTION_PROLOG_COMMON_3(0xe60)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	hmi_exception_realmode
	/* Windup the stack. */
	/* Move original HSRR0 and HSRR1 into the respective regs */
	ld	r9,_MSR(r1)
	mtspr	SPRN_HSRR1,r9
	ld	r3,_NIP(r1)
	mtspr	SPRN_HSRR0,r3
	ld	r9,_CTR(r1)
	mtctr	r9
	ld	r9,_XER(r1)
	mtxer	r9
	ld	r9,_LINK(r1)
	mtlr	r9
	REST_GPR(0, r1)
	REST_8GPRS(2, r1)
	REST_GPR(10, r1)
	ld	r11,_CCR(r1)
	mtcr	r11
	REST_GPR(11, r1)
	REST_2GPRS(12, r1)
	/* restore original r1. */
	ld	r1,GPR1(r1)

	/*
	 * Go to virtual mode and pull the HMI event information from
	 * firmware.
	 */
	.globl hmi_exception_after_realmode
hmi_exception_after_realmode:
	SET_SCRATCH0(r13)
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	b	tramp_real_hmi_exception

EXC_COMMON_ASYNC(hmi_exception_common, 0xe60, handle_hmi_exception)


EXC_REAL_OOL_MASKABLE_HV(h_doorbell, 0xe80, 0xea0)
EXC_VIRT_OOL_MASKABLE_HV(h_doorbell, 0x4e80, 0x4ea0, 0xe80)
TRAMP_KVM_HV(PACA_EXGEN, 0xe80)
#ifdef CONFIG_PPC_DOORBELL
EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, doorbell_exception)
#else
EXC_COMMON_ASYNC(h_doorbell_common, 0xe80, unknown_exception)
#endif


EXC_REAL_OOL_MASKABLE_HV(h_virt_irq, 0xea0, 0xec0)
EXC_VIRT_OOL_MASKABLE_HV(h_virt_irq, 0x4ea0, 0x4ec0, 0xea0)
TRAMP_KVM_HV(PACA_EXGEN, 0xea0)
EXC_COMMON_ASYNC(h_virt_irq_common, 0xea0, do_IRQ)


EXC_REAL_NONE(0xec0, 0xf00)
EXC_VIRT_NONE(0x4ec0, 0x4f00)


EXC_REAL_OOL(performance_monitor, 0xf00, 0xf20)
EXC_VIRT_OOL(performance_monitor, 0x4f00, 0x4f20, 0xf00)
TRAMP_KVM(PACA_EXGEN, 0xf00)
EXC_COMMON_ASYNC(performance_monitor_common, 0xf00, performance_monitor_exception)


EXC_REAL_OOL(altivec_unavailable, 0xf20, 0xf40)
EXC_VIRT_OOL(altivec_unavailable, 0x4f20, 0x4f40, 0xf20)
TRAMP_KVM(PACA_EXGEN, 0xf20)
EXC_COMMON_BEGIN(altivec_unavailable_common)
	EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/*
	 * Test if the two TM state bits are zero. If non-zero (i.e.
	 * userspace was in a transaction), go do the TM handling.
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	bl	load_up_altivec
	b	fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	altivec_unavailable_exception
	b	ret_from_except


EXC_REAL_OOL(vsx_unavailable, 0xf40, 0xf60)
EXC_VIRT_OOL(vsx_unavailable, 0x4f40, 0x4f60, 0xf40)
TRAMP_KVM(PACA_EXGEN, 0xf40)
EXC_COMMON_BEGIN(vsx_unavailable_common)
	EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	beq	1f
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
	/*
	 * Test if the two TM state bits are zero. If non-zero (i.e.
	 * userspace was in a transaction), go do the TM handling.
	 */
	rldicl.	r0, r12, (64-MSR_TS_LG), (64-2)
	bne-	2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
	b	load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:	/* User process was in a transaction */
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_tm
	b	ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	bl	save_nvgprs
	RECONCILE_IRQ_STATE(r10, r11)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	vsx_unavailable_exception
	b	ret_from_except


EXC_REAL_OOL(facility_unavailable, 0xf60, 0xf80)
EXC_VIRT_OOL(facility_unavailable, 0x4f60, 0x4f80, 0xf60)
TRAMP_KVM(PACA_EXGEN, 0xf60)
EXC_COMMON(facility_unavailable_common, 0xf60, facility_unavailable_exception)


EXC_REAL_OOL_HV(h_facility_unavailable, 0xf80, 0xfa0)
EXC_VIRT_OOL_HV(h_facility_unavailable, 0x4f80, 0x4fa0, 0xf80)
TRAMP_KVM_HV(PACA_EXGEN, 0xf80)
EXC_COMMON(h_facility_unavailable_common, 0xf80, facility_unavailable_exception)


EXC_REAL_NONE(0xfa0, 0x1200)
EXC_VIRT_NONE(0x4fa0, 0x5200)

#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_system_error, 0x1200, 0x1300)
EXC_VIRT_NONE(0x5200, 0x5300)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1200)
EXC_COMMON(cbe_system_error_common, 0x1200, cbe_system_error_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1200, 0x1300)
EXC_VIRT_NONE(0x5200, 0x5300)
#endif


EXC_REAL(instruction_breakpoint, 0x1300, 0x1400)
EXC_VIRT(instruction_breakpoint, 0x5300, 0x5400, 0x1300)
TRAMP_KVM_SKIP(PACA_EXGEN, 0x1300)
EXC_COMMON(instruction_breakpoint_common, 0x1300, instruction_breakpoint_exception)

EXC_REAL_NONE(0x1400, 0x1500)
EXC_VIRT_NONE(0x5400, 0x5500)

EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x1600)
	mtspr	SPRN_SPRG_HSCRATCH0,r13
	EXCEPTION_PROLOG_0(PACA_EXGEN)
	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)

#ifdef CONFIG_PPC_DENORMALISATION
	mfspr	r10,SPRN_HSRR1
	mfspr	r11,SPRN_HSRR0		/* save HSRR0 */
	andis.	r10,r10,(HSRR1_DENORM)@h /* denorm? */
	addi	r11,r11,-4		/* HSRR0 is next instruction */
	bne+	denorm_assist
#endif

	KVMTEST_PR(0x1500)
	EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
EXC_REAL_END(denorm_exception_hv, 0x1500, 0x1600)

#ifdef CONFIG_PPC_DENORMALISATION
EXC_VIRT_BEGIN(denorm_exception, 0x5500, 0x5600)
	b	exc_real_0x1500_denorm_exception_hv
EXC_VIRT_END(denorm_exception, 0x5500, 0x5600)
#else
EXC_VIRT_NONE(0x5500, 0x5600)
#endif

TRAMP_KVM_SKIP(PACA_EXGEN, 0x1500)

#ifdef CONFIG_PPC_DENORMALISATION
TRAMP_REAL_BEGIN(denorm_assist)
BEGIN_FTR_SECTION
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER6 do that here for all FP regs.
 */
	mfmsr	r10
	ori	r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
	xori	r10,r10,(MSR_FE0|MSR_FE1)
	mtmsrd	r10
	sync

#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
#define FMR4(n)  FMR2(n) ; FMR2(n+2)
#define FMR8(n)  FMR4(n) ; FMR4(n+4)
#define FMR16(n) FMR8(n) ; FMR8(n+8)
#define FMR32(n) FMR16(n) ; FMR16(n+16)
	FMR32(0)
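/*
 * By repeated doubling, FMR32(0) expands to "fmr 0,0 ; fmr 1,1 ; ... ;
 * fmr 31,31", touching each of the 32 FP registers exactly once.
 */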

FTR_SECTION_ELSE
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER7 do that here for the first 32 VSX registers only.
 */
	mfmsr	r10
	oris	r10,r10,MSR_VSX@h
	mtmsrd	r10
	sync

#define XVCPSGNDP2(n)  XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
#define XVCPSGNDP4(n)  XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
#define XVCPSGNDP8(n)  XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
	XVCPSGNDP32(0)

ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)

BEGIN_FTR_SECTION
	b	denorm_done
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/*
 * To denormalise we need to move a copy of the register to itself.
 * For POWER8 we need to do that for all 64 VSX registers.
 */
	XVCPSGNDP32(32)
denorm_done:
	mtspr	SPRN_HSRR0,r11
	mtcrf	0x80,r9
	ld	r9,PACA_EXGEN+EX_R9(r13)
	RESTORE_PPR_PACA(PACA_EXGEN, r10)
BEGIN_FTR_SECTION
	ld	r10,PACA_EXGEN+EX_CFAR(r13)
	mtspr	SPRN_CFAR,r10
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r10,PACA_EXGEN+EX_R10(r13)
	ld	r11,PACA_EXGEN+EX_R11(r13)
	ld	r12,PACA_EXGEN+EX_R12(r13)
	ld	r13,PACA_EXGEN+EX_R13(r13)
	HRFID
	b	.
#endif

EXC_COMMON_HV(denorm_common, 0x1500, unknown_exception)


#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_maintenance, 0x1600, 0x1700)
EXC_VIRT_NONE(0x5600, 0x5700)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1600)
EXC_COMMON(cbe_maintenance_common, 0x1600, cbe_maintenance_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1600, 0x1700)
EXC_VIRT_NONE(0x5600, 0x5700)
#endif


EXC_REAL(altivec_assist, 0x1700, 0x1800)
EXC_VIRT(altivec_assist, 0x5700, 0x5800, 0x1700)
TRAMP_KVM(PACA_EXGEN, 0x1700)
#ifdef CONFIG_ALTIVEC
EXC_COMMON(altivec_assist_common, 0x1700, altivec_assist_exception)
#else
EXC_COMMON(altivec_assist_common, 0x1700, unknown_exception)
#endif


#ifdef CONFIG_CBE_RAS
EXC_REAL_HV(cbe_thermal, 0x1800, 0x1900)
EXC_VIRT_NONE(0x5800, 0x5900)
TRAMP_KVM_HV_SKIP(PACA_EXGEN, 0x1800)
EXC_COMMON(cbe_thermal_common, 0x1800, cbe_thermal_exception)
#else /* CONFIG_CBE_RAS */
EXC_REAL_NONE(0x1800, 0x1900)
EXC_VIRT_NONE(0x5800, 0x5900)
#endif


/*
 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 * - If it was a decrementer interrupt, we bump the dec to max and return.
 * - If it was a doorbell we return immediately since doorbells are edge
 *   triggered and won't automatically refire.
 * - If it was an HMI we return immediately since we handled it in realmode
 *   and it won't refire.
 * - Else we hard-disable and return.
 * This is called with r10 containing the value to OR into the paca field.
 */
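/*
 * As a C sketch, the macro below implements roughly (illustrative
 * helper names):
 *
 *	paca->irq_happened |= reason;		// r10
 *	if (reason == PACA_IRQ_DEC)
 *		set_dec(0x7fffffff);		// push DEC far into the future
 *	else if (reason != PACA_IRQ_DBELL && reason != PACA_IRQ_HMI)
 *		srr1 &= ~MSR_EE;		// hard disable before returning
 *	return_from_interrupt();		// rfid/hrfid
 */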
#define MASKED_INTERRUPT(_H)				\
masked_##_H##interrupt:					\
	std	r11,PACA_EXGEN+EX_R11(r13);		\
	lbz	r11,PACAIRQHAPPENED(r13);		\
	or	r11,r11,r10;				\
	stb	r11,PACAIRQHAPPENED(r13);		\
	cmpwi	r10,PACA_IRQ_DEC;			\
	bne	1f;					\
	lis	r10,0x7fff;				\
	ori	r10,r10,0xffff;				\
	mtspr	SPRN_DEC,r10;				\
	b	2f;					\
1:	cmpwi	r10,PACA_IRQ_DBELL;			\
	beq	2f;					\
	cmpwi	r10,PACA_IRQ_HMI;			\
	beq	2f;					\
	mfspr	r10,SPRN_##_H##SRR1;			\
	rldicl	r10,r10,48,1;	/* clear MSR_EE */	\
	rotldi	r10,r10,16;				\
	mtspr	SPRN_##_H##SRR1,r10;			\
2:	mtcrf	0x80,r9;				\
	ld	r9,PACA_EXGEN+EX_R9(r13);		\
	ld	r10,PACA_EXGEN+EX_R10(r13);		\
	ld	r11,PACA_EXGEN+EX_R11(r13);		\
	GET_SCRATCH0(r13);				\
	##_H##rfid;					\
	b	.

/*
 * Real mode exceptions actually use this too, but alternate
 * instruction code patches (which end up in the common .text area)
 * cannot reach these if they are put there.
 */
USE_FIXED_SECTION(virt_trampolines)
	MASKED_INTERRUPT()
	MASKED_INTERRUPT(H)

#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
TRAMP_REAL_BEGIN(kvmppc_skip_interrupt)
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_SRR0
	addi	r13, r13, 4
	mtspr	SPRN_SRR0, r13
	GET_SCRATCH0(r13)
	rfid
	b	.

TRAMP_REAL_BEGIN(kvmppc_skip_Hinterrupt)
	/*
	 * Here all GPRs are unchanged from when the interrupt happened
	 * except for r13, which is saved in SPRG_SCRATCH0.
	 */
	mfspr	r13, SPRN_HSRR0
	addi	r13, r13, 4
	mtspr	SPRN_HSRR0, r13
	GET_SCRATCH0(r13)
	hrfid
	b	.
#endif

/*
 * Ensure that any handlers that get invoked from the exception prologs
 * above are below the first 64KB (0x10000) of the kernel image, because
 * the prologs assemble the addresses of these handlers using the
 * LOAD_HANDLER macro, which uses an ori instruction.
 */

/*** Common interrupt handlers ***/


	/*
	 * Relocation-on interrupts: A subset of the interrupts can be delivered
	 * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
	 * them. Addresses are the same as the original interrupt addresses, but
	 * offset by 0xc000000000004000.
	 * It's impossible to receive interrupts below 0x300 via this mechanism.
	 * KVM: None of these traps are from the guest; anything that escalated
	 * to HV=1 from HV=0 is delivered via real mode handlers.
	 */
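	/*
	 * For example, an external interrupt (0x500) taken with AIL==2 is
	 * then delivered at 0xc000000000004500.
	 */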

	/*
	 * This uses the standard macro, since the original 0x300 vector
	 * only has extra guff for STAB-based processors -- which never
	 * come here.
	 */

EXC_COMMON_BEGIN(ppc64_runlatch_on_trampoline)
	b	__ppc64_runlatch_on

USE_FIXED_SECTION(virt_trampolines)
	/*
	 * The __end_interrupts marker must be past the out-of-line (OOL)
	 * handlers, so that they are copied to real address 0x100 when running
	 * a relocatable kernel. This ensures they can be reached from the short
	 * trampoline handlers (like 0x4f00, 0x4f20, etc.) which branch
	 * directly, without using LOAD_HANDLER().
	 */
	.align	7
	.globl	__end_interrupts
__end_interrupts:
DEFINE_FIXED_SYMBOL(__end_interrupts)

#ifdef CONFIG_PPC_970_NAP
EXC_COMMON_BEGIN(power4_fixup_nap)
	andc	r9,r9,r10
	std	r9,TI_LOCAL_FLAGS(r11)
	ld	r10,_LINK(r1)		/* make idle task do the */
	std	r10,_NIP(r1)		/* equivalent of a blr */
	blr
#endif

CLOSE_FIXED_SECTION(real_vectors);
CLOSE_FIXED_SECTION(real_trampolines);
CLOSE_FIXED_SECTION(virt_vectors);
CLOSE_FIXED_SECTION(virt_trampolines);

USE_TEXT_SECTION()

/*
 * Hash table stuff
 */
	.balign	IFETCH_ALIGN_BYTES
do_hash_page:
#ifdef CONFIG_PPC_STD_MMU_64
	andis.	r0,r4,0xa410		/* weird error? */
	bne-	handle_page_fault	/* if so, don't try to insert an HPTE */
	andis.	r0,r4,DSISR_DABRMATCH@h
	bne-	handle_dabr_fault
	CURRENT_THREAD_INFO(r11, r1)
	lwz	r0,TI_PREEMPT(r11)	/* If we're in an "NMI" */
	andis.	r0,r0,NMI_MASK@h	/* (i.e. an irq when soft-disabled) */
	bne	77f			/* then don't call hash_page now */

	/*
	 * r3 contains the faulting address,
	 * r4 contains the MSR,
	 * r5 contains the trap number,
	 * r6 contains the DSISR.
	 *
	 * At return, r3 = 0 for success, 1 for page fault, negative for error.
	 */
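	/*
	 * Viewed from C, the call below behaves roughly like the sketch
	 * (the real prototype lives in the hash MMU code):
	 *
	 *	long __hash_page(unsigned long ea,	// r3
	 *			 unsigned long msr,	// r4
	 *			 unsigned long trap,	// r5
	 *			 unsigned long dsisr);	// r6
	 *
	 * returning 0 on success, 1 to take the page-fault path, and a
	 * negative value on error.
	 */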
	mr	r4,r12
	ld	r6,_DSISR(r1)
	bl	__hash_page		/* build HPTE if possible */
	cmpdi	r3,0			/* see if __hash_page succeeded */

	/* Success */
	beq	fast_exc_return_irq	/* Return from exception on success */

	/* Error */
	blt-	13f
#endif /* CONFIG_PPC_STD_MMU_64 */

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_page_fault
	cmpdi	r3,0
	beq+	12f
	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	lwz	r4,_DAR(r1)
	bl	bad_page_fault
	b	ret_from_except

/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
	bl	save_nvgprs
	ld	r4,_DAR(r1)
	ld	r5,_DSISR(r1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	do_break
12:	b	ret_from_except_lite


#ifdef CONFIG_PPC_STD_MMU_64
/*
 * We have a page fault that hash_page could handle, but the HV
 * refused the PTE insertion.
 */
13:	bl	save_nvgprs
	mr	r5,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	ld	r4,_DAR(r1)
	bl	low_hash_fault
	b	ret_from_except
#endif

/*
 * We come here as a result of a DSI at a point where we don't want
 * to call hash_page, such as when we are accessing memory (possibly
 * user memory) inside a PMU interrupt that occurred while interrupts
 * were soft-disabled. We want to invoke the exception handler for
 * the access, or panic if there isn't a handler.
 */
77:	bl	save_nvgprs
	mr	r4,r3
	addi	r3,r1,STACK_FRAME_OVERHEAD
	li	r5,SIGSEGV
	bl	bad_page_fault
	b	ret_from_except

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,64+INT_FRAME_SIZE
	std	r9,_CCR(r1)
	std	r10,GPR1(r1)
	std	r11,_NIP(r1)
	std	r12,_MSR(r1)
	mfspr	r11,SPRN_DAR
	mfspr	r12,SPRN_DSISR
	std	r11,_DAR(r1)
	std	r12,_DSISR(r1)
	mflr	r10
	mfctr	r11
	mfxer	r12
	std	r10,_LINK(r1)
	std	r11,_CTR(r1)
	std	r12,_XER(r1)
	SAVE_GPR(0,r1)
	SAVE_GPR(2,r1)
	ld	r10,EX_R3(r3)
	std	r10,GPR3(r1)
	SAVE_GPR(4,r1)
	SAVE_4GPRS(5,r1)
	ld	r9,EX_R9(r3)
	ld	r10,EX_R10(r3)
	SAVE_2GPRS(9,r1)
	ld	r9,EX_R11(r3)
	ld	r10,EX_R12(r3)
	ld	r11,EX_R13(r3)
	std	r9,GPR11(r1)
	std	r10,GPR12(r1)
	std	r11,GPR13(r1)
BEGIN_FTR_SECTION
	ld	r10,EX_CFAR(r3)
	std	r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	SAVE_8GPRS(14,r1)
	SAVE_10GPRS(22,r1)
	lhz	r12,PACA_TRAP_SAVE(r13)
	std	r12,_TRAP(r1)
	addi	r11,r1,INT_FRAME_SIZE
	std	r11,0(r1)
	li	r12,0
	std	r12,0(r11)
	ld	r2,PACATOC(r13)
	ld	r11,exception_marker@toc(r2)
	std	r12,RESULT(r1)
	std	r11,STACK_FRAME_OVERHEAD-16(r1)
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	kernel_bad_stack
	b	1b

/*
 * Called from arch_local_irq_enable when an interrupt needs
 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
 * which kind of interrupt. MSR:EE is already off. We generate a
 * stackframe as if a real interrupt had happened.
 *
 * Note: While MSR:EE is off, we need to make sure that _MSR
 * in the generated frame has EE set to 1, or the exception
 * handler will not properly re-enable interrupts.
 */
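/*
 * Equivalent dispatch as a C sketch (the *_common symbols are the asm
 * handlers defined above):
 *
 *	switch (vector) {			// r3
 *	case 0x900: goto decrementer_common;
 *	case 0x500: goto hardware_interrupt_common;
 *	case 0xe80: goto h_doorbell_common;	// CPU_FTR_HVMODE set
 *	case 0xea0: goto h_virt_irq_common;	// CPU_FTR_HVMODE set
 *	case 0xe60: goto hmi_exception_common;	// CPU_FTR_HVMODE set
 *	case 0xa00: goto doorbell_super_common;	// CPU_FTR_HVMODE clear
 *	}
 */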
_GLOBAL(__replay_interrupt)
	/*
	 * We are going to jump to the exception common code which
	 * will retrieve various register values from the PACA which
	 * we don't give a damn about, so we don't bother storing them.
	 */
	mfmsr	r12
	mflr	r11
	mfcr	r9
	ori	r12,r12,MSR_EE
	cmpwi	r3,0x900
	beq	decrementer_common
	cmpwi	r3,0x500
	beq	hardware_interrupt_common
BEGIN_FTR_SECTION
	cmpwi	r3,0xe80
	beq	h_doorbell_common
	cmpwi	r3,0xea0
	beq	h_virt_irq_common
	cmpwi	r3,0xe60
	beq	hmi_exception_common
FTR_SECTION_ELSE
	cmpwi	r3,0xa00
	beq	doorbell_super_common
ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
	blr