Linux Audio

Check our new training course

Loading...
v5.9
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/*
   3 *  Boot code and exception vectors for Book3E processors
   4 *
   5 *  Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
 
 
 
 
 
   6 */
   7
   8#include <linux/threads.h>
   9#include <asm/reg.h>
  10#include <asm/page.h>
  11#include <asm/ppc_asm.h>
  12#include <asm/asm-offsets.h>
  13#include <asm/cputable.h>
  14#include <asm/setup.h>
  15#include <asm/thread_info.h>
  16#include <asm/reg_a2.h>
  17#include <asm/exception-64e.h>
  18#include <asm/bug.h>
  19#include <asm/irqflags.h>
  20#include <asm/ptrace.h>
  21#include <asm/ppc-opcode.h>
  22#include <asm/mmu.h>
  23#include <asm/hw_irq.h>
  24#include <asm/kvm_asm.h>
  25#include <asm/kvm_booke_hv_asm.h>
  26#include <asm/feature-fixups.h>
  27#include <asm/context_tracking.h>
  28
  29/* XXX This will ultimately add space for a special exception save
  30 *     structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
  31 *     when taking special interrupts. For now we don't support that,
  32 *     special interrupts from within a non-standard level will probably
  33 *     blow you up
  34 */
  35#define SPECIAL_EXC_SRR0	0
  36#define SPECIAL_EXC_SRR1	1
  37#define SPECIAL_EXC_SPRG_GEN	2
  38#define SPECIAL_EXC_SPRG_TLB	3
  39#define SPECIAL_EXC_MAS0	4
  40#define SPECIAL_EXC_MAS1	5
  41#define SPECIAL_EXC_MAS2	6
  42#define SPECIAL_EXC_MAS3	7
  43#define SPECIAL_EXC_MAS6	8
  44#define SPECIAL_EXC_MAS7	9
  45#define SPECIAL_EXC_MAS5	10	/* E.HV only */
  46#define SPECIAL_EXC_MAS8	11	/* E.HV only */
  47#define SPECIAL_EXC_IRQHAPPENED	12
  48#define SPECIAL_EXC_DEAR	13
  49#define SPECIAL_EXC_ESR		14
  50#define SPECIAL_EXC_SOFTE	15
  51#define SPECIAL_EXC_CSRR0	16
  52#define SPECIAL_EXC_CSRR1	17
  53/* must be even to keep 16-byte stack alignment */
  54#define SPECIAL_EXC_END		18
  55
  56#define SPECIAL_EXC_FRAME_SIZE	(INT_FRAME_SIZE + SPECIAL_EXC_END * 8)
  57#define SPECIAL_EXC_FRAME_OFFS  (INT_FRAME_SIZE - 288)
  58
  59#define SPECIAL_EXC_STORE(reg, name) \
  60	std	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
  61
  62#define SPECIAL_EXC_LOAD(reg, name) \
  63	ld	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
  64
  65special_reg_save:
  66	lbz	r9,PACAIRQHAPPENED(r13)
  67	RECONCILE_IRQ_STATE(r3,r4)
  68
  69	/*
  70	 * We only need (or have stack space) to save this stuff if
  71	 * we interrupted the kernel.
  72	 */
  73	ld	r3,_MSR(r1)
  74	andi.	r3,r3,MSR_PR
  75	bnelr
  76
 
 
 
 
 
 
 
 
 
 
 
  77	/*
  78	 * Advance to the next TLB exception frame for handler
  79	 * types that don't do it automatically.
  80	 */
  81	LOAD_REG_ADDR(r11,extlb_level_exc)
  82	lwz	r12,0(r11)
  83	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
  84	add	r10,r10,r12
  85	mtspr	SPRN_SPRG_TLB_EXFRAME,r10
  86
  87	/*
  88	 * Save registers needed to allow nesting of certain exceptions
  89	 * (such as TLB misses) inside special exception levels
  90	 */
  91	mfspr	r10,SPRN_SRR0
  92	SPECIAL_EXC_STORE(r10,SRR0)
  93	mfspr	r10,SPRN_SRR1
  94	SPECIAL_EXC_STORE(r10,SRR1)
  95	mfspr	r10,SPRN_SPRG_GEN_SCRATCH
  96	SPECIAL_EXC_STORE(r10,SPRG_GEN)
  97	mfspr	r10,SPRN_SPRG_TLB_SCRATCH
  98	SPECIAL_EXC_STORE(r10,SPRG_TLB)
  99	mfspr	r10,SPRN_MAS0
 100	SPECIAL_EXC_STORE(r10,MAS0)
 101	mfspr	r10,SPRN_MAS1
 102	SPECIAL_EXC_STORE(r10,MAS1)
 103	mfspr	r10,SPRN_MAS2
 104	SPECIAL_EXC_STORE(r10,MAS2)
 105	mfspr	r10,SPRN_MAS3
 106	SPECIAL_EXC_STORE(r10,MAS3)
 107	mfspr	r10,SPRN_MAS6
 108	SPECIAL_EXC_STORE(r10,MAS6)
 109	mfspr	r10,SPRN_MAS7
 110	SPECIAL_EXC_STORE(r10,MAS7)
 111BEGIN_FTR_SECTION
 112	mfspr	r10,SPRN_MAS5
 113	SPECIAL_EXC_STORE(r10,MAS5)
 114	mfspr	r10,SPRN_MAS8
 115	SPECIAL_EXC_STORE(r10,MAS8)
 116
 117	/* MAS5/8 could have inappropriate values if we interrupted KVM code */
 118	li	r10,0
 119	mtspr	SPRN_MAS5,r10
 120	mtspr	SPRN_MAS8,r10
 121END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 122	SPECIAL_EXC_STORE(r9,IRQHAPPENED)
 123
 124	mfspr	r10,SPRN_DEAR
 125	SPECIAL_EXC_STORE(r10,DEAR)
 126	mfspr	r10,SPRN_ESR
 127	SPECIAL_EXC_STORE(r10,ESR)
 128
 129	lbz	r10,PACAIRQSOFTMASK(r13)
 130	SPECIAL_EXC_STORE(r10,SOFTE)
 131	ld	r10,_NIP(r1)
 132	SPECIAL_EXC_STORE(r10,CSRR0)
 133	ld	r10,_MSR(r1)
 134	SPECIAL_EXC_STORE(r10,CSRR1)
 135
 136	blr
 137
 138ret_from_level_except:
 139	ld	r3,_MSR(r1)
 140	andi.	r3,r3,MSR_PR
 141	beq	1f
 142	b	ret_from_except
 1431:
 144
 145	LOAD_REG_ADDR(r11,extlb_level_exc)
 146	lwz	r12,0(r11)
 147	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
 148	sub	r10,r10,r12
 149	mtspr	SPRN_SPRG_TLB_EXFRAME,r10
 150
 151	/*
 152	 * It's possible that the special level exception interrupted a
 153	 * TLB miss handler, and inserted the same entry that the
 154	 * interrupted handler was about to insert.  On CPUs without TLB
 155	 * write conditional, this can result in a duplicate TLB entry.
 156	 * Wipe all non-bolted entries to be safe.
 157	 *
 158	 * Note that this doesn't protect against any TLB misses
 159	 * we may take accessing the stack from here to the end of
 160	 * the special level exception.  It's not clear how we can
 161	 * reasonably protect against that, but only CPUs with
 162	 * neither TLB write conditional nor bolted kernel memory
 163	 * are affected.  Do any such CPUs even exist?
 164	 */
 165	PPC_TLBILX_ALL(0,R0)
 166
 167	REST_NVGPRS(r1)
 168
 169	SPECIAL_EXC_LOAD(r10,SRR0)
 170	mtspr	SPRN_SRR0,r10
 171	SPECIAL_EXC_LOAD(r10,SRR1)
 172	mtspr	SPRN_SRR1,r10
 173	SPECIAL_EXC_LOAD(r10,SPRG_GEN)
 174	mtspr	SPRN_SPRG_GEN_SCRATCH,r10
 175	SPECIAL_EXC_LOAD(r10,SPRG_TLB)
 176	mtspr	SPRN_SPRG_TLB_SCRATCH,r10
 177	SPECIAL_EXC_LOAD(r10,MAS0)
 178	mtspr	SPRN_MAS0,r10
 179	SPECIAL_EXC_LOAD(r10,MAS1)
 180	mtspr	SPRN_MAS1,r10
 181	SPECIAL_EXC_LOAD(r10,MAS2)
 182	mtspr	SPRN_MAS2,r10
 183	SPECIAL_EXC_LOAD(r10,MAS3)
 184	mtspr	SPRN_MAS3,r10
 185	SPECIAL_EXC_LOAD(r10,MAS6)
 186	mtspr	SPRN_MAS6,r10
 187	SPECIAL_EXC_LOAD(r10,MAS7)
 188	mtspr	SPRN_MAS7,r10
 189BEGIN_FTR_SECTION
 190	SPECIAL_EXC_LOAD(r10,MAS5)
 191	mtspr	SPRN_MAS5,r10
 192	SPECIAL_EXC_LOAD(r10,MAS8)
 193	mtspr	SPRN_MAS8,r10
 194END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 195
 196	lbz	r6,PACAIRQSOFTMASK(r13)
 197	ld	r5,SOFTE(r1)
 198
 199	/* Interrupts had better not already be enabled... */
 200	tweqi	r6,IRQS_ENABLED
 201
 202	andi.	r6,r5,IRQS_DISABLED
 203	bne	1f
 204
 205	TRACE_ENABLE_INTS
 206	stb	r5,PACAIRQSOFTMASK(r13)
 2071:
 208	/*
 209	 * Restore PACAIRQHAPPENED rather than setting it based on
 210	 * the return MSR[EE], since we could have interrupted
 211	 * __check_irq_replay() or other inconsistent transitory
 212	 * states that must remain that way.
 213	 */
 214	SPECIAL_EXC_LOAD(r10,IRQHAPPENED)
 215	stb	r10,PACAIRQHAPPENED(r13)
 216
 217	SPECIAL_EXC_LOAD(r10,DEAR)
 218	mtspr	SPRN_DEAR,r10
 219	SPECIAL_EXC_LOAD(r10,ESR)
 220	mtspr	SPRN_ESR,r10
 221
 222	stdcx.	r0,0,r1		/* to clear the reservation */
 223
 224	REST_4GPRS(2, r1)
 225	REST_4GPRS(6, r1)
 226
 227	ld	r10,_CTR(r1)
 228	ld	r11,_XER(r1)
 229	mtctr	r10
 230	mtxer	r11
 231
 232	blr
 233
 234.macro ret_from_level srr0 srr1 paca_ex scratch
 235	bl	ret_from_level_except
 236
 237	ld	r10,_LINK(r1)
 238	ld	r11,_CCR(r1)
 239	ld	r0,GPR13(r1)
 240	mtlr	r10
 241	mtcr	r11
 242
 243	ld	r10,GPR10(r1)
 244	ld	r11,GPR11(r1)
 245	ld	r12,GPR12(r1)
 246	mtspr	\scratch,r0
 247
 248	std	r10,\paca_ex+EX_R10(r13);
 249	std	r11,\paca_ex+EX_R11(r13);
 250	ld	r10,_NIP(r1)
 251	ld	r11,_MSR(r1)
 252	ld	r0,GPR0(r1)
 253	ld	r1,GPR1(r1)
 254	mtspr	\srr0,r10
 255	mtspr	\srr1,r11
 256	ld	r10,\paca_ex+EX_R10(r13)
 257	ld	r11,\paca_ex+EX_R11(r13)
 258	mfspr	r13,\scratch
 259.endm
 260
 261ret_from_crit_except:
 262	ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
 263	rfci
 264
 265ret_from_mc_except:
 266	ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
 267	rfmci
 268
 269/* Exception prolog code for all exceptions */
 270#define EXCEPTION_PROLOG(n, intnum, type, addition)	    		    \
 271	mtspr	SPRN_SPRG_##type##_SCRATCH,r13;	/* get spare registers */   \
 272	mfspr	r13,SPRN_SPRG_PACA;	/* get PACA */			    \
 273	std	r10,PACA_EX##type+EX_R10(r13);				    \
 274	std	r11,PACA_EX##type+EX_R11(r13);				    \
 275	mfcr	r10;			/* save CR */			    \
 276	mfspr	r11,SPRN_##type##_SRR1;/* what are we coming from */	    \
 277	DO_KVM	intnum,SPRN_##type##_SRR1;    /* KVM hook */		    \
 278	stw	r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \
 279	addition;			/* additional code for that exc. */ \
 280	std	r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */  \
 281	type##_SET_KSTACK;		/* get special stack if necessary */\
 282	andi.	r10,r11,MSR_PR;		/* save stack pointer */	    \
 283	beq	1f;			/* branch around if supervisor */   \
 284	ld	r1,PACAKSAVE(r13);	/* get kernel stack coming from usr */\
 2851:	type##_BTB_FLUSH		\
 286	cmpdi	cr1,r1,0;		/* check if SP makes sense */	    \
 287	bge-	cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
 288	mfspr	r10,SPRN_##type##_SRR0;	/* read SRR0 before touching stack */
 289
 290/* Exception type-specific macros */
 291#define	GEN_SET_KSTACK							    \
 292	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */
 293#define SPRN_GEN_SRR0	SPRN_SRR0
 294#define SPRN_GEN_SRR1	SPRN_SRR1
 295
 296#define	GDBELL_SET_KSTACK	GEN_SET_KSTACK
 297#define SPRN_GDBELL_SRR0	SPRN_GSRR0
 298#define SPRN_GDBELL_SRR1	SPRN_GSRR1
 299
 300#define CRIT_SET_KSTACK						            \
 301	ld	r1,PACA_CRIT_STACK(r13);				    \
 302	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
 303#define SPRN_CRIT_SRR0	SPRN_CSRR0
 304#define SPRN_CRIT_SRR1	SPRN_CSRR1
 305
 306#define DBG_SET_KSTACK						            \
 307	ld	r1,PACA_DBG_STACK(r13);					    \
 308	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
 309#define SPRN_DBG_SRR0	SPRN_DSRR0
 310#define SPRN_DBG_SRR1	SPRN_DSRR1
 311
 312#define MC_SET_KSTACK						            \
 313	ld	r1,PACA_MC_STACK(r13);					    \
 314	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
 315#define SPRN_MC_SRR0	SPRN_MCSRR0
 316#define SPRN_MC_SRR1	SPRN_MCSRR1
 317
 318#ifdef CONFIG_PPC_FSL_BOOK3E
 319#define GEN_BTB_FLUSH			\
 320	START_BTB_FLUSH_SECTION		\
 321		beq 1f;			\
 322		BTB_FLUSH(r10)			\
 323		1:		\
 324	END_BTB_FLUSH_SECTION
 325
 326#define CRIT_BTB_FLUSH			\
 327	START_BTB_FLUSH_SECTION		\
 328		BTB_FLUSH(r10)		\
 329	END_BTB_FLUSH_SECTION
 330
 331#define DBG_BTB_FLUSH CRIT_BTB_FLUSH
 332#define MC_BTB_FLUSH CRIT_BTB_FLUSH
 333#define GDBELL_BTB_FLUSH GEN_BTB_FLUSH
 334#else
 335#define GEN_BTB_FLUSH
 336#define CRIT_BTB_FLUSH
 337#define DBG_BTB_FLUSH
 338#define MC_BTB_FLUSH
 339#define GDBELL_BTB_FLUSH
 340#endif
 341
 342#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition)			    \
 343	EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
 344
 345#define CRIT_EXCEPTION_PROLOG(n, intnum, addition)			    \
 346	EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n))
 347
 348#define DBG_EXCEPTION_PROLOG(n, intnum, addition)			    \
 349	EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n))
 350
 351#define MC_EXCEPTION_PROLOG(n, intnum, addition)			    \
 352	EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n))
 353
 354#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition)			    \
 355	EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))
 356
 357/* Variants of the "addition" argument for the prolog
 358 */
 359#define PROLOG_ADDITION_NONE_GEN(n)
 360#define PROLOG_ADDITION_NONE_GDBELL(n)
 361#define PROLOG_ADDITION_NONE_CRIT(n)
 362#define PROLOG_ADDITION_NONE_DBG(n)
 363#define PROLOG_ADDITION_NONE_MC(n)
 364
 365#define PROLOG_ADDITION_MASKABLE_GEN(n)					    \
 366	lbz	r10,PACAIRQSOFTMASK(r13);	/* are irqs soft-masked? */ \
 367	andi.	r10,r10,IRQS_DISABLED;	/* yes -> go out of line */ \
 368	bne	masked_interrupt_book3e_##n
 369
 370#define PROLOG_ADDITION_2REGS_GEN(n)					    \
 371	std	r14,PACA_EXGEN+EX_R14(r13);				    \
 372	std	r15,PACA_EXGEN+EX_R15(r13)
 373
 374#define PROLOG_ADDITION_1REG_GEN(n)					    \
 375	std	r14,PACA_EXGEN+EX_R14(r13);
 376
 377#define PROLOG_ADDITION_2REGS_CRIT(n)					    \
 378	std	r14,PACA_EXCRIT+EX_R14(r13);				    \
 379	std	r15,PACA_EXCRIT+EX_R15(r13)
 380
 381#define PROLOG_ADDITION_2REGS_DBG(n)					    \
 382	std	r14,PACA_EXDBG+EX_R14(r13);				    \
 383	std	r15,PACA_EXDBG+EX_R15(r13)
 384
 385#define PROLOG_ADDITION_2REGS_MC(n)					    \
 386	std	r14,PACA_EXMC+EX_R14(r13);				    \
 387	std	r15,PACA_EXMC+EX_R15(r13)
 388
 389
 390/* Core exception code for all exceptions except TLB misses. */
 391#define EXCEPTION_COMMON_LVL(n, scratch, excf)				    \
 392exc_##n##_common:							    \
 393	std	r0,GPR0(r1);		/* save r0 in stackframe */	    \
 394	std	r2,GPR2(r1);		/* save r2 in stackframe */	    \
 395	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */    \
 396	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	    \
 397	std	r9,GPR9(r1);		/* save r9 in stackframe */	    \
 398	std	r10,_NIP(r1);		/* save SRR0 to stackframe */	    \
 399	std	r11,_MSR(r1);		/* save SRR1 to stackframe */	    \
 400	beq	2f;			/* if from kernel mode */	    \
 401	ACCOUNT_CPU_USER_ENTRY(r13,r10,r11);/* accounting (uses cr0+eq) */  \
 4022:	ld	r3,excf+EX_R10(r13);	/* get back r10 */		    \
 403	ld	r4,excf+EX_R11(r13);	/* get back r11 */		    \
 404	mfspr	r5,scratch;		/* get back r13 */		    \
 405	std	r12,GPR12(r1);		/* save r12 in stackframe */	    \
 406	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	    \
 407	mflr	r6;			/* save LR in stackframe */	    \
 408	mfctr	r7;			/* save CTR in stackframe */	    \
 409	mfspr	r8,SPRN_XER;		/* save XER in stackframe */	    \
 410	ld	r9,excf+EX_R1(r13);	/* load orig r1 back from PACA */   \
 411	lwz	r10,excf+EX_CR(r13);	/* load orig CR back from PACA	*/  \
 412	lbz	r11,PACAIRQSOFTMASK(r13); /* get current IRQ softe */	    \
 413	ld	r12,exception_marker@toc(r2);				    \
 414	li	r0,0;							    \
 415	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
 416	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
 417	std	r5,GPR13(r1);		/* save it to stackframe */	    \
 418	std	r6,_LINK(r1);						    \
 419	std	r7,_CTR(r1);						    \
 420	std	r8,_XER(r1);						    \
 421	li	r3,(n)+1;		/* indicate partial regs in trap */ \
 422	std	r9,0(r1);		/* store stack frame back link */   \
 423	std	r10,_CCR(r1);		/* store orig CR in stackframe */   \
 424	std	r9,GPR1(r1);		/* store stack frame back link */   \
 425	std	r11,SOFTE(r1);		/* and save it to stackframe */     \
 426	std	r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */	    \
 427	std	r3,_TRAP(r1);		/* set trap number		*/  \
 428	std	r0,RESULT(r1);		/* clear regs->result */
 429
 430#define EXCEPTION_COMMON(n) \
 431	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN)
 432#define EXCEPTION_COMMON_CRIT(n) \
 433	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_CRIT_SCRATCH, PACA_EXCRIT)
 434#define EXCEPTION_COMMON_MC(n) \
 435	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_MC_SCRATCH, PACA_EXMC)
 436#define EXCEPTION_COMMON_DBG(n) \
 437	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG)
 438
 439/*
 440 * This is meant for exceptions that don't immediately hard-enable.  We
 441 * set a bit in paca->irq_happened to ensure that a subsequent call to
 442 * arch_local_irq_restore() will properly hard-enable and avoid the
 443 * fast-path, and then reconcile irq state.
 444 */
 445#define INTS_DISABLE	RECONCILE_IRQ_STATE(r3,r4)
 446
 447/*
 448 * This is called by exceptions that don't use INTS_DISABLE (that did not
 449 * touch irq indicators in the PACA).  This will restore MSR:EE to it's
 450 * previous value
 451 *
 452 * XXX In the long run, we may want to open-code it in order to separate the
 453 *     load from the wrtee, thus limiting the latency caused by the dependency
 454 *     but at this point, I'll favor code clarity until we have a near to final
 455 *     implementation
 456 */
 457#define INTS_RESTORE_HARD						    \
 458	ld	r11,_MSR(r1);						    \
 459	wrtee	r11;
 460
 461/* XXX FIXME: Restore r14/r15 when necessary */
 462#define BAD_STACK_TRAMPOLINE(n)						    \
 463exc_##n##_bad_stack:							    \
 464	li	r1,(n);			/* get exception number */	    \
 465	sth	r1,PACA_TRAP_SAVE(r13);	/* store trap */		    \
 466	b	bad_stack_book3e;	/* bad stack error */
 467
 468/* WARNING: If you change the layout of this stub, make sure you check
 469	*   the debug exception handler which handles single stepping
 470	*   into exceptions from userspace, and the MM code in
 471	*   arch/powerpc/mm/tlb_nohash.c which patches the branch here
 472	*   and would need to be updated if that branch is moved
 473	*/
 474#define	EXCEPTION_STUB(loc, label)					\
 475	. = interrupt_base_book3e + loc;				\
 476	nop;	/* To make debug interrupts happy */			\
 477	b	exc_##label##_book3e;
 478
 479#define ACK_NONE(r)
 480#define ACK_DEC(r)							\
 481	lis	r,TSR_DIS@h;						\
 482	mtspr	SPRN_TSR,r
 483#define ACK_FIT(r)							\
 484	lis	r,TSR_FIS@h;						\
 485	mtspr	SPRN_TSR,r
 486
 487/* Used by asynchronous interrupt that may happen in the idle loop.
 488 *
 489 * This check if the thread was in the idle loop, and if yes, returns
 490 * to the caller rather than the PC. This is to avoid a race if
 491 * interrupts happen before the wait instruction.
 492 */
 493#define CHECK_NAPPING()							\
 494	ld	r11, PACA_THREAD_INFO(r13);				\
 495	ld	r10,TI_LOCAL_FLAGS(r11);				\
 496	andi.	r9,r10,_TLF_NAPPING;					\
 497	beq+	1f;							\
 498	ld	r8,_LINK(r1);						\
 499	rlwinm	r7,r10,0,~_TLF_NAPPING;					\
 500	std	r8,_NIP(r1);						\
 501	std	r7,TI_LOCAL_FLAGS(r11);					\
 5021:
 503
 504
 505#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack)		\
 506	START_EXCEPTION(label);						\
 507	NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
 508	EXCEPTION_COMMON(trapnum)					\
 509	INTS_DISABLE;							\
 510	ack(r8);							\
 511	CHECK_NAPPING();						\
 512	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
 513	bl	hdlr;							\
 514	b	ret_from_except_lite;
 515
 516/* This value is used to mark exception frames on the stack. */
 517	.section	".toc","aw"
 518exception_marker:
 519	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
 520
 521
 522/*
 523 * And here we have the exception vectors !
 524 */
 525
 526	.text
 527	.balign	0x1000
 528	.globl interrupt_base_book3e
 529interrupt_base_book3e:					/* fake trap */
 530	EXCEPTION_STUB(0x000, machine_check)
 531	EXCEPTION_STUB(0x020, critical_input)		/* 0x0100 */
 532	EXCEPTION_STUB(0x040, debug_crit)		/* 0x0d00 */
 533	EXCEPTION_STUB(0x060, data_storage)		/* 0x0300 */
 534	EXCEPTION_STUB(0x080, instruction_storage)	/* 0x0400 */
 535	EXCEPTION_STUB(0x0a0, external_input)		/* 0x0500 */
 536	EXCEPTION_STUB(0x0c0, alignment)		/* 0x0600 */
 537	EXCEPTION_STUB(0x0e0, program)			/* 0x0700 */
 538	EXCEPTION_STUB(0x100, fp_unavailable)		/* 0x0800 */
 539	EXCEPTION_STUB(0x120, system_call)		/* 0x0c00 */
 540	EXCEPTION_STUB(0x140, ap_unavailable)		/* 0x0f20 */
 541	EXCEPTION_STUB(0x160, decrementer)		/* 0x0900 */
 542	EXCEPTION_STUB(0x180, fixed_interval)		/* 0x0980 */
 543	EXCEPTION_STUB(0x1a0, watchdog)			/* 0x09f0 */
 544	EXCEPTION_STUB(0x1c0, data_tlb_miss)
 545	EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
 546	EXCEPTION_STUB(0x200, altivec_unavailable)
 547	EXCEPTION_STUB(0x220, altivec_assist)
 548	EXCEPTION_STUB(0x260, perfmon)
 549	EXCEPTION_STUB(0x280, doorbell)
 550	EXCEPTION_STUB(0x2a0, doorbell_crit)
 551	EXCEPTION_STUB(0x2c0, guest_doorbell)
 552	EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
 553	EXCEPTION_STUB(0x300, hypercall)
 554	EXCEPTION_STUB(0x320, ehpriv)
 555	EXCEPTION_STUB(0x340, lrat_error)
 556
 557	.globl __end_interrupts
 558__end_interrupts:
 559
 560/* Critical Input Interrupt */
 561	START_EXCEPTION(critical_input);
 562	CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
 563			      PROLOG_ADDITION_NONE)
 564	EXCEPTION_COMMON_CRIT(0x100)
 565	bl	save_nvgprs
 566	bl	special_reg_save
 567	CHECK_NAPPING();
 568	addi	r3,r1,STACK_FRAME_OVERHEAD
 569	bl	unknown_exception
 570	b	ret_from_crit_except
 571
 572/* Machine Check Interrupt */
 573	START_EXCEPTION(machine_check);
 574	MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
 575			    PROLOG_ADDITION_NONE)
 576	EXCEPTION_COMMON_MC(0x000)
 577	bl	save_nvgprs
 578	bl	special_reg_save
 579	CHECK_NAPPING();
 580	addi	r3,r1,STACK_FRAME_OVERHEAD
 581	bl	machine_check_exception
 582	b	ret_from_mc_except
 583
 584/* Data Storage Interrupt */
 585	START_EXCEPTION(data_storage)
 586	NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
 587				PROLOG_ADDITION_2REGS)
 588	mfspr	r14,SPRN_DEAR
 589	mfspr	r15,SPRN_ESR
 590	EXCEPTION_COMMON(0x300)
 591	INTS_DISABLE
 592	b	storage_fault_common
 593
 594/* Instruction Storage Interrupt */
 595	START_EXCEPTION(instruction_storage);
 596	NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE,
 597				PROLOG_ADDITION_2REGS)
 598	li	r15,0
 599	mr	r14,r10
 600	EXCEPTION_COMMON(0x400)
 601	INTS_DISABLE
 602	b	storage_fault_common
 603
 604/* External Input Interrupt */
 605	MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
 606			   external_input, do_IRQ, ACK_NONE)
 607
 608/* Alignment */
 609	START_EXCEPTION(alignment);
 610	NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
 611				PROLOG_ADDITION_2REGS)
 612	mfspr	r14,SPRN_DEAR
 613	mfspr	r15,SPRN_ESR
 614	EXCEPTION_COMMON(0x600)
 615	b	alignment_more	/* no room, go out of line */
 616
 617/* Program Interrupt */
 618	START_EXCEPTION(program);
 619	NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
 620				PROLOG_ADDITION_1REG)
 621	mfspr	r14,SPRN_ESR
 622	EXCEPTION_COMMON(0x700)
 623	INTS_DISABLE
 624	std	r14,_DSISR(r1)
 625	addi	r3,r1,STACK_FRAME_OVERHEAD
 626	ld	r14,PACA_EXGEN+EX_R14(r13)
 627	bl	save_nvgprs
 628	bl	program_check_exception
 629	b	ret_from_except
 630
 631/* Floating Point Unavailable Interrupt */
 632	START_EXCEPTION(fp_unavailable);
 633	NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
 634				PROLOG_ADDITION_NONE)
 635	/* we can probably do a shorter exception entry for that one... */
 636	EXCEPTION_COMMON(0x800)
 637	ld	r12,_MSR(r1)
 638	andi.	r0,r12,MSR_PR;
 639	beq-	1f
 640	bl	load_up_fpu
 641	b	fast_exception_return
 6421:	INTS_DISABLE
 643	bl	save_nvgprs
 644	addi	r3,r1,STACK_FRAME_OVERHEAD
 645	bl	kernel_fp_unavailable_exception
 646	b	ret_from_except
 647
 648/* Altivec Unavailable Interrupt */
 649	START_EXCEPTION(altivec_unavailable);
 650	NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
 651				PROLOG_ADDITION_NONE)
 652	/* we can probably do a shorter exception entry for that one... */
 653	EXCEPTION_COMMON(0x200)
 654#ifdef CONFIG_ALTIVEC
 655BEGIN_FTR_SECTION
 656	ld	r12,_MSR(r1)
 657	andi.	r0,r12,MSR_PR;
 658	beq-	1f
 659	bl	load_up_altivec
 660	b	fast_exception_return
 6611:
 662END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 663#endif
 664	INTS_DISABLE
 665	bl	save_nvgprs
 666	addi	r3,r1,STACK_FRAME_OVERHEAD
 667	bl	altivec_unavailable_exception
 668	b	ret_from_except
 669
 670/* AltiVec Assist */
 671	START_EXCEPTION(altivec_assist);
 672	NORMAL_EXCEPTION_PROLOG(0x220,
 673				BOOKE_INTERRUPT_ALTIVEC_ASSIST,
 674				PROLOG_ADDITION_NONE)
 675	EXCEPTION_COMMON(0x220)
 676	INTS_DISABLE
 677	bl	save_nvgprs
 678	addi	r3,r1,STACK_FRAME_OVERHEAD
 679#ifdef CONFIG_ALTIVEC
 680BEGIN_FTR_SECTION
 681	bl	altivec_assist_exception
 682END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 683#else
 684	bl	unknown_exception
 685#endif
 686	b	ret_from_except
 687
 688
 689/* Decrementer Interrupt */
 690	MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
 691			   decrementer, timer_interrupt, ACK_DEC)
 692
 693/* Fixed Interval Timer Interrupt */
 694	MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
 695			   fixed_interval, unknown_exception, ACK_FIT)
 696
 697/* Watchdog Timer Interrupt */
 698	START_EXCEPTION(watchdog);
 699	CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
 700			      PROLOG_ADDITION_NONE)
 701	EXCEPTION_COMMON_CRIT(0x9f0)
 702	bl	save_nvgprs
 703	bl	special_reg_save
 704	CHECK_NAPPING();
 705	addi	r3,r1,STACK_FRAME_OVERHEAD
 706#ifdef CONFIG_BOOKE_WDT
 707	bl	WatchdogException
 708#else
 709	bl	unknown_exception
 710#endif
 711	b	ret_from_crit_except
 712
 713/* System Call Interrupt */
 714	START_EXCEPTION(system_call)
 715	mr	r9,r13			/* keep a copy of userland r13 */
 716	mfspr	r11,SPRN_SRR0		/* get return address */
 717	mfspr	r12,SPRN_SRR1		/* get previous MSR */
 718	mfspr	r13,SPRN_SPRG_PACA	/* get our PACA */
 719	b	system_call_common
 720
 721/* Auxiliary Processor Unavailable Interrupt */
 722	START_EXCEPTION(ap_unavailable);
 723	NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
 724				PROLOG_ADDITION_NONE)
 725	EXCEPTION_COMMON(0xf20)
 726	INTS_DISABLE
 727	bl	save_nvgprs
 728	addi	r3,r1,STACK_FRAME_OVERHEAD
 729	bl	unknown_exception
 730	b	ret_from_except
 731
 732/* Debug exception as a critical interrupt*/
 733	START_EXCEPTION(debug_crit);
 734	CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
 735			      PROLOG_ADDITION_2REGS)
 736
 737	/*
 738	 * If there is a single step or branch-taken exception in an
 739	 * exception entry sequence, it was probably meant to apply to
 740	 * the code where the exception occurred (since exception entry
 741	 * doesn't turn off DE automatically).  We simulate the effect
 742	 * of turning off DE on entry to an exception handler by turning
 743	 * off DE in the CSRR1 value and clearing the debug status.
 744	 */
 745
 746	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
 747	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
 748	beq+	1f
 749
 750#ifdef CONFIG_RELOCATABLE
 751	ld	r15,PACATOC(r13)
 752	ld	r14,interrupt_base_book3e@got(r15)
 753	ld	r15,__end_interrupts@got(r15)
 754	cmpld	cr0,r10,r14
 755	cmpld	cr1,r10,r15
 756#else
 757	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
 758	cmpld	cr0, r10, r14
 759	LOAD_REG_IMMEDIATE_SYM(r14, r15, __end_interrupts)
 760	cmpld	cr1, r10, r14
 761#endif
 762	blt+	cr0,1f
 763	bge+	cr1,1f
 764
 765	/* here it looks like we got an inappropriate debug exception. */
 766	lis	r14,(DBSR_IC|DBSR_BT)@h		/* clear the event */
 767	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the CSRR1 value */
 768	mtspr	SPRN_DBSR,r14
 769	mtspr	SPRN_CSRR1,r11
 770	lwz	r10,PACA_EXCRIT+EX_CR(r13)	/* restore registers */
 771	ld	r1,PACA_EXCRIT+EX_R1(r13)
 772	ld	r14,PACA_EXCRIT+EX_R14(r13)
 773	ld	r15,PACA_EXCRIT+EX_R15(r13)
 774	mtcr	r10
 775	ld	r10,PACA_EXCRIT+EX_R10(r13)	/* restore registers */
 776	ld	r11,PACA_EXCRIT+EX_R11(r13)
 777	mfspr	r13,SPRN_SPRG_CRIT_SCRATCH
 778	rfci
 779
 780	/* Normal debug exception */
 781	/* XXX We only handle coming from userspace for now since we can't
 782	 *     quite save properly an interrupted kernel state yet
 783	 */
 7841:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
 785	beq	kernel_dbg_exc;		/* if from kernel mode */
 786
 787	/* Now we mash up things to make it look like we are coming on a
 788	 * normal exception
 789	 */
 790	mfspr	r14,SPRN_DBSR
 791	EXCEPTION_COMMON_CRIT(0xd00)
 792	std	r14,_DSISR(r1)
 793	addi	r3,r1,STACK_FRAME_OVERHEAD
 794	mr	r4,r14
 795	ld	r14,PACA_EXCRIT+EX_R14(r13)
 796	ld	r15,PACA_EXCRIT+EX_R15(r13)
 797	bl	save_nvgprs
 798	bl	DebugException
 799	b	ret_from_except
 800
 801kernel_dbg_exc:
 802	b	.	/* NYI */
 803
 804/* Debug exception as a debug interrupt*/
 805	START_EXCEPTION(debug_debug);
 806	DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
 807						 PROLOG_ADDITION_2REGS)
 808
 809	/*
 810	 * If there is a single step or branch-taken exception in an
 811	 * exception entry sequence, it was probably meant to apply to
 812	 * the code where the exception occurred (since exception entry
 813	 * doesn't turn off DE automatically).  We simulate the effect
 814	 * of turning off DE on entry to an exception handler by turning
 815	 * off DE in the DSRR1 value and clearing the debug status.
 816	 */
 817
 818	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
 819	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
 820	beq+	1f
 821
 822#ifdef CONFIG_RELOCATABLE
 823	ld	r15,PACATOC(r13)
 824	ld	r14,interrupt_base_book3e@got(r15)
 825	ld	r15,__end_interrupts@got(r15)
 826	cmpld	cr0,r10,r14
 827	cmpld	cr1,r10,r15
 828#else
 829	LOAD_REG_IMMEDIATE_SYM(r14, r15, interrupt_base_book3e)
 830	cmpld	cr0, r10, r14
 831	LOAD_REG_IMMEDIATE_SYM(r14, r15,__end_interrupts)
 832	cmpld	cr1, r10, r14
 833#endif
 834	blt+	cr0,1f
 835	bge+	cr1,1f
 836
 837	/* here it looks like we got an inappropriate debug exception. */
 838	lis	r14,(DBSR_IC|DBSR_BT)@h		/* clear the event */
 839	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the DSRR1 value */
 840	mtspr	SPRN_DBSR,r14
 841	mtspr	SPRN_DSRR1,r11
 842	lwz	r10,PACA_EXDBG+EX_CR(r13)	/* restore registers */
 843	ld	r1,PACA_EXDBG+EX_R1(r13)
 844	ld	r14,PACA_EXDBG+EX_R14(r13)
 845	ld	r15,PACA_EXDBG+EX_R15(r13)
 846	mtcr	r10
 847	ld	r10,PACA_EXDBG+EX_R10(r13)	/* restore registers */
 848	ld	r11,PACA_EXDBG+EX_R11(r13)
 849	mfspr	r13,SPRN_SPRG_DBG_SCRATCH
 850	rfdi
 851
 852	/* Normal debug exception */
 853	/* XXX We only handle coming from userspace for now since we can't
 854	 *     quite save properly an interrupted kernel state yet
 855	 */
 8561:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
 857	beq	kernel_dbg_exc;		/* if from kernel mode */
 858
 859	/* Now we mash up things to make it look like we are coming on a
 860	 * normal exception
 861	 */
 862	mfspr	r14,SPRN_DBSR
 863	EXCEPTION_COMMON_DBG(0xd08)
 864	INTS_DISABLE
 865	std	r14,_DSISR(r1)
 866	addi	r3,r1,STACK_FRAME_OVERHEAD
 867	mr	r4,r14
 868	ld	r14,PACA_EXDBG+EX_R14(r13)
 869	ld	r15,PACA_EXDBG+EX_R15(r13)
 870	bl	save_nvgprs
 871	bl	DebugException
 872	b	ret_from_except
 873
 874	START_EXCEPTION(perfmon);
 875	NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
 876				PROLOG_ADDITION_NONE)
 877	EXCEPTION_COMMON(0x260)
 878	INTS_DISABLE
 879	CHECK_NAPPING()
 880	addi	r3,r1,STACK_FRAME_OVERHEAD
 881	bl	performance_monitor_exception
 882	b	ret_from_except_lite
 883
 884/* Doorbell interrupt */
 885	MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
 886			   doorbell, doorbell_exception, ACK_NONE)
 887
 888/* Doorbell critical Interrupt */
 889	START_EXCEPTION(doorbell_crit);
 890	CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
 891			      PROLOG_ADDITION_NONE)
 892	EXCEPTION_COMMON_CRIT(0x2a0)
 893	bl	save_nvgprs
 894	bl	special_reg_save
 895	CHECK_NAPPING();
 896	addi	r3,r1,STACK_FRAME_OVERHEAD
 897	bl	unknown_exception
 898	b	ret_from_crit_except
 899
 900/*
 901 *	Guest doorbell interrupt
 902 *	This general exception use GSRRx save/restore registers
 903 */
 904	START_EXCEPTION(guest_doorbell);
 905	GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
 906			        PROLOG_ADDITION_NONE)
 907	EXCEPTION_COMMON(0x2c0)
 908	addi	r3,r1,STACK_FRAME_OVERHEAD
 909	bl	save_nvgprs
 910	INTS_RESTORE_HARD
 911	bl	unknown_exception
 912	b	ret_from_except
 913
 914/* Guest Doorbell critical Interrupt */
 915	START_EXCEPTION(guest_doorbell_crit);
 916	CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
 917			      PROLOG_ADDITION_NONE)
 918	EXCEPTION_COMMON_CRIT(0x2e0)
 919	bl	save_nvgprs
 920	bl	special_reg_save
 921	CHECK_NAPPING();
 922	addi	r3,r1,STACK_FRAME_OVERHEAD
 923	bl	unknown_exception
 924	b	ret_from_crit_except
 925
 926/* Hypervisor call */
 927	START_EXCEPTION(hypercall);
 928	NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
 929			        PROLOG_ADDITION_NONE)
 930	EXCEPTION_COMMON(0x310)
 931	addi	r3,r1,STACK_FRAME_OVERHEAD
 932	bl	save_nvgprs
 933	INTS_RESTORE_HARD
 934	bl	unknown_exception
 935	b	ret_from_except
 936
 937/* Embedded Hypervisor priviledged  */
 938	START_EXCEPTION(ehpriv);
 939	NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
 940			        PROLOG_ADDITION_NONE)
 941	EXCEPTION_COMMON(0x320)
 942	addi	r3,r1,STACK_FRAME_OVERHEAD
 943	bl	save_nvgprs
 944	INTS_RESTORE_HARD
 945	bl	unknown_exception
 946	b	ret_from_except
 947
 948/* LRAT Error interrupt */
 949	START_EXCEPTION(lrat_error);
 950	NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR,
 951			        PROLOG_ADDITION_NONE)
 952	EXCEPTION_COMMON(0x340)
 953	addi	r3,r1,STACK_FRAME_OVERHEAD
 954	bl	save_nvgprs
 955	INTS_RESTORE_HARD
 956	bl	unknown_exception
 957	b	ret_from_except
 958
 959/*
 960 * An interrupt came in while soft-disabled; We mark paca->irq_happened
 961 * accordingly and if the interrupt is level sensitive, we hard disable
 962 * hard disable (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK, so
 963 * keep these in synch.
 964 */
 965
 966.macro masked_interrupt_book3e paca_irq full_mask
 967	lbz	r10,PACAIRQHAPPENED(r13)
 968	.if \full_mask == 1
 969	ori	r10,r10,\paca_irq | PACA_IRQ_HARD_DIS
 970	.else
 971	ori	r10,r10,\paca_irq
 972	.endif
 973	stb	r10,PACAIRQHAPPENED(r13)
 974
 975	.if \full_mask == 1
 976	rldicl	r10,r11,48,1		/* clear MSR_EE */
 977	rotldi	r11,r10,16
 978	mtspr	SPRN_SRR1,r11
 979	.endif
 980
 981	lwz	r11,PACA_EXGEN+EX_CR(r13)
 982	mtcr	r11
 983	ld	r10,PACA_EXGEN+EX_R10(r13)
 984	ld	r11,PACA_EXGEN+EX_R11(r13)
 985	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
 986	rfi
 987	b	.
 988.endm
 989
 990masked_interrupt_book3e_0x500:
 991	// XXX When adding support for EPR, use PACA_IRQ_EE_EDGE
 992	masked_interrupt_book3e PACA_IRQ_EE 1
 993
 994masked_interrupt_book3e_0x900:
 995	ACK_DEC(r10);
 996	masked_interrupt_book3e PACA_IRQ_DEC 0
 997
 998masked_interrupt_book3e_0x980:
 999	ACK_FIT(r10);
1000	masked_interrupt_book3e PACA_IRQ_DEC 0
1001
1002masked_interrupt_book3e_0x280:
1003masked_interrupt_book3e_0x2c0:
1004	masked_interrupt_book3e PACA_IRQ_DBELL 0
1005
1006/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1007 * This is called from 0x300 and 0x400 handlers after the prologs with
1008 * r14 and r15 containing the fault address and error code, with the
1009 * original values stashed away in the PACA
1010 */
1011storage_fault_common:
1012	std	r14,_DAR(r1)
1013	std	r15,_DSISR(r1)
1014	addi	r3,r1,STACK_FRAME_OVERHEAD
1015	mr	r4,r14
1016	mr	r5,r15
1017	ld	r14,PACA_EXGEN+EX_R14(r13)
1018	ld	r15,PACA_EXGEN+EX_R15(r13)
1019	bl	do_page_fault
1020	cmpdi	r3,0
1021	bne-	1f
1022	b	ret_from_except_lite
10231:	bl	save_nvgprs
1024	mr	r5,r3
1025	addi	r3,r1,STACK_FRAME_OVERHEAD
1026	ld	r4,_DAR(r1)
1027	bl	bad_page_fault
1028	b	ret_from_except
1029
1030/*
1031 * Alignment exception doesn't fit entirely in the 0x100 bytes so it
1032 * continues here.
1033 */
1034alignment_more:
1035	std	r14,_DAR(r1)
1036	std	r15,_DSISR(r1)
1037	addi	r3,r1,STACK_FRAME_OVERHEAD
1038	ld	r14,PACA_EXGEN+EX_R14(r13)
1039	ld	r15,PACA_EXGEN+EX_R15(r13)
1040	bl	save_nvgprs
1041	INTS_RESTORE_HARD
1042	bl	alignment_exception
1043	b	ret_from_except
1044
1045	.align	7
1046_GLOBAL(ret_from_except)
1047	ld	r11,_TRAP(r1)
1048	andi.	r0,r11,1
1049	bne	ret_from_except_lite
1050	REST_NVGPRS(r1)
1051
1052_GLOBAL(ret_from_except_lite)
1053	/*
1054	 * Disable interrupts so that current_thread_info()->flags
1055	 * can't change between when we test it and when we return
1056	 * from the interrupt.
1057	 */
1058	wrteei	0
1059
1060	ld	r9, PACA_THREAD_INFO(r13)
1061	ld	r3,_MSR(r1)
1062	ld	r10,PACACURRENT(r13)
1063	ld	r4,TI_FLAGS(r9)
1064	andi.	r3,r3,MSR_PR
1065	beq	resume_kernel
1066	lwz	r3,(THREAD+THREAD_DBCR0)(r10)
1067
1068	/* Check current_thread_info()->flags */
1069	andi.	r0,r4,_TIF_USER_WORK_MASK
1070	bne	1f
1071	/*
1072	 * Check to see if the dbcr0 register is set up to debug.
1073	 * Use the internal debug mode bit to do this.
1074	 */
1075	andis.	r0,r3,DBCR0_IDM@h
1076	beq	restore
1077	mfmsr	r0
1078	rlwinm	r0,r0,0,~MSR_DE	/* Clear MSR.DE */
1079	mtmsr	r0
1080	mtspr	SPRN_DBCR0,r3
1081	li	r10, -1
1082	mtspr	SPRN_DBSR,r10
1083	b	restore
10841:	andi.	r0,r4,_TIF_NEED_RESCHED
1085	beq	2f
1086	bl	restore_interrupts
1087	SCHEDULE_USER
1088	b	ret_from_except_lite
10892:
1090	bl	save_nvgprs
1091	/*
1092	 * Use a non volatile GPR to save and restore our thread_info flags
1093	 * across the call to restore_interrupts.
1094	 */
1095	mr	r30,r4
1096	bl	restore_interrupts
1097	mr	r4,r30
1098	addi	r3,r1,STACK_FRAME_OVERHEAD
1099	bl	do_notify_resume
1100	b	ret_from_except
1101
1102resume_kernel:
1103	/* check current_thread_info, _TIF_EMULATE_STACK_STORE */
1104	andis.	r8,r4,_TIF_EMULATE_STACK_STORE@h
1105	beq+	1f
1106
1107	addi	r8,r1,INT_FRAME_SIZE	/* Get the kprobed function entry */
1108
1109	ld	r3,GPR1(r1)
1110	subi	r3,r3,INT_FRAME_SIZE	/* dst: Allocate a trampoline exception frame */
1111	mr	r4,r1			/* src:  current exception frame */
1112	mr	r1,r3			/* Reroute the trampoline frame to r1 */
1113
1114	/* Copy from the original to the trampoline. */
1115	li	r5,INT_FRAME_SIZE/8	/* size: INT_FRAME_SIZE */
1116	li	r6,0			/* start offset: 0 */
1117	mtctr	r5
11182:	ldx	r0,r6,r4
1119	stdx	r0,r6,r3
1120	addi	r6,r6,8
1121	bdnz	2b
1122
1123	/* Do real store operation to complete stdu */
1124	ld	r5,GPR1(r1)
1125	std	r8,0(r5)
1126
1127	/* Clear _TIF_EMULATE_STACK_STORE flag */
1128	lis	r11,_TIF_EMULATE_STACK_STORE@h
1129	addi	r5,r9,TI_FLAGS
11300:	ldarx	r4,0,r5
1131	andc	r4,r4,r11
1132	stdcx.	r4,0,r5
1133	bne-	0b
11341:
1135
1136#ifdef CONFIG_PREEMPT
1137	/* Check if we need to preempt */
1138	andi.	r0,r4,_TIF_NEED_RESCHED
1139	beq+	restore
1140	/* Check that preempt_count() == 0 and interrupts are enabled */
1141	lwz	r8,TI_PREEMPT(r9)
1142	cmpwi	cr0,r8,0
1143	bne	restore
1144	ld	r0,SOFTE(r1)
1145	andi.	r0,r0,IRQS_DISABLED
1146	bne	restore
1147
1148	/*
1149	 * Here we are preempting the current task. We want to make
1150	 * sure we are soft-disabled first and reconcile irq state.
1151	 */
1152	RECONCILE_IRQ_STATE(r3,r4)
1153	bl	preempt_schedule_irq
1154
1155	/*
1156	 * arch_local_irq_restore() from preempt_schedule_irq above may
1157	 * enable hard interrupt but we really should disable interrupts
1158	 * when we return from the interrupt, and so that we don't get
1159	 * interrupted after loading SRR0/1.
1160	 */
1161	wrteei	0
1162#endif /* CONFIG_PREEMPT */
1163
1164restore:
1165	/*
1166	 * This is the main kernel exit path. First we check if we
1167	 * are about to re-enable interrupts
1168	 */
1169	ld	r5,SOFTE(r1)
1170	lbz	r6,PACAIRQSOFTMASK(r13)
1171	andi.	r5,r5,IRQS_DISABLED
1172	bne	.Lrestore_irq_off
1173
1174	/* We are enabling, were we already enabled ? Yes, just return */
1175	andi.	r6,r6,IRQS_DISABLED
1176	beq	cr0,fast_exception_return
1177
1178	/*
1179	 * We are about to soft-enable interrupts (we are hard disabled
1180	 * at this point). We check if there's anything that needs to
1181	 * be replayed first.
1182	 */
1183	lbz	r0,PACAIRQHAPPENED(r13)
1184	cmpwi	cr0,r0,0
1185	bne-	.Lrestore_check_irq_replay
1186
1187	/*
1188	 * Get here when nothing happened while soft-disabled, just
1189	 * soft-enable and move-on. We will hard-enable as a side
1190	 * effect of rfi
1191	 */
1192.Lrestore_no_replay:
1193	TRACE_ENABLE_INTS
1194	li	r0,IRQS_ENABLED
1195	stb	r0,PACAIRQSOFTMASK(r13);
1196
1197/* This is the return from load_up_fpu fast path which could do with
1198 * less GPR restores in fact, but for now we have a single return path
1199 */
 
1200fast_exception_return:
1201	wrteei	0
12021:	mr	r0,r13
1203	ld	r10,_MSR(r1)
1204	REST_4GPRS(2, r1)
1205	andi.	r6,r10,MSR_PR
1206	REST_2GPRS(6, r1)
1207	beq	1f
1208	ACCOUNT_CPU_USER_EXIT(r13, r10, r11)
1209	ld	r0,GPR13(r1)
1210
12111:	stdcx.	r0,0,r1		/* to clear the reservation */
1212
1213	ld	r8,_CCR(r1)
1214	ld	r9,_LINK(r1)
1215	ld	r10,_CTR(r1)
1216	ld	r11,_XER(r1)
1217	mtcr	r8
1218	mtlr	r9
1219	mtctr	r10
1220	mtxer	r11
1221	REST_2GPRS(8, r1)
1222	ld	r10,GPR10(r1)
1223	ld	r11,GPR11(r1)
1224	ld	r12,GPR12(r1)
1225	mtspr	SPRN_SPRG_GEN_SCRATCH,r0
1226
1227	std	r10,PACA_EXGEN+EX_R10(r13);
1228	std	r11,PACA_EXGEN+EX_R11(r13);
1229	ld	r10,_NIP(r1)
1230	ld	r11,_MSR(r1)
1231	ld	r0,GPR0(r1)
1232	ld	r1,GPR1(r1)
1233	mtspr	SPRN_SRR0,r10
1234	mtspr	SPRN_SRR1,r11
1235	ld	r10,PACA_EXGEN+EX_R10(r13)
1236	ld	r11,PACA_EXGEN+EX_R11(r13)
1237	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
1238	rfi
1239
1240	/*
1241	 * We are returning to a context with interrupts soft disabled.
1242	 *
1243	 * However, we may also about to hard enable, so we need to
1244	 * make sure that in this case, we also clear PACA_IRQ_HARD_DIS
1245	 * or that bit can get out of sync and bad things will happen
1246	 */
1247.Lrestore_irq_off:
1248	ld	r3,_MSR(r1)
1249	lbz	r7,PACAIRQHAPPENED(r13)
1250	andi.	r0,r3,MSR_EE
1251	beq	1f
1252	rlwinm	r7,r7,0,~PACA_IRQ_HARD_DIS
1253	stb	r7,PACAIRQHAPPENED(r13)
12541:
1255#if defined(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) && defined(CONFIG_BUG)
1256	/* The interrupt should not have soft enabled. */
1257	lbz	r7,PACAIRQSOFTMASK(r13)
12581:	tdeqi	r7,IRQS_ENABLED
1259	EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
1260#endif
1261	b	fast_exception_return
1262
1263	/*
1264	 * Something did happen, check if a re-emit is needed
1265	 * (this also clears paca->irq_happened)
1266	 */
1267.Lrestore_check_irq_replay:
1268	/* XXX: We could implement a fast path here where we check
1269	 * for irq_happened being just 0x01, in which case we can
1270	 * clear it and return. That means that we would potentially
1271	 * miss a decrementer having wrapped all the way around.
1272	 *
1273	 * Still, this might be useful for things like hash_page
1274	 */
1275	bl	__check_irq_replay
1276	cmpwi	cr0,r3,0
1277	beq	.Lrestore_no_replay
1278
1279	/*
1280	 * We need to re-emit an interrupt. We do so by re-using our
1281	 * existing exception frame. We first change the trap value,
1282	 * but we need to ensure we preserve the low nibble of it
1283	 */
1284	ld	r4,_TRAP(r1)
1285	clrldi	r4,r4,60
1286	or	r4,r4,r3
1287	std	r4,_TRAP(r1)
1288
1289	/*
1290	 * PACA_IRQ_HARD_DIS won't always be set here, so set it now
1291	 * to reconcile the IRQ state. Tracing is already accounted for.
1292	 */
1293	lbz	r4,PACAIRQHAPPENED(r13)
1294	ori	r4,r4,PACA_IRQ_HARD_DIS
1295	stb	r4,PACAIRQHAPPENED(r13)
1296
1297	/*
1298	 * Then find the right handler and call it. Interrupts are
1299	 * still soft-disabled and we keep them that way.
1300	*/
1301	cmpwi	cr0,r3,0x500
1302	bne	1f
1303	addi	r3,r1,STACK_FRAME_OVERHEAD;
1304	bl	do_IRQ
1305	b	ret_from_except
13061:	cmpwi	cr0,r3,0xf00
1307	bne	1f
1308	addi	r3,r1,STACK_FRAME_OVERHEAD;
1309	bl	performance_monitor_exception
1310	b	ret_from_except
13111:	cmpwi	cr0,r3,0xe60
1312	bne	1f
1313	addi	r3,r1,STACK_FRAME_OVERHEAD;
1314	bl	handle_hmi_exception
1315	b	ret_from_except
13161:	cmpwi	cr0,r3,0x900
1317	bne	1f
1318	addi	r3,r1,STACK_FRAME_OVERHEAD;
1319	bl	timer_interrupt
1320	b	ret_from_except
1321#ifdef CONFIG_PPC_DOORBELL
13221:
1323	cmpwi	cr0,r3,0x280
1324	bne	1f
1325	addi	r3,r1,STACK_FRAME_OVERHEAD;
1326	bl	doorbell_exception
1327#endif /* CONFIG_PPC_DOORBELL */
13281:	b	ret_from_except /* What else to do here ? */
1329
1330_ASM_NOKPROBE_SYMBOL(ret_from_except);
1331_ASM_NOKPROBE_SYMBOL(ret_from_except_lite);
1332_ASM_NOKPROBE_SYMBOL(resume_kernel);
1333_ASM_NOKPROBE_SYMBOL(restore);
1334_ASM_NOKPROBE_SYMBOL(fast_exception_return);
1335
1336/*
1337 * Trampolines used when spotting a bad kernel stack pointer in
1338 * the exception entry code.
1339 *
1340 * TODO: move some bits like SRR0 read to trampoline, pass PACA
1341 * index around, etc... to handle crit & mcheck
1342 */
1343BAD_STACK_TRAMPOLINE(0x000)
1344BAD_STACK_TRAMPOLINE(0x100)
1345BAD_STACK_TRAMPOLINE(0x200)
1346BAD_STACK_TRAMPOLINE(0x220)
1347BAD_STACK_TRAMPOLINE(0x260)
1348BAD_STACK_TRAMPOLINE(0x280)
1349BAD_STACK_TRAMPOLINE(0x2a0)
1350BAD_STACK_TRAMPOLINE(0x2c0)
1351BAD_STACK_TRAMPOLINE(0x2e0)
1352BAD_STACK_TRAMPOLINE(0x300)
1353BAD_STACK_TRAMPOLINE(0x310)
1354BAD_STACK_TRAMPOLINE(0x320)
1355BAD_STACK_TRAMPOLINE(0x340)
1356BAD_STACK_TRAMPOLINE(0x400)
1357BAD_STACK_TRAMPOLINE(0x500)
1358BAD_STACK_TRAMPOLINE(0x600)
1359BAD_STACK_TRAMPOLINE(0x700)
1360BAD_STACK_TRAMPOLINE(0x800)
1361BAD_STACK_TRAMPOLINE(0x900)
1362BAD_STACK_TRAMPOLINE(0x980)
1363BAD_STACK_TRAMPOLINE(0x9f0)
1364BAD_STACK_TRAMPOLINE(0xa00)
1365BAD_STACK_TRAMPOLINE(0xb00)
1366BAD_STACK_TRAMPOLINE(0xc00)
1367BAD_STACK_TRAMPOLINE(0xd00)
1368BAD_STACK_TRAMPOLINE(0xd08)
1369BAD_STACK_TRAMPOLINE(0xe00)
1370BAD_STACK_TRAMPOLINE(0xf00)
1371BAD_STACK_TRAMPOLINE(0xf20)
1372
1373	.globl	bad_stack_book3e
1374bad_stack_book3e:
1375	/* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
1376	mfspr	r10,SPRN_SRR0;		  /* read SRR0 before touching stack */
1377	ld	r1,PACAEMERGSP(r13)
1378	subi	r1,r1,64+INT_FRAME_SIZE
1379	std	r10,_NIP(r1)
1380	std	r11,_MSR(r1)
1381	ld	r10,PACA_EXGEN+EX_R1(r13) /* FIXME for crit & mcheck */
1382	lwz	r11,PACA_EXGEN+EX_CR(r13) /* FIXME for crit & mcheck */
1383	std	r10,GPR1(r1)
1384	std	r11,_CCR(r1)
1385	mfspr	r10,SPRN_DEAR
1386	mfspr	r11,SPRN_ESR
1387	std	r10,_DAR(r1)
1388	std	r11,_DSISR(r1)
1389	std	r0,GPR0(r1);		/* save r0 in stackframe */	    \
1390	std	r2,GPR2(r1);		/* save r2 in stackframe */	    \
1391	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */    \
1392	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	    \
1393	std	r9,GPR9(r1);		/* save r9 in stackframe */	    \
1394	ld	r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */		    \
1395	ld	r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */		    \
1396	mfspr	r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \
1397	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
1398	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
1399	std	r12,GPR12(r1);		/* save r12 in stackframe */	    \
1400	std	r5,GPR13(r1);		/* save it to stackframe */	    \
1401	mflr	r10
1402	mfctr	r11
1403	mfxer	r12
1404	std	r10,_LINK(r1)
1405	std	r11,_CTR(r1)
1406	std	r12,_XER(r1)
1407	SAVE_10GPRS(14,r1)
1408	SAVE_8GPRS(24,r1)
1409	lhz	r12,PACA_TRAP_SAVE(r13)
1410	std	r12,_TRAP(r1)
1411	addi	r11,r1,INT_FRAME_SIZE
1412	std	r11,0(r1)
1413	li	r12,0
1414	std	r12,0(r11)
1415	ld	r2,PACATOC(r13)
14161:	addi	r3,r1,STACK_FRAME_OVERHEAD
1417	bl	kernel_bad_stack
1418	b	1b
1419
1420/*
1421 * Setup the initial TLB for a core. This current implementation
1422 * assume that whatever we are running off will not conflict with
1423 * the new mapping at PAGE_OFFSET.
1424 */
1425_GLOBAL(initial_tlb_book3e)
1426
1427	/* Look for the first TLB with IPROT set */
1428	mfspr	r4,SPRN_TLB0CFG
1429	andi.	r3,r4,TLBnCFG_IPROT
1430	lis	r3,MAS0_TLBSEL(0)@h
1431	bne	found_iprot
1432
1433	mfspr	r4,SPRN_TLB1CFG
1434	andi.	r3,r4,TLBnCFG_IPROT
1435	lis	r3,MAS0_TLBSEL(1)@h
1436	bne	found_iprot
1437
1438	mfspr	r4,SPRN_TLB2CFG
1439	andi.	r3,r4,TLBnCFG_IPROT
1440	lis	r3,MAS0_TLBSEL(2)@h
1441	bne	found_iprot
1442
1443	lis	r3,MAS0_TLBSEL(3)@h
1444	mfspr	r4,SPRN_TLB3CFG
1445	/* fall through */
1446
1447found_iprot:
1448	andi.	r5,r4,TLBnCFG_HES
1449	bne	have_hes
1450
1451	mflr	r8				/* save LR */
1452/* 1. Find the index of the entry we're executing in
1453 *
1454 * r3 = MAS0_TLBSEL (for the iprot array)
1455 * r4 = SPRN_TLBnCFG
1456 */
1457	bl	invstr				/* Find our address */
1458invstr:	mflr	r6				/* Make it accessible */
1459	mfmsr	r7
1460	rlwinm	r5,r7,27,31,31			/* extract MSR[IS] */
1461	mfspr	r7,SPRN_PID
1462	slwi	r7,r7,16
1463	or	r7,r7,r5
1464	mtspr	SPRN_MAS6,r7
1465	tlbsx	0,r6				/* search MSR[IS], SPID=PID */
1466
1467	mfspr	r3,SPRN_MAS0
1468	rlwinm	r5,r3,16,20,31			/* Extract MAS0(Entry) */
1469
1470	mfspr	r7,SPRN_MAS1			/* Insure IPROT set */
1471	oris	r7,r7,MAS1_IPROT@h
1472	mtspr	SPRN_MAS1,r7
1473	tlbwe
1474
1475/* 2. Invalidate all entries except the entry we're executing in
1476 *
1477 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
1478 * r4 = SPRN_TLBnCFG
1479 * r5 = ESEL of entry we are running in
1480 */
1481	andi.	r4,r4,TLBnCFG_N_ENTRY		/* Extract # entries */
1482	li	r6,0				/* Set Entry counter to 0 */
14831:	mr	r7,r3				/* Set MAS0(TLBSEL) */
1484	rlwimi	r7,r6,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r6) */
1485	mtspr	SPRN_MAS0,r7
1486	tlbre
1487	mfspr	r7,SPRN_MAS1
1488	rlwinm	r7,r7,0,2,31			/* Clear MAS1 Valid and IPROT */
1489	cmpw	r5,r6
1490	beq	skpinv				/* Dont update the current execution TLB */
1491	mtspr	SPRN_MAS1,r7
1492	tlbwe
1493	isync
1494skpinv:	addi	r6,r6,1				/* Increment */
1495	cmpw	r6,r4				/* Are we done? */
1496	bne	1b				/* If not, repeat */
1497
1498	/* Invalidate all TLBs */
1499	PPC_TLBILX_ALL(0,R0)
1500	sync
1501	isync
1502
1503/* 3. Setup a temp mapping and jump to it
1504 *
1505 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
1506 * r5 = ESEL of entry we are running in
1507 */
1508	andi.	r7,r5,0x1	/* Find an entry not used and is non-zero */
1509	addi	r7,r7,0x1
1510	mr	r4,r3		/* Set MAS0(TLBSEL) = 1 */
1511	mtspr	SPRN_MAS0,r4
1512	tlbre
1513
1514	rlwimi	r4,r7,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r7) */
1515	mtspr	SPRN_MAS0,r4
1516
1517	mfspr	r7,SPRN_MAS1
1518	xori	r6,r7,MAS1_TS		/* Setup TMP mapping in the other Address space */
1519	mtspr	SPRN_MAS1,r6
1520
1521	tlbwe
1522
1523	mfmsr	r6
1524	xori	r6,r6,MSR_IS
1525	mtspr	SPRN_SRR1,r6
1526	bl	1f		/* Find our address */
15271:	mflr	r6
1528	addi	r6,r6,(2f - 1b)
1529	mtspr	SPRN_SRR0,r6
1530	rfi
15312:
1532
1533/* 4. Clear out PIDs & Search info
1534 *
1535 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
1536 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
1537 * r5 = MAS3
1538 */
1539	li	r6,0
1540	mtspr   SPRN_MAS6,r6
1541	mtspr	SPRN_PID,r6
1542
1543/* 5. Invalidate mapping we started in
1544 *
1545 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
1546 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
1547 * r5 = MAS3
1548 */
1549	mtspr	SPRN_MAS0,r3
1550	tlbre
1551	mfspr	r6,SPRN_MAS1
1552	rlwinm	r6,r6,0,2,31	/* clear IPROT and VALID */
1553	mtspr	SPRN_MAS1,r6
1554	tlbwe
1555	sync
1556	isync
1557
 
 
 
 
 
 
 
 
 
 
1558/* 6. Setup KERNELBASE mapping in TLB[0]
1559 *
1560 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
1561 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
1562 * r5 = MAS3
1563 */
1564	rlwinm	r3,r3,0,16,3	/* clear ESEL */
1565	mtspr	SPRN_MAS0,r3
1566	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
1567	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
1568	mtspr	SPRN_MAS1,r6
1569
1570	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | MAS2_M_IF_NEEDED)
1571	mtspr	SPRN_MAS2,r6
1572
1573	rlwinm	r5,r5,0,0,25
1574	ori	r5,r5,MAS3_SR | MAS3_SW | MAS3_SX
1575	mtspr	SPRN_MAS3,r5
1576	li	r5,-1
1577	rlwinm	r5,r5,0,0,25
1578
1579	tlbwe
1580
1581/* 7. Jump to KERNELBASE mapping
1582 *
1583 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
1584 */
1585	/* Now we branch the new virtual address mapped by this entry */
1586	bl	1f		/* Find our address */
15871:	mflr	r6
1588	addi	r6,r6,(2f - 1b)
1589	tovirt(r6,r6)
1590	lis	r7,MSR_KERNEL@h
1591	ori	r7,r7,MSR_KERNEL@l
1592	mtspr	SPRN_SRR0,r6
1593	mtspr	SPRN_SRR1,r7
1594	rfi				/* start execution out of TLB1[0] entry */
15952:
1596
1597/* 8. Clear out the temp mapping
1598 *
1599 * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in
1600 */
1601	mtspr	SPRN_MAS0,r4
1602	tlbre
1603	mfspr	r5,SPRN_MAS1
1604	rlwinm	r5,r5,0,2,31	/* clear IPROT and VALID */
1605	mtspr	SPRN_MAS1,r5
1606	tlbwe
1607	sync
1608	isync
1609
1610	/* We translate LR and return */
1611	tovirt(r8,r8)
1612	mtlr	r8
1613	blr
1614
1615have_hes:
1616	/* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the
1617	 * kernel linear mapping. We also set MAS8 once for all here though
1618	 * that will have to be made dependent on whether we are running under
1619	 * a hypervisor I suppose.
1620	 */
1621
1622	/* BEWARE, MAGIC
1623	 * This code is called as an ordinary function on the boot CPU. But to
1624	 * avoid duplication, this code is also used in SCOM bringup of
1625	 * secondary CPUs. We read the code between the initial_tlb_code_start
1626	 * and initial_tlb_code_end labels one instruction at a time and RAM it
1627	 * into the new core via SCOM. That doesn't process branches, so there
1628	 * must be none between those two labels. It also means if this code
1629	 * ever takes any parameters, the SCOM code must also be updated to
1630	 * provide them.
1631	 */
1632	.globl a2_tlbinit_code_start
1633a2_tlbinit_code_start:
1634
1635	ori	r11,r3,MAS0_WQ_ALLWAYS
1636	oris	r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
1637	mtspr	SPRN_MAS0,r11
1638	lis	r3,(MAS1_VALID | MAS1_IPROT)@h
1639	ori	r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
1640	mtspr	SPRN_MAS1,r3
1641	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M)
1642	mtspr	SPRN_MAS2,r3
1643	li	r3,MAS3_SR | MAS3_SW | MAS3_SX
1644	mtspr	SPRN_MAS7_MAS3,r3
1645	li	r3,0
1646	mtspr	SPRN_MAS8,r3
1647
1648	/* Write the TLB entry */
1649	tlbwe
1650
1651	.globl a2_tlbinit_after_linear_map
1652a2_tlbinit_after_linear_map:
1653
1654	/* Now we branch the new virtual address mapped by this entry */
1655	LOAD_REG_IMMEDIATE_SYM(r3, r5, 1f)
1656	mtctr	r3
1657	bctr
1658
16591:	/* We are now running at PAGE_OFFSET, clean the TLB of everything
1660	 * else (including IPROTed things left by firmware)
1661	 * r4 = TLBnCFG
1662	 * r3 = current address (more or less)
1663	 */
1664
1665	li	r5,0
1666	mtspr	SPRN_MAS6,r5
1667	tlbsx	0,r3
1668
1669	rlwinm	r9,r4,0,TLBnCFG_N_ENTRY
1670	rlwinm	r10,r4,8,0xff
1671	addi	r10,r10,-1	/* Get inner loop mask */
1672
1673	li	r3,1
1674
1675	mfspr	r5,SPRN_MAS1
1676	rlwinm	r5,r5,0,(~(MAS1_VALID|MAS1_IPROT))
1677
1678	mfspr	r6,SPRN_MAS2
1679	rldicr	r6,r6,0,51		/* Extract EPN */
1680
1681	mfspr	r7,SPRN_MAS0
1682	rlwinm	r7,r7,0,0xffff0fff	/* Clear HES and WQ */
1683
1684	rlwinm	r8,r7,16,0xfff		/* Extract ESEL */
1685
16862:	add	r4,r3,r8
1687	and	r4,r4,r10
1688
1689	rlwimi	r7,r4,16,MAS0_ESEL_MASK
1690
1691	mtspr	SPRN_MAS0,r7
1692	mtspr	SPRN_MAS1,r5
1693	mtspr	SPRN_MAS2,r6
1694	tlbwe
1695
1696	addi	r3,r3,1
1697	and.	r4,r3,r10
1698
1699	bne	3f
1700	addis	r6,r6,(1<<30)@h
17013:
1702	cmpw	r3,r9
1703	blt	2b
1704
1705	.globl  a2_tlbinit_after_iprot_flush
1706a2_tlbinit_after_iprot_flush:
1707
1708	PPC_TLBILX(0,0,R0)
1709	sync
1710	isync
1711
1712	.globl a2_tlbinit_code_end
1713a2_tlbinit_code_end:
1714
1715	/* We translate LR and return */
1716	mflr	r3
1717	tovirt(r3,r3)
1718	mtlr	r3
1719	blr
1720
1721/*
1722 * Main entry (boot CPU, thread 0)
1723 *
1724 * We enter here from head_64.S, possibly after the prom_init trampoline
1725 * with r3 and r4 already saved to r31 and 30 respectively and in 64 bits
1726 * mode. Anything else is as it was left by the bootloader
1727 *
1728 * Initial requirements of this port:
1729 *
1730 * - Kernel loaded at 0 physical
1731 * - A good lump of memory mapped 0:0 by UTLB entry 0
1732 * - MSR:IS & MSR:DS set to 0
1733 *
1734 * Note that some of the above requirements will be relaxed in the future
1735 * as the kernel becomes smarter at dealing with different initial conditions
1736 * but for now you have to be careful
1737 */
1738_GLOBAL(start_initialization_book3e)
1739	mflr	r28
1740
1741	/* First, we need to setup some initial TLBs to map the kernel
1742	 * text, data and bss at PAGE_OFFSET. We don't have a real mode
1743	 * and always use AS 0, so we just set it up to match our link
1744	 * address and never use 0 based addresses.
1745	 */
1746	bl	initial_tlb_book3e
1747
1748	/* Init global core bits */
1749	bl	init_core_book3e
1750
1751	/* Init per-thread bits */
1752	bl	init_thread_book3e
1753
1754	/* Return to common init code */
1755	tovirt(r28,r28)
1756	mtlr	r28
1757	blr
1758
1759
1760/*
1761 * Secondary core/processor entry
1762 *
1763 * This is entered for thread 0 of a secondary core, all other threads
1764 * are expected to be stopped. It's similar to start_initialization_book3e
1765 * except that it's generally entered from the holding loop in head_64.S
1766 * after CPUs have been gathered by Open Firmware.
1767 *
1768 * We assume we are in 32 bits mode running with whatever TLB entry was
1769 * set for us by the firmware or POR engine.
1770 */
1771_GLOBAL(book3e_secondary_core_init_tlb_set)
1772	li	r4,1
1773	b	generic_secondary_smp_init
1774
1775_GLOBAL(book3e_secondary_core_init)
1776	mflr	r28
1777
1778	/* Do we need to setup initial TLB entry ? */
1779	cmplwi	r4,0
1780	bne	2f
1781
1782	/* Setup TLB for this core */
1783	bl	initial_tlb_book3e
1784
1785	/* We can return from the above running at a different
1786	 * address, so recalculate r2 (TOC)
1787	 */
1788	bl	relative_toc
1789
1790	/* Init global core bits */
17912:	bl	init_core_book3e
1792
1793	/* Init per-thread bits */
17943:	bl	init_thread_book3e
1795
1796	/* Return to common init code at proper virtual address.
1797	 *
1798	 * Due to various previous assumptions, we know we entered this
1799	 * function at either the final PAGE_OFFSET mapping or using a
1800	 * 1:1 mapping at 0, so we don't bother doing a complicated check
1801	 * here, we just ensure the return address has the right top bits.
1802	 *
1803	 * Note that if we ever want to be smarter about where we can be
1804	 * started from, we have to be careful that by the time we reach
1805	 * the code below we may already be running at a different location
1806	 * than the one we were called from since initial_tlb_book3e can
1807	 * have moved us already.
1808	 */
1809	cmpdi	cr0,r28,0
1810	blt	1f
1811	lis	r3,PAGE_OFFSET@highest
1812	sldi	r3,r3,32
1813	or	r28,r28,r3
18141:	mtlr	r28
1815	blr
1816
1817_GLOBAL(book3e_secondary_thread_init)
1818	mflr	r28
1819	b	3b
1820
1821	.globl init_core_book3e
1822init_core_book3e:
1823	/* Establish the interrupt vector base */
1824	tovirt(r2,r2)
1825	LOAD_REG_ADDR(r3, interrupt_base_book3e)
1826	mtspr	SPRN_IVPR,r3
1827	sync
1828	blr
1829
1830init_thread_book3e:
1831	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
1832	mtspr	SPRN_EPCR,r3
1833
1834	/* Make sure interrupts are off */
1835	wrteei	0
1836
1837	/* disable all timers and clear out status */
1838	li	r3,0
1839	mtspr	SPRN_TCR,r3
1840	mfspr	r3,SPRN_TSR
1841	mtspr	SPRN_TSR,r3
1842
1843	blr
1844
1845_GLOBAL(__setup_base_ivors)
1846	SET_IVOR(0, 0x020) /* Critical Input */
1847	SET_IVOR(1, 0x000) /* Machine Check */
1848	SET_IVOR(2, 0x060) /* Data Storage */ 
1849	SET_IVOR(3, 0x080) /* Instruction Storage */
1850	SET_IVOR(4, 0x0a0) /* External Input */ 
1851	SET_IVOR(5, 0x0c0) /* Alignment */ 
1852	SET_IVOR(6, 0x0e0) /* Program */ 
1853	SET_IVOR(7, 0x100) /* FP Unavailable */ 
1854	SET_IVOR(8, 0x120) /* System Call */ 
1855	SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */ 
1856	SET_IVOR(10, 0x160) /* Decrementer */ 
1857	SET_IVOR(11, 0x180) /* Fixed Interval Timer */ 
1858	SET_IVOR(12, 0x1a0) /* Watchdog Timer */ 
1859	SET_IVOR(13, 0x1c0) /* Data TLB Error */ 
1860	SET_IVOR(14, 0x1e0) /* Instruction TLB Error */
1861	SET_IVOR(15, 0x040) /* Debug */
1862
1863	sync
1864
1865	blr
1866
1867_GLOBAL(setup_altivec_ivors)
1868	SET_IVOR(32, 0x200) /* AltiVec Unavailable */
1869	SET_IVOR(33, 0x220) /* AltiVec Assist */
1870	blr
1871
1872_GLOBAL(setup_perfmon_ivor)
1873	SET_IVOR(35, 0x260) /* Performance Monitor */
1874	blr
1875
1876_GLOBAL(setup_doorbell_ivors)
1877	SET_IVOR(36, 0x280) /* Processor Doorbell */
1878	SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
1879	blr
1880
1881_GLOBAL(setup_ehv_ivors)
1882	SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
1883	SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
1884	SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
1885	SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
1886	blr
1887
1888_GLOBAL(setup_lrat_ivor)
1889	SET_IVOR(42, 0x340) /* LRAT Error */
1890	blr
v4.6
 
   1/*
   2 *  Boot code and exception vectors for Book3E processors
   3 *
   4 *  Copyright (C) 2007 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
   5 *
   6 *  This program is free software; you can redistribute it and/or
   7 *  modify it under the terms of the GNU General Public License
   8 *  as published by the Free Software Foundation; either version
   9 *  2 of the License, or (at your option) any later version.
  10 */
  11
  12#include <linux/threads.h>
  13#include <asm/reg.h>
  14#include <asm/page.h>
  15#include <asm/ppc_asm.h>
  16#include <asm/asm-offsets.h>
  17#include <asm/cputable.h>
  18#include <asm/setup.h>
  19#include <asm/thread_info.h>
  20#include <asm/reg_a2.h>
  21#include <asm/exception-64e.h>
  22#include <asm/bug.h>
  23#include <asm/irqflags.h>
  24#include <asm/ptrace.h>
  25#include <asm/ppc-opcode.h>
  26#include <asm/mmu.h>
  27#include <asm/hw_irq.h>
  28#include <asm/kvm_asm.h>
  29#include <asm/kvm_booke_hv_asm.h>
 
 
  30
  31/* XXX This will ultimately add space for a special exception save
  32 *     structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
  33 *     when taking special interrupts. For now we don't support that,
  34 *     special interrupts from within a non-standard level will probably
  35 *     blow you up
  36 */
  37#define SPECIAL_EXC_SRR0	0
  38#define SPECIAL_EXC_SRR1	1
  39#define SPECIAL_EXC_SPRG_GEN	2
  40#define SPECIAL_EXC_SPRG_TLB	3
  41#define SPECIAL_EXC_MAS0	4
  42#define SPECIAL_EXC_MAS1	5
  43#define SPECIAL_EXC_MAS2	6
  44#define SPECIAL_EXC_MAS3	7
  45#define SPECIAL_EXC_MAS6	8
  46#define SPECIAL_EXC_MAS7	9
  47#define SPECIAL_EXC_MAS5	10	/* E.HV only */
  48#define SPECIAL_EXC_MAS8	11	/* E.HV only */
  49#define SPECIAL_EXC_IRQHAPPENED	12
  50#define SPECIAL_EXC_DEAR	13
  51#define SPECIAL_EXC_ESR		14
  52#define SPECIAL_EXC_SOFTE	15
  53#define SPECIAL_EXC_CSRR0	16
  54#define SPECIAL_EXC_CSRR1	17
  55/* must be even to keep 16-byte stack alignment */
  56#define SPECIAL_EXC_END		18
  57
  58#define SPECIAL_EXC_FRAME_SIZE	(INT_FRAME_SIZE + SPECIAL_EXC_END * 8)
  59#define SPECIAL_EXC_FRAME_OFFS  (INT_FRAME_SIZE - 288)
  60
  61#define SPECIAL_EXC_STORE(reg, name) \
  62	std	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
  63
  64#define SPECIAL_EXC_LOAD(reg, name) \
  65	ld	reg, (SPECIAL_EXC_##name * 8 + SPECIAL_EXC_FRAME_OFFS)(r1)
  66
  67special_reg_save:
  68	lbz	r9,PACAIRQHAPPENED(r13)
  69	RECONCILE_IRQ_STATE(r3,r4)
  70
  71	/*
  72	 * We only need (or have stack space) to save this stuff if
  73	 * we interrupted the kernel.
  74	 */
  75	ld	r3,_MSR(r1)
  76	andi.	r3,r3,MSR_PR
  77	bnelr
  78
  79	/* Copy info into temporary exception thread info */
  80	ld	r11,PACAKSAVE(r13)
  81	CURRENT_THREAD_INFO(r11, r11)
  82	CURRENT_THREAD_INFO(r12, r1)
  83	ld	r10,TI_FLAGS(r11)
  84	std	r10,TI_FLAGS(r12)
  85	ld	r10,TI_PREEMPT(r11)
  86	std	r10,TI_PREEMPT(r12)
  87	ld	r10,TI_TASK(r11)
  88	std	r10,TI_TASK(r12)
  89
  90	/*
  91	 * Advance to the next TLB exception frame for handler
  92	 * types that don't do it automatically.
  93	 */
  94	LOAD_REG_ADDR(r11,extlb_level_exc)
  95	lwz	r12,0(r11)
  96	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
  97	add	r10,r10,r12
  98	mtspr	SPRN_SPRG_TLB_EXFRAME,r10
  99
 100	/*
 101	 * Save registers needed to allow nesting of certain exceptions
 102	 * (such as TLB misses) inside special exception levels
 103	 */
 104	mfspr	r10,SPRN_SRR0
 105	SPECIAL_EXC_STORE(r10,SRR0)
 106	mfspr	r10,SPRN_SRR1
 107	SPECIAL_EXC_STORE(r10,SRR1)
 108	mfspr	r10,SPRN_SPRG_GEN_SCRATCH
 109	SPECIAL_EXC_STORE(r10,SPRG_GEN)
 110	mfspr	r10,SPRN_SPRG_TLB_SCRATCH
 111	SPECIAL_EXC_STORE(r10,SPRG_TLB)
 112	mfspr	r10,SPRN_MAS0
 113	SPECIAL_EXC_STORE(r10,MAS0)
 114	mfspr	r10,SPRN_MAS1
 115	SPECIAL_EXC_STORE(r10,MAS1)
 116	mfspr	r10,SPRN_MAS2
 117	SPECIAL_EXC_STORE(r10,MAS2)
 118	mfspr	r10,SPRN_MAS3
 119	SPECIAL_EXC_STORE(r10,MAS3)
 120	mfspr	r10,SPRN_MAS6
 121	SPECIAL_EXC_STORE(r10,MAS6)
 122	mfspr	r10,SPRN_MAS7
 123	SPECIAL_EXC_STORE(r10,MAS7)
 124BEGIN_FTR_SECTION
 125	mfspr	r10,SPRN_MAS5
 126	SPECIAL_EXC_STORE(r10,MAS5)
 127	mfspr	r10,SPRN_MAS8
 128	SPECIAL_EXC_STORE(r10,MAS8)
 129
 130	/* MAS5/8 could have inappropriate values if we interrupted KVM code */
 131	li	r10,0
 132	mtspr	SPRN_MAS5,r10
 133	mtspr	SPRN_MAS8,r10
 134END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 135	SPECIAL_EXC_STORE(r9,IRQHAPPENED)
 136
 137	mfspr	r10,SPRN_DEAR
 138	SPECIAL_EXC_STORE(r10,DEAR)
 139	mfspr	r10,SPRN_ESR
 140	SPECIAL_EXC_STORE(r10,ESR)
 141
 142	lbz	r10,PACASOFTIRQEN(r13)
 143	SPECIAL_EXC_STORE(r10,SOFTE)
 144	ld	r10,_NIP(r1)
 145	SPECIAL_EXC_STORE(r10,CSRR0)
 146	ld	r10,_MSR(r1)
 147	SPECIAL_EXC_STORE(r10,CSRR1)
 148
 149	blr
 150
 151ret_from_level_except:
 152	ld	r3,_MSR(r1)
 153	andi.	r3,r3,MSR_PR
 154	beq	1f
 155	b	ret_from_except
 1561:
 157
 158	LOAD_REG_ADDR(r11,extlb_level_exc)
 159	lwz	r12,0(r11)
 160	mfspr	r10,SPRN_SPRG_TLB_EXFRAME
 161	sub	r10,r10,r12
 162	mtspr	SPRN_SPRG_TLB_EXFRAME,r10
 163
 164	/*
 165	 * It's possible that the special level exception interrupted a
 166	 * TLB miss handler, and inserted the same entry that the
 167	 * interrupted handler was about to insert.  On CPUs without TLB
 168	 * write conditional, this can result in a duplicate TLB entry.
 169	 * Wipe all non-bolted entries to be safe.
 170	 *
 171	 * Note that this doesn't protect against any TLB misses
 172	 * we may take accessing the stack from here to the end of
 173	 * the special level exception.  It's not clear how we can
 174	 * reasonably protect against that, but only CPUs with
 175	 * neither TLB write conditional nor bolted kernel memory
 176	 * are affected.  Do any such CPUs even exist?
 177	 */
 178	PPC_TLBILX_ALL(0,R0)
 179
 180	REST_NVGPRS(r1)
 181
 182	SPECIAL_EXC_LOAD(r10,SRR0)
 183	mtspr	SPRN_SRR0,r10
 184	SPECIAL_EXC_LOAD(r10,SRR1)
 185	mtspr	SPRN_SRR1,r10
 186	SPECIAL_EXC_LOAD(r10,SPRG_GEN)
 187	mtspr	SPRN_SPRG_GEN_SCRATCH,r10
 188	SPECIAL_EXC_LOAD(r10,SPRG_TLB)
 189	mtspr	SPRN_SPRG_TLB_SCRATCH,r10
 190	SPECIAL_EXC_LOAD(r10,MAS0)
 191	mtspr	SPRN_MAS0,r10
 192	SPECIAL_EXC_LOAD(r10,MAS1)
 193	mtspr	SPRN_MAS1,r10
 194	SPECIAL_EXC_LOAD(r10,MAS2)
 195	mtspr	SPRN_MAS2,r10
 196	SPECIAL_EXC_LOAD(r10,MAS3)
 197	mtspr	SPRN_MAS3,r10
 198	SPECIAL_EXC_LOAD(r10,MAS6)
 199	mtspr	SPRN_MAS6,r10
 200	SPECIAL_EXC_LOAD(r10,MAS7)
 201	mtspr	SPRN_MAS7,r10
 202BEGIN_FTR_SECTION
 203	SPECIAL_EXC_LOAD(r10,MAS5)
 204	mtspr	SPRN_MAS5,r10
 205	SPECIAL_EXC_LOAD(r10,MAS8)
 206	mtspr	SPRN_MAS8,r10
 207END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
 208
 209	lbz	r6,PACASOFTIRQEN(r13)
 210	ld	r5,SOFTE(r1)
 211
 212	/* Interrupts had better not already be enabled... */
 213	twnei	r6,0
 214
 215	cmpwi	cr0,r5,0
 216	beq	1f
 217
 218	TRACE_ENABLE_INTS
 219	stb	r5,PACASOFTIRQEN(r13)
 2201:
 221	/*
 222	 * Restore PACAIRQHAPPENED rather than setting it based on
 223	 * the return MSR[EE], since we could have interrupted
 224	 * __check_irq_replay() or other inconsistent transitory
 225	 * states that must remain that way.
 226	 */
 227	SPECIAL_EXC_LOAD(r10,IRQHAPPENED)
 228	stb	r10,PACAIRQHAPPENED(r13)
 229
 230	SPECIAL_EXC_LOAD(r10,DEAR)
 231	mtspr	SPRN_DEAR,r10
 232	SPECIAL_EXC_LOAD(r10,ESR)
 233	mtspr	SPRN_ESR,r10
 234
 235	stdcx.	r0,0,r1		/* to clear the reservation */
 236
 237	REST_4GPRS(2, r1)
 238	REST_4GPRS(6, r1)
 239
 240	ld	r10,_CTR(r1)
 241	ld	r11,_XER(r1)
 242	mtctr	r10
 243	mtxer	r11
 244
 245	blr
 246
 247.macro ret_from_level srr0 srr1 paca_ex scratch
 248	bl	ret_from_level_except
 249
 250	ld	r10,_LINK(r1)
 251	ld	r11,_CCR(r1)
 252	ld	r0,GPR13(r1)
 253	mtlr	r10
 254	mtcr	r11
 255
 256	ld	r10,GPR10(r1)
 257	ld	r11,GPR11(r1)
 258	ld	r12,GPR12(r1)
 259	mtspr	\scratch,r0
 260
 261	std	r10,\paca_ex+EX_R10(r13);
 262	std	r11,\paca_ex+EX_R11(r13);
 263	ld	r10,_NIP(r1)
 264	ld	r11,_MSR(r1)
 265	ld	r0,GPR0(r1)
 266	ld	r1,GPR1(r1)
 267	mtspr	\srr0,r10
 268	mtspr	\srr1,r11
 269	ld	r10,\paca_ex+EX_R10(r13)
 270	ld	r11,\paca_ex+EX_R11(r13)
 271	mfspr	r13,\scratch
 272.endm
 273
 274ret_from_crit_except:
 275	ret_from_level SPRN_CSRR0 SPRN_CSRR1 PACA_EXCRIT SPRN_SPRG_CRIT_SCRATCH
 276	rfci
 277
 278ret_from_mc_except:
 279	ret_from_level SPRN_MCSRR0 SPRN_MCSRR1 PACA_EXMC SPRN_SPRG_MC_SCRATCH
 280	rfmci
 281
 282/* Exception prolog code for all exceptions */
 283#define EXCEPTION_PROLOG(n, intnum, type, addition)	    		    \
 284	mtspr	SPRN_SPRG_##type##_SCRATCH,r13;	/* get spare registers */   \
 285	mfspr	r13,SPRN_SPRG_PACA;	/* get PACA */			    \
 286	std	r10,PACA_EX##type+EX_R10(r13);				    \
 287	std	r11,PACA_EX##type+EX_R11(r13);				    \
 288	mfcr	r10;			/* save CR */			    \
 289	mfspr	r11,SPRN_##type##_SRR1;/* what are we coming from */	    \
 290	DO_KVM	intnum,SPRN_##type##_SRR1;    /* KVM hook */		    \
 291	stw	r10,PACA_EX##type+EX_CR(r13); /* save old CR in the PACA */ \
 292	addition;			/* additional code for that exc. */ \
 293	std	r1,PACA_EX##type+EX_R1(r13); /* save old r1 in the PACA */  \
 294	type##_SET_KSTACK;		/* get special stack if necessary */\
 295	andi.	r10,r11,MSR_PR;		/* save stack pointer */	    \
 296	beq	1f;			/* branch around if supervisor */   \
 297	ld	r1,PACAKSAVE(r13);	/* get kernel stack coming from usr */\
 2981:	cmpdi	cr1,r1,0;		/* check if SP makes sense */	    \
 
 299	bge-	cr1,exc_##n##_bad_stack;/* bad stack (TODO: out of line) */ \
 300	mfspr	r10,SPRN_##type##_SRR0;	/* read SRR0 before touching stack */
 301
 302/* Exception type-specific macros */
 303#define	GEN_SET_KSTACK							    \
 304	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack */
 305#define SPRN_GEN_SRR0	SPRN_SRR0
 306#define SPRN_GEN_SRR1	SPRN_SRR1
 307
 308#define	GDBELL_SET_KSTACK	GEN_SET_KSTACK
 309#define SPRN_GDBELL_SRR0	SPRN_GSRR0
 310#define SPRN_GDBELL_SRR1	SPRN_GSRR1
 311
 312#define CRIT_SET_KSTACK						            \
 313	ld	r1,PACA_CRIT_STACK(r13);				    \
 314	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
 315#define SPRN_CRIT_SRR0	SPRN_CSRR0
 316#define SPRN_CRIT_SRR1	SPRN_CSRR1
 317
 318#define DBG_SET_KSTACK						            \
 319	ld	r1,PACA_DBG_STACK(r13);					    \
 320	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
 321#define SPRN_DBG_SRR0	SPRN_DSRR0
 322#define SPRN_DBG_SRR1	SPRN_DSRR1
 323
 324#define MC_SET_KSTACK						            \
 325	ld	r1,PACA_MC_STACK(r13);					    \
 326	subi	r1,r1,SPECIAL_EXC_FRAME_SIZE
 327#define SPRN_MC_SRR0	SPRN_MCSRR0
 328#define SPRN_MC_SRR1	SPRN_MCSRR1
 329
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 330#define NORMAL_EXCEPTION_PROLOG(n, intnum, addition)			    \
 331	EXCEPTION_PROLOG(n, intnum, GEN, addition##_GEN(n))
 332
 333#define CRIT_EXCEPTION_PROLOG(n, intnum, addition)			    \
 334	EXCEPTION_PROLOG(n, intnum, CRIT, addition##_CRIT(n))
 335
 336#define DBG_EXCEPTION_PROLOG(n, intnum, addition)			    \
 337	EXCEPTION_PROLOG(n, intnum, DBG, addition##_DBG(n))
 338
 339#define MC_EXCEPTION_PROLOG(n, intnum, addition)			    \
 340	EXCEPTION_PROLOG(n, intnum, MC, addition##_MC(n))
 341
 342#define GDBELL_EXCEPTION_PROLOG(n, intnum, addition)			    \
 343	EXCEPTION_PROLOG(n, intnum, GDBELL, addition##_GDBELL(n))
 344
 345/* Variants of the "addition" argument for the prolog
 346 */
 347#define PROLOG_ADDITION_NONE_GEN(n)
 348#define PROLOG_ADDITION_NONE_GDBELL(n)
 349#define PROLOG_ADDITION_NONE_CRIT(n)
 350#define PROLOG_ADDITION_NONE_DBG(n)
 351#define PROLOG_ADDITION_NONE_MC(n)
 352
 353#define PROLOG_ADDITION_MASKABLE_GEN(n)					    \
 354	lbz	r10,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */	    \
 355	cmpwi	cr0,r10,0;		/* yes -> go out of line */	    \
 356	beq	masked_interrupt_book3e_##n
 357
 358#define PROLOG_ADDITION_2REGS_GEN(n)					    \
 359	std	r14,PACA_EXGEN+EX_R14(r13);				    \
 360	std	r15,PACA_EXGEN+EX_R15(r13)
 361
 362#define PROLOG_ADDITION_1REG_GEN(n)					    \
 363	std	r14,PACA_EXGEN+EX_R14(r13);
 364
 365#define PROLOG_ADDITION_2REGS_CRIT(n)					    \
 366	std	r14,PACA_EXCRIT+EX_R14(r13);				    \
 367	std	r15,PACA_EXCRIT+EX_R15(r13)
 368
 369#define PROLOG_ADDITION_2REGS_DBG(n)					    \
 370	std	r14,PACA_EXDBG+EX_R14(r13);				    \
 371	std	r15,PACA_EXDBG+EX_R15(r13)
 372
 373#define PROLOG_ADDITION_2REGS_MC(n)					    \
 374	std	r14,PACA_EXMC+EX_R14(r13);				    \
 375	std	r15,PACA_EXMC+EX_R15(r13)
 376
 377
 378/* Core exception code for all exceptions except TLB misses. */
 379#define EXCEPTION_COMMON_LVL(n, scratch, excf)				    \
 380exc_##n##_common:							    \
 381	std	r0,GPR0(r1);		/* save r0 in stackframe */	    \
 382	std	r2,GPR2(r1);		/* save r2 in stackframe */	    \
 383	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */    \
 384	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	    \
 385	std	r9,GPR9(r1);		/* save r9 in stackframe */	    \
 386	std	r10,_NIP(r1);		/* save SRR0 to stackframe */	    \
 387	std	r11,_MSR(r1);		/* save SRR1 to stackframe */	    \
 388	beq	2f;			/* if from kernel mode */	    \
 389	ACCOUNT_CPU_USER_ENTRY(r10,r11);/* accounting (uses cr0+eq) */	    \
 3902:	ld	r3,excf+EX_R10(r13);	/* get back r10 */		    \
 391	ld	r4,excf+EX_R11(r13);	/* get back r11 */		    \
 392	mfspr	r5,scratch;		/* get back r13 */		    \
 393	std	r12,GPR12(r1);		/* save r12 in stackframe */	    \
 394	ld	r2,PACATOC(r13);	/* get kernel TOC into r2 */	    \
 395	mflr	r6;			/* save LR in stackframe */	    \
 396	mfctr	r7;			/* save CTR in stackframe */	    \
 397	mfspr	r8,SPRN_XER;		/* save XER in stackframe */	    \
 398	ld	r9,excf+EX_R1(r13);	/* load orig r1 back from PACA */   \
 399	lwz	r10,excf+EX_CR(r13);	/* load orig CR back from PACA	*/  \
 400	lbz	r11,PACASOFTIRQEN(r13);	/* get current IRQ softe */	    \
 401	ld	r12,exception_marker@toc(r2);				    \
 402	li	r0,0;							    \
 403	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
 404	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
 405	std	r5,GPR13(r1);		/* save it to stackframe */	    \
 406	std	r6,_LINK(r1);						    \
 407	std	r7,_CTR(r1);						    \
 408	std	r8,_XER(r1);						    \
 409	li	r3,(n)+1;		/* indicate partial regs in trap */ \
 410	std	r9,0(r1);		/* store stack frame back link */   \
 411	std	r10,_CCR(r1);		/* store orig CR in stackframe */   \
 412	std	r9,GPR1(r1);		/* store stack frame back link */   \
 413	std	r11,SOFTE(r1);		/* and save it to stackframe */     \
 414	std	r12,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */	    \
 415	std	r3,_TRAP(r1);		/* set trap number		*/  \
 416	std	r0,RESULT(r1);		/* clear regs->result */
 417
 418#define EXCEPTION_COMMON(n) \
 419	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN)
 420#define EXCEPTION_COMMON_CRIT(n) \
 421	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_CRIT_SCRATCH, PACA_EXCRIT)
 422#define EXCEPTION_COMMON_MC(n) \
 423	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_MC_SCRATCH, PACA_EXMC)
 424#define EXCEPTION_COMMON_DBG(n) \
 425	EXCEPTION_COMMON_LVL(n, SPRN_SPRG_DBG_SCRATCH, PACA_EXDBG)
 426
 427/*
 428 * This is meant for exceptions that don't immediately hard-enable.  We
 429 * set a bit in paca->irq_happened to ensure that a subsequent call to
 430 * arch_local_irq_restore() will properly hard-enable and avoid the
 431 * fast-path, and then reconcile irq state.
 432 */
 433#define INTS_DISABLE	RECONCILE_IRQ_STATE(r3,r4)
 434
 435/*
 436 * This is called by exceptions that don't use INTS_DISABLE (that did not
 437 * touch irq indicators in the PACA).  This will restore MSR:EE to it's
 438 * previous value
 439 *
 440 * XXX In the long run, we may want to open-code it in order to separate the
 441 *     load from the wrtee, thus limiting the latency caused by the dependency
 442 *     but at this point, I'll favor code clarity until we have a near to final
 443 *     implementation
 444 */
 445#define INTS_RESTORE_HARD						    \
 446	ld	r11,_MSR(r1);						    \
 447	wrtee	r11;
 448
 449/* XXX FIXME: Restore r14/r15 when necessary */
 450#define BAD_STACK_TRAMPOLINE(n)						    \
 451exc_##n##_bad_stack:							    \
 452	li	r1,(n);			/* get exception number */	    \
 453	sth	r1,PACA_TRAP_SAVE(r13);	/* store trap */		    \
 454	b	bad_stack_book3e;	/* bad stack error */
 455
 456/* WARNING: If you change the layout of this stub, make sure you chcek
 457	*   the debug exception handler which handles single stepping
 458	*   into exceptions from userspace, and the MM code in
 459	*   arch/powerpc/mm/tlb_nohash.c which patches the branch here
 460	*   and would need to be updated if that branch is moved
 461	*/
 462#define	EXCEPTION_STUB(loc, label)					\
 463	. = interrupt_base_book3e + loc;				\
 464	nop;	/* To make debug interrupts happy */			\
 465	b	exc_##label##_book3e;
 466
 467#define ACK_NONE(r)
 468#define ACK_DEC(r)							\
 469	lis	r,TSR_DIS@h;						\
 470	mtspr	SPRN_TSR,r
 471#define ACK_FIT(r)							\
 472	lis	r,TSR_FIS@h;						\
 473	mtspr	SPRN_TSR,r
 474
 475/* Used by asynchronous interrupt that may happen in the idle loop.
 476 *
 477 * This check if the thread was in the idle loop, and if yes, returns
 478 * to the caller rather than the PC. This is to avoid a race if
 479 * interrupts happen before the wait instruction.
 480 */
 481#define CHECK_NAPPING()							\
 482	CURRENT_THREAD_INFO(r11, r1);					\
 483	ld	r10,TI_LOCAL_FLAGS(r11);				\
 484	andi.	r9,r10,_TLF_NAPPING;					\
 485	beq+	1f;							\
 486	ld	r8,_LINK(r1);						\
 487	rlwinm	r7,r10,0,~_TLF_NAPPING;					\
 488	std	r8,_NIP(r1);						\
 489	std	r7,TI_LOCAL_FLAGS(r11);					\
 4901:
 491
 492
 493#define MASKABLE_EXCEPTION(trapnum, intnum, label, hdlr, ack)		\
 494	START_EXCEPTION(label);						\
 495	NORMAL_EXCEPTION_PROLOG(trapnum, intnum, PROLOG_ADDITION_MASKABLE)\
 496	EXCEPTION_COMMON(trapnum)					\
 497	INTS_DISABLE;							\
 498	ack(r8);							\
 499	CHECK_NAPPING();						\
 500	addi	r3,r1,STACK_FRAME_OVERHEAD;				\
 501	bl	hdlr;							\
 502	b	ret_from_except_lite;
 503
 504/* This value is used to mark exception frames on the stack. */
 505	.section	".toc","aw"
 506exception_marker:
 507	.tc	ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
 508
 509
 510/*
 511 * And here we have the exception vectors !
 512 */
 513
 514	.text
 515	.balign	0x1000
 516	.globl interrupt_base_book3e
 517interrupt_base_book3e:					/* fake trap */
 518	EXCEPTION_STUB(0x000, machine_check)
 519	EXCEPTION_STUB(0x020, critical_input)		/* 0x0100 */
 520	EXCEPTION_STUB(0x040, debug_crit)		/* 0x0d00 */
 521	EXCEPTION_STUB(0x060, data_storage)		/* 0x0300 */
 522	EXCEPTION_STUB(0x080, instruction_storage)	/* 0x0400 */
 523	EXCEPTION_STUB(0x0a0, external_input)		/* 0x0500 */
 524	EXCEPTION_STUB(0x0c0, alignment)		/* 0x0600 */
 525	EXCEPTION_STUB(0x0e0, program)			/* 0x0700 */
 526	EXCEPTION_STUB(0x100, fp_unavailable)		/* 0x0800 */
 527	EXCEPTION_STUB(0x120, system_call)		/* 0x0c00 */
 528	EXCEPTION_STUB(0x140, ap_unavailable)		/* 0x0f20 */
 529	EXCEPTION_STUB(0x160, decrementer)		/* 0x0900 */
 530	EXCEPTION_STUB(0x180, fixed_interval)		/* 0x0980 */
 531	EXCEPTION_STUB(0x1a0, watchdog)			/* 0x09f0 */
 532	EXCEPTION_STUB(0x1c0, data_tlb_miss)
 533	EXCEPTION_STUB(0x1e0, instruction_tlb_miss)
 534	EXCEPTION_STUB(0x200, altivec_unavailable)
 535	EXCEPTION_STUB(0x220, altivec_assist)
 536	EXCEPTION_STUB(0x260, perfmon)
 537	EXCEPTION_STUB(0x280, doorbell)
 538	EXCEPTION_STUB(0x2a0, doorbell_crit)
 539	EXCEPTION_STUB(0x2c0, guest_doorbell)
 540	EXCEPTION_STUB(0x2e0, guest_doorbell_crit)
 541	EXCEPTION_STUB(0x300, hypercall)
 542	EXCEPTION_STUB(0x320, ehpriv)
 543	EXCEPTION_STUB(0x340, lrat_error)
 544
 545	.globl __end_interrupts
 546__end_interrupts:
 547
 548/* Critical Input Interrupt */
 549	START_EXCEPTION(critical_input);
 550	CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL,
 551			      PROLOG_ADDITION_NONE)
 552	EXCEPTION_COMMON_CRIT(0x100)
 553	bl	save_nvgprs
 554	bl	special_reg_save
 555	CHECK_NAPPING();
 556	addi	r3,r1,STACK_FRAME_OVERHEAD
 557	bl	unknown_exception
 558	b	ret_from_crit_except
 559
 560/* Machine Check Interrupt */
 561	START_EXCEPTION(machine_check);
 562	MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK,
 563			    PROLOG_ADDITION_NONE)
 564	EXCEPTION_COMMON_MC(0x000)
 565	bl	save_nvgprs
 566	bl	special_reg_save
 567	CHECK_NAPPING();
 568	addi	r3,r1,STACK_FRAME_OVERHEAD
 569	bl	machine_check_exception
 570	b	ret_from_mc_except
 571
 572/* Data Storage Interrupt */
 573	START_EXCEPTION(data_storage)
 574	NORMAL_EXCEPTION_PROLOG(0x300, BOOKE_INTERRUPT_DATA_STORAGE,
 575				PROLOG_ADDITION_2REGS)
 576	mfspr	r14,SPRN_DEAR
 577	mfspr	r15,SPRN_ESR
 578	EXCEPTION_COMMON(0x300)
 579	INTS_DISABLE
 580	b	storage_fault_common
 581
 582/* Instruction Storage Interrupt */
 583	START_EXCEPTION(instruction_storage);
 584	NORMAL_EXCEPTION_PROLOG(0x400, BOOKE_INTERRUPT_INST_STORAGE,
 585				PROLOG_ADDITION_2REGS)
 586	li	r15,0
 587	mr	r14,r10
 588	EXCEPTION_COMMON(0x400)
 589	INTS_DISABLE
 590	b	storage_fault_common
 591
 592/* External Input Interrupt */
 593	MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL,
 594			   external_input, do_IRQ, ACK_NONE)
 595
 596/* Alignment */
 597	START_EXCEPTION(alignment);
 598	NORMAL_EXCEPTION_PROLOG(0x600, BOOKE_INTERRUPT_ALIGNMENT,
 599				PROLOG_ADDITION_2REGS)
 600	mfspr	r14,SPRN_DEAR
 601	mfspr	r15,SPRN_ESR
 602	EXCEPTION_COMMON(0x600)
 603	b	alignment_more	/* no room, go out of line */
 604
 605/* Program Interrupt */
 606	START_EXCEPTION(program);
 607	NORMAL_EXCEPTION_PROLOG(0x700, BOOKE_INTERRUPT_PROGRAM,
 608				PROLOG_ADDITION_1REG)
 609	mfspr	r14,SPRN_ESR
 610	EXCEPTION_COMMON(0x700)
 611	INTS_DISABLE
 612	std	r14,_DSISR(r1)
 613	addi	r3,r1,STACK_FRAME_OVERHEAD
 614	ld	r14,PACA_EXGEN+EX_R14(r13)
 615	bl	save_nvgprs
 616	bl	program_check_exception
 617	b	ret_from_except
 618
 619/* Floating Point Unavailable Interrupt */
 620	START_EXCEPTION(fp_unavailable);
 621	NORMAL_EXCEPTION_PROLOG(0x800, BOOKE_INTERRUPT_FP_UNAVAIL,
 622				PROLOG_ADDITION_NONE)
 623	/* we can probably do a shorter exception entry for that one... */
 624	EXCEPTION_COMMON(0x800)
 625	ld	r12,_MSR(r1)
 626	andi.	r0,r12,MSR_PR;
 627	beq-	1f
 628	bl	load_up_fpu
 629	b	fast_exception_return
 6301:	INTS_DISABLE
 631	bl	save_nvgprs
 632	addi	r3,r1,STACK_FRAME_OVERHEAD
 633	bl	kernel_fp_unavailable_exception
 634	b	ret_from_except
 635
 636/* Altivec Unavailable Interrupt */
 637	START_EXCEPTION(altivec_unavailable);
 638	NORMAL_EXCEPTION_PROLOG(0x200, BOOKE_INTERRUPT_ALTIVEC_UNAVAIL,
 639				PROLOG_ADDITION_NONE)
 640	/* we can probably do a shorter exception entry for that one... */
 641	EXCEPTION_COMMON(0x200)
 642#ifdef CONFIG_ALTIVEC
 643BEGIN_FTR_SECTION
 644	ld	r12,_MSR(r1)
 645	andi.	r0,r12,MSR_PR;
 646	beq-	1f
 647	bl	load_up_altivec
 648	b	fast_exception_return
 6491:
 650END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 651#endif
 652	INTS_DISABLE
 653	bl	save_nvgprs
 654	addi	r3,r1,STACK_FRAME_OVERHEAD
 655	bl	altivec_unavailable_exception
 656	b	ret_from_except
 657
 658/* AltiVec Assist */
 659	START_EXCEPTION(altivec_assist);
 660	NORMAL_EXCEPTION_PROLOG(0x220,
 661				BOOKE_INTERRUPT_ALTIVEC_ASSIST,
 662				PROLOG_ADDITION_NONE)
 663	EXCEPTION_COMMON(0x220)
 664	INTS_DISABLE
 665	bl	save_nvgprs
 666	addi	r3,r1,STACK_FRAME_OVERHEAD
 667#ifdef CONFIG_ALTIVEC
 668BEGIN_FTR_SECTION
 669	bl	altivec_assist_exception
 670END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 671#else
 672	bl	unknown_exception
 673#endif
 674	b	ret_from_except
 675
 676
 677/* Decrementer Interrupt */
 678	MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER,
 679			   decrementer, timer_interrupt, ACK_DEC)
 680
 681/* Fixed Interval Timer Interrupt */
 682	MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT,
 683			   fixed_interval, unknown_exception, ACK_FIT)
 684
 685/* Watchdog Timer Interrupt */
 686	START_EXCEPTION(watchdog);
 687	CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG,
 688			      PROLOG_ADDITION_NONE)
 689	EXCEPTION_COMMON_CRIT(0x9f0)
 690	bl	save_nvgprs
 691	bl	special_reg_save
 692	CHECK_NAPPING();
 693	addi	r3,r1,STACK_FRAME_OVERHEAD
 694#ifdef CONFIG_BOOKE_WDT
 695	bl	WatchdogException
 696#else
 697	bl	unknown_exception
 698#endif
 699	b	ret_from_crit_except
 700
 701/* System Call Interrupt */
 702	START_EXCEPTION(system_call)
 703	mr	r9,r13			/* keep a copy of userland r13 */
 704	mfspr	r11,SPRN_SRR0		/* get return address */
 705	mfspr	r12,SPRN_SRR1		/* get previous MSR */
 706	mfspr	r13,SPRN_SPRG_PACA	/* get our PACA */
 707	b	system_call_common
 708
 709/* Auxiliary Processor Unavailable Interrupt */
 710	START_EXCEPTION(ap_unavailable);
 711	NORMAL_EXCEPTION_PROLOG(0xf20, BOOKE_INTERRUPT_AP_UNAVAIL,
 712				PROLOG_ADDITION_NONE)
 713	EXCEPTION_COMMON(0xf20)
 714	INTS_DISABLE
 715	bl	save_nvgprs
 716	addi	r3,r1,STACK_FRAME_OVERHEAD
 717	bl	unknown_exception
 718	b	ret_from_except
 719
 720/* Debug exception as a critical interrupt*/
 721	START_EXCEPTION(debug_crit);
 722	CRIT_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
 723			      PROLOG_ADDITION_2REGS)
 724
 725	/*
 726	 * If there is a single step or branch-taken exception in an
 727	 * exception entry sequence, it was probably meant to apply to
 728	 * the code where the exception occurred (since exception entry
 729	 * doesn't turn off DE automatically).  We simulate the effect
 730	 * of turning off DE on entry to an exception handler by turning
 731	 * off DE in the CSRR1 value and clearing the debug status.
 732	 */
 733
 734	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
 735	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
 736	beq+	1f
 737
 738	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
 739	LOAD_REG_IMMEDIATE(r15,__end_interrupts)
 
 
 740	cmpld	cr0,r10,r14
 741	cmpld	cr1,r10,r15
 
 
 
 
 
 
 742	blt+	cr0,1f
 743	bge+	cr1,1f
 744
 745	/* here it looks like we got an inappropriate debug exception. */
 746	lis	r14,(DBSR_IC|DBSR_BT)@h		/* clear the event */
 747	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the CSRR1 value */
 748	mtspr	SPRN_DBSR,r14
 749	mtspr	SPRN_CSRR1,r11
 750	lwz	r10,PACA_EXCRIT+EX_CR(r13)	/* restore registers */
 751	ld	r1,PACA_EXCRIT+EX_R1(r13)
 752	ld	r14,PACA_EXCRIT+EX_R14(r13)
 753	ld	r15,PACA_EXCRIT+EX_R15(r13)
 754	mtcr	r10
 755	ld	r10,PACA_EXCRIT+EX_R10(r13)	/* restore registers */
 756	ld	r11,PACA_EXCRIT+EX_R11(r13)
 757	mfspr	r13,SPRN_SPRG_CRIT_SCRATCH
 758	rfci
 759
 760	/* Normal debug exception */
 761	/* XXX We only handle coming from userspace for now since we can't
 762	 *     quite save properly an interrupted kernel state yet
 763	 */
 7641:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
 765	beq	kernel_dbg_exc;		/* if from kernel mode */
 766
 767	/* Now we mash up things to make it look like we are coming on a
 768	 * normal exception
 769	 */
 770	mfspr	r14,SPRN_DBSR
 771	EXCEPTION_COMMON_CRIT(0xd00)
 772	std	r14,_DSISR(r1)
 773	addi	r3,r1,STACK_FRAME_OVERHEAD
 774	mr	r4,r14
 775	ld	r14,PACA_EXCRIT+EX_R14(r13)
 776	ld	r15,PACA_EXCRIT+EX_R15(r13)
 777	bl	save_nvgprs
 778	bl	DebugException
 779	b	ret_from_except
 780
 781kernel_dbg_exc:
 782	b	.	/* NYI */
 783
 784/* Debug exception as a debug interrupt*/
 785	START_EXCEPTION(debug_debug);
 786	DBG_EXCEPTION_PROLOG(0xd00, BOOKE_INTERRUPT_DEBUG,
 787						 PROLOG_ADDITION_2REGS)
 788
 789	/*
 790	 * If there is a single step or branch-taken exception in an
 791	 * exception entry sequence, it was probably meant to apply to
 792	 * the code where the exception occurred (since exception entry
 793	 * doesn't turn off DE automatically).  We simulate the effect
 794	 * of turning off DE on entry to an exception handler by turning
 795	 * off DE in the DSRR1 value and clearing the debug status.
 796	 */
 797
 798	mfspr	r14,SPRN_DBSR		/* check single-step/branch taken */
 799	andis.	r15,r14,(DBSR_IC|DBSR_BT)@h
 800	beq+	1f
 801
 802	LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
 803	LOAD_REG_IMMEDIATE(r15,__end_interrupts)
 
 
 804	cmpld	cr0,r10,r14
 805	cmpld	cr1,r10,r15
 
 
 
 
 
 
 806	blt+	cr0,1f
 807	bge+	cr1,1f
 808
 809	/* here it looks like we got an inappropriate debug exception. */
 810	lis	r14,(DBSR_IC|DBSR_BT)@h		/* clear the event */
 811	rlwinm	r11,r11,0,~MSR_DE	/* clear DE in the DSRR1 value */
 812	mtspr	SPRN_DBSR,r14
 813	mtspr	SPRN_DSRR1,r11
 814	lwz	r10,PACA_EXDBG+EX_CR(r13)	/* restore registers */
 815	ld	r1,PACA_EXDBG+EX_R1(r13)
 816	ld	r14,PACA_EXDBG+EX_R14(r13)
 817	ld	r15,PACA_EXDBG+EX_R15(r13)
 818	mtcr	r10
 819	ld	r10,PACA_EXDBG+EX_R10(r13)	/* restore registers */
 820	ld	r11,PACA_EXDBG+EX_R11(r13)
 821	mfspr	r13,SPRN_SPRG_DBG_SCRATCH
 822	rfdi
 823
 824	/* Normal debug exception */
 825	/* XXX We only handle coming from userspace for now since we can't
 826	 *     quite save properly an interrupted kernel state yet
 827	 */
 8281:	andi.	r14,r11,MSR_PR;		/* check for userspace again */
 829	beq	kernel_dbg_exc;		/* if from kernel mode */
 830
 831	/* Now we mash up things to make it look like we are coming on a
 832	 * normal exception
 833	 */
 834	mfspr	r14,SPRN_DBSR
 835	EXCEPTION_COMMON_DBG(0xd08)
 836	INTS_DISABLE
 837	std	r14,_DSISR(r1)
 838	addi	r3,r1,STACK_FRAME_OVERHEAD
 839	mr	r4,r14
 840	ld	r14,PACA_EXDBG+EX_R14(r13)
 841	ld	r15,PACA_EXDBG+EX_R15(r13)
 842	bl	save_nvgprs
 843	bl	DebugException
 844	b	ret_from_except
 845
 846	START_EXCEPTION(perfmon);
 847	NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR,
 848				PROLOG_ADDITION_NONE)
 849	EXCEPTION_COMMON(0x260)
 850	INTS_DISABLE
 851	CHECK_NAPPING()
 852	addi	r3,r1,STACK_FRAME_OVERHEAD
 853	bl	performance_monitor_exception
 854	b	ret_from_except_lite
 855
 856/* Doorbell interrupt */
 857	MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL,
 858			   doorbell, doorbell_exception, ACK_NONE)
 859
 860/* Doorbell critical Interrupt */
 861	START_EXCEPTION(doorbell_crit);
 862	CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL,
 863			      PROLOG_ADDITION_NONE)
 864	EXCEPTION_COMMON_CRIT(0x2a0)
 865	bl	save_nvgprs
 866	bl	special_reg_save
 867	CHECK_NAPPING();
 868	addi	r3,r1,STACK_FRAME_OVERHEAD
 869	bl	unknown_exception
 870	b	ret_from_crit_except
 871
 872/*
 873 *	Guest doorbell interrupt
 874 *	This general exception use GSRRx save/restore registers
 875 */
 876	START_EXCEPTION(guest_doorbell);
 877	GDBELL_EXCEPTION_PROLOG(0x2c0, BOOKE_INTERRUPT_GUEST_DBELL,
 878			        PROLOG_ADDITION_NONE)
 879	EXCEPTION_COMMON(0x2c0)
 880	addi	r3,r1,STACK_FRAME_OVERHEAD
 881	bl	save_nvgprs
 882	INTS_RESTORE_HARD
 883	bl	unknown_exception
 884	b	ret_from_except
 885
 886/* Guest Doorbell critical Interrupt */
 887	START_EXCEPTION(guest_doorbell_crit);
 888	CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT,
 889			      PROLOG_ADDITION_NONE)
 890	EXCEPTION_COMMON_CRIT(0x2e0)
 891	bl	save_nvgprs
 892	bl	special_reg_save
 893	CHECK_NAPPING();
 894	addi	r3,r1,STACK_FRAME_OVERHEAD
 895	bl	unknown_exception
 896	b	ret_from_crit_except
 897
 898/* Hypervisor call */
 899	START_EXCEPTION(hypercall);
 900	NORMAL_EXCEPTION_PROLOG(0x310, BOOKE_INTERRUPT_HV_SYSCALL,
 901			        PROLOG_ADDITION_NONE)
 902	EXCEPTION_COMMON(0x310)
 903	addi	r3,r1,STACK_FRAME_OVERHEAD
 904	bl	save_nvgprs
 905	INTS_RESTORE_HARD
 906	bl	unknown_exception
 907	b	ret_from_except
 908
 909/* Embedded Hypervisor priviledged  */
 910	START_EXCEPTION(ehpriv);
 911	NORMAL_EXCEPTION_PROLOG(0x320, BOOKE_INTERRUPT_HV_PRIV,
 912			        PROLOG_ADDITION_NONE)
 913	EXCEPTION_COMMON(0x320)
 914	addi	r3,r1,STACK_FRAME_OVERHEAD
 915	bl	save_nvgprs
 916	INTS_RESTORE_HARD
 917	bl	unknown_exception
 918	b	ret_from_except
 919
 920/* LRAT Error interrupt */
 921	START_EXCEPTION(lrat_error);
 922	NORMAL_EXCEPTION_PROLOG(0x340, BOOKE_INTERRUPT_LRAT_ERROR,
 923			        PROLOG_ADDITION_NONE)
 924	EXCEPTION_COMMON(0x340)
 925	addi	r3,r1,STACK_FRAME_OVERHEAD
 926	bl	.save_nvgprs
 927	INTS_RESTORE_HARD
 928	bl	.unknown_exception
 929	b	.ret_from_except
 930
 931/*
 932 * An interrupt came in while soft-disabled; We mark paca->irq_happened
 933 * accordingly and if the interrupt is level sensitive, we hard disable
 
 
 934 */
 935
 936.macro masked_interrupt_book3e paca_irq full_mask
 937	lbz	r10,PACAIRQHAPPENED(r13)
 
 
 
 938	ori	r10,r10,\paca_irq
 
 939	stb	r10,PACAIRQHAPPENED(r13)
 940
 941	.if \full_mask == 1
 942	rldicl	r10,r11,48,1		/* clear MSR_EE */
 943	rotldi	r11,r10,16
 944	mtspr	SPRN_SRR1,r11
 945	.endif
 946
 947	lwz	r11,PACA_EXGEN+EX_CR(r13)
 948	mtcr	r11
 949	ld	r10,PACA_EXGEN+EX_R10(r13)
 950	ld	r11,PACA_EXGEN+EX_R11(r13)
 951	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
 952	rfi
 953	b	.
 954.endm
 955
 956masked_interrupt_book3e_0x500:
 957	// XXX When adding support for EPR, use PACA_IRQ_EE_EDGE
 958	masked_interrupt_book3e PACA_IRQ_EE 1
 959
 960masked_interrupt_book3e_0x900:
 961	ACK_DEC(r10);
 962	masked_interrupt_book3e PACA_IRQ_DEC 0
 963
 964masked_interrupt_book3e_0x980:
 965	ACK_FIT(r10);
 966	masked_interrupt_book3e PACA_IRQ_DEC 0
 967
 968masked_interrupt_book3e_0x280:
 969masked_interrupt_book3e_0x2c0:
 970	masked_interrupt_book3e PACA_IRQ_DBELL 0
 971
 972/*
 973 * Called from arch_local_irq_enable when an interrupt needs
 974 * to be resent. r3 contains either 0x500,0x900,0x260 or 0x280
 975 * to indicate the kind of interrupt. MSR:EE is already off.
 976 * We generate a stackframe like if a real interrupt had happened.
 977 *
 978 * Note: While MSR:EE is off, we need to make sure that _MSR
 979 * in the generated frame has EE set to 1 or the exception
 980 * handler will not properly re-enable them.
 981 */
 982_GLOBAL(__replay_interrupt)
 983	/* We are going to jump to the exception common code which
 984	 * will retrieve various register values from the PACA which
 985	 * we don't give a damn about.
 986	 */
 987	mflr	r10
 988	mfmsr	r11
 989	mfcr	r4
 990	mtspr	SPRN_SPRG_GEN_SCRATCH,r13;
 991	std	r1,PACA_EXGEN+EX_R1(r13);
 992	stw	r4,PACA_EXGEN+EX_CR(r13);
 993	ori	r11,r11,MSR_EE
 994	subi	r1,r1,INT_FRAME_SIZE;
 995	cmpwi	cr0,r3,0x500
 996	beq	exc_0x500_common
 997	cmpwi	cr0,r3,0x900
 998	beq	exc_0x900_common
 999	cmpwi	cr0,r3,0x280
1000	beq	exc_0x280_common
1001	blr
1002
1003
1004/*
1005 * This is called from 0x300 and 0x400 handlers after the prologs with
1006 * r14 and r15 containing the fault address and error code, with the
1007 * original values stashed away in the PACA
1008 */
1009storage_fault_common:
1010	std	r14,_DAR(r1)
1011	std	r15,_DSISR(r1)
1012	addi	r3,r1,STACK_FRAME_OVERHEAD
1013	mr	r4,r14
1014	mr	r5,r15
1015	ld	r14,PACA_EXGEN+EX_R14(r13)
1016	ld	r15,PACA_EXGEN+EX_R15(r13)
1017	bl	do_page_fault
1018	cmpdi	r3,0
1019	bne-	1f
1020	b	ret_from_except_lite
10211:	bl	save_nvgprs
1022	mr	r5,r3
1023	addi	r3,r1,STACK_FRAME_OVERHEAD
1024	ld	r4,_DAR(r1)
1025	bl	bad_page_fault
1026	b	ret_from_except
1027
1028/*
1029 * Alignment exception doesn't fit entirely in the 0x100 bytes so it
1030 * continues here.
1031 */
1032alignment_more:
1033	std	r14,_DAR(r1)
1034	std	r15,_DSISR(r1)
1035	addi	r3,r1,STACK_FRAME_OVERHEAD
1036	ld	r14,PACA_EXGEN+EX_R14(r13)
1037	ld	r15,PACA_EXGEN+EX_R15(r13)
1038	bl	save_nvgprs
1039	INTS_RESTORE_HARD
1040	bl	alignment_exception
1041	b	ret_from_except
1042
1043/*
1044 * We branch here from entry_64.S for the last stage of the exception
1045 * return code path. MSR:EE is expected to be off at that point
1046 */
1047_GLOBAL(exception_return_book3e)
1048	b	1f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1049
1050/* This is the return from load_up_fpu fast path which could do with
1051 * less GPR restores in fact, but for now we have a single return path
1052 */
1053	.globl fast_exception_return
1054fast_exception_return:
1055	wrteei	0
10561:	mr	r0,r13
1057	ld	r10,_MSR(r1)
1058	REST_4GPRS(2, r1)
1059	andi.	r6,r10,MSR_PR
1060	REST_2GPRS(6, r1)
1061	beq	1f
1062	ACCOUNT_CPU_USER_EXIT(r10, r11)
1063	ld	r0,GPR13(r1)
1064
10651:	stdcx.	r0,0,r1		/* to clear the reservation */
1066
1067	ld	r8,_CCR(r1)
1068	ld	r9,_LINK(r1)
1069	ld	r10,_CTR(r1)
1070	ld	r11,_XER(r1)
1071	mtcr	r8
1072	mtlr	r9
1073	mtctr	r10
1074	mtxer	r11
1075	REST_2GPRS(8, r1)
1076	ld	r10,GPR10(r1)
1077	ld	r11,GPR11(r1)
1078	ld	r12,GPR12(r1)
1079	mtspr	SPRN_SPRG_GEN_SCRATCH,r0
1080
1081	std	r10,PACA_EXGEN+EX_R10(r13);
1082	std	r11,PACA_EXGEN+EX_R11(r13);
1083	ld	r10,_NIP(r1)
1084	ld	r11,_MSR(r1)
1085	ld	r0,GPR0(r1)
1086	ld	r1,GPR1(r1)
1087	mtspr	SPRN_SRR0,r10
1088	mtspr	SPRN_SRR1,r11
1089	ld	r10,PACA_EXGEN+EX_R10(r13)
1090	ld	r11,PACA_EXGEN+EX_R11(r13)
1091	mfspr	r13,SPRN_SPRG_GEN_SCRATCH
1092	rfi
1093
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1094/*
1095 * Trampolines used when spotting a bad kernel stack pointer in
1096 * the exception entry code.
1097 *
1098 * TODO: move some bits like SRR0 read to trampoline, pass PACA
1099 * index around, etc... to handle crit & mcheck
1100 */
1101BAD_STACK_TRAMPOLINE(0x000)
1102BAD_STACK_TRAMPOLINE(0x100)
1103BAD_STACK_TRAMPOLINE(0x200)
1104BAD_STACK_TRAMPOLINE(0x220)
1105BAD_STACK_TRAMPOLINE(0x260)
1106BAD_STACK_TRAMPOLINE(0x280)
1107BAD_STACK_TRAMPOLINE(0x2a0)
1108BAD_STACK_TRAMPOLINE(0x2c0)
1109BAD_STACK_TRAMPOLINE(0x2e0)
1110BAD_STACK_TRAMPOLINE(0x300)
1111BAD_STACK_TRAMPOLINE(0x310)
1112BAD_STACK_TRAMPOLINE(0x320)
1113BAD_STACK_TRAMPOLINE(0x340)
1114BAD_STACK_TRAMPOLINE(0x400)
1115BAD_STACK_TRAMPOLINE(0x500)
1116BAD_STACK_TRAMPOLINE(0x600)
1117BAD_STACK_TRAMPOLINE(0x700)
1118BAD_STACK_TRAMPOLINE(0x800)
1119BAD_STACK_TRAMPOLINE(0x900)
1120BAD_STACK_TRAMPOLINE(0x980)
1121BAD_STACK_TRAMPOLINE(0x9f0)
1122BAD_STACK_TRAMPOLINE(0xa00)
1123BAD_STACK_TRAMPOLINE(0xb00)
1124BAD_STACK_TRAMPOLINE(0xc00)
1125BAD_STACK_TRAMPOLINE(0xd00)
1126BAD_STACK_TRAMPOLINE(0xd08)
1127BAD_STACK_TRAMPOLINE(0xe00)
1128BAD_STACK_TRAMPOLINE(0xf00)
1129BAD_STACK_TRAMPOLINE(0xf20)
1130
1131	.globl	bad_stack_book3e
1132bad_stack_book3e:
1133	/* XXX: Needs to make SPRN_SPRG_GEN depend on exception type */
1134	mfspr	r10,SPRN_SRR0;		  /* read SRR0 before touching stack */
1135	ld	r1,PACAEMERGSP(r13)
1136	subi	r1,r1,64+INT_FRAME_SIZE
1137	std	r10,_NIP(r1)
1138	std	r11,_MSR(r1)
1139	ld	r10,PACA_EXGEN+EX_R1(r13) /* FIXME for crit & mcheck */
1140	lwz	r11,PACA_EXGEN+EX_CR(r13) /* FIXME for crit & mcheck */
1141	std	r10,GPR1(r1)
1142	std	r11,_CCR(r1)
1143	mfspr	r10,SPRN_DEAR
1144	mfspr	r11,SPRN_ESR
1145	std	r10,_DAR(r1)
1146	std	r11,_DSISR(r1)
1147	std	r0,GPR0(r1);		/* save r0 in stackframe */	    \
1148	std	r2,GPR2(r1);		/* save r2 in stackframe */	    \
1149	SAVE_4GPRS(3, r1);		/* save r3 - r6 in stackframe */    \
1150	SAVE_2GPRS(7, r1);		/* save r7, r8 in stackframe */	    \
1151	std	r9,GPR9(r1);		/* save r9 in stackframe */	    \
1152	ld	r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */		    \
1153	ld	r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */		    \
1154	mfspr	r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \
1155	std	r3,GPR10(r1);		/* save r10 to stackframe */	    \
1156	std	r4,GPR11(r1);		/* save r11 to stackframe */	    \
1157	std	r12,GPR12(r1);		/* save r12 in stackframe */	    \
1158	std	r5,GPR13(r1);		/* save it to stackframe */	    \
1159	mflr	r10
1160	mfctr	r11
1161	mfxer	r12
1162	std	r10,_LINK(r1)
1163	std	r11,_CTR(r1)
1164	std	r12,_XER(r1)
1165	SAVE_10GPRS(14,r1)
1166	SAVE_8GPRS(24,r1)
1167	lhz	r12,PACA_TRAP_SAVE(r13)
1168	std	r12,_TRAP(r1)
1169	addi	r11,r1,INT_FRAME_SIZE
1170	std	r11,0(r1)
1171	li	r12,0
1172	std	r12,0(r11)
1173	ld	r2,PACATOC(r13)
11741:	addi	r3,r1,STACK_FRAME_OVERHEAD
1175	bl	kernel_bad_stack
1176	b	1b
1177
1178/*
1179 * Setup the initial TLB for a core. This current implementation
1180 * assume that whatever we are running off will not conflict with
1181 * the new mapping at PAGE_OFFSET.
1182 */
1183_GLOBAL(initial_tlb_book3e)
1184
1185	/* Look for the first TLB with IPROT set */
1186	mfspr	r4,SPRN_TLB0CFG
1187	andi.	r3,r4,TLBnCFG_IPROT
1188	lis	r3,MAS0_TLBSEL(0)@h
1189	bne	found_iprot
1190
1191	mfspr	r4,SPRN_TLB1CFG
1192	andi.	r3,r4,TLBnCFG_IPROT
1193	lis	r3,MAS0_TLBSEL(1)@h
1194	bne	found_iprot
1195
1196	mfspr	r4,SPRN_TLB2CFG
1197	andi.	r3,r4,TLBnCFG_IPROT
1198	lis	r3,MAS0_TLBSEL(2)@h
1199	bne	found_iprot
1200
1201	lis	r3,MAS0_TLBSEL(3)@h
1202	mfspr	r4,SPRN_TLB3CFG
1203	/* fall through */
1204
1205found_iprot:
1206	andi.	r5,r4,TLBnCFG_HES
1207	bne	have_hes
1208
1209	mflr	r8				/* save LR */
1210/* 1. Find the index of the entry we're executing in
1211 *
1212 * r3 = MAS0_TLBSEL (for the iprot array)
1213 * r4 = SPRN_TLBnCFG
1214 */
1215	bl	invstr				/* Find our address */
1216invstr:	mflr	r6				/* Make it accessible */
1217	mfmsr	r7
1218	rlwinm	r5,r7,27,31,31			/* extract MSR[IS] */
1219	mfspr	r7,SPRN_PID
1220	slwi	r7,r7,16
1221	or	r7,r7,r5
1222	mtspr	SPRN_MAS6,r7
1223	tlbsx	0,r6				/* search MSR[IS], SPID=PID */
1224
1225	mfspr	r3,SPRN_MAS0
1226	rlwinm	r5,r3,16,20,31			/* Extract MAS0(Entry) */
1227
1228	mfspr	r7,SPRN_MAS1			/* Insure IPROT set */
1229	oris	r7,r7,MAS1_IPROT@h
1230	mtspr	SPRN_MAS1,r7
1231	tlbwe
1232
1233/* 2. Invalidate all entries except the entry we're executing in
1234 *
1235 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
1236 * r4 = SPRN_TLBnCFG
1237 * r5 = ESEL of entry we are running in
1238 */
1239	andi.	r4,r4,TLBnCFG_N_ENTRY		/* Extract # entries */
1240	li	r6,0				/* Set Entry counter to 0 */
12411:	mr	r7,r3				/* Set MAS0(TLBSEL) */
1242	rlwimi	r7,r6,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r6) */
1243	mtspr	SPRN_MAS0,r7
1244	tlbre
1245	mfspr	r7,SPRN_MAS1
1246	rlwinm	r7,r7,0,2,31			/* Clear MAS1 Valid and IPROT */
1247	cmpw	r5,r6
1248	beq	skpinv				/* Dont update the current execution TLB */
1249	mtspr	SPRN_MAS1,r7
1250	tlbwe
1251	isync
1252skpinv:	addi	r6,r6,1				/* Increment */
1253	cmpw	r6,r4				/* Are we done? */
1254	bne	1b				/* If not, repeat */
1255
1256	/* Invalidate all TLBs */
1257	PPC_TLBILX_ALL(0,R0)
1258	sync
1259	isync
1260
1261/* 3. Setup a temp mapping and jump to it
1262 *
1263 * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
1264 * r5 = ESEL of entry we are running in
1265 */
1266	andi.	r7,r5,0x1	/* Find an entry not used and is non-zero */
1267	addi	r7,r7,0x1
1268	mr	r4,r3		/* Set MAS0(TLBSEL) = 1 */
1269	mtspr	SPRN_MAS0,r4
1270	tlbre
1271
1272	rlwimi	r4,r7,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r7) */
1273	mtspr	SPRN_MAS0,r4
1274
1275	mfspr	r7,SPRN_MAS1
1276	xori	r6,r7,MAS1_TS		/* Setup TMP mapping in the other Address space */
1277	mtspr	SPRN_MAS1,r6
1278
1279	tlbwe
1280
1281	mfmsr	r6
1282	xori	r6,r6,MSR_IS
1283	mtspr	SPRN_SRR1,r6
1284	bl	1f		/* Find our address */
12851:	mflr	r6
1286	addi	r6,r6,(2f - 1b)
1287	mtspr	SPRN_SRR0,r6
1288	rfi
12892:
1290
1291/* 4. Clear out PIDs & Search info
1292 *
1293 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
1294 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
1295 * r5 = MAS3
1296 */
1297	li	r6,0
1298	mtspr   SPRN_MAS6,r6
1299	mtspr	SPRN_PID,r6
1300
1301/* 5. Invalidate mapping we started in
1302 *
1303 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
1304 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
1305 * r5 = MAS3
1306 */
1307	mtspr	SPRN_MAS0,r3
1308	tlbre
1309	mfspr	r6,SPRN_MAS1
1310	rlwinm	r6,r6,0,2,31	/* clear IPROT and VALID */
1311	mtspr	SPRN_MAS1,r6
1312	tlbwe
1313	sync
1314	isync
1315
1316/*
1317 * The mapping only needs to be cache-coherent on SMP, except on
1318 * Freescale e500mc derivatives where it's also needed for coherent DMA.
1319 */
1320#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
1321#define M_IF_NEEDED	MAS2_M
1322#else
1323#define M_IF_NEEDED	0
1324#endif
1325
1326/* 6. Setup KERNELBASE mapping in TLB[0]
1327 *
1328 * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
1329 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
1330 * r5 = MAS3
1331 */
1332	rlwinm	r3,r3,0,16,3	/* clear ESEL */
1333	mtspr	SPRN_MAS0,r3
1334	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
1335	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
1336	mtspr	SPRN_MAS1,r6
1337
1338	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_NEEDED)
1339	mtspr	SPRN_MAS2,r6
1340
1341	rlwinm	r5,r5,0,0,25
1342	ori	r5,r5,MAS3_SR | MAS3_SW | MAS3_SX
1343	mtspr	SPRN_MAS3,r5
1344	li	r5,-1
1345	rlwinm	r5,r5,0,0,25
1346
1347	tlbwe
1348
1349/* 7. Jump to KERNELBASE mapping
1350 *
1351 * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
1352 */
1353	/* Now we branch the new virtual address mapped by this entry */
1354	bl	1f		/* Find our address */
13551:	mflr	r6
1356	addi	r6,r6,(2f - 1b)
1357	tovirt(r6,r6)
1358	lis	r7,MSR_KERNEL@h
1359	ori	r7,r7,MSR_KERNEL@l
1360	mtspr	SPRN_SRR0,r6
1361	mtspr	SPRN_SRR1,r7
1362	rfi				/* start execution out of TLB1[0] entry */
13632:
1364
1365/* 8. Clear out the temp mapping
1366 *
1367 * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in
1368 */
1369	mtspr	SPRN_MAS0,r4
1370	tlbre
1371	mfspr	r5,SPRN_MAS1
1372	rlwinm	r5,r5,0,2,31	/* clear IPROT and VALID */
1373	mtspr	SPRN_MAS1,r5
1374	tlbwe
1375	sync
1376	isync
1377
1378	/* We translate LR and return */
1379	tovirt(r8,r8)
1380	mtlr	r8
1381	blr
1382
1383have_hes:
1384	/* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the
1385	 * kernel linear mapping. We also set MAS8 once for all here though
1386	 * that will have to be made dependent on whether we are running under
1387	 * a hypervisor I suppose.
1388	 */
1389
1390	/* BEWARE, MAGIC
1391	 * This code is called as an ordinary function on the boot CPU. But to
1392	 * avoid duplication, this code is also used in SCOM bringup of
1393	 * secondary CPUs. We read the code between the initial_tlb_code_start
1394	 * and initial_tlb_code_end labels one instruction at a time and RAM it
1395	 * into the new core via SCOM. That doesn't process branches, so there
1396	 * must be none between those two labels. It also means if this code
1397	 * ever takes any parameters, the SCOM code must also be updated to
1398	 * provide them.
1399	 */
1400	.globl a2_tlbinit_code_start
1401a2_tlbinit_code_start:
1402
1403	ori	r11,r3,MAS0_WQ_ALLWAYS
1404	oris	r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */
1405	mtspr	SPRN_MAS0,r11
1406	lis	r3,(MAS1_VALID | MAS1_IPROT)@h
1407	ori	r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
1408	mtspr	SPRN_MAS1,r3
1409	LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET | MAS2_M)
1410	mtspr	SPRN_MAS2,r3
1411	li	r3,MAS3_SR | MAS3_SW | MAS3_SX
1412	mtspr	SPRN_MAS7_MAS3,r3
1413	li	r3,0
1414	mtspr	SPRN_MAS8,r3
1415
1416	/* Write the TLB entry */
1417	tlbwe
1418
1419	.globl a2_tlbinit_after_linear_map
1420a2_tlbinit_after_linear_map:
1421
1422	/* Now we branch the new virtual address mapped by this entry */
1423	LOAD_REG_IMMEDIATE(r3,1f)
1424	mtctr	r3
1425	bctr
1426
14271:	/* We are now running at PAGE_OFFSET, clean the TLB of everything
1428	 * else (including IPROTed things left by firmware)
1429	 * r4 = TLBnCFG
1430	 * r3 = current address (more or less)
1431	 */
1432
1433	li	r5,0
1434	mtspr	SPRN_MAS6,r5
1435	tlbsx	0,r3
1436
1437	rlwinm	r9,r4,0,TLBnCFG_N_ENTRY
1438	rlwinm	r10,r4,8,0xff
1439	addi	r10,r10,-1	/* Get inner loop mask */
1440
1441	li	r3,1
1442
1443	mfspr	r5,SPRN_MAS1
1444	rlwinm	r5,r5,0,(~(MAS1_VALID|MAS1_IPROT))
1445
1446	mfspr	r6,SPRN_MAS2
1447	rldicr	r6,r6,0,51		/* Extract EPN */
1448
1449	mfspr	r7,SPRN_MAS0
1450	rlwinm	r7,r7,0,0xffff0fff	/* Clear HES and WQ */
1451
1452	rlwinm	r8,r7,16,0xfff		/* Extract ESEL */
1453
14542:	add	r4,r3,r8
1455	and	r4,r4,r10
1456
1457	rlwimi	r7,r4,16,MAS0_ESEL_MASK
1458
1459	mtspr	SPRN_MAS0,r7
1460	mtspr	SPRN_MAS1,r5
1461	mtspr	SPRN_MAS2,r6
1462	tlbwe
1463
1464	addi	r3,r3,1
1465	and.	r4,r3,r10
1466
1467	bne	3f
1468	addis	r6,r6,(1<<30)@h
14693:
1470	cmpw	r3,r9
1471	blt	2b
1472
1473	.globl  a2_tlbinit_after_iprot_flush
1474a2_tlbinit_after_iprot_flush:
1475
1476	PPC_TLBILX(0,0,R0)
1477	sync
1478	isync
1479
1480	.globl a2_tlbinit_code_end
1481a2_tlbinit_code_end:
1482
1483	/* We translate LR and return */
1484	mflr	r3
1485	tovirt(r3,r3)
1486	mtlr	r3
1487	blr
1488
1489/*
1490 * Main entry (boot CPU, thread 0)
1491 *
1492 * We enter here from head_64.S, possibly after the prom_init trampoline
1493 * with r3 and r4 already saved to r31 and 30 respectively and in 64 bits
1494 * mode. Anything else is as it was left by the bootloader
1495 *
1496 * Initial requirements of this port:
1497 *
1498 * - Kernel loaded at 0 physical
1499 * - A good lump of memory mapped 0:0 by UTLB entry 0
1500 * - MSR:IS & MSR:DS set to 0
1501 *
1502 * Note that some of the above requirements will be relaxed in the future
1503 * as the kernel becomes smarter at dealing with different initial conditions
1504 * but for now you have to be careful
1505 */
1506_GLOBAL(start_initialization_book3e)
1507	mflr	r28
1508
1509	/* First, we need to setup some initial TLBs to map the kernel
1510	 * text, data and bss at PAGE_OFFSET. We don't have a real mode
1511	 * and always use AS 0, so we just set it up to match our link
1512	 * address and never use 0 based addresses.
1513	 */
1514	bl	initial_tlb_book3e
1515
1516	/* Init global core bits */
1517	bl	init_core_book3e
1518
1519	/* Init per-thread bits */
1520	bl	init_thread_book3e
1521
1522	/* Return to common init code */
1523	tovirt(r28,r28)
1524	mtlr	r28
1525	blr
1526
1527
1528/*
1529 * Secondary core/processor entry
1530 *
1531 * This is entered for thread 0 of a secondary core, all other threads
1532 * are expected to be stopped. It's similar to start_initialization_book3e
1533 * except that it's generally entered from the holding loop in head_64.S
1534 * after CPUs have been gathered by Open Firmware.
1535 *
1536 * We assume we are in 32 bits mode running with whatever TLB entry was
1537 * set for us by the firmware or POR engine.
1538 */
1539_GLOBAL(book3e_secondary_core_init_tlb_set)
1540	li	r4,1
1541	b	generic_secondary_smp_init
1542
1543_GLOBAL(book3e_secondary_core_init)
1544	mflr	r28
1545
1546	/* Do we need to setup initial TLB entry ? */
1547	cmplwi	r4,0
1548	bne	2f
1549
1550	/* Setup TLB for this core */
1551	bl	initial_tlb_book3e
1552
1553	/* We can return from the above running at a different
1554	 * address, so recalculate r2 (TOC)
1555	 */
1556	bl	relative_toc
1557
1558	/* Init global core bits */
15592:	bl	init_core_book3e
1560
1561	/* Init per-thread bits */
15623:	bl	init_thread_book3e
1563
1564	/* Return to common init code at proper virtual address.
1565	 *
1566	 * Due to various previous assumptions, we know we entered this
1567	 * function at either the final PAGE_OFFSET mapping or using a
1568	 * 1:1 mapping at 0, so we don't bother doing a complicated check
1569	 * here, we just ensure the return address has the right top bits.
1570	 *
1571	 * Note that if we ever want to be smarter about where we can be
1572	 * started from, we have to be careful that by the time we reach
1573	 * the code below we may already be running at a different location
1574	 * than the one we were called from since initial_tlb_book3e can
1575	 * have moved us already.
1576	 */
1577	cmpdi	cr0,r28,0
1578	blt	1f
1579	lis	r3,PAGE_OFFSET@highest
1580	sldi	r3,r3,32
1581	or	r28,r28,r3
15821:	mtlr	r28
1583	blr
1584
1585_GLOBAL(book3e_secondary_thread_init)
1586	mflr	r28
1587	b	3b
1588
1589	.globl init_core_book3e
1590init_core_book3e:
1591	/* Establish the interrupt vector base */
1592	tovirt(r2,r2)
1593	LOAD_REG_ADDR(r3, interrupt_base_book3e)
1594	mtspr	SPRN_IVPR,r3
1595	sync
1596	blr
1597
1598init_thread_book3e:
1599	lis	r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h
1600	mtspr	SPRN_EPCR,r3
1601
1602	/* Make sure interrupts are off */
1603	wrteei	0
1604
1605	/* disable all timers and clear out status */
1606	li	r3,0
1607	mtspr	SPRN_TCR,r3
1608	mfspr	r3,SPRN_TSR
1609	mtspr	SPRN_TSR,r3
1610
1611	blr
1612
1613_GLOBAL(__setup_base_ivors)
1614	SET_IVOR(0, 0x020) /* Critical Input */
1615	SET_IVOR(1, 0x000) /* Machine Check */
1616	SET_IVOR(2, 0x060) /* Data Storage */ 
1617	SET_IVOR(3, 0x080) /* Instruction Storage */
1618	SET_IVOR(4, 0x0a0) /* External Input */ 
1619	SET_IVOR(5, 0x0c0) /* Alignment */ 
1620	SET_IVOR(6, 0x0e0) /* Program */ 
1621	SET_IVOR(7, 0x100) /* FP Unavailable */ 
1622	SET_IVOR(8, 0x120) /* System Call */ 
1623	SET_IVOR(9, 0x140) /* Auxiliary Processor Unavailable */ 
1624	SET_IVOR(10, 0x160) /* Decrementer */ 
1625	SET_IVOR(11, 0x180) /* Fixed Interval Timer */ 
1626	SET_IVOR(12, 0x1a0) /* Watchdog Timer */ 
1627	SET_IVOR(13, 0x1c0) /* Data TLB Error */ 
1628	SET_IVOR(14, 0x1e0) /* Instruction TLB Error */
1629	SET_IVOR(15, 0x040) /* Debug */
1630
1631	sync
1632
1633	blr
1634
1635_GLOBAL(setup_altivec_ivors)
1636	SET_IVOR(32, 0x200) /* AltiVec Unavailable */
1637	SET_IVOR(33, 0x220) /* AltiVec Assist */
1638	blr
1639
1640_GLOBAL(setup_perfmon_ivor)
1641	SET_IVOR(35, 0x260) /* Performance Monitor */
1642	blr
1643
1644_GLOBAL(setup_doorbell_ivors)
1645	SET_IVOR(36, 0x280) /* Processor Doorbell */
1646	SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
1647	blr
1648
1649_GLOBAL(setup_ehv_ivors)
1650	SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
1651	SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
1652	SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
1653	SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
1654	blr
1655
1656_GLOBAL(setup_lrat_ivor)
1657	SET_IVOR(42, 0x340) /* LRAT Error */
1658	blr