v6.8
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/*
   3 * Kernel execution entry point code.
   4 *
   5 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
   6 *      Initial PowerPC version.
   7 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
   8 *      Rewritten for PReP
   9 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
   10 *      Low-level exception handlers, MMU support, and rewrite.
  11 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
  12 *      PowerPC 8xx modifications.
  13 *    Copyright (c) 1998-1999 TiVo, Inc.
  14 *      PowerPC 403GCX modifications.
  15 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
  16 *      PowerPC 403GCX/405GP modifications.
  17 *    Copyright 2000 MontaVista Software Inc.
  18 *	PPC405 modifications
  19 *      PowerPC 403GCX/405GP modifications.
  20 * 	Author: MontaVista Software, Inc.
  21 *         	frank_rowand@mvista.com or source@mvista.com
  22 * 	   	debbie_chu@mvista.com
  23 *    Copyright 2002-2005 MontaVista Software, Inc.
  24 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
  25 */
  26
  27#include <linux/init.h>
  28#include <linux/pgtable.h>
  29#include <asm/processor.h>
  30#include <asm/page.h>
  31#include <asm/mmu.h>
  32#include <asm/cputable.h>
  33#include <asm/thread_info.h>
  34#include <asm/ppc_asm.h>
  35#include <asm/asm-offsets.h>
  36#include <asm/ptrace.h>
   37#include <asm/synch.h>
   38#include <asm/code-patching-asm.h>
  39#include "head_booke.h"
  40
  41
  42/* As with the other PowerPC ports, it is expected that when code
  43 * execution begins here, the following registers contain valid, yet
  44 * optional, information:
  45 *
  46 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
  47 *   r4 - Starting address of the init RAM disk
  48 *   r5 - Ending address of the init RAM disk
  49 *   r6 - Start of kernel command line string (e.g. "mem=128")
  50 *   r7 - End of kernel command line string
  51 *
  52 */
  53	__HEAD
  54_GLOBAL(_stext);
  55_GLOBAL(_start);
  56	/*
  57	 * Reserve a word at a fixed location to store the address
  58	 * of abatron_pteptrs
  59	 */
  60	nop
  61	mr	r31,r3		/* save device tree ptr */
  62	li	r24,0		/* CPU number */
  63
  64#ifdef CONFIG_RELOCATABLE
  65/*
  66 * Relocate ourselves to the current runtime address.
  67 * This is called only by the Boot CPU.
  68 * "relocate" is called with our current runtime virutal
  69 * address.
  70 * r21 will be loaded with the physical runtime address of _stext
  71 */
  72	bcl	20,31,$+4			/* Get our runtime address */
  730:	mflr	r21				/* Make it accessible */
  74	addis	r21,r21,(_stext - 0b)@ha
  75	addi	r21,r21,(_stext - 0b)@l 	/* Get our current runtime base */
  76
  77	/*
   78	 * We have the runtime (virtual) address of our base.
   79	 * We calculate our offset within our 256M page.
  80	 * We could map the 256M page we belong to at PAGE_OFFSET and
  81	 * get going from there.
  82	 */
  83	lis	r4,KERNELBASE@h
  84	ori	r4,r4,KERNELBASE@l
  85	rlwinm	r6,r21,0,4,31			/* r6 = PHYS_START % 256M */
  86	rlwinm	r5,r4,0,4,31			/* r5 = KERNELBASE % 256M */
  87	subf	r3,r5,r6			/* r3 = r6 - r5 */
   88	add	r3,r4,r3			/* Required Virtual Address */
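	/*
	 * A worked example of the computation above, illustrative only and
	 * assuming KERNELBASE = 0xc0000000 (the usual 32-bit value): if our
	 * runtime base sits at offset 0x00400000 within its 256M page, then
	 *	r6 = 0x00400000, r5 = 0xc0000000 % 256M = 0,
	 *	r3 = 0xc0000000 + (0x00400000 - 0) = 0xc0400000,
	 * the virtual address that "relocate" must re-link us to.
	 */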
  89
  90	bl	relocate
  91#endif
  92
  93	bl	init_cpu_state
  94
  95	/*
  96	 * This is where the main kernel code starts.
  97	 */
  98
  99	/* ptr to current */
 100	lis	r2,init_task@h
 101	ori	r2,r2,init_task@l
 102
 103	/* ptr to current thread */
 104	addi	r4,r2,THREAD	/* init task's THREAD */
 105	mtspr	SPRN_SPRG_THREAD,r4
 106
 107	/* stack */
 108	lis	r1,init_thread_union@h
 109	ori	r1,r1,init_thread_union@l
 110	li	r0,0
 111	stwu	r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)
 112
 113	bl	early_init
 114
 115#ifdef CONFIG_RELOCATABLE
 116	/*
 117	 * Relocatable kernel support based on processing of dynamic
 118	 * relocation entries.
 119	 *
 120	 * r25 will contain RPN/ERPN for the start address of memory
 121	 * r21 will contain the current offset of _stext
 122	 */
 123	lis	r3,kernstart_addr@ha
 124	la	r3,kernstart_addr@l(r3)
 125
 126	/*
 127	 * Compute the kernstart_addr.
 128	 * kernstart_addr => (r6,r8)
 129	 * kernstart_addr & ~0xfffffff => (r6,r7)
 130	 */
 131	rlwinm	r6,r25,0,28,31	/* ERPN. Bits 32-35 of Address */
 132	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
 133	rlwinm	r8,r21,0,4,31	/* r8 = (_stext & 0xfffffff) */
 134	or	r8,r7,r8	/* Compute the lower 32bit of kernstart_addr */
 135
 136	/* Store kernstart_addr */
 137	stw	r6,0(r3)	/* higher 32bit */
 138	stw	r8,4(r3)	/* lower 32bit  */
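	/*
	 * Illustrative example with assumed values: if memory starts at the
	 * 36-bit physical address 0x0_10000000 (ERPN = 0, RPN nibble = 0x1)
	 * and _stext runs at offset 0 within its 256M page, then
	 *	r6 (ERPN) = 0, r7 (RPN) = 0x10000000, r8 = r7 | 0 = 0x10000000,
	 * so kernstart_addr is stored as the 64-bit value 0x0_10000000.
	 */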
 139
 140	/*
 141	 * Compute the virt_phys_offset :
 142	 * virt_phys_offset = stext.run - kernstart_addr
 143	 *
 144	 * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
 145	 * When we relocate, we have :
 146	 *
 147	 *	(kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
 148	 *
 149	 * hence:
 150	 *  virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff)
 151	 *
 152	 */
 153
 154	/* KERNELBASE&~0xfffffff => (r4,r5) */
  155	li	r4, 0		/* higher 32bit */
 156	lis	r5,KERNELBASE@h
 157	rlwinm	r5,r5,0,0,3	/* Align to 256M, lower 32bit */
 158
 159	/*
 160	 * 64bit subtraction.
 161	 */
 162	subfc	r5,r7,r5
 163	subfe	r4,r6,r4
 164
 165	/* Store virt_phys_offset */
 166	lis	r3,virt_phys_offset@ha
 167	la	r3,virt_phys_offset@l(r3)
 168
 169	stw	r4,0(r3)
 170	stw	r5,4(r3)
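	/*
	 * Continuing the example above with the same assumed values
	 * (KERNELBASE = 0xc0000000, kernstart_addr = 0x0_10000000):
	 *	virt_phys_offset = 0xc0000000 - 0x10000000 = 0xb0000000,
	 * held after the subfc/subfe pair as the 64-bit value
	 * r4:r5 = 0x00000000:0xb0000000.
	 */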
 171
 172#elif defined(CONFIG_DYNAMIC_MEMSTART)
 173	/*
 174	 * Mapping based, page aligned dynamic kernel loading.
 175	 *
 176	 * r25 will contain RPN/ERPN for the start address of memory
 177	 *
 178	 * Add the difference between KERNELBASE and PAGE_OFFSET to the
 179	 * start of physical memory to get kernstart_addr.
 180	 */
 181	lis	r3,kernstart_addr@ha
 182	la	r3,kernstart_addr@l(r3)
 183
 184	lis	r4,KERNELBASE@h
 185	ori	r4,r4,KERNELBASE@l
 186	lis	r5,PAGE_OFFSET@h
 187	ori	r5,r5,PAGE_OFFSET@l
 188	subf	r4,r5,r4
 189
 190	rlwinm	r6,r25,0,28,31	/* ERPN */
 191	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
 192	add	r7,r7,r4
 193
 194	stw	r6,0(r3)
 195	stw	r7,4(r3)
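	/*
	 * Note: in the common configuration where KERNELBASE equals
	 * PAGE_OFFSET (an assumption, not a requirement), the subtraction
	 * above is zero and kernstart_addr is simply the start of physical
	 * memory taken from r25.
	 */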
 196#endif
 197
 198/*
 199 * Decide what sort of machine this is and initialize the MMU.
 200 */
 201#ifdef CONFIG_KASAN
 202	bl	kasan_early_init
 203#endif
 204	li	r3,0
 205	mr	r4,r31
 206	bl	machine_init
 207	bl	MMU_init
 208
 209	/* Setup PTE pointers for the Abatron bdiGDB */
 210	lis	r6, swapper_pg_dir@h
 211	ori	r6, r6, swapper_pg_dir@l
 212	lis	r5, abatron_pteptrs@h
 213	ori	r5, r5, abatron_pteptrs@l
 214	lis	r4, KERNELBASE@h
 215	ori	r4, r4, KERNELBASE@l
 216	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
 217	stw	r6, 0(r5)
 218
 219	/* Clear the Machine Check Syndrome Register */
 220	li	r0,0
 221	mtspr	SPRN_MCSR,r0
 222
 223	/* Let's move on */
 224	lis	r4,start_kernel@h
 225	ori	r4,r4,start_kernel@l
 226	lis	r3,MSR_KERNEL@h
 227	ori	r3,r3,MSR_KERNEL@l
 228	mtspr	SPRN_SRR0,r4
 229	mtspr	SPRN_SRR1,r3
 230	rfi			/* change context and jump to start_kernel */
 231
 232/*
 233 * Interrupt vector entry code
 234 *
 235 * The Book E MMUs are always on so we don't need to handle
 236 * interrupts in real mode as with previous PPC processors. In
 237 * this case we handle interrupts in the kernel virtual address
 238 * space.
 239 *
 240 * Interrupt vectors are dynamically placed relative to the
 241 * interrupt prefix as determined by the address of interrupt_base.
 242 * The interrupt vectors offsets are programmed using the labels
 243 * for each interrupt vector entry.
 244 *
 245 * Interrupt vectors must be aligned on a 16 byte boundary.
 246 * We align on a 32 byte cache line boundary for good measure.
 247 */
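/*
 * How those offsets get programmed (sketch): each SET_IVOR(n, label) used
 * in init_cpu_state below comes from head_booke.h and boils down to roughly
 * "li rX,label@l ; mtspr SPRN_IVORn,rX" with a scratch register, so IVORn
 * ends up holding the handler's offset within the 64 KB region selected
 * by IVPR.
 */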
 248
 249interrupt_base:
 250	/* Critical Input Interrupt */
 251	CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
 252
 253	/* Machine Check Interrupt */
 254	CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
 255			   machine_check_exception)
 256	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)
 257
 258	/* Data Storage Interrupt */
 259	DATA_STORAGE_EXCEPTION
 260
 261		/* Instruction Storage Interrupt */
 262	INSTRUCTION_STORAGE_EXCEPTION
 263
 264	/* External Input Interrupt */
 265	EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, do_IRQ)
 266
 267	/* Alignment Interrupt */
 268	ALIGNMENT_EXCEPTION
 269
 270	/* Program Interrupt */
 271	PROGRAM_EXCEPTION
 272
 273	/* Floating Point Unavailable Interrupt */
 274#ifdef CONFIG_PPC_FPU
 275	FP_UNAVAILABLE_EXCEPTION
 276#else
 277	EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
 278		  FloatingPointUnavailable, unknown_exception)
 279#endif
 280	/* System Call Interrupt */
 281	START_EXCEPTION(SystemCall)
 282	SYSCALL_ENTRY   0xc00 BOOKE_INTERRUPT_SYSCALL
 283
 284	/* Auxiliary Processor Unavailable Interrupt */
 285	EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
 286		  AuxillaryProcessorUnavailable, unknown_exception)
 287
 288	/* Decrementer Interrupt */
 289	DECREMENTER_EXCEPTION
 290
 291	/* Fixed Internal Timer Interrupt */
 292	/* TODO: Add FIT support */
 293	EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, unknown_exception)
 294
 295	/* Watchdog Timer Interrupt */
 296	/* TODO: Add watchdog support */
 297#ifdef CONFIG_BOOKE_WDT
 298	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException)
 299#else
 300	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception)
 301#endif
 302
 303	/* Data TLB Error Interrupt */
 304	START_EXCEPTION(DataTLBError44x)
 305	mtspr	SPRN_SPRG_WSCRATCH0, r10		/* Save some working registers */
 306	mtspr	SPRN_SPRG_WSCRATCH1, r11
 307	mtspr	SPRN_SPRG_WSCRATCH2, r12
 308	mtspr	SPRN_SPRG_WSCRATCH3, r13
 309	mfcr	r11
 310	mtspr	SPRN_SPRG_WSCRATCH4, r11
 311	mfspr	r10, SPRN_DEAR		/* Get faulting address */
 312
 313	/* If we are faulting a kernel address, we have to use the
 314	 * kernel page tables.
 315	 */
 316	lis	r11, PAGE_OFFSET@h
 317	cmplw	cr7, r10, r11
 318	blt+	cr7, 3f
 319	lis	r11, swapper_pg_dir@h
 320	ori	r11, r11, swapper_pg_dir@l
 321
 322	mfspr	r12,SPRN_MMUCR
 323	rlwinm	r12,r12,0,0,23		/* Clear TID */
 324
 325	b	4f
 326
 327	/* Get the PGD for the current thread */
 3283:
 329	mfspr	r11,SPRN_SPRG_THREAD
 330	lwz	r11,PGDIR(r11)
 331
 332	/* Load PID into MMUCR TID */
 333	mfspr	r12,SPRN_MMUCR
 334	mfspr   r13,SPRN_PID		/* Get PID */
 335	rlwimi	r12,r13,0,24,31		/* Set TID */
 336#ifdef CONFIG_PPC_KUAP
 337	cmpwi	r13,0
 338	beq	2f			/* KUAP Fault */
 339#endif
 340
 3414:
 342	mtspr	SPRN_MMUCR,r12
 343
 344	/* Mask of required permission bits. Note that while we
 345	 * do copy ESR:ST to _PAGE_WRITE position as trying to write
 346	 * to an RO page is pretty common, we don't do it with
 347	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
 348	 * event so I'd rather take the overhead when it happens
 349	 * rather than adding an instruction here. We should measure
 350	 * whether the whole thing is worth it in the first place
 351	 * as we could avoid loading SPRN_ESR completely in the first
 352	 * place...
 353	 *
 354	 * TODO: Is it worth doing that mfspr & rlwimi in the first
 355	 *       place or can we save a couple of instructions here ?
 356	 */
 357	mfspr	r12,SPRN_ESR
 358	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_READ
 359	rlwimi	r13,r12,10,30,30
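	/*
	 * The rlwimi above rotates ESR left by 10 and inserts only bit 30 of
	 * the result into r13: assuming ESR[ST] = 0x00800000 and
	 * _PAGE_WRITE = 0x002 (values taken from reg_booke.h / pte-44x.h),
	 * this drops the "store fault" flag straight into the _PAGE_WRITE
	 * position of the required-permission mask.
	 */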
 360
 361	/* Load the PTE */
 362	/* Compute pgdir/pmd offset */
 363	rlwinm  r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
 364	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
 365	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
 366	beq	2f			/* Bail if no table */
 367
 368	/* Compute pte address */
 369	rlwimi  r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
 370	lwz	r11, 0(r12)		/* Get high word of pte entry */
 371	lwz	r12, 4(r12)		/* Get low word of pte entry */
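	/*
	 * A rough C sketch of the two-level walk above (variable names are
	 * hypothetical; 4K pages and 8-byte PTEs assumed):
	 *
	 *	u32 pmd = pgdir[ea >> PGDIR_SHIFT];            // the lwzx
	 *	u64 *ptep = (u64 *)(pmd & ~0x7ffUL) +          // table base
	 *	            ((ea >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
	 *	u32 pte_hi = ((u32 *)ptep)[0];                 // -> r11
	 *	u32 pte_lo = ((u32 *)ptep)[1];                 // -> r12
	 *
	 * bailing out first if (pmd & ~0x7ffUL) is zero, i.e. no page table.
	 */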
 372
 373	lis	r10,tlb_44x_index@ha
 374
 375	andc.	r13,r13,r12		/* Check permission */
 376
 377	/* Load the next available TLB index */
 378	lwz	r13,tlb_44x_index@l(r10)
 379
 380	bne	2f			/* Bail if permission mismatch */
 381
 382	/* Increment, rollover, and store TLB index */
 383	addi	r13,r13,1
 384
 385	patch_site 0f, patch__tlb_44x_hwater_D
 386	/* Compare with watermark (instruction gets patched) */
 3870:	cmpwi	0,r13,1			/* reserve entries */
 388	ble	5f
 389	li	r13,0
 3905:
 391	/* Store the next available TLB index */
 392	stw	r13,tlb_44x_index@l(r10)
 393
 394	/* Re-load the faulting address */
 395	mfspr	r10,SPRN_DEAR
 396
 397	 /* Jump to common tlb load */
 398	b	finish_tlb_load_44x
 399
 4002:
 401	/* The bailout.  Restore registers to pre-exception conditions
 402	 * and call the heavyweights to help us out.
 403	 */
 404	mfspr	r11, SPRN_SPRG_RSCRATCH4
 405	mtcr	r11
 406	mfspr	r13, SPRN_SPRG_RSCRATCH3
 407	mfspr	r12, SPRN_SPRG_RSCRATCH2
 408	mfspr	r11, SPRN_SPRG_RSCRATCH1
 409	mfspr	r10, SPRN_SPRG_RSCRATCH0
 410	b	DataStorage
 411
 412	/* Instruction TLB Error Interrupt */
 413	/*
 414	 * Nearly the same as above, except we get our
 415	 * information from different registers and bailout
 416	 * to a different point.
 417	 */
 418	START_EXCEPTION(InstructionTLBError44x)
 419	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
 420	mtspr	SPRN_SPRG_WSCRATCH1, r11
 421	mtspr	SPRN_SPRG_WSCRATCH2, r12
 422	mtspr	SPRN_SPRG_WSCRATCH3, r13
 423	mfcr	r11
 424	mtspr	SPRN_SPRG_WSCRATCH4, r11
 425	mfspr	r10, SPRN_SRR0		/* Get faulting address */
 426
 427	/* If we are faulting a kernel address, we have to use the
 428	 * kernel page tables.
 429	 */
 430	lis	r11, PAGE_OFFSET@h
 431	cmplw	cr7, r10, r11
 432	blt+	cr7, 3f
 433	lis	r11, swapper_pg_dir@h
 434	ori	r11, r11, swapper_pg_dir@l
 435
 436	mfspr	r12,SPRN_MMUCR
 437	rlwinm	r12,r12,0,0,23		/* Clear TID */
 438
 439	b	4f
 440
 441	/* Get the PGD for the current thread */
 4423:
 443	mfspr	r11,SPRN_SPRG_THREAD
 444	lwz	r11,PGDIR(r11)
 445
 446	/* Load PID into MMUCR TID */
 447	mfspr	r12,SPRN_MMUCR
 448	mfspr   r13,SPRN_PID		/* Get PID */
 449	rlwimi	r12,r13,0,24,31		/* Set TID */
 450#ifdef CONFIG_PPC_KUAP
 451	cmpwi	r13,0
 452	beq	2f			/* KUAP Fault */
 453#endif
 454
 4554:
 456	mtspr	SPRN_MMUCR,r12
 457
 458	/* Make up the required permissions */
 459	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
 460
 461	/* Compute pgdir/pmd offset */
 462	rlwinm 	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
 463	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
 464	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
 465	beq	2f			/* Bail if no table */
 466
 467	/* Compute pte address */
 468	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
 469	lwz	r11, 0(r12)		/* Get high word of pte entry */
 470	lwz	r12, 4(r12)		/* Get low word of pte entry */
 471
 472	lis	r10,tlb_44x_index@ha
 473
 474	andc.	r13,r13,r12		/* Check permission */
 475
 476	/* Load the next available TLB index */
 477	lwz	r13,tlb_44x_index@l(r10)
 478
 479	bne	2f			/* Bail if permission mismatch */
 480
 481	/* Increment, rollover, and store TLB index */
 482	addi	r13,r13,1
 483
 484	patch_site 0f, patch__tlb_44x_hwater_I
 485	/* Compare with watermark (instruction gets patched) */
 4860:	cmpwi	0,r13,1			/* reserve entries */
 487	ble	5f
 488	li	r13,0
 4895:
 490	/* Store the next available TLB index */
 491	stw	r13,tlb_44x_index@l(r10)
 492
 493	/* Re-load the faulting address */
 494	mfspr	r10,SPRN_SRR0
 495
 496	/* Jump to common TLB load point */
 497	b	finish_tlb_load_44x
 498
 4992:
 500	/* The bailout.  Restore registers to pre-exception conditions
 501	 * and call the heavyweights to help us out.
 502	 */
 503	mfspr	r11, SPRN_SPRG_RSCRATCH4
 504	mtcr	r11
 505	mfspr	r13, SPRN_SPRG_RSCRATCH3
 506	mfspr	r12, SPRN_SPRG_RSCRATCH2
 507	mfspr	r11, SPRN_SPRG_RSCRATCH1
 508	mfspr	r10, SPRN_SPRG_RSCRATCH0
 509	b	InstructionStorage
 510
 511/*
 512 * Both the instruction and data TLB miss get to this
 513 * point to load the TLB.
 514 * 	r10 - EA of fault
 515 * 	r11 - PTE high word value
 516 *	r12 - PTE low word value
 517 *	r13 - TLB index
 518 *	cr7 - Result of comparison with PAGE_OFFSET
 519 *	MMUCR - loaded with proper value when we get here
 520 *	Upon exit, we reload everything and RFI.
 521 */
 522finish_tlb_load_44x:
  523	/* Combine RPN & ERPN and write WS 0 */
 524	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
 525	tlbwe	r11,r13,PPC44x_TLB_XLAT
 526
 527	/*
 528	 * Create WS1. This is the faulting address (EPN),
 529	 * page size, and valid flag.
 530	 */
 531	li	r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
 532	/* Insert valid and page size */
 533	rlwimi	r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
 534	tlbwe	r10,r13,PPC44x_TLB_PAGEID	/* Write PAGEID */
 535
 536	/* And WS 2 */
 537	li	r10,0xf84			/* Mask to apply from PTE */
 538	rlwimi	r10,r12,29,30,31		/* DIRTY,READ -> SW,SR position */
 539	and	r11,r12,r10			/* Mask PTE bits to keep */
  540	bge	cr7,1f			/* User page ? no, leave U bits empty */
  541	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
 542	rlwinm	r11,r11,0,~PPC44x_TLB_SX	/* Clear SX if User page */
 5431:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB	/* Write ATTRIB */
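	/*
	 * For user pages, the rlwimi/rlwinm pair above copies the supervisor
	 * permission bits SX/SW/SR (word-2 bits 29-31, values 0x4/0x2/0x1
	 * assumed from mmu-44x.h) into the user bits UX/UW/UR (bits 26-28)
	 * and then clears SX, so a user mapping is never left
	 * supervisor-executable.
	 */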
 544
 545	/* Done...restore registers and get out of here.
 546	*/
 547	mfspr	r11, SPRN_SPRG_RSCRATCH4
 548	mtcr	r11
 549	mfspr	r13, SPRN_SPRG_RSCRATCH3
 550	mfspr	r12, SPRN_SPRG_RSCRATCH2
 551	mfspr	r11, SPRN_SPRG_RSCRATCH1
 552	mfspr	r10, SPRN_SPRG_RSCRATCH0
 553	rfi					/* Force context change */
 554
 555/* TLB error interrupts for 476
 556 */
 557#ifdef CONFIG_PPC_47x
 558	START_EXCEPTION(DataTLBError47x)
 559	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
 560	mtspr	SPRN_SPRG_WSCRATCH1,r11
 561	mtspr	SPRN_SPRG_WSCRATCH2,r12
 562	mtspr	SPRN_SPRG_WSCRATCH3,r13
 563	mfcr	r11
 564	mtspr	SPRN_SPRG_WSCRATCH4,r11
 565	mfspr	r10,SPRN_DEAR		/* Get faulting address */
 566
 567	/* If we are faulting a kernel address, we have to use the
 568	 * kernel page tables.
 569	 */
 570	lis	r11,PAGE_OFFSET@h
 571	cmplw	cr7,r10,r11
 572	blt+	cr7,3f
 573	lis	r11,swapper_pg_dir@h
 574	ori	r11,r11, swapper_pg_dir@l
 575	li	r12,0			/* MMUCR = 0 */
 576	b	4f
 577
 578	/* Get the PGD for the current thread and setup MMUCR */
 5793:	mfspr	r11,SPRN_SPRG3
 580	lwz	r11,PGDIR(r11)
 581	mfspr   r12,SPRN_PID		/* Get PID */
 582#ifdef CONFIG_PPC_KUAP
 583	cmpwi	r12,0
 584	beq	2f			/* KUAP Fault */
 585#endif
 5864:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
 587
 588	/* Mask of required permission bits. Note that while we
 589	 * do copy ESR:ST to _PAGE_WRITE position as trying to write
 590	 * to an RO page is pretty common, we don't do it with
 591	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
 592	 * event so I'd rather take the overhead when it happens
 593	 * rather than adding an instruction here. We should measure
 594	 * whether the whole thing is worth it in the first place
 595	 * as we could avoid loading SPRN_ESR completely in the first
 596	 * place...
 597	 *
 598	 * TODO: Is it worth doing that mfspr & rlwimi in the first
 599	 *       place or can we save a couple of instructions here ?
 600	 */
 601	mfspr	r12,SPRN_ESR
 602	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_READ
 603	rlwimi	r13,r12,10,30,30
 604
 605	/* Load the PTE */
 606	/* Compute pgdir/pmd offset */
 607	rlwinm  r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
 608	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
 609
 610	/* Word 0 is EPN,V,TS,DSIZ */
 611	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
 612	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
 613	li	r12,0
 614	tlbwe	r10,r12,0
 615
 616	/* XXX can we do better ? Need to make sure tlbwe has established
 617	 * latch V bit in MMUCR0 before the PTE is loaded further down */
 618#ifdef CONFIG_SMP
 619	isync
 620#endif
 621
 622	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
 623	/* Compute pte address */
 624	rlwimi  r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
 625	beq	2f			/* Bail if no table */
 626	lwz	r11,0(r12)		/* Get high word of pte entry */
 627
 628	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
 629	 * bottom of r12 to create a data dependency... We can also use r10
 630	 * as destination nowadays
 631	 */
 632#ifdef CONFIG_SMP
 633	lwsync
 634#endif
 635	lwz	r12,4(r12)		/* Get low word of pte entry */
 636
 637	andc.	r13,r13,r12		/* Check permission */
 638
 639	 /* Jump to common tlb load */
 640	beq	finish_tlb_load_47x
 641
 6422:	/* The bailout.  Restore registers to pre-exception conditions
 643	 * and call the heavyweights to help us out.
 644	 */
 645	mfspr	r11,SPRN_SPRG_RSCRATCH4
 646	mtcr	r11
 647	mfspr	r13,SPRN_SPRG_RSCRATCH3
 648	mfspr	r12,SPRN_SPRG_RSCRATCH2
 649	mfspr	r11,SPRN_SPRG_RSCRATCH1
 650	mfspr	r10,SPRN_SPRG_RSCRATCH0
 651	b	DataStorage
 652
 653	/* Instruction TLB Error Interrupt */
 654	/*
 655	 * Nearly the same as above, except we get our
 656	 * information from different registers and bailout
 657	 * to a different point.
 658	 */
 659	START_EXCEPTION(InstructionTLBError47x)
 660	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
 661	mtspr	SPRN_SPRG_WSCRATCH1,r11
 662	mtspr	SPRN_SPRG_WSCRATCH2,r12
 663	mtspr	SPRN_SPRG_WSCRATCH3,r13
 664	mfcr	r11
 665	mtspr	SPRN_SPRG_WSCRATCH4,r11
 666	mfspr	r10,SPRN_SRR0		/* Get faulting address */
 667
 668	/* If we are faulting a kernel address, we have to use the
 669	 * kernel page tables.
 670	 */
 671	lis	r11,PAGE_OFFSET@h
 672	cmplw	cr7,r10,r11
 673	blt+	cr7,3f
 674	lis	r11,swapper_pg_dir@h
 675	ori	r11,r11, swapper_pg_dir@l
 676	li	r12,0			/* MMUCR = 0 */
 677	b	4f
 678
 679	/* Get the PGD for the current thread and setup MMUCR */
 6803:	mfspr	r11,SPRN_SPRG_THREAD
 681	lwz	r11,PGDIR(r11)
 682	mfspr   r12,SPRN_PID		/* Get PID */
 683#ifdef CONFIG_PPC_KUAP
 684	cmpwi	r12,0
 685	beq	2f			/* KUAP Fault */
 686#endif
 6874:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
 688
 689	/* Make up the required permissions */
 690	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
 691
 692	/* Load PTE */
 693	/* Compute pgdir/pmd offset */
 694	rlwinm  r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
 695	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
 696
 697	/* Word 0 is EPN,V,TS,DSIZ */
 698	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
 699	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
 700	li	r12,0
 701	tlbwe	r10,r12,0
 702
 703	/* XXX can we do better ? Need to make sure tlbwe has established
 704	 * latch V bit in MMUCR0 before the PTE is loaded further down */
 705#ifdef CONFIG_SMP
 706	isync
 707#endif
 708
 709	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
 710	/* Compute pte address */
 711	rlwimi  r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
 712	beq	2f			/* Bail if no table */
 713
 714	lwz	r11,0(r12)		/* Get high word of pte entry */
 715	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
 716	 * bottom of r12 to create a data dependency... We can also use r10
 717	 * as destination nowadays
 718	 */
 719#ifdef CONFIG_SMP
 720	lwsync
 721#endif
 722	lwz	r12,4(r12)		/* Get low word of pte entry */
 723
 724	andc.	r13,r13,r12		/* Check permission */
 725
 726	/* Jump to common TLB load point */
 727	beq	finish_tlb_load_47x
 728
 7292:	/* The bailout.  Restore registers to pre-exception conditions
 730	 * and call the heavyweights to help us out.
 731	 */
 732	mfspr	r11, SPRN_SPRG_RSCRATCH4
 733	mtcr	r11
 734	mfspr	r13, SPRN_SPRG_RSCRATCH3
 735	mfspr	r12, SPRN_SPRG_RSCRATCH2
 736	mfspr	r11, SPRN_SPRG_RSCRATCH1
 737	mfspr	r10, SPRN_SPRG_RSCRATCH0
 738	b	InstructionStorage
 739
 740/*
 741 * Both the instruction and data TLB miss get to this
 742 * point to load the TLB.
 743 * 	r10 - free to use
 744 * 	r11 - PTE high word value
 745 *	r12 - PTE low word value
 746 *      r13 - free to use
 747 *	cr7 - Result of comparison with PAGE_OFFSET
 748 *	MMUCR - loaded with proper value when we get here
 749 *	Upon exit, we reload everything and RFI.
 750 */
 751finish_tlb_load_47x:
  752	/* Combine RPN & ERPN and write WS 1 */
 753	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
 754	tlbwe	r11,r13,1
 755
 756	/* And make up word 2 */
 757	li	r10,0xf84			/* Mask to apply from PTE */
 758	rlwimi	r10,r12,29,30,31		/* DIRTY,READ -> SW,SR position */
 759	and	r11,r12,r10			/* Mask PTE bits to keep */
  760	bge	cr7,1f			/* User page ? no, leave U bits empty */
  761	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
 762	rlwinm	r11,r11,0,~PPC47x_TLB2_SX	/* Clear SX if User page */
 7631:	tlbwe	r11,r13,2
 764
 765	/* Done...restore registers and get out of here.
 766	*/
 767	mfspr	r11, SPRN_SPRG_RSCRATCH4
 768	mtcr	r11
 769	mfspr	r13, SPRN_SPRG_RSCRATCH3
 770	mfspr	r12, SPRN_SPRG_RSCRATCH2
 771	mfspr	r11, SPRN_SPRG_RSCRATCH1
 772	mfspr	r10, SPRN_SPRG_RSCRATCH0
 773	rfi
 774
 775#endif /* CONFIG_PPC_47x */
 776
 777	/* Debug Interrupt */
 778	/*
 779	 * This statement needs to exist at the end of the IVPR
 780	 * definition just in case you end up taking a debug
 781	 * exception within another exception.
 782	 */
 783	DEBUG_CRIT_EXCEPTION
 784
 785interrupt_end:
 786
 787/*
 788 * Global functions
 789 */
 790
 791/*
 792 * Adjust the machine check IVOR on 440A cores
 793 */
 794_GLOBAL(__fixup_440A_mcheck)
 795	li	r3,MachineCheckA@l
 796	mtspr	SPRN_IVOR1,r3
 797	sync
 798	blr
 799
 800/*
 801 * Init CPU state. This is called at boot time or for secondary CPUs
 802 * to setup initial TLB entries, setup IVORs, etc...
 803 *
 804 */
 805_GLOBAL(init_cpu_state)
 806	mflr	r22
 807#ifdef CONFIG_PPC_47x
 808	/* We use the PVR to differentiate 44x cores from 476 */
 809	mfspr	r3,SPRN_PVR
 810	srwi	r3,r3,16
 811	cmplwi	cr0,r3,PVR_476FPE@h
 812	beq	head_start_47x
 813	cmplwi	cr0,r3,PVR_476@h
 814	beq	head_start_47x
 815	cmplwi	cr0,r3,PVR_476_ISS@h
 816	beq	head_start_47x
 817#endif /* CONFIG_PPC_47x */
 818
 819/*
 820 * In case the firmware didn't do it, we apply some workarounds
 821 * that are good for all 440 core variants here
 822 */
 823	mfspr	r3,SPRN_CCR0
 824	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
 825	isync
 826	mtspr	SPRN_CCR0,r3
 827	isync
 828	sync
 829
 830/*
 831 * Set up the initial MMU state for 44x
 832 *
 833 * We are still executing code at the virtual address
 834 * mappings set by the firmware for the base of RAM.
 835 *
 836 * We first invalidate all TLB entries but the one
 837 * we are running from.  We then load the KERNELBASE
 838 * mappings so we can begin to use kernel addresses
 839 * natively and so the interrupt vector locations are
 840 * permanently pinned (necessary since Book E
 841 * implementations always have translation enabled).
 842 *
 843 * TODO: Use the known TLB entry we are running from to
 844 *	 determine which physical region we are located
 845 *	 in.  This can be used to determine where in RAM
 846 *	 (on a shared CPU system) or PCI memory space
 847 *	 (on a DRAMless system) we are located.
 848 *       For now, we assume a perfect world which means
 849 *	 we are located at the base of DRAM (physical 0).
 850 */
 851
 852/*
 853 * Search TLB for entry that we are currently using.
 854 * Invalidate all entries but the one we are using.
 855 */
 856	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
 857	mfspr	r3,SPRN_PID			/* Get PID */
 858	mfmsr	r4				/* Get MSR */
 859	andi.	r4,r4,MSR_IS@l			/* TS=1? */
 860	beq	wmmucr				/* If not, leave STS=0 */
 861	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
 862wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
 863	sync
 864
 865	bcl	20,31,$+4			/* Find our address */
 866invstr:	mflr	r5				/* Make it accessible */
 867	tlbsx	r23,0,r5			/* Find entry we are in */
 868	li	r4,0				/* Start at TLB entry 0 */
 869	li	r3,0				/* Set PAGEID inval value */
 8701:	cmpw	r23,r4				/* Is this our entry? */
 871	beq	skpinv				/* If so, skip the inval */
 872	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
 873skpinv:	addi	r4,r4,1				/* Increment */
 874	cmpwi	r4,64				/* Are we done? */
 875	bne	1b				/* If not, repeat */
 876	isync					/* If so, context change */
 877
 878/*
 879 * Configure and load pinned entry into TLB slot 63.
 880 */
 881#ifdef CONFIG_NONSTATIC_KERNEL
 882	/*
 883	 * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT
 884	 * entries of the initial mapping set by the boot loader.
 885	 * The XLAT entry is stored in r25
 886	 */
 887
 888	/* Read the XLAT entry for our current mapping */
 889	tlbre	r25,r23,PPC44x_TLB_XLAT
 890
 891	lis	r3,KERNELBASE@h
 892	ori	r3,r3,KERNELBASE@l
 893
 894	/* Use our current RPN entry */
 895	mr	r4,r25
 896#else
 897
 898	lis	r3,PAGE_OFFSET@h
 899	ori	r3,r3,PAGE_OFFSET@l
 900
 901	/* Kernel is at the base of RAM */
 902	li r4, 0			/* Load the kernel physical address */
 903#endif
 904
 905	/* Load the kernel PID = 0 */
 906	li	r0,0
 907	mtspr	SPRN_PID,r0
 908	sync
 909
 910	/* Initialize MMUCR */
 911	li	r5,0
 912	mtspr	SPRN_MMUCR,r5
 913	sync
 914
 915	/* pageid fields */
 916	clrrwi	r3,r3,10		/* Mask off the effective page number */
 917	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
 918
 919	/* xlat fields */
 920	clrrwi	r4,r4,10		/* Mask off the real page number */
 921					/* ERPN is 0 for first 4GB page */
 922
 923	/* attrib fields */
 924	/* Added guarded bit to protect against speculative loads/stores */
 925	li	r5,0
 926	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
 927
 928        li      r0,63                    /* TLB slot 63 */
 929
 930	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
 931	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
 932	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
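	/*
	 * Illustrative values for the non-relocatable case (PAGE_OFFSET
	 * assumed to be 0xc0000000, kernel at physical 0), slot 63 then
	 * holds roughly:
	 *	PAGEID = 0xc0000000 | PPC44x_TLB_VALID | PPC44x_TLB_256M
	 *	XLAT   = 0x00000000	(RPN = 0, ERPN = 0)
	 *	ATTRIB = SW | SR | SX | G
	 * i.e. a single pinned 256 MB mapping covering the kernel.
	 */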
 933
 934	/* Force context change */
 935	mfmsr	r0
 936	mtspr	SPRN_SRR1, r0
 937	lis	r0,3f@h
 938	ori	r0,r0,3f@l
 939	mtspr	SPRN_SRR0,r0
 940	sync
 941	rfi
 942
 943	/* If necessary, invalidate original entry we used */
 9443:	cmpwi	r23,63
 945	beq	4f
 946	li	r6,0
 947	tlbwe   r6,r23,PPC44x_TLB_PAGEID
 948	isync
 949
 9504:
 951#ifdef CONFIG_PPC_EARLY_DEBUG_44x
 952	/* Add UART mapping for early debug. */
 953
 954	/* pageid fields */
 955	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
 956	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
 957
 958	/* xlat fields */
 959	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
 960	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
 961
 962	/* attrib fields */
 963	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
  964        li      r0,62                    /* TLB slot 62 */
 965
 966	tlbwe	r3,r0,PPC44x_TLB_PAGEID
 967	tlbwe	r4,r0,PPC44x_TLB_XLAT
 968	tlbwe	r5,r0,PPC44x_TLB_ATTRIB
 969
 970	/* Force context change */
 971	isync
 972#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
 973
 974	/* Establish the interrupt vector offsets */
 975	SET_IVOR(0,  CriticalInput);
 976	SET_IVOR(1,  MachineCheck);
 977	SET_IVOR(2,  DataStorage);
 978	SET_IVOR(3,  InstructionStorage);
 979	SET_IVOR(4,  ExternalInput);
 980	SET_IVOR(5,  Alignment);
 981	SET_IVOR(6,  Program);
 982	SET_IVOR(7,  FloatingPointUnavailable);
 983	SET_IVOR(8,  SystemCall);
 984	SET_IVOR(9,  AuxillaryProcessorUnavailable);
 985	SET_IVOR(10, Decrementer);
 986	SET_IVOR(11, FixedIntervalTimer);
 987	SET_IVOR(12, WatchdogTimer);
 988	SET_IVOR(13, DataTLBError44x);
 989	SET_IVOR(14, InstructionTLBError44x);
 990	SET_IVOR(15, DebugCrit);
 991
 992	b	head_start_common
 993
 994
 995#ifdef CONFIG_PPC_47x
 996
 997#ifdef CONFIG_SMP
 998
 999/* Entry point for secondary 47x processors */
1000_GLOBAL(start_secondary_47x)
1001        mr      r24,r3          /* CPU number */
1002
1003	bl	init_cpu_state
1004
1005	/* Now we need to bolt the rest of kernel memory which
1006	 * is done in C code. We must be careful because our task
1007	 * struct or our stack can (and will probably) be out
1008	 * of reach of the initial 256M TLB entry, so we use a
1009	 * small temporary stack in .bss for that. This works
1010	 * because only one CPU at a time can be in this code
1011	 */
1012	lis	r1,temp_boot_stack@h
1013	ori	r1,r1,temp_boot_stack@l
1014	addi	r1,r1,1024-STACK_FRAME_MIN_SIZE
1015	li	r0,0
1016	stw	r0,0(r1)
1017	bl	mmu_init_secondary
1018
1019	/* Now we can get our task struct and real stack pointer */
1020
1021	/* Get current's stack and current */
1022	lis	r2,secondary_current@ha
1023	lwz	r2,secondary_current@l(r2)
1024	lwz	r1,TASK_STACK(r2)
1025
1026	/* Current stack pointer */
1027	addi	r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE
1028	li	r0,0
1029	stw	r0,0(r1)
1030
1031	/* Kernel stack for exception entry in SPRG3 */
1032	addi	r4,r2,THREAD	/* init task's THREAD */
1033	mtspr	SPRN_SPRG3,r4
1034
1035	b	start_secondary
1036
1037#endif /* CONFIG_SMP */
1038
1039/*
1040 * Set up the initial MMU state for 44x
1041 *
1042 * We are still executing code at the virtual address
1043 * mappings set by the firmware for the base of RAM.
1044 */
1045
1046head_start_47x:
1047	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
1048	mfspr	r3,SPRN_PID			/* Get PID */
1049	mfmsr	r4				/* Get MSR */
1050	andi.	r4,r4,MSR_IS@l			/* TS=1? */
1051	beq	1f				/* If not, leave STS=0 */
1052	oris	r3,r3,PPC47x_MMUCR_STS@h	/* Set STS=1 */
10531:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
1054	sync
1055
1056	/* Find the entry we are running from */
1057	bcl	20,31,$+4
10581:	mflr	r23
1059	tlbsx	r23,0,r23
1060	tlbre	r24,r23,0
1061	tlbre	r25,r23,1
1062	tlbre	r26,r23,2
1063
1064/*
1065 * Cleanup time
1066 */
1067
1068	/* Initialize MMUCR */
1069	li	r5,0
1070	mtspr	SPRN_MMUCR,r5
1071	sync
1072
1073clear_all_utlb_entries:
1074
1075	#; Set initial values.
1076
1077	addis		r3,0,0x8000
1078	addi		r4,0,0
1079	addi		r5,0,0
1080	b		clear_utlb_entry
1081
1082	#; Align the loop to speed things up.
1083
1084	.align		6
1085
1086clear_utlb_entry:
1087
1088	tlbwe		r4,r3,0
1089	tlbwe		r5,r3,1
1090	tlbwe		r5,r3,2
1091	addis		r3,r3,0x2000
1092	cmpwi		r3,0
1093	bne		clear_utlb_entry
1094	addis		r3,0,0x8000
1095	addis		r4,r4,0x100
1096	cmpwi		r4,0
1097	bne		clear_utlb_entry
1098
1099	#; Restore original entry.
1100
1101	oris	r23,r23,0x8000  /* specify the way */
1102	tlbwe		r24,r23,0
1103	tlbwe		r25,r23,1
1104	tlbwe		r26,r23,2
1105
1106/*
1107 * Configure and load pinned entry into TLB for the kernel core
1108 */
1109
1110	lis	r3,PAGE_OFFSET@h
1111	ori	r3,r3,PAGE_OFFSET@l
1112
1113	/* Load the kernel PID = 0 */
1114	li	r0,0
1115	mtspr	SPRN_PID,r0
1116	sync
1117
1118	/* Word 0 */
1119	clrrwi	r3,r3,12		/* Mask off the effective page number */
1120	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
1121
1122	/* Word 1 - use r25.  RPN is the same as the original entry */
1123
1124	/* Word 2 */
1125	li	r5,0
1126	ori	r5,r5,PPC47x_TLB2_S_RWX
1127#ifdef CONFIG_SMP
1128	ori	r5,r5,PPC47x_TLB2_M
1129#endif
1130
1131	/* We write to way 0 and bolted 0 */
1132	lis	r0,0x8800
1133	tlbwe	r3,r0,0
1134	tlbwe	r25,r0,1
1135	tlbwe	r5,r0,2
1136
1137/*
1138 * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix
1139 * them up later
1140 */
1141	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
1142	mtspr	SPRN_SSPCR,r3
1143	mtspr	SPRN_USPCR,r3
1144	LOAD_REG_IMMEDIATE(r3, 0x12345670)
1145	mtspr	SPRN_ISPCR,r3
1146
1147	/* Force context change */
1148	mfmsr	r0
1149	mtspr	SPRN_SRR1, r0
1150	lis	r0,3f@h
1151	ori	r0,r0,3f@l
1152	mtspr	SPRN_SRR0,r0
1153	sync
1154	rfi
1155
1156	/* Invalidate original entry we used */
11573:
1158	rlwinm	r24,r24,0,21,19 /* clear the "valid" bit */
1159	tlbwe	r24,r23,0
1160	addi	r24,0,0
1161	tlbwe	r24,r23,1
1162	tlbwe	r24,r23,2
1163	isync                   /* Clear out the shadow TLB entries */
1164
1165#ifdef CONFIG_PPC_EARLY_DEBUG_44x
1166	/* Add UART mapping for early debug. */
1167
1168	/* Word 0 */
1169	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
1170	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M
1171
1172	/* Word 1 */
1173	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
1174	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
1175
1176	/* Word 2 */
1177	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)
1178
1179	/* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same
1180	 * congruence class as the kernel, we need to make sure of it at
1181	 * some point
1182	 */
1183        lis	r0,0x8d00
1184	tlbwe	r3,r0,0
1185	tlbwe	r4,r0,1
1186	tlbwe	r5,r0,2
1187
1188	/* Force context change */
1189	isync
1190#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
1191
1192	/* Establish the interrupt vector offsets */
1193	SET_IVOR(0,  CriticalInput);
1194	SET_IVOR(1,  MachineCheckA);
1195	SET_IVOR(2,  DataStorage);
1196	SET_IVOR(3,  InstructionStorage);
1197	SET_IVOR(4,  ExternalInput);
1198	SET_IVOR(5,  Alignment);
1199	SET_IVOR(6,  Program);
1200	SET_IVOR(7,  FloatingPointUnavailable);
1201	SET_IVOR(8,  SystemCall);
1202	SET_IVOR(9,  AuxillaryProcessorUnavailable);
1203	SET_IVOR(10, Decrementer);
1204	SET_IVOR(11, FixedIntervalTimer);
1205	SET_IVOR(12, WatchdogTimer);
1206	SET_IVOR(13, DataTLBError47x);
1207	SET_IVOR(14, InstructionTLBError47x);
1208	SET_IVOR(15, DebugCrit);
1209
1210	/* We configure icbi to invalidate 128 bytes at a time since the
1211	 * current 32-bit kernel code isn't too happy with icache != dcache
1212	 * block size. We also disable the BTAC as this can cause errors
1213	 * in some circumstances (see IBM Erratum 47).
1214	 */
1215	mfspr	r3,SPRN_CCR0
1216	oris	r3,r3,0x0020
1217	ori	r3,r3,0x0040
1218	mtspr	SPRN_CCR0,r3
1219	isync
1220
1221#endif /* CONFIG_PPC_47x */
1222
1223/*
1224 * Here we are back to code that is common between 44x and 47x
1225 *
1226 * We proceed to further kernel initialization and return to the
1227 * main kernel entry
1228 */
1229head_start_common:
1230	/* Establish the interrupt vector base */
1231	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
1232	mtspr	SPRN_IVPR,r4
1233
1234	/*
1235	 * If the kernel was loaded at a non-zero 256 MB page, we need to
1236	 * mask off the most significant 4 bits to get the relative address
1237	 * from the start of physical memory
1238	 */
1239	rlwinm	r22,r22,0,4,31
1240	addis	r22,r22,PAGE_OFFSET@h
1241	mtlr	r22
1242	isync
1243	blr
1244
1245#ifdef CONFIG_SMP
1246	.data
1247	.align	12
1248temp_boot_stack:
1249	.space	1024
1250#endif /* CONFIG_SMP */
v6.2
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/*
   3 * Kernel execution entry point code.
   4 *
   5 *    Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
   6 *      Initial PowerPC version.
   7 *    Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
   8 *      Rewritten for PReP
   9 *    Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
  10 *      Low-level exception handers, MMU support, and rewrite.
  11 *    Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
  12 *      PowerPC 8xx modifications.
  13 *    Copyright (c) 1998-1999 TiVo, Inc.
  14 *      PowerPC 403GCX modifications.
  15 *    Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
  16 *      PowerPC 403GCX/405GP modifications.
  17 *    Copyright 2000 MontaVista Software Inc.
  18 *	PPC405 modifications
  19 *      PowerPC 403GCX/405GP modifications.
  20 * 	Author: MontaVista Software, Inc.
  21 *         	frank_rowand@mvista.com or source@mvista.com
  22 * 	   	debbie_chu@mvista.com
  23 *    Copyright 2002-2005 MontaVista Software, Inc.
  24 *      PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
  25 */
  26
  27#include <linux/init.h>
  28#include <linux/pgtable.h>
  29#include <asm/processor.h>
  30#include <asm/page.h>
  31#include <asm/mmu.h>
  32#include <asm/cputable.h>
  33#include <asm/thread_info.h>
  34#include <asm/ppc_asm.h>
  35#include <asm/asm-offsets.h>
  36#include <asm/ptrace.h>
  37#include <asm/synch.h>
  38#include <asm/export.h>
  39#include <asm/code-patching-asm.h>
  40#include "head_booke.h"
  41
  42
  43/* As with the other PowerPC ports, it is expected that when code
  44 * execution begins here, the following registers contain valid, yet
  45 * optional, information:
  46 *
  47 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
  48 *   r4 - Starting address of the init RAM disk
  49 *   r5 - Ending address of the init RAM disk
  50 *   r6 - Start of kernel command line string (e.g. "mem=128")
  51 *   r7 - End of kernel command line string
  52 *
  53 */
  54	__HEAD
  55_GLOBAL(_stext);
  56_GLOBAL(_start);
  57	/*
  58	 * Reserve a word at a fixed location to store the address
  59	 * of abatron_pteptrs
  60	 */
  61	nop
  62	mr	r31,r3		/* save device tree ptr */
  63	li	r24,0		/* CPU number */
  64
  65#ifdef CONFIG_RELOCATABLE
  66/*
  67 * Relocate ourselves to the current runtime address.
  68 * This is called only by the Boot CPU.
  69 * "relocate" is called with our current runtime virutal
  70 * address.
  71 * r21 will be loaded with the physical runtime address of _stext
  72 */
  73	bcl	20,31,$+4			/* Get our runtime address */
  740:	mflr	r21				/* Make it accessible */
  75	addis	r21,r21,(_stext - 0b)@ha
  76	addi	r21,r21,(_stext - 0b)@l 	/* Get our current runtime base */
  77
  78	/*
  79	 * We have the runtime (virutal) address of our base.
  80	 * We calculate our shift of offset from a 256M page.
  81	 * We could map the 256M page we belong to at PAGE_OFFSET and
  82	 * get going from there.
  83	 */
  84	lis	r4,KERNELBASE@h
  85	ori	r4,r4,KERNELBASE@l
  86	rlwinm	r6,r21,0,4,31			/* r6 = PHYS_START % 256M */
  87	rlwinm	r5,r4,0,4,31			/* r5 = KERNELBASE % 256M */
  88	subf	r3,r5,r6			/* r3 = r6 - r5 */
  89	add	r3,r4,r3			/* Required Virutal Address */
  90
  91	bl	relocate
  92#endif
  93
  94	bl	init_cpu_state
  95
  96	/*
  97	 * This is where the main kernel code starts.
  98	 */
  99
 100	/* ptr to current */
 101	lis	r2,init_task@h
 102	ori	r2,r2,init_task@l
 103
 104	/* ptr to current thread */
 105	addi	r4,r2,THREAD	/* init task's THREAD */
 106	mtspr	SPRN_SPRG_THREAD,r4
 107
 108	/* stack */
 109	lis	r1,init_thread_union@h
 110	ori	r1,r1,init_thread_union@l
 111	li	r0,0
 112	stwu	r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)
 113
 114	bl	early_init
 115
 116#ifdef CONFIG_RELOCATABLE
 117	/*
 118	 * Relocatable kernel support based on processing of dynamic
 119	 * relocation entries.
 120	 *
 121	 * r25 will contain RPN/ERPN for the start address of memory
 122	 * r21 will contain the current offset of _stext
 123	 */
 124	lis	r3,kernstart_addr@ha
 125	la	r3,kernstart_addr@l(r3)
 126
 127	/*
 128	 * Compute the kernstart_addr.
 129	 * kernstart_addr => (r6,r8)
 130	 * kernstart_addr & ~0xfffffff => (r6,r7)
 131	 */
 132	rlwinm	r6,r25,0,28,31	/* ERPN. Bits 32-35 of Address */
 133	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
 134	rlwinm	r8,r21,0,4,31	/* r8 = (_stext & 0xfffffff) */
 135	or	r8,r7,r8	/* Compute the lower 32bit of kernstart_addr */
 136
 137	/* Store kernstart_addr */
 138	stw	r6,0(r3)	/* higher 32bit */
 139	stw	r8,4(r3)	/* lower 32bit  */
 140
 141	/*
 142	 * Compute the virt_phys_offset :
 143	 * virt_phys_offset = stext.run - kernstart_addr
 144	 *
 145	 * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
 146	 * When we relocate, we have :
 147	 *
 148	 *	(kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
 149	 *
 150	 * hence:
 151	 *  virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff)
 152	 *
 153	 */
 154
 155	/* KERNELBASE&~0xfffffff => (r4,r5) */
 156	li	r4, 0		/* higer 32bit */
 157	lis	r5,KERNELBASE@h
 158	rlwinm	r5,r5,0,0,3	/* Align to 256M, lower 32bit */
 159
 160	/*
 161	 * 64bit subtraction.
 162	 */
 163	subfc	r5,r7,r5
 164	subfe	r4,r6,r4
 165
 166	/* Store virt_phys_offset */
 167	lis	r3,virt_phys_offset@ha
 168	la	r3,virt_phys_offset@l(r3)
 169
 170	stw	r4,0(r3)
 171	stw	r5,4(r3)
 172
 173#elif defined(CONFIG_DYNAMIC_MEMSTART)
 174	/*
 175	 * Mapping based, page aligned dynamic kernel loading.
 176	 *
 177	 * r25 will contain RPN/ERPN for the start address of memory
 178	 *
 179	 * Add the difference between KERNELBASE and PAGE_OFFSET to the
 180	 * start of physical memory to get kernstart_addr.
 181	 */
 182	lis	r3,kernstart_addr@ha
 183	la	r3,kernstart_addr@l(r3)
 184
 185	lis	r4,KERNELBASE@h
 186	ori	r4,r4,KERNELBASE@l
 187	lis	r5,PAGE_OFFSET@h
 188	ori	r5,r5,PAGE_OFFSET@l
 189	subf	r4,r5,r4
 190
 191	rlwinm	r6,r25,0,28,31	/* ERPN */
 192	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
 193	add	r7,r7,r4
 194
 195	stw	r6,0(r3)
 196	stw	r7,4(r3)
 197#endif
 198
 199/*
 200 * Decide what sort of machine this is and initialize the MMU.
 201 */
 202#ifdef CONFIG_KASAN
 203	bl	kasan_early_init
 204#endif
 205	li	r3,0
 206	mr	r4,r31
 207	bl	machine_init
 208	bl	MMU_init
 209
 210	/* Setup PTE pointers for the Abatron bdiGDB */
 211	lis	r6, swapper_pg_dir@h
 212	ori	r6, r6, swapper_pg_dir@l
 213	lis	r5, abatron_pteptrs@h
 214	ori	r5, r5, abatron_pteptrs@l
 215	lis	r4, KERNELBASE@h
 216	ori	r4, r4, KERNELBASE@l
 217	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
 218	stw	r6, 0(r5)
 219
 220	/* Clear the Machine Check Syndrome Register */
 221	li	r0,0
 222	mtspr	SPRN_MCSR,r0
 223
 224	/* Let's move on */
 225	lis	r4,start_kernel@h
 226	ori	r4,r4,start_kernel@l
 227	lis	r3,MSR_KERNEL@h
 228	ori	r3,r3,MSR_KERNEL@l
 229	mtspr	SPRN_SRR0,r4
 230	mtspr	SPRN_SRR1,r3
 231	rfi			/* change context and jump to start_kernel */
 232
 233/*
 234 * Interrupt vector entry code
 235 *
 236 * The Book E MMUs are always on so we don't need to handle
 237 * interrupts in real mode as with previous PPC processors. In
 238 * this case we handle interrupts in the kernel virtual address
 239 * space.
 240 *
 241 * Interrupt vectors are dynamically placed relative to the
 242 * interrupt prefix as determined by the address of interrupt_base.
 243 * The interrupt vectors offsets are programmed using the labels
 244 * for each interrupt vector entry.
 245 *
 246 * Interrupt vectors must be aligned on a 16 byte boundary.
 247 * We align on a 32 byte cache line boundary for good measure.
 248 */
 249
 250interrupt_base:
 251	/* Critical Input Interrupt */
 252	CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
 253
 254	/* Machine Check Interrupt */
 255	CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
 256			   machine_check_exception)
 257	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)
 258
 259	/* Data Storage Interrupt */
 260	DATA_STORAGE_EXCEPTION
 261
 262		/* Instruction Storage Interrupt */
 263	INSTRUCTION_STORAGE_EXCEPTION
 264
 265	/* External Input Interrupt */
 266	EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, do_IRQ)
 267
 268	/* Alignment Interrupt */
 269	ALIGNMENT_EXCEPTION
 270
 271	/* Program Interrupt */
 272	PROGRAM_EXCEPTION
 273
 274	/* Floating Point Unavailable Interrupt */
 275#ifdef CONFIG_PPC_FPU
 276	FP_UNAVAILABLE_EXCEPTION
 277#else
 278	EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
 279		  FloatingPointUnavailable, unknown_exception)
 280#endif
 281	/* System Call Interrupt */
 282	START_EXCEPTION(SystemCall)
 283	SYSCALL_ENTRY   0xc00 BOOKE_INTERRUPT_SYSCALL
 284
 285	/* Auxiliary Processor Unavailable Interrupt */
 286	EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
 287		  AuxillaryProcessorUnavailable, unknown_exception)
 288
 289	/* Decrementer Interrupt */
 290	DECREMENTER_EXCEPTION
 291
 292	/* Fixed Internal Timer Interrupt */
 293	/* TODO: Add FIT support */
 294	EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, unknown_exception)
 295
 296	/* Watchdog Timer Interrupt */
 297	/* TODO: Add watchdog support */
 298#ifdef CONFIG_BOOKE_WDT
 299	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException)
 300#else
 301	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception)
 302#endif
 303
 304	/* Data TLB Error Interrupt */
 305	START_EXCEPTION(DataTLBError44x)
 306	mtspr	SPRN_SPRG_WSCRATCH0, r10		/* Save some working registers */
 307	mtspr	SPRN_SPRG_WSCRATCH1, r11
 308	mtspr	SPRN_SPRG_WSCRATCH2, r12
 309	mtspr	SPRN_SPRG_WSCRATCH3, r13
 310	mfcr	r11
 311	mtspr	SPRN_SPRG_WSCRATCH4, r11
 312	mfspr	r10, SPRN_DEAR		/* Get faulting address */
 313
 314	/* If we are faulting a kernel address, we have to use the
 315	 * kernel page tables.
 316	 */
 317	lis	r11, PAGE_OFFSET@h
 318	cmplw	r10, r11
 319	blt+	3f
 320	lis	r11, swapper_pg_dir@h
 321	ori	r11, r11, swapper_pg_dir@l
 322
 323	mfspr	r12,SPRN_MMUCR
 324	rlwinm	r12,r12,0,0,23		/* Clear TID */
 325
 326	b	4f
 327
 328	/* Get the PGD for the current thread */
 3293:
 330	mfspr	r11,SPRN_SPRG_THREAD
 331	lwz	r11,PGDIR(r11)
 332
 333	/* Load PID into MMUCR TID */
 334	mfspr	r12,SPRN_MMUCR
 335	mfspr   r13,SPRN_PID		/* Get PID */
 336	rlwimi	r12,r13,0,24,31		/* Set TID */
 337#ifdef CONFIG_PPC_KUAP
 338	cmpwi	r13,0
 339	beq	2f			/* KUAP Fault */
 340#endif
 341
 3424:
 343	mtspr	SPRN_MMUCR,r12
 344
 345	/* Mask of required permission bits. Note that while we
 346	 * do copy ESR:ST to _PAGE_RW position as trying to write
 347	 * to an RO page is pretty common, we don't do it with
 348	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
 349	 * event so I'd rather take the overhead when it happens
 350	 * rather than adding an instruction here. We should measure
 351	 * whether the whole thing is worth it in the first place
 352	 * as we could avoid loading SPRN_ESR completely in the first
 353	 * place...
 354	 *
 355	 * TODO: Is it worth doing that mfspr & rlwimi in the first
 356	 *       place or can we save a couple of instructions here ?
 357	 */
 358	mfspr	r12,SPRN_ESR
 359	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
 360	rlwimi	r13,r12,10,30,30
 361
 362	/* Load the PTE */
 363	/* Compute pgdir/pmd offset */
 364	rlwinm  r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
 365	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
 366	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
 367	beq	2f			/* Bail if no table */
 368
 369	/* Compute pte address */
 370	rlwimi  r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
 371	lwz	r11, 0(r12)		/* Get high word of pte entry */
 372	lwz	r12, 4(r12)		/* Get low word of pte entry */
 373
 374	lis	r10,tlb_44x_index@ha
 375
 376	andc.	r13,r13,r12		/* Check permission */
 377
 378	/* Load the next available TLB index */
 379	lwz	r13,tlb_44x_index@l(r10)
 380
 381	bne	2f			/* Bail if permission mismatch */
 382
 383	/* Increment, rollover, and store TLB index */
 384	addi	r13,r13,1
 385
 386	patch_site 0f, patch__tlb_44x_hwater_D
 387	/* Compare with watermark (instruction gets patched) */
 3880:	cmpwi	0,r13,1			/* reserve entries */
 389	ble	5f
 390	li	r13,0
 3915:
 392	/* Store the next available TLB index */
 393	stw	r13,tlb_44x_index@l(r10)
 394
 395	/* Re-load the faulting address */
 396	mfspr	r10,SPRN_DEAR
 397
 398	 /* Jump to common tlb load */
 399	b	finish_tlb_load_44x
 400
 4012:
 402	/* The bailout.  Restore registers to pre-exception conditions
 403	 * and call the heavyweights to help us out.
 404	 */
 405	mfspr	r11, SPRN_SPRG_RSCRATCH4
 406	mtcr	r11
 407	mfspr	r13, SPRN_SPRG_RSCRATCH3
 408	mfspr	r12, SPRN_SPRG_RSCRATCH2
 409	mfspr	r11, SPRN_SPRG_RSCRATCH1
 410	mfspr	r10, SPRN_SPRG_RSCRATCH0
 411	b	DataStorage
 412
 413	/* Instruction TLB Error Interrupt */
 414	/*
 415	 * Nearly the same as above, except we get our
 416	 * information from different registers and bailout
 417	 * to a different point.
 418	 */
 419	START_EXCEPTION(InstructionTLBError44x)
 420	mtspr	SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
 421	mtspr	SPRN_SPRG_WSCRATCH1, r11
 422	mtspr	SPRN_SPRG_WSCRATCH2, r12
 423	mtspr	SPRN_SPRG_WSCRATCH3, r13
 424	mfcr	r11
 425	mtspr	SPRN_SPRG_WSCRATCH4, r11
 426	mfspr	r10, SPRN_SRR0		/* Get faulting address */
 427
 428	/* If we are faulting a kernel address, we have to use the
 429	 * kernel page tables.
 430	 */
 431	lis	r11, PAGE_OFFSET@h
 432	cmplw	r10, r11
 433	blt+	3f
 434	lis	r11, swapper_pg_dir@h
 435	ori	r11, r11, swapper_pg_dir@l
 436
 437	mfspr	r12,SPRN_MMUCR
 438	rlwinm	r12,r12,0,0,23		/* Clear TID */
 439
 440	b	4f
 441
 442	/* Get the PGD for the current thread */
 4433:
 444	mfspr	r11,SPRN_SPRG_THREAD
 445	lwz	r11,PGDIR(r11)
 446
 447	/* Load PID into MMUCR TID */
 448	mfspr	r12,SPRN_MMUCR
 449	mfspr   r13,SPRN_PID		/* Get PID */
 450	rlwimi	r12,r13,0,24,31		/* Set TID */
 451#ifdef CONFIG_PPC_KUAP
 452	cmpwi	r13,0
 453	beq	2f			/* KUAP Fault */
 454#endif
 455
 4564:
 457	mtspr	SPRN_MMUCR,r12
 458
 459	/* Make up the required permissions */
 460	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
 461
 462	/* Compute pgdir/pmd offset */
 463	rlwinm 	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
 464	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
 465	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
 466	beq	2f			/* Bail if no table */
 467
 468	/* Compute pte address */
 469	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
 470	lwz	r11, 0(r12)		/* Get high word of pte entry */
 471	lwz	r12, 4(r12)		/* Get low word of pte entry */
 472
 473	lis	r10,tlb_44x_index@ha
 474
 475	andc.	r13,r13,r12		/* Check permission */
 476
 477	/* Load the next available TLB index */
 478	lwz	r13,tlb_44x_index@l(r10)
 479
 480	bne	2f			/* Bail if permission mismatch */
 481
 482	/* Increment, rollover, and store TLB index */
 483	addi	r13,r13,1
 484
 485	patch_site 0f, patch__tlb_44x_hwater_I
 486	/* Compare with watermark (instruction gets patched) */
 4870:	cmpwi	0,r13,1			/* reserve entries */
 488	ble	5f
 489	li	r13,0
 4905:
 491	/* Store the next available TLB index */
 492	stw	r13,tlb_44x_index@l(r10)
 493
 494	/* Re-load the faulting address */
 495	mfspr	r10,SPRN_SRR0
 496
 497	/* Jump to common TLB load point */
 498	b	finish_tlb_load_44x
 499
 5002:
 501	/* The bailout.  Restore registers to pre-exception conditions
 502	 * and call the heavyweights to help us out.
 503	 */
 504	mfspr	r11, SPRN_SPRG_RSCRATCH4
 505	mtcr	r11
 506	mfspr	r13, SPRN_SPRG_RSCRATCH3
 507	mfspr	r12, SPRN_SPRG_RSCRATCH2
 508	mfspr	r11, SPRN_SPRG_RSCRATCH1
 509	mfspr	r10, SPRN_SPRG_RSCRATCH0
 510	b	InstructionStorage
 511
 512/*
 513 * Both the instruction and data TLB miss get to this
 514 * point to load the TLB.
 515 * 	r10 - EA of fault
 516 * 	r11 - PTE high word value
 517 *	r12 - PTE low word value
 518 *	r13 - TLB index
 
 519 *	MMUCR - loaded with proper value when we get here
 520 *	Upon exit, we reload everything and RFI.
 521 */
 522finish_tlb_load_44x:
 523	/* Combine RPN & ERPN an write WS 0 */
 524	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
 525	tlbwe	r11,r13,PPC44x_TLB_XLAT
 526
 527	/*
 528	 * Create WS1. This is the faulting address (EPN),
 529	 * page size, and valid flag.
 530	 */
 531	li	r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
 532	/* Insert valid and page size */
 533	rlwimi	r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
 534	tlbwe	r10,r13,PPC44x_TLB_PAGEID	/* Write PAGEID */
 535
 536	/* And WS 2 */
 537	li	r10,0xf85			/* Mask to apply from PTE */
 538	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
 539	and	r11,r12,r10			/* Mask PTE bits to keep */
 540	andi.	r10,r12,_PAGE_USER		/* User page ? */
 541	beq	1f				/* nope, leave U bits empty */
 542	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
 543	rlwinm	r11,r11,0,~PPC44x_TLB_SX	/* Clear SX if User page */
 5441:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB	/* Write ATTRIB */
 545
 546	/* Done...restore registers and get out of here.
 547	*/
 548	mfspr	r11, SPRN_SPRG_RSCRATCH4
 549	mtcr	r11
 550	mfspr	r13, SPRN_SPRG_RSCRATCH3
 551	mfspr	r12, SPRN_SPRG_RSCRATCH2
 552	mfspr	r11, SPRN_SPRG_RSCRATCH1
 553	mfspr	r10, SPRN_SPRG_RSCRATCH0
 554	rfi					/* Force context change */
 555
 556/* TLB error interrupts for 476
 557 */
 558#ifdef CONFIG_PPC_47x
 559	START_EXCEPTION(DataTLBError47x)
 560	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
 561	mtspr	SPRN_SPRG_WSCRATCH1,r11
 562	mtspr	SPRN_SPRG_WSCRATCH2,r12
 563	mtspr	SPRN_SPRG_WSCRATCH3,r13
 564	mfcr	r11
 565	mtspr	SPRN_SPRG_WSCRATCH4,r11
 566	mfspr	r10,SPRN_DEAR		/* Get faulting address */
 567
 568	/* If we are faulting a kernel address, we have to use the
 569	 * kernel page tables.
 570	 */
 571	lis	r11,PAGE_OFFSET@h
 572	cmplw	cr0,r10,r11
 573	blt+	3f
 574	lis	r11,swapper_pg_dir@h
 575	ori	r11,r11, swapper_pg_dir@l
 576	li	r12,0			/* MMUCR = 0 */
 577	b	4f
 578
 579	/* Get the PGD for the current thread and setup MMUCR */
 5803:	mfspr	r11,SPRN_SPRG3
 581	lwz	r11,PGDIR(r11)
 582	mfspr   r12,SPRN_PID		/* Get PID */
 583#ifdef CONFIG_PPC_KUAP
 584	cmpwi	r12,0
 585	beq	2f			/* KUAP Fault */
 586#endif
 5874:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
 588
 589	/* Mask of required permission bits. Note that while we
 590	 * do copy ESR:ST to _PAGE_RW position as trying to write
 591	 * to an RO page is pretty common, we don't do it with
 592	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
 593	 * event so I'd rather take the overhead when it happens
 594	 * rather than adding an instruction here. We should measure
 595	 * whether the whole thing is worth it in the first place
 596	 * as we could avoid loading SPRN_ESR completely in the first
 597	 * place...
 598	 *
 599	 * TODO: Is it worth doing that mfspr & rlwimi in the first
 600	 *       place or can we save a couple of instructions here ?
 601	 */
 602	mfspr	r12,SPRN_ESR
 603	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
 604	rlwimi	r13,r12,10,30,30
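	/* (The rlwimi above rotates ESR left by 10, dropping ESR[ST], the
	 * "this was a store" bit, into the _PAGE_RW position of the
	 * required-permission mask -- assuming the usual Book E ESR and
	 * 44x PTE bit positions.) */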
 605
 606	/* Load the PTE */
 607	/* Compute pgdir/pmd offset */
 608	rlwinm  r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
 609	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
 610
 611	/* Word 0 is EPN,V,TS,DSIZ */
 612	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
  613	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size */
 614	li	r12,0
 615	tlbwe	r10,r12,0
 616
 617	/* XXX can we do better ? Need to make sure tlbwe has established
 618	 * latch V bit in MMUCR0 before the PTE is loaded further down */
 619#ifdef CONFIG_SMP
 620	isync
 621#endif
 622
 623	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
 624	/* Compute pte address */
 625	rlwimi  r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
 626	beq	2f			/* Bail if no table */
 627	lwz	r11,0(r12)		/* Get high word of pte entry */
 628
 629	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
 630	 * bottom of r12 to create a data dependency... We can also use r10
 631	 * as destination nowadays
 632	 */
 633#ifdef CONFIG_SMP
 634	lwsync
 635#endif
 636	lwz	r12,4(r12)		/* Get low word of pte entry */
 637
 638	andc.	r13,r13,r12		/* Check permission */
 639
  640	/* Jump to common TLB load point */
 641	beq	finish_tlb_load_47x
 642
 6432:	/* The bailout.  Restore registers to pre-exception conditions
 644	 * and call the heavyweights to help us out.
 645	 */
 646	mfspr	r11,SPRN_SPRG_RSCRATCH4
 647	mtcr	r11
 648	mfspr	r13,SPRN_SPRG_RSCRATCH3
 649	mfspr	r12,SPRN_SPRG_RSCRATCH2
 650	mfspr	r11,SPRN_SPRG_RSCRATCH1
 651	mfspr	r10,SPRN_SPRG_RSCRATCH0
 652	b	DataStorage
 653
 654	/* Instruction TLB Error Interrupt */
 655	/*
 656	 * Nearly the same as above, except we get our
  657	 * information from different registers and bail out
 658	 * to a different point.
 659	 */
 660	START_EXCEPTION(InstructionTLBError47x)
 661	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
 662	mtspr	SPRN_SPRG_WSCRATCH1,r11
 663	mtspr	SPRN_SPRG_WSCRATCH2,r12
 664	mtspr	SPRN_SPRG_WSCRATCH3,r13
 665	mfcr	r11
 666	mtspr	SPRN_SPRG_WSCRATCH4,r11
 667	mfspr	r10,SPRN_SRR0		/* Get faulting address */
 668
 669	/* If we are faulting a kernel address, we have to use the
 670	 * kernel page tables.
 671	 */
 672	lis	r11,PAGE_OFFSET@h
 673	cmplw	cr0,r10,r11
 674	blt+	3f
 675	lis	r11,swapper_pg_dir@h
 676	ori	r11,r11, swapper_pg_dir@l
 677	li	r12,0			/* MMUCR = 0 */
 678	b	4f
 679
 680	/* Get the PGD for the current thread and setup MMUCR */
 6813:	mfspr	r11,SPRN_SPRG_THREAD
 682	lwz	r11,PGDIR(r11)
 683	mfspr   r12,SPRN_PID		/* Get PID */
 684#ifdef CONFIG_PPC_KUAP
 685	cmpwi	r12,0
 686	beq	2f			/* KUAP Fault */
 687#endif
 6884:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
 689
 690	/* Make up the required permissions */
 691	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
 692
 693	/* Load PTE */
 694	/* Compute pgdir/pmd offset */
 695	rlwinm  r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
 696	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
 697
 698	/* Word 0 is EPN,V,TS,DSIZ */
 699	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
  700	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size */
 701	li	r12,0
 702	tlbwe	r10,r12,0
 703
 704	/* XXX can we do better ? Need to make sure tlbwe has established
 705	 * latch V bit in MMUCR0 before the PTE is loaded further down */
 706#ifdef CONFIG_SMP
 707	isync
 708#endif
 709
 710	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
 711	/* Compute pte address */
 712	rlwimi  r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
 713	beq	2f			/* Bail if no table */
 714
 715	lwz	r11,0(r12)		/* Get high word of pte entry */
 716	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
 717	 * bottom of r12 to create a data dependency... We can also use r10
 718	 * as destination nowadays
 719	 */
 720#ifdef CONFIG_SMP
 721	lwsync
 722#endif
 723	lwz	r12,4(r12)		/* Get low word of pte entry */
 724
 725	andc.	r13,r13,r12		/* Check permission */
 726
 727	/* Jump to common TLB load point */
 728	beq	finish_tlb_load_47x
 729
 7302:	/* The bailout.  Restore registers to pre-exception conditions
 731	 * and call the heavyweights to help us out.
 732	 */
 733	mfspr	r11, SPRN_SPRG_RSCRATCH4
 734	mtcr	r11
 735	mfspr	r13, SPRN_SPRG_RSCRATCH3
 736	mfspr	r12, SPRN_SPRG_RSCRATCH2
 737	mfspr	r11, SPRN_SPRG_RSCRATCH1
 738	mfspr	r10, SPRN_SPRG_RSCRATCH0
 739	b	InstructionStorage
 740
 741/*
 742 * Both the instruction and data TLB miss get to this
 743 * point to load the TLB.
 744 * 	r10 - free to use
 745 * 	r11 - PTE high word value
 746 *	r12 - PTE low word value
 747 *      r13 - free to use
 748 *	MMUCR - loaded with proper value when we get here
 749 *	Upon exit, we reload everything and RFI.
 750 */
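/*
 * (Note: both callers reach this point via "andc. r13,r13,r12" followed
 * by beq, so r13 is zero on arrival, and the word 0 tlbwe they already
 * issued has latched the entry being loaded -- see the latch comments
 * above.)
 */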
 751finish_tlb_load_47x:
  752	/* Combine RPN & ERPN and write WS 1 */
 753	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
 754	tlbwe	r11,r13,1
 755
 756	/* And make up word 2 */
 757	li	r10,0xf85			/* Mask to apply from PTE */
 758	rlwimi	r10,r12,29,30,30		/* DIRTY -> SW position */
 759	and	r11,r12,r10			/* Mask PTE bits to keep */
 760	andi.	r10,r12,_PAGE_USER		/* User page ? */
 761	beq	1f				/* nope, leave U bits empty */
 762	rlwimi	r11,r11,3,26,28			/* yes, copy S bits to U */
 763	rlwinm	r11,r11,0,~PPC47x_TLB2_SX	/* Clear SX if User page */
 7641:	tlbwe	r11,r13,2
 765
 766	/* Done...restore registers and get out of here.
 767	*/
 768	mfspr	r11, SPRN_SPRG_RSCRATCH4
 769	mtcr	r11
 770	mfspr	r13, SPRN_SPRG_RSCRATCH3
 771	mfspr	r12, SPRN_SPRG_RSCRATCH2
 772	mfspr	r11, SPRN_SPRG_RSCRATCH1
 773	mfspr	r10, SPRN_SPRG_RSCRATCH0
 774	rfi
 775
 776#endif /* CONFIG_PPC_47x */
 777
 778	/* Debug Interrupt */
 779	/*
 780	 * This statement needs to exist at the end of the IVPR
 781	 * definition just in case you end up taking a debug
 782	 * exception within another exception.
 783	 */
 784	DEBUG_CRIT_EXCEPTION
 785
 786interrupt_end:
 787
 788/*
 789 * Global functions
 790 */
 791
 792/*
 793 * Adjust the machine check IVOR on 440A cores
 794 */
 795_GLOBAL(__fixup_440A_mcheck)
 796	li	r3,MachineCheckA@l
 797	mtspr	SPRN_IVOR1,r3
 798	sync
 799	blr
 800
 801/*
 802 * Init CPU state. This is called at boot time or for secondary CPUs
  803 * to set up initial TLB entries, set up IVORs, etc...
 804 *
 805 */
 806_GLOBAL(init_cpu_state)
 807	mflr	r22
 808#ifdef CONFIG_PPC_47x
 809	/* We use the PVR to differentiate 44x cores from 476 */
 810	mfspr	r3,SPRN_PVR
 811	srwi	r3,r3,16
 812	cmplwi	cr0,r3,PVR_476FPE@h
 813	beq	head_start_47x
 814	cmplwi	cr0,r3,PVR_476@h
 815	beq	head_start_47x
 816	cmplwi	cr0,r3,PVR_476_ISS@h
 817	beq	head_start_47x
 818#endif /* CONFIG_PPC_47x */
 819
 820/*
 821 * In case the firmware didn't do it, we apply some workarounds
 822 * that are good for all 440 core variants here
 823 */
 824	mfspr	r3,SPRN_CCR0
 825	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
 826	isync
 827	mtspr	SPRN_CCR0,r3
 828	isync
 829	sync
 830
 831/*
 832 * Set up the initial MMU state for 44x
 833 *
 834 * We are still executing code at the virtual address
 835 * mappings set by the firmware for the base of RAM.
 836 *
 837 * We first invalidate all TLB entries but the one
 838 * we are running from.  We then load the KERNELBASE
 839 * mappings so we can begin to use kernel addresses
 840 * natively and so the interrupt vector locations are
 841 * permanently pinned (necessary since Book E
 842 * implementations always have translation enabled).
 843 *
 844 * TODO: Use the known TLB entry we are running from to
 845 *	 determine which physical region we are located
 846 *	 in.  This can be used to determine where in RAM
 847 *	 (on a shared CPU system) or PCI memory space
 848 *	 (on a DRAMless system) we are located.
 849 *       For now, we assume a perfect world which means
 850 *	 we are located at the base of DRAM (physical 0).
 851 */
 852
 853/*
 854 * Search TLB for entry that we are currently using.
 855 * Invalidate all entries but the one we are using.
 856 */
 857	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
 858	mfspr	r3,SPRN_PID			/* Get PID */
 859	mfmsr	r4				/* Get MSR */
 860	andi.	r4,r4,MSR_IS@l			/* TS=1? */
 861	beq	wmmucr				/* If not, leave STS=0 */
 862	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
 863wmmucr:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
 864	sync
 865
 866	bcl	20,31,$+4			/* Find our address */
 867invstr:	mflr	r5				/* Make it accessible */
 868	tlbsx	r23,0,r5			/* Find entry we are in */
 869	li	r4,0				/* Start at TLB entry 0 */
 870	li	r3,0				/* Set PAGEID inval value */
 8711:	cmpw	r23,r4				/* Is this our entry? */
 872	beq	skpinv				/* If so, skip the inval */
 873	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
 874skpinv:	addi	r4,r4,1				/* Increment */
 875	cmpwi	r4,64				/* Are we done? */
 876	bne	1b				/* If not, repeat */
 877	isync					/* If so, context change */
 878
 879/*
 880 * Configure and load pinned entry into TLB slot 63.
 881 */
 882#ifdef CONFIG_NONSTATIC_KERNEL
 883	/*
  884	 * In case of a NONSTATIC_KERNEL we reuse the XLAT word
  885	 * of the initial mapping set by the boot loader.
  886	 * The XLAT word is stored in r25.
 887	 */
 888
 889	/* Read the XLAT entry for our current mapping */
 890	tlbre	r25,r23,PPC44x_TLB_XLAT
 891
 892	lis	r3,KERNELBASE@h
 893	ori	r3,r3,KERNELBASE@l
 894
 895	/* Use our current RPN entry */
 896	mr	r4,r25
 897#else
 898
 899	lis	r3,PAGE_OFFSET@h
 900	ori	r3,r3,PAGE_OFFSET@l
 901
 902	/* Kernel is at the base of RAM */
 903	li r4, 0			/* Load the kernel physical address */
 904#endif
 905
 906	/* Load the kernel PID = 0 */
 907	li	r0,0
 908	mtspr	SPRN_PID,r0
 909	sync
 910
 911	/* Initialize MMUCR */
 912	li	r5,0
 913	mtspr	SPRN_MMUCR,r5
 914	sync
 915
 916	/* pageid fields */
  917	clrrwi	r3,r3,10		/* Keep only the effective page number */
 918	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M
 919
 920	/* xlat fields */
  921	clrrwi	r4,r4,10		/* Keep only the real page number */
 922					/* ERPN is 0 for first 4GB page */
 923
 924	/* attrib fields */
 925	/* Added guarded bit to protect against speculative loads/stores */
 926	li	r5,0
 927	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
 928
 929        li      r0,63                    /* TLB slot 63 */
 930
 931	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
 932	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
 933	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */
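	/* (Net effect: TLB slot 63 now pins a 256M, supervisor-RWX,
	 * guarded mapping of the kernel's physical base at the virtual
	 * address loaded into r3 above.) */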
 934
 935	/* Force context change */
 936	mfmsr	r0
 937	mtspr	SPRN_SRR1, r0
 938	lis	r0,3f@h
 939	ori	r0,r0,3f@l
 940	mtspr	SPRN_SRR0,r0
 941	sync
 942	rfi
 943
 944	/* If necessary, invalidate original entry we used */
 9453:	cmpwi	r23,63
 946	beq	4f
 947	li	r6,0
 948	tlbwe   r6,r23,PPC44x_TLB_PAGEID
 949	isync
 950
 9514:
 952#ifdef CONFIG_PPC_EARLY_DEBUG_44x
 953	/* Add UART mapping for early debug. */
 954
 955	/* pageid fields */
 956	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
 957	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K
 958
 959	/* xlat fields */
 960	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
 961	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
 962
 963	/* attrib fields */
 964	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
  965        li      r0,62                    /* TLB slot 62 */
 966
 967	tlbwe	r3,r0,PPC44x_TLB_PAGEID
 968	tlbwe	r4,r0,PPC44x_TLB_XLAT
 969	tlbwe	r5,r0,PPC44x_TLB_ATTRIB
 970
 971	/* Force context change */
 972	isync
 973#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
 974
 975	/* Establish the interrupt vector offsets */
 976	SET_IVOR(0,  CriticalInput);
 977	SET_IVOR(1,  MachineCheck);
 978	SET_IVOR(2,  DataStorage);
 979	SET_IVOR(3,  InstructionStorage);
 980	SET_IVOR(4,  ExternalInput);
 981	SET_IVOR(5,  Alignment);
 982	SET_IVOR(6,  Program);
 983	SET_IVOR(7,  FloatingPointUnavailable);
 984	SET_IVOR(8,  SystemCall);
 985	SET_IVOR(9,  AuxillaryProcessorUnavailable);
 986	SET_IVOR(10, Decrementer);
 987	SET_IVOR(11, FixedIntervalTimer);
 988	SET_IVOR(12, WatchdogTimer);
 989	SET_IVOR(13, DataTLBError44x);
 990	SET_IVOR(14, InstructionTLBError44x);
 991	SET_IVOR(15, DebugCrit);
 992
 993	b	head_start_common
 994
 995
 996#ifdef CONFIG_PPC_47x
 997
 998#ifdef CONFIG_SMP
 999
1000/* Entry point for secondary 47x processors */
1001_GLOBAL(start_secondary_47x)
1002        mr      r24,r3          /* CPU number */
1003
1004	bl	init_cpu_state
1005
1006	/* Now we need to bolt the rest of kernel memory which
1007	 * is done in C code. We must be careful because our task
1008	 * struct or our stack can (and will probably) be out
1009	 * of reach of the initial 256M TLB entry, so we use a
 1010	 * small temporary stack in .data for that. This works
1011	 * because only one CPU at a time can be in this code
1012	 */
1013	lis	r1,temp_boot_stack@h
1014	ori	r1,r1,temp_boot_stack@l
1015	addi	r1,r1,1024-STACK_FRAME_MIN_SIZE
1016	li	r0,0
1017	stw	r0,0(r1)
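	/* (The zero just stored terminates the stack frame back-chain.) */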
1018	bl	mmu_init_secondary
1019
1020	/* Now we can get our task struct and real stack pointer */
1021
1022	/* Get current's stack and current */
1023	lis	r2,secondary_current@ha
1024	lwz	r2,secondary_current@l(r2)
1025	lwz	r1,TASK_STACK(r2)
1026
1027	/* Current stack pointer */
1028	addi	r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE
1029	li	r0,0
1030	stw	r0,0(r1)
1031
1032	/* Kernel stack for exception entry in SPRG3 */
1033	addi	r4,r2,THREAD	/* init task's THREAD */
1034	mtspr	SPRN_SPRG3,r4
1035
1036	b	start_secondary
1037
1038#endif /* CONFIG_SMP */
1039
1040/*
 1041 * Set up the initial MMU state for 47x
1042 *
1043 * We are still executing code at the virtual address
1044 * mappings set by the firmware for the base of RAM.
1045 */
1046
1047head_start_47x:
1048	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
1049	mfspr	r3,SPRN_PID			/* Get PID */
1050	mfmsr	r4				/* Get MSR */
1051	andi.	r4,r4,MSR_IS@l			/* TS=1? */
1052	beq	1f				/* If not, leave STS=0 */
1053	oris	r3,r3,PPC47x_MMUCR_STS@h	/* Set STS=1 */
10541:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
1055	sync
1056
1057	/* Find the entry we are running from */
1058	bcl	20,31,$+4
10591:	mflr	r23
1060	tlbsx	r23,0,r23
1061	tlbre	r24,r23,0
1062	tlbre	r25,r23,1
1063	tlbre	r26,r23,2
1064
1065/*
1066 * Cleanup time
1067 */
1068
1069	/* Initialize MMUCR */
1070	li	r5,0
1071	mtspr	SPRN_MMUCR,r5
1072	sync
1073
1074clear_all_utlb_entries:
1075
1076	#; Set initial values.
1077
1078	addis		r3,0,0x8000
1079	addi		r4,0,0
1080	addi		r5,0,0
1081	b		clear_utlb_entry
1082
1083	#; Align the loop to speed things up.
1084
1085	.align		6
1086
1087clear_utlb_entry:
1088
1089	tlbwe		r4,r3,0
1090	tlbwe		r5,r3,1
1091	tlbwe		r5,r3,2
1092	addis		r3,r3,0x2000
1093	cmpwi		r3,0
1094	bne		clear_utlb_entry
1095	addis		r3,0,0x8000
1096	addis		r4,r4,0x100
1097	cmpwi		r4,0
1098	bne		clear_utlb_entry
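	#; (A sketch of the iteration, assuming the 476's 4-way UTLB: the
	#;  inner addis to r3 steps through the four way-select values,
	#;  while r4 advances the EPN by 16MB per outer pass, so an invalid
	#;  word 0 is written for every way of every congruence class.)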
1099
1100	#; Restore original entry.
1101
1102	oris	r23,r23,0x8000  /* specify the way */
1103	tlbwe		r24,r23,0
1104	tlbwe		r25,r23,1
1105	tlbwe		r26,r23,2
1106
1107/*
1108 * Configure and load pinned entry into TLB for the kernel core
1109 */
1110
1111	lis	r3,PAGE_OFFSET@h
1112	ori	r3,r3,PAGE_OFFSET@l
1113
1114	/* Load the kernel PID = 0 */
1115	li	r0,0
1116	mtspr	SPRN_PID,r0
1117	sync
1118
1119	/* Word 0 */
 1120	clrrwi	r3,r3,12		/* Keep only the effective page number */
1121	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
1122
1123	/* Word 1 - use r25.  RPN is the same as the original entry */
1124
1125	/* Word 2 */
1126	li	r5,0
1127	ori	r5,r5,PPC47x_TLB2_S_RWX
1128#ifdef CONFIG_SMP
1129	ori	r5,r5,PPC47x_TLB2_M
1130#endif
1131
1132	/* We write to way 0 and bolted 0 */
1133	lis	r0,0x8800
1134	tlbwe	r3,r0,0
1135	tlbwe	r25,r0,1
1136	tlbwe	r5,r0,2
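	/* (Net effect: bolted entry 0, way 0 now pins a 256M supervisor-RWX
	 * mapping of the kernel at PAGE_OFFSET, with M added on SMP.) */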
1137
1138/*
1139 * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix
1140 * them up later
1141 */
1142	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
1143	mtspr	SPRN_SSPCR,r3
1144	mtspr	SPRN_USPCR,r3
1145	LOAD_REG_IMMEDIATE(r3, 0x12345670)
1146	mtspr	SPRN_ISPCR,r3
1147
1148	/* Force context change */
1149	mfmsr	r0
1150	mtspr	SPRN_SRR1, r0
1151	lis	r0,3f@h
1152	ori	r0,r0,3f@l
1153	mtspr	SPRN_SRR0,r0
1154	sync
1155	rfi
1156
1157	/* Invalidate original entry we used */
11583:
1159	rlwinm	r24,r24,0,21,19 /* clear the "valid" bit */
1160	tlbwe	r24,r23,0
1161	addi	r24,0,0
1162	tlbwe	r24,r23,1
1163	tlbwe	r24,r23,2
1164	isync                   /* Clear out the shadow TLB entries */
1165
1166#ifdef CONFIG_PPC_EARLY_DEBUG_44x
1167	/* Add UART mapping for early debug. */
1168
1169	/* Word 0 */
1170	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
1171	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M
1172
1173	/* Word 1 */
1174	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
1175	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
1176
1177	/* Word 2 */
1178	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)
1179
 1180	/* Bolted in way 0, bolt slot 5. We -hope- we don't hit the same
 1181	 * congruence class as the kernel; we need to make sure of that at
 1182	 * some point.
1183	 */
1184        lis	r0,0x8d00
1185	tlbwe	r3,r0,0
1186	tlbwe	r4,r0,1
1187	tlbwe	r5,r0,2
1188
1189	/* Force context change */
1190	isync
1191#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
1192
1193	/* Establish the interrupt vector offsets */
1194	SET_IVOR(0,  CriticalInput);
1195	SET_IVOR(1,  MachineCheckA);
1196	SET_IVOR(2,  DataStorage);
1197	SET_IVOR(3,  InstructionStorage);
1198	SET_IVOR(4,  ExternalInput);
1199	SET_IVOR(5,  Alignment);
1200	SET_IVOR(6,  Program);
1201	SET_IVOR(7,  FloatingPointUnavailable);
1202	SET_IVOR(8,  SystemCall);
1203	SET_IVOR(9,  AuxillaryProcessorUnavailable);
1204	SET_IVOR(10, Decrementer);
1205	SET_IVOR(11, FixedIntervalTimer);
1206	SET_IVOR(12, WatchdogTimer);
1207	SET_IVOR(13, DataTLBError47x);
1208	SET_IVOR(14, InstructionTLBError47x);
1209	SET_IVOR(15, DebugCrit);
1210
1211	/* We configure icbi to invalidate 128 bytes at a time since the
1212	 * current 32-bit kernel code isn't too happy with icache != dcache
1213	 * block size. We also disable the BTAC as this can cause errors
1214	 * in some circumstances (see IBM Erratum 47).
1215	 */
1216	mfspr	r3,SPRN_CCR0
1217	oris	r3,r3,0x0020
1218	ori	r3,r3,0x0040
1219	mtspr	SPRN_CCR0,r3
1220	isync
1221
1222#endif /* CONFIG_PPC_47x */
1223
1224/*
1225 * Here we are back to code that is common between 44x and 47x
1226 *
1227 * We proceed to further kernel initialization and return to the
1228 * main kernel entry
1229 */
1230head_start_common:
1231	/* Establish the interrupt vector base */
1232	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
1233	mtspr	SPRN_IVPR,r4
1234
1235	/*
1236	 * If the kernel was loaded at a non-zero 256 MB page, we need to
1237	 * mask off the most significant 4 bits to get the relative address
1238	 * from the start of physical memory
1239	 */
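	/* (For example, assuming the usual PAGE_OFFSET of 0xc0000000: a
	 * saved link register of 0x50001234 becomes 0x00001234 after the
	 * rlwinm and 0xc0001234 after the addis.) */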
1240	rlwinm	r22,r22,0,4,31
1241	addis	r22,r22,PAGE_OFFSET@h
1242	mtlr	r22
1243	isync
1244	blr
1245
1246#ifdef CONFIG_SMP
1247	.data
1248	.align	12
1249temp_boot_stack:
1250	.space	1024
1251#endif /* CONFIG_SMP */