/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC platform, including trap and interrupt dispatch.
 *  (The PPC 8xx embedded CPUs use head_8xx.S instead.)
 */

#include <linux/init.h>
#include <linux/pgtable.h>
#include <linux/linkage.h>

#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/bug.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/feature-fixups.h>
#include <asm/interrupt.h>

#include "head_32.h"

39#define LOAD_BAT(n, reg, RA, RB) \
40 /* see the comment for clear_bats() -- Cort */ \
41 li RA,0; \
42 mtspr SPRN_IBAT##n##U,RA; \
43 mtspr SPRN_DBAT##n##U,RA; \
44 lwz RA,(n*16)+0(reg); \
45 lwz RB,(n*16)+4(reg); \
46 mtspr SPRN_IBAT##n##U,RA; \
47 mtspr SPRN_IBAT##n##L,RB; \
48 lwz RA,(n*16)+8(reg); \
49 lwz RB,(n*16)+12(reg); \
50 mtspr SPRN_DBAT##n##U,RA; \
51 mtspr SPRN_DBAT##n##L,RB
52
53 __HEAD
54_GLOBAL(_stext);
55
56/*
57 * _start is defined this way because the XCOFF loader in the OpenFirmware
58 * on the powermac expects the entry point to be a procedure descriptor.
59 */
60_GLOBAL(_start);
61 /*
62 * These are here for legacy reasons, the kernel used to
63 * need to look like a coff function entry for the pmac
64 * but we're always started by some kind of bootloader now.
65 * -- Cort
66 */
67 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
68 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
69 nop
70
71/* PMAC
72 * Enter here with the kernel text, data and bss loaded starting at
73 * 0, running with virtual == physical mapping.
74 * r5 points to the prom entry point (the client interface handler
75 * address). Address translation is turned on, with the prom
76 * managing the hash table. Interrupts are disabled. The stack
77 * pointer (r1) points to just below the end of the half-meg region
78 * from 0x380000 - 0x400000, which is mapped in already.
79 *
80 * If we are booted from MacOS via BootX, we enter with the kernel
81 * image loaded somewhere, and the following values in registers:
82 * r3: 'BooX' (0x426f6f58)
83 * r4: virtual address of boot_infos_t
84 * r5: 0
85 *
86 * PREP
87 * This is jumped to on prep systems right after the kernel is relocated
88 * to its proper place in memory by the boot loader. The expected layout
89 * of the regs is:
90 * r3: ptr to residual data
91 * r4: initrd_start or if no initrd then 0
92 * r5: initrd_end - unused if r4 is 0
93 * r6: Start of command line string
94 * r7: End of command line string
95 *
96 * This just gets a minimal mmu environment setup so we can call
97 * start_here() to do the real work.
98 * -- Cort
99 */
100
101 .globl __start
102__start:
103/*
104 * We have to do any OF calls before we map ourselves to KERNELBASE,
105 * because OF may have I/O devices mapped into that area
106 * (particularly on CHRP).
107 */
108 cmpwi 0,r5,0
109 beq 1f
110
111#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
112 /* find out where we are now */
113 bcl 20,31,$+4
1140: mflr r8 /* r8 = runtime addr here */
115 addis r8,r8,(_stext - 0b)@ha
116 addi r8,r8,(_stext - 0b)@l /* current runtime base addr */
117 bl prom_init
118#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
119
120 /* We never return. We also hit that trap if trying to boot
121 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
122 trap
123
124/*
125 * Check for BootX signature when supporting PowerMac and branch to
126 * appropriate trampoline if it's present
127 */
128#ifdef CONFIG_PPC_PMAC
1291: lis r31,0x426f
130 ori r31,r31,0x6f58
131 cmpw 0,r3,r31
132 bne 1f
133 bl bootx_init
134 trap
135#endif /* CONFIG_PPC_PMAC */
136
1371: mr r31,r3 /* save device tree ptr */
138 li r24,0 /* cpu # */
139
140/*
141 * early_init() does the early machine identification and does
142 * the necessary low-level setup and clears the BSS
143 * -- Cort <cort@fsmlabs.com>
144 */
145 bl early_init
146
147/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
148 * the physical address we are running at, returned by early_init()
149 */
150 bl mmu_off
151__after_mmu_off:
152 bl clear_bats
153 bl flush_tlbs
154
155 bl initial_bats
156 bl load_segment_registers
157 bl reloc_offset
158 bl early_hash_table
159#if defined(CONFIG_BOOTX_TEXT)
160 bl setup_disp_bat
161#endif
162#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
163 bl setup_cpm_bat
164#endif
165#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
166 bl setup_usbgecko_bat
167#endif
168
169/*
170 * Call setup_cpu for CPU 0 and initialize 6xx Idle
171 */
172 bl reloc_offset
173 li r24,0 /* cpu# */
174 bl call_setup_cpu /* Call setup_cpu for this CPU */
175 bl reloc_offset
176 bl init_idle_6xx
177
178
179/*
180 * We need to run with _start at physical address 0.
181 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
182 * the exception vectors at 0 (and therefore this copy
183 * overwrites OF's exception vectors with our own).
184 * The MMU is off at this point.
185 */
186 bl reloc_offset
187 mr r26,r3
188 addis r4,r3,KERNELBASE@h /* current address of _start */
189 lis r5,PHYSICAL_START@h
190 cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
191 bne relocate_kernel
192/*
193 * we now have the 1st 16M of ram mapped with the bats.
194 * prep needs the mmu to be turned on here, but pmac already has it on.
195 * this shouldn't bother the pmac since it just gets turned on again
196 * as we jump to our code at KERNELBASE. -- Cort
197 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
198 * off, and in other cases, we now turn it off before changing BATs above.
199 */
200turn_on_mmu:
201 mfmsr r0
202 ori r0,r0,MSR_DR|MSR_IR|MSR_RI
203 mtspr SPRN_SRR1,r0
204 lis r0,start_here@h
205 ori r0,r0,start_here@l
206 mtspr SPRN_SRR0,r0
207 rfi /* enables MMU */
208
209/*
210 * We need __secondary_hold as a place to hold the other cpus on
211 * an SMP machine, even when we are running a UP kernel.
212 */
213 . = 0xc0 /* for prep bootloader */
214 li r3,1 /* MTX only has 1 cpu */
215 .globl __secondary_hold
216__secondary_hold:
217 /* tell the master we're here */
218 stw r3,__secondary_hold_acknowledge@l(0)
219#ifdef CONFIG_SMP
220100: lwz r4,0(0)
221 /* wait until we're told to start */
222 cmpw 0,r4,r3
223 bne 100b
224 /* our cpu # was at addr 0 - go */
225 mr r24,r3 /* cpu # */
226 b __secondary_start
227#else
228 b .
229#endif /* CONFIG_SMP */
230
231 .globl __secondary_hold_spinloop
232__secondary_hold_spinloop:
233 .long 0
234 .globl __secondary_hold_acknowledge
235__secondary_hold_acknowledge:
236 .long -1
237
238/* System reset */
239/* core99 pmac starts the seconary here by changing the vector, and
240 putting it back to what it was (unknown_async_exception) when done. */
241 EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, unknown_async_exception)
242
243/* Machine check */
244/*
245 * On CHRP, this is complicated by the fact that we could get a
246 * machine check inside RTAS, and we have no guarantee that certain
247 * critical registers will have the values we expect. The set of
248 * registers that might have bad values includes all the GPRs
249 * and all the BATs. We indicate that we are in RTAS by putting
250 * a non-zero value, the address of the exception frame to use,
251 * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
252 * and uses its value if it is non-zero.
253 * (Other exception handlers assume that r1 is a valid kernel stack
254 * pointer when we take an exception from supervisor mode.)
255 * -- paulus.
256 */
257 START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
258 EXCEPTION_PROLOG_0
259#ifdef CONFIG_PPC_CHRP
260 mtspr SPRN_SPRG_SCRATCH2,r1
261 mfspr r1, SPRN_SPRG_THREAD
262 lwz r1, RTAS_SP(r1)
263 cmpwi cr1, r1, 0
264 bne cr1, 7f
265 mfspr r1, SPRN_SPRG_SCRATCH2
266#endif /* CONFIG_PPC_CHRP */
267 EXCEPTION_PROLOG_1
2687: EXCEPTION_PROLOG_2 0x200 MachineCheck
269#ifdef CONFIG_PPC_CHRP
270 beq cr1, 1f
271 twi 31, 0, 0
272#endif
2731: prepare_transfer_to_handler
274 bl machine_check_exception
275 b interrupt_return
276
277/* Data access exception. */
278 START_EXCEPTION(INTERRUPT_DATA_STORAGE, DataAccess)
279#ifdef CONFIG_PPC_BOOK3S_604
280BEGIN_MMU_FTR_SECTION
281 mtspr SPRN_SPRG_SCRATCH2,r10
282 mfspr r10, SPRN_SPRG_THREAD
283 stw r11, THR11(r10)
284 mfspr r10, SPRN_DSISR
285 mfcr r11
286 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
287 mfspr r10, SPRN_SPRG_THREAD
288 beq hash_page_dsi
289.Lhash_page_dsi_cont:
290 mtcr r11
291 lwz r11, THR11(r10)
292 mfspr r10, SPRN_SPRG_SCRATCH2
293MMU_FTR_SECTION_ELSE
294 b 1f
295ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
296#endif
2971: EXCEPTION_PROLOG_0 handle_dar_dsisr=1
298 EXCEPTION_PROLOG_1
299 EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
300 prepare_transfer_to_handler
301 lwz r5, _DSISR(r1)
302 andis. r0, r5, DSISR_DABRMATCH@h
303 bne- 1f
304 bl do_page_fault
305 b interrupt_return
3061: bl do_break
307 REST_NVGPRS(r1)
308 b interrupt_return
309
310
311/* Instruction access exception. */
312 START_EXCEPTION(INTERRUPT_INST_STORAGE, InstructionAccess)
313 mtspr SPRN_SPRG_SCRATCH0,r10
314 mtspr SPRN_SPRG_SCRATCH1,r11
315 mfspr r10, SPRN_SPRG_THREAD
316 mfspr r11, SPRN_SRR0
317 stw r11, SRR0(r10)
318 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
319 stw r11, SRR1(r10)
320 mfcr r10
321#ifdef CONFIG_PPC_BOOK3S_604
322BEGIN_MMU_FTR_SECTION
323 andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
324 bne hash_page_isi
325.Lhash_page_isi_cont:
326 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
327END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
328#endif
329 andi. r11, r11, MSR_PR
330
331 EXCEPTION_PROLOG_1
332 EXCEPTION_PROLOG_2 INTERRUPT_INST_STORAGE InstructionAccess
333 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
334 stw r5, _DSISR(r11)
335 stw r12, _DAR(r11)
336 prepare_transfer_to_handler
337 bl do_page_fault
338 b interrupt_return
339
340/* External interrupt */
341 EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)
342
343/* Alignment exception */
344 START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
345 EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
346 prepare_transfer_to_handler
347 bl alignment_exception
348 REST_NVGPRS(r1)
349 b interrupt_return
350
351/* Program check exception */
352 START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
353 EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
354 prepare_transfer_to_handler
355 bl program_check_exception
356 REST_NVGPRS(r1)
357 b interrupt_return
358
359/* Floating-point unavailable */
360 START_EXCEPTION(0x800, FPUnavailable)
361#ifdef CONFIG_PPC_FPU
362BEGIN_FTR_SECTION
363/*
364 * Certain Freescale cores don't have a FPU and treat fp instructions
365 * as a FP Unavailable exception. Redirect to illegal/emulation handling.
366 */
367 b ProgramCheck
368END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
369 EXCEPTION_PROLOG INTERRUPT_FP_UNAVAIL FPUnavailable
370 beq 1f
371 bl load_up_fpu /* if from user, just load it up */
372 b fast_exception_return
3731: prepare_transfer_to_handler
374 bl kernel_fp_unavailable_exception
375 b interrupt_return
376#else
377 b ProgramCheck
378#endif
379
380/* Decrementer */
381 EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)
382
383 EXCEPTION(0xa00, Trap_0a, unknown_exception)
384 EXCEPTION(0xb00, Trap_0b, unknown_exception)
385
386/* System call */
387 START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
388 SYSCALL_ENTRY INTERRUPT_SYSCALL
389
390 EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
391 EXCEPTION(0xe00, Trap_0e, unknown_exception)
392
393/*
394 * The Altivec unavailable trap is at 0x0f20. Foo.
395 * We effectively remap it to 0x3000.
396 * We include an altivec unavailable exception vector even if
397 * not configured for Altivec, so that you can't panic a
398 * non-altivec kernel running on a machine with altivec just
399 * by executing an altivec instruction.
400 */
401 START_EXCEPTION(INTERRUPT_PERFMON, PerformanceMonitorTrap)
402 b PerformanceMonitor
403
404 START_EXCEPTION(INTERRUPT_ALTIVEC_UNAVAIL, AltiVecUnavailableTrap)
405 b AltiVecUnavailable
406
407 __HEAD
408/*
409 * Handle TLB miss for instruction on 603/603e.
410 * Note: we get an alternate set of r0 - r3 to use automatically.
411 */
412 . = INTERRUPT_INST_TLB_MISS_603
413InstructionTLBMiss:
414/*
415 * r0: userspace flag (later scratch)
416 * r1: linux style pte ( later becomes ppc hardware pte )
417 * r2: ptr to linux-style pte
418 * r3: fault address
419 */
420 /* Get PTE (linux-style) and check access */
421 mfspr r3,SPRN_IMISS
422#ifdef CONFIG_MODULES
423 lis r1, TASK_SIZE@h /* check if kernel address */
424 cmplw 0,r1,r3
425#endif
426 mfspr r2, SPRN_SDR1
427 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
428 rlwinm r2, r2, 28, 0xfffff000
429#ifdef CONFIG_MODULES
430 li r0, 3
431 bgt- 112f
432 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
433 li r0, 0
434 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
435#endif
436112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
437 lwz r2,0(r2) /* get pmd entry */
438 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
439 beq- InstructionAddressInvalid /* return if no mapping */
440 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
441 lwz r2,0(r2) /* get linux-style pte */
442 andc. r1,r1,r2 /* check access & ~permission */
443 bne- InstructionAddressInvalid /* return if access not permitted */
444 /* Convert linux-style PTE to low word of PPC-style PTE */
445#ifdef CONFIG_MODULES
446 rlwimi r2, r0, 0, 31, 31 /* userspace ? -> PP lsb */
447#endif
448 ori r1, r1, 0xe06 /* clear out reserved bits */
449 andc r1, r2, r1 /* PP = user? 1 : 0 */
450BEGIN_FTR_SECTION
451 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
452END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
453 mtspr SPRN_RPA,r1
454 tlbli r3
455 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
456 mtcrf 0x80,r3
457 rfi
458InstructionAddressInvalid:
459 mfspr r3,SPRN_SRR1
460 rlwinm r1,r3,9,6,6 /* Get load/store bit */
461
462 addis r1,r1,0x2000
463 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
464 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
465 or r2,r2,r1
466 mtspr SPRN_SRR1,r2
467 mfspr r1,SPRN_IMISS /* Get failing address */
468 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
469 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
470 xor r1,r1,r2
471 mtspr SPRN_DAR,r1 /* Set fault address */
472 mfmsr r0 /* Restore "normal" registers */
473 xoris r0,r0,MSR_TGPR>>16
474 mtcrf 0x80,r3 /* Restore CR0 */
475 mtmsr r0
476 b InstructionAccess
477
478/*
479 * Handle TLB miss for DATA Load operation on 603/603e
480 */
481 . = INTERRUPT_DATA_LOAD_TLB_MISS_603
482DataLoadTLBMiss:
483/*
484 * r0: userspace flag (later scratch)
485 * r1: linux style pte ( later becomes ppc hardware pte )
486 * r2: ptr to linux-style pte
487 * r3: fault address
488 */
489 /* Get PTE (linux-style) and check access */
490 mfspr r3,SPRN_DMISS
491 lis r1, TASK_SIZE@h /* check if kernel address */
492 cmplw 0,r1,r3
493 mfspr r2, SPRN_SDR1
494 li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ
495 rlwinm r2, r2, 28, 0xfffff000
496 li r0, 3
497 bgt- 112f
498 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
499 li r0, 0
500 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
501112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
502 lwz r2,0(r2) /* get pmd entry */
503 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
504 beq- DataAddressInvalid /* return if no mapping */
505 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
506 lwz r2,0(r2) /* get linux-style pte */
507 andc. r1,r1,r2 /* check access & ~permission */
508 bne- DataAddressInvalid /* return if access not permitted */
509 /* Convert linux-style PTE to low word of PPC-style PTE */
510 rlwinm r1,r2,32-9,30,30 /* _PAGE_WRITE -> PP msb */
511 rlwimi r2,r0,0,30,31 /* userspace ? -> PP */
512 rlwimi r1,r2,32-3,24,24 /* _PAGE_WRITE -> _PAGE_DIRTY */
513 xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */
514 ori r1,r1,0xe04 /* clear out reserved bits */
515 andc r1,r2,r1 /* PP = user? rw? 1: 3: 0 */
516BEGIN_FTR_SECTION
517 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
518END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
519 mtspr SPRN_RPA,r1
520BEGIN_MMU_FTR_SECTION
521 li r0,1
522 mfspr r1,SPRN_SPRG_603_LRU
523 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
524 slw r0,r0,r2
525 xor r1,r0,r1
526 srw r0,r1,r2
527 mtspr SPRN_SPRG_603_LRU,r1
528 mfspr r2,SPRN_SRR1
529 rlwimi r2,r0,31-14,14,14
530 mtspr SPRN_SRR1,r2
531 mtcrf 0x80,r2
532 tlbld r3
533 rfi
534MMU_FTR_SECTION_ELSE
535 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
536 mtcrf 0x80,r2
537 tlbld r3
538 rfi
539ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
540DataAddressInvalid:
541 mfspr r3,SPRN_SRR1
542 rlwinm r1,r3,9,6,6 /* Get load/store bit */
543 addis r1,r1,0x2000
544 mtspr SPRN_DSISR,r1
545 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
546 mtspr SPRN_SRR1,r2
547 mfspr r1,SPRN_DMISS /* Get failing address */
548 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
549 beq 20f /* Jump if big endian */
550 xori r1,r1,3
55120: mtspr SPRN_DAR,r1 /* Set fault address */
552 mfmsr r0 /* Restore "normal" registers */
553 xoris r0,r0,MSR_TGPR>>16
554 mtcrf 0x80,r3 /* Restore CR0 */
555 mtmsr r0
556 b DataAccess
557
558/*
559 * Handle TLB miss for DATA Store on 603/603e
560 */
561 . = INTERRUPT_DATA_STORE_TLB_MISS_603
562DataStoreTLBMiss:
563/*
564 * r0: userspace flag (later scratch)
565 * r1: linux style pte ( later becomes ppc hardware pte )
566 * r2: ptr to linux-style pte
567 * r3: fault address
568 */
569 /* Get PTE (linux-style) and check access */
570 mfspr r3,SPRN_DMISS
571 lis r1, TASK_SIZE@h /* check if kernel address */
572 cmplw 0,r1,r3
573 mfspr r2, SPRN_SDR1
574 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
575 rlwinm r2, r2, 28, 0xfffff000
576 li r0, 3
577 bgt- 112f
578 lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
579 li r0, 0
580 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
581112: rlwimi r2,r3,12,20,29 /* insert top 10 bits of address */
582 lwz r2,0(r2) /* get pmd entry */
583 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
584 beq- DataAddressInvalid /* return if no mapping */
585 rlwimi r2,r3,22,20,29 /* insert next 10 bits of address */
586 lwz r2,0(r2) /* get linux-style pte */
587 andc. r1,r1,r2 /* check access & ~permission */
588 bne- DataAddressInvalid /* return if access not permitted */
589 /* Convert linux-style PTE to low word of PPC-style PTE */
590 rlwimi r2,r0,0,31,31 /* userspace ? -> PP lsb */
591 li r1,0xe06 /* clear out reserved bits & PP msb */
592 andc r1,r2,r1 /* PP = user? 1: 0 */
593BEGIN_FTR_SECTION
594 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
595END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
596 mtspr SPRN_RPA,r1
597 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
598 mtcrf 0x80,r2
599BEGIN_MMU_FTR_SECTION
600 li r0,1
601 mfspr r1,SPRN_SPRG_603_LRU
602 rlwinm r2,r3,20,27,31 /* Get Address bits 15:19 */
603 slw r0,r0,r2
604 xor r1,r0,r1
605 srw r0,r1,r2
606 mtspr SPRN_SPRG_603_LRU,r1
607 mfspr r2,SPRN_SRR1
608 rlwimi r2,r0,31-14,14,14
609 mtspr SPRN_SRR1,r2
610 mtcrf 0x80,r2
611 tlbld r3
612 rfi
613MMU_FTR_SECTION_ELSE
614 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
615 mtcrf 0x80,r2
616 tlbld r3
617 rfi
618ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
619
620#ifndef CONFIG_ALTIVEC
621#define altivec_assist_exception unknown_exception
622#endif
623
624#ifndef CONFIG_TAU_INT
625#define TAUException unknown_async_exception
626#endif
627
628 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception)
629 EXCEPTION(0x1400, SMI, SMIException)
630 EXCEPTION(0x1500, Trap_15, unknown_exception)
631 EXCEPTION(0x1600, Trap_16, altivec_assist_exception)
632 EXCEPTION(0x1700, Trap_17, TAUException)
633 EXCEPTION(0x1800, Trap_18, unknown_exception)
634 EXCEPTION(0x1900, Trap_19, unknown_exception)
635 EXCEPTION(0x1a00, Trap_1a, unknown_exception)
636 EXCEPTION(0x1b00, Trap_1b, unknown_exception)
637 EXCEPTION(0x1c00, Trap_1c, unknown_exception)
638 EXCEPTION(0x1d00, Trap_1d, unknown_exception)
639 EXCEPTION(0x1e00, Trap_1e, unknown_exception)
640 EXCEPTION(0x1f00, Trap_1f, unknown_exception)
641 EXCEPTION(0x2000, RunMode, RunModeException)
642 EXCEPTION(0x2100, Trap_21, unknown_exception)
643 EXCEPTION(0x2200, Trap_22, unknown_exception)
644 EXCEPTION(0x2300, Trap_23, unknown_exception)
645 EXCEPTION(0x2400, Trap_24, unknown_exception)
646 EXCEPTION(0x2500, Trap_25, unknown_exception)
647 EXCEPTION(0x2600, Trap_26, unknown_exception)
648 EXCEPTION(0x2700, Trap_27, unknown_exception)
649 EXCEPTION(0x2800, Trap_28, unknown_exception)
650 EXCEPTION(0x2900, Trap_29, unknown_exception)
651 EXCEPTION(0x2a00, Trap_2a, unknown_exception)
652 EXCEPTION(0x2b00, Trap_2b, unknown_exception)
653 EXCEPTION(0x2c00, Trap_2c, unknown_exception)
654 EXCEPTION(0x2d00, Trap_2d, unknown_exception)
655 EXCEPTION(0x2e00, Trap_2e, unknown_exception)
656 EXCEPTION(0x2f00, Trap_2f, unknown_exception)
657
658 __HEAD
659 . = 0x3000
660
661#ifdef CONFIG_PPC_BOOK3S_604
662.macro save_regs_thread thread
663 stw r0, THR0(\thread)
664 stw r3, THR3(\thread)
665 stw r4, THR4(\thread)
666 stw r5, THR5(\thread)
667 stw r6, THR6(\thread)
668 stw r8, THR8(\thread)
669 stw r9, THR9(\thread)
670 mflr r0
671 stw r0, THLR(\thread)
672 mfctr r0
673 stw r0, THCTR(\thread)
674.endm
675
676.macro restore_regs_thread thread
677 lwz r0, THLR(\thread)
678 mtlr r0
679 lwz r0, THCTR(\thread)
680 mtctr r0
681 lwz r0, THR0(\thread)
682 lwz r3, THR3(\thread)
683 lwz r4, THR4(\thread)
684 lwz r5, THR5(\thread)
685 lwz r6, THR6(\thread)
686 lwz r8, THR8(\thread)
687 lwz r9, THR9(\thread)
688.endm
689
690hash_page_dsi:
691 save_regs_thread r10
692 mfdsisr r3
693 mfdar r4
694 mfsrr0 r5
695 mfsrr1 r9
696 rlwinm r3, r3, 32 - 15, _PAGE_WRITE /* DSISR_STORE -> _PAGE_WRITE */
697 ori r3, r3, _PAGE_PRESENT | _PAGE_READ
698 bl hash_page
699 mfspr r10, SPRN_SPRG_THREAD
700 restore_regs_thread r10
701 b .Lhash_page_dsi_cont
702
703hash_page_isi:
704 mr r11, r10
705 mfspr r10, SPRN_SPRG_THREAD
706 save_regs_thread r10
707 li r3, _PAGE_PRESENT | _PAGE_EXEC
708 lwz r4, SRR0(r10)
709 lwz r9, SRR1(r10)
710 bl hash_page
711 mfspr r10, SPRN_SPRG_THREAD
712 restore_regs_thread r10
713 mr r10, r11
714 b .Lhash_page_isi_cont
715
716 .globl fast_hash_page_return
717fast_hash_page_return:
718 andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
719 mfspr r10, SPRN_SPRG_THREAD
720 restore_regs_thread r10
721 bne 1f
722
723 /* DSI */
724 mtcr r11
725 lwz r11, THR11(r10)
726 mfspr r10, SPRN_SPRG_SCRATCH2
727 rfi
728
7291: /* ISI */
730 mtcr r11
731 mfspr r11, SPRN_SPRG_SCRATCH1
732 mfspr r10, SPRN_SPRG_SCRATCH0
733 rfi
734#endif /* CONFIG_PPC_BOOK3S_604 */
735
736#ifdef CONFIG_VMAP_STACK
737 vmap_stack_overflow_exception
738#endif
739
740 __HEAD
741AltiVecUnavailable:
742 EXCEPTION_PROLOG 0xf20 AltiVecUnavailable
743#ifdef CONFIG_ALTIVEC
744 beq 1f
745 bl load_up_altivec /* if from user, just load it up */
746 b fast_exception_return
747#endif /* CONFIG_ALTIVEC */
7481: prepare_transfer_to_handler
749 bl altivec_unavailable_exception
750 b interrupt_return
751
752 __HEAD
753PerformanceMonitor:
754 EXCEPTION_PROLOG 0xf00 PerformanceMonitor
755 prepare_transfer_to_handler
756 bl performance_monitor_exception
757 b interrupt_return
758
759
760 __HEAD
761/*
762 * This code is jumped to from the startup code to copy
763 * the kernel image to physical address PHYSICAL_START.
764 */
765relocate_kernel:
766 lis r3,PHYSICAL_START@h /* Destination base address */
767 li r6,0 /* Destination offset */
768 li r5,0x4000 /* # bytes of memory to copy */
769 bl copy_and_flush /* copy the first 0x4000 bytes */
770 addi r0,r3,4f@l /* jump to the address of 4f */
771 mtctr r0 /* in copy and do the rest. */
772 bctr /* jump to the copy */
7734: lis r5,_end-KERNELBASE@h
774 ori r5,r5,_end-KERNELBASE@l
775 bl copy_and_flush /* copy the rest */
776 b turn_on_mmu
777
778/*
779 * Copy routine used to copy the kernel to start at physical address 0
780 * and flush and invalidate the caches as needed.
781 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
782 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
783 */
784_GLOBAL(copy_and_flush)
785 addi r5,r5,-4
786 addi r6,r6,-4
7874: li r0,L1_CACHE_BYTES/4
788 mtctr r0
7893: addi r6,r6,4 /* copy a cache line */
790 lwzx r0,r6,r4
791 stwx r0,r6,r3
792 bdnz 3b
793 dcbst r6,r3 /* write it to memory */
794 sync
795 icbi r6,r3 /* flush the icache line */
796 cmplw 0,r6,r5
797 blt 4b
798 sync /* additional sync needed on g4 */
799 isync
800 addi r5,r5,4
801 addi r6,r6,4
802 blr
803
804#ifdef CONFIG_SMP
805 .globl __secondary_start_mpc86xx
806__secondary_start_mpc86xx:
807 mfspr r3, SPRN_PIR
808 stw r3, __secondary_hold_acknowledge@l(0)
809 mr r24, r3 /* cpu # */
810 b __secondary_start
811
812 .globl __secondary_start_pmac_0
813__secondary_start_pmac_0:
814 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
815 li r24,0
816 b 1f
817 li r24,1
818 b 1f
819 li r24,2
820 b 1f
821 li r24,3
8221:
823 /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
824 set to map the 0xf0000000 - 0xffffffff region */
825 mfmsr r0
826 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
827 mtmsr r0
828 isync
829
830 .globl __secondary_start
831__secondary_start:
832 /* Copy some CPU settings from CPU 0 */
833 bl __restore_cpu_setup
834
835 lis r3,-KERNELBASE@h
836 mr r4,r24
837 bl call_setup_cpu /* Call setup_cpu for this CPU */
838 lis r3,-KERNELBASE@h
839 bl init_idle_6xx
840
841 /* get current's stack and current */
842 lis r2,secondary_current@ha
843 tophys(r2,r2)
844 lwz r2,secondary_current@l(r2)
845 tophys(r1,r2)
846 lwz r1,TASK_STACK(r1)
847
848 /* stack */
849 addi r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE
850 li r0,0
851 tophys(r3,r1)
852 stw r0,0(r3)
853
854 /* load up the MMU */
855 bl load_segment_registers
856 bl load_up_mmu
857
858 /* ptr to phys current thread */
859 tophys(r4,r2)
860 addi r4,r4,THREAD /* phys address of our thread_struct */
861 mtspr SPRN_SPRG_THREAD,r4
862BEGIN_MMU_FTR_SECTION
863 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
864 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
865 rlwinm r4, r4, 4, 0xffff01ff
866 mtspr SPRN_SDR1, r4
867END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
868
869 /* enable MMU and jump to start_secondary */
870 li r4,MSR_KERNEL
871 lis r3,start_secondary@h
872 ori r3,r3,start_secondary@l
873 mtspr SPRN_SRR0,r3
874 mtspr SPRN_SRR1,r4
875 rfi
876#endif /* CONFIG_SMP */
877
#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

882/*
883 * Load stuff into the MMU. Intended to be called with
884 * IR=0 and DR=0.
885 */
886SYM_FUNC_START_LOCAL(early_hash_table)
887 sync /* Force all PTE updates to finish */
888 isync
889 tlbia /* Clear all TLB entries */
890 sync /* wait for tlbia/tlbie to finish */
891 TLBSYNC /* ... on all CPUs */
892 /* Load the SDR1 register (hash table base & size) */
893 lis r6, early_hash - PAGE_OFFSET@h
894 ori r6, r6, 3 /* 256kB table */
895 mtspr SPRN_SDR1, r6
896 blr
897SYM_FUNC_END(early_hash_table)
898
899SYM_FUNC_START_LOCAL(load_up_mmu)
900 sync /* Force all PTE updates to finish */
901 isync
902 tlbia /* Clear all TLB entries */
903 sync /* wait for tlbia/tlbie to finish */
904 TLBSYNC /* ... on all CPUs */
905BEGIN_MMU_FTR_SECTION
906 /* Load the SDR1 register (hash table base & size) */
907 lis r6,_SDR1@ha
908 tophys(r6,r6)
909 lwz r6,_SDR1@l(r6)
910 mtspr SPRN_SDR1,r6
911END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
912
913/* Load the BAT registers with the values set up by MMU_init. */
914 lis r3,BATS@ha
915 addi r3,r3,BATS@l
916 tophys(r3,r3)
917 LOAD_BAT(0,r3,r4,r5)
918 LOAD_BAT(1,r3,r4,r5)
919 LOAD_BAT(2,r3,r4,r5)
920 LOAD_BAT(3,r3,r4,r5)
921BEGIN_MMU_FTR_SECTION
922 LOAD_BAT(4,r3,r4,r5)
923 LOAD_BAT(5,r3,r4,r5)
924 LOAD_BAT(6,r3,r4,r5)
925 LOAD_BAT(7,r3,r4,r5)
926END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
927 blr
928SYM_FUNC_END(load_up_mmu)
929
930_GLOBAL(load_segment_registers)
931 li r0, NUM_USER_SEGMENTS /* load up user segment register values */
932 mtctr r0 /* for context 0 */
933#ifdef CONFIG_PPC_KUEP
934 lis r3, SR_NX@h /* Kp = 0, Ks = 0, VSID = 0 */
935#else
936 li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
937#endif
938 li r4, 0
9393: mtsrin r3, r4
940 addi r3, r3, 0x111 /* increment VSID */
941 addis r4, r4, 0x1000 /* address of next segment */
942 bdnz 3b
943 li r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
944 mtctr r0 /* for context 0 */
945 rlwinm r3, r3, 0, ~SR_NX /* Nx = 0 */
946 rlwinm r3, r3, 0, ~SR_KS /* Ks = 0 */
947 oris r3, r3, SR_KP@h /* Kp = 1 */
9483: mtsrin r3, r4
949 addi r3, r3, 0x111 /* increment VSID */
950 addis r4, r4, 0x1000 /* address of next segment */
951 bdnz 3b
952 blr
953
954/*
955 * This is where the main kernel code starts.
956 */
957start_here:
958 /* ptr to current */
959 lis r2,init_task@h
960 ori r2,r2,init_task@l
961 /* Set up for using our exception vectors */
962 /* ptr to phys current thread */
963 tophys(r4,r2)
964 addi r4,r4,THREAD /* init task's THREAD */
965 mtspr SPRN_SPRG_THREAD,r4
966BEGIN_MMU_FTR_SECTION
967 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
968 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
969 rlwinm r4, r4, 4, 0xffff01ff
970 mtspr SPRN_SDR1, r4
971END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
972
973 /* stack */
974 lis r1,init_thread_union@ha
975 addi r1,r1,init_thread_union@l
976 li r0,0
977 stwu r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)
978/*
979 * Do early platform-specific initialization,
980 * and set up the MMU.
981 */
982#ifdef CONFIG_KASAN
983 bl kasan_early_init
984#endif
985 li r3,0
986 mr r4,r31
987 bl machine_init
988 bl __save_cpu_setup
989 bl MMU_init
990 bl MMU_init_hw_patch
991
992/*
993 * Go back to running unmapped so we can load up new values
994 * for SDR1 (hash table pointer) and the segment registers
995 * and change to using our exception vectors.
996 */
997 lis r4,2f@h
998 ori r4,r4,2f@l
999 tophys(r4,r4)
1000 li r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1001
1002 .align 4
1003 mtspr SPRN_SRR0,r4
1004 mtspr SPRN_SRR1,r3
1005 rfi
1006/* Load up the kernel context */
10072: bl load_up_mmu
1008
1009#ifdef CONFIG_BDI_SWITCH
1010 /* Add helper information for the Abatron bdiGDB debugger.
1011 * We do this here because we know the mmu is disabled, and
1012 * will be enabled for real in just a few instructions.
1013 */
1014 lis r5, abatron_pteptrs@h
1015 ori r5, r5, abatron_pteptrs@l
1016 stw r5, 0xf0(0) /* This much match your Abatron config */
1017 lis r6, swapper_pg_dir@h
1018 ori r6, r6, swapper_pg_dir@l
1019 tophys(r5, r5)
1020 stw r6, 0(r5)
1021#endif /* CONFIG_BDI_SWITCH */
1022
1023/* Now turn on the MMU for real! */
1024 li r4,MSR_KERNEL
1025 lis r3,start_kernel@h
1026 ori r3,r3,start_kernel@l
1027 mtspr SPRN_SRR0,r3
1028 mtspr SPRN_SRR1,r4
1029 rfi
1030
1031/*
1032 * An undocumented "feature" of 604e requires that the v bit
1033 * be cleared before changing BAT values.
1034 *
1035 * Also, newer IBM firmware does not clear bat3 and 4 so
1036 * this makes sure it's done.
1037 * -- Cort
1038 */
/*
 * clear_bats() - invalidate every instruction and data BAT register.
 *
 * Writing zero to the upper BAT words clears the valid bits, disabling
 * each BAT translation (required before changing BAT values on the
 * 604e, per the erratum note above, and cleans up BATs 3/4 that newer
 * IBM firmware leaves set).
 *
 * In:       nothing
 * Out:      nothing
 * Clobbers: r10
 * NOTE(review): callers run this with address translation off.
 */
SYM_FUNC_START_LOCAL(clear_bats)
	li	r10,0

	/* BATs 0-3 exist on all 6xx-class parts */
	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup has
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr
SYM_FUNC_END(clear_bats)
1084
/*
 * update_bats() - reprogram all BATs from the kernel's BATS[] array.
 *
 * Saves the caller's MSR (r6) and LR (r7), clears MSR_RI and MSR_EE,
 * then rfi's to 1f with IR/DR off so the BATs can be rewritten safely.
 * After clearing the BATs and reloading BATs 0-3 (and 4-7 when
 * MMU_FTR_USE_HIGH_BATS) via LOAD_BAT, a second rfi restores the
 * caller's MSR and resumes at the saved LR.
 *
 * Clobbers: r0, r3-r7, SRR0/SRR1, plus whatever LOAD_BAT/clear_bats use.
 */
_GLOBAL(update_bats)
	lis	r4, 1f@h
	ori	r4, r4, 1f@l
	tophys(r4, r4)			/* physical address of 1f */
	mfmsr	r6			/* save caller's MSR */
	mflr	r7			/* save return address */
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
	rlwinm	r0, r6, 0, ~MSR_RI
	rlwinm	r0, r0, 0, ~MSR_EE
	mtmsr	r0

	.align	4
	mtspr	SPRN_SRR0, r4
	mtspr	SPRN_SRR1, r3
	rfi				/* continue at 1f with MMU off */
1:	bl	clear_bats
	lis	r3, BATS@ha
	addi	r3, r3, BATS@l
	tophys(r3, r3)			/* physical address of BATS[] */
	LOAD_BAT(0, r3, r4, r5)
	LOAD_BAT(1, r3, r4, r5)
	LOAD_BAT(2, r3, r4, r5)
	LOAD_BAT(3, r3, r4, r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4, r3, r4, r5)
	LOAD_BAT(5, r3, r4, r5)
	LOAD_BAT(6, r3, r4, r5)
	LOAD_BAT(7, r3, r4, r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	mtmsr	r3
	mtspr	SPRN_SRR0, r7		/* resume at the saved LR ... */
	mtspr	SPRN_SRR1, r6		/* ... with the caller's MSR */
	rfi
1119
/*
 * flush_tlbs() - invalidate the whole TLB.
 *
 * tlbie's every page-sized EA from just below 4MB (0x400000) down to 0,
 * which walks all TLB congruence classes, then sync's so the
 * invalidations complete.  Clobbers r10 and cr0.
 */
SYM_FUNC_START_LOCAL(flush_tlbs)
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b
	sync
	blr
SYM_FUNC_END(flush_tlbs)
1128
/*
 * mmu_off() - turn address translation off.
 *
 * In:  r3 = physical address we are running at (from early_init())
 * Out: resumes execution at the physical address of __after_mmu_off
 *      with MSR_IR/MSR_DR cleared; returns directly via blr if the
 *      MMU is already off.
 * Clobbers: r0, r3, r4, SRR0/SRR1, cr0.
 */
SYM_FUNC_START_LOCAL(mmu_off)
	addi	r4, r3, __after_mmu_off - _start	/* phys addr of label */
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr					/* already off: plain return */
	andc	r3,r3,r0			/* new MSR with IR/DR cleared */

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfi
SYM_FUNC_END(mmu_off)
1142
/*
 * initial_bats() - use IBAT0/DBAT0 to map the first 256M of RAM at
 * PAGE_OFFSET (virtual) onto physical 0, so the kernel can run at its
 * linked address before the real MMU setup.  Clobbers r8, r11.
 */
SYM_FUNC_START_LOCAL(initial_bats)
	lis	r11,PAGE_OFFSET@h	/* effective (virtual) base */
	tophys(r8,r11)			/* matching physical base */
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync
	blr
SYM_FUNC_END(initial_bats)
1161
#ifdef CONFIG_BOOTX_TEXT
SYM_FUNC_START_LOCAL(setup_disp_bat)
	/*
	 * setup the display bat prepared for us in prom.c
	 * disp_BAT holds the DBAT3 upper word (offset 0) and lower word
	 * (offset 4), precomputed by the boot code.  Runs before
	 * relocation, hence the reloc_offset adjustment.
	 * Clobbers r3, r8, r11 (and whatever reloc_offset clobbers).
	 */
	mflr	r8			/* preserve LR around the call */
	bl	reloc_offset		/* r3 = link-vs-run offset */
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l	/* runtime address of disp_BAT */
	cmpwi	cr0,r8,0
	beqlr
	lwz	r11,0(r8)		/* upper BAT word */
	lwz	r8,4(r8)		/* lower BAT word */
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
SYM_FUNC_END(setup_disp_bat)
#endif /* CONFIG_BOOTX_TEXT */
1181
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
/*
 * setup_cpm_bat() - map 1MB at 0xf0000000 (virtual == physical) with
 * DBAT1 for early CPM debug I/O.  The lower word uses the same 0x2a
 * attribute bits as the usbgecko BAT below (uncached, guarded, rw);
 * the upper word selects a 1MB block with Vs=1.  Clobbers r8, r11.
 */
SYM_FUNC_START_LOCAL(setup_cpm_bat)
	lis	r8, 0xf000		/* physical base 0xf0000000 */
	ori	r8, r8, 0x002a		/* uncached, guarded, rw */
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000		/* effective base 0xf0000000 */
	ori	r11, r11, (BL_1M << 2) | 2	/* 1MB block, Vs=1 */
	mtspr	SPRN_DBAT1U, r11

	blr
SYM_FUNC_END(setup_cpm_bat)
#endif
1195
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
/*
 * setup_usbgecko_bat() - map the USB Gecko I/O window with DBAT1 for
 * early debug output.  Physical base depends on the platform
 * (GameCube: 0x0c000000, Wii: 0x0d000000); the virtual address is the
 * fixmap's FIX_EARLY_DEBUG_BASE region in the top 128K of the address
 * space.  Clobbers r8, r11.
 */
SYM_FUNC_START_LOCAL(setup_usbgecko_bat)
	/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe		/* top 128K */
	ori	r8, r8, 0x002a		/* uncached, guarded ,rw */
	ori	r11, r11, 0x2		/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
SYM_FUNC_END(setup_usbgecko_bat)
#endif
1218
1219 .data
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * PowerPC version
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
13 *
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
17 */
18
19#include <linux/init.h>
20#include <linux/pgtable.h>
21#include <linux/linkage.h>
22
23#include <asm/reg.h>
24#include <asm/page.h>
25#include <asm/mmu.h>
26#include <asm/cputable.h>
27#include <asm/cache.h>
28#include <asm/thread_info.h>
29#include <asm/ppc_asm.h>
30#include <asm/asm-offsets.h>
31#include <asm/ptrace.h>
32#include <asm/bug.h>
33#include <asm/kvm_book3s_asm.h>
34#include <asm/feature-fixups.h>
35#include <asm/interrupt.h>
36
37#include "head_32.h"
38
/*
 * LOAD_BAT(n, reg, RA, RB): program IBATn and DBATn from the BAT table
 * at `reg`.  Each table entry is 4 words at offset n*16: IBATnU,
 * IBATnL, DBATnU, DBATnL.  The upper (valid-bit) words are zeroed
 * first so the BAT is invalid while it is being rewritten.  RA and RB
 * are scratch registers.
 */
#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB
52
53 __HEAD
54_GLOBAL(_stext);
55
56/*
57 * _start is defined this way because the XCOFF loader in the OpenFirmware
58 * on the powermac expects the entry point to be a procedure descriptor.
59 */
60_GLOBAL(_start);
61 /*
62 * These are here for legacy reasons, the kernel used to
63 * need to look like a coff function entry for the pmac
64 * but we're always started by some kind of bootloader now.
65 * -- Cort
66 */
67 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
68 nop /* used by __secondary_hold on prep (mtx) and chrp smp */
69 nop
70
71/* PMAC
72 * Enter here with the kernel text, data and bss loaded starting at
73 * 0, running with virtual == physical mapping.
74 * r5 points to the prom entry point (the client interface handler
75 * address). Address translation is turned on, with the prom
76 * managing the hash table. Interrupts are disabled. The stack
77 * pointer (r1) points to just below the end of the half-meg region
78 * from 0x380000 - 0x400000, which is mapped in already.
79 *
80 * If we are booted from MacOS via BootX, we enter with the kernel
81 * image loaded somewhere, and the following values in registers:
82 * r3: 'BooX' (0x426f6f58)
83 * r4: virtual address of boot_infos_t
84 * r5: 0
85 *
86 * PREP
87 * This is jumped to on prep systems right after the kernel is relocated
88 * to its proper place in memory by the boot loader. The expected layout
89 * of the regs is:
90 * r3: ptr to residual data
91 * r4: initrd_start or if no initrd then 0
92 * r5: initrd_end - unused if r4 is 0
93 * r6: Start of command line string
94 * r7: End of command line string
95 *
96 * This just gets a minimal mmu environment setup so we can call
97 * start_here() to do the real work.
98 * -- Cort
99 */
100
101 .globl __start
102__start:
103/*
104 * We have to do any OF calls before we map ourselves to KERNELBASE,
105 * because OF may have I/O devices mapped into that area
106 * (particularly on CHRP).
107 */
108 cmpwi 0,r5,0
109 beq 1f
110
111#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
112 /* find out where we are now */
113 bcl 20,31,$+4
1140: mflr r8 /* r8 = runtime addr here */
115 addis r8,r8,(_stext - 0b)@ha
116 addi r8,r8,(_stext - 0b)@l /* current runtime base addr */
117 bl prom_init
118#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
119
120 /* We never return. We also hit that trap if trying to boot
121 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
122 trap
123
124/*
125 * Check for BootX signature when supporting PowerMac and branch to
126 * appropriate trampoline if it's present
127 */
128#ifdef CONFIG_PPC_PMAC
1291: lis r31,0x426f
130 ori r31,r31,0x6f58
131 cmpw 0,r3,r31
132 bne 1f
133 bl bootx_init
134 trap
135#endif /* CONFIG_PPC_PMAC */
136
1371: mr r31,r3 /* save device tree ptr */
138 li r24,0 /* cpu # */
139
140/*
141 * early_init() does the early machine identification and does
142 * the necessary low-level setup and clears the BSS
143 * -- Cort <cort@fsmlabs.com>
144 */
145 bl early_init
146
147/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
148 * the physical address we are running at, returned by early_init()
149 */
150 bl mmu_off
151__after_mmu_off:
152 bl clear_bats
153 bl flush_tlbs
154
155 bl initial_bats
156 bl load_segment_registers
157 bl reloc_offset
158 bl early_hash_table
159#if defined(CONFIG_BOOTX_TEXT)
160 bl setup_disp_bat
161#endif
162#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
163 bl setup_cpm_bat
164#endif
165#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
166 bl setup_usbgecko_bat
167#endif
168
169/*
170 * Call setup_cpu for CPU 0 and initialize 6xx Idle
171 */
172 bl reloc_offset
173 li r24,0 /* cpu# */
174 bl call_setup_cpu /* Call setup_cpu for this CPU */
175 bl reloc_offset
176 bl init_idle_6xx
177
178
179/*
180 * We need to run with _start at physical address 0.
181 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
182 * the exception vectors at 0 (and therefore this copy
183 * overwrites OF's exception vectors with our own).
184 * The MMU is off at this point.
185 */
186 bl reloc_offset
187 mr r26,r3
188 addis r4,r3,KERNELBASE@h /* current address of _start */
189 lis r5,PHYSICAL_START@h
190 cmplw 0,r4,r5 /* already running at PHYSICAL_START? */
191 bne relocate_kernel
192/*
193 * we now have the 1st 16M of ram mapped with the bats.
194 * prep needs the mmu to be turned on here, but pmac already has it on.
195 * this shouldn't bother the pmac since it just gets turned on again
196 * as we jump to our code at KERNELBASE. -- Cort
197 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
198 * off, and in other cases, we now turn it off before changing BATs above.
199 */
200turn_on_mmu:
201 mfmsr r0
202 ori r0,r0,MSR_DR|MSR_IR|MSR_RI
203 mtspr SPRN_SRR1,r0
204 lis r0,start_here@h
205 ori r0,r0,start_here@l
206 mtspr SPRN_SRR0,r0
207 rfi /* enables MMU */
208
209/*
210 * We need __secondary_hold as a place to hold the other cpus on
211 * an SMP machine, even when we are running a UP kernel.
212 */
213 . = 0xc0 /* for prep bootloader */
214 li r3,1 /* MTX only has 1 cpu */
215 .globl __secondary_hold
216__secondary_hold:
217 /* tell the master we're here */
218 stw r3,__secondary_hold_acknowledge@l(0)
219#ifdef CONFIG_SMP
220100: lwz r4,0(0)
221 /* wait until we're told to start */
222 cmpw 0,r4,r3
223 bne 100b
224 /* our cpu # was at addr 0 - go */
225 mr r24,r3 /* cpu # */
226 b __secondary_start
227#else
228 b .
229#endif /* CONFIG_SMP */
230
231 .globl __secondary_hold_spinloop
232__secondary_hold_spinloop:
233 .long 0
234 .globl __secondary_hold_acknowledge
235__secondary_hold_acknowledge:
236 .long -1
237
238/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_async_exception) when done. */
241 EXCEPTION(INTERRUPT_SYSTEM_RESET, Reset, unknown_async_exception)
242
243/* Machine check */
244/*
245 * On CHRP, this is complicated by the fact that we could get a
246 * machine check inside RTAS, and we have no guarantee that certain
247 * critical registers will have the values we expect. The set of
248 * registers that might have bad values includes all the GPRs
249 * and all the BATs. We indicate that we are in RTAS by putting
250 * a non-zero value, the address of the exception frame to use,
251 * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
252 * and uses its value if it is non-zero.
253 * (Other exception handlers assume that r1 is a valid kernel stack
254 * pointer when we take an exception from supervisor mode.)
255 * -- paulus.
256 */
257 START_EXCEPTION(INTERRUPT_MACHINE_CHECK, MachineCheck)
258 EXCEPTION_PROLOG_0
259#ifdef CONFIG_PPC_CHRP
260 mtspr SPRN_SPRG_SCRATCH2,r1
261 mfspr r1, SPRN_SPRG_THREAD
262 lwz r1, RTAS_SP(r1)
263 cmpwi cr1, r1, 0
264 bne cr1, 7f
265 mfspr r1, SPRN_SPRG_SCRATCH2
266#endif /* CONFIG_PPC_CHRP */
267 EXCEPTION_PROLOG_1
2687: EXCEPTION_PROLOG_2 0x200 MachineCheck
269#ifdef CONFIG_PPC_CHRP
270 beq cr1, 1f
271 twi 31, 0, 0
272#endif
2731: prepare_transfer_to_handler
274 bl machine_check_exception
275 b interrupt_return
276
277/* Data access exception. */
278 START_EXCEPTION(INTERRUPT_DATA_STORAGE, DataAccess)
279#ifdef CONFIG_PPC_BOOK3S_604
280BEGIN_MMU_FTR_SECTION
281 mtspr SPRN_SPRG_SCRATCH2,r10
282 mfspr r10, SPRN_SPRG_THREAD
283 stw r11, THR11(r10)
284 mfspr r10, SPRN_DSISR
285 mfcr r11
286 andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
287 mfspr r10, SPRN_SPRG_THREAD
288 beq hash_page_dsi
289.Lhash_page_dsi_cont:
290 mtcr r11
291 lwz r11, THR11(r10)
292 mfspr r10, SPRN_SPRG_SCRATCH2
293MMU_FTR_SECTION_ELSE
294 b 1f
295ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
296#endif
2971: EXCEPTION_PROLOG_0 handle_dar_dsisr=1
298 EXCEPTION_PROLOG_1
299 EXCEPTION_PROLOG_2 INTERRUPT_DATA_STORAGE DataAccess handle_dar_dsisr=1
300 prepare_transfer_to_handler
301 lwz r5, _DSISR(r1)
302 andis. r0, r5, DSISR_DABRMATCH@h
303 bne- 1f
304 bl do_page_fault
305 b interrupt_return
3061: bl do_break
307 REST_NVGPRS(r1)
308 b interrupt_return
309
310
311/* Instruction access exception. */
312 START_EXCEPTION(INTERRUPT_INST_STORAGE, InstructionAccess)
313 mtspr SPRN_SPRG_SCRATCH0,r10
314 mtspr SPRN_SPRG_SCRATCH1,r11
315 mfspr r10, SPRN_SPRG_THREAD
316 mfspr r11, SPRN_SRR0
317 stw r11, SRR0(r10)
318 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
319 stw r11, SRR1(r10)
320 mfcr r10
321#ifdef CONFIG_PPC_BOOK3S_604
322BEGIN_MMU_FTR_SECTION
323 andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
324 bne hash_page_isi
325.Lhash_page_isi_cont:
326 mfspr r11, SPRN_SRR1 /* check whether user or kernel */
327END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
328#endif
329 andi. r11, r11, MSR_PR
330
331 EXCEPTION_PROLOG_1
332 EXCEPTION_PROLOG_2 INTERRUPT_INST_STORAGE InstructionAccess
333 andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
334 stw r5, _DSISR(r11)
335 stw r12, _DAR(r11)
336 prepare_transfer_to_handler
337 bl do_page_fault
338 b interrupt_return
339
340/* External interrupt */
341 EXCEPTION(INTERRUPT_EXTERNAL, HardwareInterrupt, do_IRQ)
342
343/* Alignment exception */
344 START_EXCEPTION(INTERRUPT_ALIGNMENT, Alignment)
345 EXCEPTION_PROLOG INTERRUPT_ALIGNMENT Alignment handle_dar_dsisr=1
346 prepare_transfer_to_handler
347 bl alignment_exception
348 REST_NVGPRS(r1)
349 b interrupt_return
350
351/* Program check exception */
352 START_EXCEPTION(INTERRUPT_PROGRAM, ProgramCheck)
353 EXCEPTION_PROLOG INTERRUPT_PROGRAM ProgramCheck
354 prepare_transfer_to_handler
355 bl program_check_exception
356 REST_NVGPRS(r1)
357 b interrupt_return
358
359/* Floating-point unavailable */
360 START_EXCEPTION(0x800, FPUnavailable)
361#ifdef CONFIG_PPC_FPU
362BEGIN_FTR_SECTION
363/*
364 * Certain Freescale cores don't have a FPU and treat fp instructions
365 * as a FP Unavailable exception. Redirect to illegal/emulation handling.
366 */
367 b ProgramCheck
368END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
369 EXCEPTION_PROLOG INTERRUPT_FP_UNAVAIL FPUnavailable
370 beq 1f
371 bl load_up_fpu /* if from user, just load it up */
372 b fast_exception_return
3731: prepare_transfer_to_handler
374 bl kernel_fp_unavailable_exception
375 b interrupt_return
376#else
377 b ProgramCheck
378#endif
379
380/* Decrementer */
381 EXCEPTION(INTERRUPT_DECREMENTER, Decrementer, timer_interrupt)
382
383 EXCEPTION(0xa00, Trap_0a, unknown_exception)
384 EXCEPTION(0xb00, Trap_0b, unknown_exception)
385
386/* System call */
387 START_EXCEPTION(INTERRUPT_SYSCALL, SystemCall)
388 SYSCALL_ENTRY INTERRUPT_SYSCALL
389
390 EXCEPTION(INTERRUPT_TRACE, SingleStep, single_step_exception)
391 EXCEPTION(0xe00, Trap_0e, unknown_exception)
392
393/*
394 * The Altivec unavailable trap is at 0x0f20. Foo.
395 * We effectively remap it to 0x3000.
396 * We include an altivec unavailable exception vector even if
397 * not configured for Altivec, so that you can't panic a
398 * non-altivec kernel running on a machine with altivec just
399 * by executing an altivec instruction.
400 */
401 START_EXCEPTION(INTERRUPT_PERFMON, PerformanceMonitorTrap)
402 b PerformanceMonitor
403
404 START_EXCEPTION(INTERRUPT_ALTIVEC_UNAVAIL, AltiVecUnavailableTrap)
405 b AltiVecUnavailable
406
407 __HEAD
408/*
409 * Handle TLB miss for instruction on 603/603e.
410 * Note: we get an alternate set of r0 - r3 to use automatically.
411 */
412 . = INTERRUPT_INST_TLB_MISS_603
413InstructionTLBMiss:
414 /* Get PTE (linux-style) and check access */
415 mfspr r0,SPRN_IMISS
416 mfspr r2, SPRN_SDR1
417 li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
418 rlwinm r2, r2, 28, 0xfffff000
419 rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */
420 lwz r2,0(r2) /* get pmd entry */
421#ifdef CONFIG_EXECMEM
422 rlwinm r3, r0, 4, 0xf
423 subi r3, r3, (TASK_SIZE >> 28) & 0xf
424#endif
425 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
426 beq- InstructionAddressInvalid /* return if no mapping */
427 rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */
428 lwz r2,0(r2) /* get linux-style pte */
429 andc. r1,r1,r2 /* check access & ~permission */
430 bne- InstructionAddressInvalid /* return if access not permitted */
431 /* Convert linux-style PTE to low word of PPC-style PTE */
432#ifdef CONFIG_EXECMEM
433 rlwimi r2, r3, 1, 31, 31 /* userspace ? -> PP lsb */
434#endif
435 ori r1, r1, 0xe06 /* clear out reserved bits */
436 andc r1, r2, r1 /* PP = user? 1 : 0 */
437BEGIN_FTR_SECTION
438 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
439END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
440 mtspr SPRN_RPA,r1
441 tlbli r0
442 mfspr r3,SPRN_SRR1 /* Need to restore CR0 */
443 mtcrf 0x80,r3
444 rfi
445InstructionAddressInvalid:
446 mfspr r3,SPRN_SRR1
447 rlwinm r1,r3,9,6,6 /* Get load/store bit */
448
449 addis r1,r1,0x2000
450 mtspr SPRN_DSISR,r1 /* (shouldn't be needed) */
451 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
452 or r2,r2,r1
453 mtspr SPRN_SRR1,r2
454 mfspr r1,SPRN_IMISS /* Get failing address */
455 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
456 rlwimi r2,r2,1,30,30 /* change 1 -> 3 */
457 xor r1,r1,r2
458 mtspr SPRN_DAR,r1 /* Set fault address */
459 mfmsr r0 /* Restore "normal" registers */
460 xoris r0,r0,MSR_TGPR>>16
461 mtcrf 0x80,r3 /* Restore CR0 */
462 mtmsr r0
463 b InstructionAccess
464
465/*
466 * Handle TLB miss for DATA Load operation on 603/603e
467 */
468 . = INTERRUPT_DATA_LOAD_TLB_MISS_603
469DataLoadTLBMiss:
470 /* Get PTE (linux-style) and check access */
471 mfspr r0,SPRN_DMISS
472 mfspr r2, SPRN_SDR1
473 rlwinm r1, r2, 28, 0xfffff000
474 rlwimi r1,r0,12,20,29 /* insert top 10 bits of address */
475 lwz r2,0(r1) /* get pmd entry */
476 rlwinm r3, r0, 4, 0xf
477 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
478 subi r3, r3, (TASK_SIZE >> 28) & 0xf
479 beq- 2f /* bail if no mapping */
4801: rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */
481 lwz r2,0(r2) /* get linux-style pte */
482 li r1, _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_READ
483 andc. r1,r1,r2 /* check access & ~permission */
484 bne- DataAddressInvalid /* return if access not permitted */
485 /* Convert linux-style PTE to low word of PPC-style PTE */
486 rlwinm r1,r2,32-9,30,30 /* _PAGE_WRITE -> PP msb */
487 rlwimi r2,r3,2,30,31 /* userspace ? -> PP */
488 rlwimi r1,r2,32-3,24,24 /* _PAGE_WRITE -> _PAGE_DIRTY */
489 xori r1,r1,_PAGE_DIRTY /* clear dirty when not rw */
490 ori r1,r1,0xe04 /* clear out reserved bits */
491 andc r1,r2,r1 /* PP = user? rw? 1: 3: 0 */
492BEGIN_FTR_SECTION
493 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
494END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
495 mtspr SPRN_RPA,r1
496BEGIN_MMU_FTR_SECTION
497 li r3,1
498 mfspr r1,SPRN_SPRG_603_LRU
499 rlwinm r2,r0,20,27,31 /* Get Address bits 15:19 */
500 slw r3,r3,r2
501 xor r1,r3,r1
502 srw r3,r1,r2
503 mtspr SPRN_SPRG_603_LRU,r1
504 mfspr r2,SPRN_SRR1
505 rlwimi r2,r3,31-14,14,14
506 mtspr SPRN_SRR1,r2
507 mtcrf 0x80,r2
508 tlbld r0
509 rfi
510MMU_FTR_SECTION_ELSE
511 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
512 mtcrf 0x80,r2
513 tlbld r0
514 rfi
515ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
516
5172: lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha
518 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
519 rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */
520 lwz r2,0(r2) /* get pmd entry */
521 cmpwi cr0,r2,0
522 beq- DataAddressInvalid /* return if no mapping */
523 stw r2,0(r1)
524 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
525 b 1b
526DataAddressInvalid:
527 mfspr r3,SPRN_SRR1
528 rlwinm r1,r3,9,6,6 /* Get load/store bit */
529 addis r1,r1,0x2000
530 mtspr SPRN_DSISR,r1
531 andi. r2,r3,0xFFFF /* Clear upper bits of SRR1 */
532 mtspr SPRN_SRR1,r2
533 mfspr r1,SPRN_DMISS /* Get failing address */
534 rlwinm. r2,r2,0,31,31 /* Check for little endian access */
535 beq 20f /* Jump if big endian */
536 xori r1,r1,3
53720: mtspr SPRN_DAR,r1 /* Set fault address */
538 mfmsr r0 /* Restore "normal" registers */
539 xoris r0,r0,MSR_TGPR>>16
540 mtcrf 0x80,r3 /* Restore CR0 */
541 mtmsr r0
542 b DataAccess
543
544/*
545 * Handle TLB miss for DATA Store on 603/603e
546 */
547 . = INTERRUPT_DATA_STORE_TLB_MISS_603
548DataStoreTLBMiss:
549 /* Get PTE (linux-style) and check access */
550 mfspr r0,SPRN_DMISS
551 mfspr r2, SPRN_SDR1
552 rlwinm r1, r2, 28, 0xfffff000
553 rlwimi r1,r0,12,20,29 /* insert top 10 bits of address */
554 lwz r2,0(r1) /* get pmd entry */
555 rlwinm r3, r0, 4, 0xf
556 rlwinm. r2,r2,0,0,19 /* extract address of pte page */
557 subi r3, r3, (TASK_SIZE >> 28) & 0xf
558 beq- 2f /* bail if no mapping */
5591:
560 rlwimi r2,r0,22,20,29 /* insert next 10 bits of address */
561 lwz r2,0(r2) /* get linux-style pte */
562 li r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
563 andc. r1,r1,r2 /* check access & ~permission */
564 bne- DataAddressInvalid /* return if access not permitted */
565 /* Convert linux-style PTE to low word of PPC-style PTE */
566 rlwimi r2,r3,1,31,31 /* userspace ? -> PP lsb */
567 li r1,0xe06 /* clear out reserved bits & PP msb */
568 andc r1,r2,r1 /* PP = user? 1: 0 */
569BEGIN_FTR_SECTION
570 rlwinm r1,r1,0,~_PAGE_COHERENT /* clear M (coherence not required) */
571END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
572 mtspr SPRN_RPA,r1
573 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
574 mtcrf 0x80,r2
575BEGIN_MMU_FTR_SECTION
576 li r3,1
577 mfspr r1,SPRN_SPRG_603_LRU
578 rlwinm r2,r0,20,27,31 /* Get Address bits 15:19 */
579 slw r3,r3,r2
580 xor r1,r3,r1
581 srw r3,r1,r2
582 mtspr SPRN_SPRG_603_LRU,r1
583 mfspr r2,SPRN_SRR1
584 rlwimi r2,r3,31-14,14,14
585 mtspr SPRN_SRR1,r2
586 mtcrf 0x80,r2
587 tlbld r0
588 rfi
589MMU_FTR_SECTION_ELSE
590 mfspr r2,SPRN_SRR1 /* Need to restore CR0 */
591 mtcrf 0x80,r2
592 tlbld r0
593 rfi
594ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
595
5962: lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha
597 addi r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
598 rlwimi r2,r0,12,20,29 /* insert top 10 bits of address */
599 lwz r2,0(r2) /* get pmd entry */
600 cmpwi cr0,r2,0
601 beq- DataAddressInvalid /* return if no mapping */
602 stw r2,0(r1)
603 rlwinm r2,r2,0,0,19 /* extract address of pte page */
604 b 1b
605
606#ifndef CONFIG_ALTIVEC
607#define altivec_assist_exception unknown_exception
608#endif
609
610#ifndef CONFIG_TAU_INT
611#define TAUException unknown_async_exception
612#endif
613
614 EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception)
615 EXCEPTION(0x1400, SMI, SMIException)
616 EXCEPTION(0x1500, Trap_15, unknown_exception)
617 EXCEPTION(0x1600, Trap_16, altivec_assist_exception)
618 EXCEPTION(0x1700, Trap_17, TAUException)
619 EXCEPTION(0x1800, Trap_18, unknown_exception)
620 EXCEPTION(0x1900, Trap_19, unknown_exception)
621 EXCEPTION(0x1a00, Trap_1a, unknown_exception)
622 EXCEPTION(0x1b00, Trap_1b, unknown_exception)
623 EXCEPTION(0x1c00, Trap_1c, unknown_exception)
624 EXCEPTION(0x1d00, Trap_1d, unknown_exception)
625 EXCEPTION(0x1e00, Trap_1e, unknown_exception)
626 EXCEPTION(0x1f00, Trap_1f, unknown_exception)
627 EXCEPTION(0x2000, RunMode, RunModeException)
628 EXCEPTION(0x2100, Trap_21, unknown_exception)
629 EXCEPTION(0x2200, Trap_22, unknown_exception)
630 EXCEPTION(0x2300, Trap_23, unknown_exception)
631 EXCEPTION(0x2400, Trap_24, unknown_exception)
632 EXCEPTION(0x2500, Trap_25, unknown_exception)
633 EXCEPTION(0x2600, Trap_26, unknown_exception)
634 EXCEPTION(0x2700, Trap_27, unknown_exception)
635 EXCEPTION(0x2800, Trap_28, unknown_exception)
636 EXCEPTION(0x2900, Trap_29, unknown_exception)
637 EXCEPTION(0x2a00, Trap_2a, unknown_exception)
638 EXCEPTION(0x2b00, Trap_2b, unknown_exception)
639 EXCEPTION(0x2c00, Trap_2c, unknown_exception)
640 EXCEPTION(0x2d00, Trap_2d, unknown_exception)
641 EXCEPTION(0x2e00, Trap_2e, unknown_exception)
642 EXCEPTION(0x2f00, Trap_2f, unknown_exception)
643
644 __HEAD
645 . = 0x3000
646
647#ifdef CONFIG_PPC_BOOK3S_604
/*
 * save_regs_thread: stash r0, r3-r6, r8, r9 plus LR and CTR into the
 * THR*/THLR/THCTR save slots of the thread_struct pointed to by
 * \thread.  Used around the hash_page calls below, which run in the
 * exception prolog before a pt_regs frame exists.
 */
.macro save_regs_thread thread
	stw	r0, THR0(\thread)
	stw	r3, THR3(\thread)
	stw	r4, THR4(\thread)
	stw	r5, THR5(\thread)
	stw	r6, THR6(\thread)
	stw	r8, THR8(\thread)
	stw	r9, THR9(\thread)
	mflr	r0
	stw	r0, THLR(\thread)
	mfctr	r0
	stw	r0, THCTR(\thread)
.endm

/*
 * restore_regs_thread: inverse of save_regs_thread — reload LR, CTR
 * and r0, r3-r6, r8, r9 from the thread_struct save slots.
 */
.macro restore_regs_thread thread
	lwz	r0, THLR(\thread)
	mtlr	r0
	lwz	r0, THCTR(\thread)
	mtctr	r0
	lwz	r0, THR0(\thread)
	lwz	r3, THR3(\thread)
	lwz	r4, THR4(\thread)
	lwz	r5, THR5(\thread)
	lwz	r6, THR6(\thread)
	lwz	r8, THR8(\thread)
	lwz	r9, THR9(\thread)
.endm
675
676hash_page_dsi:
677 save_regs_thread r10
678 mfdsisr r3
679 mfdar r4
680 mfsrr0 r5
681 mfsrr1 r9
682 rlwinm r3, r3, 32 - 15, _PAGE_WRITE /* DSISR_STORE -> _PAGE_WRITE */
683 ori r3, r3, _PAGE_PRESENT | _PAGE_READ
684 bl hash_page
685 mfspr r10, SPRN_SPRG_THREAD
686 restore_regs_thread r10
687 b .Lhash_page_dsi_cont
688
689hash_page_isi:
690 mr r11, r10
691 mfspr r10, SPRN_SPRG_THREAD
692 save_regs_thread r10
693 li r3, _PAGE_PRESENT | _PAGE_EXEC
694 lwz r4, SRR0(r10)
695 lwz r9, SRR1(r10)
696 bl hash_page
697 mfspr r10, SPRN_SPRG_THREAD
698 restore_regs_thread r10
699 mr r10, r11
700 b .Lhash_page_isi_cont
701
702 .globl fast_hash_page_return
703fast_hash_page_return:
704 andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
705 mfspr r10, SPRN_SPRG_THREAD
706 restore_regs_thread r10
707 bne 1f
708
709 /* DSI */
710 mtcr r11
711 lwz r11, THR11(r10)
712 mfspr r10, SPRN_SPRG_SCRATCH2
713 rfi
714
7151: /* ISI */
716 mtcr r11
717 mfspr r11, SPRN_SPRG_SCRATCH1
718 mfspr r10, SPRN_SPRG_SCRATCH0
719 rfi
720#endif /* CONFIG_PPC_BOOK3S_604 */
721
722#ifdef CONFIG_VMAP_STACK
723 vmap_stack_overflow_exception
724#endif
725
726 __HEAD
727AltiVecUnavailable:
728 EXCEPTION_PROLOG 0xf20 AltiVecUnavailable
729#ifdef CONFIG_ALTIVEC
730 beq 1f
731 bl load_up_altivec /* if from user, just load it up */
732 b fast_exception_return
733#endif /* CONFIG_ALTIVEC */
7341: prepare_transfer_to_handler
735 bl altivec_unavailable_exception
736 b interrupt_return
737
738 __HEAD
739PerformanceMonitor:
740 EXCEPTION_PROLOG 0xf00 PerformanceMonitor
741 prepare_transfer_to_handler
742 bl performance_monitor_exception
743 b interrupt_return
744
745
746 __HEAD
747/*
748 * This code is jumped to from the startup code to copy
749 * the kernel image to physical address PHYSICAL_START.
750 */
relocate_kernel:
	/* r4 = current (source) address of _start, set up by our caller */
	lis	r3,PHYSICAL_START@h	/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	lis	r5,_end-KERNELBASE@h	/* new copy limit: whole image */
	ori	r5,r5,_end-KERNELBASE@l
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu
763
764/*
765 * Copy routine used to copy the kernel to start at physical address 0
766 * and flush and invalidate the caches as needed.
767 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
768 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
769 */
_GLOBAL(copy_and_flush)
	addi	r5,r5,-4		/* pre-bias limit and offset so the */
	addi	r6,r6,-4		/* update-form loop below works */
4:	li	r0,L1_CACHE_BYTES/4	/* words per cache line */
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4			/* undo the pre-bias */
	addi	r6,r6,4
	blr
789
790#ifdef CONFIG_SMP
791 .globl __secondary_start_mpc86xx
792__secondary_start_mpc86xx:
793 mfspr r3, SPRN_PIR
794 stw r3, __secondary_hold_acknowledge@l(0)
795 mr r24, r3 /* cpu # */
796 b __secondary_start
797
798 .globl __secondary_start_pmac_0
799__secondary_start_pmac_0:
800 /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
801 li r24,0
802 b 1f
803 li r24,1
804 b 1f
805 li r24,2
806 b 1f
807 li r24,3
8081:
809 /* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
810 set to map the 0xf0000000 - 0xffffffff region */
811 mfmsr r0
812 rlwinm r0,r0,0,28,26 /* clear DR (0x10) */
813 mtmsr r0
814 isync
815
816 .globl __secondary_start
817__secondary_start:
818 /* Copy some CPU settings from CPU 0 */
819 bl __restore_cpu_setup
820
821 lis r3,-KERNELBASE@h
822 mr r4,r24
823 bl call_setup_cpu /* Call setup_cpu for this CPU */
824 lis r3,-KERNELBASE@h
825 bl init_idle_6xx
826
827 /* get current's stack and current */
828 lis r2,secondary_current@ha
829 tophys(r2,r2)
830 lwz r2,secondary_current@l(r2)
831 tophys(r1,r2)
832 lwz r1,TASK_STACK(r1)
833
834 /* stack */
835 addi r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE
836 li r0,0
837 tophys(r3,r1)
838 stw r0,0(r3)
839
840 /* load up the MMU */
841 bl load_segment_registers
842 bl load_up_mmu
843
844 /* ptr to phys current thread */
845 tophys(r4,r2)
846 addi r4,r4,THREAD /* phys address of our thread_struct */
847 mtspr SPRN_SPRG_THREAD,r4
848BEGIN_MMU_FTR_SECTION
849 lis r4, (swapper_pg_dir - PAGE_OFFSET)@h
850 ori r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
851 rlwinm r4, r4, 4, 0xffff01ff
852 mtspr SPRN_SDR1, r4
853END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)
854
855 /* enable MMU and jump to start_secondary */
856 li r4,MSR_KERNEL
857 lis r3,start_secondary@h
858 ori r3,r3,start_secondary@l
859 mtspr SPRN_SRR0,r3
860 mtspr SPRN_SRR1,r4
861 rfi
862#endif /* CONFIG_SMP */
863
864#ifdef CONFIG_KVM_BOOK3S_HANDLER
865#include "../kvm/book3s_rmhandlers.S"
866#endif
867
868/*
869 * Load stuff into the MMU. Intended to be called with
870 * IR=0 and DR=0.
871 */
/*
 * early_hash_table() - point SDR1 at the boot-time hash table.
 *
 * Forces outstanding PTE updates to complete and clears the TLB, then
 * sets SDR1 to the physical address of `early_hash` with a hash-table
 * mask of 3 (256kB table).  Clobbers r6.
 */
SYM_FUNC_START_LOCAL(early_hash_table)
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6, early_hash - PAGE_OFFSET@h
	ori	r6, r6, 3	/* 256kB table */
	mtspr	SPRN_SDR1, r6
	blr
SYM_FUNC_END(early_hash_table)
884
/*
 * Load the MMU with the state prepared by MMU_init(): flush the TLB,
 * set SDR1 from _SDR1 (hash-table CPUs only), and load all BAT
 * registers from the BATS array.  Intended to be called with IR=0
 * and DR=0.  Clobbers r3-r6.
 */
SYM_FUNC_START_LOCAL(load_up_mmu)
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
BEGIN_MMU_FTR_SECTION
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)		/* DR=0: access the variable by phys addr */
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)

/* Load the BAT registers with the values set up by MMU_init. */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	/* 745x-class CPUs have four extra BAT pairs */
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr
SYM_FUNC_END(load_up_mmu)
915
/*
 * Initialize all 16 segment registers for context 0.
 * The first NUM_USER_SEGMENTS get VSID base 0 (plus SR_NX under KUEP
 * so user pages are no-execute from the kernel); the remaining kernel
 * segments get Kp=1.  Each segment's VSID is skewed by 0x111 from the
 * previous one.  Clobbers r0, r3, r4, ctr.
 */
_GLOBAL(load_segment_registers)
	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
	mtctr	r0		/* for context 0 */
#ifdef CONFIG_PPC_KUEP
	lis	r3, SR_NX@h	/* Kp = 0, Ks = 0, VSID = 0 */
#else
	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
#endif
	li	r4, 0		/* r4 = effective address of the segment */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
	mtctr	r0			/* for context 0 */
	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
	oris	r3, r3, SR_KP@h		/* Kp = 1 */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	blr
939
940/*
941 * This is where the main kernel code starts.
942 */
/*
 * start_here: main kernel entry, reached from early boot with the MMU
 * still off.  In: r31 = boot parameter saved at early entry (presumably
 * the device-tree/board-info pointer passed to machine_init() — set
 * before this chunk, confirm against the early entry code).
 * Sets up current/stack, runs early C init, then rfi's into
 * start_kernel() with translation enabled.  Does not return.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
BEGIN_MMU_FTR_SECTION
	/* No hash table (software TLB load): point SDR1 at the physical
	 * swapper_pg_dir in the format the TLB-miss handlers expect */
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	rlwinm	r4, r4, 4, 0xffff01ff
	mtspr	SPRN_SDR1, r4
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_HPTE_TABLE)

	/* stack: top of init_thread_union; stwu also zeroes the
	 * back-chain word so stack walkers terminate here */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init
	bl	MMU_init_hw_patch

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)		/* continue at 2: with IR/DR off */
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)

	.align	4		/* keep the mtspr/rfi sequence together;
				 * presumably avoids an ifetch boundary mid-
				 * sequence — confirm against commit history */
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfi			/* enables IR/DR, jumps to start_kernel */
1016
1017/*
1018 * An undocumented "feature" of 604e requires that the v bit
1019 * be cleared before changing BAT values.
1020 *
1021 * Also, newer IBM firmware does not clear bat3 and 4 so
1022 * this makes sure it's done.
1023 * -- Cort
1024 */
/*
 * Zero every IBAT/DBAT pair (upper register first is not required
 * here since we write 0, which clears the valid bits — see the 604e
 * note above).  Clobbers r10.
 */
SYM_FUNC_START_LOCAL(clear_bats)
	li	r10,0

	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup have
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr
SYM_FUNC_END(clear_bats)
1070
/*
 * Reload every BAT register from the BATS array while running with
 * translation disabled, so BATs can be changed safely even when the
 * kernel is currently mapped by them.  Saves the caller's MSR (r6)
 * and LR (r7), masks RI and EE so no interrupt or recoverable
 * machine check can land mid-sequence, rfi's to the real-mode body,
 * and finally rfi's back to the caller with the original MSR.
 * Clobbers r0, r3-r5.
 */
_GLOBAL(update_bats)
	lis	r4, 1f@h
	ori	r4, r4, 1f@l
	tophys(r4, r4)		/* phys addr of 1: — we run there untranslated */
	mfmsr	r6		/* r6 = caller's MSR, restored at the end */
	mflr	r7		/* r7 = caller's LR, used as return address */
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
	rlwinm	r0, r6, 0, ~MSR_RI
	rlwinm	r0, r0, 0, ~MSR_EE
	mtmsr	r0		/* interrupts/RI off before touching SRRs */

	.align	4		/* keep the mtspr/rfi sequence together */
	mtspr	SPRN_SRR0, r4
	mtspr	SPRN_SRR1, r3
	rfi			/* switch to real mode at 1: */
1:	bl	clear_bats	/* clear valid bits first (604e erratum) */
	lis	r3, BATS@ha
	addi	r3, r3, BATS@l
	tophys(r3, r3)
	LOAD_BAT(0, r3, r4, r5)
	LOAD_BAT(1, r3, r4, r5)
	LOAD_BAT(2, r3, r4, r5)
	LOAD_BAT(3, r3, r4, r5)
BEGIN_MMU_FTR_SECTION
	/* 745x-class CPUs have four extra BAT pairs */
	LOAD_BAT(4, r3, r4, r5)
	LOAD_BAT(5, r3, r4, r5)
	LOAD_BAT(6, r3, r4, r5)
	LOAD_BAT(7, r3, r4, r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	mtmsr	r3
	mtspr	SPRN_SRR0, r7	/* return to caller ... */
	mtspr	SPRN_SRR1, r6	/* ... with the original MSR */
	rfi
1105
/*
 * Flush the TLB by issuing tlbie for each 4kB page in EAs
 * 0x3ff000 down to 0 — 1024 pages, presumably enough to cover every
 * congruence class of the largest 6xx TLB (confirm per-CPU TLB
 * geometry).  Clobbers r10, cr0.
 */
SYM_FUNC_START_LOCAL(flush_tlbs)
	lis	r10, 0x40	/* r10 = 0x400000, one page above the range */
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b		/* loop until the EA for page 0 is done */
	sync
	blr
SYM_FUNC_END(flush_tlbs)
1114
/*
 * Turn the MMU off and continue at __after_mmu_off.
 * In: r3 = physical load address of _start (presumably the reloc
 * base computed by the caller — confirm at the call site).
 * If the MMU is already off (IR and DR both clear) this returns via
 * blr; otherwise it does NOT return — it rfi's to __after_mmu_off
 * with IR/DR cleared.  Clobbers r0, r3, r4, cr0.
 */
SYM_FUNC_START_LOCAL(mmu_off)
	addi	r4, r3, __after_mmu_off - _start	/* phys continuation addr */
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR		/* MMU enabled? */
	beqlr					/* already off: plain return */
	andc	r3,r3,r0			/* clear IR/DR in the MSR image */

	.align	4		/* keep the mtspr/rfi sequence together */
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	rfi
SYM_FUNC_END(mmu_off)
1128
/*
 * We use one BAT to map up to 256M of RAM at _PAGE_OFFSET.
 * IBAT0/DBAT0 pair: EA = PAGE_OFFSET, RPN = its physical address,
 * BL_256M, supervisor-valid, R/W (plus M=1 coherency on SMP).
 * Clobbers r8, r11.
 */
SYM_FUNC_START_LOCAL(initial_bats)
	lis	r11,PAGE_OFFSET@h
	tophys(r8,r11)		/* r8 = physical base for the BAT lower half */
#ifdef CONFIG_SMP
	ori	r8,r8,0x12	/* R/W access, M=1 */
#else
	ori	r8,r8,2		/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
	isync				/* BAT change must complete before use */
	blr
SYM_FUNC_END(initial_bats)
1147
1148#ifdef CONFIG_BOOTX_TEXT
SYM_FUNC_START_LOCAL(setup_disp_bat)
	/*
	 * setup the display bat prepared for us in prom.c:
	 * disp_BAT[0] is the DBAT3 upper half, disp_BAT[1] the lower.
	 * Clobbers r3, r8, r11, cr0 (reloc_offset returns offset in r3).
	 */
	mflr	r8		/* preserve LR across the helper call */
	bl	reloc_offset
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0	/* NOTE(review): this tests the *address* of
				 * disp_BAT, which looks always non-zero —
				 * possibly meant to test its contents;
				 * confirm against prom.c before changing */
	beqlr
	lwz	r11,0(r8)	/* r11 = prepared DBAT3U value */
	lwz	r8,4(r8)	/* r8  = prepared DBAT3L value */
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
	blr
SYM_FUNC_END(setup_disp_bat)
1166#endif /* CONFIG_BOOTX_TEXT */
1167
1168#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
/*
 * Map 1MB at 0xf0000000 (EA == RA) through DBAT1 for early CPM debug
 * I/O: cache-inhibited, guarded, R/W (lower half 0x2a), BL_1M,
 * supervisor-valid (Vs=1).  Clobbers r8, r11.
 */
SYM_FUNC_START_LOCAL(setup_cpm_bat)
	lis	r8, 0xf000		/* physical base 0xf0000000 */
	ori	r8, r8, 0x002a		/* WIMG = cache-inhibited+guarded, PP = R/W */
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000		/* effective base 0xf0000000 */
	ori	r11, r11, (BL_1M << 2) | 2	/* 1MB block, Vs=1 */
	mtspr	SPRN_DBAT1U, r11

	blr
SYM_FUNC_END(setup_cpm_bat)
1180#endif
1181
1182#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
/*
 * Map the USB Gecko EXI register window through DBAT1 for early debug
 * output: 128kB, uncached+guarded, R/W, supervisor-valid.  The
 * physical base depends on the console (Flipper vs Hollywood chipset).
 * Clobbers r8, r11.
 */
SYM_FUNC_START_LOCAL(setup_usbgecko_bat)
	/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00	/* Flipper I/O at phys 0x0c000000 */
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00	/* Hollywood I/O at phys 0x0d000000 */
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe	/* top 128K */
	ori	r8, r8, 0x002a	/* uncached, guarded ,rw */
	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
SYM_FUNC_END(setup_usbgecko_bat)
1203#endif
1204
1205 .data