1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * PowerPC version
4 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
5 *
6 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
7 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
8 * Adapted for Power Macintosh by Paul Mackerras.
9 * Low-level exception handlers and MMU support
10 * rewritten by Paul Mackerras.
11 * Copyright (C) 1996 Paul Mackerras.
12 * MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
13 *
14 * This file contains the low-level support and setup for the
15 * PowerPC platform, including trap and interrupt dispatch.
16 * (The PPC 8xx embedded CPUs use head_8xx.S instead.)
17 */
18
19#include <linux/init.h>
20#include <linux/pgtable.h>
21#include <asm/reg.h>
22#include <asm/page.h>
23#include <asm/mmu.h>
24#include <asm/cputable.h>
25#include <asm/cache.h>
26#include <asm/thread_info.h>
27#include <asm/ppc_asm.h>
28#include <asm/asm-offsets.h>
29#include <asm/ptrace.h>
30#include <asm/bug.h>
31#include <asm/kvm_book3s_asm.h>
32#include <asm/export.h>
33#include <asm/feature-fixups.h>
34
35#include "head_32.h"
36
/*
 * LOAD_BAT(n, reg, RA, RB): load BAT pair n from the table at 'reg'.
 * Each table entry is 16 bytes: IBATnU, IBATnL, DBATnU, DBATnL.
 * The upper (valid) half is zeroed before loading so the BAT is never
 * valid while only half-updated.  RA and RB are scratch registers.
 */
/* 601 only have IBAT */
#ifdef CONFIG_PPC_BOOK3S_601
#define LOAD_BAT(n, reg, RA, RB)	\
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB
#else
#define LOAD_BAT(n, reg, RA, RB)	\
	/* see the comment for clear_bats() -- Cort */ \
	li	RA,0;			\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	lwz	RA,(n*16)+0(reg);	\
	lwz	RB,(n*16)+4(reg);	\
	mtspr	SPRN_IBAT##n##U,RA;	\
	mtspr	SPRN_IBAT##n##L,RB;	\
	lwz	RA,(n*16)+8(reg);	\
	lwz	RB,(n*16)+12(reg);	\
	mtspr	SPRN_DBAT##n##U,RA;	\
	mtspr	SPRN_DBAT##n##L,RB
#endif
61
	__HEAD
	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
	.stabs	"head_32.S",N_SO,0,0,0f
0:
_ENTRY(_stext);

/*
 * _start is defined this way because the XCOFF loader in the OpenFirmware
 * on the powermac expects the entry point to be a procedure descriptor.
 */
_ENTRY(_start);
	/*
	 * These are here for legacy reasons, the kernel used to
	 * need to look like a coff function entry for the pmac
	 * but we're always started by some kind of bootloader now.
	 * -- Cort
	 */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop	/* used by __secondary_hold on prep (mtx) and chrp smp */
	nop
82
/* PMAC
 * Enter here with the kernel text, data and bss loaded starting at
 * 0, running with virtual == physical mapping.
 * r5 points to the prom entry point (the client interface handler
 * address). Address translation is turned on, with the prom
 * managing the hash table. Interrupts are disabled. The stack
 * pointer (r1) points to just below the end of the half-meg region
 * from 0x380000 - 0x400000, which is mapped in already.
 *
 * If we are booted from MacOS via BootX, we enter with the kernel
 * image loaded somewhere, and the following values in registers:
 *  r3: 'BooX' (0x426f6f58)
 *  r4: virtual address of boot_infos_t
 *  r5: 0
 *
 * PREP
 * This is jumped to on prep systems right after the kernel is relocated
 * to its proper place in memory by the boot loader.  The expected layout
 * of the regs is:
 *  r3: ptr to residual data
 *  r4: initrd_start or if no initrd then 0
 *  r5: initrd_end - unused if r4 is 0
 *  r6: Start of command line string
 *  r7: End of command line string
 *
 * This just gets a minimal mmu environment setup so we can call
 * start_here() to do the real work.
 * -- Cort
 */

	.globl	__start
__start:
/*
 * We have to do any OF calls before we map ourselves to KERNELBASE,
 * because OF may have I/O devices mapped into that area
 * (particularly on CHRP).
 */
	cmpwi	0,r5,0		/* r5 != 0: entered from Open Firmware */
	beq	1f

#ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE
	/* find out where we are now */
	bcl	20,31,$+4
0:	mflr	r8			/* r8 = runtime addr here */
	addis	r8,r8,(_stext - 0b)@ha
	addi	r8,r8,(_stext - 0b)@l	/* current runtime base addr */
	bl	prom_init
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */

	/* We never return.  We also hit that trap if trying to boot
	 * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */
	trap

/*
 * Check for BootX signature when supporting PowerMac and branch to
 * appropriate trampoline if it's present
 */
#ifdef CONFIG_PPC_PMAC
1:	lis	r31,0x426f	/* r31 = 'BooX' magic (0x426f6f58) */
	ori	r31,r31,0x6f58
	cmpw	0,r3,r31
	bne	1f
	bl	bootx_init	/* bootx_init should not return; trap if it does */
	trap
#endif /* CONFIG_PPC_PMAC */

1:	mr	r31,r3			/* save device tree ptr */
	li	r24,0			/* cpu # */
151
/*
 * early_init() does the early machine identification and does
 * the necessary low-level setup and clears the BSS
 * -- Cort <cort@fsmlabs.com>
 */
	bl	early_init

/* Switch MMU off, clear BATs and flush TLB. At this point, r3 contains
 * the physical address we are running at, returned by early_init()
 */
	bl	mmu_off
__after_mmu_off:
	bl	clear_bats
	bl	flush_tlbs

	bl	initial_bats
	bl	load_segment_registers
#ifdef CONFIG_KASAN
	bl	early_hash_table
#endif
#if defined(CONFIG_BOOTX_TEXT)
	bl	setup_disp_bat		/* map the display for early text output */
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
	bl	setup_cpm_bat
#endif
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
	bl	setup_usbgecko_bat
#endif

/*
 * Call setup_cpu for CPU 0 and initialize 6xx Idle
 */
	bl	reloc_offset
	li	r24,0			/* cpu# */
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_PPC_BOOK3S_32
	bl	reloc_offset
	bl	init_idle_6xx
#endif /* CONFIG_PPC_BOOK3S_32 */
192
193
/*
 * We need to run with _start at physical address 0.
 * On CHRP, we are loaded at 0x10000 since OF on CHRP uses
 * the exception vectors at 0 (and therefore this copy
 * overwrites OF's exception vectors with our own).
 * The MMU is off at this point.
 */
	bl	reloc_offset
	mr	r26,r3			/* r26 = reloc offset, used by relocate_kernel */
	addis	r4,r3,KERNELBASE@h	/* current address of _start */
	lis	r5,PHYSICAL_START@h
	cmplw	0,r4,r5			/* already running at PHYSICAL_START? */
	bne	relocate_kernel
/*
 * we now have the 1st 16M of ram mapped with the bats.
 * prep needs the mmu to be turned on here, but pmac already has it on.
 * this shouldn't bother the pmac since it just gets turned on again
 * as we jump to our code at KERNELBASE. -- Cort
 * Actually no, pmac doesn't have it on any more. BootX enters with MMU
 * off, and in other cases, we now turn it off before changing BATs above.
 */
turn_on_mmu:
	mfmsr	r0
	ori	r0,r0,MSR_DR|MSR_IR|MSR_RI	/* target MSR: translation on */
	mtspr	SPRN_SRR1,r0
	lis	r0,start_here@h
	ori	r0,r0,start_here@l
	mtspr	SPRN_SRR0,r0		/* rfi will jump to start_here */
	SYNC
	RFI				/* enables MMU */
224
/*
 * We need __secondary_hold as a place to hold the other cpus on
 * an SMP machine, even when we are running a UP kernel.
 * Secondaries acknowledge by writing their r3 to
 * __secondary_hold_acknowledge, then spin on the word at physical
 * address 0 until it equals their cpu number.
 */
	. = 0xc0			/* for prep bootloader */
	li	r3,1			/* MTX only has 1 cpu */
	.globl	__secondary_hold
__secondary_hold:
	/* tell the master we're here */
	stw	r3,__secondary_hold_acknowledge@l(0)
#ifdef CONFIG_SMP
100:	lwz	r4,0(0)
	/* wait until we're told to start */
	cmpw	0,r4,r3
	bne	100b
	/* our cpu # was at addr 0 - go */
	mr	r24,r3			/* cpu # */
	b	__secondary_start
#else
	b	.
#endif /* CONFIG_SMP */

	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.long	0
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.long	-1
253
/* System reset */
/* core99 pmac starts the secondary here by changing the vector, and
   putting it back to what it was (unknown_exception) when done. */
	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)

/* Machine check */
/*
 * On CHRP, this is complicated by the fact that we could get a
 * machine check inside RTAS, and we have no guarantee that certain
 * critical registers will have the values we expect. The set of
 * registers that might have bad values includes all the GPRs
 * and all the BATs. We indicate that we are in RTAS by putting
 * a non-zero value, the address of the exception frame to use,
 * in thread.rtas_sp. The machine check handler checks thread.rtas_sp
 * and uses its value if it is non-zero.
 * (Other exception handlers assume that r1 is a valid kernel stack
 * pointer when we take an exception from supervisor mode.)
 * -- paulus.
 */
	. = 0x200
	DO_KVM  0x200
MachineCheck:
	EXCEPTION_PROLOG_0
#ifdef CONFIG_VMAP_STACK
	li	r11, MSR_KERNEL & ~(MSR_IR | MSR_RI) /* can take DTLB miss */
	mtmsr	r11
	isync
#endif
#ifdef CONFIG_PPC_CHRP
	/* If thread.rtas_sp is non-zero we were in RTAS: skip the
	 * normal PROLOG_1 and use the RTAS exception frame instead. */
	mfspr	r11, SPRN_SPRG_THREAD
	tovirt_vmstack r11, r11
	lwz	r11, RTAS_SP(r11)
	cmpwi	cr1, r11, 0
	bne	cr1, 7f
#endif /* CONFIG_PPC_CHRP */
	EXCEPTION_PROLOG_1 for_rtas=1
7:	EXCEPTION_PROLOG_2
	addi	r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
#ifdef CONFIG_VMAP_STACK
	mfspr	r4, SPRN_SPRG_THREAD
	tovirt(r4, r4)
	lwz	r4, RTAS_SP(r4)
	cmpwi	cr1, r4, 0
#endif
	beq	cr1, machine_check_tramp
	twi	31, 0, 0		/* trap if the machine check hit in RTAS */
#else
	b	machine_check_tramp
#endif
304
/* Data access exception (0x300).
 * With a hash MMU, first try a software hash-table reload; otherwise
 * (or if that fails) hand off to handle_page_fault via the trampolines.
 */
	. = 0x300
	DO_KVM  0x300
DataAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mfspr	r10, SPRN_SPRG_THREAD
BEGIN_MMU_FTR_SECTION
	stw	r11, THR11(r10)
	mfspr	r10, SPRN_DSISR
	mfcr	r11
#ifdef CONFIG_PPC_KUAP
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	mfspr	r10, SPRN_SPRG_THREAD
	beq	hash_page_dsi		/* no "bad fault" bits set: try the hash table */
.Lhash_page_dsi_cont:
	mtcr	r11
	lwz	r11, THR11(r10)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r11, SPRN_DAR
	stw	r11, DAR(r10)		/* stash DAR/DSISR/SRR0/SRR1 in the thread struct */
	mfspr	r11, SPRN_DSISR
	stw	r11, DSISR(r10)
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	b	handle_page_fault_tramp_1
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG handle_dar_dsisr=1
	get_and_save_dar_dsisr_on_stack	r4, r5, r11
BEGIN_MMU_FTR_SECTION
#ifdef CONFIG_PPC_KUAP
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
#else
	andis.	r0, r5, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
#endif
	bne	handle_page_fault_tramp_2	/* if not, try to put a PTE */
	rlwinm	r3, r5, 32 - 15, 21, 21		/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	b	handle_page_fault_tramp_1
FTR_SECTION_ELSE
	b	handle_page_fault_tramp_2
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */
358
/* Instruction access exception (0x400).
 * If SRR1 says no PTE was found, try a software hash-table reload
 * first; otherwise go straight to handle_page_fault.
 */
	. = 0x400
	DO_KVM  0x400
InstructionAccess:
#ifdef CONFIG_VMAP_STACK
	mtspr	SPRN_SPRG_SCRATCH0,r10
	mtspr	SPRN_SPRG_SCRATCH1,r11
	mfspr	r10, SPRN_SPRG_THREAD
	mfspr	r11, SPRN_SRR0
	stw	r11, SRR0(r10)
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
	stw	r11, SRR1(r10)
	mfcr	r10
BEGIN_MMU_FTR_SECTION
	andis.	r11, r11, SRR1_ISI_NOPT@h	/* no pte found? */
	bne	hash_page_isi
.Lhash_page_isi_cont:
	mfspr	r11, SPRN_SRR1		/* check whether user or kernel */
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
	andi.	r11, r11, MSR_PR

	EXCEPTION_PROLOG_1
	EXCEPTION_PROLOG_2
#else	/* CONFIG_VMAP_STACK */
	EXCEPTION_PROLOG
	andis.	r0,r9,SRR1_ISI_NOPT@h	/* no pte found? */
	beq	1f			/* if so, try to put a PTE */
	li	r3,0			/* into the hash table */
	mr	r4,r12			/* SRR0 is fault address */
BEGIN_MMU_FTR_SECTION
	bl	hash_page
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif	/* CONFIG_VMAP_STACK */
1:	mr	r4,r12			/* faulting address for handle_page_fault */
	andis.	r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
	stw	r4, _DAR(r11)
	EXC_XFER_LITE(0x400, handle_page_fault)
396
/* External interrupt */
	EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)

/* Alignment exception */
	. = 0x600
	DO_KVM  0x600
Alignment:
	EXCEPTION_PROLOG handle_dar_dsisr=1
	save_dar_dsisr_on_stack r4, r5, r11
	addi	r3,r1,STACK_FRAME_OVERHEAD
	b	alignment_exception_tramp	/* real handler lives past 0x3000 */

/* Program check exception */
	EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)

/* Floating-point unavailable */
	. = 0x800
	DO_KVM  0x800
FPUnavailable:
BEGIN_FTR_SECTION
/*
 * Certain Freescale cores don't have a FPU and treat fp instructions
 * as a FP Unavailable exception.  Redirect to illegal/emulation handling.
 */
	b 	ProgramCheck
END_FTR_SECTION_IFSET(CPU_FTR_FPU_UNAVAILABLE)
	EXCEPTION_PROLOG
	beq	1f
	bl	load_up_fpu		/* if from user, just load it up */
	b	fast_exception_return
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0x800, kernel_fp_unavailable_exception)

/* Decrementer */
	EXCEPTION(0x900, Decrementer, timer_interrupt, EXC_XFER_LITE)

	EXCEPTION(0xa00, Trap_0a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0xb00, Trap_0b, unknown_exception, EXC_XFER_STD)

/* System call */
	. = 0xc00
	DO_KVM  0xc00
SystemCall:
	SYSCALL_ENTRY	0xc00

/* Single step - not used on 601 */
	EXCEPTION(0xd00, SingleStep, single_step_exception, EXC_XFER_STD)
	EXCEPTION(0xe00, Trap_0e, unknown_exception, EXC_XFER_STD)

/*
 * The Altivec unavailable trap is at 0x0f20.  Foo.
 * We effectively remap it to 0x3000.
 * We include an altivec unavailable exception vector even if
 * not configured for Altivec, so that you can't panic a
 * non-altivec kernel running on a machine with altivec just
 * by executing an altivec instruction.
 */
	. = 0xf00
	DO_KVM  0xf00
	b	PerformanceMonitor

	. = 0xf20
	DO_KVM  0xf20
	b	AltiVecUnavailable
461
/*
 * Handle TLB miss for instruction on 603/603e.
 * Note: we get an alternate set of r0 - r3 to use automatically.
 * Walks the two-level page table for the address in IMISS and loads
 * the translation with tlbli, or falls through to a real ISI.
 */
	. = 0x1000
InstructionTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_IMISS
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
#endif
	mfspr	r2, SPRN_SPRG_PGDIR
#ifdef CONFIG_SWAP
	li	r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
#else
	li	r1,_PAGE_PRESENT | _PAGE_EXEC
#endif
#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC)
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
#endif
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	InstructionAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	InstructionAddressInvalid /* return if access not permitted */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1, r1, 0xe06		/* clear out reserved bits */
	andc	r1, r0, r1		/* PP = user? 1 : 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	tlbli	r3			/* load the ITLB entry */
	mfspr	r3,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r3
	rfi
InstructionAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */

	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1	/* (shouldn't be needed) */
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	or	r2,r2,r1
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_IMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	rlwimi	r2,r2,1,30,30	/* change 1 -> 3 */
	xor	r1,r1,r2
	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16	/* leave TLB-miss GPR shadow mode */
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	InstructionAccess	/* take the real ISI path */
530
/*
 * Handle TLB miss for DATA Load operation on 603/603e.
 * Same table walk as the instruction miss, for the address in DMISS;
 * loads the DTLB with tlbld or falls through to a real DSI.
 */
	. = 0x1100
DataLoadTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SPRG_PGDIR
#ifdef CONFIG_SWAP
	li	r1, _PAGE_PRESENT | _PAGE_ACCESSED
#else
	li	r1, _PAGE_PRESENT
#endif
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwinm	r1,r0,32-9,30,30	/* _PAGE_RW -> PP msb */
	rlwimi	r0,r0,32-1,30,30	/* _PAGE_USER -> PP msb */
	rlwimi	r0,r0,32-1,31,31	/* _PAGE_USER -> PP lsb */
	ori	r1,r1,0xe04		/* clear out reserved bits */
	andc	r1,r0,r1		/* PP = user? rw? 1: 3: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	/* Software-managed DTLB LRU: toggle this set's bit in the LRU
	 * shadow and mirror it into SRR1 so the hardware picks the way. */
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr	SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr	SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi
DataAddressInvalid:
	mfspr	r3,SPRN_SRR1
	rlwinm	r1,r3,9,6,6	/* Get load/store bit */
	addis	r1,r1,0x2000
	mtspr	SPRN_DSISR,r1
	andi.	r2,r3,0xFFFF	/* Clear upper bits of SRR1 */
	mtspr	SPRN_SRR1,r2
	mfspr	r1,SPRN_DMISS	/* Get failing address */
	rlwinm.	r2,r2,0,31,31	/* Check for little endian access */
	beq	20f		/* Jump if big endian */
	xori	r1,r1,3
20:	mtspr	SPRN_DAR,r1	/* Set fault address */
	mfmsr	r0		/* Restore "normal" registers */
	xoris	r0,r0,MSR_TGPR>>16	/* leave TLB-miss GPR shadow mode */
	mtcrf	0x80,r3		/* Restore CR0 */
	mtmsr	r0
	b	DataAccess	/* take the real DSI path */
610
/*
 * Handle TLB miss for DATA Store on 603/603e.
 * Like the load miss, but additionally requires _PAGE_RW and
 * _PAGE_DIRTY so a clean page still faults into the DSI path.
 */
	. = 0x1200
DataStoreTLBMiss:
/*
 * r0:	scratch
 * r1:	linux style pte ( later becomes ppc hardware pte )
 * r2:	ptr to linux-style pte
 * r3:	scratch
 */
	/* Get PTE (linux-style) and check access */
	mfspr	r3,SPRN_DMISS
	lis	r1, TASK_SIZE@h		/* check if kernel address */
	cmplw	0,r1,r3
	mfspr	r2, SPRN_SPRG_PGDIR
#ifdef CONFIG_SWAP
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT | _PAGE_ACCESSED
#else
	li	r1, _PAGE_RW | _PAGE_DIRTY | _PAGE_PRESENT
#endif
	bgt-	112f
	lis	r2, (swapper_pg_dir - PAGE_OFFSET)@ha	/* if kernel address, use */
	addi	r2, r2, (swapper_pg_dir - PAGE_OFFSET)@l	/* kernel page table */
112:	rlwimi	r2,r3,12,20,29		/* insert top 10 bits of address */
	lwz	r2,0(r2)		/* get pmd entry */
	rlwinm.	r2,r2,0,0,19		/* extract address of pte page */
	beq-	DataAddressInvalid	/* return if no mapping */
	rlwimi	r2,r3,22,20,29		/* insert next 10 bits of address */
	lwz	r0,0(r2)		/* get linux-style pte */
	andc.	r1,r1,r0		/* check access & ~permission */
	bne-	DataAddressInvalid	/* return if access not permitted */
	/*
	 * NOTE! We are assuming this is not an SMP system, otherwise
	 * we would need to update the pte atomically with lwarx/stwcx.
	 */
	/* Convert linux-style PTE to low word of PPC-style PTE */
	rlwimi	r0,r0,32-2,31,31	/* _PAGE_USER -> PP lsb */
	li	r1,0xe06		/* clear out reserved bits & PP msb */
	andc	r1,r0,r1		/* PP = user? 1: 0 */
BEGIN_FTR_SECTION
	rlwinm	r1,r1,0,~_PAGE_COHERENT	/* clear M (coherence not required) */
END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
	mtspr	SPRN_RPA,r1
	mfspr	r2,SPRN_SRR1		/* Need to restore CR0 */
	mtcrf	0x80,r2
BEGIN_MMU_FTR_SECTION
	/* Software-managed DTLB LRU, same scheme as DataLoadTLBMiss */
	li	r0,1
	mfspr	r1,SPRN_SPRG_603_LRU
	rlwinm	r2,r3,20,27,31		/* Get Address bits 15:19 */
	slw	r0,r0,r2
	xor	r1,r0,r1
	srw	r0,r1,r2
	mtspr	SPRN_SPRG_603_LRU,r1
	mfspr	r2,SPRN_SRR1
	rlwimi	r2,r0,31-14,14,14
	mtspr	SPRN_SRR1,r2
END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
	tlbld	r3
	rfi
671
/* Default the optional handlers to unknown_exception when their
 * subsystems are not configured. */
#ifndef CONFIG_ALTIVEC
#define altivec_assist_exception	unknown_exception
#endif

#ifndef CONFIG_TAU_INT
#define TAUException	unknown_exception
#endif

	EXCEPTION(0x1300, Trap_13, instruction_breakpoint_exception, EXC_XFER_STD)
	EXCEPTION(0x1400, SMI, SMIException, EXC_XFER_STD)
	EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1600, Trap_16, altivec_assist_exception, EXC_XFER_STD)
	EXCEPTION(0x1700, Trap_17, TAUException, EXC_XFER_STD)
	EXCEPTION(0x1800, Trap_18, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1900, Trap_19, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1a00, Trap_1a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1b00, Trap_1b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1c00, Trap_1c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1d00, Trap_1d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1e00, Trap_1e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x1f00, Trap_1f, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2000, RunMode, RunModeException, EXC_XFER_STD)
	EXCEPTION(0x2100, Trap_21, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2200, Trap_22, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2300, Trap_23, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2400, Trap_24, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2500, Trap_25, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2600, Trap_26, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2700, Trap_27, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2800, Trap_28, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2900, Trap_29, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2a00, Trap_2a, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2b00, Trap_2b, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2c00, Trap_2c, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2d00, Trap_2d, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2e00, Trap_2e, unknown_exception, EXC_XFER_STD)
	EXCEPTION(0x2f00, Trap_2f, unknown_exception, EXC_XFER_STD)

	. = 0x3000		/* end of the fixed vector area */

/* Out-of-line continuations branched to from the vectors above,
 * placed past 0x3000 where space is no longer constrained. */
machine_check_tramp:
	EXC_XFER_STD(0x200, machine_check_exception)

alignment_exception_tramp:
	EXC_XFER_STD(0x600, alignment_exception)

handle_page_fault_tramp_1:
#ifdef CONFIG_VMAP_STACK
	EXCEPTION_PROLOG_2 handle_dar_dsisr=1
#endif
	lwz	r4, _DAR(r11)
	lwz	r5, _DSISR(r11)
	/* fall through */
handle_page_fault_tramp_2:
	EXC_XFER_LITE(0x300, handle_page_fault)
727
#ifdef CONFIG_VMAP_STACK
/* Save/restore the volatile registers that calling hash_page() from an
 * exception prolog would clobber, using slots in the thread struct. */
.macro save_regs_thread		thread
	stw	r0, THR0(\thread)
	stw	r3, THR3(\thread)
	stw	r4, THR4(\thread)
	stw	r5, THR5(\thread)
	stw	r6, THR6(\thread)
	stw	r8, THR8(\thread)
	stw	r9, THR9(\thread)
	mflr	r0
	stw	r0, THLR(\thread)
	mfctr	r0
	stw	r0, THCTR(\thread)
.endm

.macro restore_regs_thread	thread
	lwz	r0, THLR(\thread)
	mtlr	r0
	lwz	r0, THCTR(\thread)
	mtctr	r0
	lwz	r0, THR0(\thread)
	lwz	r3, THR3(\thread)
	lwz	r4, THR4(\thread)
	lwz	r5, THR5(\thread)
	lwz	r6, THR6(\thread)
	lwz	r8, THR8(\thread)
	lwz	r9, THR9(\thread)
.endm

/* Called from DataAccess: try a hash-table reload for a DSI, then
 * resume the prolog at .Lhash_page_dsi_cont. */
hash_page_dsi:
	save_regs_thread	r10
	mfdsisr	r3
	mfdar	r4
	mfsrr0	r5
	mfsrr1	r9
	rlwinm	r3, r3, 32 - 15, _PAGE_RW	/* DSISR_STORE -> _PAGE_RW */
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	b	.Lhash_page_dsi_cont

/* Called from InstructionAccess: try a hash-table reload for an ISI,
 * then resume at .Lhash_page_isi_cont. */
hash_page_isi:
	mr	r11, r10
	mfspr	r10, SPRN_SPRG_THREAD
	save_regs_thread	r10
	li	r3, 0
	lwz	r4, SRR0(r10)
	lwz	r9, SRR1(r10)
	bl	hash_page
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	mr	r10, r11
	b	.Lhash_page_isi_cont

/* Fast exit used when hash_page resolved the fault: restore the
 * scratch state saved by the vector and return from the exception. */
	.globl fast_hash_page_return
fast_hash_page_return:
	andis.	r10, r9, SRR1_ISI_NOPT@h	/* Set on ISI, cleared on DSI */
	mfspr	r10, SPRN_SPRG_THREAD
	restore_regs_thread r10
	bne	1f

	/* DSI */
	mtcr	r11
	lwz	r11, THR11(r10)
	mfspr	r10, SPRN_SPRG_SCRATCH0
	SYNC
	RFI

1:	/* ISI */
	mtcr	r11
	mfspr	r11, SPRN_SPRG_SCRATCH1
	mfspr	r10, SPRN_SPRG_SCRATCH0
	SYNC
	RFI

stack_overflow:
	vmap_stack_overflow_exception
#endif
806
/* Target of the branch at vector 0xf20 (AltiVec unavailable). */
AltiVecUnavailable:
	EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
	beq	1f
	bl	load_up_altivec		/* if from user, just load it up */
	b	fast_exception_return
#endif /* CONFIG_ALTIVEC */
1:	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_LITE(0xf20, altivec_unavailable_exception)

/* Target of the branch at vector 0xf00 (performance monitor). */
PerformanceMonitor:
	EXCEPTION_PROLOG
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_STD(0xf00, performance_monitor_exception)
821
822
/*
 * This code is jumped to from the startup code to copy
 * the kernel image to physical address PHYSICAL_START.
 * On entry r26 holds the offset we are running at (from reloc_offset)
 * and r4 the current address of _start; r25 becomes the image size
 * (klimit - KERNELBASE).
 */
relocate_kernel:
	addis	r9,r26,klimit@ha	/* fetch klimit */
	lwz	r25,klimit@l(r9)
	addis	r25,r25,-KERNELBASE@h
	lis	r3,PHYSICAL_START@h	/* Destination base address */
	li	r6,0			/* Destination offset */
	li	r5,0x4000		/* # bytes of memory to copy */
	bl	copy_and_flush		/* copy the first 0x4000 bytes */
	addi	r0,r3,4f@l		/* jump to the address of 4f */
	mtctr	r0			/* in copy and do the rest. */
	bctr				/* jump to the copy */
4:	mr	r5,r25
	bl	copy_and_flush		/* copy the rest */
	b	turn_on_mmu
841
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 * Copies one L1 cache line per outer iteration, then flushes the
 * dcache line to memory and invalidates the matching icache line so
 * the copied code is safe to execute.
 */
_ENTRY(copy_and_flush)
	addi	r5,r5,-4	/* pre-bias: loop uses pre-increment by 4 */
	addi	r6,r6,-4
4:	li	r0,L1_CACHE_BYTES/4	/* words per cache line */
	mtctr	r0
3:	addi	r6,r6,4			/* copy a cache line */
	lwzx	r0,r6,r4
	stwx	r0,r6,r3
	bdnz	3b
	dcbst	r6,r3			/* write it to memory */
	sync
	icbi	r6,r3			/* flush the icache line */
	cmplw	0,r6,r5
	blt	4b
	sync				/* additional sync needed on g4 */
	isync
	addi	r5,r5,4			/* undo the pre-bias */
	addi	r6,r6,4
	blr
867
#ifdef CONFIG_SMP
/* Entry for secondary CPUs on mpc86xx boards: acknowledge with our
 * PIR and fall into the common secondary path. */
	.globl	__secondary_start_mpc86xx
__secondary_start_mpc86xx:
	mfspr	r3, SPRN_PIR
	stw	r3, __secondary_hold_acknowledge@l(0)
	mr	r24, r3			/* cpu # */
	b	__secondary_start

	.globl	__secondary_start_pmac_0
__secondary_start_pmac_0:
	/* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
	li	r24,0
	b	1f
	li	r24,1
	b	1f
	li	r24,2
	b	1f
	li	r24,3
1:
	/* on powersurge, we come in here with IR=0 and DR=1, and DBAT 0
	   set to map the 0xf0000000 - 0xffffffff region */
	mfmsr	r0
	rlwinm	r0,r0,0,28,26		/* clear DR (0x10) */
	SYNC
	mtmsr	r0
	isync

/* Common secondary-CPU startup: r24 = our cpu number.  Set up this
 * CPU, find its stack and thread, load the MMU and rfi into
 * start_secondary with translation on. */
	.globl	__secondary_start
__secondary_start:
	/* Copy some CPU settings from CPU 0 */
	bl	__restore_cpu_setup

	lis	r3,-KERNELBASE@h
	mr	r4,r24
	bl	call_setup_cpu		/* Call setup_cpu for this CPU */
#ifdef CONFIG_PPC_BOOK3S_32
	lis	r3,-KERNELBASE@h
	bl	init_idle_6xx
#endif /* CONFIG_PPC_BOOK3S_32 */

	/* get current's stack and current */
	lis	r2,secondary_current@ha
	tophys(r2,r2)
	lwz	r2,secondary_current@l(r2)
	tophys(r1,r2)
	lwz	r1,TASK_STACK(r1)

	/* stack */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r0,0
	tophys(r3,r1)
	stw	r0,0(r3)		/* terminate the backchain */

	/* load up the MMU */
	bl	load_segment_registers
	bl	load_up_mmu

	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* phys address of our thread_struct */
	mtspr	SPRN_SPRG_THREAD,r4
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	mtspr	SPRN_SPRG_PGDIR, r4

	/* enable MMU and jump to start_secondary */
	li	r4,MSR_KERNEL
	lis	r3,start_secondary@h
	ori	r3,r3,start_secondary@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI
#endif /* CONFIG_SMP */
942
#ifdef CONFIG_KVM_BOOK3S_HANDLER
#include "../kvm/book3s_rmhandlers.S"
#endif

/*
 * Those generic dummy functions are kept for CPUs not
 * included in CONFIG_PPC_BOOK3S_32
 * (no per-CPU setup to save/restore: just return).
 */
#if !defined(CONFIG_PPC_BOOK3S_32)
_ENTRY(__save_cpu_setup)
	blr
_ENTRY(__restore_cpu_setup)
	blr
#endif /* !defined(CONFIG_PPC_BOOK3S_32) */
957
/*
 * Load stuff into the MMU.  Intended to be called with
 * IR=0 and DR=0.
 */
#ifdef CONFIG_KASAN
/* Point SDR1 at a small early hash table so KASAN can take hash
 * faults before the real hash table is set up. */
early_hash_table:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6, early_hash - PAGE_OFFSET@h
	ori	r6, r6, 3	/* 256kB table */
	mtspr	SPRN_SDR1, r6
	blr
#endif
975
/* Flush the TLB, point SDR1 at the hash table set up by MMU_init
 * (stashed in _SDR1), and load the BAT registers from the BATS table.
 * Called with translation off; r3-r6 are clobbered. */
load_up_mmu:
	sync			/* Force all PTE updates to finish */
	isync
	tlbia			/* Clear all TLB entries */
	sync			/* wait for tlbia/tlbie to finish */
	TLBSYNC			/* ... on all CPUs */
	/* Load the SDR1 register (hash table base & size) */
	lis	r6,_SDR1@ha
	tophys(r6,r6)
	lwz	r6,_SDR1@l(r6)
	mtspr	SPRN_SDR1,r6

/* Load the BAT registers with the values set up by MMU_init.
   MMU_init takes care of whether we're on a 601 or not. */
	lis	r3,BATS@ha
	addi	r3,r3,BATS@l
	tophys(r3,r3)
	LOAD_BAT(0,r3,r4,r5)
	LOAD_BAT(1,r3,r4,r5)
	LOAD_BAT(2,r3,r4,r5)
	LOAD_BAT(3,r3,r4,r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4,r3,r4,r5)
	LOAD_BAT(5,r3,r4,r5)
	LOAD_BAT(6,r3,r4,r5)
	LOAD_BAT(7,r3,r4,r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr
1004
/* Initialize the 16 segment registers for context 0: the user
 * segments first (with Nx/Ks applied when KUEP/KUAP are enabled),
 * then the kernel segments with Kp set and Nx/Ks cleared.
 * Clobbers r0, r3, r4 and ctr. */
load_segment_registers:
	li	r0, NUM_USER_SEGMENTS /* load up user segment register values */
	mtctr	r0		/* for context 0 */
	li	r3, 0		/* Kp = 0, Ks = 0, VSID = 0 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r4, 0
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	li	r0, 16 - NUM_USER_SEGMENTS /* load up kernel segment registers */
	mtctr	r0			/* for context 0 */
	rlwinm	r3, r3, 0, ~SR_NX	/* Nx = 0 */
	rlwinm	r3, r3, 0, ~SR_KS	/* Ks = 0 */
	oris	r3, r3, SR_KP@h		/* Kp = 1 */
3:	mtsrin	r3, r4
	addi	r3, r3, 0x111	/* increment VSID */
	addis	r4, r4, 0x1000	/* address of next segment */
	bdnz	3b
	blr
1030
/*
 * This is where the main kernel code starts.
 * Entered from turn_on_mmu with translation on; r31 still holds the
 * device tree pointer saved in __start.
 */
start_here:
	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l
	/* Set up for using our exception vectors */
	/* ptr to phys current thread */
	tophys(r4,r2)
	addi	r4,r4,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4
	lis	r4, (swapper_pg_dir - PAGE_OFFSET)@h
	ori	r4, r4, (swapper_pg_dir - PAGE_OFFSET)@l
	mtspr	SPRN_SPRG_PGDIR, r4

	/* stack */
	lis	r1,init_thread_union@ha
	addi	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1)	/* zero backchain */
/*
 * Do early platform-specific initialization,
 * and set up the MMU.
 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31			/* pass the device tree ptr */
	bl	machine_init
	bl	__save_cpu_setup
	bl	MMU_init
#ifdef CONFIG_KASAN
BEGIN_MMU_FTR_SECTION
	bl	MMU_init_hw_patch
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
#endif

/*
 * Go back to running unmapped so we can load up new values
 * for SDR1 (hash table pointer) and the segment registers
 * and change to using our exception vectors.
 */
	lis	r4,2f@h
	ori	r4,r4,2f@l
	tophys(r4,r4)
	li	r3,MSR_KERNEL & ~(MSR_IR|MSR_DR)

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	SYNC
	RFI				/* continue at 2: with MMU off */
/* Load up the kernel context */
2:	bl	load_up_mmu

#ifdef CONFIG_BDI_SWITCH
	/* Add helper information for the Abatron bdiGDB debugger.
	 * We do this here because we know the mmu is disabled, and
	 * will be enabled for real in just a few instructions.
	 */
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	stw	r5, 0xf0(r0)	/* This must match your Abatron config */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	tophys(r5, r5)
	stw	r6, 0(r5)
#endif /* CONFIG_BDI_SWITCH */

/* Now turn on the MMU for real! */
	li	r4,MSR_KERNEL
	lis	r3,start_kernel@h
	ori	r3,r3,start_kernel@l
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	SYNC
	RFI
1110
/*
 * void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 *
 * Set up the segment registers for a new context.
 *
 * Derives the base VSID from next->context.id (a negative id traps
 * as a BUG), rewrites the NUM_USER_SEGMENTS user segment registers,
 * and points SPRN_SPRG_PGDIR at the physical address of next's page
 * directory.  The prev argument in r3 is not used.
 * Clobbers r0, r3, r4, r5 and ctr.
 */
_ENTRY(switch_mmu_context)
	lwz	r3,MMCONTEXTID(r4)	/* r3 = next->context.id */
	cmpwi	cr0,r3,0
	blt-	4f			/* negative id: BUG */
	mulli	r3,r3,897	/* multiply context by skew factor */
	rlwinm	r3,r3,4,8,27	/* VSID = (context & 0xfffff) << 4 */
#ifdef CONFIG_PPC_KUEP
	oris	r3, r3, SR_NX@h	/* Set Nx */
#endif
#ifdef CONFIG_PPC_KUAP
	oris	r3, r3, SR_KS@h	/* Set Ks */
#endif
	li	r0,NUM_USER_SEGMENTS
	mtctr	r0

	lwz	r4, MM_PGD(r4)	/* r4 = next->pgd */
#ifdef CONFIG_BDI_SWITCH
	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	lis	r5, abatron_pteptrs@ha
	stw	r4, abatron_pteptrs@l + 0x4(r5)	/* second slot = user pgdir */
#endif
	tophys(r4, r4)
	mtspr	SPRN_SPRG_PGDIR, r4	/* physical address of new pgdir */
	li	r4,0
	isync
3:
	mtsrin	r3,r4
	addi	r3,r3,0x111	/* next VSID */
	rlwinm	r3,r3,0,8,3	/* clear out any overflow from VSID field */
	addis	r4,r4,0x1000	/* address of next segment */
	bdnz	3b
	sync
	isync
	blr
4:	trap			/* uninitialised context id */
	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
	blr
EXPORT_SYMBOL(switch_mmu_context)
1156
/*
 * An undocumented "feature" of 604e requires that the v bit
 * be cleared before changing BAT values.
 *
 * Also, newer IBM firmware does not clear bat3 and 4 so
 * this makes sure it's done.
 * -- Cort
 *
 * clear_bats: zero every I/D BAT register (601 has no DBATs; BATs
 * 4-7 only when the high-BATs feature is set).  Clobbers r10.
 */
clear_bats:
	li	r10,0

#ifndef CONFIG_PPC_BOOK3S_601
	mtspr	SPRN_DBAT0U,r10
	mtspr	SPRN_DBAT0L,r10
	mtspr	SPRN_DBAT1U,r10
	mtspr	SPRN_DBAT1L,r10
	mtspr	SPRN_DBAT2U,r10
	mtspr	SPRN_DBAT2L,r10
	mtspr	SPRN_DBAT3U,r10
	mtspr	SPRN_DBAT3L,r10
#endif
	mtspr	SPRN_IBAT0U,r10
	mtspr	SPRN_IBAT0L,r10
	mtspr	SPRN_IBAT1U,r10
	mtspr	SPRN_IBAT1L,r10
	mtspr	SPRN_IBAT2U,r10
	mtspr	SPRN_IBAT2L,r10
	mtspr	SPRN_IBAT3U,r10
	mtspr	SPRN_IBAT3L,r10
BEGIN_MMU_FTR_SECTION
	/* Here's a tweak: at this point, CPU setup have
	 * not been called yet, so HIGH_BAT_EN may not be
	 * set in HID0 for the 745x processors. However, it
	 * seems that doesn't affect our ability to actually
	 * write to these SPRs.
	 */
	mtspr	SPRN_DBAT4U,r10
	mtspr	SPRN_DBAT4L,r10
	mtspr	SPRN_DBAT5U,r10
	mtspr	SPRN_DBAT5L,r10
	mtspr	SPRN_DBAT6U,r10
	mtspr	SPRN_DBAT6L,r10
	mtspr	SPRN_DBAT7U,r10
	mtspr	SPRN_DBAT7L,r10
	mtspr	SPRN_IBAT4U,r10
	mtspr	SPRN_IBAT4L,r10
	mtspr	SPRN_IBAT5U,r10
	mtspr	SPRN_IBAT5L,r10
	mtspr	SPRN_IBAT6U,r10
	mtspr	SPRN_IBAT6L,r10
	mtspr	SPRN_IBAT7U,r10
	mtspr	SPRN_IBAT7L,r10
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	blr
1211
/*
 * update_bats: reload every BAT register from the BATS array.
 *
 * RFIs to 1f with MSR_IR/MSR_DR off (and EE/RI cleared first, since
 * SRR0/SRR1 are about to be overwritten), clears then reloads the
 * BATs, and RFIs back to the caller restoring the MSR saved in r6
 * and the return address saved in r7.  Clobbers r0, r3-r7.
 */
_ENTRY(update_bats)
	lis	r4, 1f@h
	ori	r4, r4, 1f@l
	tophys(r4, r4)		/* physical address of 1f */
	mfmsr	r6		/* r6 = caller's MSR, restored on exit */
	mflr	r7		/* r7 = return address */
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)
	rlwinm	r0, r6, 0, ~MSR_RI	/* clear RI while SRR0/SRR1 are live */
	rlwinm	r0, r0, 0, ~MSR_EE	/* and disable interrupts */
	mtmsr	r0

	.align	4
	mtspr	SPRN_SRR0, r4
	mtspr	SPRN_SRR1, r3
	SYNC
	RFI			/* continue at 1f, unmapped */
1:	bl	clear_bats
	lis	r3, BATS@ha
	addi	r3, r3, BATS@l
	tophys(r3, r3)
	LOAD_BAT(0, r3, r4, r5)
	LOAD_BAT(1, r3, r4, r5)
	LOAD_BAT(2, r3, r4, r5)
	LOAD_BAT(3, r3, r4, r5)
BEGIN_MMU_FTR_SECTION
	LOAD_BAT(4, r3, r4, r5)
	LOAD_BAT(5, r3, r4, r5)
	LOAD_BAT(6, r3, r4, r5)
	LOAD_BAT(7, r3, r4, r5)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
	li	r3, MSR_KERNEL & ~(MSR_IR | MSR_DR | MSR_RI)
	mtmsr	r3		/* stay RI-less until the final RFI */
	mtspr	SPRN_SRR0, r7
	mtspr	SPRN_SRR1, r6
	SYNC
	RFI			/* back to the caller with its original MSR */
1248
/*
 * flush_tlbs: issue a tlbie for every 4kB page in the low 4MB of the
 * address space (0x400000 counting down to 0), then sync.
 * NOTE(review): presumably 4MB of effective addresses is enough to
 * hit every TLB congruence class on the CPUs this supports -- confirm.
 * Clobbers r10.
 */
flush_tlbs:
	lis	r10, 0x40
1:	addic.	r10, r10, -0x1000
	tlbie	r10
	bgt	1b
	sync
	blr
1256
/*
 * mmu_off: disable address translation and continue at __after_mmu_off.
 *
 * On entry r3 holds the address the image is actually running at, so
 * the branch target is computed relative to _start and works wherever
 * the kernel was loaded.  If the MMU is already off, simply returns.
 * Clobbers r0, r3, r4.
 */
mmu_off:
	addi	r4, r3, __after_mmu_off - _start
	mfmsr	r3
	andi.	r0,r3,MSR_DR|MSR_IR	/* MMU enabled? */
	beqlr				/* no: nothing to do */
	andc	r3,r3,r0		/* clear IR and DR in the new MSR */

	.align	4
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	sync
	RFI
1269
/*
 * On 601, we use 3 BATs to map up to 24M of RAM at PAGE_OFFSET
 * (we keep one for debugging) and on others, we use one 256M BAT.
 *
 * initial_bats: create the boot-time BAT mapping of low RAM at
 * PAGE_OFFSET (virtual PAGE_OFFSET -> physical 0) so the kernel can
 * run with the MMU on before the hash table exists.
 * Clobbers r8, r11.
 */
initial_bats:
	lis	r11,PAGE_OFFSET@h
#ifdef CONFIG_PPC_BOOK3S_601
	ori	r11,r11,4		/* set up BAT registers for 601 */
	li	r8,0x7f			/* valid, block length = 8MB */
	mtspr	SPRN_IBAT0U,r11		/* N.B. 601 has valid bit in */
	mtspr	SPRN_IBAT0L,r8		/* lower BAT register */
	addis	r11,r11,0x800000@h	/* advance to the next 8MB block */
	addis	r8,r8,0x800000@h
	mtspr	SPRN_IBAT1U,r11
	mtspr	SPRN_IBAT1L,r8
	addis	r11,r11,0x800000@h
	addis	r8,r8,0x800000@h
	mtspr	SPRN_IBAT2U,r11
	mtspr	SPRN_IBAT2L,r8
#else
	tophys(r8,r11)			/* physical base = virt - PAGE_OFFSET */
#ifdef CONFIG_SMP
	ori	r8,r8,0x12		/* R/W access, M=1 */
#else
	ori	r8,r8,2			/* R/W access */
#endif /* CONFIG_SMP */
	ori	r11,r11,BL_256M<<2|0x2	/* set up BAT registers for 604 */

	mtspr	SPRN_DBAT0L,r8		/* N.B. 6xx (not 601) have valid */
	mtspr	SPRN_DBAT0U,r11		/* bit in upper BAT register */
	mtspr	SPRN_IBAT0L,r8
	mtspr	SPRN_IBAT0U,r11
#endif
	isync
	blr
1305
#ifdef CONFIG_BOOTX_TEXT
/*
 * setup_disp_bat: map the boot display frame buffer with BAT3, using
 * the upper/lower BAT values prepared in disp_BAT by prom.c.  The
 * address is adjusted by reloc_offset so it works before relocation.
 * Clobbers r3, r8, r11 (reloc_offset's own clobbers aside).
 */
setup_disp_bat:
	/*
	 * setup the display bat prepared for us in prom.c
	 */
	mflr	r8		/* reloc_offset clobbers LR: save it */
	bl	reloc_offset	/* r3 = offset we are running at */
	mtlr	r8
	addis	r8,r3,disp_BAT@ha
	addi	r8,r8,disp_BAT@l
	cmpwi	cr0,r8,0
	beqlr			/* nothing prepared: done */
	lwz	r11,0(r8)	/* upper BAT word */
	lwz	r8,4(r8)	/* lower BAT word */
#ifndef CONFIG_PPC_BOOK3S_601
	mtspr	SPRN_DBAT3L,r8
	mtspr	SPRN_DBAT3U,r11
#else
	mtspr	SPRN_IBAT3L,r8	/* 601 only has IBATs */
	mtspr	SPRN_IBAT3U,r11
#endif
	blr
#endif /* CONFIG_BOOTX_TEXT */
1329
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
/*
 * setup_cpm_bat: map 0xf0000000 1:1 with a 1MB, uncached/guarded,
 * R/W DBAT for the CPM early debug console.  Clobbers r8, r11.
 */
setup_cpm_bat:
	lis	r8, 0xf000		/* physical base 0xf0000000 */
	ori	r8, r8, 0x002a		/* uncached, guarded, rw */
	mtspr	SPRN_DBAT1L, r8

	lis	r11, 0xf000		/* same virtual base */
	ori	r11, r11, (BL_1M << 2) | 2	/* 1MB block, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1U, r11

	blr
#endif
1342
#ifdef CONFIG_PPC_EARLY_DEBUG_USBGECKO
/*
 * setup_usbgecko_bat: map the platform's I/O base (GameCube or Wii)
 * with an uncached/guarded R/W DBAT for USB Gecko early debugging.
 * Clobbers r8, r11.
 */
setup_usbgecko_bat:
	/* prepare a BAT for early io */
#if defined(CONFIG_GAMECUBE)
	lis	r8, 0x0c00	/* GameCube I/O physical base */
#elif defined(CONFIG_WII)
	lis	r8, 0x0d00	/* Wii I/O physical base */
#else
#error Invalid platform for USB Gecko based early debugging.
#endif
	/*
	 * The virtual address used must match the virtual address
	 * associated to the fixmap entry FIX_EARLY_DEBUG_BASE.
	 */
	lis	r11, 0xfffe	/* top 128K */
	ori	r8, r8, 0x002a	/* uncached, guarded ,rw */
	ori	r11, r11, 0x2	/* 128K, Vs=1, Vp=0 */
	mtspr	SPRN_DBAT1L, r8
	mtspr	SPRN_DBAT1U, r11
	blr
#endif
1364
#ifdef CONFIG_8260
/* Jump into the system reset for the rom.
 * We first disable the MMU, and then jump to the ROM reset address.
 *
 * r3 is the board info structure, r4 is the location for starting.
 * I use this for building a small kernel that can load other kernels,
 * rather than trying to write or rely on a rom monitor that can tftp load.
 */
	.globl	m8260_gorom
m8260_gorom:
	mfmsr	r0
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	sync
	mtmsr	r0		/* interrupts off before touching HID0 */
	sync
	mfspr	r11, SPRN_HID0
	lis	r10, 0
	ori	r10,r10,HID0_ICE|HID0_DCE
	andc	r11, r11, r10	/* disable the I and D caches */
	mtspr	SPRN_HID0, r11
	isync
	li	r5, MSR_ME|MSR_RI	/* minimal MSR: translation off */
	lis	r6,2f@h
	addis	r6,r6,-KERNELBASE@h	/* physical address of 2f */
	ori	r6,r6,2f@l
	mtspr	SPRN_SRR0,r6
	mtspr	SPRN_SRR1,r5
	isync
	sync
	rfi			/* continue unmapped at 2f */
2:
	mtlr	r4
	blr			/* jump to the ROM entry point in r4 */
#endif
1399
1400
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the data segment,
 * which is page-aligned.
 */
	.data
	.globl	sdata
sdata:
	.globl	empty_zero_page
empty_zero_page:		/* a page of zeroes, exported for general use */
	.space	4096
EXPORT_SYMBOL(empty_zero_page)

	.globl	swapper_pg_dir	/* the kernel's initial page directory */
swapper_pg_dir:
	.space	PGD_TABLE_SIZE

/* Room for two PTE pointers, usually the kernel and current user pointers
 * to their respective root page table.
 */
abatron_pteptrs:
	.space	8