/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 */

#include <linux/linkage.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

	.text

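/*
 * 64-bit byte swap. __bswapdi2() is the libgcc-style helper the
 * compiler may emit calls to for __builtin_bswap64(). Each 32-bit
 * half is byte-swapped with the usual rlwinm/rlwimi sequence, then
 * the two halves are exchanged. Roughly equivalent C, as a sketch
 * for reference only:
 *
 *	unsigned long __bswapdi2(unsigned long x)
 *	{
 *		x = (x << 32) | (x >> 32);
 *		x = ((x & 0x0000ffff0000ffffUL) << 16) |
 *		    ((x & 0xffff0000ffff0000UL) >> 16);
 *		x = ((x & 0x00ff00ff00ff00ffUL) <<  8) |
 *		    ((x & 0xff00ff00ff00ff00UL) >>  8);
 *		return x;
 *	}
 */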
_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
	srdi	r8,r3,32		/* r8 = high word of the input */
	rlwinm	r7,r3,8,0xffffffff	/* byte-swap the low word into r7 */
	rlwimi	r7,r3,24,0,7
	rlwinm	r9,r8,8,0xffffffff	/* byte-swap the high word into r9 */
	rlwimi	r7,r3,24,16,23
	rlwimi	r9,r8,24,0,7
	rlwimi	r9,r8,24,16,23
	sldi	r7,r7,32		/* swapped low word becomes the high word */
	or	r3,r7,r9		/* combine the two swapped halves */
	blr


#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
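/*
 * rmci_on()/rmci_off() toggle a HID4 bit (0x100 shifted into the
 * high word, the same bit real_readb/real_writeb manipulate below)
 * which on 970-class CPUs makes real-mode data accesses
 * cache-inhibited. Early BootX debug output needs this to poke the
 * framebuffer while translation is off. The slbia/isync/sync
 * sequences make sure the HID4 update is fully visible.
 */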
_GLOBAL(rmci_on)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0		/* form 0x100 << 32 */
	mfspr	r5,SPRN_HID4
	or	r5,r5,r3		/* set the cache-inhibit bit */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr

_GLOBAL(rmci_off)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0		/* form 0x100 << 32 */
	mfspr	r5,SPRN_HID4
	andc	r5,r5,r3		/* clear the cache-inhibit bit */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */

#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)

/*
 * Do an IO access in real mode: read the byte at the physical address
 * in r3 with data translation off, returning it in r3.
 */
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* r0 = MSR with MSR_DR cleared */
	sync
	mtmsrd	r0			/* turn data translation off */
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0		/* set the cache-inhibit bit in HID4 */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)		/* do the actual read */
	sync
	mtspr	SPRN_HID4,r6		/* restore HID4 */
	isync
	slbia
	isync
	mtmsrd	r7			/* restore the MSR */
	sync
	isync
	blr

/*
 * Do an IO access in real mode: write the byte in r3 to the physical
 * address in r4, with data translation off.
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR		/* r0 = MSR with MSR_DR cleared */
	sync
	mtmsrd	r0			/* turn data translation off */
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0		/* set the cache-inhibit bit in HID4 */
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)		/* do the actual write */
	sync
	mtspr	SPRN_HID4,r6		/* restore HID4 */
	isync
	slbia
	isync
	mtmsrd	r7			/* restore the MSR */
	sync
	isync
	blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */

#ifdef CONFIG_PPC_PASEMI

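/*
 * The _205 variants use the ISA 2.05 cache-inhibited load/store
 * instructions (lbzcix/stbcix), so only MSR_DR needs to be turned
 * off around the access; no HID4 manipulation is required.
 */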
_GLOBAL(real_205_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	LBZCIX(R3,R0,R3)
	isync
	mtmsrd	r7
	sync
	isync
	blr

_GLOBAL(real_205_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	STBCIX(R3,R0,R4)
	isync
	mtmsrd	r7
	sync
	isync
	blr

#endif /* CONFIG_PPC_PASEMI */


#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24-bit register address. This code
 * is 970 specific and will not check the status bits, so you should
 * know what you are doing.
 */
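/*
 * The protocol, as used here: the shifted register address (plus the
 * read/write bit for reads) goes into SCOMC, which triggers the
 * operation; data is transferred through SCOMD, and SCOMC is read
 * back so that status could in principle be checked.
 */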
_GLOBAL(scom970_read)
	/* interrupts off */
	mfmsr	r4
	ori	r0,r4,MSR_EE
	xori	r0,r0,MSR_EE		/* r0 = MSR with MSR_EE cleared */
	mtmsrd	r0,1

	/* rotate the 24-bit SCOM address 8 bits left and mask out its
	 * low 8 bits (including parity). On current CPUs they must be
	 * 0'd. Finally or in the read/write bit.
	 */
	rlwinm	r3,r3,8,0,15
	ori	r3,r3,0x8000

	/* do the actual scom read */
	sync
	mtspr	SPRN_SCOMC,r3
	isync
	mfspr	r3,SPRN_SCOMD
	isync
	mfspr	r0,SPRN_SCOMC
	isync

	/* XXX: fixup result on some buggy 970's (ouch! we lost a bit, bah,
	 * that's the best we can do). Not implemented yet as we don't use
	 * the scom on any of the bogus CPUs yet, but it may have to be done
	 * ultimately.
	 */

	/* restore interrupts */
	mtmsrd	r4,1
	blr


_GLOBAL(scom970_write)
	/* interrupts off */
	mfmsr	r5
	ori	r0,r5,MSR_EE
	xori	r0,r0,MSR_EE		/* r0 = MSR with MSR_EE cleared */
	mtmsrd	r0,1

	/* rotate the 24-bit SCOM address 8 bits left and mask out its
	 * low 8 bits (including parity). On current CPUs they must be 0'd.
	 */
	rlwinm	r3,r3,8,0,15

	sync
	mtspr	SPRN_SCOMD,r4		/* write data */
	isync
	mtspr	SPRN_SCOMC,r3		/* write command */
	isync
	mfspr	r3,SPRN_SCOMC		/* read back status */
	isync

	/* restore interrupts */
	mtmsrd	r5,1
	blr
#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */


/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away but
 * the slave code for the next one is at addresses 0 to 0x100.
 *
 * This is used by all slaves, even those that did not find a matching
 * paca in the secondary startup code.
 *
 * Physical (hardware) cpu id should be in r3.
 */
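/*
 * In C terms, roughly (a sketch, not compiled):
 *
 *	void kexec_wait(unsigned long phys_cpu)
 *	{
 *		for (;;) {
 *			HMT_low();
 *			if (kexec_flag)
 *				jump_to_slave_entry(phys_cpu);	// 0x60, MMU off
 *		}
 *	}
 */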
_GLOBAL(kexec_wait)
	bcl	20,31,$+4		/* "branch" to the next instruction ... */
1:	mflr	r5			/* ... so LR gives us our own address */
	addi	r5,r5,kexec_flag-1b	/* locate kexec_flag PC-relatively */

99:	HMT_LOW
#ifdef CONFIG_KEXEC_CORE	/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	beq	99b
#ifdef CONFIG_PPC_BOOK3S_64
	li	r10,0x60
	mfmsr	r11
	clrrdi	r11,r11,1	/* Clear MSR_LE */
	mtsrr0	r10
	mtsrr1	r11
	rfid			/* enter the new kernel's slave code at 0x60 */
#else
	/* Create TLB entry in book3e_secondary_core_init */
	li	r4,0
	ba	0x60
#endif
#endif

/* this can be in .text because we won't change it until we are
 * running in real mode anyway
 */
kexec_flag:
	.long	0


#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC_BOOK3E_64
/*
 * BOOK3E has no real MMU mode, so we have to set up the initial TLB
 * entry for a core to identity map v:0 to p:0. This current
 * implementation assumes that 1G is enough for kexec.
 */
kexec_create_tlb:
	/*
	 * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
	 * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
	 */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

	mfspr	r10,SPRN_TLB1CFG
	andi.	r10,r10,TLBnCFG_N_ENTRY	/* Extract # entries */
	subi	r10,r10,1		/* Last entry: no conflict with kernel text */
	lis	r9,MAS0_TLBSEL(1)@h
	rlwimi	r9,r10,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r9) */

/* Set up a temp identity mapping v:0 to p:0 and return to it. */
	mtspr	SPRN_MAS0,r9

	lis	r9,(MAS1_VALID|MAS1_IPROT)@h
	ori	r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r9		/* valid, protected, 1GB page */

	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS2_M_IF_NEEDED)
	mtspr	SPRN_MAS2,r9		/* effective address 0 */

	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
	mtspr	SPRN_MAS3,r9		/* physical address 0, RWX */
	li	r9,0
	mtspr	SPRN_MAS7,r9		/* upper physical address bits = 0 */

	tlbwe
	isync
	blr
#endif

/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * switch to real mode
 * mark the paca as no longer used
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)
	bl	real_mode

	li	r4,KEXEC_STATE_REAL_MODE
	stb	r4,PACAKEXECSTATE(r13)

	b	kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores the top
 * two bits (big-endian bits 0 and 1) of the effective address in
 * real mode, so kernel linear-map addresses keep working
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
SYM_FUNC_START_LOCAL(real_mode)	/* assume normal blr return */
#ifdef CONFIG_PPC_BOOK3E_64
	/* Create an identity mapping. */
	b	kexec_create_tlb
#else
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9	/* MSR with RI cleared */
	andc	r10,r12,r10	/* MSR with DR/IR cleared as well */

	mtmsrd	r9,1		/* make interrupts unrecoverable first */
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid			/* "return" to the caller with the MMU off */
#endif
SYM_FUNC_END(real_mode)

/*
 * kexec_sequence(newstack, start, image, control, clear_all(),
 *		  copy_with_mmu_off)
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */

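/*
 * C-level view of the arguments, as a sketch (the kernel's own
 * prototype lives in asm/kexec.h):
 *
 *	void kexec_sequence(void *newstack, unsigned long start,
 *			    void *image, void *control,
 *			    void (*clear_all)(void),
 *			    bool copy_with_mmu_off);
 */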
_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)		/* terminate the backchain */

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-STACK_FRAME_MIN_SIZE-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* copy_with_mmu_off */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
#ifdef CONFIG_PPC_BOOK3E_64
	wrteei	0
#else
	mfmsr	r3
	rlwinm	r3,r3,0,17,15		/* clear MSR_EE */
	mtmsrd	r3,1
#endif

	/* We need to turn the MMU off unless we are in hash mode
	 * under a hypervisor
	 */
	cmpdi	r26,0
	beq	1f
	bl	real_mode
1:
	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	kexec_copy_flush	/* (image) */

	/* turn off mmu now if not done earlier */
	cmpdi	r26,0
	bne	1f
	bl	real_mode

	/* copy 0x100 bytes starting at start to 0 */
1:	li	r3,0
	mr	r4,r30		/* start, aka phys mem offset */
	li	r5,0x100
	li	r6,0
	bl	copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	mflr	r5
	li	r6,1
	stw	r6,kexec_flag-1b(r5)

	cmpdi	r27,0
	beq	1f

	/* clear out hardware hash page table and tlb */
#ifdef CONFIG_PPC64_ELF_ABI_V1
	ld	r12,0(r27)		/* deref function descriptor */
#else
	mr	r12,r27
#endif
	mtctr	r12
	bctrl				/* mmu_hash_ops.hpte_clear_all(void); */

/*
 * kexec image calling is:
 * the first 0x100 bytes of the entry point are copied to 0
 *
 * all slaves branch to slave = 0x60 (absolute)
 * slave(phys_cpu_id);
 *
 * master goes to start = entry point
 * start(phys_cpu_id, start, 0);
 *
 * a wrapper is needed to call existing kernels, here is an approximate
 * description of one method:
 *
 * v2: (2.6.10)
 * start will be near the boot_block (maybe 0x100 bytes before it?)
 * it will have a 0x60, which will b to boot_block, where it will wait
 * and 0 will store phys into struct boot-block and load r3 from there,
 * copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 * boot block will have all cpus scanning the device tree to see if they
 * are the boot cpu ?????
 * other device tree differences (prop sizes, va vs pa, etc)...
 */
1:	mr	r3,r25		# my phys cpu
	mr	r4,r30		# start, aka phys mem offset
	mtlr	r4
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC_CORE */