/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains miscellaneous low-level functions.
 *	Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 */

#include <linux/export.h>
#include <linux/linkage.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/feature-fixups.h>

	.text

_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
	srdi	r8,r3,32
	rlwinm	r7,r3,8,0xffffffff
	rlwimi	r7,r3,24,0,7
	rlwinm	r9,r8,8,0xffffffff
	rlwimi	r7,r3,24,16,23
	rlwimi	r9,r8,24,0,7
	rlwimi	r9,r8,24,16,23
	sldi	r7,r7,32
	or	r3,r7,r9
	blr
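/*
 * Illustrative only: a C sketch of what the sequence above computes -- a
 * full 64-bit byte swap, i.e. the libgcc __bswapdi2 helper. The name matches
 * the symbol above; the body is just a reference model, not the kernel code.
 *
 *	unsigned long long __bswapdi2(unsigned long long x)
 *	{
 *		return ((x & 0x00000000000000ffULL) << 56) |
 *		       ((x & 0x000000000000ff00ULL) << 40) |
 *		       ((x & 0x0000000000ff0000ULL) << 24) |
 *		       ((x & 0x00000000ff000000ULL) <<  8) |
 *		       ((x & 0x000000ff00000000ULL) >>  8) |
 *		       ((x & 0x0000ff0000000000ULL) >> 24) |
 *		       ((x & 0x00ff000000000000ULL) >> 40) |
 *		       ((x & 0xff00000000000000ULL) >> 56);
 *	}
 */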


#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
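/*
 * Hedged reading of the two routines below, not taken from the original
 * comments: rmci_on/rmci_off set and clear a HID4 bit -- 0x100 rotated into
 * the upper word by rldicl, i.e. roughly (0x100ULL << 32) -- which on
 * 970-class CPUs appears to control cache-inhibited accesses in real mode.
 * The sync/isync/slbia bracketing is there because HID4 changes affect
 * address translation. Assumed C-level prototypes:
 *
 *	void rmci_on(void);
 *	void rmci_off(void);
 */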
_GLOBAL(rmci_on)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0
	mfspr	r5,SPRN_HID4
	or	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr

_GLOBAL(rmci_off)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0
	mfspr	r5,SPRN_HID4
	andc	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */

#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)

/*
 * Do an IO access in real mode
 */
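/*
 * real_readb/real_writeb perform a single byte access with data relocation
 * (MSR_DR) turned off, temporarily forcing the HID4 cache-inhibit bit around
 * the access. A hedged sketch of the C-level prototypes, inferred from the
 * register usage below (data in r3, address in r4 for the write):
 *
 *	u8   real_readb(volatile u8 __iomem *addr);
 *	void real_writeb(u8 data, volatile u8 __iomem *addr);
 *
 * The ori/xori pair on MSR_DR first sets the bit and then flips it back off,
 * so r0 ends up as the current MSR with MSR_DR cleared, whatever its prior
 * value, without needing a separate mask register.
 */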
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr

	/*
	 * Do an IO access in real mode
	 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */

#ifdef CONFIG_PPC_PASEMI
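/*
 * The PA Semi variants below rely on the ISA 2.05 cache-inhibited byte
 * load/store forms (the LBZCIX/STBCIX macros), so only MSR_DR needs to be
 * dropped around the access and HID4 is left alone. A hedged sketch of the
 * assumed C prototypes, again inferred from the register usage:
 *
 *	u8   real_205_readb(volatile u8 __iomem *addr);
 *	void real_205_writeb(u8 data, volatile u8 __iomem *addr);
 */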

_GLOBAL(real_205_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	LBZCIX(R3,R0,R3)
	isync
	mtmsrd	r7
	sync
	isync
	blr

_GLOBAL(real_205_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	STBCIX(R3,R0,R4)
	isync
	mtmsrd	r7
	sync
	isync
	blr

#endif /* CONFIG_PPC_PASEMI */


#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24-bit register address. This code is
 * 970-specific and does not check the status bits, so you should know
 * what you are doing.
 */
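/*
 * Illustrative usage only; sc_addr is a made-up placeholder, not a value
 * taken from this file. Callers simply pass the 24-bit SCOM address and get
 * no error indication back:
 *
 *	unsigned long val;
 *
 *	val = scom970_read(sc_addr);		// sc_addr: 24-bit SCOM address
 *	scom970_write(sc_addr, val | 1);	// read-modify-write, no status check
 */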
_GLOBAL(scom970_read)
	/* interrupts off */
	mfmsr	r4
	ori	r0,r4,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate 24-bit SCOM address 8 bits left and mask out its low 8 bits
	 * (including parity). On current CPUs they must be 0'd,
	 * and finally or in RW bit
	 */
	rlwinm	r3,r3,8,0,15
	ori	r3,r3,0x8000

	/* do the actual scom read */
	sync
	mtspr	SPRN_SCOMC,r3
	isync
	mfspr	r3,SPRN_SCOMD
	isync
	mfspr	r0,SPRN_SCOMC
	isync

	/* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
	 * that's the best we can do). Not implemented yet as we don't use
	 * the scom on any of the bogus CPUs yet, but may have to be done
	 * ultimately
	 */

	/* restore interrupts */
	mtmsrd	r4,1
	blr


_GLOBAL(scom970_write)
	/* interrupts off */
	mfmsr	r5
	ori	r0,r5,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate 24-bit SCOM address 8 bits left and mask out its low 8 bits
	 * (including parity). On current CPUs they must be 0'd.
	 */

	rlwinm	r3,r3,8,0,15

	sync
	mtspr	SPRN_SCOMD,r4	/* write data */
	isync
	mtspr	SPRN_SCOMC,r3	/* write command */
	isync
	mfspr	3,SPRN_SCOMC
	isync

	/* restore interrupts */
	mtmsrd	r5,1
	blr
#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */

/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away but
 * the slave code for the next one is at addresses 0 to 0x100.
 *
 * This is used by all slaves, even those that did not find a matching
 * paca in the secondary startup code.
 *
 * Physical (hardware) cpu id should be in r3.
 */
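/*
 * Rough C-level model of the loop below, illustrative only: the bcl/mflr
 * pair just materialises the address of kexec_flag so the poll works
 * position-independently in real mode; jump_to_0x60() is a hypothetical
 * stand-in for the absolute branch taken once the flag is set.
 *
 *	for (;;) {
 *		if (READ_ONCE(kexec_flag))
 *			jump_to_0x60(phys_cpu);	// MMU off, big-endian, never returns
 *	}
 */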
_GLOBAL(kexec_wait)
	bcl	20,31,$+4
1:	mflr	r5
	addi	r5,r5,kexec_flag-1b

99:	HMT_LOW
#ifdef CONFIG_KEXEC_CORE	/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	beq	99b
#ifdef CONFIG_PPC_BOOK3S_64
	li	r10,0x60
	mfmsr	r11
	clrrdi	r11,r11,1	/* Clear MSR_LE */
	mtsrr0	r10
	mtsrr1	r11
	rfid
#else
	/* Create TLB entry in book3e_secondary_core_init */
	li	r4,0
	ba	0x60
#endif
#endif

/* this can be in text because we won't change it until we are
 * running in real mode anyway
 */
kexec_flag:
	.long	0


#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC_BOOK3E_64
/*
 * BOOK3E has no real MMU mode, so we have to setup the initial TLB
 * for a core to identity map v:0 to p:0. This current implementation
 * assumes that 1G is enough for kexec.
 */
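/*
 * Hedged sketch (values paraphrased from the code below) of the single TLB1
 * entry being written; the last TLB1 slot is reused so it cannot collide
 * with the IPROT entry covering the kernel text:
 *
 *	MAS0 = TLBSEL(1) | ESEL(last entry)
 *	MAS1 = VALID | IPROT | TSIZE(1GB)
 *	MAS2 = EPN 0x0 (plus M if SMP requires it)
 *	MAS3 = RPN 0x0 | SR | SW | SX
 *	MAS7 = 0 (upper RPN bits)
 */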
kexec_create_tlb:
	/*
	 * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
	 * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
	 */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

	mfspr	r10,SPRN_TLB1CFG
	andi.	r10,r10,TLBnCFG_N_ENTRY	/* Extract # entries */
	subi	r10,r10,1		/* Last entry: no conflict with kernel text */
	lis	r9,MAS0_TLBSEL(1)@h
	rlwimi	r9,r10,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r9) */

/* Set up a temp identity mapping v:0 to p:0 and return to it. */
	mtspr	SPRN_MAS0,r9

	lis	r9,(MAS1_VALID|MAS1_IPROT)@h
	ori	r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r9

	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS2_M_IF_NEEDED)
	mtspr	SPRN_MAS2,r9

	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
	mtspr	SPRN_MAS3,r9
	li	r9,0
	mtspr	SPRN_MAS7,r9

	tlbwe
	isync
	blr
#endif

/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * switch to real mode
 * mark the paca as no longer used
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)
	bl	real_mode

	li	r4,KEXEC_STATE_REAL_MODE
	stb	r4,PACAKEXECSTATE(r13)

	b	kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
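/*
 * Illustrative pseudo-C of the Book3S path below (a reading aid only, not a
 * substitute for the assembly; on Book3E the routine simply branches to
 * kexec_create_tlb instead):
 *
 *	srr1 = mfmsr() & ~(MSR_DR | MSR_IR);	// target MSR: translation off
 *	mtmsrd(mfmsr() & ~MSR_RI, 1);		// mark state non-recoverable first
 *	srr0 = lr;				// caller's return address
 *	rfid();					// "returns" to caller with MMU off
 */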
SYM_FUNC_START_LOCAL(real_mode)	/* assume normal blr return */
#ifdef CONFIG_PPC_BOOK3E_64
	/* Create an identity mapping. */
	b	kexec_create_tlb
#else
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9
	andc	r10,r12,r10

	mtmsrd	r9,1
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid
#endif
SYM_FUNC_END(real_mode)

/*
 * kexec_sequence(newstack, start, image, control, clear_all(),
 *		  copy_with_mmu_off)
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */
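/*
 * Assumed C-level declaration matching the register usage below (the exact
 * header is not shown here, so treat this as a sketch):
 *
 *	void kexec_sequence(void *newstack, unsigned long start, void *image,
 *			    void *control, void (*clear_all)(void),
 *			    bool copy_with_mmu_off);
 *
 * It never returns: the final blr jumps to the new image's entry point.
 */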

_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-STACK_FRAME_MIN_SIZE-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* copy_with_mmu_off */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
#ifdef CONFIG_PPC_BOOK3E_64
	wrteei	0
#else
	mfmsr	r3
	rlwinm	r3,r3,0,17,15
	mtmsrd	r3,1
#endif

	/* We need to turn the MMU off unless we are in hash mode
	 * under a hypervisor
	 */
	cmpdi	r26,0
	beq	1f
	bl	real_mode
1:
	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	CFUNC(kexec_copy_flush)	/* (image) */

	/* turn off mmu now if not done earlier */
	cmpdi	r26,0
	bne	1f
	bl	real_mode

	/* copy 0x100 bytes starting at start to 0 */
1:	li	r3,0
	mr	r4,r30	/* start, aka phys mem offset */
	li	r5,0x100
	li	r6,0
	bl	copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	mflr	r5
	li	r6,1
	stw	r6,kexec_flag-1b(5)

	cmpdi	r27,0
	beq	1f

	/* clear out hardware hash page table and tlb */
#ifdef CONFIG_PPC64_ELF_ABI_V1
	ld	r12,0(r27)		/* deref function descriptor */
#else
	mr	r12,r27
#endif
	mtctr	r12
	bctrl				/* mmu_hash_ops.hpte_clear_all(void); */

/*
 * kexec image calling is:
 *    the first 0x100 bytes of the entry point are copied to 0
 *
 *    all slaves branch to slave = 0x60 (absolute)
 *        slave(phys_cpu_id);
 *
 *    master goes to start = entry point
 *        start(phys_cpu_id, start, 0);
 *
 *
 * a wrapper is needed to call existing kernels, here is an approximate
 * description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */
1:	mr	r3,r25	# my phys cpu
	mr	r4,r30	# start, aka phys mem offset
	mtlr	4
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC_CORE */