/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/export.h>
#include <asm/feature-fixups.h>

	.text

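/*
 * Switch to the softirq/IRQ stack passed in r3, run the softirq handler
 * on it, then switch back.  Assumed C-level view (a sketch taken from the
 * register usage below, not a definitive declaration):
 *	void call_do_softirq(void *sp);
 */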
_GLOBAL(call_do_softirq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	bl	__do_softirq
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr

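/*
 * Same stack-switch dance for hard interrupts: r3 carries the pt_regs
 * pointer, r4 the IRQ stack on which __do_irq() is run.  Assumed
 * prototype (sketch): void call_do_irq(struct pt_regs *regs, void *sp);
 */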
_GLOBAL(call_do_irq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
	mr	r1,r4
	bl	__do_irq
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr

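/*
 * 64-bit byte swap, the libgcc-style helper the compiler may emit calls
 * to: each 32-bit half of r3 is byte-reversed with rlwinm/rlwimi and the
 * two halves are swapped.  Roughly, u64 __bswapdi2(u64 x) returning x
 * with its bytes reversed (illustrative description, not a declaration).
 */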
_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
	srdi	r8,r3,32
	rlwinm	r7,r3,8,0xffffffff
	rlwimi	r7,r3,24,0,7
	rlwinm	r9,r8,8,0xffffffff
	rlwimi	r7,r3,24,16,23
	rlwimi	r9,r8,24,0,7
	rlwimi	r9,r8,24,16,23
	sldi	r7,r7,32
	or	r3,r7,r9
	blr


#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
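/*
 * rmci_on/rmci_off: set/clear the real-mode cache-inhibit control in
 * HID4 (the 0x100 value rotated up by 32 bits) so that real-mode
 * accesses used by the early BootX debug console are treated as
 * cache-inhibited.  Best-effort description inferred from the code;
 * the exact HID4 bit name is 970-specific.
 */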
_GLOBAL(rmci_on)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0
	mfspr	r5,SPRN_HID4
	or	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr

_GLOBAL(rmci_off)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0
	mfspr	r5,SPRN_HID4
	andc	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */

#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)

/*
 * Do an IO access in real mode
 */
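/*
 * Assumed prototypes (a sketch matching the register usage below):
 *	u8 real_readb(volatile u8 __iomem *addr);
 *	void real_writeb(u8 data, volatile u8 __iomem *addr);
 * Both drop MSR_DR, set the real-mode cache-inhibit bit in HID4 around
 * the single byte access, then restore HID4 and the original MSR.
 */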
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */

#ifdef CONFIG_PPC_PASEMI

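/*
 * Real-mode byte IO for PA Semi parts: same idea as real_readb/real_writeb
 * above, but using the cache-inhibited lbzcix/stbcix forms (via the
 * LBZCIX/STBCIX macros) instead of fiddling with HID4.  Assumed prototypes
 * mirror real_readb/real_writeb: u8 real_205_readb(addr) and
 * void real_205_writeb(data, addr).
 */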
_GLOBAL(real_205_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	LBZCIX(R3,R0,R3)
	isync
	mtmsrd	r7
	sync
	isync
	blr

_GLOBAL(real_205_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	STBCIX(R3,R0,R4)
	isync
	mtmsrd	r7
	sync
	isync
	blr

#endif /* CONFIG_PPC_PASEMI */


#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24-bit register address. This code
 * is 970 specific and will not check the status bits, so you should
 * know what you are doing.
 */
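/*
 * Access pattern used below (inferred from the code; status handling is
 * deliberately skipped): write address|RW to SPRN_SCOMC, read the value
 * from SPRN_SCOMD for a read (or write SPRN_SCOMD first for a write),
 * then read SPRN_SCOMC back, which would carry the status bits.
 */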
_GLOBAL(scom970_read)
	/* interrupts off */
	mfmsr	r4
	ori	r0,r4,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate 24-bit SCOM address 8 bits left and mask out its low 8 bits
	 * (including parity). On current CPUs they must be 0'd,
	 * and finally OR in the RW bit
	 */
	rlwinm	r3,r3,8,0,15
	ori	r3,r3,0x8000

	/* do the actual scom read */
	sync
	mtspr	SPRN_SCOMC,r3
	isync
	mfspr	r3,SPRN_SCOMD
	isync
	mfspr	r0,SPRN_SCOMC
	isync

	/* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
	 * that's the best we can do). Not implemented yet as we don't use
	 * the scom on any of the bogus CPUs yet, but may have to be done
	 * ultimately
	 */

	/* restore interrupts */
	mtmsrd	r4,1
	blr


_GLOBAL(scom970_write)
	/* interrupts off */
	mfmsr	r5
	ori	r0,r5,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate 24-bit SCOM address 8 bits left and mask out its low 8 bits
	 * (including parity). On current CPUs they must be 0'd.
	 */

	rlwinm	r3,r3,8,0,15

	sync
	mtspr	SPRN_SCOMD,r4      /* write data */
	isync
	mtspr	SPRN_SCOMC,r3      /* write command */
	isync
	mfspr	r3,SPRN_SCOMC
	isync

	/* restore interrupts */
	mtmsrd	r5,1
	blr
#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */

/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away and
 * that the slave code for the next one is at addresses 0 to 0x100.
 *
 * This is used by all slaves, even those that did not find a matching
 * paca in the secondary startup code.
 *
 * Physical (hardware) cpu id should be in r3.
 */
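/*
 * The loop below spins at low priority (HMT_LOW) on kexec_flag; once the
 * flag becomes non-zero the CPU enters the new kernel's slave entry at
 * absolute address 0x60, via rfid on Book3S 64 (which also clears MSR_LE)
 * or an absolute branch on Book3E.
 */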
_GLOBAL(kexec_wait)
	bl	1f
1:	mflr	r5
	addi	r5,r5,kexec_flag-1b

99:	HMT_LOW
#ifdef CONFIG_KEXEC_CORE	/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	beq	99b
#ifdef CONFIG_PPC_BOOK3S_64
	li	r10,0x60
	mfmsr	r11
	clrrdi	r11,r11,1	/* Clear MSR_LE */
	mtsrr0	r10
	mtsrr1	r11
	rfid
#else
	/* Create TLB entry in book3e_secondary_core_init */
	li	r4,0
	ba	0x60
#endif
#endif

/* this can be in text because we won't change it until we are
 * running in real mode anyway
 */
kexec_flag:
	.long	0


#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC_BOOK3E
/*
 * BOOK3E has no real MMU mode, so we have to set up the initial TLB
 * for a core to identity map v:0 to p:0. This current implementation
 * assumes that 1G is enough for kexec.
 */
kexec_create_tlb:
	/*
	 * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
	 * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
	 */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

	mfspr	r10,SPRN_TLB1CFG
	andi.	r10,r10,TLBnCFG_N_ENTRY	/* Extract # entries */
	subi	r10,r10,1		/* Last entry: no conflict with kernel text */
	lis	r9,MAS0_TLBSEL(1)@h
	rlwimi	r9,r10,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r9) */

/* Set up a temp identity mapping v:0 to p:0 and return to it. */
	mtspr	SPRN_MAS0,r9

	lis	r9,(MAS1_VALID|MAS1_IPROT)@h
	ori	r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r9

	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS2_M_IF_NEEDED)
	mtspr	SPRN_MAS2,r9

	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
	mtspr	SPRN_MAS3,r9
	li	r9,0
	mtspr	SPRN_MAS7,r9

	tlbwe
	isync
	blr
#endif


/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * switch to real mode
 * mark the paca as no longer used
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)
	bl	real_mode

	li	r4,KEXEC_STATE_REAL_MODE
	stb	r4,PACAKEXECSTATE(r13)
	SYNC

	b	kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
real_mode:	/* assume normal blr return */
#ifdef CONFIG_PPC_BOOK3E
	/* Create an identity mapping. */
	b	kexec_create_tlb
#else
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9
	andc	r10,r12,r10

	mtmsrd	r9,1
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid
#endif

/*
 * kexec_sequence(newstack, start, image, control, clear_all(),
 *		  copy_with_mmu_off)
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */

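/*
 * Assumed C-level view of the call (a sketch from the register usage
 * below, not a definitive declaration):
 *	kexec_sequence(void *newstack, unsigned long start, void *image,
 *		       void *control, void (*clear_all)(void),
 *		       bool copy_with_mmu_off);
 * It does not return: it finishes by branching to the new kernel's
 * entry point.
 */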
_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-STACK_FRAME_OVERHEAD-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* copy_with_mmu_off */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r3
	rlwinm	r3,r3,0,17,15
	mtmsrd	r3,1
#endif

	/* We need to turn the MMU off unless we are in hash mode
	 * under a hypervisor
	 */
	cmpdi	r26,0
	beq	1f
	bl	real_mode
1:
	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	kexec_copy_flush	/* (image) */

	/* turn off mmu now if not done earlier */
	cmpdi	r26,0
	bne	1f
	bl	real_mode

	/* copy 0x100 bytes starting at start to 0 */
1:	li	r3,0
	mr	r4,r30	/* start, aka phys mem offset */
	li	r5,0x100
	li	r6,0
	bl	copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	mflr	r5
	li	r6,1
	stw	r6,kexec_flag-1b(r5)

	cmpdi	r27,0
	beq	1f

	/* clear out hardware hash page table and tlb */
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r27)		/* deref function descriptor */
#else
	mr	r12,r27
#endif
	mtctr	r12
	bctrl				/* mmu_hash_ops.hpte_clear_all(void); */

/*
 * kexec image calling is:
 *    the first 0x100 bytes of the entry point are copied to 0
 *
 *    all slaves branch to slave = 0x60 (absolute)
 *	slave(phys_cpu_id);
 *
 *    master goes to start = entry point
 *	start(phys_cpu_id, start, 0);
 *
 *
 * a wrapper is needed to call existing kernels, here is an approximate
 * description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */
1:	mr	r3,r25	# my phys cpu
	mr	r4,r30	# start, aka phys mem offset
	mtlr	r4
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC_CORE */