v4.17
/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/kexec.h>
#include <asm/ptrace.h>
#include <asm/mmu.h>
#include <asm/export.h>

	.text

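/*
 * Run __do_softirq() on the softirq stack passed in r3: save LR in the
 * caller's frame, open a frame near the top of the new stack whose
 * back-chain points at the old r1, switch r1 over, call out, then
 * unwind through the back-chain and restore LR.
 */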
_GLOBAL(call_do_softirq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	bl	__do_softirq
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr

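/*
 * Same stack switch for hard interrupts: r3 (the pt_regs pointer) is
 * passed straight through to __do_irq(), r4 is the base of the irq
 * stack to pivot onto.
 */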
_GLOBAL(call_do_irq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
	mr	r1,r4
	bl	__do_irq
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr

	.section	".toc","aw"
PPC64_CACHES:
	.tc		ppc64_caches[TC],ppc64_caches
	.section	".text"

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 *
 *   flush all bytes from start through stop-1 inclusive
 */

_GLOBAL_TOC(flush_icache_range)
BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 * and in some cases i-cache and d-cache line sizes differ from
 * each other.
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1BLOCKSIZE(r10)	/* Get cache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of cache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
1:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	1b
	sync

/* Now invalidate the instruction cache */

	lwz	r7,ICACHEL1BLOCKSIZE(r10)	/* Get Icache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5
	lwz	r9,ICACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of Icache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
2:	icbi	0,r6
	add	r6,r6,r7
	bdnz	2b
	isync
	blr
_ASM_NOKPROBE_SYMBOL(flush_icache_range)
EXPORT_SYMBOL(flush_icache_range)

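/*
 * Usage sketch (illustrative, not part of this file): after patching
 * an instruction from C, push it to memory and invalidate the stale
 * i-cache line before executing it:
 *
 *	*(unsigned int *)p = new_insn;	// p, new_insn: hypothetical
 *	flush_icache_range((unsigned long)p,
 *			   (unsigned long)p + sizeof(unsigned int));
 */
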
/*
 * Like above, but only do the D-cache.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 */
_GLOBAL_TOC(flush_dcache_range)

/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1BLOCKSIZE(r10)	/* Get dcache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of dcache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
0:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	blr
EXPORT_SYMBOL(flush_dcache_range)
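
/*
 * Usage sketch (illustrative): clean a buffer out to memory, e.g.
 * before a non-snooping device reads it:
 *
 *	flush_dcache_range((unsigned long)buf,
 *			   (unsigned long)buf + len);	// buf, len: hypothetical
 */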
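
/*
 * Like above, but flush and invalidate: dcbf writes each line back
 * and evicts it, so the range is no longer cached afterwards.
 *
 * flush_inval_dcache_range(unsigned long start, unsigned long stop)
 */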
_GLOBAL(flush_inval_dcache_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1BLOCKSIZE(r10)	/* Get dcache block size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGBLOCKSIZE(r10)	/* Get log-2 of dcache block size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	sync
	isync
	mtctr	r8
0:	dcbf	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	blr

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */

BEGIN_FTR_SECTION
	PURGE_PREFETCHED_INS
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)

/* Flush the dcache */
	ld	r7,PPC64_CACHES@toc(r2)
	clrrdi	r3,r3,PAGE_SHIFT		/* Page align */
	lwz	r4,DCACHEL1BLOCKSPERPAGE(r7)	/* Get # dcache blocks per page */
	lwz	r5,DCACHEL1BLOCKSIZE(r7)	/* Get dcache block size */
	mr	r6,r3
	mtctr	r4
0:	dcbst	0,r6
	add	r6,r6,r5
	bdnz	0b
	sync

/* Now invalidate the icache */

	lwz	r4,ICACHEL1BLOCKSPERPAGE(r7)	/* Get # icache blocks per page */
	lwz	r5,ICACHEL1BLOCKSIZE(r7)	/* Get icache block size */
	mtctr	r4
1:	icbi	0,r3
	add	r3,r3,r5
	bdnz	1b
	isync
	blr

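/*
 * Usage sketch (illustrative): make freshly written code in a page
 * visible to instruction fetch:
 *
 *	__flush_dcache_icache(page_address(page));	// page: hypothetical
 */

/*
 * __bswapdi2 is the GCC runtime helper for 64-bit byte swapping: it
 * returns the value in r3 with its eight bytes reversed, i.e. the
 * equivalent of __builtin_bswap64().
 */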
_GLOBAL(__bswapdi2)
EXPORT_SYMBOL(__bswapdi2)
	srdi	r8,r3,32
	rlwinm	r7,r3,8,0xffffffff
	rlwimi	r7,r3,24,0,7
	rlwinm	r9,r8,8,0xffffffff
	rlwimi	r7,r3,24,16,23
	rlwimi	r9,r8,24,0,7
	rlwimi	r9,r8,24,16,23
	sldi	r7,r7,32
	or	r3,r7,r9
	blr

#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
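/*
 * rmci_on/rmci_off set/clear bit 0x100 in the upper word of HID4,
 * which on 970-class CPUs governs cache-inhibited access in real
 * mode (descriptive note; see the 970 user manual for the precise
 * bit definition).
 */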
_GLOBAL(rmci_on)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0
	mfspr	r5,SPRN_HID4
	or	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr

_GLOBAL(rmci_off)
	sync
	isync
	li	r3,0x100
	rldicl	r3,r3,32,0
	mfspr	r5,SPRN_HID4
	andc	r5,r5,r3
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	sync
	blr
#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */

#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr
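
/*
 * C-level view (a sketch of the prototypes as callers see them):
 *
 *	u8   real_readb(volatile u8 __iomem *addr);
 *	void real_writeb(u8 data, volatile u8 __iomem *addr);
 *
 * Both clear MSR_DR and set the HID4 real-mode cache-inhibit bit
 * around a single byte access, then restore the previous state.
 */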
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */

#ifdef CONFIG_PPC_PASEMI

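/*
 * Same real-mode byte accessors for PA Semi parts, using the
 * cache-inhibited load/store instructions behind the LBZCIX/STBCIX
 * macros instead of the HID4 trick above.
 */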
_GLOBAL(real_205_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	LBZCIX(R3,R0,R3)
	isync
	mtmsrd	r7
	sync
	isync
	blr

_GLOBAL(real_205_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	STBCIX(R3,R0,R4)
	isync
	mtmsrd	r7
	sync
	isync
	blr

#endif /* CONFIG_PPC_PASEMI */

#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24-bit register address. This code
 * is 970 specific and will not check the status bits, so you should
 * know what you are doing.
 */
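/*
 * Usage sketch (illustrative): read-modify-write one SCOM register;
 * SOME_BIT is a hypothetical field, not defined by this file:
 *
 *	unsigned long v = scom970_read(addr);
 *	scom970_write(addr, v | SOME_BIT);
 */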
_GLOBAL(scom970_read)
	/* interrupts off */
	mfmsr	r4
	ori	r0,r4,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate the 24-bit SCOM address 8 bits left and mask out its
	 * low 8 bits (including parity). On current CPUs they must be
	 * 0'd, and finally or in the RW bit
	 */
	rlwinm	r3,r3,8,0,15
	ori	r3,r3,0x8000

	/* do the actual scom read */
	sync
	mtspr	SPRN_SCOMC,r3
	isync
	mfspr	r3,SPRN_SCOMD
	isync
	mfspr	r0,SPRN_SCOMC
	isync

	/* XXX: fix up the result on some buggy 970s (ouch! we lost a bit,
	 * bah, that's the best we can do). Not implemented yet as we don't
	 * use the scom on any of the bogus CPUs yet, but it may have to be
	 * done ultimately
	 */

	/* restore interrupts */
	mtmsrd	r4,1
	blr

_GLOBAL(scom970_write)
	/* interrupts off */
	mfmsr	r5
	ori	r0,r5,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate the 24-bit SCOM address 8 bits left and mask out its
	 * low 8 bits (including parity). On current CPUs they must be 0'd.
	 */

	rlwinm	r3,r3,8,0,15

	sync
	mtspr	SPRN_SCOMD,r4      /* write data */
	isync
	mtspr	SPRN_SCOMC,r3      /* write command */
	isync
	mfspr	r3,SPRN_SCOMC
	isync

	/* restore interrupts */
	mtmsrd	r5,1
	blr
#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */

/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away but
 * the slave code for the next one is at addresses 0 to 0x100.
 *
 * This is used by all slaves, even those that did not find a matching
 * paca in the secondary startup code.
 *
 * Physical (hardware) cpu id should be in r3.
 */
_GLOBAL(kexec_wait)
	bl	1f
1:	mflr	r5
	addi	r5,r5,kexec_flag-1b

99:	HMT_LOW
#ifdef CONFIG_KEXEC_CORE	/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	beq	99b
#ifdef CONFIG_PPC_BOOK3S_64
	li	r10,0x60
	mfmsr	r11
	clrrdi	r11,r11,1	/* Clear MSR_LE */
	mtsrr0	r10
	mtsrr1	r11
	rfid
#else
	/* Create TLB entry in book3e_secondary_core_init */
	li	r4,0
	ba	0x60
#endif
#endif

/* this can be in text because we won't change it until we are
 * running in real mode anyway
 */
kexec_flag:
	.long	0

#ifdef CONFIG_KEXEC_CORE
#ifdef CONFIG_PPC_BOOK3E
/*
 * BOOK3E has no real MMU mode, so we have to set up the initial TLB
 * for a core to identity map v:0 to p:0.  This current implementation
 * assumes that 1G is enough for kexec.
 */
kexec_create_tlb:
	/*
	 * Invalidate all non-IPROT TLB entries to avoid any TLB conflict.
	 * IPROT TLB entries should be >= PAGE_OFFSET and thus not conflict.
	 */
	PPC_TLBILX_ALL(0,R0)
	sync
	isync

	mfspr	r10,SPRN_TLB1CFG
	andi.	r10,r10,TLBnCFG_N_ENTRY	/* Extract # entries */
	subi	r10,r10,1	/* Last entry: no conflict with kernel text */
	lis	r9,MAS0_TLBSEL(1)@h
	rlwimi	r9,r10,16,4,15		/* Setup MAS0 = TLBSEL | ESEL(r9) */

/* Set up a temp identity mapping v:0 to p:0 and return to it. */
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC)
#define M_IF_NEEDED	MAS2_M
#else
#define M_IF_NEEDED	0
#endif
	mtspr	SPRN_MAS0,r9

	lis	r9,(MAS1_VALID|MAS1_IPROT)@h
	ori	r9,r9,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
	mtspr	SPRN_MAS1,r9

	LOAD_REG_IMMEDIATE(r9, 0x0 | M_IF_NEEDED)
	mtspr	SPRN_MAS2,r9

	LOAD_REG_IMMEDIATE(r9, 0x0 | MAS3_SR | MAS3_SW | MAS3_SX)
	mtspr	SPRN_MAS3,r9
	li	r9,0
	mtspr	SPRN_MAS7,r9

	tlbwe
	isync
	blr
#endif

/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * switch to real mode
 * mark the paca as no longer used
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)
	bl	real_mode

	li	r4,KEXEC_STATE_REAL_MODE
	stb	r4,PACAKEXECSTATE(r13)
	SYNC

	b	kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
real_mode:	/* assume normal blr return */
#ifdef CONFIG_PPC_BOOK3E
	/* Create an identity mapping. */
	b	kexec_create_tlb
#else
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9
	andc	r10,r12,r10

	mtmsrd	r9,1
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid
#endif

/*
 * kexec_sequence(newstack, start, image, control, clear_all(),
 *                copy_with_mmu_off)
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */

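/*
 * C-level view (a sketch of the prototype as callers see it):
 *
 *	void kexec_sequence(void *newstack, unsigned long start,
 *			    void *image, void *control,
 *			    void (*clear_all)(void),
 *			    bool copy_with_mmu_off);
 */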
_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)

BEGIN_FTR_SECTION
	/*
	 * This is the best time to turn AMR/IAMR off.
	 * Key 0 is used in radix for supervisor<->user protection, but
	 * on hash key 0 is reserved; ideally we want to enter with a
	 * clean state.
	 * NOTE: we rely on r0 being 0 from above.
	 */
	mtspr	SPRN_IAMR,r0
BEGIN_FTR_SECTION_NESTED(42)
	mtspr	SPRN_AMOR,r0
END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-STACK_FRAME_OVERHEAD-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* copy_with_mmu_off */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
#ifdef CONFIG_PPC_BOOK3E
	wrteei	0
#else
	mfmsr	r3
	rlwinm	r3,r3,0,17,15
	mtmsrd	r3,1
#endif

	/* We need to turn the MMU off unless we are in hash mode
	 * under a hypervisor
	 */
	cmpdi	r26,0
	beq	1f
	bl	real_mode
1:
	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	kexec_copy_flush	/* (image) */

	/* turn off mmu now if not done earlier */
	cmpdi	r26,0
	bne	1f
	bl	real_mode

	/* copy 0x100 bytes starting at start to 0 */
1:	li	r3,0
	mr	r4,r30		/* start, aka phys mem offset */
	li	r5,0x100
	li	r6,0
	bl	copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	mflr	r5
	li	r6,1
	stw	r6,kexec_flag-1b(r5)

	cmpdi	r27,0
	beq	1f

	/* clear out hardware hash page table and tlb */
#ifdef PPC64_ELF_ABI_v1
	ld	r12,0(r27)		/* deref function descriptor */
#else
	mr	r12,r27
#endif
	mtctr	r12
	bctrl				/* mmu_hash_ops.hpte_clear_all(void); */

/*
 *   kexec image calling is:
 *      the first 0x100 bytes of the entry point are copied to 0
 *
 *      all slaves branch to slave = 0x60 (absolute)
 *              slave(phys_cpu_id);
 *
 *      master goes to start = entry point
 *              start(phys_cpu_id, start, 0);
 *
 *
 *   a wrapper is needed to call existing kernels, here is an approximate
 *   description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */
1:	mr	r3,r25	# my phys cpu
	mr	r4,r30	# start, aka phys mem offset
	mtlr	r4
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC_CORE */