v5.14.15
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 * This file contains miscellaneous low-level functions.
  4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  5 *
  6 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
  7 * and Paul Mackerras.
  8 *
  9 */
 10
 11#include <linux/sys.h>
 12#include <asm/unistd.h>
 13#include <asm/errno.h>
 14#include <asm/reg.h>
 15#include <asm/page.h>
 16#include <asm/cache.h>
 17#include <asm/cputable.h>
 18#include <asm/mmu.h>
 19#include <asm/ppc_asm.h>
 20#include <asm/thread_info.h>
 21#include <asm/asm-offsets.h>
 22#include <asm/processor.h>
 23#include <asm/bug.h>
 24#include <asm/ptrace.h>
 25#include <asm/export.h>
 26#include <asm/feature-fixups.h>
 27
 28	.text
 29
 30/*
 31 * This returns the high 64 bits of the product of two 64-bit numbers.
 32 */
 33_GLOBAL(mulhdu)
 34	cmpwi	r6,0
 35	cmpwi	cr1,r3,0
 36	mr	r10,r4
 37	mulhwu	r4,r4,r5
 38	beq	1f
 39	mulhwu	r0,r10,r6
 40	mullw	r7,r10,r5
 41	addc	r7,r0,r7
 42	addze	r4,r4
 431:	beqlr	cr1		/* all done if high part of A is 0 */
 44	mullw	r9,r3,r5
 45	mulhwu	r10,r3,r5
 46	beq	2f
 47	mullw	r0,r3,r6
 48	mulhwu	r8,r3,r6
 49	addc	r7,r0,r7
 50	adde	r4,r4,r8
 51	addze	r10,r10
 522:	addc	r4,r4,r9
 53	addze	r3,r10
 54	blr
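/*
 * For reference, a hedged C sketch of the schoolbook decomposition that
 * mulhdu implements with mullw/mulhwu (illustrative only, not part of
 * the kernel build; the function name is made up). The assembly also
 * short-circuits partial products when a 32-bit half of an operand is
 * zero, which this sketch does not bother to do:
 *
 *	#include <stdint.h>
 *
 *	static uint64_t mulhdu_sketch(uint64_t a, uint64_t b)
 *	{
 *		uint32_t ah = a >> 32, al = (uint32_t)a;
 *		uint32_t bh = b >> 32, bl = (uint32_t)b;
 *		uint64_t lo  = (uint64_t)al * bl;	// low partial product
 *		uint64_t m1  = (uint64_t)ah * bl;	// middle partial products
 *		uint64_t m2  = (uint64_t)al * bh;
 *		uint64_t hi  = (uint64_t)ah * bh;	// high partial product
 *		// carries out of bits 32..63 propagate into the high word
 *		uint64_t mid = (lo >> 32) + (uint32_t)m1 + (uint32_t)m2;
 *
 *		return hi + (m1 >> 32) + (m2 >> 32) + (mid >> 32);
 *	}
 */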
 55
 56/*
 57 * reloc_got2 runs through the .got2 section adding an offset
 58 * to each entry.
 59 */
 60_GLOBAL(reloc_got2)
 61	mflr	r11
 62	lis	r7,__got2_start@ha
 63	addi	r7,r7,__got2_start@l
 64	lis	r8,__got2_end@ha
 65	addi	r8,r8,__got2_end@l
 66	subf	r8,r7,r8
 67	srwi.	r8,r8,2
 68	beqlr
 69	mtctr	r8
 70	bl	1f
 711:	mflr	r0
 72	lis	r4,1b@ha
 73	addi	r4,r4,1b@l
 74	subf	r0,r4,r0
 75	add	r7,r0,r7
 762:	lwz	r0,0(r7)
 77	add	r0,r0,r3
 78	stw	r0,0(r7)
 79	addi	r7,r7,4
 80	bdnz	2b
 81	mtlr	r11
 82	blr
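/*
 * A hedged C rendering of the loop above (names are made up).
 * reloc_got2 first uses the bl/mflr trick to find out where it was
 * actually loaded, since it can run before relocation; the sketch
 * assumes the __got2_start/__got2_end pointers are already usable:
 *
 *	static void reloc_got2_sketch(unsigned long offset,
 *				      unsigned int *got2_start,
 *				      unsigned int *got2_end)
 *	{
 *		unsigned int *p;
 *
 *		for (p = got2_start; p < got2_end; p++)
 *			*p += offset;	// adjust one 32-bit entry
 *	}
 */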
 83
 84/*
 85 * call_setup_cpu - call the setup_cpu function for this cpu
 86 * r3 = data offset, r24 = cpu number
 87 *
 88 * Setup function is called with:
 89 *   r3 = data offset
 90 *   r4 = ptr to CPU spec (relocated)
 91 */
 92_GLOBAL(call_setup_cpu)
 93	addis	r4,r3,cur_cpu_spec@ha
 94	addi	r4,r4,cur_cpu_spec@l
 95	lwz	r4,0(r4)
 96	add	r4,r4,r3
 97	lwz	r5,CPU_SPEC_SETUP(r4)
 98	cmpwi	0,r5,0
 99	add	r5,r5,r3
100	beqlr
101	mtctr	r5
102	bctr
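/*
 * Roughly what call_setup_cpu does, as a hedged C sketch (struct layout
 * and names are illustrative; the real cpu_spec lives in cputable.h).
 * Both the spec pointer and the setup function pointer are adjusted by
 * the data offset because this can run before the kernel is relocated;
 * the asm applies the same offset when locating cur_cpu_spec itself:
 *
 *	struct cpu_spec_sketch {
 *		void (*cpu_setup)(unsigned long offset,
 *				  struct cpu_spec_sketch *spec);
 *	};
 *	extern struct cpu_spec_sketch *cur_cpu_spec;
 *
 *	static void call_setup_cpu_sketch(unsigned long offset)
 *	{
 *		struct cpu_spec_sketch *spec = (struct cpu_spec_sketch *)
 *				((unsigned long)cur_cpu_spec + offset);
 *		void (*setup)(unsigned long, struct cpu_spec_sketch *);
 *
 *		setup = spec->cpu_setup;
 *		if (!setup)		// asm tests the unrelocated pointer
 *			return;
 *		setup = (void (*)(unsigned long, struct cpu_spec_sketch *))
 *				((unsigned long)setup + offset);
 *		setup(offset, spec);
 *	}
 */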
103
104#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_PPC_BOOK3S_32)
105
 106/* This gets called by via-pmu.c to switch the PLL selection
 107 * on the 750fx CPU. This function should really be moved to some
 108 * other place (as should most of the cpufreq code in via-pmu).
 109 */
110_GLOBAL(low_choose_750fx_pll)
111	/* Clear MSR:EE */
112	mfmsr	r7
113	rlwinm	r0,r7,0,17,15
114	mtmsr	r0
115
116	/* If switching to PLL1, disable HID0:BTIC */
117	cmplwi	cr0,r3,0
118	beq	1f
119	mfspr	r5,SPRN_HID0
120	rlwinm	r5,r5,0,27,25
121	sync
122	mtspr	SPRN_HID0,r5
123	isync
124	sync
125
1261:
127	/* Calc new HID1 value */
 128	mfspr	r4,SPRN_HID1	/* Read the current HID1 value */
 129	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from parameter */
 130	rlwinm	r4,r4,0,16,14	/* Clear HID1:PS (rlwimi could merge these) */
131	or	r4,r4,r5
132	mtspr	SPRN_HID1,r4
133
134#ifdef CONFIG_SMP
135	/* Store new HID1 image */
136	lwz	r6,TASK_CPU(r2)
137	slwi	r6,r6,2
138#else
139	li	r6, 0
140#endif
141	addis	r6,r6,nap_save_hid1@ha
142	stw	r4,nap_save_hid1@l(r6)
143
144	/* If switching to PLL0, enable HID0:BTIC */
145	cmplwi	cr0,r3,0
146	bne	1f
147	mfspr	r5,SPRN_HID0
148	ori	r5,r5,HID0_BTIC
149	sync
150	mtspr	SPRN_HID0,r5
151	isync
152	sync
153
1541:
155	/* Return */
156	mtmsr	r7
157	blr
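/*
 * The 750fx PLL switch above, restated as a hedged C-like sketch.
 * Register and bit names are assumed to match reg.h (HID0_BTIC,
 * HID1_PS); nap_save_hid1 is the per-CPU save area the asm indexes by
 * cpu*4, and the sync/isync bracketing around the HID0 updates is
 * omitted here:
 *
 *	void low_choose_750fx_pll_sketch(int pll)	// 0 = PLL0, 1 = PLL1
 *	{
 *		unsigned long msr = mfmsr();
 *		unsigned long hid1;
 *
 *		mtmsr(msr & ~MSR_EE);		// no interrupts mid-switch
 *		if (pll)			// PLL1: BTIC must be off
 *			mtspr(SPRN_HID0, mfspr(SPRN_HID0) & ~HID0_BTIC);
 *		hid1 = mfspr(SPRN_HID1) & ~HID1_PS;
 *		if (pll)
 *			hid1 |= HID1_PS;
 *		mtspr(SPRN_HID1, hid1);
 *		nap_save_hid1[smp_processor_id()] = hid1;
 *		if (!pll)			// PLL0: BTIC may be on again
 *			mtspr(SPRN_HID0, mfspr(SPRN_HID0) | HID0_BTIC);
 *		mtmsr(msr);			// restore MSR:EE
 *	}
 */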
158
159_GLOBAL(low_choose_7447a_dfs)
160	/* Clear MSR:EE */
161	mfmsr	r7
162	rlwinm	r0,r7,0,17,15
163	mtmsr	r0
164	
165	/* Calc new HID1 value */
166	mfspr	r4,SPRN_HID1
167	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
168	sync
169	mtspr	SPRN_HID1,r4
170	sync
171	isync
172
173	/* Return */
174	mtmsr	r7
175	blr
176
177#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */
178
179#ifdef CONFIG_40x
180
181/*
182 * Do an IO access in real mode
183 */
184_GLOBAL(real_readb)
185	mfmsr	r7
186	rlwinm	r0,r7,0,~MSR_DR
187	sync
188	mtmsr	r0
189	sync
190	isync
191	lbz	r3,0(r3)
192	sync
193	mtmsr	r7
194	sync
195	isync
196	blr
197_ASM_NOKPROBE_SYMBOL(real_readb)
198
 199/*
200 * Do an IO access in real mode
201 */
202_GLOBAL(real_writeb)
203	mfmsr	r7
204	rlwinm	r0,r7,0,~MSR_DR
205	sync
206	mtmsr	r0
207	sync
208	isync
209	stb	r3,0(r4)
210	sync
211	mtmsr	r7
212	sync
213	isync
214	blr
215_ASM_NOKPROBE_SYMBOL(real_writeb)
216
217#endif /* CONFIG_40x */
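/*
 * What "real mode" means here, as a hedged C-like sketch: data address
 * translation (MSR:DR) is switched off around the access, so the passed
 * address is treated as a physical address (names as in reg.h; the
 * sync/isync bracketing done by the asm is omitted):
 *
 *	unsigned char real_readb_sketch(volatile unsigned char *paddr)
 *	{
 *		unsigned long msr = mfmsr();
 *		unsigned char v;
 *
 *		mtmsr(msr & ~MSR_DR);	// translation off
 *		v = *paddr;		// byte load straight to the bus
 *		mtmsr(msr);		// translation back on
 *		return v;
 *	}
 */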
218
219/*
220 * Copy a whole page.  We use the dcbz instruction on the destination
221 * to reduce memory traffic (it eliminates the unnecessary reads of
222 * the destination into cache).  This requires that the destination
223 * is cacheable.
224 */
225#define COPY_16_BYTES		\
226	lwz	r6,4(r4);	\
227	lwz	r7,8(r4);	\
228	lwz	r8,12(r4);	\
229	lwzu	r9,16(r4);	\
230	stw	r6,4(r3);	\
231	stw	r7,8(r3);	\
232	stw	r8,12(r3);	\
233	stwu	r9,16(r3)
234
235_GLOBAL(copy_page)
236	rlwinm	r5, r3, 0, L1_CACHE_BYTES - 1
237	addi	r3,r3,-4
238
2390:	twnei	r5, 0	/* WARN if r3 is not cache aligned */
240	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
241
242	addi	r4,r4,-4
243
244	li	r5,4
245
246#if MAX_COPY_PREFETCH > 1
247	li	r0,MAX_COPY_PREFETCH
248	li	r11,4
249	mtctr	r0
25011:	dcbt	r11,r4
251	addi	r11,r11,L1_CACHE_BYTES
252	bdnz	11b
253#else /* MAX_COPY_PREFETCH == 1 */
254	dcbt	r5,r4
255	li	r11,L1_CACHE_BYTES+4
256#endif /* MAX_COPY_PREFETCH */
257	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
258	crclr	4*cr0+eq
2592:
260	mtctr	r0
2611:
262	dcbt	r11,r4
263	dcbz	r5,r3
264	COPY_16_BYTES
265#if L1_CACHE_BYTES >= 32
266	COPY_16_BYTES
267#if L1_CACHE_BYTES >= 64
268	COPY_16_BYTES
269	COPY_16_BYTES
270#if L1_CACHE_BYTES >= 128
271	COPY_16_BYTES
272	COPY_16_BYTES
273	COPY_16_BYTES
274	COPY_16_BYTES
275#endif
276#endif
277#endif
278	bdnz	1b
279	beqlr
280	crnot	4*cr0+eq,4*cr0+eq
281	li	r0,MAX_COPY_PREFETCH
282	li	r11,4
283	b	2b
284EXPORT_SYMBOL(copy_page)
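/*
 * The core idea of copy_page, as a hedged C sketch with inline asm for
 * the two cache ops. The line and page sizes are assumptions standing
 * in for L1_CACHE_BYTES/PAGE_SIZE, and the real code's prefetch-depth
 * and end-of-page handling is more careful than this:
 *
 *	#include <string.h>
 *
 *	#define LINE	32	// stand-in for L1_CACHE_BYTES
 *	#define PSIZE	4096	// stand-in for PAGE_SIZE
 *
 *	static void copy_page_sketch(void *dst, const void *src)
 *	{
 *		char *d = dst;
 *		const char *s = src;
 *		int i;
 *
 *		for (i = 0; i < PSIZE; i += LINE) {
 *			// dcbt: warm the next source line
 *			asm volatile("dcbt 0,%0" : : "r"(s + i + LINE));
 *			// dcbz: allocate and zero the destination line,
 *			// so it is never read in from memory
 *			asm volatile("dcbz 0,%0" : : "r"(d + i) : "memory");
 *			memcpy(d + i, s + i, LINE);
 *		}
 *	}
 */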
285
286/*
287 * Extended precision shifts.
288 *
289 * Updated to be valid for shift counts from 0 to 63 inclusive.
290 * -- Gabriel
291 *
292 * R3/R4 has 64 bit value
293 * R5    has shift count
294 * result in R3/R4
295 *
296 *  ashrdi3: arithmetic right shift (sign propagation)	
297 *  lshrdi3: logical right shift
298 *  ashldi3: left shift
299 */
300_GLOBAL(__ashrdi3)
301	subfic	r6,r5,32
302	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
303	addi	r7,r5,32	# could be xori, or addi with -32
304	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
305	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
306	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
307	or	r4,r4,r6	# LSW |= t1
308	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
309	sraw	r3,r3,r5	# MSW = MSW >> count
310	or	r4,r4,r7	# LSW |= t2
311	blr
312EXPORT_SYMBOL(__ashrdi3)
313
314_GLOBAL(__ashldi3)
315	subfic	r6,r5,32
316	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
317	addi	r7,r5,32	# could be xori, or addi with -32
318	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
319	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
320	or	r3,r3,r6	# MSW |= t1
321	slw	r4,r4,r5	# LSW = LSW << count
322	or	r3,r3,r7	# MSW |= t2
323	blr
324EXPORT_SYMBOL(__ashldi3)
325
326_GLOBAL(__lshrdi3)
327	subfic	r6,r5,32
328	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
329	addi	r7,r5,32	# could be xori, or addi with -32
330	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
331	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
332	or	r4,r4,r6	# LSW |= t1
333	srw	r3,r3,r5	# MSW = MSW >> count
334	or	r4,r4,r7	# LSW |= t2
335	blr
336EXPORT_SYMBOL(__lshrdi3)
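/*
 * Semantics of the three shift helpers, as a hedged C sketch of
 * __lshrdi3 (the others differ only in direction and sign extension).
 * The assembly is branch-free because srw/slw yield 0 for shift counts
 * of 32..63, which replaces the explicit case split below:
 *
 *	#include <stdint.h>
 *
 *	static uint64_t lshrdi3_sketch(uint64_t v, unsigned int count)
 *	{
 *		uint32_t msw = v >> 32, lsw = (uint32_t)v;
 *		uint32_t hi, lo;
 *
 *		if (count == 0)
 *			return v;
 *		if (count < 32) {
 *			lo = (lsw >> count) | (msw << (32 - count));
 *			hi = msw >> count;
 *		} else {			// 32 <= count <= 63
 *			lo = msw >> (count - 32);
 *			hi = 0;
 *		}
 *		return ((uint64_t)hi << 32) | lo;
 *	}
 */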
337
338/*
339 * 64-bit comparison: __cmpdi2(s64 a, s64 b)
340 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
341 */
342_GLOBAL(__cmpdi2)
343	cmpw	r3,r5
344	li	r3,1
345	bne	1f
346	cmplw	r4,r6
347	beqlr
3481:	li	r3,0
349	bltlr
350	li	r3,2
351	blr
352EXPORT_SYMBOL(__cmpdi2)
353/*
354 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
355 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
356 */
357_GLOBAL(__ucmpdi2)
358	cmplw	r3,r5
359	li	r3,1
360	bne	1f
361	cmplw	r4,r6
362	beqlr
3631:	li	r3,0
364	bltlr
365	li	r3,2
366	blr
367EXPORT_SYMBOL(__ucmpdi2)
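/*
 * The libgcc contract these implement, as a short hedged C sketch
 * (__cmpdi2 is the same with a signed comparison):
 *
 *	#include <stdint.h>
 *
 *	static int ucmpdi2_sketch(uint64_t a, uint64_t b)
 *	{
 *		if (a < b)
 *			return 0;
 *		return (a == b) ? 1 : 2;
 *	}
 */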
368
369_GLOBAL(__bswapdi2)
370	rotlwi  r9,r4,8
371	rotlwi  r10,r3,8
372	rlwimi  r9,r4,24,0,7
373	rlwimi  r10,r3,24,0,7
374	rlwimi  r9,r4,24,16,23
375	rlwimi  r10,r3,24,16,23
376	mr      r3,r9
377	mr      r4,r10
378	blr
379EXPORT_SYMBOL(__bswapdi2)
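/*
 * A hedged C sketch of __bswapdi2: byte-swap each 32-bit half (which is
 * what each rotlwi/rlwimi triple above does) and exchange the halves:
 *
 *	#include <stdint.h>
 *
 *	static uint32_t bswap32_sketch(uint32_t x)
 *	{
 *		return (x << 24) | ((x & 0x0000ff00) << 8) |
 *		       ((x >> 8) & 0x0000ff00) | (x >> 24);
 *	}
 *
 *	static uint64_t bswapdi2_sketch(uint64_t v)
 *	{
 *		return ((uint64_t)bswap32_sketch((uint32_t)v) << 32) |
 *		       bswap32_sketch((uint32_t)(v >> 32));
 *	}
 */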
380
381#ifdef CONFIG_SMP
382_GLOBAL(start_secondary_resume)
383	/* Reset stack */
384	rlwinm	r1, r1, 0, 0, 31 - THREAD_SHIFT
385	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
386	li	r3,0
387	stw	r3,0(r1)		/* Zero the stack frame pointer	*/
388	bl	start_secondary
389	b	.
390#endif /* CONFIG_SMP */
v5.4
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/*
   3 * This file contains miscellaneous low-level functions.
   4 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   5 *
   6 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
   7 * and Paul Mackerras.
   8 *
   9 * kexec bits:
  10 * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
  11 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
  12 * PPC44x port. Copyright (C) 2011,  IBM Corporation
  13 * 		Author: Suzuki Poulose <suzuki@in.ibm.com>
  14 */
  15
  16#include <linux/sys.h>
  17#include <asm/unistd.h>
  18#include <asm/errno.h>
  19#include <asm/reg.h>
  20#include <asm/page.h>
  21#include <asm/cache.h>
  22#include <asm/cputable.h>
  23#include <asm/mmu.h>
  24#include <asm/ppc_asm.h>
  25#include <asm/thread_info.h>
  26#include <asm/asm-offsets.h>
  27#include <asm/processor.h>
  28#include <asm/kexec.h>
  29#include <asm/bug.h>
  30#include <asm/ptrace.h>
  31#include <asm/export.h>
  32#include <asm/feature-fixups.h>
  33
  34	.text
  35
  36/*
  37 * We store the saved ksp_limit in the unused part
   38 * of STACK_FRAME_OVERHEAD.
  39 */
  40_GLOBAL(call_do_softirq)
  41	mflr	r0
  42	stw	r0,4(r1)
  43	lwz	r10,THREAD+KSP_LIMIT(r2)
  44	stw	r3, THREAD+KSP_LIMIT(r2)
  45	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
  46	mr	r1,r3
  47	stw	r10,8(r1)
  48	bl	__do_softirq
  49	lwz	r10,8(r1)
  50	lwz	r1,0(r1)
  51	lwz	r0,4(r1)
  52	stw	r10,THREAD+KSP_LIMIT(r2)
  53	mtlr	r0
  54	blr
  55
  56/*
  57 * void call_do_irq(struct pt_regs *regs, void *sp);
  58 */
  59_GLOBAL(call_do_irq)
  60	mflr	r0
  61	stw	r0,4(r1)
  62	lwz	r10,THREAD+KSP_LIMIT(r2)
  63	stw	r4, THREAD+KSP_LIMIT(r2)
  64	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
  65	mr	r1,r4
  66	stw	r10,8(r1)
  67	bl	__do_irq
  68	lwz	r10,8(r1)
  69	lwz	r1,0(r1)
  70	lwz	r0,4(r1)
  71	stw	r10,THREAD+KSP_LIMIT(r2)
  72	mtlr	r0
  73	blr
  74
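/*
 * The slot usage on the freshly switched-to stack, as a hedged sketch
 * (this struct is illustrative, not a real kernel type): stwu writes
 * the back chain, and the old ksp_limit is parked at offset 8, inside
 * the otherwise unused STACK_FRAME_OVERHEAD region, so it can be
 * restored after __do_irq/__do_softirq return.
 *
 *	struct irq_stack_frame_sketch {
 *		unsigned long back_chain;	// 0(r1): previous r1
 *		unsigned long lr_save;		// 4(r1): callee's LR save word
 *		unsigned long saved_ksp_limit;	// 8(r1): old thread ksp_limit
 *	};
 */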
  75/*
  76 * This returns the high 64 bits of the product of two 64-bit numbers.
  77 */
  78_GLOBAL(mulhdu)
  79	cmpwi	r6,0
  80	cmpwi	cr1,r3,0
  81	mr	r10,r4
  82	mulhwu	r4,r4,r5
  83	beq	1f
  84	mulhwu	r0,r10,r6
  85	mullw	r7,r10,r5
  86	addc	r7,r0,r7
  87	addze	r4,r4
  881:	beqlr	cr1		/* all done if high part of A is 0 */
  89	mullw	r9,r3,r5
  90	mulhwu	r10,r3,r5
  91	beq	2f
  92	mullw	r0,r3,r6
  93	mulhwu	r8,r3,r6
  94	addc	r7,r0,r7
  95	adde	r4,r4,r8
  96	addze	r10,r10
  972:	addc	r4,r4,r9
  98	addze	r3,r10
  99	blr
 100
 101/*
 102 * reloc_got2 runs through the .got2 section adding an offset
 103 * to each entry.
 104 */
 105_GLOBAL(reloc_got2)
 106	mflr	r11
 107	lis	r7,__got2_start@ha
 108	addi	r7,r7,__got2_start@l
 109	lis	r8,__got2_end@ha
 110	addi	r8,r8,__got2_end@l
 111	subf	r8,r7,r8
 112	srwi.	r8,r8,2
 113	beqlr
 114	mtctr	r8
 115	bl	1f
 1161:	mflr	r0
 117	lis	r4,1b@ha
 118	addi	r4,r4,1b@l
 119	subf	r0,r4,r0
 120	add	r7,r0,r7
 1212:	lwz	r0,0(r7)
 122	add	r0,r0,r3
 123	stw	r0,0(r7)
 124	addi	r7,r7,4
 125	bdnz	2b
 126	mtlr	r11
 127	blr
 128
 129/*
 130 * call_setup_cpu - call the setup_cpu function for this cpu
 131 * r3 = data offset, r24 = cpu number
 132 *
 133 * Setup function is called with:
 134 *   r3 = data offset
 135 *   r4 = ptr to CPU spec (relocated)
 136 */
 137_GLOBAL(call_setup_cpu)
 138	addis	r4,r3,cur_cpu_spec@ha
 139	addi	r4,r4,cur_cpu_spec@l
 140	lwz	r4,0(r4)
 141	add	r4,r4,r3
 142	lwz	r5,CPU_SPEC_SETUP(r4)
 143	cmpwi	0,r5,0
 144	add	r5,r5,r3
 145	beqlr
 146	mtctr	r5
 147	bctr
 148
 149#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_PPC_BOOK3S_32)
 150
  151/* This gets called by via-pmu.c to switch the PLL selection
  152 * on the 750fx CPU. This function should really be moved to some
  153 * other place (as should most of the cpufreq code in via-pmu).
  154 */
 155_GLOBAL(low_choose_750fx_pll)
 156	/* Clear MSR:EE */
 157	mfmsr	r7
 158	rlwinm	r0,r7,0,17,15
 159	mtmsr	r0
 160
 161	/* If switching to PLL1, disable HID0:BTIC */
 162	cmplwi	cr0,r3,0
 163	beq	1f
 164	mfspr	r5,SPRN_HID0
 165	rlwinm	r5,r5,0,27,25
 166	sync
 167	mtspr	SPRN_HID0,r5
 168	isync
 169	sync
 170
 1711:
 172	/* Calc new HID1 value */
  173	mfspr	r4,SPRN_HID1	/* Read the current HID1 value */
  174	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from parameter */
  175	rlwinm	r4,r4,0,16,14	/* Clear HID1:PS (rlwimi could merge these) */
 176	or	r4,r4,r5
 177	mtspr	SPRN_HID1,r4
 178
 179#ifdef CONFIG_SMP
 180	/* Store new HID1 image */
 181	lwz	r6,TASK_CPU(r2)
 182	slwi	r6,r6,2
 183#else
 184	li	r6, 0
 185#endif
 186	addis	r6,r6,nap_save_hid1@ha
 187	stw	r4,nap_save_hid1@l(r6)
 188
 189	/* If switching to PLL0, enable HID0:BTIC */
 190	cmplwi	cr0,r3,0
 191	bne	1f
 192	mfspr	r5,SPRN_HID0
 193	ori	r5,r5,HID0_BTIC
 194	sync
 195	mtspr	SPRN_HID0,r5
 196	isync
 197	sync
 198
 1991:
 200	/* Return */
 201	mtmsr	r7
 202	blr
 203
 204_GLOBAL(low_choose_7447a_dfs)
 205	/* Clear MSR:EE */
 206	mfmsr	r7
 207	rlwinm	r0,r7,0,17,15
 208	mtmsr	r0
 209	
 210	/* Calc new HID1 value */
 211	mfspr	r4,SPRN_HID1
 212	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
 213	sync
 214	mtspr	SPRN_HID1,r4
 215	sync
 216	isync
 217
 218	/* Return */
 219	mtmsr	r7
 220	blr
 221
 222#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */
 223
 224/*
  225 * Clear the MSR bits given in nmask, then "or" some values on:
 226 *     _nmask_and_or_msr(nmask, value_to_or)
 227 */
 228_GLOBAL(_nmask_and_or_msr)
 229	mfmsr	r0		/* Get current msr */
 230	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
 231	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
 232	SYNC			/* Some chip revs have problems here... */
 233	mtmsr	r0		/* Update machine state */
 234	isync
 235	blr			/* Done */
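/*
 * Equivalent one-liner, as a hedged C sketch (andc is AND-with-
 * complement, exactly the & ~ below):
 *
 *	static inline unsigned long nmask_and_or_msr_sketch(unsigned long msr,
 *			unsigned long nmask, unsigned long value)
 *	{
 *		return (msr & ~nmask) | value;
 *	}
 */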
 236
 237#ifdef CONFIG_40x
 238
 239/*
 240 * Do an IO access in real mode
 241 */
 242_GLOBAL(real_readb)
 243	mfmsr	r7
 244	rlwinm	r0,r7,0,~MSR_DR
 245	sync
 246	mtmsr	r0
 247	sync
 248	isync
 249	lbz	r3,0(r3)
 250	sync
 251	mtmsr	r7
 252	sync
 253	isync
 254	blr
 255
  256/*
 257 * Do an IO access in real mode
 258 */
 259_GLOBAL(real_writeb)
 260	mfmsr	r7
 261	rlwinm	r0,r7,0,~MSR_DR
 262	sync
 263	mtmsr	r0
 264	sync
 265	isync
 266	stb	r3,0(r4)
 267	sync
 268	mtmsr	r7
 269	sync
 270	isync
 271	blr
 272
 273#endif /* CONFIG_40x */
 274
 275
 276/*
 277 * Flush instruction cache.
 278 * This is a no-op on the 601.
 279 */
 280#ifndef CONFIG_PPC_8xx
 281_GLOBAL(flush_instruction_cache)
 282#if defined(CONFIG_4xx)
 283#ifdef CONFIG_403GCX
 284	li      r3, 512
 285	mtctr   r3
 286	lis     r4, KERNELBASE@h
 2871:	iccci   0, r4
 288	addi    r4, r4, 16
 289	bdnz    1b
 290#else
 291	lis	r3, KERNELBASE@h
 292	iccci	0,r3
 293#endif
 294#elif defined(CONFIG_FSL_BOOKE)
 295#ifdef CONFIG_E200
 296	mfspr   r3,SPRN_L1CSR0
 297	ori     r3,r3,L1CSR0_CFI|L1CSR0_CLFC
 298	/* msync; isync recommended here */
 299	mtspr   SPRN_L1CSR0,r3
 300	isync
 301	blr
 302#endif
 303	mfspr	r3,SPRN_L1CSR1
 304	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
 305	mtspr	SPRN_L1CSR1,r3
 306#elif defined(CONFIG_PPC_BOOK3S_601)
 307	blr			/* for 601, do nothing */
 308#else
 309	/* 603/604 processor - use invalidate-all bit in HID0 */
 310	mfspr	r3,SPRN_HID0
 311	ori	r3,r3,HID0_ICFI
 312	mtspr	SPRN_HID0,r3
 313#endif /* CONFIG_4xx */
 314	isync
 315	blr
 316EXPORT_SYMBOL(flush_instruction_cache)
 317#endif /* CONFIG_PPC_8xx */
 318
 319/*
 320 * Write any modified data cache blocks out to memory
 321 * and invalidate the corresponding instruction cache blocks.
 322 * This is a no-op on the 601.
 323 *
 324 * flush_icache_range(unsigned long start, unsigned long stop)
 325 */
 326_GLOBAL(flush_icache_range)
 327#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200)
 328	PURGE_PREFETCHED_INS
 329	blr				/* for 601 and e200, do nothing */
 330#else
 331	rlwinm	r3,r3,0,0,31 - L1_CACHE_SHIFT
 332	subf	r4,r3,r4
 333	addi	r4,r4,L1_CACHE_BYTES - 1
 334	srwi.	r4,r4,L1_CACHE_SHIFT
 335	beqlr
 336	mtctr	r4
 337	mr	r6,r3
 3381:	dcbst	0,r3
 339	addi	r3,r3,L1_CACHE_BYTES
 340	bdnz	1b
 341	sync				/* wait for dcbst's to get to ram */
 342#ifndef CONFIG_44x
 343	mtctr	r4
 3442:	icbi	0,r6
 345	addi	r6,r6,L1_CACHE_BYTES
 346	bdnz	2b
 347#else
 348	/* Flash invalidate on 44x because we are passed kmapped addresses and
 349	   this doesn't work for userspace pages due to the virtually tagged
 350	   icache.  Sigh. */
 351	iccci	0, r0
 352#endif
 353	sync				/* additional sync needed on g4 */
 354	isync
 355	blr
 356#endif
 357_ASM_NOKPROBE_SYMBOL(flush_icache_range)
 358EXPORT_SYMBOL(flush_icache_range)
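/*
 * The two-pass structure of flush_icache_range, as a hedged C sketch
 * with inline asm for the cache ops (the line size is an assumption;
 * the real code uses L1_CACHE_BYTES and has per-core variants):
 *
 *	static void flush_icache_range_sketch(unsigned long start,
 *					      unsigned long stop)
 *	{
 *		const unsigned long line = 32;
 *		unsigned long p;
 *
 *		start &= ~(line - 1);			// round down to a line
 *		for (p = start; p < stop; p += line)	// push dirty data out
 *			asm volatile("dcbst 0,%0" : : "r"(p) : "memory");
 *		asm volatile("sync");			// wait for the stores
 *		for (p = start; p < stop; p += line)	// drop stale icache
 *			asm volatile("icbi 0,%0" : : "r"(p) : "memory");
 *		asm volatile("sync; isync");		// kill prefetched insns
 *	}
 */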
 359
 360/*
 361 * Flush a particular page from the data cache to RAM.
 362 * Note: this is necessary because the instruction cache does *not*
 363 * snoop from the data cache.
 364 * This is a no-op on the 601 and e200 which have a unified cache.
 365 *
 366 *	void __flush_dcache_icache(void *page)
 367 */
 368_GLOBAL(__flush_dcache_icache)
 369#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200)
 370	PURGE_PREFETCHED_INS
 371	blr
 372#else
 373	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
 374	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
 375	mtctr	r4
 376	mr	r6,r3
 3770:	dcbst	0,r3				/* Write line to ram */
 378	addi	r3,r3,L1_CACHE_BYTES
 379	bdnz	0b
 380	sync
 381#ifdef CONFIG_44x
 382	/* We don't flush the icache on 44x. Those have a virtual icache
 383	 * and we don't have access to the virtual address here (it's
 384	 * not the page vaddr but where it's mapped in user space). The
 385	 * flushing of the icache on these is handled elsewhere, when
 386	 * a change in the address space occurs, before returning to
 387	 * user space
 388	 */
 389BEGIN_MMU_FTR_SECTION
 390	blr
 391END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
 392#endif /* CONFIG_44x */
 393	mtctr	r4
 3941:	icbi	0,r6
 395	addi	r6,r6,L1_CACHE_BYTES
 396	bdnz	1b
 397	sync
 398	isync
 399	blr
 400#endif
 401
 402#ifndef CONFIG_BOOKE
 403/*
 404 * Flush a particular page from the data cache to RAM, identified
 405 * by its physical address.  We turn off the MMU so we can just use
 406 * the physical address (this may be a highmem page without a kernel
 407 * mapping).
 408 *
 409 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 410 */
 411_GLOBAL(__flush_dcache_icache_phys)
 412#if defined(CONFIG_PPC_BOOK3S_601) || defined(CONFIG_E200)
 413	PURGE_PREFETCHED_INS
 414	blr					/* for 601 and e200, do nothing */
 415#else
 416	mfmsr	r10
 417	rlwinm	r0,r10,0,28,26			/* clear DR */
 418	mtmsr	r0
 419	isync
 420	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
 421	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
 422	mtctr	r4
 423	mr	r6,r3
 4240:	dcbst	0,r3				/* Write line to ram */
 425	addi	r3,r3,L1_CACHE_BYTES
 426	bdnz	0b
 427	sync
 428	mtctr	r4
 4291:	icbi	0,r6
 430	addi	r6,r6,L1_CACHE_BYTES
 431	bdnz	1b
 432	sync
 433	mtmsr	r10				/* restore DR */
 434	isync
 435	blr
 436#endif
 437#endif /* CONFIG_BOOKE */
 438
 439/*
 440 * Copy a whole page.  We use the dcbz instruction on the destination
 441 * to reduce memory traffic (it eliminates the unnecessary reads of
 442 * the destination into cache).  This requires that the destination
 443 * is cacheable.
 444 */
 445#define COPY_16_BYTES		\
 446	lwz	r6,4(r4);	\
 447	lwz	r7,8(r4);	\
 448	lwz	r8,12(r4);	\
 449	lwzu	r9,16(r4);	\
 450	stw	r6,4(r3);	\
 451	stw	r7,8(r3);	\
 452	stw	r8,12(r3);	\
 453	stwu	r9,16(r3)
 454
 455_GLOBAL(copy_page)
 456	rlwinm	r5, r3, 0, L1_CACHE_BYTES - 1
 457	addi	r3,r3,-4
 458
 4590:	twnei	r5, 0	/* WARN if r3 is not cache aligned */
 460	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
 461
 462	addi	r4,r4,-4
 463
 464	li	r5,4
 465
 466#if MAX_COPY_PREFETCH > 1
 467	li	r0,MAX_COPY_PREFETCH
 468	li	r11,4
 469	mtctr	r0
 47011:	dcbt	r11,r4
 471	addi	r11,r11,L1_CACHE_BYTES
 472	bdnz	11b
 473#else /* MAX_COPY_PREFETCH == 1 */
 474	dcbt	r5,r4
 475	li	r11,L1_CACHE_BYTES+4
 476#endif /* MAX_COPY_PREFETCH */
 477	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
 478	crclr	4*cr0+eq
 4792:
 480	mtctr	r0
 4811:
 482	dcbt	r11,r4
 483	dcbz	r5,r3
 484	COPY_16_BYTES
 485#if L1_CACHE_BYTES >= 32
 486	COPY_16_BYTES
 487#if L1_CACHE_BYTES >= 64
 488	COPY_16_BYTES
 489	COPY_16_BYTES
 490#if L1_CACHE_BYTES >= 128
 491	COPY_16_BYTES
 492	COPY_16_BYTES
 493	COPY_16_BYTES
 494	COPY_16_BYTES
 495#endif
 496#endif
 497#endif
 498	bdnz	1b
 499	beqlr
 500	crnot	4*cr0+eq,4*cr0+eq
 501	li	r0,MAX_COPY_PREFETCH
 502	li	r11,4
 503	b	2b
 504EXPORT_SYMBOL(copy_page)
 505
 506/*
 507 * Extended precision shifts.
 508 *
 509 * Updated to be valid for shift counts from 0 to 63 inclusive.
 510 * -- Gabriel
 511 *
 512 * R3/R4 has 64 bit value
 513 * R5    has shift count
 514 * result in R3/R4
 515 *
 516 *  ashrdi3: arithmetic right shift (sign propagation)	
 517 *  lshrdi3: logical right shift
 518 *  ashldi3: left shift
 519 */
 520_GLOBAL(__ashrdi3)
 521	subfic	r6,r5,32
 522	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
 523	addi	r7,r5,32	# could be xori, or addi with -32
 524	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
 525	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
 526	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
 527	or	r4,r4,r6	# LSW |= t1
 528	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
 529	sraw	r3,r3,r5	# MSW = MSW >> count
 530	or	r4,r4,r7	# LSW |= t2
 531	blr
 532EXPORT_SYMBOL(__ashrdi3)
 533
 534_GLOBAL(__ashldi3)
 535	subfic	r6,r5,32
 536	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
 537	addi	r7,r5,32	# could be xori, or addi with -32
 538	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
 539	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
 540	or	r3,r3,r6	# MSW |= t1
 541	slw	r4,r4,r5	# LSW = LSW << count
 542	or	r3,r3,r7	# MSW |= t2
 543	blr
 544EXPORT_SYMBOL(__ashldi3)
 545
 546_GLOBAL(__lshrdi3)
 547	subfic	r6,r5,32
 548	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
 549	addi	r7,r5,32	# could be xori, or addi with -32
 550	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
 551	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
 552	or	r4,r4,r6	# LSW |= t1
 553	srw	r3,r3,r5	# MSW = MSW >> count
 554	or	r4,r4,r7	# LSW |= t2
 555	blr
 556EXPORT_SYMBOL(__lshrdi3)
 557
 558/*
 559 * 64-bit comparison: __cmpdi2(s64 a, s64 b)
 560 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 561 */
 562_GLOBAL(__cmpdi2)
 563	cmpw	r3,r5
 564	li	r3,1
 565	bne	1f
 566	cmplw	r4,r6
 567	beqlr
 5681:	li	r3,0
 569	bltlr
 570	li	r3,2
 571	blr
 572EXPORT_SYMBOL(__cmpdi2)
 573/*
 574 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 575 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 576 */
 577_GLOBAL(__ucmpdi2)
 578	cmplw	r3,r5
 579	li	r3,1
 580	bne	1f
 581	cmplw	r4,r6
 582	beqlr
 5831:	li	r3,0
 584	bltlr
 585	li	r3,2
 586	blr
 587EXPORT_SYMBOL(__ucmpdi2)
 588
 589_GLOBAL(__bswapdi2)
 590	rotlwi  r9,r4,8
 591	rotlwi  r10,r3,8
 592	rlwimi  r9,r4,24,0,7
 593	rlwimi  r10,r3,24,0,7
 594	rlwimi  r9,r4,24,16,23
 595	rlwimi  r10,r3,24,16,23
 596	mr      r3,r9
 597	mr      r4,r10
 598	blr
 599EXPORT_SYMBOL(__bswapdi2)
 600
 601#ifdef CONFIG_SMP
 602_GLOBAL(start_secondary_resume)
 603	/* Reset stack */
 604	rlwinm	r1, r1, 0, 0, 31 - THREAD_SHIFT
 605	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
 606	li	r3,0
 607	stw	r3,0(r1)		/* Zero the stack frame pointer	*/
 608	bl	start_secondary
 609	b	.
 610#endif /* CONFIG_SMP */
 611	
 612/*
 613 * This routine is just here to keep GCC happy - sigh...
 614 */
 615_GLOBAL(__main)
 616	blr
 617
 618#ifdef CONFIG_KEXEC_CORE
 619	/*
 620	 * Must be relocatable PIC code callable as a C function.
 621	 */
 622	.globl relocate_new_kernel
 623relocate_new_kernel:
 624	/* r3 = page_list   */
 625	/* r4 = reboot_code_buffer */
 626	/* r5 = start_address      */
 627
 628#ifdef CONFIG_FSL_BOOKE
 629
 630	mr	r29, r3
 631	mr	r30, r4
 632	mr	r31, r5
 633
 634#define ENTRY_MAPPING_KEXEC_SETUP
 635#include "fsl_booke_entry_mapping.S"
 636#undef ENTRY_MAPPING_KEXEC_SETUP
 637
 638	mr      r3, r29
 639	mr      r4, r30
 640	mr      r5, r31
 641
 642	li	r0, 0
 643#elif defined(CONFIG_44x)
 644
 645	/* Save our parameters */
 646	mr	r29, r3
 647	mr	r30, r4
 648	mr	r31, r5
 649
 650#ifdef CONFIG_PPC_47x
 651	/* Check for 47x cores */
 652	mfspr	r3,SPRN_PVR
 653	srwi	r3,r3,16
 654	cmplwi	cr0,r3,PVR_476FPE@h
 655	beq	setup_map_47x
 656	cmplwi	cr0,r3,PVR_476@h
 657	beq	setup_map_47x
 658	cmplwi	cr0,r3,PVR_476_ISS@h
 659	beq	setup_map_47x
 660#endif /* CONFIG_PPC_47x */
 661	
 662/*
 663 * Code for setting up 1:1 mapping for PPC440x for KEXEC
 664 *
 665 * We cannot switch off the MMU on PPC44x.
 666 * So we:
 667 * 1) Invalidate all the mappings except the one we are running from.
  668 * 2) Create a tmp mapping for our code in the other address space (TS) and
 669 *    jump to it. Invalidate the entry we started in.
 670 * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in original TS.
 671 * 4) Jump to the 1:1 mapping in original TS.
 672 * 5) Invalidate the tmp mapping.
 673 *
 674 * - Based on the kexec support code for FSL BookE
 675 *
 676 */
 677
 678	/* 
 679	 * Load the PID with kernel PID (0).
 680	 * Also load our MSR_IS and TID to MMUCR for TLB search.
 681	 */
 682	li	r3, 0
 683	mtspr	SPRN_PID, r3
 684	mfmsr	r4
 685	andi.	r4,r4,MSR_IS@l
 686	beq	wmmucr
 687	oris	r3,r3,PPC44x_MMUCR_STS@h
 688wmmucr:
 689	mtspr	SPRN_MMUCR,r3
 690	sync
 691
 692	/*
 693	 * Invalidate all the TLB entries except the current entry
 694	 * where we are running from
 695	 */
 696	bl	0f				/* Find our address */
 6970:	mflr	r5				/* Make it accessible */
 698	tlbsx	r23,0,r5			/* Find entry we are in */
 699	li	r4,0				/* Start at TLB entry 0 */
 700	li	r3,0				/* Set PAGEID inval value */
 7011:	cmpw	r23,r4				/* Is this our entry? */
 702	beq	skip				/* If so, skip the inval */
 703	tlbwe	r3,r4,PPC44x_TLB_PAGEID		/* If not, inval the entry */
 704skip:
 705	addi	r4,r4,1				/* Increment */
 706	cmpwi	r4,64				/* Are we done?	*/
 707	bne	1b				/* If not, repeat */
 708	isync
 709
 710	/* Create a temp mapping and jump to it */
 711	andi.	r6, r23, 1		/* Find the index to use */
 712	addi	r24, r6, 1		/* r24 will contain 1 or 2 */
 713
 714	mfmsr	r9			/* get the MSR */
 715	rlwinm	r5, r9, 27, 31, 31	/* Extract the MSR[IS] */
 716	xori	r7, r5, 1		/* Use the other address space */
 717
 718	/* Read the current mapping entries */
 719	tlbre	r3, r23, PPC44x_TLB_PAGEID
 720	tlbre	r4, r23, PPC44x_TLB_XLAT
 721	tlbre	r5, r23, PPC44x_TLB_ATTRIB
 722
 723	/* Save our current XLAT entry */
 724	mr	r25, r4
 725
 726	/* Extract the TLB PageSize */
 727	li	r10, 1 			/* r10 will hold PageSize */
 728	rlwinm	r11, r3, 0, 24, 27	/* bits 24-27 */
 729
 730	/* XXX: As of now we use 256M, 4K pages */
 731	cmpwi	r11, PPC44x_TLB_256M
 732	bne	tlb_4k
 733	rotlwi	r10, r10, 28		/* r10 = 256M */
 734	b	write_out
 735tlb_4k:
 736	cmpwi	r11, PPC44x_TLB_4K
 737	bne	default
 738	rotlwi	r10, r10, 12		/* r10 = 4K */
 739	b	write_out
 740default:
 741	rotlwi	r10, r10, 10		/* r10 = 1K */
 742
 743write_out:
 744	/*
 745	 * Write out the tmp 1:1 mapping for this code in other address space
 746	 * Fixup  EPN = RPN , TS=other address space
 747	 */
 748	insrwi	r3, r7, 1, 23		/* Bit 23 is TS for PAGEID field */
 749
 750	/* Write out the tmp mapping entries */
 751	tlbwe	r3, r24, PPC44x_TLB_PAGEID
 752	tlbwe	r4, r24, PPC44x_TLB_XLAT
 753	tlbwe	r5, r24, PPC44x_TLB_ATTRIB
 754
 755	subi	r11, r10, 1		/* PageOffset Mask = PageSize - 1 */
 756	not	r10, r11		/* Mask for PageNum */
 757
 758	/* Switch to other address space in MSR */
 759	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */
 760
 761	bl	1f
 7621:	mflr	r8
 763	addi	r8, r8, (2f-1b)		/* Find the target offset */
 764
 765	/* Jump to the tmp mapping */
 766	mtspr	SPRN_SRR0, r8
 767	mtspr	SPRN_SRR1, r9
 768	rfi
 769
 7702:
 771	/* Invalidate the entry we were executing from */
 772	li	r3, 0
 773	tlbwe	r3, r23, PPC44x_TLB_PAGEID
 774
 775	/* attribute fields. rwx for SUPERVISOR mode */
 776	li	r5, 0
 777	ori	r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)
 778
 779	/* Create 1:1 mapping in 256M pages */
 780	xori	r7, r7, 1			/* Revert back to Original TS */
 781
 782	li	r8, 0				/* PageNumber */
 783	li	r6, 3				/* TLB Index, start at 3  */
 784
 785next_tlb:
 786	rotlwi	r3, r8, 28			/* Create EPN (bits 0-3) */
 787	mr	r4, r3				/* RPN = EPN  */
 788	ori	r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */
 789	insrwi	r3, r7, 1, 23			/* Set TS from r7 */
 790
 791	tlbwe	r3, r6, PPC44x_TLB_PAGEID	/* PageID field : EPN, V, SIZE */
 792	tlbwe	r4, r6, PPC44x_TLB_XLAT		/* Address translation : RPN   */
 793	tlbwe	r5, r6, PPC44x_TLB_ATTRIB	/* Attributes */
 794
 795	addi	r8, r8, 1			/* Increment PN */
 796	addi	r6, r6, 1			/* Increment TLB Index */
  797	cmpwi	r8, 8				/* Are we done? */
 798	bne	next_tlb
 799	isync
 800
 801	/* Jump to the new mapping 1:1 */
 802	li	r9,0
 803	insrwi	r9, r7, 1, 26			/* Set MSR[IS] = r7 */
 804
 805	bl	1f
 8061:	mflr	r8
 807	and	r8, r8, r11			/* Get our offset within page */
 808	addi	r8, r8, (2f-1b)
 809
 810	and	r5, r25, r10			/* Get our target PageNum */
 811	or	r8, r8, r5			/* Target jump address */
 812
 813	mtspr	SPRN_SRR0, r8
 814	mtspr	SPRN_SRR1, r9
 815	rfi
 8162:
 817	/* Invalidate the tmp entry we used */
 818	li	r3, 0
 819	tlbwe	r3, r24, PPC44x_TLB_PAGEID
 820	sync
 821	b	ppc44x_map_done
 822
 823#ifdef CONFIG_PPC_47x
 824
 825	/* 1:1 mapping for 47x */
 826
 827setup_map_47x:
 828
 829	/*
 830	 * Load the kernel pid (0) to PID and also to MMUCR[TID].
 831	 * Also set the MSR IS->MMUCR STS
 832	 */
 833	li	r3, 0
 834	mtspr	SPRN_PID, r3			/* Set PID */
 835	mfmsr	r4				/* Get MSR */
 836	andi.	r4, r4, MSR_IS@l		/* TS=1? */
 837	beq	1f				/* If not, leave STS=0 */
 838	oris	r3, r3, PPC47x_MMUCR_STS@h	/* Set STS=1 */
 8391:	mtspr	SPRN_MMUCR, r3			/* Put MMUCR */
 840	sync
 841
 842	/* Find the entry we are running from */
 843	bl	2f
 8442:	mflr	r23
 845	tlbsx	r23, 0, r23
 846	tlbre	r24, r23, 0			/* TLB Word 0 */
 847	tlbre	r25, r23, 1			/* TLB Word 1 */
 848	tlbre	r26, r23, 2			/* TLB Word 2 */
 849
 850
  851	/*
  852	 * Invalidate all the TLB entries by writing to 256 RPNs (r4)
  853	 * of 4k page size in all 4 ways (0-3 in r3).
  854	 * This invalidates the entire UTLB, including the one we are
  855	 * running from. However, the shadow TLB entries let us
  856	 * continue executing until we flush them (rfi/isync).
  857	 */
 858	addis	r3, 0, 0x8000			/* specify the way */
 859	addi	r4, 0, 0			/* TLB Word0 = (EPN=0, VALID = 0) */
 860	addi	r5, 0, 0
 861	b	clear_utlb_entry
 862
 863	/* Align the loop to speed things up. from head_44x.S */
 864	.align	6
 865
 866clear_utlb_entry:
 867
 868	tlbwe	r4, r3, 0
 869	tlbwe	r5, r3, 1
 870	tlbwe	r5, r3, 2
 871	addis	r3, r3, 0x2000			/* Increment the way */
 872	cmpwi	r3, 0
 873	bne	clear_utlb_entry
 874	addis	r3, 0, 0x8000
 875	addis	r4, r4, 0x100			/* Increment the EPN */
 876	cmpwi	r4, 0
 877	bne	clear_utlb_entry
 878
 879	/* Create the entries in the other address space */
 880	mfmsr	r5
 881	rlwinm	r7, r5, 27, 31, 31		/* Get the TS (Bit 26) from MSR */
 882	xori	r7, r7, 1			/* r7 = !TS */
 883
 884	insrwi	r24, r7, 1, 21			/* Change the TS in the saved TLB word 0 */
 885
  886	/*
  887	 * Write out the TLB entries for the tmp mapping.
  888	 * Use way '0' so that we can easily invalidate it later.
 889	 */
 890	lis	r3, 0x8000			/* Way '0' */ 
 891
 892	tlbwe	r24, r3, 0
 893	tlbwe	r25, r3, 1
 894	tlbwe	r26, r3, 2
 895
 896	/* Update the msr to the new TS */
 897	insrwi	r5, r7, 1, 26
 898
 899	bl	1f
 9001:	mflr	r6
 901	addi	r6, r6, (2f-1b)
 902
 903	mtspr	SPRN_SRR0, r6
 904	mtspr	SPRN_SRR1, r5
 905	rfi
 906
 907	/* 
 908	 * Now we are in the tmp address space.
 909	 * Create a 1:1 mapping for 0-2GiB in the original TS.
 910	 */
 9112:
 912	li	r3, 0
 913	li	r4, 0				/* TLB Word 0 */
 914	li	r5, 0				/* TLB Word 1 */
 915	li	r6, 0
 916	ori	r6, r6, PPC47x_TLB2_S_RWX	/* TLB word 2 */
 917
 918	li	r8, 0				/* PageIndex */
 919
 920	xori	r7, r7, 1			/* revert back to original TS */
 921
 922write_utlb:
 923	rotlwi	r5, r8, 28			/* RPN = PageIndex * 256M */
 924						/* ERPN = 0 as we don't use memory above 2G */
 925
 926	mr	r4, r5				/* EPN = RPN */
 927	ori	r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
 928	insrwi	r4, r7, 1, 21			/* Insert the TS to Word 0 */
 929
 930	tlbwe	r4, r3, 0			/* Write out the entries */
 931	tlbwe	r5, r3, 1
 932	tlbwe	r6, r3, 2
 933	addi	r8, r8, 1
  934	cmpwi	r8, 8				/* Have we completed? */
 935	bne	write_utlb
 936
 937	/* make sure we complete the TLB write up */
 938	isync
 939
 940	/* 
 941	 * Prepare to jump to the 1:1 mapping.
 942	 * 1) Extract page size of the tmp mapping
 943	 *    DSIZ = TLB_Word0[22:27]
 944	 * 2) Calculate the physical address of the address
 945	 *    to jump to.
 946	 */
 947	rlwinm	r10, r24, 0, 22, 27
 948
 949	cmpwi	r10, PPC47x_TLB0_4K
 950	bne	0f
 951	li	r10, 0x1000			/* r10 = 4k */
  952	b	tlb_size_done			/* LR is set by the shared bl below */
 953
 9540:
 955	/* Defaults to 256M */
 956	lis	r10, 0x1000
 957	
  958tlb_size_done:	bl	1f
 9591:	mflr	r4
 960	addi	r4, r4, (2f-1b)			/* virtual address  of 2f */
 961
 962	subi	r11, r10, 1			/* offsetmask = Pagesize - 1 */
 963	not	r10, r11			/* Pagemask = ~(offsetmask) */
 964
 965	and	r5, r25, r10			/* Physical page */
 966	and	r6, r4, r11			/* offset within the current page */
 967
 968	or	r5, r5, r6			/* Physical address for 2f */
 969
 970	/* Switch the TS in MSR to the original one */
 971	mfmsr	r8
 972	insrwi	r8, r7, 1, 26
 973
 974	mtspr	SPRN_SRR1, r8
 975	mtspr	SPRN_SRR0, r5
 976	rfi
 977
 9782:
 979	/* Invalidate the tmp mapping */
 980	lis	r3, 0x8000			/* Way '0' */
 981
 982	clrrwi	r24, r24, 12			/* Clear the valid bit */
 983	tlbwe	r24, r3, 0
 984	tlbwe	r25, r3, 1
 985	tlbwe	r26, r3, 2
 986
 987	/* Make sure we complete the TLB write and flush the shadow TLB */
 988	isync
 989
 990#endif
 991
 992ppc44x_map_done:
 993
 994
 995	/* Restore the parameters */
 996	mr	r3, r29
 997	mr	r4, r30
 998	mr	r5, r31
 999
1000	li	r0, 0
1001#else
1002	li	r0, 0
1003
1004	/*
1005	 * Set Machine Status Register to a known status,
1006	 * switch the MMU off and jump to 1: in a single step.
1007	 */
1008
1009	mr	r8, r0
1010	ori     r8, r8, MSR_RI|MSR_ME
1011	mtspr	SPRN_SRR1, r8
1012	addi	r8, r4, 1f - relocate_new_kernel
1013	mtspr	SPRN_SRR0, r8
1014	sync
1015	rfi
1016
10171:
1018#endif
1019	/* from this point address translation is turned off */
1020	/* and interrupts are disabled */
1021
1022	/* set a new stack at the bottom of our page... */
1023	/* (not really needed now) */
1024	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
1025	stw	r0, 0(r1)
1026
1027	/* Do the copies */
1028	li	r6, 0 /* checksum */
1029	mr	r0, r3
1030	b	1f
1031
10320:	/* top, read another word for the indirection page */
1033	lwzu	r0, 4(r3)
1034
10351:
1036	/* is it a destination page? (r8) */
1037	rlwinm.	r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */
1038	beq	2f
1039
1040	rlwinm	r8, r0, 0, 0, 19 /* clear kexec flags, page align */
1041	b	0b
1042
10432:	/* is it an indirection page? (r3) */
1044	rlwinm.	r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */
1045	beq	2f
1046
1047	rlwinm	r3, r0, 0, 0, 19 /* clear kexec flags, page align */
1048	subi	r3, r3, 4
1049	b	0b
1050
10512:	/* are we done? */
1052	rlwinm.	r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */
1053	beq	2f
1054	b	3f
1055
10562:	/* is it a source page? (r9) */
1057	rlwinm.	r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */
1058	beq	0b
1059
1060	rlwinm	r9, r0, 0, 0, 19 /* clear kexec flags, page align */
1061
1062	li	r7, PAGE_SIZE / 4
1063	mtctr   r7
1064	subi    r9, r9, 4
1065	subi    r8, r8, 4
10669:
1067	lwzu    r0, 4(r9)  /* do the copy */
1068	xor	r6, r6, r0
1069	stwu    r0, 4(r8)
1070	dcbst	0, r8
1071	sync
1072	icbi	0, r8
1073	bdnz    9b
1074
1075	addi    r9, r9, 4
1076	addi    r8, r8, 4
1077	b	0b
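/*
 * The indirection-page walk above, as a hedged C sketch. The IND_* flag
 * values follow the generic kexec ABI (include/linux/kexec.h); the
 * checksum accumulation and per-word dcbst/icbi flushing done by the
 * assembly are omitted, and the head word is assumed to be an
 * indirection entry, as kexec builds it:
 *
 *	#include <string.h>
 *
 *	#define IND_DESTINATION	0x1
 *	#define IND_INDIRECTION	0x2
 *	#define IND_DONE	0x4
 *	#define IND_SOURCE	0x8
 *	#define PAGE_MASK_SK	(~0xfffUL)	// 4K pages assumed
 *
 *	static void kexec_copy_sketch(unsigned long head)
 *	{
 *		unsigned long *ind = 0, entry = head;
 *		char *dest = 0;
 *
 *		while (!(entry & IND_DONE)) {
 *			void *page = (void *)(entry & PAGE_MASK_SK);
 *
 *			if (entry & IND_DESTINATION)
 *				dest = page;	// next copy target
 *			else if (entry & IND_INDIRECTION)
 *				ind = page;	// switch to a new table
 *			else if (entry & IND_SOURCE) {
 *				memcpy(dest, page, 4096);
 *				dest += 4096;
 *			}
 *			entry = *ind++;		// fetch the next word
 *		}
 *	}
 */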
1078
10793:
1080
 1081	/* To be certain of avoiding problems with self-modifying code,
1082	 * execute a serializing instruction here.
1083	 */
1084	isync
1085	sync
1086
1087	mfspr	r3, SPRN_PIR /* current core we are running on */
1088	mr	r4, r5 /* load physical address of chunk called */
1089
1090	/* jump to the entry point, usually the setup routine */
1091	mtlr	r5
1092	blrl
1093
10941:	b	1b
1095
1096relocate_new_kernel_end:
1097
1098	.globl relocate_new_kernel_size
1099relocate_new_kernel_size:
1100	.long relocate_new_kernel_end - relocate_new_kernel
1101#endif