v6.2
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/threads.h>
#include <linux/linkage.h>

#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>
#include <asm/feature-fixups.h>

/*
 * Structure for storing CPU registers in the save area.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 SPRGs */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_DBAT4	0x60
#define SL_IBAT4	0x68
#define SL_DBAT5	0x70
#define SL_IBAT5	0x78
#define SL_DBAT6	0x80
#define SL_IBAT6	0x88
#define SL_DBAT7	0x90
#define SL_IBAT7	0x98
#define SL_TB		0xa0
#define SL_R2		0xa8
#define SL_CR		0xac
#define SL_LR		0xb0
#define SL_R12		0xb4	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)
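
A hypothetical C view of the layout these offsets describe may help; only the SL_* values above are authoritative, and the struct and field names here are illustrative (u32 is the kernel's 32-bit type):

	struct sl_bat { u32 upper, lower; };	/* one BAT register pair */

	struct swsusp_save_area_layout {	/* hypothetical mirror of SL_* */
		u32 sp, pc, msr, sdr1;		/* 0x00..0x0c */
		u32 sprg[4];			/* 0x10: SPRG0..SPRG3 */
		struct sl_bat bat[16];		/* 0x20: DBAT0,IBAT0 .. DBAT7,IBAT7 */
		u32 tb_upper, tb_lower;		/* 0xa0: 64-bit timebase */
		u32 r2, cr, lr;			/* 0xa8, 0xac, 0xb0 */
		u32 gpr[20];			/* 0xb4: r12..r31 */
	};					/* sizeof == SL_SIZE == 0x104 */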

	.section .data
	.align	5

_GLOBAL(swsusp_save_area)
	.space	SL_SIZE


	.section .text
	.align	5

_GLOBAL(swsusp_arch_suspend)

	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b
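
The loop above is the standard lockless 64-bit timebase read: re-read the upper half until it is stable across the lower-half read. A minimal C rendering of the same algorithm (function name hypothetical):

	static inline unsigned long long read_timebase(void)
	{
		unsigned long hi, lo, chk;

		do {
			asm volatile("mftbu %0" : "=r" (hi));
			asm volatile("mftb %0"  : "=r" (lo));
			asm volatile("mftbu %0" : "=r" (chk));
		} while (chk != hi);	/* TBL overflowed into TBU: re-read */

		return ((unsigned long long)hi << 32) | lo;
	}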

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)

BEGIN_MMU_FTR_SECTION
	mfspr	r4,SPRN_DBAT4U
	stw	r4,SL_DBAT4(r11)
	mfspr	r4,SPRN_DBAT4L
	stw	r4,SL_DBAT4+4(r11)
	mfspr	r4,SPRN_DBAT5U
	stw	r4,SL_DBAT5(r11)
	mfspr	r4,SPRN_DBAT5L
	stw	r4,SL_DBAT5+4(r11)
	mfspr	r4,SPRN_DBAT6U
	stw	r4,SL_DBAT6(r11)
	mfspr	r4,SPRN_DBAT6L
	stw	r4,SL_DBAT6+4(r11)
	mfspr	r4,SPRN_DBAT7U
	stw	r4,SL_DBAT7(r11)
	mfspr	r4,SPRN_DBAT7L
	stw	r4,SL_DBAT7+4(r11)
	mfspr	r4,SPRN_IBAT4U
	stw	r4,SL_IBAT4(r11)
	mfspr	r4,SPRN_IBAT4L
	stw	r4,SL_IBAT4+4(r11)
	mfspr	r4,SPRN_IBAT5U
	stw	r4,SL_IBAT5(r11)
	mfspr	r4,SPRN_IBAT5L
	stw	r4,SL_IBAT5+4(r11)
	mfspr	r4,SPRN_IBAT6U
	stw	r4,SL_IBAT6(r11)
	mfspr	r4,SPRN_IBAT6L
	stw	r4,SL_IBAT6+4(r11)
	mfspr	r4,SPRN_IBAT7U
	stw	r4,SL_IBAT7(r11)
	mfspr	r4,SPRN_IBAT7L
	stw	r4,SL_IBAT7+4(r11)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
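
BEGIN_MMU_FTR_SECTION/END_MMU_FTR_SECTION_IFSET bracket code that the kernel patches out at boot unless the CPU advertises the named MMU feature. Roughly equivalent C, reusing the hypothetical save-area struct sketched earlier (mmu_has_feature and mfspr are the kernel's usual accessors):

	/* Only CPUs with the high-BAT extension have DBAT4..7/IBAT4..7. */
	if (mmu_has_feature(MMU_FTR_USE_HIGH_BATS)) {
		save_area.bat[8].upper = mfspr(SPRN_DBAT4U);	/* SL_DBAT4 */
		save_area.bat[8].lower = mfspr(SPRN_DBAT4L);
		/* ... likewise DBAT5..7 and IBAT4..7 ... */
	}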

#if 0
	/* Back up various CPU config state */
	bl	__save_cpu_setup
#endif
	/* Call the low level suspend code (we should probably have made
	 * a stack frame...)
	 */
	bl	swsusp_save

	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr


/* Resume code */
_GLOBAL(swsusp_arch_resume)

#ifdef CONFIG_ALTIVEC
	/* Stop any pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	sync

	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will
	 * be unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 CPUs at
	 * this point; G5 will need a better approach, possibly
	 * a small temporary hash table filled with large mappings,
	 * since disabling the MMU completely isn't a good option
	 * for performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 here; we should investigate using movable BATs
	 * for these CPUs.)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync
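
The rlwinm above is a rotate-and-mask: the wrap-around mask 28..26 keeps every MSR bit except IBM bit 27, which is MSR_DR (0x10). In C terms, assuming the kernel's usual mfmsr()/mtmsr() accessors:

	unsigned long msr = mfmsr();

	msr &= ~MSR_DR;		/* MSR_DR == 0x10: data address translation off */
	mtmsr(msr);		/* instruction translation (MSR_IR) stays on */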

	/* Load the pointer to the list of pages to copy (restore_pblist) */
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,restore_pblist@l
	lwz	r10,0(r11)

	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache efficient */
1:
	tophys(r3,r10)
	li	r0,256
	mtctr	r0
	lwz	r11,pbe_address(r3)		/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b
	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b
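
Each entry is copied 16 bytes per iteration, 256 iterations per 4 KB page. The loop is the assembly form of walking the snapshot's page-backup list; as a C sketch (struct pbe matches the kernel's page-backup entry, memcpy stands in for the unrolled loop):

	struct pbe {
		void *address;		/* address of the copy (source here) */
		void *orig_address;	/* original location to restore into */
		struct pbe *next;
	};

	for (struct pbe *p = restore_pblist; p != NULL; p = p->next)
		memcpy(p->orig_address, p->address, PAGE_SIZE);	/* 256 x 16 bytes */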

	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync
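
lis r3,0x0002 loads 0x20000 into the count, so each pass steps one 32-byte cache line at a time across the first 4 MB of physical memory: the first pass reads a word per line to displace old contents, the second writes the lines back with dcbf. A C sketch of the same idea (constants taken from the code above):

	unsigned long addr;

	/* Pass 1: read one word per 32-byte line across 4 MB. */
	for (addr = 0; addr < 0x20000UL * 0x20; addr += 0x20)
		(void)*(volatile unsigned long *)addr;

	/* Pass 2: flush each of those lines back to memory. */
	for (addr = 0; addr < 0x20000UL * 0x20; addr += 0x20)
		asm volatile("dcbf 0,%0" : : "r" (addr) : "memory");
	asm volatile("sync");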

	/* OK, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes that
	 * the loader kernel and the booted one are exactly identical.
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)

#if 0
	/* Restore various CPU config state */
	bl	__restore_cpu_setup
#endif
	/* Restore the BATs and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are running out of those BATs,
	 * but first, our code is probably in the icache, and we are
	 * writing the same values back to the BATs, so that should
	 * be fine, though a better solution will have to be found
	 * long-term.
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4

#if 0
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4

BEGIN_MMU_FTR_SECTION
	lwz	r4,SL_DBAT4(r11)
	mtspr	SPRN_DBAT4U,r4
	lwz	r4,SL_DBAT4+4(r11)
	mtspr	SPRN_DBAT4L,r4
	lwz	r4,SL_DBAT5(r11)
	mtspr	SPRN_DBAT5U,r4
	lwz	r4,SL_DBAT5+4(r11)
	mtspr	SPRN_DBAT5L,r4
	lwz	r4,SL_DBAT6(r11)
	mtspr	SPRN_DBAT6U,r4
	lwz	r4,SL_DBAT6+4(r11)
	mtspr	SPRN_DBAT6L,r4
	lwz	r4,SL_DBAT7(r11)
	mtspr	SPRN_DBAT7U,r4
	lwz	r4,SL_DBAT7+4(r11)
	mtspr	SPRN_DBAT7L,r4
	lwz	r4,SL_IBAT4(r11)
	mtspr	SPRN_IBAT4U,r4
	lwz	r4,SL_IBAT4+4(r11)
	mtspr	SPRN_IBAT4L,r4
	lwz	r4,SL_IBAT5(r11)
	mtspr	SPRN_IBAT5U,r4
	lwz	r4,SL_IBAT5+4(r11)
	mtspr	SPRN_IBAT5L,r4
	lwz	r4,SL_IBAT6(r11)
	mtspr	SPRN_IBAT6U,r4
	lwz	r4,SL_IBAT6+4(r11)
	mtspr	SPRN_IBAT6L,r4
	lwz	r4,SL_IBAT7(r11)
	mtspr	SPRN_IBAT7U,r4
	lwz	r4,SL_IBAT7+4(r11)
	mtspr	SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
#endif

	/* Flush all TLBs */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync
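
The loop issues tlbie for effective addresses from 0x0FFFF000 down to 0 in 4 KB steps, sweeping 256 MB of address space so that every TLB set gets hit. A C sketch:

	long ea;

	/* Invalidate every TLB set: one tlbie per 4 KB page over 0..256 MB. */
	for (ea = 0x10000000 - 0x1000; ea >= 0; ea -= 0x1000)
		asm volatile("tlbie %0" : : "r" (ea) : "memory");
	asm volatile("sync");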

	/* Restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)

	/* Restore TB */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4

	/* Kick decrementer */
	li	r0,1
	mtdec	r0

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	// XXX Note: we don't really need to call swsusp_resume

	li	r3,0
	blr
_ASM_NOKPROBE_SYMBOL(swsusp_arch_resume)

/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
SYM_FUNC_START_LOCAL(turn_on_mmu)
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi
_ASM_NOKPROBE_SYMBOL(turn_on_mmu)
SYM_FUNC_END(turn_on_mmu)

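turn_on_mmu re-enables translation via the rfi trick: rfi reloads the PC from SRR0 and the MSR from SRR1 in a single operation, so the MMU comes on exactly at the jump back to the caller. A hedged C-with-inline-asm sketch of the same trampoline (function name hypothetical):

	static inline void __attribute__((noreturn))
	rfi_to(unsigned long pc, unsigned long msr)
	{
		asm volatile(
			"mtsrr0 %0\n\t"		/* resume address */
			"mtsrr1 %1\n\t"		/* MSR to install (MMU on) */
			"sync\n\t"
			"isync\n\t"
			"rfi"
			: : "r" (pc), "r" (msr) : "memory");
		__builtin_unreachable();
	}
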
v3.5.6
 
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>

/*
 * Structure for storing CPU registers in the save area.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 SPRGs */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_TB		0x60
#define SL_R2		0x68
#define SL_CR		0x6c
#define SL_LR		0x70
#define SL_R12		0x74	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)

	.section .data
	.align	5

_GLOBAL(swsusp_save_area)
	.space	SL_SIZE


	.section .text
	.align	5

_GLOBAL(swsusp_arch_suspend)

	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)

#if 0
	/* Back up various CPU config state */
	bl	__save_cpu_setup
#endif
	/* Call the low level suspend code (we should probably have made
	 * a stack frame...)
	 */
	bl	swsusp_save

	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr


/* Resume code */
_GLOBAL(swsusp_arch_resume)

#ifdef CONFIG_ALTIVEC
	/* Stop any pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	sync

	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will
	 * be unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 CPUs at
	 * this point; G5 will need a better approach, possibly
	 * a small temporary hash table filled with large mappings,
	 * since disabling the MMU completely isn't a good option
	 * for performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 here; we should investigate using movable BATs
	 * for these CPUs.)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync

	/* Load the pointer to the list of pages to copy (restore_pblist) */
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,restore_pblist@l
	lwz	r10,0(r11)

	/* Copy the pages. This is a very basic implementation, to
	 * be replaced by something more cache efficient */
1:
	tophys(r3,r10)
	li	r0,256
	mtctr	r0
	lwz	r11,pbe_address(r3)		/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b
	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b

	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	lis	r3,0x0002
	mtctr	r3
	li	r3,0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync

	/* OK, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes that
	 * the loader kernel and the booted one are exactly identical.
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)

#if 0
	/* Restore various CPU config state */
	bl	__restore_cpu_setup
#endif
	/* Restore the BATs and SDR1. Then we can turn on the MMU.
	 * This is a bit hairy as we are running out of those BATs,
	 * but first, our code is probably in the icache, and we are
	 * writing the same values back to the BATs, so that should
	 * be fine, though a better solution will have to be found
	 * long-term.
	 */
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4

#if 0
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
#endif

BEGIN_MMU_FTR_SECTION
	li	r4,0
	mtspr	SPRN_DBAT4U,r4
	mtspr	SPRN_DBAT4L,r4
	mtspr	SPRN_DBAT5U,r4
	mtspr	SPRN_DBAT5L,r4
	mtspr	SPRN_DBAT6U,r4
	mtspr	SPRN_DBAT6L,r4
	mtspr	SPRN_DBAT7U,r4
	mtspr	SPRN_DBAT7L,r4
	mtspr	SPRN_IBAT4U,r4
	mtspr	SPRN_IBAT4L,r4
	mtspr	SPRN_IBAT5U,r4
	mtspr	SPRN_IBAT5L,r4
	mtspr	SPRN_IBAT6U,r4
	mtspr	SPRN_IBAT6L,r4
	mtspr	SPRN_IBAT7U,r4
	mtspr	SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
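
On CPUs with the high-BAT extension, this older version clears DBAT4..7/IBAT4..7 instead of restoring saved values as the v6.2 version above does. Roughly, in C (mtspr and mmu_has_feature as in the kernel headers):

	if (mmu_has_feature(MMU_FTR_USE_HIGH_BATS)) {
		mtspr(SPRN_DBAT4U, 0);	/* zeroed, not restored */
		mtspr(SPRN_DBAT4L, 0);
		/* ... likewise DBAT5..7 and IBAT4..7 ... */
	}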

	/* Flush all TLBs */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync

	/* Restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)

	/* Restore TB */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4

	/* Kick decrementer */
	li	r0,1
	mtdec	r0

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	// XXX Note: we don't really need to call swsusp_resume

	li	r3,0
	blr

/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
turn_on_mmu:
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi