/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Main entry point for the guest, exception handling.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/asm-offsets.h>

#define _C_LABEL(x)     x
#define MIPSX(name)     mips32_ ## name
#define CALLFRAME_SIZ   32

/*
 * VECTOR
 *  exception vector entrypoint
 */
#define VECTOR(x, regmask)      \
    .ent    _C_LABEL(x),0;      \
    EXPORT(x);

#define VECTOR_END(x)      \
    EXPORT(x);

/* Overload, Danger Will Robinson!! */
#define PT_HOST_ASID        PT_BVADDR
#define PT_HOST_USERLOCAL   PT_EPC
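/*
 * The BadVAddr and EPC slots of the host pt_regs frame are unused on this
 * path, so they are reused to stash the host ASID and the host DDATA_LO
 * (UserLocal) value across the guest run; both are restored on the way out.
 */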

#define CP0_DDATA_LO        $28,3
#define CP0_EBASE           $15,1

#define CP0_INTCTL          $12,1
#define CP0_SRSCTL          $12,2
#define CP0_SRSMAP          $12,3
#define CP0_HWRENA          $7,0
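/*
 * The pairs above are coprocessor 0 (register, select) encodings, e.g.
 * $15,1 is CP0 register 15 select 1, the EBase register.
 */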

/* Resume Flags */
#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */

#define RESUME_GUEST            0
#define RESUME_HOST             RESUME_FLAG_HOST
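/*
 * The C exit handler packs its result in v0 as (err << 2) | RESUME_*:
 * bits [1:0] carry the resume flags tested below, and the return-to-host
 * path recovers err with an arithmetic shift right by 2.
 */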

/*
 * __kvm_mips_vcpu_run: entry point to the guest
 * a0: run
 * a1: vcpu
 */
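/*
 * Seen from C, this is expected to correspond to a declaration along the
 * lines of (a sketch inferred from the a0/a1 usage above):
 *
 *   int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
 */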
	.set	noreorder
	.set	noat

FEXPORT(__kvm_mips_vcpu_run)
	/* k0/k1 not being used in host kernel context */
	INT_ADDIU k1, sp, -PT_SIZE
	LONG_S	$0, PT_R0(k1)
	LONG_S	$1, PT_R1(k1)
	LONG_S	$2, PT_R2(k1)
	LONG_S	$3, PT_R3(k1)

	LONG_S	$4, PT_R4(k1)
	LONG_S	$5, PT_R5(k1)
	LONG_S	$6, PT_R6(k1)
	LONG_S	$7, PT_R7(k1)

	LONG_S	$8,  PT_R8(k1)
	LONG_S	$9,  PT_R9(k1)
	LONG_S	$10, PT_R10(k1)
	LONG_S	$11, PT_R11(k1)
	LONG_S	$12, PT_R12(k1)
	LONG_S	$13, PT_R13(k1)
	LONG_S	$14, PT_R14(k1)
	LONG_S	$15, PT_R15(k1)
	LONG_S	$16, PT_R16(k1)
	LONG_S	$17, PT_R17(k1)

	LONG_S	$18, PT_R18(k1)
	LONG_S	$19, PT_R19(k1)
	LONG_S	$20, PT_R20(k1)
	LONG_S	$21, PT_R21(k1)
	LONG_S	$22, PT_R22(k1)
	LONG_S	$23, PT_R23(k1)
	LONG_S	$24, PT_R24(k1)
	LONG_S	$25, PT_R25(k1)

	/* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */

	LONG_S	$28, PT_R28(k1)
	LONG_S	$29, PT_R29(k1)
	LONG_S	$30, PT_R30(k1)
	LONG_S	$31, PT_R31(k1)

	/* Save hi/lo */
	mflo	v0
	LONG_S	v0, PT_LO(k1)
	mfhi	v1
	LONG_S	v1, PT_HI(k1)

	/* Save host status */
	mfc0	v0, CP0_STATUS
	LONG_S	v0, PT_STATUS(k1)

	/* Save host ASID, shove it into the BVADDR location */
	mfc0	v1, CP0_ENTRYHI
	andi	v1, 0xff
	LONG_S	v1, PT_HOST_ASID(k1)

	/* Save DDATA_LO, will be used to store pointer to vcpu */
	mfc0	v1, CP0_DDATA_LO
	LONG_S	v1, PT_HOST_USERLOCAL(k1)

	/* DDATA_LO has pointer to vcpu */
	mtc0	a1, CP0_DDATA_LO
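	/*
	 * DDATA_LO ($28 sel 3) is used purely as a CP0 scratch slot here: its
	 * value survives the trip through the guest, so the exception vector
	 * can recover the vcpu pointer even after k0/k1 are clobbered on
	 * exception entry.
	 */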

	/* Offset into vcpu->arch */
	INT_ADDIU k1, a1, VCPU_HOST_ARCH

	/*
	 * Save the host stack to VCPU, used for exception processing
	 * when we exit from the Guest
	 */
	LONG_S	sp, VCPU_HOST_STACK(k1)

	/* Save the kernel gp as well */
	LONG_S	gp, VCPU_HOST_GP(k1)

	/* Set up the status register for running the guest in UM; interrupts
	 * are disabled */
	li	k0, (ST0_EXL | KSU_USER | ST0_BEV)
	mtc0	k0, CP0_STATUS
	ehb

	/* load up the new EBASE */
	LONG_L	k0, VCPU_GUEST_EBASE(k1)
	mtc0	k0, CP0_EBASE

	/*
	 * Now that the new EBASE has been loaded, unset BEV, set
	 * interrupt mask as it was but make sure that timer interrupts
	 * are enabled
	 */
	li	k0, (ST0_EXL | KSU_USER | ST0_IE)
	andi	v0, v0, ST0_IM
	or	k0, k0, v0
	mtc0	k0, CP0_STATUS
	ehb
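	/*
	 * BEV is set before touching EBASE so that, should an exception hit
	 * mid-switch, it is taken via the fixed ROM vectors rather than
	 * through a half-configured EBASE; each ehb drains the CP0 hazard
	 * before the updated state is relied upon.
	 */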

	/* Set Guest EPC */
	LONG_L	t0, VCPU_PC(k1)
	mtc0	t0, CP0_EPC

FEXPORT(__kvm_mips_load_asid)
	/* Set the ASID for the Guest Kernel */
	INT_SLL	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
				/* addresses shift to 0x80000000 */
	bltz	t0, 1f		/* If kernel */
	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
1:
	/* t1: contains the base of the ASID array, need to get the cpu id */
	LONG_L	t2, TI_CPU($28)             /* smp_processor_id */
	INT_SLL	t2, t2, 2                   /* x4 */
	REG_ADDU t3, t1, t2
	LONG_L	k0, (t3)
	andi	k0, k0, 0xff
	mtc0	k0, CP0_ENTRYHI
	ehb
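	/*
	 * Worked example of the branch above: a guest kernel PC such as
	 * 0x40001000 becomes 0x80002000 after the shift, i.e. negative, so
	 * bltz selects the kernel ASID array; guest user PCs stay positive
	 * and fall through to the user ASID array.
	 */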

	/* Disable RDHWR access */
	mtc0	zero, CP0_HWRENA

	/* Now load up the Guest Context from VCPU */
	LONG_L	$1, VCPU_R1(k1)
	LONG_L	$2, VCPU_R2(k1)
	LONG_L	$3, VCPU_R3(k1)

	LONG_L	$4, VCPU_R4(k1)
	LONG_L	$5, VCPU_R5(k1)
	LONG_L	$6, VCPU_R6(k1)
	LONG_L	$7, VCPU_R7(k1)

	LONG_L	$8, VCPU_R8(k1)
	LONG_L	$9, VCPU_R9(k1)
	LONG_L	$10, VCPU_R10(k1)
	LONG_L	$11, VCPU_R11(k1)
	LONG_L	$12, VCPU_R12(k1)
	LONG_L	$13, VCPU_R13(k1)
	LONG_L	$14, VCPU_R14(k1)
	LONG_L	$15, VCPU_R15(k1)
	LONG_L	$16, VCPU_R16(k1)
	LONG_L	$17, VCPU_R17(k1)
	LONG_L	$18, VCPU_R18(k1)
	LONG_L	$19, VCPU_R19(k1)
	LONG_L	$20, VCPU_R20(k1)
	LONG_L	$21, VCPU_R21(k1)
	LONG_L	$22, VCPU_R22(k1)
	LONG_L	$23, VCPU_R23(k1)
	LONG_L	$24, VCPU_R24(k1)
	LONG_L	$25, VCPU_R25(k1)

	/* k0/k1 loaded up later */

	LONG_L	$28, VCPU_R28(k1)
	LONG_L	$29, VCPU_R29(k1)
	LONG_L	$30, VCPU_R30(k1)
	LONG_L	$31, VCPU_R31(k1)

	/* Restore hi/lo */
	LONG_L	k0, VCPU_LO(k1)
	mtlo	k0

	LONG_L	k0, VCPU_HI(k1)
	mthi	k0

FEXPORT(__kvm_mips_load_k0k1)
	/* Restore the guest's k0/k1 registers */
	LONG_L	k0, VCPU_R26(k1)
	LONG_L	k1, VCPU_R27(k1)

	/* Jump to guest */
	eret
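	/*
	 * eret resumes at the guest EPC programmed above and clears
	 * Status.EXL, at which point the KSU_USER setting takes effect and
	 * the guest runs in user mode.
	 */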

VECTOR(MIPSX(exception), unknown)
/*
 * Find out what mode we came from and jump to the proper handler.
 */
	mtc0	k0, CP0_ERROREPC	#01: Save guest k0
	ehb				#02:

	mfc0	k0, CP0_EBASE		#03: Get EBASE
	INT_SRL	k0, k0, 10		#04: Get rid of CPUNum
	INT_SLL	k0, k0, 10		#05
	LONG_S	k1, 0x3000(k0)		#06: Save k1 @ offset 0x3000
	INT_ADDIU k0, k0, 0x2000	#07: Exception handler is installed @ offset 0x2000
	j	k0			#08: jump to the function
	 nop				#09: branch delay slot
VECTOR_END(MIPSX(exceptionEnd))
.end MIPSX(exception)
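/*
 * The stub above derives its own location from EBASE at runtime, so it
 * works wherever it is copied. Layout implied by the code: the full
 * handler is installed at EBASE + 0x2000, and one word at EBASE + 0x3000
 * spills k1 until the handler can save the complete guest context.
 */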

/*
 * Generic Guest exception handler. We end up here when the guest
 * does something that causes a trap to kernel mode.
 */
NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
	/* Get the VCPU pointer from DDATA_LO */
	mfc0	k1, CP0_DDATA_LO
	INT_ADDIU k1, k1, VCPU_HOST_ARCH

	/* Start saving Guest context to VCPU */
	LONG_S	$0, VCPU_R0(k1)
	LONG_S	$1, VCPU_R1(k1)
	LONG_S	$2, VCPU_R2(k1)
	LONG_S	$3, VCPU_R3(k1)
	LONG_S	$4, VCPU_R4(k1)
	LONG_S	$5, VCPU_R5(k1)
	LONG_S	$6, VCPU_R6(k1)
	LONG_S	$7, VCPU_R7(k1)
	LONG_S	$8, VCPU_R8(k1)
	LONG_S	$9, VCPU_R9(k1)
	LONG_S	$10, VCPU_R10(k1)
	LONG_S	$11, VCPU_R11(k1)
	LONG_S	$12, VCPU_R12(k1)
	LONG_S	$13, VCPU_R13(k1)
	LONG_S	$14, VCPU_R14(k1)
	LONG_S	$15, VCPU_R15(k1)
	LONG_S	$16, VCPU_R16(k1)
	LONG_S	$17, VCPU_R17(k1)
	LONG_S	$18, VCPU_R18(k1)
	LONG_S	$19, VCPU_R19(k1)
	LONG_S	$20, VCPU_R20(k1)
	LONG_S	$21, VCPU_R21(k1)
	LONG_S	$22, VCPU_R22(k1)
	LONG_S	$23, VCPU_R23(k1)
	LONG_S	$24, VCPU_R24(k1)
	LONG_S	$25, VCPU_R25(k1)

	/* Guest k0/k1 saved later */

	LONG_S	$28, VCPU_R28(k1)
	LONG_S	$29, VCPU_R29(k1)
	LONG_S	$30, VCPU_R30(k1)
	LONG_S	$31, VCPU_R31(k1)

	/* We need to save hi/lo and restore them on
	 * the way out
	 */
	mfhi	t0
	LONG_S	t0, VCPU_HI(k1)

	mflo	t0
	LONG_S	t0, VCPU_LO(k1)

	/* Finally save guest k0/k1 to VCPU; guest k0 was stashed in ErrorEPC
	 * on vector entry */
	mfc0	t0, CP0_ERROREPC
	LONG_S	t0, VCPU_R26(k1)

	/* Get GUEST k1 and save it in VCPU */
	PTR_LI	t1, ~0x2ff
	mfc0	t0, CP0_EBASE
	and	t0, t0, t1
	LONG_L	t0, 0x3000(t0)
	LONG_S	t0, VCPU_R27(k1)
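	/*
	 * The ~0x2ff mask is intended to strip the low EBASE fields (the
	 * CPUNum read back in bits 9:0) to recover the base address where k1
	 * was spilled at offset 0x3000; note the vector stub above cleared
	 * all ten low bits with a shift pair instead.
	 */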

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	mfc0	a1, CP0_DDATA_LO
	move	s1, a1

	/* Restore run (vcpu->run) */
	LONG_L	a0, VCPU_RUN(a1)
	/* Save pointer to run in s0; it is callee-saved, so it survives the C call */
	move	s0, a0

	/* Save host-level EPC, BadVaddr and Cause to VCPU, useful to
	 * process the exception */
	mfc0	k0, CP0_EPC
	LONG_S	k0, VCPU_PC(k1)

	mfc0	k0, CP0_BADVADDR
	LONG_S	k0, VCPU_HOST_CP0_BADVADDR(k1)

	mfc0	k0, CP0_CAUSE
	LONG_S	k0, VCPU_HOST_CP0_CAUSE(k1)

	mfc0	k0, CP0_ENTRYHI
	LONG_S	k0, VCPU_HOST_ENTRYHI(k1)

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux: load up the host EBASE */
	mfc0	v0, CP0_STATUS

	.set	at
	or	k0, v0, ST0_BEV
	.set	noat

	mtc0	k0, CP0_STATUS
	ehb

	LONG_L	k0, VCPU_HOST_EBASE(k1)
	mtc0	k0, CP0_EBASE

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	.set	at
	and	v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
	or	v0, v0, ST0_CU0
	.set	noat
	mtc0	v0, CP0_STATUS
	ehb

	/* Load up host GP */
	LONG_L	gp, VCPU_HOST_GP(k1)

	/* Need a stack before we can jump to "C" */
	LONG_L	sp, VCPU_HOST_STACK(k1)

	/* Saved host state */
	INT_ADDIU sp, sp, -PT_SIZE

	/* XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host DDATA_LO */
	LONG_L	k0, PT_HOST_USERLOCAL(sp)
	mtc0	k0, CP0_DDATA_LO

	/* Restore RDHWR access */
	PTR_LI	k0, 0x2000000F
	mtc0	k0, CP0_HWRENA
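	/*
	 * 0x2000000F sets HWREna bits 0-3 (CPUNum, SYNCI_Step, CC, CCRes)
	 * plus bit 29 (UserLocal), re-enabling user-mode RDHWR of those
	 * registers for the host.
	 */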

	/* Jump to handler */
FEXPORT(__kvm_mips_jump_to_handler)
	/* XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel */
	PTR_LA	t9, kvm_mips_handle_exit
	jalr.hb	t9
	 INT_ADDIU sp, sp, -CALLFRAME_SIZ	/* BD Slot */
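	/*
	 * a0 and a1 still hold run and vcpu, so this is effectively
	 * kvm_mips_handle_exit(run, vcpu); the delay slot carves out the C
	 * call frame, and the .hb variant of jalr clears any lingering CP0
	 * hazards before handler code executes.
	 */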

	/* Return from handler; make sure interrupts are disabled */
	di
	ehb

	/* XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */

	move	k1, s1
	INT_ADDIU k1, k1, VCPU_HOST_ARCH

	/* Check return value; it tells us whether we are returning to the
	 * host (handle I/O etc.) or resuming the guest
	 */
	andi	t0, v0, RESUME_HOST
	bnez	t0, __kvm_mips_return_to_host
	 nop

__kvm_mips_return_to_guest:
	/* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
	mtc0	s1, CP0_DDATA_LO

	/* Load up the Guest EBASE to minimize the window where BEV is set */
	LONG_L	t0, VCPU_GUEST_EBASE(k1)

	/* Switch EBASE back to the one used by KVM */
	mfc0	v1, CP0_STATUS
	.set	at
	or	k0, v1, ST0_BEV
	.set	noat
	mtc0	k0, CP0_STATUS
	ehb
	mtc0	t0, CP0_EBASE

	/* Set up the status register for running the guest in UM */
	.set	at
	or	v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
	and	v1, v1, ~ST0_CU0
	.set	noat
	mtc0	v1, CP0_STATUS
	ehb

	/* Set Guest EPC */
	LONG_L	t0, VCPU_PC(k1)
	mtc0	t0, CP0_EPC

	/* Set the ASID for the Guest Kernel */
	INT_SLL	t0, t0, 1	/* with kseg0 @ 0x40000000, kernel */
				/* addresses shift to 0x80000000 */
	bltz	t0, 1f		/* If kernel */
	 INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
	INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
1:
	/* t1: contains the base of the ASID array, need to get the cpu id */
	LONG_L	t2, TI_CPU($28)		/* smp_processor_id */
	INT_SLL	t2, t2, 2		/* x4 */
	REG_ADDU t3, t1, t2
	LONG_L	k0, (t3)
	andi	k0, k0, 0xff
	mtc0	k0, CP0_ENTRYHI
	ehb

	/* Disable RDHWR access */
	mtc0	zero, CP0_HWRENA

	/* load the guest context from VCPU and return */
	LONG_L	$0, VCPU_R0(k1)
	LONG_L	$1, VCPU_R1(k1)
	LONG_L	$2, VCPU_R2(k1)
	LONG_L	$3, VCPU_R3(k1)
	LONG_L	$4, VCPU_R4(k1)
	LONG_L	$5, VCPU_R5(k1)
	LONG_L	$6, VCPU_R6(k1)
	LONG_L	$7, VCPU_R7(k1)
	LONG_L	$8, VCPU_R8(k1)
	LONG_L	$9, VCPU_R9(k1)
	LONG_L	$10, VCPU_R10(k1)
	LONG_L	$11, VCPU_R11(k1)
	LONG_L	$12, VCPU_R12(k1)
	LONG_L	$13, VCPU_R13(k1)
	LONG_L	$14, VCPU_R14(k1)
	LONG_L	$15, VCPU_R15(k1)
	LONG_L	$16, VCPU_R16(k1)
	LONG_L	$17, VCPU_R17(k1)
	LONG_L	$18, VCPU_R18(k1)
	LONG_L	$19, VCPU_R19(k1)
	LONG_L	$20, VCPU_R20(k1)
	LONG_L	$21, VCPU_R21(k1)
	LONG_L	$22, VCPU_R22(k1)
	LONG_L	$23, VCPU_R23(k1)
	LONG_L	$24, VCPU_R24(k1)
	LONG_L	$25, VCPU_R25(k1)

	/* k0/k1 loaded later */
	LONG_L	$28, VCPU_R28(k1)
	LONG_L	$29, VCPU_R29(k1)
	LONG_L	$30, VCPU_R30(k1)
	LONG_L	$31, VCPU_R31(k1)

FEXPORT(__kvm_mips_skip_guest_restore)
	LONG_L	k0, VCPU_HI(k1)
	mthi	k0

	LONG_L	k0, VCPU_LO(k1)
	mtlo	k0

	LONG_L	k0, VCPU_R26(k1)
	LONG_L	k1, VCPU_R27(k1)

	eret

__kvm_mips_return_to_host:
	/* EBASE is already pointing to Linux */
	LONG_L	k1, VCPU_HOST_STACK(k1)
	INT_ADDIU k1, k1, -PT_SIZE

	/* Restore host DDATA_LO */
	LONG_L	k0, PT_HOST_USERLOCAL(k1)
	mtc0	k0, CP0_DDATA_LO

	/* Restore host ASID (read via k1; sp still includes the C call frame
	 * carved out before calling the handler) */
	LONG_L	k0, PT_HOST_ASID(k1)
	andi	k0, 0xff
	mtc0	k0, CP0_ENTRYHI
	ehb

	/* Load context saved on the host stack */
	LONG_L	$0, PT_R0(k1)
	LONG_L	$1, PT_R1(k1)

	/* r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code */
	INT_SRA	k0, v0, 2
	move	$2, k0
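	/*
	 * Example: a handler result of (err << 2) | RESUME_HOST yields err
	 * here; the arithmetic shift preserves the sign, so negative error
	 * codes come back intact as the return value of __kvm_mips_vcpu_run.
	 */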

	LONG_L	$3, PT_R3(k1)
	LONG_L	$4, PT_R4(k1)
	LONG_L	$5, PT_R5(k1)
	LONG_L	$6, PT_R6(k1)
	LONG_L	$7, PT_R7(k1)
	LONG_L	$8, PT_R8(k1)
	LONG_L	$9, PT_R9(k1)
	LONG_L	$10, PT_R10(k1)
	LONG_L	$11, PT_R11(k1)
	LONG_L	$12, PT_R12(k1)
	LONG_L	$13, PT_R13(k1)
	LONG_L	$14, PT_R14(k1)
	LONG_L	$15, PT_R15(k1)
	LONG_L	$16, PT_R16(k1)
	LONG_L	$17, PT_R17(k1)
	LONG_L	$18, PT_R18(k1)
	LONG_L	$19, PT_R19(k1)
	LONG_L	$20, PT_R20(k1)
	LONG_L	$21, PT_R21(k1)
	LONG_L	$22, PT_R22(k1)
	LONG_L	$23, PT_R23(k1)
	LONG_L	$24, PT_R24(k1)
	LONG_L	$25, PT_R25(k1)

	/* Host k0/k1 were not saved */

	LONG_L	$28, PT_R28(k1)
	LONG_L	$29, PT_R29(k1)
	LONG_L	$30, PT_R30(k1)

	LONG_L	k0, PT_HI(k1)
	mthi	k0

	LONG_L	k0, PT_LO(k1)
	mtlo	k0

	/* Restore RDHWR access */
	PTR_LI	k0, 0x2000000F
	mtc0	k0, CP0_HWRENA

	/* Restore RA, which is the address we will return to */
	LONG_L	ra, PT_R31(k1)
	j	ra
	 nop

VECTOR_END(MIPSX(GuestExceptionEnd))
.end MIPSX(GuestException)

MIPSX(exceptions):
	#####
	##### The exception handlers.
	#####
	.word _C_LABEL(MIPSX(GuestException))	#  0
	.word _C_LABEL(MIPSX(GuestException))	#  1
	.word _C_LABEL(MIPSX(GuestException))	#  2
	.word _C_LABEL(MIPSX(GuestException))	#  3
	.word _C_LABEL(MIPSX(GuestException))	#  4
	.word _C_LABEL(MIPSX(GuestException))	#  5
	.word _C_LABEL(MIPSX(GuestException))	#  6
	.word _C_LABEL(MIPSX(GuestException))	#  7
	.word _C_LABEL(MIPSX(GuestException))	#  8
	.word _C_LABEL(MIPSX(GuestException))	#  9
	.word _C_LABEL(MIPSX(GuestException))	# 10
	.word _C_LABEL(MIPSX(GuestException))	# 11
	.word _C_LABEL(MIPSX(GuestException))	# 12
	.word _C_LABEL(MIPSX(GuestException))	# 13
	.word _C_LABEL(MIPSX(GuestException))	# 14
	.word _C_LABEL(MIPSX(GuestException))	# 15
	.word _C_LABEL(MIPSX(GuestException))	# 16
	.word _C_LABEL(MIPSX(GuestException))	# 17
	.word _C_LABEL(MIPSX(GuestException))	# 18
	.word _C_LABEL(MIPSX(GuestException))	# 19
	.word _C_LABEL(MIPSX(GuestException))	# 20
	.word _C_LABEL(MIPSX(GuestException))	# 21
	.word _C_LABEL(MIPSX(GuestException))	# 22
	.word _C_LABEL(MIPSX(GuestException))	# 23
	.word _C_LABEL(MIPSX(GuestException))	# 24
	.word _C_LABEL(MIPSX(GuestException))	# 25
	.word _C_LABEL(MIPSX(GuestException))	# 26
	.word _C_LABEL(MIPSX(GuestException))	# 27
	.word _C_LABEL(MIPSX(GuestException))	# 28
	.word _C_LABEL(MIPSX(GuestException))	# 29
	.word _C_LABEL(MIPSX(GuestException))	# 30
	.word _C_LABEL(MIPSX(GuestException))	# 31
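	/*
	 * All 32 Cause.ExcCode values funnel into the same common handler;
	 * the C exit code distinguishes them from the Cause value saved into
	 * the VCPU above.
	 */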

/*
 * This routine makes changes to the instruction stream effective to the
 * hardware. It should be called after the instruction stream is written.
 * On return, the new instructions are effective.
 *
 * Inputs:
 * a0 = Start address of new instruction stream
 * a1 = Size, in bytes, of new instruction stream
 */
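/*
 * From C this is expected to be used roughly as (a sketch matching the
 * register usage documented above):
 *
 *   void mips32_SyncICache(unsigned long addr, unsigned long size);
 *
 * called after new instructions (e.g. the copied exception stub) are
 * written, so the instruction cache observes them.
 */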

#define HW_SYNCI_Step       $1
LEAF(MIPSX(SyncICache))
	.set	push
	.set	mips32r2
	beq	a1, zero, 20f		/* nothing to do for a zero size */
	 nop
	REG_ADDU a1, a0, a1		/* a1 = end address */
	rdhwr	v0, HW_SYNCI_Step	/* v0 = address step per synci */
	beq	v0, zero, 20f		/* step of 0 means no synci needed */
	 nop
10:
	synci	0(a0)			/* synchronize caches for this line */
	REG_ADDU a0, a0, v0
	sltu	v1, a0, a1
	bne	v1, zero, 10b		/* loop until end address reached */
	 nop
	sync
20:
	jr.hb	ra			/* clear hazards before returning */
	 nop
	.set	pop
END(MIPSX(SyncICache))