/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2002, 2007  Maciej W. Rozycki
 * Copyright (C) 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/sync.h>
#include <asm/war.h>
#include <asm/thread_info.h>

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0
	.set	pop
	END(except_vec3_generic)
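
The vector above dispatches through a table of handler pointers: masking CP0 Cause with 0x7c isolates ExcCode (bits 6..2), which is already a byte offset into an array of 4-byte entries; on 64-bit kernels the extra dsll doubles it for 8-byte pointers. A minimal C sketch of the same indexing, assuming a 32-entry table like the exception_handlers array the code loads from (everything else here is illustrative):

#include <stdint.h>

typedef void (*exc_handler_t)(void);

static exc_handler_t exception_handlers[32];	/* one slot per ExcCode */

/* Mirror of except_vec3_generic's index math: Cause & 0x7c is
 * ExcCode * 4, i.e. a ready-made byte offset for 4-byte pointers;
 * shift left once more when pointers are 8 bytes wide. */
static exc_handler_t pick_handler(uint32_t cause)
{
	uint32_t offset = cause & 0x7c;		/* andi	k1, k1, 0x7c */

	if (sizeof(void *) == 8)
		offset <<= 1;			/* dsll	k1, k1, 1    */
	return *(exc_handler_t *)((char *)exception_handlers + offset);
}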

/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	arch=r4000
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2
	andi	k1, k1, 0x7c
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)
	cache	Hit_Writeback_Inv_SD, (k0)
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)
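
The handle_vced path rounds c0_badvaddr down to a word boundary (loading -4 and ANDing is addr & ~3) before issuing the index-store-tag and writeback-invalidate cache ops, then bumps the /proc counter with a plain load/increment/store. A C sketch of that address math and bookkeeping, with vced_count standing in for the counter the listing references:

#include <stdint.h>

static unsigned long vced_count;	/* exposed via /proc in the real kernel */

/* Sketch of handle_vced's preamble: li k1, -4 ; and k0, k1 rounds the
 * faulting address down to a word boundary (addr & ~3UL), which is the
 * line the two cache ops then target. */
static unsigned long vced_prepare(unsigned long badvaddr)
{
	unsigned long line = badvaddr & ~3UL;

	vced_count++;			/* lw / addiu / sw on vced_count */
	return line;
}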

	__FINIT

	.align	5	/* 32 byte rollback region */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f
	 nop
	nop
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	MIPS_ISA_ARCH_LEVEL_RAW
	wait
	/* end of rollback region (the region size must be a power of two) */
1:
	jr	ra
	 nop
	.set	pop
	END(__r4k_wait)

	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f
	bne	k0, k1, \handler
	MTC0	k0, CP0_EPC
	.set pop
	.endm
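
BUILD_ROLLBACK_PROLOGUE rounds EPC down to a 32-byte boundary with an ori/xori pair: setting the low five bits and then toggling them off equals epc & ~0x1f, but needs no second register for the mask, which matters since only k0/k1 are usable this early. If the rounded EPC matches __r4k_wait, the CPU was interrupted inside the rollback region and EPC is rewound to its start so the need_resched test reruns. A self-contained C check of the trick:

#include <assert.h>
#include <stdint.h>

/* ori k0, 0x1f ; xori k0, 0x1f == round down to a 32-byte boundary,
 * without materialising the ~0x1f mask in a scratch register. */
static uintptr_t rollback_round_down(uintptr_t epc)
{
	return (epc | 0x1f) ^ 0x1f;	/* same as epc & ~(uintptr_t)0x1f */
}

int main(void)
{
	assert(rollback_round_down(0x80001234) == 0x80001220);
	assert(rollback_round_down(0x80001220) == 0x80001220);
	return 0;
}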

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
	.cfi_signal_frame
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state. If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP
	bnez	k0, 1f

	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0
	 rfe
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret
#endif
1:
	.set pop
#endif
	SAVE_ALL docfi=1
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp # Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jal	plat_irq_dispatch

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)
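
handle_int decides whether to switch stacks by masking sp down to its stack base with ~(_THREAD_SIZE-1) and comparing that against this CPU's irq_stack pointer; on a switch it saves the task sp at the new stack top so unwinders can cross the boundary. A C sketch of that logic under assumed values (the real _THREAD_SIZE and _IRQ_STACK_START are config-dependent; the names below only mirror the listing):

#include <stdint.h>
#include <stdlib.h>

#define THREAD_SIZE	0x4000UL		/* assumed; config-dependent             */
#define IRQ_STACK_START	(THREAD_SIZE - 16)	/* assumed stand-in for _IRQ_STACK_START */

static void *irq_stack[1];			/* this CPU's slot of the per-CPU array  */

/* Sketch of the switch in handle_int / except_vec_vi_handler: mask sp
 * to its stack base; if that already is the IRQ stack, stay put, else
 * move sp onto the IRQ stack and stash the task sp at the new top so
 * the unwinder can follow it back. */
static uintptr_t switch_to_irq_stack(uintptr_t sp, int cpu)
{
	uintptr_t base = sp & ~(THREAD_SIZE - 1);	/* PTR_LI / and     */

	if (base == (uintptr_t)irq_stack[cpu])		/* beq t0, t1, 2f   */
		return sp;

	uintptr_t new_sp = (uintptr_t)irq_stack[cpu] + IRQ_STACK_START;
	*(uintptr_t *)new_sp = sp;			/* LONG_S s1, 0(sp) */
	return new_sp;
}

int main(void)
{
	irq_stack[0] = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
	uintptr_t task_sp = (uintptr_t)aligned_alloc(THREAD_SIZE, THREAD_SIZE) + 0x1000;

	(void)switch_to_irq_stack(task_sp, 0);
	return 0;
}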

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	 nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME docfi=1
	SAVE_AT docfi=1
	.set	push
	.set	noreorder
	PTR_LA	v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1
FEXPORT(except_vec_vi_ori)
	 ori	v0, 0		/* Patched */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
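
The except_vec_vi_lui / except_vec_vi_ori labels are exported precisely so setup code can patch the two immediates; together the pair materialises an arbitrary 32-bit value in v0 (lui sets the upper halfword, ori merges the lower) that rides along into the common handler. A small C check of the split-and-recompose, with the helper names being illustrative:

#include <assert.h>
#include <stdint.h>

/* The value the patcher wants in v0 is split across the two patched
 * immediates: lui supplies bits 31..16, ori supplies bits 15..0. */
static uint16_t lui_imm(uint32_t value) { return value >> 16; }
static uint16_t ori_imm(uint32_t value) { return value & 0xffff; }

int main(void)
{
	uint32_t wanted = 0x8020abcd;

	/* Recompose the register value the way lui+ori do. */
	uint32_t v0 = ((uint32_t)lui_imm(wanted) << 16) | ori_imm(wanted);
	assert(v0 == wanted);
	return 0;
}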

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0
	TRACE_IRQS_OFF
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)
	LONG_S	sp, TI_REGS($28)

	/*
	 * SAVE_ALL ensures we are using a valid kernel stack for the thread.
	 * Check if we are already using the IRQ stack.
	 */
	move	s1, sp # Preserve the sp

	/* Get IRQ stack for this CPU */
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(irq_stack)
#else
	lui	k1, %highest(irq_stack)
	daddiu	k1, %higher(irq_stack)
	dsll	k1, 16
	daddiu	k1, %hi(irq_stack)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	LONG_L	t0, %lo(irq_stack)(k1)

	# Check if already on IRQ stack
	PTR_LI	t1, ~(_THREAD_SIZE-1)
	and	t1, t1, sp
	beq	t0, t1, 2f

	/* Switch to IRQ stack */
	li	t1, _IRQ_STACK_START
	PTR_ADD sp, t0, t1

	/* Save task's sp on IRQ stack so that unwinding can follow it */
	LONG_S	s1, 0(sp)
2:
	jalr	v0

	/* Restore sp */
	move	sp, s1

	j	ret_from_irq
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE
	mfc0	k0, CP0_DEBUG

	andi	k0, k0, MIPS_DEBUG_DBP	# Check for SDBBP.
	beqz	k0, ejtag_return

#ifdef CONFIG_SMP
1:	PTR_LA	k0, ejtag_debug_buffer_spinlock
	__SYNC(full, loongson3_war)
2:	ll	k0, 0(k0)
	bnez	k0, 2b
	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sc	k0, 0(k0)
	beqz	k0, 1b
# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
	sync
# endif

	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)

	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1

	PTR_LA	k1, ejtag_debug_buffer
	LONG_L	k1, 0(k1)
	LONG_S	k1, 0(k0)

	PTR_LA	k0, ejtag_debug_buffer_spinlock
	sw	zero, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_S	k1, 0(k0)
#endif

	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL

#ifdef CONFIG_SMP
	ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
	PTR_SRL	k1, SMP_CPUID_PTRSHIFT
	PTR_SLL	k1, LONGLOG
	PTR_LA	k0, ejtag_debug_buffer_per_cpu
	PTR_ADDU k0, k1
	LONG_L	k1, 0(k0)
#else
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)
#endif

ejtag_return:
	back_to_back_c0_hazard
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret
	.set	pop
	END(ejtag_debug_handler)
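
Before SAVE_ALL only k0/k1 are usable, so the SMP path guards ejtag_debug_buffer with a hand-rolled ll/sc test-and-set lock: spin while the word is nonzero, then try to store a nonzero marker (here the lock's own address); a failed sc restarts the acquire, and sw zero releases. A C11-atomics analogue of the same protocol, offered as an illustration rather than the kernel's locking API:

#include <stdatomic.h>
#include <stdint.h>

static atomic_uintptr_t ejtag_lock;	/* 0 = free, nonzero = held */

/* Analogue of the ll/bnez/sc/beqz loop: keep retrying until we both
 * observe 0 and succeed in swapping in a nonzero marker; a failed
 * compare-exchange plays the role of a failed sc. */
static void ejtag_lock_acquire(void)
{
	uintptr_t expected;

	do {
		expected = 0;
	} while (!atomic_compare_exchange_weak_explicit(&ejtag_lock,
			&expected, (uintptr_t)&ejtag_lock,
			memory_order_acquire, memory_order_relaxed));
}

static void ejtag_lock_release(void)
{
	/* sw zero, 0(k0) */
	atomic_store_explicit(&ejtag_lock, 0, memory_order_release);
}

int main(void)
{
	ejtag_lock_acquire();
	ejtag_lock_release();
	return 0;
}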

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
#ifdef CONFIG_SMP
EXPORT(ejtag_debug_buffer_spinlock)
	.fill	LONGSIZE
EXPORT(ejtag_debug_buffer_per_cpu)
	.fill	LONGSIZE * NR_CPUS
#endif
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	 nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.cfi_signal_frame
	.set	push
	.set	noat
	/*
	 * Clear ERL - restore segment mapping
	 * Clear BEV - required for page fault exception handler to work
	 */
	mfc0	k0, CP0_STATUS
	ori	k0, k0, ST0_EXL
	li	k1, ~(ST0_BEV | ST0_ERL)
	and	k0, k0, k1
	mtc0	k0, CP0_STATUS
	_ehb
	SAVE_ALL
	move	a0, sp
	jal	nmi_exception_handler
	/* nmi_exception_handler never returns */
	.set	pop
	END(nmi_handler)
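
The NMI entry does a single read-modify-write of CP0 Status: it keeps EXL set so the processor stays at exception level with interrupts masked, and clears BEV and ERL so normal (non-bootstrap) exception vectors and segment mappings work again before SAVE_ALL touches memory. A C sketch of the bit manipulation; the bit positions below follow the architectural Status layout but are restated here only for illustration (asm/mipsregs.h is authoritative):

#include <assert.h>
#include <stdint.h>

#define ST0_EXL	(1u << 1)	/* exception level             */
#define ST0_ERL	(1u << 2)	/* error level                 */
#define ST0_BEV	(1u << 22)	/* bootstrap exception vectors */

/* Mirror of the mfc0/ori/li/and/mtc0 sequence at NMI entry. */
static uint32_t nmi_fixup_status(uint32_t status)
{
	status |= ST0_EXL;			/* ori k0, k0, ST0_EXL */
	status &= ~(ST0_BEV | ST0_ERL);		/* li k1, ~(...) ; and */
	return status;
}

int main(void)
{
	assert((nmi_fixup_status(ST0_BEV | ST0_ERL) & (ST0_BEV | ST0_ERL)) == 0);
	return 0;
}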

	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	CLI
	TRACE_IRQS_OFF
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon).*/ \
	.set	mips1
	SET_HARDFLOAT
	cfc1	a1, fcr31
	.set	pop
	.endm

	.macro	__build_clear_msa_fpe
	CLI
	TRACE_IRQS_OFF
	_cfcmsa	a1, MSA_CSR
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm

	.macro __build_clear_gsexc
	.set	push
	/*
	 * We need to specify a selector to access the CP0.Diag1 (GSCause)
	 * register. All GSExc-equipped processors have MIPS32.
	 */
	.set	mips32
	mfc0	a1, CP0_DIAGNOSTIC1
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the ASM_PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes it
	   recognizes an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ...  */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	ASM_PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	ASM_PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0,exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0,exception_count_\exception
	.comm	exception_count\exception, 8, 8
	.endm

	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.cfi_signal_frame
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__build_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	jal	do_\handler
	j	ret_from_exception
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER \exception \handler \clear \verbose _int
	.endm
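
__BUILD_HANDLER is a template: every invocation emits an aligned entry point that saves registers, applies the per-exception "clear" policy, then calls the matching C routine do_<handler>() and returns through ret_from_exception. The same token-pasting pattern, sketched as a C macro with a stub do_ri standing in for the kernel's handler:

#include <stdio.h>

struct pt_regs { int dummy; };	/* placeholder for the saved register frame */

/* C analogue of __BUILD_HANDLER: each use stamps out one
 * handle_<exc>() wrapper around the shared prologue/epilogue. */
#define BUILD_HANDLER(exc, handler)				\
void handle_##exc(struct pt_regs *regs)				\
{								\
	/* SAVE_ALL and __build_clear_* would run here */	\
	do_##handler(regs);					\
	/* ret_from_exception would run here */			\
}

/* Stub standing in for the kernel's do_ri(). */
static void do_ri(struct pt_regs *regs)
{
	(void)regs;
	puts("reserved instruction exception");
}

BUILD_HANDLER(ri, ri)	/* defines handle_ri(), which calls do_ri() */

int main(void)
{
	struct pt_regs regs = { 0 };

	handle_ri(&regs);
	return 0;
}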

	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent	/* #14 */
#ifdef CONFIG_MIPS_FP_SUPPORT
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
#endif
	BUILD_HANDLER ftlb ftlb none silent		/* #16 */
	BUILD_HANDLER gsexc gsexc gsexc silent		/* #16 */
	BUILD_HANDLER msa msa sti silent		/* #21 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef	CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
	LEAF(handle_ri_rdhwr_tlbp)
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains an entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1
	PTR_SLL	k0, _PAGE_SHIFT + 1
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
	END(handle_ri_rdhwr_tlbp)
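
A MIPS TLB entry maps an even/odd pair of pages, so the probe rounds EPC down by shifting right and back left by _PAGE_SHIFT + 1, then ORs in the current ASID to build the EntryHi that tlbp matches against; a negative Index afterwards means no entry, hence the branch to the slow path. A C sketch of the EntryHi composition, with PAGE_SHIFT and the ASID mask as assumed example values:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT	12		/* assumed; config-dependent          */
#define ASID_MASK	0xffUL		/* assumed EntryHi ASID field width   */

/* Mirror of the PTR_SRL/PTR_SLL/or sequence: EPC rounded down to the
 * even/odd page pair (hence PAGE_SHIFT + 1) plus the current ASID. */
static uintptr_t probe_entryhi(uintptr_t epc, uintptr_t asid)
{
	uintptr_t vpn2 = (epc >> (PAGE_SHIFT + 1)) << (PAGE_SHIFT + 1);

	return vpn2 | (asid & ASID_MASK);
}

int main(void)
{
	assert(probe_entryhi(0x00403abc, 0x42) == (0x00402000UL | 0x42));
	return 0;
}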

	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1
	beqz	k0, 1f
	 xor	k1, k0
	lhu	k0, (k1)
	lhu	k1, 2(k1)
	ins	k1, k0, 16, 16
	lui	k0, 0x007d
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri
	 lui	k0, 0x7c03
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	LONG_ADDIU	k0, 4
	jr	k0
	 rfe
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)
	.set	push
	.set	arch=r4000
	eret
	.set	pop
#endif
	.set	pop
	END(handle_ri_rdhwr)
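
On microMIPS the low bit of EPC is set, so the handler clears it, fetches the instruction as two halfwords, and uses ins k1, k0, 16, 16 to place the first halfword in the upper 16 bits before comparing against the expected rdhwr v1,$29 encoding, 0x007d6b3c (the MIPS32 path compares a straight word load against 0x7c03e83b). A C check of the reassembly, keeping the asm's memory order for the two halves:

#include <assert.h>
#include <stdint.h>

/* Mirror of the microMIPS fetch in handle_ri_rdhwr: lhu (k1) gets the
 * first halfword, lhu 2(k1) the second, and ins merges the first into
 * bits 31..16 of the result. */
static int is_micromips_rdhwr(const uint16_t *pc)
{
	uint32_t insn = ((uint32_t)pc[0] << 16) | pc[1];

	return insn == 0x007d6b3c;	/* microMIPS: rdhwr v1,$29 */
}

int main(void)
{
	uint16_t halves[2] = { 0x007d, 0x6b3c };

	assert(is_micromips_rdhwr(halves));
	return 0;
}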

#ifdef CONFIG_CPU_R4X00_BUGS64
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER  daddi_ov daddi_ov none silent	/* #12 */
#endif