Source capture: arch/mips/kernel/cps-vec.S as browsed on a Linux source-code
viewer (page chrome "Linux Audio / training course / Loading..." elided).
First copy below corresponds to Linux v6.8.
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 * Copyright (C) 2013 Imagination Technologies
  4 * Author: Paul Burton <paul.burton@mips.com>
  5 */
  6
 
  7#include <asm/addrspace.h>
  8#include <asm/asm.h>
  9#include <asm/asm-offsets.h>
 10#include <asm/asmmacro.h>
 11#include <asm/cacheops.h>
 12#include <asm/eva.h>
 13#include <asm/mipsregs.h>
 14#include <asm/mipsmtregs.h>
 15#include <asm/pm.h>
 16#include <asm/smp-cps.h>
 17
 18#define GCR_CPC_BASE_OFS	0x0088
 19#define GCR_CL_COHERENCE_OFS	0x2008
 20#define GCR_CL_ID_OFS		0x2028
 21
 22#define CPC_CL_VC_STOP_OFS	0x2020
 23#define CPC_CL_VC_RUN_OFS	0x2028
 24
 25.extern mips_cm_base
 26
 27.set noreorder
 28
 29#ifdef CONFIG_64BIT
 30# define STATUS_BITDEPS		ST0_KX
 31#else
 32# define STATUS_BITDEPS		0
 33#endif
 34
 35#ifdef CONFIG_MIPS_CPS_NS16550
 36
 37#define DUMP_EXCEP(name)		\
 38	PTR_LA	a0, 8f;			\
 39	jal	mips_cps_bev_dump;	\
 40	 nop;				\
 41	TEXT(name)
 42
 43#else /* !CONFIG_MIPS_CPS_NS16550 */
 44
 45#define DUMP_EXCEP(name)
 46
 47#endif /* !CONFIG_MIPS_CPS_NS16550 */
 48
 49	/*
 50	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
 51	 * MT is not supported then branch to nomt.
 52	 */
  53	.macro	has_mt	dest, nomt
	/*
	 * Bit 31 (M) of Config1/Config2 clear means the next Config register
	 * in the chain is not implemented, so Config3.MT cannot exist:
	 * branch to \nomt. File is assembled .set noreorder, so each
	 * indented instruction below a branch is its delay slot.
	 */
  54	mfc0	\dest, CP0_CONFIG, 1
  55	bgez	\dest, \nomt
  56	 mfc0	\dest, CP0_CONFIG, 2
  57	bgez	\dest, \nomt
  58	 mfc0	\dest, CP0_CONFIG, 3
	/* \dest = Config3.MT bit; zero => MT ASE absent, take \nomt */
  59	andi	\dest, \dest, MIPS_CONF3_MT
  60	beqz	\dest, \nomt
  61	 nop
  62	.endm
 63
 64	/*
 65	 * Set dest to non-zero if the core supports MIPSr6 multithreading
 66	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
 67	 * branch to nomt.
 68	 */
  69	.macro	has_vp	dest, nomt
	/*
	 * Walk Config1..Config4; if any M bit (bit 31) is clear the chain
	 * ends before Config5, so MIPSr6 VP support cannot be present:
	 * branch to \nomt (delay slots are the indented instructions).
	 */
  70	mfc0	\dest, CP0_CONFIG, 1
  71	bgez	\dest, \nomt
  72	 mfc0	\dest, CP0_CONFIG, 2
  73	bgez	\dest, \nomt
  74	 mfc0	\dest, CP0_CONFIG, 3
  75	bgez	\dest, \nomt
  76	 mfc0	\dest, CP0_CONFIG, 4
  77	bgez	\dest, \nomt
  78	 mfc0	\dest, CP0_CONFIG, 5
	/* \dest = Config5.VP bit; zero => no MIPSr6 multithreading */
  79	andi	\dest, \dest, MIPS_CONF5_VP
  80	beqz	\dest, \nomt
  81	 nop
  82	.endm
 83
 84
 85.balign 0x1000
 86
  87LEAF(mips_cps_core_entry)
	/*
	 * Reset/boot vector for secondary cores and VPs. Patched at runtime
	 * so s0 = Kseg0 CCA and s1 = GCR base; routes NMIs away, sets up
	 * Cause/Status, optionally joins the coherent domain, then loads
	 * gp/sp/PC from this VPE's struct vpe_boot_config and jumps there.
	 */
  88	/*
  89	 * These first several instructions will be patched by cps_smp_setup to load the
  90	 * CCA to use into register s0 and GCR base address to register s1.
  91	 */
  92	.rept   CPS_ENTRY_PATCH_INSNS
  93	nop
  94	.endr
  95
  96	.global mips_cps_core_entry_patch_end
  97mips_cps_core_entry_patch_end:
  98
  99	/* Check whether we're here due to an NMI */
 100	mfc0	k0, CP0_STATUS
 101	and	k0, k0, ST0_NMI
 102	beqz	k0, not_nmi
 103	 nop
 104
 105	/* This is an NMI */
 106	PTR_LA	k0, nmi_handler
 107	jr	k0
 108	 nop
 109
 110not_nmi:
 111	/* Setup Cause */
 112	li	t0, CAUSEF_IV
 113	mtc0	t0, CP0_CAUSE
 114
 115	/* Setup Status */
 116	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
 117	mtc0	t0, CP0_STATUS
 118
 119	/* We don't know how to do coherence setup on earlier ISA */
 120#if MIPS_ISA_REV > 0
	/* s7 doubles as the "was already coherent" flag, re-tested below */
 121	/* Skip cache & coherence setup if we're already coherent */
 122	lw	s7, GCR_CL_COHERENCE_OFS(s1)
 123	bnez	s7, 1f
 124	 nop
 125
 126	/* Initialize the L1 caches */
 127	jal	mips_cps_cache_init
 128	 nop
 129
 130	/* Enter the coherent domain */
 131	li	t0, 0xff
 132	sw	t0, GCR_CL_COHERENCE_OFS(s1)
 133	ehb
 134#endif /* MIPS_ISA_REV > 0 */
 135
 136	/* Set Kseg0 CCA to that in s0 */
	/* ori+xori clears the low 3-bit K0 CCA field before merging in s0 */
 1371:	mfc0	t0, CP0_CONFIG
 138	ori	t0, 0x7
 139	xori	t0, 0x7
 140	or	t0, t0, s0
 141	mtc0	t0, CP0_CONFIG
 142	ehb
 143
 144	/* Jump to kseg0 */
 145	PTR_LA	t0, 1f
 146	jr	t0
 147	 nop
 148
 149	/*
 150	 * We're up, cached & coherent. Perform any EVA initialization necessary
 151	 * before we access memory.
 152	 */
 1531:	eva_init
 154
 155	/* Retrieve boot configuration pointers */
 156	jal	mips_cps_get_bootcfg
 157	 nop
 158
 159	/* Skip core-level init if we started up coherent */
 160	bnez	s7, 1f
 161	 nop
 162
 163	/* Perform any further required core-level initialisation */
 164	jal	mips_cps_core_init
 165	 nop
 166
 167	/*
 168	 * Boot any other VPEs within this core that should be online, and
 169	 * deactivate this VPE if it should be offline.
 170	 */
	/* a0 = core_boot_config (from v0), a1 = this VPE's ID (from t9) */
 171	move	a1, t9
 172	jal	mips_cps_boot_vpes
 173	 move	a0, v0
 174
 175	/* Off we go! */
 1761:	PTR_L	t1, VPEBOOTCFG_PC(v1)
 177	PTR_L	gp, VPEBOOTCFG_GP(v1)
 178	PTR_L	sp, VPEBOOTCFG_SP(v1)
 179	jr	t1
 180	 nop
 181	END(mips_cps_core_entry)
182
183.org 0x200
 184LEAF(excep_tlbfill)
	/* BEV TLB-refill vector: optionally dump state over NS16550, then spin */
 185	DUMP_EXCEP("TLB Fill")
 186	b	.
 187	 nop
 188	END(excep_tlbfill)
189
190.org 0x280
 191LEAF(excep_xtlbfill)
	/* BEV XTLB-refill vector: optionally dump state, then spin forever */
 192	DUMP_EXCEP("XTLB Fill")
 193	b	.
 194	 nop
 195	END(excep_xtlbfill)
196
197.org 0x300
 198LEAF(excep_cache)
	/* BEV cache-error vector: optionally dump state, then spin forever */
 199	DUMP_EXCEP("Cache")
 200	b	.
 201	 nop
 202	END(excep_cache)
203
204.org 0x380
 205LEAF(excep_genex)
	/* BEV general-exception vector: optionally dump state, then spin */
 206	DUMP_EXCEP("General")
 207	b	.
 208	 nop
 209	END(excep_genex)
210
211.org 0x400
 212LEAF(excep_intex)
	/* BEV interrupt vector: optionally dump state, then spin forever */
 213	DUMP_EXCEP("Interrupt")
 214	b	.
 215	 nop
 216	END(excep_intex)
217
218.org 0x480
 219LEAF(excep_ejtag)
	/* EJTAG debug exception: tail-jump to the real handler (clobbers k0) */
 220	PTR_LA	k0, ejtag_debug_handler
 221	jr	k0
 222	 nop
 223	END(excep_ejtag)
 
224
 225LEAF(mips_cps_core_init)
	/*
	 * One-time core-level init. With CONFIG_MIPS_MT_SMP: leave only one
	 * TC/VPE executing and park every other VPE's TC as bound 1:1,
	 * non-active, non-allocatable and halted, ready for boot_vpes.
	 * Clobbers t0, t1, ta1, ta3. No-op without MT SMP.
	 */
 226#ifdef CONFIG_MIPS_MT_SMP
 227	/* Check that the core implements the MT ASE */
 228	has_mt	t0, 3f
 229
 230	.set	push
 231	.set	MIPS_ISA_LEVEL_RAW
 232	.set	mt
 233
 234	/* Only allow 1 TC per VPE to execute... */
 235	dmt
 236
 237	/* ...and for the moment only 1 VPE */
 238	dvpe
	/* jr.hb clears instruction hazards from dmt/dvpe before continuing */
 239	PTR_LA	t1, 1f
 240	jr.hb	t1
 241	 nop
 242
 243	/* Enter VPE configuration state */
 2441:	mfc0	t0, CP0_MVPCONTROL
 245	ori	t0, t0, MVPCONTROL_VPC
 246	mtc0	t0, CP0_MVPCONTROL
 247
 248	/* Retrieve the number of VPEs within the core */
 249	mfc0	t0, CP0_MVPCONF0
 250	srl	t0, t0, MVPCONF0_PVPE_SHIFT
 251	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	/* ta3 = VPE count (PVPE field is max index, hence the +1) */
 252	addiu	ta3, t0, 1
 253
 254	/* If there's only 1, we're done */
 255	beqz	t0, 2f
 256	 nop
 257
 258	/* Loop through each VPE within this core */
	/* ta1 = VPE/TC index, starting at 1 (VPE 0 is the one running this) */
 259	li	ta1, 1
 260
 2611:	/* Operate on the appropriate TC */
 262	mtc0	ta1, CP0_VPECONTROL
 263	ehb
 264
 265	/* Bind TC to VPE (1:1 TC:VPE mapping) */
 266	mttc0	ta1, CP0_TCBIND
 267
 268	/* Set exclusive TC, non-active, master */
 269	li	t0, VPECONF0_MVP
 270	sll	t1, ta1, VPECONF0_XTC_SHIFT
 271	or	t0, t0, t1
 272	mttc0	t0, CP0_VPECONF0
 273
 274	/* Set TC non-active, non-allocatable */
 275	mttc0	zero, CP0_TCSTATUS
 276
 277	/* Set TC halted */
 278	li	t0, TCHALT_H
 279	mttc0	t0, CP0_TCHALT
 280
 281	/* Next VPE */
 282	addiu	ta1, ta1, 1
 283	slt	t0, ta1, ta3
 284	bnez	t0, 1b
 285	 nop
 286
 287	/* Leave VPE configuration state */
 2882:	mfc0	t0, CP0_MVPCONTROL
 289	xori	t0, t0, MVPCONTROL_VPC
 290	mtc0	t0, CP0_MVPCONTROL
 291
 2923:	.set	pop
 293#endif
 294	jr	ra
 295	 nop
 296	END(mips_cps_core_init)
297
298/**
299 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
300 *
301 * Returns: pointer to struct core_boot_config in v0, pointer to
302 *          struct vpe_boot_config in v1, VPE ID in t9
303 */
 304LEAF(mips_cps_get_bootcfg)
	/*
	 * In:  s1 = GCR base (set up by the entry vector).
	 * Out: v0 = &core_boot_config for this core,
	 *      v1 = &vpe_boot_config for this VPE, t9 = VPE ID.
	 * Clobbers t0, t1, t2, ta2, ta3.
	 */
 305	/* Calculate a pointer to this cores struct core_boot_config */
 306	lw	t0, GCR_CL_ID_OFS(s1)
 307	li	t1, COREBOOTCFG_SIZE
 308	mul	t0, t0, t1
 309	PTR_LA	t1, mips_cps_core_bootcfg
 310	PTR_L	t1, 0(t1)
 311	PTR_ADDU v0, t0, t1
 312
 313	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
 314	li	t9, 0
 315#if defined(CONFIG_CPU_MIPSR6)
 316	has_vp	ta2, 1f
 317
 318	/*
 319	 * Assume non-contiguous numbering. Perhaps some day we'll need
 320	 * to handle contiguous VP numbering, but no such systems yet
 321	 * exist.
 322	 */
 323	mfc0	t9, CP0_GLOBALNUMBER
 324	andi	t9, t9, MIPS_GLOBALNUMBER_VP
 325#elif defined(CONFIG_MIPS_MT_SMP)
 326	has_mt	ta2, 1f
 327
 328	/* Find the number of VPEs present in the core */
 329	mfc0	t1, CP0_MVPCONF0
 330	srl	t1, t1, MVPCONF0_PVPE_SHIFT
 331	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
 332	addiu	t1, t1, 1
 333
 334	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	/* t1 = (1 << ceil-ish log2(nvpes)) - 1, via 31 - clz(nvpes) */
 335	clz	t1, t1
 336	li	t2, 31
 337	subu	t1, t2, t1
 338	li	t2, 1
 339	sll	t1, t2, t1
 340	addiu	t1, t1, -1
 341
 342	/* Retrieve the VPE ID from EBase.CPUNum */
	/* $15 sel 1 is CP0 EBase; mask isolates the CPUNum bits */
 343	mfc0	t9, $15, 1
 344	and	t9, t9, t1
 345#endif
 346
 3471:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
 348	li	t1, VPEBOOTCFG_SIZE
 349	mul	v1, t9, t1
 350	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
 351	PTR_ADDU v1, v1, ta3
 352
 353	jr	ra
 354	 nop
 355	END(mips_cps_get_bootcfg)
356
357LEAF(mips_cps_boot_vpes)
358	lw	ta2, COREBOOTCFG_VPEMASK(a0)
359	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)
360
361#if defined(CONFIG_CPU_MIPSR6)
362
363	has_vp	t0, 5f
364
365	/* Find base address of CPC */
366	PTR_LA	t1, mips_gcr_base
367	PTR_L	t1, 0(t1)
368	PTR_L	t1, GCR_CPC_BASE_OFS(t1)
369	PTR_LI	t2, ~0x7fff
370	and	t1, t1, t2
371	PTR_LI	t2, UNCAC_BASE
372	PTR_ADD	t1, t1, t2
373
374	/* Start any other VPs that ought to be running */
375	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)
376
377	/* Ensure this VP stops running if it shouldn't be */
378	not	ta2
379	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
380	ehb
381
382#elif defined(CONFIG_MIPS_MT)
383
384	/* If the core doesn't support MT then return */
385	has_mt	t0, 5f
386
387	/* Enter VPE configuration state */
388	.set	push
389	.set	MIPS_ISA_LEVEL_RAW
390	.set	mt
391	dvpe
392	.set	pop
393
394	PTR_LA	t1, 1f
395	jr.hb	t1
396	 nop
3971:	mfc0	t1, CP0_MVPCONTROL
398	ori	t1, t1, MVPCONTROL_VPC
399	mtc0	t1, CP0_MVPCONTROL
400	ehb
401
402	/* Loop through each VPE */
403	move	t8, ta2
404	li	ta1, 0
405
406	/* Check whether the VPE should be running. If not, skip it */
4071:	andi	t0, ta2, 1
408	beqz	t0, 2f
409	 nop
410
411	/* Operate on the appropriate TC */
412	mfc0	t0, CP0_VPECONTROL
413	ori	t0, t0, VPECONTROL_TARGTC
414	xori	t0, t0, VPECONTROL_TARGTC
415	or	t0, t0, ta1
416	mtc0	t0, CP0_VPECONTROL
417	ehb
418
419	.set	push
420	.set	MIPS_ISA_LEVEL_RAW
421	.set	mt
422
423	/* Skip the VPE if its TC is not halted */
424	mftc0	t0, CP0_TCHALT
425	beqz	t0, 2f
426	 nop
427
428	/* Calculate a pointer to the VPEs struct vpe_boot_config */
429	li	t0, VPEBOOTCFG_SIZE
430	mul	t0, t0, ta1
431	addu	t0, t0, ta3
432
433	/* Set the TC restart PC */
434	lw	t1, VPEBOOTCFG_PC(t0)
435	mttc0	t1, CP0_TCRESTART
436
437	/* Set the TC stack pointer */
438	lw	t1, VPEBOOTCFG_SP(t0)
439	mttgpr	t1, sp
440
441	/* Set the TC global pointer */
442	lw	t1, VPEBOOTCFG_GP(t0)
443	mttgpr	t1, gp
444
445	/* Copy config from this VPE */
446	mfc0	t0, CP0_CONFIG
447	mttc0	t0, CP0_CONFIG
448
449	/*
450	 * Copy the EVA config from this VPE if the CPU supports it.
451	 * CONFIG3 must exist to be running MT startup - just read it.
452	 */
453	mfc0	t0, CP0_CONFIG, 3
454	and	t0, t0, MIPS_CONF3_SC
455	beqz	t0, 3f
456	 nop
457	mfc0    t0, CP0_SEGCTL0
458	mttc0	t0, CP0_SEGCTL0
459	mfc0    t0, CP0_SEGCTL1
460	mttc0	t0, CP0_SEGCTL1
461	mfc0    t0, CP0_SEGCTL2
462	mttc0	t0, CP0_SEGCTL2
4633:
464	/* Ensure no software interrupts are pending */
465	mttc0	zero, CP0_CAUSE
466	mttc0	zero, CP0_STATUS
467
468	/* Set TC active, not interrupt exempt */
469	mftc0	t0, CP0_TCSTATUS
470	li	t1, ~TCSTATUS_IXMT
471	and	t0, t0, t1
472	ori	t0, t0, TCSTATUS_A
473	mttc0	t0, CP0_TCSTATUS
474
475	/* Clear the TC halt bit */
476	mttc0	zero, CP0_TCHALT
477
478	/* Set VPE active */
479	mftc0	t0, CP0_VPECONF0
480	ori	t0, t0, VPECONF0_VPA
481	mttc0	t0, CP0_VPECONF0
482
483	/* Next VPE */
4842:	srl	ta2, ta2, 1
485	addiu	ta1, ta1, 1
486	bnez	ta2, 1b
487	 nop
488
489	/* Leave VPE configuration state */
490	mfc0	t1, CP0_MVPCONTROL
491	xori	t1, t1, MVPCONTROL_VPC
492	mtc0	t1, CP0_MVPCONTROL
493	ehb
494	evpe
495
496	.set	pop
497
498	/* Check whether this VPE is meant to be running */
499	li	t0, 1
500	sll	t0, t0, a1
501	and	t0, t0, t8
502	bnez	t0, 2f
503	 nop
504
505	/* This VPE should be offline, halt the TC */
506	li	t0, TCHALT_H
507	mtc0	t0, CP0_TCHALT
508	PTR_LA	t0, 1f
5091:	jr.hb	t0
510	 nop
511
5122:
513
514#endif /* CONFIG_MIPS_MT_SMP */
515
516	/* Return */
5175:	jr	ra
518	 nop
519	END(mips_cps_boot_vpes)
520
521#if MIPS_ISA_REV > 0
 522LEAF(mips_cps_cache_init)
	/*
	 * Initialise the L1 I- and D-caches by index-storing zeroed tags
	 * over every line. Cache geometry is decoded from Config1.
	 * Clobbers t0-t3, a0, a1, v0. Runs before coherence is enabled.
	 */
 523	/*
 524	 * Clear the bits used to index the caches. Note that the architecture
 525	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
 526	 * be valid for all MIPS32 CPUs, even those for which said writes are
 527	 * unnecessary.
 528	 */
 529	mtc0	zero, CP0_TAGLO, 0
 530	mtc0	zero, CP0_TAGHI, 0
 531	mtc0	zero, CP0_TAGLO, 2
 532	mtc0	zero, CP0_TAGHI, 2
 533	ehb
 534
 535	/* Primary cache configuration is indicated by Config1 */
 536	mfc0	v0, CP0_CONFIG, 1
 537
 538	/* Detect I-cache line size */
	/* t0 = 2 << IL = line size in bytes; IL==0 means no I-cache */
 539	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
 540	beqz	t0, icache_done
 541	 li	t1, 2
 542	sllv	t0, t1, t0
 543
 544	/* Detect I-cache size */
	/*
	 * NOTE(review): when the IS field == 7 the branch skips the shift
	 * and t1 keeps the raw value 7 rather than a set count - verify
	 * against the Config1 IS encoding for 32-sets-per-way parts.
	 */
 545	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
 546	xori	t2, t1, 0x7
 547	beqz	t2, 1f
 548	 li	t3, 32
 549	addiu	t1, t1, 1
 550	sllv	t1, t3, t1
 5511:	/* At this point t1 == I-cache sets per way */
	/* t1 *= line size * associativity => total I-cache bytes */
 552	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
 553	addiu	t2, t2, 1
 554	mul	t1, t1, t0
 555	mul	t1, t1, t2
 556
	/* Index-store a zero tag into every line, stepping by line size */
 557	li	a0, CKSEG0
 558	PTR_ADD	a1, a0, t1
 5591:	cache	Index_Store_Tag_I, 0(a0)
 560	PTR_ADD	a0, a0, t0
 561	bne	a0, a1, 1b
 562	 nop
 563icache_done:
 564
 565	/* Detect D-cache line size */
	/* t0 = 2 << DL = line size in bytes; DL==0 means no D-cache */
 566	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
 567	beqz	t0, dcache_done
 568	 li	t1, 2
 569	sllv	t0, t1, t0
 570
 571	/* Detect D-cache size */
 572	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
 573	xori	t2, t1, 0x7
 574	beqz	t2, 1f
 575	 li	t3, 32
 576	addiu	t1, t1, 1
 577	sllv	t1, t3, t1
 5781:	/* At this point t1 == D-cache sets per way */
 579	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
 580	addiu	t2, t2, 1
 581	mul	t1, t1, t0
 582	mul	t1, t1, t2
 583
	/* Same loop, but a1 = last line and the increment fills the delay slot */
 584	li	a0, CKSEG0
 585	PTR_ADDU a1, a0, t1
 586	PTR_SUBU a1, a1, t0
 5871:	cache	Index_Store_Tag_D, 0(a0)
 588	bne	a0, a1, 1b
 589	 PTR_ADD a0, a0, t0
 590dcache_done:
 591
 592	jr	ra
 593	 nop
 594	END(mips_cps_cache_init)
595#endif /* MIPS_ISA_REV > 0 */
596
597#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
598
599	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
600	.macro	psstate	dest
601	.set	push
602	.set	noat
603	lw	$1, TI_CPU(gp)
604	sll	$1, $1, LONGLOG
605	PTR_LA	\dest, __per_cpu_offset
606	addu	$1, $1, \dest
607	lw	$1, 0($1)
608	PTR_LA	\dest, cps_cpu_state
609	addu	\dest, \dest, $1
610	.set	pop
611	.endm
612
 613LEAF(mips_cps_pm_save)
	/*
	 * Save this CPU's register and static state into its per-CPU
	 * cps_cpu_state (via psstate), then jump to the address in v0.
	 */
 614	/* Save CPU state */
 615	SUSPEND_SAVE_REGS
 616	psstate	t1
 617	SUSPEND_SAVE_STATIC
 618	jr	v0
 619	 nop
 620	END(mips_cps_pm_save)
621
 622LEAF(mips_cps_pm_restore)
	/*
	 * Restore this CPU's state from its per-CPU cps_cpu_state (via
	 * psstate) and return through the restore-regs macro.
	 */
 623	/* Restore CPU state */
 624	psstate	t1
 625	RESUME_RESTORE_STATIC
 626	RESUME_RESTORE_REGS_RETURN
 627	END(mips_cps_pm_restore)
628
629#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
Second copy below corresponds to Linux v6.13.7 (same file, arch/mips/kernel/cps-vec.S).
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 * Copyright (C) 2013 Imagination Technologies
  4 * Author: Paul Burton <paul.burton@mips.com>
  5 */
  6
  7#include <linux/init.h>
  8#include <asm/addrspace.h>
  9#include <asm/asm.h>
 10#include <asm/asm-offsets.h>
 11#include <asm/asmmacro.h>
 12#include <asm/cacheops.h>
 13#include <asm/eva.h>
 14#include <asm/mipsregs.h>
 15#include <asm/mipsmtregs.h>
 16#include <asm/pm.h>
 17#include <asm/smp-cps.h>
 18
 19#define GCR_CPC_BASE_OFS	0x0088
 20#define GCR_CL_COHERENCE_OFS	0x2008
 21#define GCR_CL_ID_OFS		0x2028
 22
 23#define CPC_CL_VC_STOP_OFS	0x2020
 24#define CPC_CL_VC_RUN_OFS	0x2028
 25
 26.extern mips_cm_base
 27
 28.set noreorder
 29
 30#ifdef CONFIG_64BIT
 31# define STATUS_BITDEPS		ST0_KX
 32#else
 33# define STATUS_BITDEPS		0
 34#endif
 35
 36#ifdef CONFIG_MIPS_CPS_NS16550
 37
 38#define DUMP_EXCEP(name)		\
 39	PTR_LA	a0, 8f;			\
 40	jal	mips_cps_bev_dump;	\
 41	 nop;				\
 42	TEXT(name)
 43
 44#else /* !CONFIG_MIPS_CPS_NS16550 */
 45
 46#define DUMP_EXCEP(name)
 47
 48#endif /* !CONFIG_MIPS_CPS_NS16550 */
 49
 50	/*
 51	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
 52	 * MT is not supported then branch to nomt.
 53	 */
  54	.macro	has_mt	dest, nomt
	/*
	 * Bit 31 (M) of Config1/Config2 clear means the next Config register
	 * is not implemented, so Config3.MT cannot exist: branch to \nomt.
	 * Assembled .set noreorder: indented instructions fill delay slots.
	 */
  55	mfc0	\dest, CP0_CONFIG, 1
  56	bgez	\dest, \nomt
  57	 mfc0	\dest, CP0_CONFIG, 2
  58	bgez	\dest, \nomt
  59	 mfc0	\dest, CP0_CONFIG, 3
	/* \dest = Config3.MT bit; zero => MT ASE absent, take \nomt */
  60	andi	\dest, \dest, MIPS_CONF3_MT
  61	beqz	\dest, \nomt
  62	 nop
  63	.endm
 64
 65	/*
 66	 * Set dest to non-zero if the core supports MIPSr6 multithreading
 67	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
 68	 * branch to nomt.
 69	 */
  70	.macro	has_vp	dest, nomt
	/*
	 * Walk Config1..Config4; if any M bit (bit 31) is clear the chain
	 * ends before Config5, so MIPSr6 VP support cannot be present:
	 * branch to \nomt (indented instructions are delay slots).
	 */
  71	mfc0	\dest, CP0_CONFIG, 1
  72	bgez	\dest, \nomt
  73	 mfc0	\dest, CP0_CONFIG, 2
  74	bgez	\dest, \nomt
  75	 mfc0	\dest, CP0_CONFIG, 3
  76	bgez	\dest, \nomt
  77	 mfc0	\dest, CP0_CONFIG, 4
  78	bgez	\dest, \nomt
  79	 mfc0	\dest, CP0_CONFIG, 5
	/* \dest = Config5.VP bit; zero => no MIPSr6 multithreading */
  80	andi	\dest, \dest, MIPS_CONF5_VP
  81	beqz	\dest, \nomt
  82	 nop
  83	.endm
 84
 85
  86LEAF(mips_cps_core_boot)
	/*
	 * Boot path for secondary cores/VPs, called with a0 = Kseg0 CCA and
	 * a1 = GCR base (kept in s0/s1). Optionally joins the coherent
	 * domain, runs EVA/core init, then loads gp/sp/PC from this VPE's
	 * struct vpe_boot_config and jumps there.
	 */
  87	/* Save  CCA and GCR base */
  88	move   s0, a0
  89	move   s1, a1




























  90
  91	/* We don't know how to do coherence setup on earlier ISA */
  92#if MIPS_ISA_REV > 0
	/* s7 doubles as the "was already coherent" flag, re-tested below */
  93	/* Skip cache & coherence setup if we're already coherent */
  94	lw	s7, GCR_CL_COHERENCE_OFS(s1)
  95	bnez	s7, 1f
  96	 nop
  97
  98	/* Initialize the L1 caches */
  99	jal	mips_cps_cache_init
 100	 nop
 101
 102	/* Enter the coherent domain */
 103	li	t0, 0xff
 104	sw	t0, GCR_CL_COHERENCE_OFS(s1)
 105	ehb
 106#endif /* MIPS_ISA_REV > 0 */
 107
 108	/* Set Kseg0 CCA to that in s0 */
	/* ori+xori clears the low 3-bit K0 CCA field before merging in s0 */
 1091:	mfc0	t0, CP0_CONFIG
 110	ori	t0, 0x7
 111	xori	t0, 0x7
 112	or	t0, t0, s0
 113	mtc0	t0, CP0_CONFIG
 114	ehb
 115
 116	/* Jump to kseg0 */
 117	PTR_LA	t0, 1f
 118	jr	t0
 119	 nop
 120
 121	/*
 122	 * We're up, cached & coherent. Perform any EVA initialization necessary
 123	 * before we access memory.
 124	 */
 1251:	eva_init
 126
 127	/* Retrieve boot configuration pointers */
 128	jal	mips_cps_get_bootcfg
 129	 nop
 130
 131	/* Skip core-level init if we started up coherent */
 132	bnez	s7, 1f
 133	 nop
 134
 135	/* Perform any further required core-level initialisation */
 136	jal	mips_cps_core_init
 137	 nop
 138
 139	/*
 140	 * Boot any other VPEs within this core that should be online, and
 141	 * deactivate this VPE if it should be offline.
 142	 */
	/* a0 = core_boot_config (from v0), a1 = this VPE's ID (from t9) */
 143	move	a1, t9
 144	jal	mips_cps_boot_vpes
 145	 move	a0, v0
 146
 147	/* Off we go! */
 1481:	PTR_L	t1, VPEBOOTCFG_PC(v1)
 149	PTR_L	gp, VPEBOOTCFG_GP(v1)
 150	PTR_L	sp, VPEBOOTCFG_SP(v1)
 151	jr	t1
 152	 nop
 153	END(mips_cps_core_boot)
154
155	__INIT
 156LEAF(excep_tlbfill)
	/* BEV TLB-refill vector: optionally dump state over NS16550, then spin */
 157	DUMP_EXCEP("TLB Fill")
 158	b	.
 159	 nop
 160	END(excep_tlbfill)
161
 
 162LEAF(excep_xtlbfill)
	/* BEV XTLB-refill vector: optionally dump state, then spin forever */
 163	DUMP_EXCEP("XTLB Fill")
 164	b	.
 165	 nop
 166	END(excep_xtlbfill)
167
 
 168LEAF(excep_cache)
	/* BEV cache-error vector: optionally dump state, then spin forever */
 169	DUMP_EXCEP("Cache")
 170	b	.
 171	 nop
 172	END(excep_cache)
173
 
 174LEAF(excep_genex)
	/* BEV general-exception vector: optionally dump state, then spin */
 175	DUMP_EXCEP("General")
 176	b	.
 177	 nop
 178	END(excep_genex)
179
 
 180LEAF(excep_intex)
	/* BEV interrupt vector: optionally dump state, then spin forever */
 181	DUMP_EXCEP("Interrupt")
 182	b	.
 183	 nop
 184	END(excep_intex)
185
 
 186LEAF(excep_ejtag)
	/* EJTAG debug exception: tail-jump to the real handler (clobbers k0) */
 187	PTR_LA	k0, ejtag_debug_handler
 188	jr	k0
 189	 nop
 190	END(excep_ejtag)
191	__FINIT
192
 193LEAF(mips_cps_core_init)
	/*
	 * One-time core-level init. With CONFIG_MIPS_MT_SMP: leave only one
	 * TC/VPE executing and park every other VPE's TC as bound 1:1,
	 * non-active, non-allocatable and halted, ready for boot_vpes.
	 * Clobbers t0, t1, ta1, ta3. No-op without MT SMP.
	 */
 194#ifdef CONFIG_MIPS_MT_SMP
 195	/* Check that the core implements the MT ASE */
 196	has_mt	t0, 3f
 197
 198	.set	push
 199	.set	MIPS_ISA_LEVEL_RAW
 200	.set	mt
 201
 202	/* Only allow 1 TC per VPE to execute... */
 203	dmt
 204
 205	/* ...and for the moment only 1 VPE */
 206	dvpe
	/* jr.hb clears instruction hazards from dmt/dvpe before continuing */
 207	PTR_LA	t1, 1f
 208	jr.hb	t1
 209	 nop
 210
 211	/* Enter VPE configuration state */
 2121:	mfc0	t0, CP0_MVPCONTROL
 213	ori	t0, t0, MVPCONTROL_VPC
 214	mtc0	t0, CP0_MVPCONTROL
 215
 216	/* Retrieve the number of VPEs within the core */
 217	mfc0	t0, CP0_MVPCONF0
 218	srl	t0, t0, MVPCONF0_PVPE_SHIFT
 219	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	/* ta3 = VPE count (PVPE field is max index, hence the +1) */
 220	addiu	ta3, t0, 1
 221
 222	/* If there's only 1, we're done */
 223	beqz	t0, 2f
 224	 nop
 225
 226	/* Loop through each VPE within this core */
	/* ta1 = VPE/TC index, starting at 1 (VPE 0 is the one running this) */
 227	li	ta1, 1
 228
 2291:	/* Operate on the appropriate TC */
 230	mtc0	ta1, CP0_VPECONTROL
 231	ehb
 232
 233	/* Bind TC to VPE (1:1 TC:VPE mapping) */
 234	mttc0	ta1, CP0_TCBIND
 235
 236	/* Set exclusive TC, non-active, master */
 237	li	t0, VPECONF0_MVP
 238	sll	t1, ta1, VPECONF0_XTC_SHIFT
 239	or	t0, t0, t1
 240	mttc0	t0, CP0_VPECONF0
 241
 242	/* Set TC non-active, non-allocatable */
 243	mttc0	zero, CP0_TCSTATUS
 244
 245	/* Set TC halted */
 246	li	t0, TCHALT_H
 247	mttc0	t0, CP0_TCHALT
 248
 249	/* Next VPE */
 250	addiu	ta1, ta1, 1
 251	slt	t0, ta1, ta3
 252	bnez	t0, 1b
 253	 nop
 254
 255	/* Leave VPE configuration state */
 2562:	mfc0	t0, CP0_MVPCONTROL
 257	xori	t0, t0, MVPCONTROL_VPC
 258	mtc0	t0, CP0_MVPCONTROL
 259
 2603:	.set	pop
 261#endif
 262	jr	ra
 263	 nop
 264	END(mips_cps_core_init)
265
266/**
267 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
268 *
269 * Returns: pointer to struct core_boot_config in v0, pointer to
270 *          struct vpe_boot_config in v1, VPE ID in t9
271 */
 272LEAF(mips_cps_get_bootcfg)
	/*
	 * In:  s1 = GCR base (set up by the boot path).
	 * Out: v0 = &core_boot_config for this core,
	 *      v1 = &vpe_boot_config for this VPE, t9 = VPE ID.
	 * Clobbers t0, t1, t2, ta2, ta3.
	 */
 273	/* Calculate a pointer to this cores struct core_boot_config */
 274	lw	t0, GCR_CL_ID_OFS(s1)
 275	li	t1, COREBOOTCFG_SIZE
 276	mul	t0, t0, t1
 277	PTR_LA	t1, mips_cps_core_bootcfg
 278	PTR_L	t1, 0(t1)
 279	PTR_ADDU v0, t0, t1
 280
 281	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
 282	li	t9, 0
 283#if defined(CONFIG_CPU_MIPSR6)
 284	has_vp	ta2, 1f
 285
 286	/*
 287	 * Assume non-contiguous numbering. Perhaps some day we'll need
 288	 * to handle contiguous VP numbering, but no such systems yet
 289	 * exist.
 290	 */
 291	mfc0	t9, CP0_GLOBALNUMBER
 292	andi	t9, t9, MIPS_GLOBALNUMBER_VP
 293#elif defined(CONFIG_MIPS_MT_SMP)
 294	has_mt	ta2, 1f
 295
 296	/* Find the number of VPEs present in the core */
 297	mfc0	t1, CP0_MVPCONF0
 298	srl	t1, t1, MVPCONF0_PVPE_SHIFT
 299	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
 300	addiu	t1, t1, 1
 301
 302	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	/* t1 = (1 << (31 - clz(nvpes))) - 1: mask wide enough for the ID */
 303	clz	t1, t1
 304	li	t2, 31
 305	subu	t1, t2, t1
 306	li	t2, 1
 307	sll	t1, t2, t1
 308	addiu	t1, t1, -1
 309
 310	/* Retrieve the VPE ID from EBase.CPUNum */
	/* $15 sel 1 is CP0 EBase; mask isolates the CPUNum bits */
 311	mfc0	t9, $15, 1
 312	and	t9, t9, t1
 313#endif
 314
 3151:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
 316	li	t1, VPEBOOTCFG_SIZE
 317	mul	v1, t9, t1
 318	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
 319	PTR_ADDU v1, v1, ta3
 320
 321	jr	ra
 322	 nop
 323	END(mips_cps_get_bootcfg)
324
 325LEAF(mips_cps_boot_vpes)
	/*
	 * Start the VPs/VPEs of this core that should run, and stop this
	 * one if it should be offline.
	 * In:  a0 = &core_boot_config, a1 = this VPE's ID.
	 * MIPSr6 path: write the VPE mask to the CPC VC_RUN/VC_STOP regs.
	 * MT path: for each masked-in, halted VPE program PC/sp/gp and the
	 * Config/SegCtl registers of its TC, then clear the halt bit.
	 */
 326	lw	ta2, COREBOOTCFG_VPEMASK(a0)
 327	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)
 328
 329#if defined(CONFIG_CPU_MIPSR6)
 330
 331	has_vp	t0, 5f
 332
 333	/* Find base address of CPC */
 334	PTR_LA	t1, mips_gcr_base
 335	PTR_L	t1, 0(t1)
 336	PTR_L	t1, GCR_CPC_BASE_OFS(t1)
	/* Mask off the low control bits, then map through uncached space */
 337	PTR_LI	t2, ~0x7fff
 338	and	t1, t1, t2
 339	PTR_LI	t2, UNCAC_BASE
 340	PTR_ADD	t1, t1, t2
 341
 342	/* Start any other VPs that ought to be running */
 343	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)
 344
 345	/* Ensure this VP stops running if it shouldn't be */
 346	not	ta2
 347	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
 348	ehb
 349
 350#elif defined(CONFIG_MIPS_MT)
 351
 352	/* If the core doesn't support MT then return */
 353	has_mt	t0, 5f
 354
 355	/* Enter VPE configuration state */
 356	.set	push
 357	.set	MIPS_ISA_LEVEL_RAW
 358	.set	mt
 359	dvpe
 360	.set	pop
 361
 362	PTR_LA	t1, 1f
 363	jr.hb	t1
 364	 nop
 3651:	mfc0	t1, CP0_MVPCONTROL
 366	ori	t1, t1, MVPCONTROL_VPC
 367	mtc0	t1, CP0_MVPCONTROL
 368	ehb
 369
 370	/* Loop through each VPE */
	/* t8 preserves the full mask; ta2 is shifted down per iteration */
 371	move	t8, ta2
 372	li	ta1, 0
 373
 374	/* Check whether the VPE should be running. If not, skip it */
 3751:	andi	t0, ta2, 1
 376	beqz	t0, 2f
 377	 nop
 378
 379	/* Operate on the appropriate TC */
 380	mfc0	t0, CP0_VPECONTROL
 381	ori	t0, t0, VPECONTROL_TARGTC
 382	xori	t0, t0, VPECONTROL_TARGTC
 383	or	t0, t0, ta1
 384	mtc0	t0, CP0_VPECONTROL
 385	ehb
 386
 387	.set	push
 388	.set	MIPS_ISA_LEVEL_RAW
 389	.set	mt
 390
 391	/* Skip the VPE if its TC is not halted */
 392	mftc0	t0, CP0_TCHALT
 393	beqz	t0, 2f
 394	 nop
 395
 396	/* Calculate a pointer to the VPEs struct vpe_boot_config */
 397	li	t0, VPEBOOTCFG_SIZE
 398	mul	t0, t0, ta1
	/* PTR_ADDU: register-width add, safe for 64-bit pointers */
 399	PTR_ADDU t0, t0, ta3
 400
 401	/* Set the TC restart PC */
 402	lw	t1, VPEBOOTCFG_PC(t0)
 403	mttc0	t1, CP0_TCRESTART
 404
 405	/* Set the TC stack pointer */
 406	lw	t1, VPEBOOTCFG_SP(t0)
 407	mttgpr	t1, sp
 408
 409	/* Set the TC global pointer */
 410	lw	t1, VPEBOOTCFG_GP(t0)
 411	mttgpr	t1, gp
 412
 413	/* Copy config from this VPE */
 414	mfc0	t0, CP0_CONFIG
 415	mttc0	t0, CP0_CONFIG
 416
 417	/*
 418	 * Copy the EVA config from this VPE if the CPU supports it.
 419	 * CONFIG3 must exist to be running MT startup - just read it.
 420	 */
 421	mfc0	t0, CP0_CONFIG, 3
 422	and	t0, t0, MIPS_CONF3_SC
 423	beqz	t0, 3f
 424	 nop
 425	mfc0    t0, CP0_SEGCTL0
 426	mttc0	t0, CP0_SEGCTL0
 427	mfc0    t0, CP0_SEGCTL1
 428	mttc0	t0, CP0_SEGCTL1
 429	mfc0    t0, CP0_SEGCTL2
 430	mttc0	t0, CP0_SEGCTL2
 4313:
 432	/* Ensure no software interrupts are pending */
 433	mttc0	zero, CP0_CAUSE
 434	mttc0	zero, CP0_STATUS
 435
 436	/* Set TC active, not interrupt exempt */
 437	mftc0	t0, CP0_TCSTATUS
 438	li	t1, ~TCSTATUS_IXMT
 439	and	t0, t0, t1
 440	ori	t0, t0, TCSTATUS_A
 441	mttc0	t0, CP0_TCSTATUS
 442
 443	/* Clear the TC halt bit */
 444	mttc0	zero, CP0_TCHALT
 445
 446	/* Set VPE active */
 447	mftc0	t0, CP0_VPECONF0
 448	ori	t0, t0, VPECONF0_VPA
 449	mttc0	t0, CP0_VPECONF0
 450
 451	/* Next VPE */
 4522:	srl	ta2, ta2, 1
 453	addiu	ta1, ta1, 1
 454	bnez	ta2, 1b
 455	 nop
 456
 457	/* Leave VPE configuration state */
 458	mfc0	t1, CP0_MVPCONTROL
 459	xori	t1, t1, MVPCONTROL_VPC
 460	mtc0	t1, CP0_MVPCONTROL
 461	ehb
 462	evpe
 463
 464	.set	pop
 465
 466	/* Check whether this VPE is meant to be running */
 467	li	t0, 1
 468	sll	t0, t0, a1
 469	and	t0, t0, t8
 470	bnez	t0, 2f
 471	 nop
 472
 473	/* This VPE should be offline, halt the TC */
 474	li	t0, TCHALT_H
 475	mtc0	t0, CP0_TCHALT
 476	PTR_LA	t0, 1f
 4771:	jr.hb	t0
 478	 nop
 479
 4802:
 481
 482#endif /* CONFIG_MIPS_MT_SMP */
 483
 484	/* Return */
 4855:	jr	ra
 486	 nop
 487	END(mips_cps_boot_vpes)
488
489#if MIPS_ISA_REV > 0
 490LEAF(mips_cps_cache_init)
	/*
	 * Initialise the L1 I- and D-caches by index-storing zeroed tags
	 * over every line. Cache geometry is decoded from Config1.
	 * Clobbers t0-t3, a0, a1, v0. Runs before coherence is enabled.
	 */
 491	/*
 492	 * Clear the bits used to index the caches. Note that the architecture
 493	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
 494	 * be valid for all MIPS32 CPUs, even those for which said writes are
 495	 * unnecessary.
 496	 */
 497	mtc0	zero, CP0_TAGLO, 0
 498	mtc0	zero, CP0_TAGHI, 0
 499	mtc0	zero, CP0_TAGLO, 2
 500	mtc0	zero, CP0_TAGHI, 2
 501	ehb
 502
 503	/* Primary cache configuration is indicated by Config1 */
 504	mfc0	v0, CP0_CONFIG, 1
 505
 506	/* Detect I-cache line size */
	/* t0 = 2 << IL = line size in bytes; IL==0 means no I-cache */
 507	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
 508	beqz	t0, icache_done
 509	 li	t1, 2
 510	sllv	t0, t1, t0
 511
 512	/* Detect I-cache size */
	/*
	 * NOTE(review): when the IS field == 7 the branch skips the shift
	 * and t1 keeps the raw value 7 rather than a set count - verify
	 * against the Config1 IS encoding for 32-sets-per-way parts.
	 */
 513	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
 514	xori	t2, t1, 0x7
 515	beqz	t2, 1f
 516	 li	t3, 32
 517	addiu	t1, t1, 1
 518	sllv	t1, t3, t1
 5191:	/* At this point t1 == I-cache sets per way */
	/* t1 *= line size * associativity => total I-cache bytes */
 520	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
 521	addiu	t2, t2, 1
 522	mul	t1, t1, t0
 523	mul	t1, t1, t2
 524
	/* Index-store a zero tag into every line, stepping by line size */
 525	li	a0, CKSEG0
 526	PTR_ADD	a1, a0, t1
 5271:	cache	Index_Store_Tag_I, 0(a0)
 528	PTR_ADD	a0, a0, t0
 529	bne	a0, a1, 1b
 530	 nop
 531icache_done:
 532
 533	/* Detect D-cache line size */
	/* t0 = 2 << DL = line size in bytes; DL==0 means no D-cache */
 534	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
 535	beqz	t0, dcache_done
 536	 li	t1, 2
 537	sllv	t0, t1, t0
 538
 539	/* Detect D-cache size */
 540	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
 541	xori	t2, t1, 0x7
 542	beqz	t2, 1f
 543	 li	t3, 32
 544	addiu	t1, t1, 1
 545	sllv	t1, t3, t1
 5461:	/* At this point t1 == D-cache sets per way */
 547	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
 548	addiu	t2, t2, 1
 549	mul	t1, t1, t0
 550	mul	t1, t1, t2
 551
	/* Same loop, but a1 = last line and the increment fills the delay slot */
 552	li	a0, CKSEG0
 553	PTR_ADDU a1, a0, t1
 554	PTR_SUBU a1, a1, t0
 5551:	cache	Index_Store_Tag_D, 0(a0)
 556	bne	a0, a1, 1b
 557	 PTR_ADD a0, a0, t0
 558dcache_done:
 559
 560	jr	ra
 561	 nop
 562	END(mips_cps_cache_init)
563#endif /* MIPS_ISA_REV > 0 */
564
565#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
566
 567	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
 568	.macro	psstate	dest
 569	.set	push
 570	.set	noat
	/* $1 (at) = cpu * sizeof(long): byte index into __per_cpu_offset[] */
 571	lw	$1, TI_CPU(gp)
 572	sll	$1, $1, LONGLOG
 573	PTR_LA	\dest, __per_cpu_offset
	/* PTR_ADDU: register-width adds, safe for 64-bit pointers */
 574	PTR_ADDU $1, $1, \dest
 575	lw	$1, 0($1)
 576	PTR_LA	\dest, cps_cpu_state
 577	PTR_ADDU \dest, \dest, $1
 578	.set	pop
 579	.endm
580
 581LEAF(mips_cps_pm_save)
	/*
	 * Save this CPU's register and static state into its per-CPU
	 * cps_cpu_state (via psstate), then jump to the address in v0.
	 */
 582	/* Save CPU state */
 583	SUSPEND_SAVE_REGS
 584	psstate	t1
 585	SUSPEND_SAVE_STATIC
 586	jr	v0
 587	 nop
 588	END(mips_cps_pm_save)
589
 590LEAF(mips_cps_pm_restore)
	/*
	 * Restore this CPU's state from its per-CPU cps_cpu_state (via
	 * psstate) and return through the restore-regs macro.
	 */
 591	/* Restore CPU state */
 592	psstate	t1
 593	RESUME_RESTORE_STATIC
 594	RESUME_RESTORE_REGS_RETURN
 595	END(mips_cps_pm_restore)
596
597#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */