Linux Audio

Check our new training course

Loading...
v5.4
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 * Copyright (C) 2013 Imagination Technologies
  4 * Author: Paul Burton <paul.burton@mips.com>
 
 
 
 
 
  5 */
  6
  7#include <asm/addrspace.h>
  8#include <asm/asm.h>
  9#include <asm/asm-offsets.h>
 10#include <asm/asmmacro.h>
 11#include <asm/cacheops.h>
 12#include <asm/eva.h>
 13#include <asm/mipsregs.h>
 14#include <asm/mipsmtregs.h>
 15#include <asm/pm.h>
 16
 17#define GCR_CPC_BASE_OFS	0x0088
 18#define GCR_CL_COHERENCE_OFS	0x2008
 19#define GCR_CL_ID_OFS		0x2028
 20
 21#define CPC_CL_VC_STOP_OFS	0x2020
 22#define CPC_CL_VC_RUN_OFS	0x2028
 23
 24.extern mips_cm_base
 25
 26.set noreorder
 27
 28#ifdef CONFIG_64BIT
 29# define STATUS_BITDEPS		ST0_KX
 30#else
 31# define STATUS_BITDEPS		0
 32#endif
 33
 34#ifdef CONFIG_MIPS_CPS_NS16550
 35
 36#define DUMP_EXCEP(name)		\
 37	PTR_LA	a0, 8f;			\
 38	jal	mips_cps_bev_dump;	\
 39	 nop;				\
 40	TEXT(name)
 41
 42#else /* !CONFIG_MIPS_CPS_NS16550 */
 43
 44#define DUMP_EXCEP(name)
 45
 46#endif /* !CONFIG_MIPS_CPS_NS16550 */
 47
 48	/*
 49	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
 50	 * MT is not supported then branch to nomt.
 51	 */
  52	.macro	has_mt	dest, nomt
	/*
	 * Walk the Config register chain: each Config's M bit (bit 31, the
	 * sign bit) indicates that the next Config register exists, so a
	 * non-negative value means the chain ends before Config3.
	 */
  53	mfc0	\dest, CP0_CONFIG, 1
  54	bgez	\dest, \nomt
	/* Delay slot: read Config2 before the branch resolves */
  55	 mfc0	\dest, CP0_CONFIG, 2
  56	bgez	\dest, \nomt
	/* Delay slot: read Config3, which carries the MT-present bit */
  57	 mfc0	\dest, CP0_CONFIG, 3
  58	andi	\dest, \dest, MIPS_CONF3_MT
  59	beqz	\dest, \nomt
  60	 nop
  61	.endm
 62
 63	/*
 64	 * Set dest to non-zero if the core supports MIPSr6 multithreading
 65	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
 66	 * branch to nomt.
 67	 */
  68	.macro	has_vp	dest, nomt
	/*
	 * Walk Config1..Config5 via each register's M bit (bit 31, the sign
	 * bit, indicating the next Config exists); bail to \nomt if the
	 * chain ends early, then test the Config5.VP bit.
	 */
  69	mfc0	\dest, CP0_CONFIG, 1
  70	bgez	\dest, \nomt
  71	 mfc0	\dest, CP0_CONFIG, 2
  72	bgez	\dest, \nomt
  73	 mfc0	\dest, CP0_CONFIG, 3
  74	bgez	\dest, \nomt
  75	 mfc0	\dest, CP0_CONFIG, 4
  76	bgez	\dest, \nomt
  77	 mfc0	\dest, CP0_CONFIG, 5
	/* Isolate the Config5.VP (virtual processors implemented) bit */
  78	andi	\dest, \dest, MIPS_CONF5_VP
  79	beqz	\dest, \nomt
  80	 nop
  81	.endm
 82
 83	/* Calculate an uncached address for the CM GCRs */
  84	.macro	cmgcrb	dest
  85	.set	push
	/* $1 (at) is used as scratch, so tell the assembler hands-off */
  86	.set	noat
	/* CMGCRBase holds the GCR physical base >> 4; undo the shift */
  87	MFC0	$1, CP0_CMGCRBASE
  88	PTR_SLL	$1, $1, 4
	/* Map the physical base through the uncached segment */
  89	PTR_LI	\dest, UNCAC_BASE
  90	PTR_ADDU \dest, \dest, $1
  91	.set	pop
  92	.endm
 93
 94.section .text.cps-vec
 95.balign 0x1000
 96
/*
 * Entry point for secondary cores/VPEs brought up via the CPS (Coherent
 * Processing System) code. Runs from the BEV exception vector base with
 * caches/coherence not yet set up; configures Status/Cause, initialises
 * the L1 caches, joins the coherent domain, then jumps to the PC from
 * this VPE's struct vpe_boot_config.
 */
 97LEAF(mips_cps_core_entry)
 98	/*
 99	 * These first 4 bytes will be patched by cps_smp_setup to load the
100	 * CCA to use into register s0.
101	 */
102	.word	0
103
104	/* Check whether we're here due to an NMI */
105	mfc0	k0, CP0_STATUS
106	and	k0, k0, ST0_NMI
107	beqz	k0, not_nmi
108	 nop
109
110	/* This is an NMI */
111	PTR_LA	k0, nmi_handler
112	jr	k0
113	 nop
114
115not_nmi:
116	/* Setup Cause */
117	li	t0, CAUSEF_IV
118	mtc0	t0, CP0_CAUSE
119
120	/* Setup Status */
121	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
122	mtc0	t0, CP0_STATUS
123
	/*
	 * s7 records whether we entered already coherent; it is preserved
	 * across the calls below and re-tested before core-level init.
	 */
124	/* Skip cache & coherence setup if we're already coherent */
125	cmgcrb	v1
126	lw	s7, GCR_CL_COHERENCE_OFS(v1)
127	bnez	s7, 1f
128	 nop
















129
130	/* Initialize the L1 caches */
131	jal	mips_cps_cache_init
















132	 nop

133
	/* v1 still holds the uncached GCR base from cmgcrb above */
134	/* Enter the coherent domain */
135	li	t0, 0xff
136	sw	t0, GCR_CL_COHERENCE_OFS(v1)
137	ehb






















138
139	/* Set Kseg0 CCA to that in s0 */
	/* Clear the bottom 3 CCA bits of Config, then OR in the patched CCA */
1401:	mfc0	t0, CP0_CONFIG
141	ori	t0, 0x7
142	xori	t0, 0x7
143	or	t0, t0, s0
144	mtc0	t0, CP0_CONFIG
145	ehb
146











147	/* Jump to kseg0 */
148	PTR_LA	t0, 1f
149	jr	t0
150	 nop
151
152	/*
153	 * We're up, cached & coherent. Perform any EVA initialization necessary
154	 * before we access memory.
155	 */
1561:	eva_init
157
	/* Returns: v0 = core_boot_config, v1 = vpe_boot_config, t9 = VPE ID */
158	/* Retrieve boot configuration pointers */
159	jal	mips_cps_get_bootcfg
160	 nop
161
162	/* Skip core-level init if we started up coherent */
163	bnez	s7, 1f
164	 nop
165
166	/* Perform any further required core-level initialisation */
167	jal	mips_cps_core_init
168	 nop
169
170	/*
171	 * Boot any other VPEs within this core that should be online, and
172	 * deactivate this VPE if it should be offline.
173	 */
174	move	a1, t9
175	jal	mips_cps_boot_vpes
176	 move	a0, v0
177
	/* Load this VPE's entry PC, gp & sp from its vpe_boot_config (v1) */
178	/* Off we go! */
1791:	PTR_L	t1, VPEBOOTCFG_PC(v1)
180	PTR_L	gp, VPEBOOTCFG_GP(v1)
181	PTR_L	sp, VPEBOOTCFG_SP(v1)
182	jr	t1
183	 nop
184	END(mips_cps_core_entry)
185
186.org 0x200
/*
 * BEV TLB-fill vector (.org 0x200): no handler exists this early, so
 * optionally dump state over NS16550 (DUMP_EXCEP) and spin forever.
 */
187LEAF(excep_tlbfill)
188	DUMP_EXCEP("TLB Fill")
189	b	.
190	 nop
191	END(excep_tlbfill)
192
193.org 0x280
/* BEV XTLB-fill vector (.org 0x280): dump (if enabled) then spin */
194LEAF(excep_xtlbfill)
195	DUMP_EXCEP("XTLB Fill")
196	b	.
197	 nop
198	END(excep_xtlbfill)
199
200.org 0x300
/* BEV cache-error vector (.org 0x300): dump (if enabled) then spin */
201LEAF(excep_cache)
202	DUMP_EXCEP("Cache")
203	b	.
204	 nop
205	END(excep_cache)
206
207.org 0x380
/* BEV general-exception vector (.org 0x380): dump (if enabled) then spin */
208LEAF(excep_genex)
209	DUMP_EXCEP("General")
210	b	.
211	 nop
212	END(excep_genex)
213
214.org 0x400
/* BEV interrupt vector (.org 0x400): dump (if enabled) then spin */
215LEAF(excep_intex)
216	DUMP_EXCEP("Interrupt")
217	b	.
218	 nop
219	END(excep_intex)
220
221.org 0x480
/*
 * EJTAG debug vector (.org 0x480): unlike the other vectors this one is
 * serviceable — tail-jump to the kernel's ejtag_debug_handler via k0.
 */
222LEAF(excep_ejtag)

223	PTR_LA	k0, ejtag_debug_handler
224	jr	k0
225	 nop
226	END(excep_ejtag)
227
/*
 * Core-level MT ASE initialisation: if the core implements MT, place
 * every VPE other than VPE0 into a known halted/non-active state so it
 * can later be brought up individually by mips_cps_boot_vpes.
 * No-op (straight return) unless CONFIG_MIPS_MT_SMP.
 * Clobbers: t0, t1, ta1, ta3.
 */
228LEAF(mips_cps_core_init)
229#ifdef CONFIG_MIPS_MT_SMP
230	/* Check that the core implements the MT ASE */
231	has_mt	t0, 3f
232
233	.set	push
234	.set	MIPS_ISA_LEVEL_RAW
235	.set	mt
236
237	/* Only allow 1 TC per VPE to execute... */
238	dmt
239
240	/* ...and for the moment only 1 VPE */
241	dvpe
	/* jr.hb: hazard-barrier jump so the dmt/dvpe take effect */
242	PTR_LA	t1, 1f
243	jr.hb	t1
244	 nop
245
246	/* Enter VPE configuration state */
2471:	mfc0	t0, CP0_MVPCONTROL
248	ori	t0, t0, MVPCONTROL_VPC
249	mtc0	t0, CP0_MVPCONTROL
250
	/* ta3 = number of VPEs (MVPConf0.PVPE is the highest VPE index) */
251	/* Retrieve the number of VPEs within the core */
252	mfc0	t0, CP0_MVPCONF0
253	srl	t0, t0, MVPCONF0_PVPE_SHIFT
254	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
255	addiu	ta3, t0, 1
256
257	/* If there's only 1, we're done */
258	beqz	t0, 2f
259	 nop
260
	/* ta1 = VPE index; VPE0 (this one) is deliberately skipped */
261	/* Loop through each VPE within this core */
262	li	ta1, 1
263
2641:	/* Operate on the appropriate TC */
265	mtc0	ta1, CP0_VPECONTROL
266	ehb
267
268	/* Bind TC to VPE (1:1 TC:VPE mapping) */
269	mttc0	ta1, CP0_TCBIND
270
271	/* Set exclusive TC, non-active, master */
272	li	t0, VPECONF0_MVP
273	sll	t1, ta1, VPECONF0_XTC_SHIFT
274	or	t0, t0, t1
275	mttc0	t0, CP0_VPECONF0
276
277	/* Set TC non-active, non-allocatable */
278	mttc0	zero, CP0_TCSTATUS
279
280	/* Set TC halted */
281	li	t0, TCHALT_H
282	mttc0	t0, CP0_TCHALT
283
284	/* Next VPE */
285	addiu	ta1, ta1, 1
286	slt	t0, ta1, ta3
287	bnez	t0, 1b
288	 nop
289
290	/* Leave VPE configuration state */
2912:	mfc0	t0, CP0_MVPCONTROL
292	xori	t0, t0, MVPCONTROL_VPC
293	mtc0	t0, CP0_MVPCONTROL
294
2953:	.set	pop
296#endif
297	jr	ra
298	 nop
299	END(mips_cps_core_init)
300
301/**
302 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
303 *
304 * Returns: pointer to struct core_boot_config in v0, pointer to
305 *          struct vpe_boot_config in v1, VPE ID in t9
306 */
/*
 * See the block comment above: returns v0 = this core's
 * struct core_boot_config, v1 = this VPE's struct vpe_boot_config,
 * t9 = this VPE's ID. Clobbers t0, t1, t2, ta2, ta3.
 */
307LEAF(mips_cps_get_bootcfg)
	/* v0 = mips_cps_core_bootcfg + core_id * COREBOOTCFG_SIZE */
308	/* Calculate a pointer to this cores struct core_boot_config */
309	cmgcrb	t0
310	lw	t0, GCR_CL_ID_OFS(t0)
311	li	t1, COREBOOTCFG_SIZE
312	mul	t0, t0, t1
313	PTR_LA	t1, mips_cps_core_bootcfg
314	PTR_L	t1, 0(t1)
315	PTR_ADDU v0, t0, t1
316
317	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
318	li	t9, 0
319#if defined(CONFIG_CPU_MIPSR6)
320	has_vp	ta2, 1f
321
322	/*
323	 * Assume non-contiguous numbering. Perhaps some day we'll need
324	 * to handle contiguous VP numbering, but no such systems yet
325	 * exist.
326	 */
327	mfc0	t9, CP0_GLOBALNUMBER
328	andi	t9, t9, MIPS_GLOBALNUMBER_VP
329#elif defined(CONFIG_MIPS_MT_SMP)
330	has_mt	ta2, 1f
331
332	/* Find the number of VPEs present in the core */
333	mfc0	t1, CP0_MVPCONF0
334	srl	t1, t1, MVPCONF0_PVPE_SHIFT
335	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
336	addiu	t1, t1, 1
337
	/* t1 = (1 << ceil(log2(num_vpes))) - 1, via 31 - clz(num_vpes) */
338	/* Calculate a mask for the VPE ID from EBase.CPUNum */
339	clz	t1, t1
340	li	t2, 31
341	subu	t1, t2, t1
342	li	t2, 1
343	sll	t1, t2, t1
344	addiu	t1, t1, -1
345
	/* $15 sel 1 is the EBase register; mask CPUNum down to the VPE ID */
346	/* Retrieve the VPE ID from EBase.CPUNum */
347	mfc0	t9, $15, 1
348	and	t9, t9, t1
349#endif
350
	/* v1 = core's vpe_config array + t9 * VPEBOOTCFG_SIZE */
3511:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
352	li	t1, VPEBOOTCFG_SIZE
353	mul	v1, t9, t1
354	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
355	PTR_ADDU v1, v1, ta3
356
357	jr	ra
358	 nop
359	END(mips_cps_get_bootcfg)
360
/*
 * Boot the other VPEs/VPs of this core that should be online, and stop
 * this one if it should be offline.
 * In:  a0 = this core's struct core_boot_config, a1 = this VPE's ID.
 * MIPSr6 cores use the CPC VC run/stop registers; MT ASE cores program
 * each VPE's TC individually. Returns via ra (unless this VPE halts
 * itself on the MT path).
 */
361LEAF(mips_cps_boot_vpes)
	/* ta2 = mask of VPEs to run, ta3 = vpe_config array */
362	lw	ta2, COREBOOTCFG_VPEMASK(a0)
363	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)
364
365#if defined(CONFIG_CPU_MIPSR6)
366
367	has_vp	t0, 5f
368
	/* CPC base: GCR_CPC_BASE with flag bits masked, mapped uncached */
369	/* Find base address of CPC */
370	cmgcrb	t3
371	PTR_L	t1, GCR_CPC_BASE_OFS(t3)
372	PTR_LI	t2, ~0x7fff
373	and	t1, t1, t2
374	PTR_LI	t2, UNCAC_BASE
375	PTR_ADD	t1, t1, t2
376
377	/* Start any other VPs that ought to be running */
378	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)
379
380	/* Ensure this VP stops running if it shouldn't be */
381	not	ta2
382	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
383	ehb
384
385#elif defined(CONFIG_MIPS_MT)

386
387	/* If the core doesn't support MT then return */
388	has_mt	t0, 5f



389
390	/* Enter VPE configuration state */
391	.set	push
392	.set	MIPS_ISA_LEVEL_RAW
393	.set	mt
394	dvpe
395	.set	pop
396


	/* jr.hb: hazard barrier so the dvpe is seen before continuing */
397	PTR_LA	t1, 1f
398	jr.hb	t1
399	 nop
4001:	mfc0	t1, CP0_MVPCONTROL
401	ori	t1, t1, MVPCONTROL_VPC
402	mtc0	t1, CP0_MVPCONTROL
403	ehb
404
	/* t8 = saved VPE mask, ta2 = mask shifted per-iteration, ta1 = index */
405	/* Loop through each VPE */

406	move	t8, ta2
407	li	ta1, 0
408
409	/* Check whether the VPE should be running. If not, skip it */
4101:	andi	t0, ta2, 1
411	beqz	t0, 2f
412	 nop
413
	/* Replace VPEControl.TargTC with ta1 (clear field, then OR index) */
414	/* Operate on the appropriate TC */
415	mfc0	t0, CP0_VPECONTROL
416	ori	t0, t0, VPECONTROL_TARGTC
417	xori	t0, t0, VPECONTROL_TARGTC
418	or	t0, t0, ta1
419	mtc0	t0, CP0_VPECONTROL
420	ehb
421
422	.set	push
423	.set	MIPS_ISA_LEVEL_RAW
424	.set	mt
425
426	/* Skip the VPE if its TC is not halted */
427	mftc0	t0, CP0_TCHALT
428	beqz	t0, 2f
429	 nop
430
431	/* Calculate a pointer to the VPEs struct vpe_boot_config */
432	li	t0, VPEBOOTCFG_SIZE
433	mul	t0, t0, ta1
434	addu	t0, t0, ta3
435
436	/* Set the TC restart PC */
437	lw	t1, VPEBOOTCFG_PC(t0)
438	mttc0	t1, CP0_TCRESTART
439
440	/* Set the TC stack pointer */
441	lw	t1, VPEBOOTCFG_SP(t0)
442	mttgpr	t1, sp
443
444	/* Set the TC global pointer */
445	lw	t1, VPEBOOTCFG_GP(t0)
446	mttgpr	t1, gp
447
448	/* Copy config from this VPE */
449	mfc0	t0, CP0_CONFIG
450	mttc0	t0, CP0_CONFIG
451
452	/*
453	 * Copy the EVA config from this VPE if the CPU supports it.
454	 * CONFIG3 must exist to be running MT startup - just read it.
455	 */
456	mfc0	t0, CP0_CONFIG, 3
457	and	t0, t0, MIPS_CONF3_SC
458	beqz	t0, 3f
459	 nop
460	mfc0    t0, CP0_SEGCTL0
461	mttc0	t0, CP0_SEGCTL0
462	mfc0    t0, CP0_SEGCTL1
463	mttc0	t0, CP0_SEGCTL1
464	mfc0    t0, CP0_SEGCTL2
465	mttc0	t0, CP0_SEGCTL2
4663:
467	/* Ensure no software interrupts are pending */
468	mttc0	zero, CP0_CAUSE
469	mttc0	zero, CP0_STATUS
470
471	/* Set TC active, not interrupt exempt */
472	mftc0	t0, CP0_TCSTATUS
473	li	t1, ~TCSTATUS_IXMT
474	and	t0, t0, t1
475	ori	t0, t0, TCSTATUS_A
476	mttc0	t0, CP0_TCSTATUS
477
478	/* Clear the TC halt bit */
479	mttc0	zero, CP0_TCHALT
480
481	/* Set VPE active */
482	mftc0	t0, CP0_VPECONF0
483	ori	t0, t0, VPECONF0_VPA
484	mttc0	t0, CP0_VPECONF0
485
486	/* Next VPE */
4872:	srl	ta2, ta2, 1
488	addiu	ta1, ta1, 1
489	bnez	ta2, 1b
490	 nop
491
492	/* Leave VPE configuration state */
493	mfc0	t1, CP0_MVPCONTROL
494	xori	t1, t1, MVPCONTROL_VPC
495	mtc0	t1, CP0_MVPCONTROL
496	ehb
497	evpe
498
499	.set	pop
500
	/* Test bit a1 (this VPE's ID) of the saved mask in t8 */
501	/* Check whether this VPE is meant to be running */
502	li	t0, 1
503	sll	t0, t0, a1
504	and	t0, t0, t8
505	bnez	t0, 2f
506	 nop
507
	/* Halt self; the jr.hb spin ensures the halt takes effect */
508	/* This VPE should be offline, halt the TC */
509	li	t0, TCHALT_H
510	mtc0	t0, CP0_TCHALT
511	PTR_LA	t0, 1f
5121:	jr.hb	t0
513	 nop
514
5152:
516
517#endif /* CONFIG_MIPS_MT_SMP */
518
519	/* Return */
5205:	jr	ra
521	 nop
522	END(mips_cps_boot_vpes)
523
/*
 * Initialise the primary (L1) I- and D-caches by storing zeroed tags to
 * every index. Cache geometry is decoded from Config1 per the MIPS32
 * PRA encodings: line size = 2 << IL/DL, sets/way = 32 << IS/DS (with
 * the 0x7 encoding special-cased), ways = IA/DA + 1.
 * Clobbers t0-t3, a0, a1, v0.
 */
524LEAF(mips_cps_cache_init)
525	/*
526	 * Clear the bits used to index the caches. Note that the architecture
527	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
528	 * be valid for all MIPS32 CPUs, even those for which said writes are
529	 * unnecessary.
530	 */
531	mtc0	zero, CP0_TAGLO, 0
532	mtc0	zero, CP0_TAGHI, 0
533	mtc0	zero, CP0_TAGLO, 2
534	mtc0	zero, CP0_TAGHI, 2
535	ehb
536
537	/* Primary cache configuration is indicated by Config1 */
538	mfc0	v0, CP0_CONFIG, 1
539
	/* t0 = line size = 2 << IL; IL == 0 means no I-cache present */
540	/* Detect I-cache line size */
541	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
542	beqz	t0, icache_done
543	 li	t1, 2
544	sllv	t0, t1, t0
545
	/* t1 = sets per way = 32 << (IS + 1), except IS == 7 encodes 32 */
546	/* Detect I-cache size */
547	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
548	xori	t2, t1, 0x7
549	beqz	t2, 1f
550	 li	t3, 32
551	addiu	t1, t1, 1
552	sllv	t1, t3, t1
5531:	/* At this point t1 == I-cache sets per way */
	/* t1 = total bytes = sets/way * line size * (IA + 1) ways */
554	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
555	addiu	t2, t2, 1
556	mul	t1, t1, t0
557	mul	t1, t1, t2
558
	/* Store zeroed tags to every I-cache index starting at CKSEG0 */
559	li	a0, CKSEG0
560	PTR_ADD	a1, a0, t1
5611:	cache	Index_Store_Tag_I, 0(a0)
562	PTR_ADD	a0, a0, t0
563	bne	a0, a1, 1b
564	 nop
565icache_done:
566
	/* Same procedure for the D-cache, using the DL/DS/DA fields */
567	/* Detect D-cache line size */
568	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
569	beqz	t0, dcache_done
570	 li	t1, 2
571	sllv	t0, t1, t0
572
573	/* Detect D-cache size */
574	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
575	xori	t2, t1, 0x7
576	beqz	t2, 1f
577	 li	t3, 32
578	addiu	t1, t1, 1
579	sllv	t1, t3, t1
5801:	/* At this point t1 == D-cache sets per way */
581	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
582	addiu	t2, t2, 1
583	mul	t1, t1, t0
584	mul	t1, t1, t2
585
	/* Here the loop bound is inclusive (a1 = end - line size) and the
	 * pointer increment sits in the branch delay slot */
586	li	a0, CKSEG0
587	PTR_ADDU a1, a0, t1
588	PTR_SUBU a1, a1, t0
5891:	cache	Index_Store_Tag_D, 0(a0)
590	bne	a0, a1, 1b
591	 PTR_ADD a0, a0, t0
592dcache_done:
593
594	jr	ra
595	 nop
596	END(mips_cps_cache_init)
597
598#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
599
600	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
 601	.macro	psstate	dest
 602	.set	push
	/* $1 (at) is used as scratch */
 603	.set	noat
	/* $1 = this CPU's number, read from thread_info via gp */
 604	lw	$1, TI_CPU(gp)
	/* Scale by pointer size to index the __per_cpu_offset array */
 605	sll	$1, $1, LONGLOG
 606	PTR_LA	\dest, __per_cpu_offset
 607	addu	$1, $1, \dest
 608	lw	$1, 0($1)
	/* \dest = &cps_cpu_state + per-CPU offset */
 609	PTR_LA	\dest, cps_cpu_state
 610	addu	\dest, \dest, $1
 611	.set	pop
 612	.endm
613
/*
 * Save CPU state ahead of a CPS power-management state transition.
 * Registers are saved via SUSPEND_SAVE_REGS, static state into this
 * CPU's mips_static_suspend_state (psstate). Continues via jr v0 —
 * presumably set up by the SUSPEND_SAVE_* macros; see asm/pm.h.
 */
614LEAF(mips_cps_pm_save)
615	/* Save CPU state */
616	SUSPEND_SAVE_REGS
617	psstate	t1
618	SUSPEND_SAVE_STATIC
619	jr	v0
620	 nop
621	END(mips_cps_pm_save)
622
/*
 * Restore the CPU state saved by mips_cps_pm_save; the RESUME_RESTORE_
 * REGS_RETURN macro (asm/pm.h) performs the return itself.
 */
623LEAF(mips_cps_pm_restore)
624	/* Restore CPU state */
625	psstate	t1
626	RESUME_RESTORE_STATIC
627	RESUME_RESTORE_REGS_RETURN
628	END(mips_cps_pm_restore)
629
630#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
v4.6
 
  1/*
  2 * Copyright (C) 2013 Imagination Technologies
  3 * Author: Paul Burton <paul.burton@imgtec.com>
  4 *
  5 * This program is free software; you can redistribute it and/or modify it
  6 * under the terms of the GNU General Public License as published by the
  7 * Free Software Foundation;  either version 2 of the  License, or (at your
  8 * option) any later version.
  9 */
 10
 11#include <asm/addrspace.h>
 12#include <asm/asm.h>
 13#include <asm/asm-offsets.h>
 14#include <asm/asmmacro.h>
 15#include <asm/cacheops.h>
 16#include <asm/eva.h>
 17#include <asm/mipsregs.h>
 18#include <asm/mipsmtregs.h>
 19#include <asm/pm.h>
 20
 
 21#define GCR_CL_COHERENCE_OFS	0x2008
 22#define GCR_CL_ID_OFS		0x2028
 23
 
 
 
 24.extern mips_cm_base
 25
 26.set noreorder
 27
 28#ifdef CONFIG_64BIT
 29# define STATUS_BITDEPS		ST0_KX
 30#else
 31# define STATUS_BITDEPS		0
 32#endif
 33
 34#ifdef CONFIG_MIPS_CPS_NS16550
 35
 36#define DUMP_EXCEP(name)		\
 37	PTR_LA	a0, 8f;			\
 38	jal	mips_cps_bev_dump;	\
 39	 nop;				\
 40	TEXT(name)
 41
 42#else /* !CONFIG_MIPS_CPS_NS16550 */
 43
 44#define DUMP_EXCEP(name)
 45
 46#endif /* !CONFIG_MIPS_CPS_NS16550 */
 47
 48	/*
 49	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
 50	 * MT is not supported then branch to nomt.
 51	 */
	/*
	 * (v4.6 copy) Walk Config1..Config3 via each register's M bit
	 * (bit 31, the sign bit => next Config exists), then test
	 * Config3.MT; branch to \nomt if MT is absent.
	 */
  52	.macro	has_mt	dest, nomt
  53	mfc0	\dest, CP0_CONFIG, 1
  54	bgez	\dest, \nomt
  55	 mfc0	\dest, CP0_CONFIG, 2
  56	bgez	\dest, \nomt
  57	 mfc0	\dest, CP0_CONFIG, 3
  58	andi	\dest, \dest, MIPS_CONF3_MT
  59	beqz	\dest, \nomt
  60	 nop
  61	.endm
 62
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 63.section .text.cps-vec
 64.balign 0x1000
 65
/*
 * (v4.6 copy) Secondary core/VPE entry point. Unlike the v5.4 version
 * above, cache initialisation is inlined here rather than being a
 * separate mips_cps_cache_init function, and the vpe_boot_config
 * pointer is computed inside mips_cps_boot_vpes (returned in v0).
 */
 66LEAF(mips_cps_core_entry)
 67	/*
 68	 * These first 4 bytes will be patched by cps_smp_setup to load the
 69	 * CCA to use into register s0.
 70	 */
 71	.word	0
 72
 73	/* Check whether we're here due to an NMI */
 74	mfc0	k0, CP0_STATUS
 75	and	k0, k0, ST0_NMI
 76	beqz	k0, not_nmi
 77	 nop
 78
 79	/* This is an NMI */
 80	PTR_LA	k0, nmi_handler
 81	jr	k0
 82	 nop
 83
 84not_nmi:
 85	/* Setup Cause */
 86	li	t0, CAUSEF_IV
 87	mtc0	t0, CP0_CAUSE
 88
 89	/* Setup Status */
 90	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
 91	mtc0	t0, CP0_STATUS
 92
 93	/*
 94	 * Clear the bits used to index the caches. Note that the architecture
 95	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
 96	 * be valid for all MIPS32 CPUs, even those for which said writes are
 97	 * unnecessary.
 98	 */
 99	mtc0	zero, CP0_TAGLO, 0
100	mtc0	zero, CP0_TAGHI, 0
101	mtc0	zero, CP0_TAGLO, 2
102	mtc0	zero, CP0_TAGHI, 2
103	ehb
104
105	/* Primary cache configuration is indicated by Config1 */
106	mfc0	v0, CP0_CONFIG, 1
107
108	/* Detect I-cache line size */
109	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
110	beqz	t0, icache_done
111	 li	t1, 2
112	sllv	t0, t1, t0
113
114	/* Detect I-cache size */
115	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
116	xori	t2, t1, 0x7
117	beqz	t2, 1f
118	 li	t3, 32
119	addiu	t1, t1, 1
120	sllv	t1, t3, t1
1211:	/* At this point t1 == I-cache sets per way */
122	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
123	addiu	t2, t2, 1
124	mul	t1, t1, t0
125	mul	t1, t1, t2
126
127	li	a0, CKSEG0
128	PTR_ADD	a1, a0, t1
1291:	cache	Index_Store_Tag_I, 0(a0)
130	PTR_ADD	a0, a0, t0
131	bne	a0, a1, 1b
132	 nop
133icache_done:
134
135	/* Detect D-cache line size */
136	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
137	beqz	t0, dcache_done
138	 li	t1, 2
139	sllv	t0, t1, t0
140
141	/* Detect D-cache size */
142	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
143	xori	t2, t1, 0x7
144	beqz	t2, 1f
145	 li	t3, 32
146	addiu	t1, t1, 1
147	sllv	t1, t3, t1
1481:	/* At this point t1 == D-cache sets per way */
149	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
150	addiu	t2, t2, 1
151	mul	t1, t1, t0
152	mul	t1, t1, t2
153
154	li	a0, CKSEG0
155	PTR_ADDU a1, a0, t1
156	PTR_SUBU a1, a1, t0
1571:	cache	Index_Store_Tag_D, 0(a0)
158	bne	a0, a1, 1b
159	 PTR_ADD a0, a0, t0
160dcache_done:
161
162	/* Set Kseg0 CCA to that in s0 */
163	mfc0	t0, CP0_CONFIG
164	ori	t0, 0x7
165	xori	t0, 0x7
166	or	t0, t0, s0
167	mtc0	t0, CP0_CONFIG
168	ehb
169
170	/* Calculate an uncached address for the CM GCRs */
171	MFC0	v1, CP0_CMGCRBASE
172	PTR_SLL	v1, v1, 4
173	PTR_LI	t0, UNCAC_BASE
174	PTR_ADDU v1, v1, t0
175
176	/* Enter the coherent domain */
177	li	t0, 0xff
178	sw	t0, GCR_CL_COHERENCE_OFS(v1)
179	ehb
180
181	/* Jump to kseg0 */
182	PTR_LA	t0, 1f
183	jr	t0
184	 nop
185
186	/*
187	 * We're up, cached & coherent. Perform any further required core-level
188	 * initialisation.
189	 */
1901:	jal	mips_cps_core_init







191	 nop
192
193	/* Do any EVA initialization if necessary */
194	eva_init

195
196	/*
197	 * Boot any other VPEs within this core that should be online, and
198	 * deactivate this VPE if it should be offline.
199	 */

200	jal	mips_cps_boot_vpes
201	 nop
202
	/* v0 = this VPE's vpe_boot_config, set up by mips_cps_boot_vpes */
203	/* Off we go! */
204	PTR_L	t1, VPEBOOTCFG_PC(v0)
205	PTR_L	gp, VPEBOOTCFG_GP(v0)
206	PTR_L	sp, VPEBOOTCFG_SP(v0)
207	jr	t1
208	 nop
209	END(mips_cps_core_entry)
210
211.org 0x200
/* (v4.6 copy) BEV TLB-fill vector: dump (if enabled) then spin */
212LEAF(excep_tlbfill)
213	DUMP_EXCEP("TLB Fill")
214	b	.
215	 nop
216	END(excep_tlbfill)
217
218.org 0x280
/* (v4.6 copy) BEV XTLB-fill vector: dump (if enabled) then spin */
219LEAF(excep_xtlbfill)
220	DUMP_EXCEP("XTLB Fill")
221	b	.
222	 nop
223	END(excep_xtlbfill)
224
225.org 0x300
/* (v4.6 copy) BEV cache-error vector: dump (if enabled) then spin */
226LEAF(excep_cache)
227	DUMP_EXCEP("Cache")
228	b	.
229	 nop
230	END(excep_cache)
231
232.org 0x380
/* (v4.6 copy) BEV general-exception vector: dump (if enabled) then spin */
233LEAF(excep_genex)
234	DUMP_EXCEP("General")
235	b	.
236	 nop
237	END(excep_genex)
238
239.org 0x400
/* (v4.6 copy) BEV interrupt vector: dump (if enabled) then spin */
240LEAF(excep_intex)
241	DUMP_EXCEP("Interrupt")
242	b	.
243	 nop
244	END(excep_intex)
245
246.org 0x480
/*
 * (v4.6 copy) EJTAG debug vector: optionally dump, then tail-jump to
 * ejtag_debug_handler via k0 (this version also calls DUMP_EXCEP).
 */
247LEAF(excep_ejtag)
248	DUMP_EXCEP("EJTAG")
249	PTR_LA	k0, ejtag_debug_handler
250	jr	k0
251	 nop
252	END(excep_ejtag)
253
/*
 * (v4.6 copy) Core-level MT ASE init: halt & deactivate every VPE
 * other than VPE0 so mips_cps_boot_vpes can bring them up later.
 * No-op unless CONFIG_MIPS_MT_SMP. Clobbers t0, t1, ta1, ta3.
 */
254LEAF(mips_cps_core_init)
255#ifdef CONFIG_MIPS_MT_SMP
256	/* Check that the core implements the MT ASE */
257	has_mt	t0, 3f
258
259	.set	push

260	.set	mt
261
262	/* Only allow 1 TC per VPE to execute... */
263	dmt
264
265	/* ...and for the moment only 1 VPE */
266	dvpe
267	PTR_LA	t1, 1f
268	jr.hb	t1
269	 nop
270
271	/* Enter VPE configuration state */
2721:	mfc0	t0, CP0_MVPCONTROL
273	ori	t0, t0, MVPCONTROL_VPC
274	mtc0	t0, CP0_MVPCONTROL
275
	/* ta3 = number of VPEs = MVPConf0.PVPE + 1 */
276	/* Retrieve the number of VPEs within the core */
277	mfc0	t0, CP0_MVPCONF0
278	srl	t0, t0, MVPCONF0_PVPE_SHIFT
279	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
280	addiu	ta3, t0, 1
281
282	/* If there's only 1, we're done */
283	beqz	t0, 2f
284	 nop
285
286	/* Loop through each VPE within this core */
287	li	ta1, 1
288
2891:	/* Operate on the appropriate TC */
290	mtc0	ta1, CP0_VPECONTROL
291	ehb
292
293	/* Bind TC to VPE (1:1 TC:VPE mapping) */
294	mttc0	ta1, CP0_TCBIND
295
296	/* Set exclusive TC, non-active, master */
297	li	t0, VPECONF0_MVP
298	sll	t1, ta1, VPECONF0_XTC_SHIFT
299	or	t0, t0, t1
300	mttc0	t0, CP0_VPECONF0
301
302	/* Set TC non-active, non-allocatable */
303	mttc0	zero, CP0_TCSTATUS
304
305	/* Set TC halted */
306	li	t0, TCHALT_H
307	mttc0	t0, CP0_TCHALT
308
309	/* Next VPE */
310	addiu	ta1, ta1, 1
311	slt	t0, ta1, ta3
312	bnez	t0, 1b
313	 nop
314
315	/* Leave VPE configuration state */
3162:	mfc0	t0, CP0_MVPCONTROL
317	xori	t0, t0, MVPCONTROL_VPC
318	mtc0	t0, CP0_MVPCONTROL
319
3203:	.set	pop
321#endif
322	jr	ra
323	 nop
324	END(mips_cps_core_init)
325
/*
 * (v4.6 copy) Compute this VPE's vpe_boot_config pointer (returned in
 * v0) and, on MT ASE cores, start the other VPEs of this core that
 * should be online and halt this VPE if it should be offline. This
 * version reads the CM base from the mips_cm_base variable rather than
 * CP0 CMGCRBase, and derives the VPE ID from EBase.CPUNum itself.
 */
326LEAF(mips_cps_boot_vpes)
327	/* Retrieve CM base address */
328	PTR_LA	t0, mips_cm_base
329	PTR_L	t0, 0(t0)
330


331	/* Calculate a pointer to this cores struct core_boot_config */

332	lw	t0, GCR_CL_ID_OFS(t0)
333	li	t1, COREBOOTCFG_SIZE
334	mul	t0, t0, t1
335	PTR_LA	t1, mips_cps_core_bootcfg
336	PTR_L	t1, 0(t1)
337	PTR_ADDU t0, t0, t1
338
339	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
340	li	t9, 0
341#ifdef CONFIG_MIPS_MT_SMP










342	has_mt	ta2, 1f
343
344	/* Find the number of VPEs present in the core */
345	mfc0	t1, CP0_MVPCONF0
346	srl	t1, t1, MVPCONF0_PVPE_SHIFT
347	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
348	addiu	t1, t1, 1
349
	/* t1 = (1 << ceil(log2(num_vpes))) - 1, via 31 - clz(num_vpes) */
350	/* Calculate a mask for the VPE ID from EBase.CPUNum */
351	clz	t1, t1
352	li	t2, 31
353	subu	t1, t2, t1
354	li	t2, 1
355	sll	t1, t2, t1
356	addiu	t1, t1, -1
357
	/* $15 sel 1 is the EBase register */
358	/* Retrieve the VPE ID from EBase.CPUNum */
359	mfc0	t9, $15, 1
360	and	t9, t9, t1
361#endif
362
3631:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
364	li	t1, VPEBOOTCFG_SIZE
365	mul	v0, t9, t1
366	PTR_L	ta3, COREBOOTCFG_VPECONFIG(t0)
367	PTR_ADDU v0, v0, ta3










368
369#ifdef CONFIG_MIPS_MT_SMP


















370
371	/* If the core doesn't support MT then return */
372	bnez	ta2, 1f
373	 nop
374	jr	ra
375	 nop
376

377	.set	push

378	.set	mt


379
3801:	/* Enter VPE configuration state */
381	dvpe
382	PTR_LA	t1, 1f
383	jr.hb	t1
384	 nop
3851:	mfc0	t1, CP0_MVPCONTROL
386	ori	t1, t1, MVPCONTROL_VPC
387	mtc0	t1, CP0_MVPCONTROL
388	ehb
389
	/* ta2 = mask of VPEs to run (t8 saves it), ta1 = VPE index */
390	/* Loop through each VPE */
391	PTR_L	ta2, COREBOOTCFG_VPEMASK(t0)
392	move	t8, ta2
393	li	ta1, 0
394
395	/* Check whether the VPE should be running. If not, skip it */
3961:	andi	t0, ta2, 1
397	beqz	t0, 2f
398	 nop
399
	/* Replace VPEControl.TargTC with ta1 (clear field, then OR index) */
400	/* Operate on the appropriate TC */
401	mfc0	t0, CP0_VPECONTROL
402	ori	t0, t0, VPECONTROL_TARGTC
403	xori	t0, t0, VPECONTROL_TARGTC
404	or	t0, t0, ta1
405	mtc0	t0, CP0_VPECONTROL
406	ehb
407




408	/* Skip the VPE if its TC is not halted */
409	mftc0	t0, CP0_TCHALT
410	beqz	t0, 2f
411	 nop
412
413	/* Calculate a pointer to the VPEs struct vpe_boot_config */
414	li	t0, VPEBOOTCFG_SIZE
415	mul	t0, t0, ta1
416	addu	t0, t0, ta3
417
418	/* Set the TC restart PC */
419	lw	t1, VPEBOOTCFG_PC(t0)
420	mttc0	t1, CP0_TCRESTART
421
422	/* Set the TC stack pointer */
423	lw	t1, VPEBOOTCFG_SP(t0)
424	mttgpr	t1, sp
425
426	/* Set the TC global pointer */
427	lw	t1, VPEBOOTCFG_GP(t0)
428	mttgpr	t1, gp
429
430	/* Copy config from this VPE */
431	mfc0	t0, CP0_CONFIG
432	mttc0	t0, CP0_CONFIG
433















434	/* Ensure no software interrupts are pending */
435	mttc0	zero, CP0_CAUSE
436	mttc0	zero, CP0_STATUS
437
438	/* Set TC active, not interrupt exempt */
439	mftc0	t0, CP0_TCSTATUS
440	li	t1, ~TCSTATUS_IXMT
441	and	t0, t0, t1
442	ori	t0, t0, TCSTATUS_A
443	mttc0	t0, CP0_TCSTATUS
444
445	/* Clear the TC halt bit */
446	mttc0	zero, CP0_TCHALT
447
448	/* Set VPE active */
449	mftc0	t0, CP0_VPECONF0
450	ori	t0, t0, VPECONF0_VPA
451	mttc0	t0, CP0_VPECONF0
452
453	/* Next VPE */
4542:	srl	ta2, ta2, 1
455	addiu	ta1, ta1, 1
456	bnez	ta2, 1b
457	 nop
458
459	/* Leave VPE configuration state */
460	mfc0	t1, CP0_MVPCONTROL
461	xori	t1, t1, MVPCONTROL_VPC
462	mtc0	t1, CP0_MVPCONTROL
463	ehb
464	evpe
465


	/* Test bit t9 (this VPE's ID) of the saved mask in t8 */
466	/* Check whether this VPE is meant to be running */
467	li	t0, 1
468	sll	t0, t0, t9
469	and	t0, t0, t8
470	bnez	t0, 2f
471	 nop
472
473	/* This VPE should be offline, halt the TC */
474	li	t0, TCHALT_H
475	mtc0	t0, CP0_TCHALT
476	PTR_LA	t0, 1f
4771:	jr.hb	t0
478	 nop
479
4802:	.set	pop
481
482#endif /* CONFIG_MIPS_MT_SMP */
483
484	/* Return */
485	jr	ra
486	 nop
487	END(mips_cps_boot_vpes)
488
489#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
490
491	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
	/*
	 * (v4.6 copy) \dest = &cps_cpu_state + __per_cpu_offset[cpu],
	 * with the CPU number read from thread_info via gp; uses $1 (at)
	 * as scratch.
	 */
 492	.macro	psstate	dest
 493	.set	push
 494	.set	noat
 495	lw	$1, TI_CPU(gp)
 496	sll	$1, $1, LONGLOG
 497	PTR_LA	\dest, __per_cpu_offset
 498	addu	$1, $1, \dest
 499	lw	$1, 0($1)
 500	PTR_LA	\dest, cps_cpu_state
 501	addu	\dest, \dest, $1
 502	.set	pop
 503	.endm
504
/*
 * (v4.6 copy) Save CPU state for a CPS PM transition; continues via
 * jr v0 — presumably set up by the SUSPEND_SAVE_* macros (asm/pm.h).
 */
505LEAF(mips_cps_pm_save)
506	/* Save CPU state */
507	SUSPEND_SAVE_REGS
508	psstate	t1
509	SUSPEND_SAVE_STATIC
510	jr	v0
511	 nop
512	END(mips_cps_pm_save)
513
/*
 * (v4.6 copy) Restore state saved by mips_cps_pm_save; the
 * RESUME_RESTORE_REGS_RETURN macro performs the return itself.
 */
514LEAF(mips_cps_pm_restore)
515	/* Restore CPU state */
516	psstate	t1
517	RESUME_RESTORE_STATIC
518	RESUME_RESTORE_REGS_RETURN
519	END(mips_cps_pm_restore)
520
521#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */