1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * Copyright (C) 2013 Imagination Technologies
4 * Author: Paul Burton <paul.burton@mips.com>
5 */
6
7#include <asm/addrspace.h>
8#include <asm/asm.h>
9#include <asm/asm-offsets.h>
10#include <asm/asmmacro.h>
11#include <asm/cacheops.h>
12#include <asm/eva.h>
13#include <asm/mipsregs.h>
14#include <asm/mipsmtregs.h>
15#include <asm/pm.h>
16
17#define GCR_CPC_BASE_OFS 0x0088
18#define GCR_CL_COHERENCE_OFS 0x2008
19#define GCR_CL_ID_OFS 0x2028
20
21#define CPC_CL_VC_STOP_OFS 0x2020
22#define CPC_CL_VC_RUN_OFS 0x2028
23
24.extern mips_cm_base
25
26.set noreorder
27
28#ifdef CONFIG_64BIT
29# define STATUS_BITDEPS ST0_KX
30#else
31# define STATUS_BITDEPS 0
32#endif
33
34#ifdef CONFIG_MIPS_CPS_NS16550
35
36#define DUMP_EXCEP(name) \
37 PTR_LA a0, 8f; \
38 jal mips_cps_bev_dump; \
39 nop; \
40 TEXT(name)
41
42#else /* !CONFIG_MIPS_CPS_NS16550 */
43
44#define DUMP_EXCEP(name)
45
46#endif /* !CONFIG_MIPS_CPS_NS16550 */
47
	/*
	 * has_mt dest, nomt
	 *
	 * Set \dest to non-zero if the core supports the MT ASE, else zero.
	 * If MT is not supported then branch to \nomt.
	 *
	 * Each bgez tests bit 31 (the "M" continuation bit) of a Config
	 * register to check that the next Config register exists; the
	 * branch delay slots (noreorder is in effect file-wide) are used
	 * to load the next Config register. Clobbers \dest only.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	 nop
	.endm
62
	/*
	 * has_vp dest, nomt
	 *
	 * Set \dest to non-zero if the core supports MIPSr6 multithreading
	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported
	 * then branch to \nomt.
	 *
	 * Walks Config1..Config5 via the "M" continuation bit (bit 31,
	 * tested by bgez) before checking Config5.VP; delay slots load the
	 * next Config register. Clobbers \dest only.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	 nop
	.endm
82
	/*
	 * cmgcrb dest
	 *
	 * Calculate an uncached address for the CM GCRs: read the 36-bit
	 * physical GCR base from CP0 CMGCRBase (held shifted right by 4),
	 * shift it back into place and add UNCAC_BASE to form an uncached
	 * virtual address. Uses $1 (at) as scratch, hence noat.
	 */
	.macro	cmgcrb	dest
	.set	push
	.set	noat
	MFC0	$1, CP0_CMGCRBASE
	PTR_SLL	$1, $1, 4
	PTR_LI	\dest, UNCAC_BASE
	PTR_ADDU \dest, \dest, $1
	.set	pop
	.endm
93
.balign 0x1000

/*
 * mips_cps_core_entry - reset/BEV entry point for secondary cores/VPEs.
 *
 * Runs uncached with BEV set until the jump to kseg0 below; must be
 * 4KB aligned since the exception vector offsets (.org below) are
 * relative to this address. s7 (callee-saved, so it survives the jal
 * calls) records whether we started up already coherent.
 */
LEAF(mips_cps_core_entry)
	/*
	 * These first 4 bytes will be patched by cps_smp_setup to load the
	 * CCA to use into register s0.
	 */
	.word	0

	/* Check whether we're here due to an NMI */
	mfc0	k0, CP0_STATUS
	and	k0, k0, ST0_NMI
	beqz	k0, not_nmi
	 nop

	/* This is an NMI */
	PTR_LA	k0, nmi_handler
	jr	k0
	 nop

not_nmi:
	/* Setup Cause */
	li	t0, CAUSEF_IV
	mtc0	t0, CP0_CAUSE

	/* Setup Status */
	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
	mtc0	t0, CP0_STATUS

	/* Skip cache & coherence setup if we're already coherent */
	cmgcrb	v1
	lw	s7, GCR_CL_COHERENCE_OFS(v1)
	bnez	s7, 1f
	 nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	 nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(v1)
	ehb

	/* Set Kseg0 CCA to that in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7		/* ori+xori clears the K0 CCA field (bits 2:0)... */
	xori	t0, 0x7		/* ...so the patched-in CCA in s0 can be inserted */
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	 nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	 nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	 nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	 nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	 move	a0, v0

	/* Off we go! Load this VPE's entry PC, gp & sp and jump to it. */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	 nop
	END(mips_cps_core_entry)
184
/*
 * BEV exception vectors. The .org offsets are relative to the 4KB
 * aligned mips_cps_core_entry above and match the architectural BEV
 * vector layout (TLB refill 0x200, XTLB refill 0x280, cache error
 * 0x300, general 0x380, interrupt 0x400, EJTAG debug 0x480).
 * Exceptions here are unexpected during early boot: each handler spins
 * forever (after optionally dumping state via mips_cps_bev_dump when
 * CONFIG_MIPS_CPS_NS16550 is set — see DUMP_EXCEP). Only the EJTAG
 * vector forwards to a real handler.
 */
.org 0x200
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.
	 nop
	END(excep_tlbfill)

.org 0x280
LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	 nop
	END(excep_xtlbfill)

.org 0x300
LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	 nop
	END(excep_cache)

.org 0x380
LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	 nop
	END(excep_genex)

.org 0x400
LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	 nop
	END(excep_intex)

.org 0x480
LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	 nop
	END(excep_ejtag)
226
/*
 * mips_cps_core_init - perform one-time core-level MT initialisation.
 *
 * With CONFIG_MIPS_MT_SMP, binds each TC 1:1 to a VPE and leaves all
 * secondary TCs halted, non-active & non-allocatable so that
 * mips_cps_boot_vpes can later start them. A no-op (immediate return)
 * if the core lacks the MT ASE or the kernel isn't MT-SMP.
 * Clobbers: t0, t1, ta1, ta3.
 */
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	/* jr.hb provides the execution hazard barrier dmt/dvpe require */
	PTR_LA	t1, 1f
	jr.hb	t1
	 nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1		/* ta3 = VPE count (field is max index) */

	/* If there's only 1, we're done */
	beqz	t0, 2f
	 nop

	/* Loop through each VPE within this core, starting from VPE 1 */
	li	ta1, 1

1:	/* Operate on the appropriate TC (TargTC = ta1) */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	 nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	 nop
	END(mips_cps_core_init)
299
/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *	    struct vpe_boot_config in v1, VPE ID in t9
 *
 * Clobbers t0-t2 and ta3 in addition to the return registers.
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this cores struct core_boot_config */
	cmgcrb	t0
	lw	t0, GCR_CL_ID_OFS(t0)	/* t0 = this core's ID from the GCRs */
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1

	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/*
	 * Calculate a mask for the VPE ID from EBase.CPUNum: round the
	 * VPE count up to the next power of two (via clz) and subtract 1.
	 */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1

	/* Retrieve the VPE ID from EBase.CPUNum ($15 select 1 = EBase) */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	 nop
	END(mips_cps_get_bootcfg)
359
/*
 * mips_cps_boot_vpes - start the other VPEs/VPs of this core that
 * should be online, and stop/halt this one if it should be offline.
 *
 * In:	a0 = pointer to this core's struct core_boot_config
 *	a1 = this VPE's ID
 * On MIPSr6 this is done via the CPC VC_RUN/VC_STOP registers; on MT
 * it is done through the MT ASE VPE/TC configuration registers.
 * Clobbers: t0-t3, t8, ta1-ta3.
 */
LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)	/* ta2 = mask of VPEs to run */
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC (15-bit aligned), make it uncached */
	cmgcrb	t3
	PTR_L	t1, GCR_CPC_BASE_OFS(t3)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1
	 nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE; t8 preserves the full mask for later */
	move	t8, ta2
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	 nop

	/* Operate on the appropriate TC (ori+xori clears TargTC field) */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted (ie. already running) */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	 nop

	/* Calculate a pointer to the VPEs struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	addu	t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	 nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	 nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE (ID in a1) is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	 nop

	/* This VPE should be offline, halt the TC & spin with hazard barrier */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0
	 nop

2:

#endif /* CONFIG_MIPS_MT */

	/* Return */
5:	jr	ra
	 nop
	END(mips_cps_boot_vpes)
522
/*
 * mips_cps_cache_init - initialise (invalidate) the primary caches by
 * writing zeroed tags to every index of the I- & D-caches, with
 * geometry read from Config1.
 * Clobbers: a0, a1, t0-t3, v0.
 */
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size: t0 = 2 << IL (0 => no I-cache) */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/*
	 * Detect I-cache size: t1 = 32 << (IS + 1) sets per way.
	 * NOTE(review): when IS == 7 the branch skips the shift, leaving
	 * t1 == 7 rather than a sets-per-way count — looks suspicious;
	 * confirm against the Config1.IS encoding.
	 */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1		/* t2 = associativity (ways) */
	mul	t1, t1, t0
	mul	t1, t1, t2		/* t1 = total I-cache bytes */

	/* Index-store zeroed tags across the whole I-cache */
	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	 nop
icache_done:

	/* Detect D-cache line size: t0 = 2 << DL (0 => no D-cache) */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect D-cache size (same scheme & caveat as the I-cache above) */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2		/* t1 = total D-cache bytes */

	/* Loop ends at last line (a1 pre-decremented); step in delay slot */
	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	 PTR_ADD a0, a0, t0
dcache_done:

	jr	ra
	 nop
	END(mips_cps_cache_init)
596
597#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
598
	/*
	 * psstate dest
	 *
	 * Calculate a pointer to this CPUs struct mips_static_suspend_state:
	 * \dest = &cps_cpu_state + __per_cpu_offset[cpu]. Reads the CPU
	 * number from TI_CPU(gp) (assumes gp points at thread_info — the
	 * kernel's convention; confirm at call sites). Uses $1 (at) as
	 * scratch, hence noat.
	 */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)
	sll	$1, $1, LONGLOG		/* scale cpu by pointer size */
	PTR_LA	\dest, __per_cpu_offset
	addu	$1, $1, \dest
	lw	$1, 0($1)		/* $1 = __per_cpu_offset[cpu] */
	PTR_LA	\dest, cps_cpu_state
	addu	\dest, \dest, $1
	.set	pop
	.endm
612
/*
 * mips_cps_pm_save - save CPU state for power management, then jump to
 * the address in v0 (presumably a continuation supplied by the caller
 * via the SUSPEND_SAVE_REGS protocol — confirm against asm/pm.h).
 */
LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1		/* t1 = this CPU's static suspend state */
	SUSPEND_SAVE_STATIC
	jr	v0
	 nop
	END(mips_cps_pm_save)
621
/*
 * mips_cps_pm_restore - restore CPU state saved by mips_cps_pm_save
 * and return to the saved context (RESUME_RESTORE_REGS_RETURN returns
 * on our behalf — see asm/pm.h).
 */
LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1		/* t1 = this CPU's static suspend state */
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)
628
629#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
1/* SPDX-License-Identifier: GPL-2.0-or-later */
2/*
3 * Copyright (C) 2013 Imagination Technologies
4 * Author: Paul Burton <paul.burton@mips.com>
5 */
6
7#include <asm/addrspace.h>
8#include <asm/asm.h>
9#include <asm/asm-offsets.h>
10#include <asm/asmmacro.h>
11#include <asm/cacheops.h>
12#include <asm/eva.h>
13#include <asm/mipsregs.h>
14#include <asm/mipsmtregs.h>
15#include <asm/pm.h>
16#include <asm/smp-cps.h>
17
18#define GCR_CPC_BASE_OFS 0x0088
19#define GCR_CL_COHERENCE_OFS 0x2008
20#define GCR_CL_ID_OFS 0x2028
21
22#define CPC_CL_VC_STOP_OFS 0x2020
23#define CPC_CL_VC_RUN_OFS 0x2028
24
25.extern mips_cm_base
26
27.set noreorder
28
29#ifdef CONFIG_64BIT
30# define STATUS_BITDEPS ST0_KX
31#else
32# define STATUS_BITDEPS 0
33#endif
34
35#ifdef CONFIG_MIPS_CPS_NS16550
36
37#define DUMP_EXCEP(name) \
38 PTR_LA a0, 8f; \
39 jal mips_cps_bev_dump; \
40 nop; \
41 TEXT(name)
42
43#else /* !CONFIG_MIPS_CPS_NS16550 */
44
45#define DUMP_EXCEP(name)
46
47#endif /* !CONFIG_MIPS_CPS_NS16550 */
48
	/*
	 * has_mt dest, nomt
	 *
	 * Set \dest to non-zero if the core supports the MT ASE, else zero.
	 * If MT is not supported then branch to \nomt.
	 *
	 * Each bgez tests bit 31 (the "M" continuation bit) of a Config
	 * register to check that the next Config register exists; the
	 * branch delay slots (noreorder is in effect file-wide) are used
	 * to load the next Config register. Clobbers \dest only.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	 nop
	.endm
63
	/*
	 * has_vp dest, nomt
	 *
	 * Set \dest to non-zero if the core supports MIPSr6 multithreading
	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported
	 * then branch to \nomt.
	 *
	 * Walks Config1..Config5 via the "M" continuation bit (bit 31,
	 * tested by bgez) before checking Config5.VP; delay slots load the
	 * next Config register. Clobbers \dest only.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	 mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	 nop
	.endm
83
84
.balign 0x1000

/*
 * mips_cps_core_entry - reset/BEV entry point for secondary cores/VPEs.
 *
 * Runs with BEV set until the jump to kseg0 below; must be 4KB aligned
 * since the exception vector offsets (.org below) are relative to this
 * address. cps_smp_setup patches the leading nops to load the CCA into
 * s0 and the GCR base address into s1. s7 (callee-saved, so it
 * survives the jal calls) records whether we started up coherent.
 */
LEAF(mips_cps_core_entry)
	/*
	 * These first several instructions will be patched by cps_smp_setup to load the
	 * CCA to use into register s0 and GCR base address to register s1.
	 */
	.rept CPS_ENTRY_PATCH_INSNS
	nop
	.endr

	/* Marks the end of the patchable region for cps_smp_setup */
	.global mips_cps_core_entry_patch_end
mips_cps_core_entry_patch_end:

	/* Check whether we're here due to an NMI */
	mfc0	k0, CP0_STATUS
	and	k0, k0, ST0_NMI
	beqz	k0, not_nmi
	 nop

	/* This is an NMI */
	PTR_LA	k0, nmi_handler
	jr	k0
	 nop

not_nmi:
	/* Setup Cause */
	li	t0, CAUSEF_IV
	mtc0	t0, CP0_CAUSE

	/* Setup Status */
	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
	mtc0	t0, CP0_STATUS

	/* We don't know how to do coherence setup on earlier ISA */
#if MIPS_ISA_REV > 0
	/* Skip cache & coherence setup if we're already coherent */
	lw	s7, GCR_CL_COHERENCE_OFS(s1)
	bnez	s7, 1f
	 nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	 nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(s1)
	ehb
#endif /* MIPS_ISA_REV > 0 */

	/* Set Kseg0 CCA to that in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7		/* ori+xori clears the K0 CCA field (bits 2:0)... */
	xori	t0, 0x7		/* ...so the patched-in CCA in s0 can be inserted */
	or	t0, t0, s0
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	 nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	 nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	 nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	 nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	 move	a0, v0

	/* Off we go! Load this VPE's entry PC, gp & sp and jump to it. */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	 nop
	END(mips_cps_core_entry)
182
/*
 * BEV exception vectors. The .org offsets are relative to the 4KB
 * aligned mips_cps_core_entry above and match the architectural BEV
 * vector layout (TLB refill 0x200, XTLB refill 0x280, cache error
 * 0x300, general 0x380, interrupt 0x400, EJTAG debug 0x480).
 * Exceptions here are unexpected during early boot: each handler spins
 * forever (after optionally dumping state via mips_cps_bev_dump when
 * CONFIG_MIPS_CPS_NS16550 is set — see DUMP_EXCEP). Only the EJTAG
 * vector forwards to a real handler.
 */
.org 0x200
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.
	 nop
	END(excep_tlbfill)

.org 0x280
LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	 nop
	END(excep_xtlbfill)

.org 0x300
LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	 nop
	END(excep_cache)

.org 0x380
LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	 nop
	END(excep_genex)

.org 0x400
LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	 nop
	END(excep_intex)

.org 0x480
LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	 nop
	END(excep_ejtag)
224
/*
 * mips_cps_core_init - perform one-time core-level MT initialisation.
 *
 * With CONFIG_MIPS_MT_SMP, binds each TC 1:1 to a VPE and leaves all
 * secondary TCs halted, non-active & non-allocatable so that
 * mips_cps_boot_vpes can later start them. A no-op (immediate return)
 * if the core lacks the MT ASE or the kernel isn't MT-SMP.
 * Clobbers: t0, t1, ta1, ta3.
 */
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	/* jr.hb provides the execution hazard barrier dmt/dvpe require */
	PTR_LA	t1, 1f
	jr.hb	t1
	 nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1		/* ta3 = VPE count (field is max index) */

	/* If there's only 1, we're done */
	beqz	t0, 2f
	 nop

	/* Loop through each VPE within this core, starting from VPE 1 */
	li	ta1, 1

1:	/* Operate on the appropriate TC (TargTC = ta1) */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	 nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	 nop
	END(mips_cps_core_init)
297
/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *	    struct vpe_boot_config in v1, VPE ID in t9
 *
 * Expects the GCR base address in s1 (patched in at mips_cps_core_entry).
 * Clobbers t0-t2 and ta3 in addition to the return registers.
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this cores struct core_boot_config */
	lw	t0, GCR_CL_ID_OFS(s1)	/* t0 = this core's ID from the GCRs */
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1

	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/*
	 * Calculate a mask for the VPE ID from EBase.CPUNum: round the
	 * VPE count up to the next power of two (via clz) and subtract 1.
	 */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1

	/* Retrieve the VPE ID from EBase.CPUNum ($15 select 1 = EBase) */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	 nop
	END(mips_cps_get_bootcfg)
356
/*
 * mips_cps_boot_vpes - start the other VPEs/VPs of this core that
 * should be online, and stop/halt this one if it should be offline.
 *
 * In:	a0 = pointer to this core's struct core_boot_config
 *	a1 = this VPE's ID
 * On MIPSr6 this is done via the CPC VC_RUN/VC_STOP registers (CPC
 * base found via the mips_gcr_base variable); on MT it is done through
 * the MT ASE VPE/TC configuration registers.
 * Clobbers: t0-t2, t8, ta1-ta3.
 */
LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)	/* ta2 = mask of VPEs to run */
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC (15-bit aligned), make it uncached */
	PTR_LA	t1, mips_gcr_base
	PTR_L	t1, 0(t1)
	PTR_L	t1, GCR_CPC_BASE_OFS(t1)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1
	 nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE; t8 preserves the full mask for later */
	move	t8, ta2
	li	ta1, 0

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	 nop

	/* Operate on the appropriate TC (ori+xori clears TargTC field) */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted (ie. already running) */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	 nop

	/* Calculate a pointer to the VPEs struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	addu	t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	 nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	 nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE (ID in a1) is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	 nop

	/* This VPE should be offline, halt the TC & spin with hazard barrier */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0
	 nop

2:

#endif /* CONFIG_MIPS_MT */

	/* Return */
5:	jr	ra
	 nop
	END(mips_cps_boot_vpes)
520
#if MIPS_ISA_REV > 0
/*
 * mips_cps_cache_init - initialise (invalidate) the primary caches by
 * writing zeroed tags to every index of the I- & D-caches, with
 * geometry read from Config1. Only built for r1+ ISAs (matching the
 * MIPS_ISA_REV guard around its caller in mips_cps_core_entry).
 * Clobbers: a0, a1, t0-t3, v0.
 */
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size: t0 = 2 << IL (0 => no I-cache) */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/*
	 * Detect I-cache size: t1 = 32 << (IS + 1) sets per way.
	 * NOTE(review): when IS == 7 the branch skips the shift, leaving
	 * t1 == 7 rather than a sets-per-way count — looks suspicious;
	 * confirm against the Config1.IS encoding.
	 */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1		/* t2 = associativity (ways) */
	mul	t1, t1, t0
	mul	t1, t1, t2		/* t1 = total I-cache bytes */

	/* Index-store zeroed tags across the whole I-cache */
	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	 nop
icache_done:

	/* Detect D-cache line size: t0 = 2 << DL (0 => no D-cache) */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	 li	t1, 2
	sllv	t0, t1, t0

	/* Detect D-cache size (same scheme & caveat as the I-cache above) */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	 li	t3, 32
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1
	mul	t1, t1, t0
	mul	t1, t1, t2		/* t1 = total D-cache bytes */

	/* Loop ends at last line (a1 pre-decremented); step in delay slot */
	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	 PTR_ADD a0, a0, t0
dcache_done:

	jr	ra
	 nop
	END(mips_cps_cache_init)
#endif /* MIPS_ISA_REV > 0 */
596
597#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
598
	/*
	 * psstate dest
	 *
	 * Calculate a pointer to this CPUs struct mips_static_suspend_state:
	 * \dest = &cps_cpu_state + __per_cpu_offset[cpu]. Reads the CPU
	 * number from TI_CPU(gp) (assumes gp points at thread_info — the
	 * kernel's convention; confirm at call sites). Uses $1 (at) as
	 * scratch, hence noat.
	 */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)
	sll	$1, $1, LONGLOG		/* scale cpu by pointer size */
	PTR_LA	\dest, __per_cpu_offset
	addu	$1, $1, \dest
	lw	$1, 0($1)		/* $1 = __per_cpu_offset[cpu] */
	PTR_LA	\dest, cps_cpu_state
	addu	\dest, \dest, $1
	.set	pop
	.endm
612
/*
 * mips_cps_pm_save - save CPU state for power management, then jump to
 * the address in v0 (presumably a continuation supplied by the caller
 * via the SUSPEND_SAVE_REGS protocol — confirm against asm/pm.h).
 */
LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1		/* t1 = this CPU's static suspend state */
	SUSPEND_SAVE_STATIC
	jr	v0
	 nop
	END(mips_cps_pm_save)
621
/*
 * mips_cps_pm_restore - restore CPU state saved by mips_cps_pm_save
 * and return to the saved context (RESUME_RESTORE_REGS_RETURN returns
 * on our behalf — see asm/pm.h).
 */
LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1		/* t1 = this CPU's static suspend state */
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)
628
629#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */