/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */
6
7#include <asm/addrspace.h>
8#include <asm/asm.h>
9#include <asm/asm-offsets.h>
10#include <asm/asmmacro.h>
11#include <asm/cacheops.h>
12#include <asm/eva.h>
13#include <asm/mipsregs.h>
14#include <asm/mipsmtregs.h>
15#include <asm/pm.h>
16
17#define GCR_CPC_BASE_OFS 0x0088
18#define GCR_CL_COHERENCE_OFS 0x2008
19#define GCR_CL_ID_OFS 0x2028
20
21#define CPC_CL_VC_STOP_OFS 0x2020
22#define CPC_CL_VC_RUN_OFS 0x2028
23
24.extern mips_cm_base
25
26.set noreorder
27
28#ifdef CONFIG_64BIT
29# define STATUS_BITDEPS ST0_KX
30#else
31# define STATUS_BITDEPS 0
32#endif
33
34#ifdef CONFIG_MIPS_CPS_NS16550
35
36#define DUMP_EXCEP(name) \
37 PTR_LA a0, 8f; \
38 jal mips_cps_bev_dump; \
39 nop; \
40 TEXT(name)
41
42#else /* !CONFIG_MIPS_CPS_NS16550 */
43
44#define DUMP_EXCEP(name)
45
46#endif /* !CONFIG_MIPS_CPS_NS16550 */
47
	/*
	 * has_mt - set \dest to non-zero if the core supports the MT ASE,
	 * else zero.  If MT is not supported then branch to \nomt.
	 *
	 * Each bgez tests bit 31 of ConfigN (the "M" continuation bit): if
	 * it is clear the next Config register does not exist, so Config3.MT
	 * cannot be set.  The mfc0 following each bgez sits in the branch
	 * delay slot and is harmless when the branch is taken.  Clobbers
	 * \dest only.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt			/* no Config2 -> no MT */
	mfc0	\dest, CP0_CONFIG, 2		/* (delay slot) */
	bgez	\dest, \nomt			/* no Config3 -> no MT */
	mfc0	\dest, CP0_CONFIG, 3		/* (delay slot) */
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	nop
	.endm
62
	/*
	 * has_vp - set \dest to non-zero if the core supports MIPSr6
	 * multithreading (ie. VPs), else zero.  If MIPSr6 multithreading is
	 * not supported then branch to \nomt.
	 *
	 * Walks the Config register chain via the "M" continuation bit
	 * (bit 31 of each ConfigN) down to Config5.VP; each mfc0 after a
	 * bgez executes in the branch delay slot and is harmless when the
	 * branch is taken.  Clobbers \dest only.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt			/* no Config2 -> no VPs */
	mfc0	\dest, CP0_CONFIG, 2		/* (delay slot) */
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	nop
	.endm
82
	/*
	 * cmgcrb - calculate an uncached address for the CM GCRs in \dest.
	 *
	 * CP0 CMGCRBase holds the physical GCR base address shifted right
	 * by 4; shift it back and add UNCAC_BASE to form an uncached
	 * virtual address.  Uses $1 (at) as scratch, hence .set noat.
	 */
	.macro	cmgcrb	dest
	.set	push
	.set	noat
	MFC0	$1, CP0_CMGCRBASE
	PTR_SLL	$1, $1, 4			/* physical GCR base */
	PTR_LI	\dest, UNCAC_BASE
	PTR_ADDU \dest, \dest, $1		/* uncached virtual address */
	.set	pop
	.endm
93
94.section .text.cps-vec
95.balign 0x1000
96
/*
 * mips_cps_core_entry() - entry point for secondary cores/VPEs.
 *
 * Placed at the 4KB-aligned start of .text.cps-vec so it can serve as the
 * BEV exception vector base.  Brings the CPU from reset to running the
 * kernel: NMI check, Cause/Status setup, L1 cache init, joining the
 * coherent domain, CCA setup, EVA init, then jumping to the PC from this
 * VPE's struct vpe_boot_config.  Register s0 holds the CCA (patched in
 * below); s7 caches the coherence state across the jal calls (both are
 * callee-preserved).
 */
LEAF(mips_cps_core_entry)
	/*
	 * These first 4 bytes will be patched by cps_smp_setup to load the
	 * CCA to use into register s0.
	 */
	.word	0

	/* Check whether we're here due to an NMI */
	mfc0	k0, CP0_STATUS
	and	k0, k0, ST0_NMI
	beqz	k0, not_nmi
	nop

	/* This is an NMI */
	PTR_LA	k0, nmi_handler
	jr	k0
	nop

not_nmi:
	/* Setup Cause */
	li	t0, CAUSEF_IV
	mtc0	t0, CP0_CAUSE

	/* Setup Status */
	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
	mtc0	t0, CP0_STATUS

	/*
	 * Skip cache & coherence setup if we're already coherent.
	 * s7 != 0 records that fact for the core-level-init check below.
	 */
	cmgcrb	v1
	lw	s7, GCR_CL_COHERENCE_OFS(v1)
	bnez	s7, 1f
	nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(v1)
	ehb

	/* Set Kseg0 CCA to that in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7				/* set then clear the K0 CCA */
	xori	t0, 0x7				/* field, leaving other bits */
	or	t0, t0, s0			/* intact, then insert the CCA */
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9				/* a1 = this VPE's ID */
	jal	mips_cps_boot_vpes
	move	a0, v0				/* (delay slot) a0 = core_boot_config */

	/* Off we go! */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	nop
	END(mips_cps_core_entry)
185
/*
 * BEV exception vector stubs.  The .org directives place each handler at
 * its architectural offset from the 0x1000-aligned vector base
 * (mips_cps_core_entry).  With CONFIG_MIPS_CPS_NS16550=y each stub dumps
 * diagnostic state via mips_cps_bev_dump before spinning; the EJTAG stub
 * instead chains to ejtag_debug_handler.
 */
.org 0x200
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.				/* spin forever */
	nop
	END(excep_tlbfill)

.org 0x280
LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	nop
	END(excep_xtlbfill)

.org 0x300
LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	nop
	END(excep_cache)

.org 0x380
LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	nop
	END(excep_genex)

.org 0x400
LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	nop
	END(excep_intex)

.org 0x480
LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	nop
	END(excep_ejtag)
227
/*
 * mips_cps_core_init() - core-level MT ASE initialisation.
 *
 * With CONFIG_MIPS_MT_SMP, places every secondary VPE/TC of this core
 * into a known halted, non-active state (1:1 TC:VPE binding) so that
 * mips_cps_boot_vpes can later start them.  No-op if the core lacks the
 * MT ASE or the config option is off.  Clobbers t0, t1, ta1, ta3.
 */
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1				/* clear instruction hazards */
	nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1			/* ta3 = VPE count = PVPE + 1 */

	/* If there's only 1, we're done */
	beqz	t0, 2f
	nop

	/* Loop through each VPE within this core */
	li	ta1, 1				/* start at 1: VPE 0 is us */

1:	/* Operate on the appropriate TC */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	nop
	END(mips_cps_core_init)
300
/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *	    struct vpe_boot_config in v1, VPE ID in t9
 *
 * Clobbers t0, t1, t2, ta2, ta3.  Must not use the stack (called before
 * sp is set up).
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this cores struct core_boot_config */
	cmgcrb	t0
	lw	t0, GCR_CL_ID_OFS(t0)		/* t0 = this core's ID */
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1			/* v0 = &bootcfg[core] */

	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1			/* t1 = floor(log2(nvpes)) */
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1			/* t1 = VPE-ID bit mask */

	/* Retrieve the VPE ID from EBase.CPUNum */
	mfc0	t9, $15, 1			/* $15 sel 1 == CP0 EBase */
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3			/* v1 = &vpeconfig[vpe_id] */

	jr	ra
	nop
	END(mips_cps_get_bootcfg)
360
/*
 * mips_cps_boot_vpes() - start the other VPs/VPEs of this core and stop
 * this one if it should be offline.
 *
 * In:  a0 = pointer to this core's struct core_boot_config
 *      a1 = this VPE's ID
 * Uses ta2 = VPE mask (bit n set => VPE n should run), ta3 = pointer to
 * the core's vpe_boot_config array, t8 = saved copy of the mask.
 * MIPSr6 cores use the CPC VC_RUN/VC_STOP registers; pre-r6 MT cores
 * program each TC/VPE individually through the MT ASE.
 */
LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	cmgcrb	t3
	PTR_L	t1, GCR_CPC_BASE_OFS(t3)
	PTR_LI	t2, ~0x7fff			/* mask off low control bits */
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2			/* t1 = uncached CPC base */

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1				/* clear instruction hazards */
	nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE */
	move	t8, ta2				/* t8 = mask, survives the loop */
	li	ta1, 0				/* ta1 = VPE index */

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	nop

	/* Operate on the appropriate TC */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC	/* clear TargTC field... */
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1			/* ...then select TC ta1 */
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	nop

	/* Calculate a pointer to the VPEs struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	addu	t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0				/* spin until halt takes effect */
	nop

2:

#endif /* CONFIG_CPU_MIPSR6 || CONFIG_MIPS_MT */

	/* Return */
5:	jr	ra
	nop
	END(mips_cps_boot_vpes)
523
/*
 * mips_cps_cache_init() - initialise this core's L1 I- and D-caches by
 * storing zeroed tags to every index.  Geometry is decoded from Config1
 * (line size from IL/DL, sets per way from IS/DS, associativity from
 * IA/DA).  Clobbers v0, t0-t3, a0, a1.  Must not access memory other
 * than via the cache ops (caches are not yet usable).
 */
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done			/* IL == 0: no I-cache */
	li	t1, 2				/* (delay slot) */
	sllv	t0, t1, t0			/* t0 = line size = 2 << IL */

	/* Detect I-cache size */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32				/* (delay slot) */
	addiu	t1, t1, 1
	sllv	t1, t3, t1			/* t1 = 64 << IS sets per way */
	/*
	 * NOTE(review): when IS == 7 (architecturally 32 sets/way) the
	 * branch above leaves t1 == 7 rather than 32 — confirm no
	 * supported core reports IS == 7.
	 */
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1			/* t2 = associativity */
	mul	t1, t1, t0
	mul	t1, t1, t2			/* t1 = total I-cache bytes */

	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1			/* a1 = end (exclusive) */
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	nop
icache_done:

	/* Detect D-cache line size */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done			/* DL == 0: no D-cache */
	li	t1, 2				/* (delay slot) */
	sllv	t0, t1, t0			/* t0 = line size = 2 << DL */

	/* Detect D-cache size */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f				/* see IS == 7 note above */
	li	t3, 32				/* (delay slot) */
	addiu	t1, t1, 1
	sllv	t1, t3, t1			/* t1 = 64 << DS sets per way */
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1			/* t2 = associativity */
	mul	t1, t1, t0
	mul	t1, t1, t2			/* t1 = total D-cache bytes */

	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0			/* a1 = last line (inclusive) */
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	PTR_ADD	a0, a0, t0			/* (delay slot) */
dcache_done:

	jr	ra
	nop
	END(mips_cps_cache_init)
597
598#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
599
	/*
	 * psstate - calculate a pointer to this CPU's struct
	 * mips_static_suspend_state in \dest.
	 *
	 * Resolves the per-CPU variable cps_cpu_state for the current CPU:
	 * reads the CPU number from thread_info (via gp), scales it to
	 * index __per_cpu_offset[], then adds that offset to the generic
	 * address of cps_cpu_state.  Uses $1 (at) as scratch.
	 */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)			/* $1 = smp_processor_id() */
	sll	$1, $1, LONGLOG
	PTR_LA	\dest, __per_cpu_offset
	addu	$1, $1, \dest
	lw	$1, 0($1)			/* $1 = __per_cpu_offset[cpu] */
	PTR_LA	\dest, cps_cpu_state
	addu	\dest, \dest, $1
	.set	pop
	.endm
613
/*
 * mips_cps_pm_save() - save CPU state ahead of entering a low power state.
 *
 * Saves registers and static state via the asm/pm.h SUSPEND_* macros into
 * this CPU's suspend-state area (t1), then jumps to the address in v0
 * rather than returning via ra — presumably v0 is set up by the caller or
 * by SUSPEND_SAVE_REGS as the power-down continuation; confirm against
 * asm/pm.h.
 */
LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1				/* t1 = this CPU's state area */
	SUSPEND_SAVE_STATIC
	jr	v0
	nop
	END(mips_cps_pm_save)
622
/*
 * mips_cps_pm_restore() - restore CPU state after a low power state.
 *
 * Counterpart to mips_cps_pm_save: reloads static state from this CPU's
 * suspend-state area (t1) then restores registers and returns to the
 * saved context via RESUME_RESTORE_REGS_RETURN (which performs the jr).
 */
LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1				/* t1 = this CPU's state area */
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)
629
630#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
/*
 * NOTE(review): a second, older revision of this entire file appears to be
 * concatenated below (pre-SPDX header, older CPC/MT code) — confirm this
 * duplication is an extraction artifact and deduplicate.
 */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
10
11#include <asm/addrspace.h>
12#include <asm/asm.h>
13#include <asm/asm-offsets.h>
14#include <asm/asmmacro.h>
15#include <asm/cacheops.h>
16#include <asm/eva.h>
17#include <asm/mipsregs.h>
18#include <asm/mipsmtregs.h>
19#include <asm/pm.h>
20
21#define GCR_CPC_BASE_OFS 0x0088
22#define GCR_CL_COHERENCE_OFS 0x2008
23#define GCR_CL_ID_OFS 0x2028
24
25#define CPC_CL_VC_RUN_OFS 0x2028
26
27.extern mips_cm_base
28
29.set noreorder
30
31#ifdef CONFIG_64BIT
32# define STATUS_BITDEPS ST0_KX
33#else
34# define STATUS_BITDEPS 0
35#endif
36
37#ifdef CONFIG_MIPS_CPS_NS16550
38
39#define DUMP_EXCEP(name) \
40 PTR_LA a0, 8f; \
41 jal mips_cps_bev_dump; \
42 nop; \
43 TEXT(name)
44
45#else /* !CONFIG_MIPS_CPS_NS16550 */
46
47#define DUMP_EXCEP(name)
48
49#endif /* !CONFIG_MIPS_CPS_NS16550 */
50
	/*
	 * NOTE(review): duplicated older revision of the has_mt/has_vp/
	 * cmgcrb macros defined earlier in this file; kept verbatim
	 * pending deduplication.
	 */

	/*
	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
	 * MT is not supported then branch to nomt.  Each bgez tests the "M"
	 * continuation bit (bit 31) of ConfigN; the mfc0 after it is in the
	 * branch delay slot.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 3
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	nop
	.endm

	/*
	 * Set dest to non-zero if the core supports MIPSr6 multithreading
	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
	 * branch to nomt.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 2
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 3
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 4
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 5
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	nop
	.endm

	/* Calculate an uncached address for the CM GCRs (uses $1/at) */
	.macro	cmgcrb	dest
	.set	push
	.set	noat
	MFC0	$1, CP0_CMGCRBASE
	PTR_SLL	$1, $1, 4
	PTR_LI	\dest, UNCAC_BASE
	PTR_ADDU \dest, \dest, $1
	.set	pop
	.endm
96
97.section .text.cps-vec
98.balign 0x1000
99
/*
 * NOTE(review): duplicated older revision of mips_cps_core_entry (see the
 * copy earlier in this file); kept verbatim pending deduplication.
 * Secondary core/VPE entry point and BEV vector base.
 */
LEAF(mips_cps_core_entry)
	/*
	 * These first 4 bytes will be patched by cps_smp_setup to load the
	 * CCA to use into register s0.
	 */
	.word	0

	/* Check whether we're here due to an NMI */
	mfc0	k0, CP0_STATUS
	and	k0, k0, ST0_NMI
	beqz	k0, not_nmi
	nop

	/* This is an NMI */
	PTR_LA	k0, nmi_handler
	jr	k0
	nop

not_nmi:
	/* Setup Cause */
	li	t0, CAUSEF_IV
	mtc0	t0, CP0_CAUSE

	/* Setup Status */
	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
	mtc0	t0, CP0_STATUS

	/* Skip cache & coherence setup if we're already coherent */
	cmgcrb	v1
	lw	s7, GCR_CL_COHERENCE_OFS(v1)
	bnez	s7, 1f
	nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	nop

	/* Enter the coherent domain */
	li	t0, 0xff
	sw	t0, GCR_CL_COHERENCE_OFS(v1)
	ehb

	/* Set Kseg0 CCA to that in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7				/* clear the K0 CCA field... */
	xori	t0, 0x7
	or	t0, t0, s0			/* ...then insert the CCA */
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers */
	jal	mips_cps_get_bootcfg
	nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9
	jal	mips_cps_boot_vpes
	move	a0, v0				/* (delay slot) */

	/* Off we go! */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	nop
	END(mips_cps_core_entry)
188
/*
 * NOTE(review): duplicated older revision of the BEV exception vector
 * stubs (see the copies earlier in this file); kept verbatim pending
 * deduplication.  .org places each stub at its architectural offset.
 */
.org 0x200
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.				/* spin forever */
	nop
	END(excep_tlbfill)

.org 0x280
LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.
	nop
	END(excep_xtlbfill)

.org 0x300
LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.
	nop
	END(excep_cache)

.org 0x380
LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.
	nop
	END(excep_genex)

.org 0x400
LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.
	nop
	END(excep_intex)

.org 0x480
LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler
	jr	k0
	nop
	END(excep_ejtag)
230
/*
 * NOTE(review): duplicated older revision of mips_cps_core_init (the
 * newer copy earlier in this file adds .set MIPS_ISA_LEVEL_RAW); kept
 * verbatim pending deduplication.  Halts/deactivates secondary VPEs/TCs.
 */
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1				/* clear instruction hazards */
	nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1			/* ta3 = VPE count */

	/* If there's only 1, we're done */
	beqz	t0, 2f
	nop

	/* Loop through each VPE within this core */
	li	ta1, 1				/* VPE 0 is us; start at 1 */

1:	/* Operate on the appropriate TC */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	nop
	END(mips_cps_core_init)
302
/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *	    struct vpe_boot_config in v1, VPE ID in t9
 *
 * NOTE(review): duplicated older revision (the newer copy earlier in this
 * file uses the CP0_GLOBALNUMBER macros); kept verbatim pending
 * deduplication.
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this cores struct core_boot_config */
	cmgcrb	t0
	lw	t0, GCR_CL_ID_OFS(t0)		/* t0 = this core's ID */
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)
	PTR_ADDU v0, t0, t1

	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, $3, 1			/* $3 sel 1: GlobalNumber (CP0_GLOBALNUMBER in the newer copy) */
	andi	t9, t9, 0xff			/* VP-ID field */
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1

	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1			/* t1 = VPE-ID bit mask */

	/* Retrieve the VPE ID from EBase.CPUNum */
	mfc0	t9, $15, 1			/* $15 sel 1 == CP0 EBase */
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	nop
	END(mips_cps_get_bootcfg)
362
/*
 * NOTE(review): duplicated older revision of mips_cps_boot_vpes (the
 * newer copy earlier in this file also writes CPC VC_STOP on MIPSr6);
 * kept verbatim pending deduplication.
 *
 * In: a0 = this core's struct core_boot_config, a1 = this VPE's ID.
 * ta2 = VPE mask, ta3 = vpe_boot_config array, t8 = saved mask copy.
 */
LEAF(mips_cps_boot_vpes)
	PTR_L	ta2, COREBOOTCFG_VPEMASK(a0)
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	cmgcrb	t3
	PTR_L	t1, GCR_CPC_BASE_OFS(t3)
	PTR_LI	t2, ~0x7fff
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2			/* t1 = uncached CPC base */

	/* Set VC_RUN to the VPE mask */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	.set	push
	.set	mt

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1				/* clear instruction hazards */
	nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE */
	move	t8, ta2				/* preserve the mask */
	li	ta1, 0				/* ta1 = VPE index */

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	nop

	/* Operate on the appropriate TC */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC	/* clear TargTC field... */
	xori	t0, t0, VPECONTROL_TARGTC
	or	t0, t0, ta1			/* ...then select TC ta1 */
	mtc0	t0, CP0_VPECONTROL
	ehb

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	nop

	/* Calculate a pointer to the VPEs struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	addu	t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0				/* spin until halt takes effect */
	nop

2:	.set	pop

#endif /* CONFIG_CPU_MIPSR6 || CONFIG_MIPS_MT */

	/* Return */
5:	jr	ra
	nop
	END(mips_cps_boot_vpes)
513
/*
 * NOTE(review): duplicated older revision of mips_cps_cache_init
 * (identical logic to the copy earlier in this file); kept verbatim
 * pending deduplication.  Initialises the L1 caches by zeroing tags.
 */
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done			/* IL == 0: no I-cache */
	li	t1, 2				/* (delay slot) */
	sllv	t0, t1, t0			/* t0 = line size = 2 << IL */

	/* Detect I-cache size */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f				/* see IS == 7 note in newer copy */
	li	t3, 32				/* (delay slot) */
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == I-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1			/* t2 = associativity */
	mul	t1, t1, t0
	mul	t1, t1, t2			/* t1 = total I-cache bytes */

	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1			/* a1 = end (exclusive) */
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	nop
icache_done:

	/* Detect D-cache line size */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done			/* DL == 0: no D-cache */
	li	t1, 2				/* (delay slot) */
	sllv	t0, t1, t0			/* t0 = line size = 2 << DL */

	/* Detect D-cache size */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32				/* (delay slot) */
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1			/* t2 = associativity */
	mul	t1, t1, t0
	mul	t1, t1, t2			/* t1 = total D-cache bytes */

	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0			/* a1 = last line (inclusive) */
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	PTR_ADD	a0, a0, t0			/* (delay slot) */
dcache_done:

	jr	ra
	nop
	END(mips_cps_cache_init)
587
588#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
589
	/*
	 * NOTE(review): duplicated older revision of the CPS PM code (see
	 * the copies earlier in this file); kept verbatim pending
	 * deduplication.
	 */

	/*
	 * psstate - calculate a pointer to this CPU's struct
	 * mips_static_suspend_state via __per_cpu_offset[cpu] and
	 * cps_cpu_state.  Uses $1 (at) as scratch.
	 */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)			/* $1 = smp_processor_id() */
	sll	$1, $1, LONGLOG
	PTR_LA	\dest, __per_cpu_offset
	addu	$1, $1, \dest
	lw	$1, 0($1)			/* $1 = __per_cpu_offset[cpu] */
	PTR_LA	\dest, cps_cpu_state
	addu	\dest, \dest, $1
	.set	pop
	.endm

/*
 * mips_cps_pm_save() - save CPU state ahead of a low power state, then
 * jump to the address in v0 (presumably a continuation set up by the
 * caller or SUSPEND_SAVE_REGS — confirm against asm/pm.h).
 */
LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1				/* t1 = this CPU's state area */
	SUSPEND_SAVE_STATIC
	jr	v0
	nop
	END(mips_cps_pm_save)

/*
 * mips_cps_pm_restore() - restore CPU state after a low power state;
 * RESUME_RESTORE_REGS_RETURN performs the final return.
 */
LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1				/* t1 = this CPU's state area */
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)
619
620#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */