/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/eva.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>

/* Offsets of CM GCR registers from the GCR base address */
#define GCR_CPC_BASE_OFS	0x0088
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

/* Offsets of CPC core-local registers from the CPC base address */
#define CPC_CL_VC_STOP_OFS	0x2020
#define CPC_CL_VC_RUN_OFS	0x2028

.extern mips_cm_base

/* Branch delay slots are filled explicitly throughout this file */
.set noreorder

#ifdef CONFIG_64BIT
/* 64-bit kernels must also enable 64-bit kernel segments in Status */
# define STATUS_BITDEPS		ST0_KX
#else
# define STATUS_BITDEPS		0
#endif
#ifdef CONFIG_MIPS_CPS_NS16550

/*
 * Dump exception state over the NS16550 UART: point a0 at the exception
 * name string (emitted at local label 8 by TEXT) & call mips_cps_bev_dump.
 */
#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

/* No UART configured - exception dumping is a no-op */
#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */
51
	/*
	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
	 * MT is not supported then branch to nomt.
	 *
	 * Each Config register's sign (M) bit indicates whether the next
	 * Config register is present, so walk Config1..Config3 first.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt			/* no Config2 => no MT */
	mfc0	\dest, CP0_CONFIG, 2		/* (delay slot) */
	bgez	\dest, \nomt			/* no Config3 => no MT */
	mfc0	\dest, CP0_CONFIG, 3		/* (delay slot) */
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	nop
	.endm
66
	/*
	 * Set dest to non-zero if the core supports MIPSr6 multithreading
	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
	 * branch to nomt.
	 *
	 * Walk Config1..Config5 via each register's M (continuation) bit,
	 * then test the VP bit in Config5.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 2		/* (delay slot) */
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 3		/* (delay slot) */
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 4		/* (delay slot) */
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 5		/* (delay slot) */
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	nop
	.endm
86
	/* Calculate an uncached address for the CM GCRs */
	.macro	cmgcrb	dest
	.set	push
	.set	noat
	MFC0	$1, CP0_CMGCRBASE		/* CMGCRBase holds physaddr >> 4 */
	PTR_SLL	$1, $1, 4			/* recover the physical address */
	PTR_LI	\dest, UNCAC_BASE
	PTR_ADDU \dest, \dest, $1		/* dest = uncached virtual addr */
	.set	pop
	.endm
97
/*
 * Secondary core/VPE entry point. Placed in its own section & aligned to
 * 4KB so the whole vector area can be mapped at the core's reset vector.
 */
.section .text.cps-vec
.balign 0x1000

LEAF(mips_cps_core_entry)
	/*
	 * These first 4 bytes will be patched by cps_smp_setup to load the
	 * CCA to use into register s0.
	 */
	.word	0

	/* Check whether we're here due to an NMI */
	mfc0	k0, CP0_STATUS
	and	k0, k0, ST0_NMI
	beqz	k0, not_nmi
	nop

	/* This is an NMI */
	PTR_LA	k0, nmi_handler
	jr	k0
	nop

not_nmi:
	/* Setup Cause: use the special interrupt vector */
	li	t0, CAUSEF_IV
	mtc0	t0, CP0_CAUSE

	/* Setup Status: CP0/CP1 usable, boot-time exception vectors */
	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
	mtc0	t0, CP0_STATUS

	/* Skip cache & coherence setup if we're already coherent */
	cmgcrb	v1
	lw	s7, GCR_CL_COHERENCE_OFS(v1)	/* s7 != 0 => already coherent */
	bnez	s7, 1f
	nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	nop

	/* Enter the coherent domain */
	li	t0, 0xff			/* join all coherence domains */
	sw	t0, GCR_CL_COHERENCE_OFS(v1)
	ehb

	/* Set Kseg0 CCA to that in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7				/* clear the K0 CCA field... */
	xori	t0, 0x7
	or	t0, t0, s0			/* ...and insert the patched CCA */
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers (v0, v1, t9 - see below) */
	jal	mips_cps_get_bootcfg
	nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9				/* a1 = this VPE's ID */
	jal	mips_cps_boot_vpes
	move	a0, v0				/* (delay slot) a0 = core bootcfg */

	/* Off we go! Load this VPE's entry point, gp & sp, then jump in. */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	nop
	END(mips_cps_core_entry)
189
/*
 * Boot-time (BEV=1) exception vectors, placed at their architectural
 * offsets from the vector base. All except EJTAG just dump state (when
 * configured) and hang - no exceptions are expected during CPS boot.
 */

.org 0x200
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.				/* hang */
	nop
	END(excep_tlbfill)

.org 0x280
LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.				/* hang */
	nop
	END(excep_xtlbfill)

.org 0x300
LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.				/* hang */
	nop
	END(excep_cache)

.org 0x380
LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.				/* hang */
	nop
	END(excep_genex)

.org 0x400
LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.				/* hang */
	nop
	END(excep_intex)

.org 0x480
LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler		/* chain to real EJTAG handler */
	jr	k0
	nop
	END(excep_ejtag)
231
/*
 * Perform core-level initialisation. With CONFIG_MIPS_MT_SMP this places
 * every non-zero VPE of an MT core into a known halted, non-active state
 * ready to be started later by mips_cps_boot_vpes. Clobbers t0, t1, ta1,
 * ta3. No-op (beyond the jr ra) when MT SMP is not configured.
 */
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1				/* clear execution hazards */
	nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1			/* ta3 = VPE count */

	/* If there's only 1, we're done */
	beqz	t0, 2f
	nop

	/* Loop through each VPE within this core, skipping VPE 0 (us) */
	li	ta1, 1				/* ta1 = VPE index */

1:	/* Operate on the appropriate TC */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	nop
	END(mips_cps_core_init)
304
/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *          struct vpe_boot_config in v1, VPE ID in t9
 *
 * Clobbers t0, t1, t2, ta2, ta3.
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this cores struct core_boot_config */
	cmgcrb	t0
	lw	t0, GCR_CL_ID_OFS(t0)		/* t0 = this core's ID */
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1			/* t0 = byte offset into array */
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)			/* t1 = *mips_cps_core_bootcfg */
	PTR_ADDU v0, t0, t1

	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1			/* t1 = VPE count */

	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1			/* t1 = floor(log2(count)) */
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1			/* t1 = mask of CPUNum bits */

	/* Retrieve the VPE ID from EBase.CPUNum ($15 select 1 == EBase) */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1			/* v1 = byte offset into array */
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	nop
	END(mips_cps_get_bootcfg)
364
/*
 * mips_cps_boot_vpes() - start the VPEs/VPs of this core that should run
 *
 * In:  a0 = pointer to this core's struct core_boot_config
 *      a1 = this VPE's ID within the core
 *
 * On MIPSr6 (VP) systems the CPC VC_RUN/VC_STOP registers are written to
 * start & stop VPs. On MT systems each to-be-run VPE's halted TC is set up
 * (PC, sp, gp, Config/SegCtl copies) & released. If this VPE itself is not
 * in the run mask, it is halted/stopped. Clobbers t0-t3, t8, ta1-ta3.
 */
LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)	/* ta2 = mask of VPEs to run */
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)	/* ta3 = vpe_boot_config array */

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	cmgcrb	t3
	PTR_L	t1, GCR_CPC_BASE_OFS(t3)
	PTR_LI	t2, ~0x7fff			/* mask off the low control bits */
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2			/* t1 = uncached CPC base */

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1				/* clear execution hazards */
	nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE */
	move	t8, ta2				/* t8 = saved run mask */
	li	ta1, 0				/* ta1 = VPE index */

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	nop

	/* Operate on the appropriate TC */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC	/* clear TargTC field... */
	or	t0, t0, ta1			/* ...and select TC ta1 */
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	nop

	/* Calculate a pointer to the VPEs struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	/*
	 * Use PTR_ADDU (not addu) for the pointer addition so this is also
	 * correct on 64-bit kernels, where addu would truncate the address.
	 */
	PTR_ADDU t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0				/* spin here once halted */
	nop

2:

#endif /* CONFIG_MIPS_MT */

	/* Return */
5:	jr	ra
	nop
	END(mips_cps_boot_vpes)
527
/*
 * Initialise (index-invalidate) the L1 I-cache & D-cache, sizing each from
 * the Config1 register fields. Clobbers v0, t0-t3, a0, a1.
 */
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size: IL == 0 => no I-cache, else 2 << IL */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	li	t1, 2				/* (delay slot) */
	sllv	t0, t1, t0			/* t0 = line size in bytes */

	/* Detect I-cache size: sets/way = 64 << IS for IS != 7 */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32				/* (delay slot) */
	addiu	t1, t1, 1
	sllv	t1, t3, t1			/* t1 = 32 << (IS + 1) */
1:	/* At this point t1 == I-cache sets per way */
	/*
	 * NOTE(review): when IS == 7 the branch above leaves t1 == 7 rather
	 * than 32 (t3 is loaded but unused) - verify against the Config1
	 * encoding for the cores this runs on.
	 */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1			/* t2 = associativity */
	mul	t1, t1, t0
	mul	t1, t1, t2			/* t1 = total I-cache bytes */

	/* Index-invalidate each line through its Kseg0 alias */
	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1			/* a1 = end address */
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	nop
icache_done:

	/* Detect D-cache line size: DL == 0 => no D-cache */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	li	t1, 2				/* (delay slot) */
	sllv	t0, t1, t0			/* t0 = line size in bytes */

	/* Detect D-cache size (same encoding as the I-cache fields) */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32				/* (delay slot) */
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1			/* t2 = associativity */
	mul	t1, t1, t0
	mul	t1, t1, t2			/* t1 = total D-cache bytes */

	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0			/* a1 = address of last line */
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	PTR_ADD	a0, a0, t0			/* (delay slot) */
dcache_done:

	jr	ra
	nop
	END(mips_cps_cache_init)
601
602#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
603
604 /* Calculate a pointer to this CPUs struct mips_static_suspend_state */
605 .macro psstate dest
606 .set push
607 .set noat
608 lw $1, TI_CPU(gp)
609 sll $1, $1, LONGLOG
610 PTR_LA \dest, __per_cpu_offset
611 addu $1, $1, \dest
612 lw $1, 0($1)
613 PTR_LA \dest, cps_cpu_state
614 addu \dest, \dest, $1
615 .set pop
616 .endm
617
/*
 * Save this CPU's register & static state ahead of entering a low power
 * state, then continue at the address in v0.
 * NOTE(review): the SUSPEND_*/v0 contract comes from asm/pm.h - confirm
 * there which registers the caller must set up.
 */
LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1				/* t1 = this CPU's suspend state */
	SUSPEND_SAVE_STATIC
	jr	v0				/* continue at address in v0 */
	nop
	END(mips_cps_pm_save)
626
/*
 * Restore the CPU state saved by mips_cps_pm_save & return to its caller
 * (the RESUME_RESTORE_REGS_RETURN macro performs the return).
 */
LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1				/* t1 = this CPU's suspend state */
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)
633
634#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/init.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/eva.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
#include <asm/smp-cps.h>

/* Offsets of CM GCR registers from the GCR base address */
#define GCR_CPC_BASE_OFS	0x0088
#define GCR_CL_COHERENCE_OFS	0x2008
#define GCR_CL_ID_OFS		0x2028

/* Offsets of CPC core-local registers from the CPC base address */
#define CPC_CL_VC_STOP_OFS	0x2020
#define CPC_CL_VC_RUN_OFS	0x2028

.extern mips_cm_base

/* Branch delay slots are filled explicitly throughout this file */
.set noreorder

#ifdef CONFIG_64BIT
/* 64-bit kernels must also enable 64-bit kernel segments in Status */
# define STATUS_BITDEPS		ST0_KX
#else
# define STATUS_BITDEPS		0
#endif
35
#ifdef CONFIG_MIPS_CPS_NS16550

/*
 * Dump exception state over the NS16550 UART: point a0 at the exception
 * name string (emitted at local label 8 by TEXT) & call mips_cps_bev_dump.
 */
#define DUMP_EXCEP(name)		\
	PTR_LA	a0, 8f;			\
	jal	mips_cps_bev_dump;	\
	nop;				\
	TEXT(name)

#else /* !CONFIG_MIPS_CPS_NS16550 */

/* No UART configured - exception dumping is a no-op */
#define DUMP_EXCEP(name)

#endif /* !CONFIG_MIPS_CPS_NS16550 */
49
	/*
	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
	 * MT is not supported then branch to nomt.
	 *
	 * Each Config register's sign (M) bit indicates whether the next
	 * Config register is present, so walk Config1..Config3 first.
	 */
	.macro	has_mt	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt			/* no Config2 => no MT */
	mfc0	\dest, CP0_CONFIG, 2		/* (delay slot) */
	bgez	\dest, \nomt			/* no Config3 => no MT */
	mfc0	\dest, CP0_CONFIG, 3		/* (delay slot) */
	andi	\dest, \dest, MIPS_CONF3_MT
	beqz	\dest, \nomt
	nop
	.endm
64
	/*
	 * Set dest to non-zero if the core supports MIPSr6 multithreading
	 * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
	 * branch to nomt.
	 *
	 * Walk Config1..Config5 via each register's M (continuation) bit,
	 * then test the VP bit in Config5.
	 */
	.macro	has_vp	dest, nomt
	mfc0	\dest, CP0_CONFIG, 1
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 2		/* (delay slot) */
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 3		/* (delay slot) */
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 4		/* (delay slot) */
	bgez	\dest, \nomt
	mfc0	\dest, CP0_CONFIG, 5		/* (delay slot) */
	andi	\dest, \dest, MIPS_CONF5_VP
	beqz	\dest, \nomt
	nop
	.endm
84
85
/*
 * mips_cps_core_boot() - bring a secondary core/VPE up & jump into Linux
 *
 * In:  a0 = the Cache Coherency Attribute to use for Kseg0
 *      a1 = the (uncached) base address of the CM GCRs
 *
 * Performs L1 cache init & joins the coherent domain (on r1+ ISAs), sets
 * the Kseg0 CCA, then loads this VPE's entry point/gp/sp from its
 * vpe_boot_config & jumps to it. Does not return.
 */
LEAF(mips_cps_core_boot)
	/* Save CCA and GCR base */
	move	s0, a0				/* s0 = Kseg0 CCA to apply */
	move	s1, a1				/* s1 = uncached GCR base */

	/* We don't know how to do coherence setup on earlier ISA */
#if MIPS_ISA_REV > 0
	/* Skip cache & coherence setup if we're already coherent */
	lw	s7, GCR_CL_COHERENCE_OFS(s1)	/* s7 != 0 => already coherent */
	bnez	s7, 1f
	nop

	/* Initialize the L1 caches */
	jal	mips_cps_cache_init
	nop

	/* Enter the coherent domain */
	li	t0, 0xff			/* join all coherence domains */
	sw	t0, GCR_CL_COHERENCE_OFS(s1)
	ehb
#endif /* MIPS_ISA_REV > 0 */

	/* Set Kseg0 CCA to that in s0 */
1:	mfc0	t0, CP0_CONFIG
	ori	t0, 0x7				/* clear the K0 CCA field... */
	xori	t0, 0x7
	or	t0, t0, s0			/* ...and insert the new CCA */
	mtc0	t0, CP0_CONFIG
	ehb

	/* Jump to kseg0 */
	PTR_LA	t0, 1f
	jr	t0
	nop

	/*
	 * We're up, cached & coherent. Perform any EVA initialization necessary
	 * before we access memory.
	 */
1:	eva_init

	/* Retrieve boot configuration pointers (v0, v1, t9 - see below) */
	jal	mips_cps_get_bootcfg
	nop

	/* Skip core-level init if we started up coherent */
	bnez	s7, 1f
	nop

	/* Perform any further required core-level initialisation */
	jal	mips_cps_core_init
	nop

	/*
	 * Boot any other VPEs within this core that should be online, and
	 * deactivate this VPE if it should be offline.
	 */
	move	a1, t9				/* a1 = this VPE's ID */
	jal	mips_cps_boot_vpes
	move	a0, v0				/* (delay slot) a0 = core bootcfg */

	/* Off we go! Load this VPE's entry point, gp & sp, then jump in. */
1:	PTR_L	t1, VPEBOOTCFG_PC(v1)
	PTR_L	gp, VPEBOOTCFG_GP(v1)
	PTR_L	sp, VPEBOOTCFG_SP(v1)
	jr	t1
	nop
	END(mips_cps_core_boot)
154
	/*
	 * Boot-time exception handlers, placed in init text. All except
	 * EJTAG just dump state (when configured) and hang - no exceptions
	 * are expected during CPS boot.
	 */
	__INIT
LEAF(excep_tlbfill)
	DUMP_EXCEP("TLB Fill")
	b	.				/* hang */
	nop
	END(excep_tlbfill)

LEAF(excep_xtlbfill)
	DUMP_EXCEP("XTLB Fill")
	b	.				/* hang */
	nop
	END(excep_xtlbfill)

LEAF(excep_cache)
	DUMP_EXCEP("Cache")
	b	.				/* hang */
	nop
	END(excep_cache)

LEAF(excep_genex)
	DUMP_EXCEP("General")
	b	.				/* hang */
	nop
	END(excep_genex)

LEAF(excep_intex)
	DUMP_EXCEP("Interrupt")
	b	.				/* hang */
	nop
	END(excep_intex)

LEAF(excep_ejtag)
	PTR_LA	k0, ejtag_debug_handler		/* chain to real EJTAG handler */
	jr	k0
	nop
	END(excep_ejtag)
	__FINIT
192
/*
 * Perform core-level initialisation. With CONFIG_MIPS_MT_SMP this places
 * every non-zero VPE of an MT core into a known halted, non-active state
 * ready to be started later by mips_cps_boot_vpes. Clobbers t0, t1, ta1,
 * ta3. No-op (beyond the jr ra) when MT SMP is not configured.
 */
LEAF(mips_cps_core_init)
#ifdef CONFIG_MIPS_MT_SMP
	/* Check that the core implements the MT ASE */
	has_mt	t0, 3f

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Only allow 1 TC per VPE to execute... */
	dmt

	/* ...and for the moment only 1 VPE */
	dvpe
	PTR_LA	t1, 1f
	jr.hb	t1				/* clear execution hazards */
	nop

	/* Enter VPE configuration state */
1:	mfc0	t0, CP0_MVPCONTROL
	ori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

	/* Retrieve the number of VPEs within the core */
	mfc0	t0, CP0_MVPCONF0
	srl	t0, t0, MVPCONF0_PVPE_SHIFT
	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
	addiu	ta3, t0, 1			/* ta3 = VPE count */

	/* If there's only 1, we're done */
	beqz	t0, 2f
	nop

	/* Loop through each VPE within this core, skipping VPE 0 (us) */
	li	ta1, 1				/* ta1 = VPE index */

1:	/* Operate on the appropriate TC */
	mtc0	ta1, CP0_VPECONTROL
	ehb

	/* Bind TC to VPE (1:1 TC:VPE mapping) */
	mttc0	ta1, CP0_TCBIND

	/* Set exclusive TC, non-active, master */
	li	t0, VPECONF0_MVP
	sll	t1, ta1, VPECONF0_XTC_SHIFT
	or	t0, t0, t1
	mttc0	t0, CP0_VPECONF0

	/* Set TC non-active, non-allocatable */
	mttc0	zero, CP0_TCSTATUS

	/* Set TC halted */
	li	t0, TCHALT_H
	mttc0	t0, CP0_TCHALT

	/* Next VPE */
	addiu	ta1, ta1, 1
	slt	t0, ta1, ta3
	bnez	t0, 1b
	nop

	/* Leave VPE configuration state */
2:	mfc0	t0, CP0_MVPCONTROL
	xori	t0, t0, MVPCONTROL_VPC
	mtc0	t0, CP0_MVPCONTROL

3:	.set	pop
#endif
	jr	ra
	nop
	END(mips_cps_core_init)
265
/**
 * mips_cps_get_bootcfg() - retrieve boot configuration pointers
 *
 * Returns: pointer to struct core_boot_config in v0, pointer to
 *          struct vpe_boot_config in v1, VPE ID in t9
 *
 * Expects s1 = GCR base, as saved by mips_cps_core_boot.
 * Clobbers t0, t1, t2, ta2, ta3.
 */
LEAF(mips_cps_get_bootcfg)
	/* Calculate a pointer to this cores struct core_boot_config */
	lw	t0, GCR_CL_ID_OFS(s1)		/* t0 = this core's ID */
	li	t1, COREBOOTCFG_SIZE
	mul	t0, t0, t1			/* t0 = byte offset into array */
	PTR_LA	t1, mips_cps_core_bootcfg
	PTR_L	t1, 0(t1)			/* t1 = *mips_cps_core_bootcfg */
	PTR_ADDU v0, t0, t1

	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
	li	t9, 0
#if defined(CONFIG_CPU_MIPSR6)
	has_vp	ta2, 1f

	/*
	 * Assume non-contiguous numbering. Perhaps some day we'll need
	 * to handle contiguous VP numbering, but no such systems yet
	 * exist.
	 */
	mfc0	t9, CP0_GLOBALNUMBER
	andi	t9, t9, MIPS_GLOBALNUMBER_VP
#elif defined(CONFIG_MIPS_MT_SMP)
	has_mt	ta2, 1f

	/* Find the number of VPEs present in the core */
	mfc0	t1, CP0_MVPCONF0
	srl	t1, t1, MVPCONF0_PVPE_SHIFT
	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
	addiu	t1, t1, 1			/* t1 = VPE count */

	/* Calculate a mask for the VPE ID from EBase.CPUNum */
	clz	t1, t1
	li	t2, 31
	subu	t1, t2, t1			/* t1 = floor(log2(count)) */
	li	t2, 1
	sll	t1, t2, t1
	addiu	t1, t1, -1			/* t1 = mask of CPUNum bits */

	/* Retrieve the VPE ID from EBase.CPUNum ($15 select 1 == EBase) */
	mfc0	t9, $15, 1
	and	t9, t9, t1
#endif

1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
	li	t1, VPEBOOTCFG_SIZE
	mul	v1, t9, t1			/* v1 = byte offset into array */
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(v0)
	PTR_ADDU v1, v1, ta3

	jr	ra
	nop
	END(mips_cps_get_bootcfg)
324
/*
 * mips_cps_boot_vpes() - start the VPEs/VPs of this core that should run
 *
 * In:  a0 = pointer to this core's struct core_boot_config
 *      a1 = this VPE's ID within the core
 *
 * On MIPSr6 (VP) systems the CPC VC_RUN/VC_STOP registers are written to
 * start & stop VPs. On MT systems each to-be-run VPE's halted TC is set up
 * (PC, sp, gp, Config/SegCtl copies) & released. If this VPE itself is not
 * in the run mask, it is halted/stopped. Clobbers t0-t2, t8, ta1-ta3.
 */
LEAF(mips_cps_boot_vpes)
	lw	ta2, COREBOOTCFG_VPEMASK(a0)	/* ta2 = mask of VPEs to run */
	PTR_L	ta3, COREBOOTCFG_VPECONFIG(a0)	/* ta3 = vpe_boot_config array */

#if defined(CONFIG_CPU_MIPSR6)

	has_vp	t0, 5f

	/* Find base address of CPC */
	PTR_LA	t1, mips_gcr_base
	PTR_L	t1, 0(t1)			/* t1 = GCR base address */
	PTR_L	t1, GCR_CPC_BASE_OFS(t1)
	PTR_LI	t2, ~0x7fff			/* mask off the low control bits */
	and	t1, t1, t2
	PTR_LI	t2, UNCAC_BASE
	PTR_ADD	t1, t1, t2			/* t1 = uncached CPC base */

	/* Start any other VPs that ought to be running */
	PTR_S	ta2, CPC_CL_VC_RUN_OFS(t1)

	/* Ensure this VP stops running if it shouldn't be */
	not	ta2
	PTR_S	ta2, CPC_CL_VC_STOP_OFS(t1)
	ehb

#elif defined(CONFIG_MIPS_MT)

	/* If the core doesn't support MT then return */
	has_mt	t0, 5f

	/* Enter VPE configuration state */
	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt
	dvpe
	.set	pop

	PTR_LA	t1, 1f
	jr.hb	t1				/* clear execution hazards */
	nop
1:	mfc0	t1, CP0_MVPCONTROL
	ori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb

	/* Loop through each VPE */
	move	t8, ta2				/* t8 = saved run mask */
	li	ta1, 0				/* ta1 = VPE index */

	/* Check whether the VPE should be running. If not, skip it */
1:	andi	t0, ta2, 1
	beqz	t0, 2f
	nop

	/* Operate on the appropriate TC */
	mfc0	t0, CP0_VPECONTROL
	ori	t0, t0, VPECONTROL_TARGTC
	xori	t0, t0, VPECONTROL_TARGTC	/* clear TargTC field... */
	or	t0, t0, ta1			/* ...and select TC ta1 */
	mtc0	t0, CP0_VPECONTROL
	ehb

	.set	push
	.set	MIPS_ISA_LEVEL_RAW
	.set	mt

	/* Skip the VPE if its TC is not halted */
	mftc0	t0, CP0_TCHALT
	beqz	t0, 2f
	nop

	/* Calculate a pointer to the VPEs struct vpe_boot_config */
	li	t0, VPEBOOTCFG_SIZE
	mul	t0, t0, ta1
	PTR_ADDU t0, t0, ta3

	/* Set the TC restart PC */
	lw	t1, VPEBOOTCFG_PC(t0)
	mttc0	t1, CP0_TCRESTART

	/* Set the TC stack pointer */
	lw	t1, VPEBOOTCFG_SP(t0)
	mttgpr	t1, sp

	/* Set the TC global pointer */
	lw	t1, VPEBOOTCFG_GP(t0)
	mttgpr	t1, gp

	/* Copy config from this VPE */
	mfc0	t0, CP0_CONFIG
	mttc0	t0, CP0_CONFIG

	/*
	 * Copy the EVA config from this VPE if the CPU supports it.
	 * CONFIG3 must exist to be running MT startup - just read it.
	 */
	mfc0	t0, CP0_CONFIG, 3
	and	t0, t0, MIPS_CONF3_SC
	beqz	t0, 3f
	nop
	mfc0	t0, CP0_SEGCTL0
	mttc0	t0, CP0_SEGCTL0
	mfc0	t0, CP0_SEGCTL1
	mttc0	t0, CP0_SEGCTL1
	mfc0	t0, CP0_SEGCTL2
	mttc0	t0, CP0_SEGCTL2
3:
	/* Ensure no software interrupts are pending */
	mttc0	zero, CP0_CAUSE
	mttc0	zero, CP0_STATUS

	/* Set TC active, not interrupt exempt */
	mftc0	t0, CP0_TCSTATUS
	li	t1, ~TCSTATUS_IXMT
	and	t0, t0, t1
	ori	t0, t0, TCSTATUS_A
	mttc0	t0, CP0_TCSTATUS

	/* Clear the TC halt bit */
	mttc0	zero, CP0_TCHALT

	/* Set VPE active */
	mftc0	t0, CP0_VPECONF0
	ori	t0, t0, VPECONF0_VPA
	mttc0	t0, CP0_VPECONF0

	/* Next VPE */
2:	srl	ta2, ta2, 1
	addiu	ta1, ta1, 1
	bnez	ta2, 1b
	nop

	/* Leave VPE configuration state */
	mfc0	t1, CP0_MVPCONTROL
	xori	t1, t1, MVPCONTROL_VPC
	mtc0	t1, CP0_MVPCONTROL
	ehb
	evpe

	.set	pop

	/* Check whether this VPE is meant to be running */
	li	t0, 1
	sll	t0, t0, a1
	and	t0, t0, t8
	bnez	t0, 2f
	nop

	/* This VPE should be offline, halt the TC */
	li	t0, TCHALT_H
	mtc0	t0, CP0_TCHALT
	PTR_LA	t0, 1f
1:	jr.hb	t0				/* spin here once halted */
	nop

2:

#endif /* CONFIG_MIPS_MT */

	/* Return */
5:	jr	ra
	nop
	END(mips_cps_boot_vpes)
488
#if MIPS_ISA_REV > 0
/*
 * Initialise (index-invalidate) the L1 I-cache & D-cache, sizing each from
 * the Config1 register fields. Clobbers v0, t0-t3, a0, a1.
 */
LEAF(mips_cps_cache_init)
	/*
	 * Clear the bits used to index the caches. Note that the architecture
	 * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
	 * be valid for all MIPS32 CPUs, even those for which said writes are
	 * unnecessary.
	 */
	mtc0	zero, CP0_TAGLO, 0
	mtc0	zero, CP0_TAGHI, 0
	mtc0	zero, CP0_TAGLO, 2
	mtc0	zero, CP0_TAGHI, 2
	ehb

	/* Primary cache configuration is indicated by Config1 */
	mfc0	v0, CP0_CONFIG, 1

	/* Detect I-cache line size: IL == 0 => no I-cache, else 2 << IL */
	_EXT	t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
	beqz	t0, icache_done
	li	t1, 2				/* (delay slot) */
	sllv	t0, t1, t0			/* t0 = line size in bytes */

	/* Detect I-cache size: sets/way = 64 << IS for IS != 7 */
	_EXT	t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32				/* (delay slot) */
	addiu	t1, t1, 1
	sllv	t1, t3, t1			/* t1 = 32 << (IS + 1) */
1:	/* At this point t1 == I-cache sets per way */
	/*
	 * NOTE(review): when IS == 7 the branch above leaves t1 == 7 rather
	 * than 32 (t3 is loaded but unused) - verify against the Config1
	 * encoding for the cores this runs on.
	 */
	_EXT	t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
	addiu	t2, t2, 1			/* t2 = associativity */
	mul	t1, t1, t0
	mul	t1, t1, t2			/* t1 = total I-cache bytes */

	/* Index-invalidate each line through its Kseg0 alias */
	li	a0, CKSEG0
	PTR_ADD	a1, a0, t1			/* a1 = end address */
1:	cache	Index_Store_Tag_I, 0(a0)
	PTR_ADD	a0, a0, t0
	bne	a0, a1, 1b
	nop
icache_done:

	/* Detect D-cache line size: DL == 0 => no D-cache */
	_EXT	t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
	beqz	t0, dcache_done
	li	t1, 2				/* (delay slot) */
	sllv	t0, t1, t0			/* t0 = line size in bytes */

	/* Detect D-cache size (same encoding as the I-cache fields) */
	_EXT	t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
	xori	t2, t1, 0x7
	beqz	t2, 1f
	li	t3, 32				/* (delay slot) */
	addiu	t1, t1, 1
	sllv	t1, t3, t1
1:	/* At this point t1 == D-cache sets per way */
	_EXT	t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
	addiu	t2, t2, 1			/* t2 = associativity */
	mul	t1, t1, t0
	mul	t1, t1, t2			/* t1 = total D-cache bytes */

	li	a0, CKSEG0
	PTR_ADDU a1, a0, t1
	PTR_SUBU a1, a1, t0			/* a1 = address of last line */
1:	cache	Index_Store_Tag_D, 0(a0)
	bne	a0, a1, 1b
	PTR_ADD	a0, a0, t0			/* (delay slot) */
dcache_done:

	jr	ra
	nop
	END(mips_cps_cache_init)
#endif /* MIPS_ISA_REV > 0 */
564
565#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
566
	/*
	 * Calculate a pointer to this CPUs struct mips_static_suspend_state:
	 * dest = &cps_cpu_state + __per_cpu_offset[smp_processor_id()].
	 * Clobbers $1 (at); result is left in \dest.
	 */
	.macro	psstate	dest
	.set	push
	.set	noat
	lw	$1, TI_CPU(gp)			/* $1 = this CPU's number */
	sll	$1, $1, LONGLOG			/* scale to a long-sized index */
	PTR_LA	\dest, __per_cpu_offset
	PTR_ADDU $1, $1, \dest
	lw	$1, 0($1)			/* $1 = __per_cpu_offset[cpu] */
	PTR_LA	\dest, cps_cpu_state
	PTR_ADDU \dest, \dest, $1
	.set	pop
	.endm
580
/*
 * Save this CPU's register & static state ahead of entering a low power
 * state, then continue at the address in v0.
 * NOTE(review): the SUSPEND_*/v0 contract comes from asm/pm.h - confirm
 * there which registers the caller must set up.
 */
LEAF(mips_cps_pm_save)
	/* Save CPU state */
	SUSPEND_SAVE_REGS
	psstate	t1				/* t1 = this CPU's suspend state */
	SUSPEND_SAVE_STATIC
	jr	v0				/* continue at address in v0 */
	nop
	END(mips_cps_pm_save)
589
/*
 * Restore the CPU state saved by mips_cps_pm_save & return to its caller
 * (the RESUME_RESTORE_REGS_RETURN macro performs the return).
 */
LEAF(mips_cps_pm_restore)
	/* Restore CPU state */
	psstate	t1				/* t1 = this CPU's suspend state */
	RESUME_RESTORE_STATIC
	RESUME_RESTORE_REGS_RETURN
	END(mips_cps_pm_restore)
596
597#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */