/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <asm/bcache.h>
#include <asm/mips-cps.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

static bool threads_disabled;
static DECLARE_BITMAP(core_power, NR_CPUS);

struct core_boot_config *mips_cps_core_bootcfg;

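/*
 * Handle the "nothreads" kernel parameter: when given, core_vpe_count()
 * below reports a single VPE per core, so only the first hardware thread
 * of each core is brought up.
 */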
static int __init setup_nothreads(char *s)
{
	threads_disabled = true;
	return 0;
}
early_param("nothreads", setup_nothreads);

static unsigned core_vpe_count(unsigned int cluster, unsigned core)
{
	if (threads_disabled)
		return 1;

	return mips_cps_numvps(cluster, core);
}

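/*
 * Detect the cluster/core/VPE topology, record it in cpu_data & mark the
 * corresponding CPUs possible/present, then make core 0 (which we're
 * running on) coherent & ready to boot its siblings.
 */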
static void __init cps_smp_setup(void)
{
	unsigned int nclusters, ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int cl, c, v;

	/* Detect & record VPE topology */
	nvpes = 0;
	nclusters = mips_cps_numclusters();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (cl = 0; cl < nclusters; cl++) {
		if (cl > 0)
			pr_cont(",");
		pr_cont("{");

		ncores = mips_cps_numcores(cl);
		for (c = 0; c < ncores; c++) {
			core_vpes = core_vpe_count(cl, c);

			if (c > 0)
				pr_cont(",");
			pr_cont("%u", core_vpes);

			/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
			if (!cl && !c)
				smp_num_siblings = core_vpes;

			for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
				cpu_set_cluster(&cpu_data[nvpes + v], cl);
				cpu_set_core(&cpu_data[nvpes + v], c);
				cpu_set_vpe_id(&cpu_data[nvpes + v], v);
			}

			nvpes += core_vpes;
		}

		pr_cont("}");
	}
	pr_cont(" total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
		set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable, cores_limited;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	cores_limited = false;
	if (cca_unsuitable || cpu_has_dc_aliases) {
		for_each_present_cpu(c) {
			if (cpus_are_siblings(smp_processor_id(), c))
				continue;

			set_cpu_present(c, false);
			cores_limited = true;
		}
	}
	if (cores_limited)
		pr_warn("Using only one core due to %s%s%s\n",
			cca_unsuitable ? "unsuitable CCA" : "",
			(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
			cpu_has_dc_aliases ? "dcache aliasing" : "");

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * s0 = kseg0 CCA
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	uasm_i_addiu(&entry_code, 16, 0, cca);
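	/*
	 * Write the patched entry code back to memory & invalidate any stale
	 * cached copies of it, so that cores booting through
	 * mips_cps_core_entry fetch the updated instructions.
	 */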
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();

	/* Allocate core boot configuration structs */
	ncores = mips_cps_numcores(0);
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(0, c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}

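/*
 * Power up & reset a core so that it begins executing from
 * mips_cps_core_entry; on CM3 & later only the requested VP is left
 * running once the core leaves reset.
 */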
static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	set_gcr_access(1 << core);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
			seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);

			/* U6 == coherent execution, i.e. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}

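/* SMP call function: boot a sibling VPE on the core this callback runs on */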
static void remote_vpe_boot(void *dummy)
{
	unsigned core = cpu_core(&current_cpu_data);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}

static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned long core_entry;
	unsigned int remote;
	int err;

	/* We don't yet support booting CPUs in other clusters */
	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
		return -ENOSYS;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_co_reset_base(core_entry);
		mips_cm_unlock_other();
	}

	if (!cpus_are_siblings(cpu, smp_processor_id())) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (!cpus_are_siblings(cpu, remote))
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
	return 0;
}

static void cps_init_secondary(void)
{
	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned int ident = read_gic_vl_ident();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}

static void cps_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cpu)
		return -EBUSY;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();

	return 0;
}

static unsigned cpu_death_sibling;
static enum {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
} cpu_death;

void play_dead(void)
{
	unsigned int cpu, core, vpe_id;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	core = cpu_core(&cpu_data[cpu]);
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (!cpus_are_siblings(cpu, cpu_death_sibling))
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* This CPU has chosen its way out */
	(void)cpu_report_death();

	if (cpu_death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		pr_debug("Gating power to core %d\n", core);
		/* Power down the core */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}

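/*
 * SMP call function: poll the dying sibling's TCHalt register until its
 * Halted bit is observed set.
 */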
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

static void cps_cpu_die(unsigned int cpu)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	ktime_t fail_time;
	unsigned stat;
	int err;

	/* Wait for the cpu to choose its way out */
	if (!cpu_wait_death(cpu, 5)) {
		pr_err("CPU%u: didn't offline\n", cpu);
		return;
	}

	/*
	 * Now wait for the CPU to actually go offline. Without this, the
	 * offlining may race with one or more of:
	 *
	 * - Onlining the CPU again.
	 * - Powering down the core if another VPE within it is offlined.
	 * - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (i.e. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		fail_time = ktime_add_ms(ktime_get(), 2000);
		do {
			mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE;
			stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
			mips_cpc_unlock_other();
			mips_cm_unlock_other();

			if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
				break;

			/*
			 * The core ought to have powered down, but didn't &
			 * now we don't really know what state it's in. It's
			 * likely that its _pwr_up pin has been wired to logic
			 * 1 & it powered back up as soon as we powered it
			 * down...
			 *
			 * The best we can do is warn the user & continue in
			 * the hope that the core is doing nothing harmful &
			 * might behave properly if we online it later.
			 */
			if (WARN(ktime_after(ktime_get(), fail_time),
				 "CPU%u hasn't powered down, seq. state %u\n",
				 cpu, stat))
				break;
		} while (1);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPU's registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static const struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern const struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irq.h>

#include <asm/bcache.h>
#include <asm/mips-cps.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/regdef.h>
#include <asm/smp.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

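/*
 * Size & alignment of the boot exception vector region built below: core
 * entry code at offset 0, followed by the exception vectors copied in at
 * the architected BEV offsets (0x200-0x480), 0x500 bytes in total.
 */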
#define BEV_VEC_SIZE	0x500
#define BEV_VEC_ALIGN	0x1000

enum label_id {
	label_not_nmi = 1,
};

UASM_L_LA(_not_nmi)

static DECLARE_BITMAP(core_power, NR_CPUS);
static u64 core_entry_reg;
static phys_addr_t cps_vec_pa;

struct core_boot_config *mips_cps_core_bootcfg;

static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
{
	return min(smp_max_threads, mips_cps_numvps(cluster, core));
}

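/*
 * Use uasm to emit the core entry code at addr: test the NMI bit in
 * Status (stashing the nmi_handler pointer for the NMI case), set up
 * Cause.IV & Status, pass the CCA in a0 & the GCR base in a1, then jump
 * to mips_cps_core_boot through its uncached address.
 */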
static void __init *mips_cps_build_core_entry(void *addr)
{
	extern void (*nmi_handler)(void);
	u32 *p = addr;
	u32 val;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	uasm_i_mfc0(&p, GPR_K0, C0_STATUS);
	UASM_i_LA(&p, GPR_T9, ST0_NMI);
	uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T9);

	uasm_il_bnez(&p, &r, GPR_K0, label_not_nmi);
	uasm_i_nop(&p);
	UASM_i_LA(&p, GPR_K0, (long)&nmi_handler);

	uasm_l_not_nmi(&l, p);

	val = CAUSEF_IV;
	uasm_i_lui(&p, GPR_K0, val >> 16);
	uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
	uasm_i_mtc0(&p, GPR_K0, C0_CAUSE);
	val = ST0_CU1 | ST0_CU0 | ST0_BEV | ST0_KX_IF_64;
	uasm_i_lui(&p, GPR_K0, val >> 16);
	uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
	uasm_i_ehb(&p);
	uasm_i_ori(&p, GPR_A0, 0, read_c0_config() & CONF_CM_CMASK);
	UASM_i_LA(&p, GPR_A1, (long)mips_gcr_base);
#if defined(KBUILD_64BIT_SYM32) || defined(CONFIG_32BIT)
	UASM_i_LA(&p, GPR_T9, CKSEG1ADDR(__pa_symbol(mips_cps_core_boot)));
#else
	UASM_i_LA(&p, GPR_T9, TO_UNCAC(__pa_symbol(mips_cps_core_boot)));
#endif
	uasm_i_jr(&p, GPR_T9);
	uasm_i_nop(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}

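/*
 * Detect whether the CM implements the full (47-bit) BEVEXCBASE field in
 * GCR_Co_RESET64_BASE by writing the all-ones pattern & reading it back.
 */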
static bool __init check_64bit_reset(void)
{
	bool cx_64bit_reset = false;

	mips_cm_lock_other(0, 0, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
	write_gcr_co_reset64_base(CM_GCR_Cx_RESET64_BASE_BEVEXCBASE);
	if ((read_gcr_co_reset64_base() & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) ==
	    CM_GCR_Cx_RESET64_BASE_BEVEXCBASE)
		cx_64bit_reset = true;
	mips_cm_unlock_other();

	return cx_64bit_reset;
}

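/*
 * Allocate memory for the boot vectors: ideally low enough to be reached
 * through KSEG1, otherwise below 4GB, or anywhere at all when the cores
 * support a full 47-bit reset exception base.
 */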
static int __init allocate_cps_vecs(void)
{
	/* Try to allocate in KSEG1 first */
	cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
					       0x0, CSEGX_SIZE - 1);

	if (cps_vec_pa)
		core_entry_reg = CKSEG1ADDR(cps_vec_pa) &
					CM_GCR_Cx_RESET_BASE_BEVEXCBASE;

	if (!cps_vec_pa && mips_cm_is64) {
		phys_addr_t end;

		if (check_64bit_reset()) {
			pr_info("VP Local Reset Exception Base supports 47-bit addresses\n");
			end = MEMBLOCK_ALLOC_ANYWHERE;
		} else {
			end = SZ_4G - 1;
		}
		cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, 0, end);
		if (cps_vec_pa) {
			if (check_64bit_reset())
				core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET64_BASE_BEVEXCBASE) |
						 CM_GCR_Cx_RESET_BASE_MODE;
			else
				core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) |
						 CM_GCR_Cx_RESET_BASE_MODE;
		}
	}

	if (!cps_vec_pa)
		return -ENOMEM;

	return 0;
}

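/*
 * Build the core entry code & copy the exception handlers into the
 * vector region at the architected BEV offsets, then flush the region so
 * that the uncached fetches made by freshly reset cores see the final
 * code.
 */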
static void __init setup_cps_vecs(void)
{
	void *cps_vec;

	cps_vec = (void *)CKSEG1ADDR_OR_64BIT(cps_vec_pa);
	mips_cps_build_core_entry(cps_vec);

	memcpy(cps_vec + 0x200, &excep_tlbfill, 0x80);
	memcpy(cps_vec + 0x280, &excep_xtlbfill, 0x80);
	memcpy(cps_vec + 0x300, &excep_cache, 0x80);
	memcpy(cps_vec + 0x380, &excep_genex, 0x80);
	memcpy(cps_vec + 0x400, &excep_intex, 0x80);
	memcpy(cps_vec + 0x480, &excep_ejtag, 0x80);

	/* Make sure no prefetched data remains in the caches */
	blast_inv_dcache_range(CKSEG0ADDR_OR_64BIT(cps_vec_pa),
			       CKSEG0ADDR_OR_64BIT(cps_vec_pa) + BEV_VEC_SIZE);
	bc_inv(CKSEG0ADDR_OR_64BIT(cps_vec_pa), BEV_VEC_SIZE);
	__sync();
}

static void __init cps_smp_setup(void)
{
	unsigned int nclusters, ncores, nvpes, core_vpes;
	int cl, c, v;

	/* Detect & record VPE topology */
	nvpes = 0;
	nclusters = mips_cps_numclusters();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (cl = 0; cl < nclusters; cl++) {
		if (cl > 0)
			pr_cont(",");
		pr_cont("{");

		ncores = mips_cps_numcores(cl);
		for (c = 0; c < ncores; c++) {
			core_vpes = core_vpe_count(cl, c);

			if (c > 0)
				pr_cont(",");
			pr_cont("%u", core_vpes);

			/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
			if (!cl && !c)
				smp_num_siblings = core_vpes;

			for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
				cpu_set_cluster(&cpu_data[nvpes + v], cl);
				cpu_set_core(&cpu_data[nvpes + v], c);
				cpu_set_vpe_id(&cpu_data[nvpes + v], v);
			}

			nvpes += core_vpes;
		}

		pr_cont("}");
	}
	pr_cont(" total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
		set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (allocate_cps_vecs())
		pr_err("Failed to allocate CPS vectors\n");

	if (core_entry_reg && mips_cm_revision() >= CM_REV_CM3)
		write_gcr_bev_base(core_entry_reg);

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable, cores_limited;

	mips_mt_set_cpuoptions();

	if (!core_entry_reg) {
		pr_err("core_entry address unsuitable, disabling smp-cps\n");
		goto err_out;
	}

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	cores_limited = false;
	if (cca_unsuitable || cpu_has_dc_aliases) {
		for_each_present_cpu(c) {
			if (cpus_are_siblings(smp_processor_id(), c))
				continue;

			set_cpu_present(c, false);
			cores_limited = true;
		}
	}
	if (cores_limited)
		pr_warn("Using only one core due to %s%s%s\n",
			cca_unsuitable ? "unsuitable CCA" : "",
			(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
			cpu_has_dc_aliases ? "dcache aliasing" : "");

	setup_cps_vecs();

	/* Allocate core boot configuration structs */
	ncores = mips_cps_numcores(0);
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(0, c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}

static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

	/* Set its reset vector */
	if (mips_cm_is64)
		write_gcr_co_reset64_base(core_entry_reg);
	else
		write_gcr_co_reset_base(core_entry_reg);

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	if (mips_cm_revision() < CM_REV_CM3)
		set_gcr_access(1 << core);
	else
		set_gcr_access_cm3(1 << core);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
			seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);

			/* U6 == coherent execution, i.e. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}

static void remote_vpe_boot(void *dummy)
{
	unsigned core = cpu_core(&current_cpu_data);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}

static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned int remote;
	int err;

	/* We don't yet support booting CPUs in other clusters */
	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
		return -ENOSYS;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
		if (mips_cm_is64)
			write_gcr_co_reset64_base(core_entry_reg);
		else
			write_gcr_co_reset_base(core_entry_reg);
		mips_cm_unlock_other();
	}

	if (!cpus_are_siblings(cpu, smp_processor_id())) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (!cpus_are_siblings(cpu, remote))
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
	return 0;
}

static void cps_init_secondary(void)
{
	int core = cpu_core(&current_cpu_data);

	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned int ident = read_gic_vl_ident();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (core > 0 && !read_gcr_cl_coherence())
		pr_warn("Core %u is not in coherent domain\n", core);

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}

static void cps_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)

enum cpu_death {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
};

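/*
 * Take the calling CPU out of service: either halt just its TC/VP,
 * leaving the rest of the core running, or power gate the whole core.
 */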
static void cps_shutdown_this_cpu(enum cpu_death death)
{
	unsigned int cpu, core, vpe_id;

	cpu = smp_processor_id();
	core = cpu_core(&cpu_data[cpu]);

	if (death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
			pr_debug("Gating power to core %d\n", core);
			/* Power down the core */
			cps_pm_enter_state(CPS_PM_POWER_GATED);
		}
	}
}

#ifdef CONFIG_KEXEC_CORE

static void cps_kexec_nonboot_cpu(void)
{
	if (cpu_has_mipsmt || cpu_has_vp)
		cps_shutdown_this_cpu(CPU_DEATH_HALT);
	else
		cps_shutdown_this_cpu(CPU_DEATH_POWER);
}

#endif /* CONFIG_KEXEC_CORE */

#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC_CORE */

#ifdef CONFIG_HOTPLUG_CPU

static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	irq_migrate_all_off_this_cpu();

	return 0;
}

static unsigned cpu_death_sibling;
static enum cpu_death cpu_death;

void play_dead(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (!cpus_are_siblings(cpu, cpu_death_sibling))
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	cpuhp_ap_report_dead();

	cps_shutdown_this_cpu(cpu_death);

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}

static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

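/*
 * Nothing to do at this stage; the dead CPU is polled & torn down later
 * via the .cleanup_dead_cpu hook (cps_cleanup_dead_cpu() below).
 */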
static void cps_cpu_die(unsigned int cpu) { }

static void cps_cleanup_dead_cpu(unsigned cpu)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	ktime_t fail_time;
	unsigned stat;
	int err;

	/*
	 * Now wait for the CPU to actually go offline. Without this, the
	 * offlining may race with one or more of:
	 *
	 * - Onlining the CPU again.
	 * - Powering down the core if another VPE within it is offlined.
	 * - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (i.e. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		fail_time = ktime_add_ms(ktime_get(), 2000);
		do {
			mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE;
			stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
			mips_cpc_unlock_other();
			mips_cm_unlock_other();

			if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
				break;

			/*
			 * The core ought to have powered down, but didn't &
			 * now we don't really know what state it's in. It's
			 * likely that its _pwr_up pin has been wired to logic
			 * 1 & it powered back up as soon as we powered it
			 * down...
			 *
			 * The best we can do is warn the user & continue in
			 * the hope that the core is doing nothing harmful &
			 * might behave properly if we online it later.
			 */
			if (WARN(ktime_after(ktime_get(), fail_time),
				 "CPU%u hasn't powered down, seq. state %u\n",
				 cpu, stat))
				break;
		} while (1);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPU's registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static const struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
	.cleanup_dead_cpu	= cps_cleanup_dead_cpu,
#endif
#ifdef CONFIG_KEXEC_CORE
	.kexec_nonboot_cpu	= cps_kexec_nonboot_cpu,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern const struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}