v4.6

/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 */

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>

#include <asm/bcache.h>
#include <asm/mips-cm.h>
#include <asm/mips-cpc.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

static DECLARE_BITMAP(core_power, NR_CPUS);

struct core_boot_config *mips_cps_core_bootcfg;

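/*
 * Each core's GCR CONFIG register encodes the number of VPEs it
 * contains minus one in the PVPE field, hence the +1 below.
 */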
static unsigned core_vpe_count(unsigned core)
{
	unsigned cfg;

	if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
		return 1;

	mips_cm_lock_other(core, 0);
	cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK;
	mips_cm_unlock_other();
	return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1;
}

static void __init cps_smp_setup(void)
{
	unsigned int ncores, nvpes, core_vpes;
	int c, v;

	/* Detect & record VPE topology */
	ncores = mips_cm_numcores();
	pr_info("VPE topology ");
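	/*
	 * Together with the pr_cont() calls below this prints the
	 * per-core VPE counts, e.g. "VPE topology {2,2} total 4" on a
	 * system with two cores of two VPEs each.
	 */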
	for (c = nvpes = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		pr_cont("%c%u", c ? ',' : '{', core_vpes);

		/* Use the number of VPEs in core 0 for smp_num_siblings */
		if (!c)
			smp_num_siblings = core_vpes;

		for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
			cpu_data[nvpes + v].core = c;
#ifdef CONFIG_MIPS_MT_SMP
			cpu_data[nvpes + v].vpe_id = v;
#endif
		}

		nvpes += core_vpes;
	}
	pr_cont("} total %u\n", nvpes);

	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, true);
		set_cpu_present(v, true);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	ncores = mips_cm_numcores();
	if (cca_unsuitable && ncores > 1) {
		pr_warn("Using only one core due to unsuitable CCA 0x%x\n",
			cca);

		for_each_present_cpu(c) {
			if (cpu_data[c].core)
				set_cpu_present(c, false);
		}
	}

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * s0 = kseg0 CCA
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	uasm_i_addiu(&entry_code, 16, 0, cca);
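	/*
	 * uasm register 16 is $s0 in the MIPS ABI. The dcache & board
	 * cache writebacks below push the patched instruction out to
	 * memory so that secondary cores, which begin execution at an
	 * uncached KSEG1 reset vector, fetch the newly written code.
	 */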
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();

	/* Allocate core boot configuration structs */
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}

static void boot_core(unsigned core)
{
	u32 access, stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(core, 0);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	access = read_gcr_access();
	access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core);
	write_gcr_access(access);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);
		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE_MSK;

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}

static void remote_vpe_boot(void *dummy)
{
	mips_cps_boot_vpes();
}

static void cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_data[cpu].core;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned int remote;
	int err;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core);
		goto out;
	}

	if (core != current_cpu_data.core) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (cpu_data[remote].core != core)
				continue;
			if (cpu_online(remote))
				break;
		}
		BUG_ON(remote >= NR_CPUS);

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes();
out:
	preempt_enable();
}

static void cps_init_secondary(void)
{
	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 |
				 STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
}

static void cps_smp_finish(void)
{
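	/*
	 * Arm the CP0 count/compare timer; the first tick is scheduled
	 * ~8 jiffies out, comfortably ahead of the current count.
	 */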
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU

static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cpu)
		return -EBUSY;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

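	/*
	 * vpe_mask carries one bit per booted VPE in this core; it is
	 * set when a VPE is brought up in cps_boot_secondary() and
	 * cleared here as the VPE goes offline.
	 */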
	core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, &cpu_callin_map);

	return 0;
}

static DECLARE_COMPLETION(cpu_death_chosen);
static unsigned cpu_death_sibling;
static enum {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
} cpu_death;

void play_dead(void)
{
	unsigned cpu, core;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	if (cpu_has_mipsmt) {
		core = cpu_data[cpu].core;

		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (cpu_data[cpu_death_sibling].core != core)
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	/* This CPU has chosen its way out */
	complete(&cpu_death_chosen);

	if (cpu_death == CPU_DEATH_HALT) {
		/* Halt this TC */
		write_c0_tchalt(TCHALT_H);
		instruction_hazard();
	} else {
		/* Power down the core */
		cps_pm_enter_state(CPS_PM_POWER_GATED);
	}

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}

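/*
 * The dying CPU signals cpu_death_chosen above; cps_cpu_die(), running on
 * a surviving CPU, waits on that completion before verifying that the
 * halt or power-down actually took effect.
 */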
static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

static void cps_cpu_die(unsigned int cpu)
{
	unsigned core = cpu_data[cpu].core;
	unsigned stat;
	int err;

	/* Wait for the cpu to choose its way out */
	if (!wait_for_completion_timeout(&cpu_death_chosen,
					 msecs_to_jiffies(5000))) {
		pr_err("CPU%u: didn't offline\n", cpu);
		return;
	}

	/*
	 * Now wait for the CPU to actually offline. Without doing this that
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		do {
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
			mips_cpc_unlock_other();
		} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
			 stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPUs registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX_MSK)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}
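
A platform opts in to these ops during early boot by calling
register_cps_smp_ops(). A minimal sketch of such a caller follows; the
plat_smp_init() wrapper and the register_up_smp_ops() fallback are
illustrative assumptions rather than part of this file:

void __init plat_smp_init(void)
{
	/* Prefer CPS SMP; it requires a CM and a GIC (checked above). */
	if (!register_cps_smp_ops())
		return;

	/* Otherwise fall back to uniprocessor ops. */
	register_up_smp_ops();
}
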
v6.8
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2013 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */

#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/hotplug.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/irq.h>

#include <asm/bcache.h>
#include <asm/mips-cps.h>
#include <asm/mips_mt.h>
#include <asm/mipsregs.h>
#include <asm/pm-cps.h>
#include <asm/r4kcache.h>
#include <asm/smp.h>
#include <asm/smp-cps.h>
#include <asm/time.h>
#include <asm/uasm.h>

static DECLARE_BITMAP(core_power, NR_CPUS);

struct core_boot_config *mips_cps_core_bootcfg;

static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
{
	return min(smp_max_threads, mips_cps_numvps(cluster, core));
}

static void __init cps_smp_setup(void)
{
	unsigned int nclusters, ncores, nvpes, core_vpes;
	unsigned long core_entry;
	int cl, c, v;

	/* Detect & record VPE topology */
	nvpes = 0;
	nclusters = mips_cps_numclusters();
	pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
	for (cl = 0; cl < nclusters; cl++) {
		if (cl > 0)
			pr_cont(",");
		pr_cont("{");

		ncores = mips_cps_numcores(cl);
		for (c = 0; c < ncores; c++) {
			core_vpes = core_vpe_count(cl, c);

			if (c > 0)
				pr_cont(",");
			pr_cont("%u", core_vpes);

			/* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
			if (!cl && !c)
				smp_num_siblings = core_vpes;

			for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
				cpu_set_cluster(&cpu_data[nvpes + v], cl);
				cpu_set_core(&cpu_data[nvpes + v], c);
				cpu_set_vpe_id(&cpu_data[nvpes + v], v);
			}

			nvpes += core_vpes;
		}

		pr_cont("}");
	}
	pr_cont(" total %u\n", nvpes);

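	/*
	 * Note that only VPs in cluster 0 are marked possible & present
	 * below: booting CPUs in other clusters is not yet supported,
	 * and cps_boot_secondary() returns -ENOSYS for them.
	 */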
	/* Indicate present CPUs (CPU being synonymous with VPE) */
	for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
		set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
		set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
		__cpu_number_map[v] = v;
		__cpu_logical_map[v] = v;
	}

	/* Set a coherent default CCA (CWB) */
	change_c0_config(CONF_CM_CMASK, 0x5);

	/* Core 0 is powered up (we're running on it) */
	bitmap_set(core_power, 0, 1);

	/* Initialise core 0 */
	mips_cps_core_init();

	/* Make core 0 coherent with everything */
	write_gcr_cl_coherence(0xff);

	if (mips_cm_revision() >= CM_REV_CM3) {
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_bev_base(core_entry);
	}

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(0, &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */
}

static void __init cps_prepare_cpus(unsigned int max_cpus)
{
	unsigned ncores, core_vpes, c, cca;
	bool cca_unsuitable, cores_limited;
	u32 *entry_code;

	mips_mt_set_cpuoptions();

	/* Detect whether the CCA is unsuited to multi-core SMP */
	cca = read_c0_config() & CONF_CM_CMASK;
	switch (cca) {
	case 0x4: /* CWBE */
	case 0x5: /* CWB */
		/* The CCA is coherent, multi-core is fine */
		cca_unsuitable = false;
		break;

	default:
		/* CCA is not coherent, multi-core is not usable */
		cca_unsuitable = true;
	}

	/* Warn the user if the CCA prevents multi-core */
	cores_limited = false;
	if (cca_unsuitable || cpu_has_dc_aliases) {
		for_each_present_cpu(c) {
			if (cpus_are_siblings(smp_processor_id(), c))
				continue;

			set_cpu_present(c, false);
			cores_limited = true;
		}
	}
	if (cores_limited)
		pr_warn("Using only one core due to %s%s%s\n",
			cca_unsuitable ? "unsuitable CCA" : "",
			(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
			cpu_has_dc_aliases ? "dcache aliasing" : "");

	/*
	 * Patch the start of mips_cps_core_entry to provide:
	 *
	 * s0 = kseg0 CCA
	 */
	entry_code = (u32 *)&mips_cps_core_entry;
	uasm_i_addiu(&entry_code, 16, 0, cca);
	UASM_i_LA(&entry_code, 17, (long)mips_gcr_base);
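	/*
	 * uasm registers 16 & 17 are $s0 & $s1; the BUG_ON below checks
	 * that the patched instructions have not overflowed the region
	 * reserved up to mips_cps_core_entry_patch_end.
	 */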
	BUG_ON((void *)entry_code > (void *)&mips_cps_core_entry_patch_end);
	blast_dcache_range((unsigned long)&mips_cps_core_entry,
			   (unsigned long)entry_code);
	bc_wback_inv((unsigned long)&mips_cps_core_entry,
		     (void *)entry_code - (void *)&mips_cps_core_entry);
	__sync();

	/* Allocate core boot configuration structs */
	ncores = mips_cps_numcores(0);
	mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
					GFP_KERNEL);
	if (!mips_cps_core_bootcfg) {
		pr_err("Failed to allocate boot config for %u cores\n", ncores);
		goto err_out;
	}

	/* Allocate VPE boot configuration structs */
	for (c = 0; c < ncores; c++) {
		core_vpes = core_vpe_count(0, c);
		mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
				sizeof(*mips_cps_core_bootcfg[c].vpe_config),
				GFP_KERNEL);
		if (!mips_cps_core_bootcfg[c].vpe_config) {
			pr_err("Failed to allocate %u VPE boot configs\n",
			       core_vpes);
			goto err_out;
		}
	}

	/* Mark this CPU as booted */
	atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
		   1 << cpu_vpe_id(&current_cpu_data));

	return;
err_out:
	/* Clean up allocations */
	if (mips_cps_core_bootcfg) {
		for (c = 0; c < ncores; c++)
			kfree(mips_cps_core_bootcfg[c].vpe_config);
		kfree(mips_cps_core_bootcfg);
		mips_cps_core_bootcfg = NULL;
	}

	/* Effectively disable SMP by declaring CPUs not present */
	for_each_possible_cpu(c) {
		if (c == 0)
			continue;
		set_cpu_present(c, false);
	}
}

static void boot_core(unsigned int core, unsigned int vpe_id)
{
	u32 stat, seq_state;
	unsigned timeout;

	/* Select the appropriate core */
	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

	/* Set its reset vector */
	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));

	/* Ensure its coherency is disabled */
	write_gcr_co_coherence(0);

	/* Start it with the legacy memory map and exception base */
	write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);

	/* Ensure the core can access the GCRs */
	set_gcr_access(1 << core);

	if (mips_cpc_present()) {
		/* Reset the core */
		mips_cpc_lock_other(core);

		if (mips_cm_revision() >= CM_REV_CM3) {
			/* Run only the requested VP following the reset */
			write_cpc_co_vp_stop(0xf);
			write_cpc_co_vp_run(1 << vpe_id);

			/*
			 * Ensure that the VP_RUN register is written before the
			 * core leaves reset.
			 */
			wmb();
		}

		write_cpc_co_cmd(CPC_Cx_CMD_RESET);

		timeout = 100;
		while (true) {
			stat = read_cpc_co_stat_conf();
			seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
			seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);

			/* U6 == coherent execution, ie. the core is up */
			if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
				break;

			/* Delay a little while before we start warning */
			if (timeout) {
				timeout--;
				mdelay(10);
				continue;
			}

			pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
				core, stat);
			mdelay(1000);
		}

		mips_cpc_unlock_other();
	} else {
		/* Take the core out of reset */
		write_gcr_co_reset_release(0);
	}

	mips_cm_unlock_other();

	/* The core is now powered up */
	bitmap_set(core_power, core, 1);
}

static void remote_vpe_boot(void *dummy)
{
	unsigned core = cpu_core(&current_cpu_data);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];

	mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}

static int cps_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
	unsigned long core_entry;
	unsigned int remote;
	int err;

	/* We don't yet support booting CPUs in other clusters */
	if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
		return -ENOSYS;

	vpe_cfg->pc = (unsigned long)&smp_bootstrap;
	vpe_cfg->sp = __KSTK_TOS(idle);
	vpe_cfg->gp = (unsigned long)task_thread_info(idle);

	atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);

	preempt_disable();

	if (!test_bit(core, core_power)) {
		/* Boot a VPE on a powered down core */
		boot_core(core, vpe_id);
		goto out;
	}

	if (cpu_has_vp) {
		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
		write_gcr_co_reset_base(core_entry);
		mips_cm_unlock_other();
	}

	if (!cpus_are_siblings(cpu, smp_processor_id())) {
		/* Boot a VPE on another powered up core */
		for (remote = 0; remote < NR_CPUS; remote++) {
			if (!cpus_are_siblings(cpu, remote))
				continue;
			if (cpu_online(remote))
				break;
		}
		if (remote >= NR_CPUS) {
			pr_crit("No online CPU in core %u to start CPU%d\n",
				core, cpu);
			goto out;
		}

		err = smp_call_function_single(remote, remote_vpe_boot,
					       NULL, 1);
		if (err)
			panic("Failed to call remote CPU\n");
		goto out;
	}

	BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);

	/* Boot a VPE on this core */
	mips_cps_boot_vpes(core_cfg, vpe_id);
out:
	preempt_enable();
	return 0;
}

static void cps_init_secondary(void)
{
	int core = cpu_core(&current_cpu_data);

	/* Disable MT - we only want to run 1 TC per VPE */
	if (cpu_has_mipsmt)
		dmt();

	if (mips_cm_revision() >= CM_REV_CM3) {
		unsigned int ident = read_gic_vl_ident();

		/*
		 * Ensure that our calculation of the VP ID matches up with
		 * what the GIC reports, otherwise we'll have configured
		 * interrupts incorrectly.
		 */
		BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
	}

	if (core > 0 && !read_gcr_cl_coherence())
		pr_warn("Core %u is not in coherent domain\n", core);

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);
	else
		change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
					 STATUSF_IP4 | STATUSF_IP5 |
					 STATUSF_IP6 | STATUSF_IP7);
}

static void cps_smp_finish(void)
{
	write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));

#ifdef CONFIG_MIPS_MT_FPAFF
	/* If we have an FPU, enroll ourselves in the FPU-full mask */
	if (cpu_has_fpu)
		cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
#endif /* CONFIG_MIPS_MT_FPAFF */

	local_irq_enable();
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC_CORE)

enum cpu_death {
	CPU_DEATH_HALT,
	CPU_DEATH_POWER,
};

static void cps_shutdown_this_cpu(enum cpu_death death)
{
	unsigned int cpu, core, vpe_id;

	cpu = smp_processor_id();
	core = cpu_core(&cpu_data[cpu]);

	if (death == CPU_DEATH_HALT) {
		vpe_id = cpu_vpe_id(&cpu_data[cpu]);

		pr_debug("Halting core %d VP%d\n", core, vpe_id);
		if (cpu_has_mipsmt) {
			/* Halt this TC */
			write_c0_tchalt(TCHALT_H);
			instruction_hazard();
		} else if (cpu_has_vp) {
			write_cpc_cl_vp_stop(1 << vpe_id);

			/* Ensure that the VP_STOP register is written */
			wmb();
		}
	} else {
		if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
			pr_debug("Gating power to core %d\n", core);
			/* Power down the core */
			cps_pm_enter_state(CPS_PM_POWER_GATED);
		}
	}
}

#ifdef CONFIG_KEXEC_CORE

static void cps_kexec_nonboot_cpu(void)
{
	if (cpu_has_mipsmt || cpu_has_vp)
		cps_shutdown_this_cpu(CPU_DEATH_HALT);
	else
		cps_shutdown_this_cpu(CPU_DEATH_POWER);
}

#endif /* CONFIG_KEXEC_CORE */

#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC_CORE */

#ifdef CONFIG_HOTPLUG_CPU

static int cps_cpu_disable(void)
{
	unsigned cpu = smp_processor_id();
	struct core_boot_config *core_cfg;

	if (!cps_pm_support_state(CPS_PM_POWER_GATED))
		return -EINVAL;

	core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
	atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
	smp_mb__after_atomic();
	set_cpu_online(cpu, false);
	calculate_cpu_foreign_map();
	irq_migrate_all_off_this_cpu();

	return 0;
}

static unsigned cpu_death_sibling;
static enum cpu_death cpu_death;

void play_dead(void)
{
	unsigned int cpu;

	local_irq_disable();
	idle_task_exit();
	cpu = smp_processor_id();
	cpu_death = CPU_DEATH_POWER;

	pr_debug("CPU%d going offline\n", cpu);

	if (cpu_has_mipsmt || cpu_has_vp) {
		/* Look for another online VPE within the core */
		for_each_online_cpu(cpu_death_sibling) {
			if (!cpus_are_siblings(cpu, cpu_death_sibling))
				continue;

			/*
			 * There is an online VPE within the core. Just halt
			 * this TC and leave the core alone.
			 */
			cpu_death = CPU_DEATH_HALT;
			break;
		}
	}

	cpuhp_ap_report_dead();

	cps_shutdown_this_cpu(cpu_death);

	/* This should never be reached */
	panic("Failed to offline CPU %u", cpu);
}

static void wait_for_sibling_halt(void *ptr_cpu)
{
	unsigned cpu = (unsigned long)ptr_cpu;
	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	unsigned halted;
	unsigned long flags;

	do {
		local_irq_save(flags);
		settc(vpe_id);
		halted = read_tc_c0_tchalt();
		local_irq_restore(flags);
	} while (!(halted & TCHALT_H));
}

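/*
 * The dying CPU reports itself via cpuhp_ap_report_dead() in play_dead()
 * above, so there is nothing left to do here; the survivor-side waiting
 * happens in cps_cleanup_dead_cpu() below.
 */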
static void cps_cpu_die(unsigned int cpu) { }

static void cps_cleanup_dead_cpu(unsigned cpu)
{
	unsigned core = cpu_core(&cpu_data[cpu]);
	unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
	ktime_t fail_time;
	unsigned stat;
	int err;

	/*
	 * Now wait for the CPU to actually offline. Without doing this that
	 * offlining may race with one or more of:
	 *
	 *   - Onlining the CPU again.
	 *   - Powering down the core if another VPE within it is offlined.
	 *   - A sibling VPE entering a non-coherent state.
	 *
	 * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
	 * with which we could race, so do nothing.
	 */
	if (cpu_death == CPU_DEATH_POWER) {
		/*
		 * Wait for the core to enter a powered down or clock gated
		 * state, the latter happening when a JTAG probe is connected
		 * in which case the CPC will refuse to power down the core.
		 */
		fail_time = ktime_add_ms(ktime_get(), 2000);
		do {
			mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			mips_cpc_lock_other(core);
			stat = read_cpc_co_stat_conf();
			stat &= CPC_Cx_STAT_CONF_SEQSTATE;
			stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
			mips_cpc_unlock_other();
			mips_cm_unlock_other();

			if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
			    stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
				break;

			/*
			 * The core ought to have powered down, but didn't &
			 * now we don't really know what state it's in. It's
			 * likely that its _pwr_up pin has been wired to logic
			 * 1 & it powered back up as soon as we powered it
			 * down...
			 *
			 * The best we can do is warn the user & continue in
			 * the hope that the core is doing nothing harmful &
			 * might behave properly if we online it later.
			 */
			if (WARN(ktime_after(ktime_get(), fail_time),
				 "CPU%u hasn't powered down, seq. state %u\n",
				 cpu, stat))
				break;
		} while (1);

		/* Indicate the core is powered off */
		bitmap_clear(core_power, core, 1);
	} else if (cpu_has_mipsmt) {
		/*
		 * Have a CPU with access to the offlined CPUs registers wait
		 * for its TC to halt.
		 */
		err = smp_call_function_single(cpu_death_sibling,
					       wait_for_sibling_halt,
					       (void *)(unsigned long)cpu, 1);
		if (err)
			panic("Failed to call remote sibling CPU\n");
	} else if (cpu_has_vp) {
		do {
			mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
			stat = read_cpc_co_vp_running();
			mips_cm_unlock_other();
		} while (stat & (1 << vpe_id));
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

static const struct plat_smp_ops cps_smp_ops = {
	.smp_setup		= cps_smp_setup,
	.prepare_cpus		= cps_prepare_cpus,
	.boot_secondary		= cps_boot_secondary,
	.init_secondary		= cps_init_secondary,
	.smp_finish		= cps_smp_finish,
	.send_ipi_single	= mips_smp_send_ipi_single,
	.send_ipi_mask		= mips_smp_send_ipi_mask,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable		= cps_cpu_disable,
	.cpu_die		= cps_cpu_die,
	.cleanup_dead_cpu	= cps_cleanup_dead_cpu,
#endif
#ifdef CONFIG_KEXEC_CORE
	.kexec_nonboot_cpu	= cps_kexec_nonboot_cpu,
#endif
};

bool mips_cps_smp_in_use(void)
{
	extern const struct plat_smp_ops *mp_ops;
	return mp_ops == &cps_smp_ops;
}

int register_cps_smp_ops(void)
{
	if (!mips_cm_present()) {
		pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
		return -ENODEV;
	}

	/* check we have a GIC - we need one for IPIs */
	if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
		pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
		return -ENODEV;
	}

	register_smp_ops(&cps_smp_ops);
	return 0;
}
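
Code elsewhere can test whether these ops were actually selected via
mips_cps_smp_in_use(). A hedged sketch of such a guard; the surrounding
function is an illustrative assumption:

static int __init my_cps_feature_init(void)
{
	/* Bail out unless the CPS SMP ops registered above are in use. */
	if (!mips_cps_smp_in_use())
		return -ENODEV;

	return 0;
}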