arch/mips/kernel/smp.c (v6.2)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 *
  4 * Copyright (C) 2000, 2001 Kanoj Sarcar
  5 * Copyright (C) 2000, 2001 Ralf Baechle
  6 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
  7 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
  8 */
  9#include <linux/cache.h>
 10#include <linux/delay.h>
 11#include <linux/init.h>
 12#include <linux/interrupt.h>
 13#include <linux/smp.h>
 14#include <linux/spinlock.h>
 15#include <linux/threads.h>
 16#include <linux/export.h>
 17#include <linux/time.h>
 18#include <linux/timex.h>
 19#include <linux/sched/mm.h>
 20#include <linux/cpumask.h>
 21#include <linux/cpu.h>
 22#include <linux/err.h>
 23#include <linux/ftrace.h>
 24#include <linux/irqdomain.h>
 25#include <linux/of.h>
 26#include <linux/of_irq.h>
 27
 28#include <linux/atomic.h>
 29#include <asm/cpu.h>
 30#include <asm/ginvt.h>
 31#include <asm/processor.h>
 32#include <asm/idle.h>
 33#include <asm/r4k-timer.h>
 34#include <asm/mips-cps.h>
 35#include <asm/mmu_context.h>
 36#include <asm/time.h>
 37#include <asm/setup.h>
 38#include <asm/maar.h>
 39
 40int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
 41EXPORT_SYMBOL(__cpu_number_map);
 42
 43int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
 44EXPORT_SYMBOL(__cpu_logical_map);
 45
 46/* Number of TCs (or siblings in Intel speak) per CPU core */
 47int smp_num_siblings = 1;
 48EXPORT_SYMBOL(smp_num_siblings);
 49
 50/* representing the TCs (or siblings in Intel speak) of each logical CPU */
 51cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
 52EXPORT_SYMBOL(cpu_sibling_map);
 53
 54/* representing the core map of multi-core chips of each logical CPU */
 55cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 56EXPORT_SYMBOL(cpu_core_map);
 57
 58static DECLARE_COMPLETION(cpu_starting);
 59static DECLARE_COMPLETION(cpu_running);
 60
 61/*
 62 * A logical cpu mask containing only one VPE per core to
 63 * reduce the number of IPIs on large MT systems.
 64 */
 65cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
 66EXPORT_SYMBOL(cpu_foreign_map);
 67
 68/* representing cpus for which sibling maps can be computed */
 69static cpumask_t cpu_sibling_setup_map;
 70
 71/* representing cpus for which core maps can be computed */
 72static cpumask_t cpu_core_setup_map;
 73
 74cpumask_t cpu_coherent_mask;
 75
 76#ifdef CONFIG_GENERIC_IRQ_IPI
 77static struct irq_desc *call_desc;
 78static struct irq_desc *sched_desc;
 79#endif
 80
 81static inline void set_cpu_sibling_map(int cpu)
 82{
 83	int i;
 84
 85	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
 86
 87	if (smp_num_siblings > 1) {
 88		for_each_cpu(i, &cpu_sibling_setup_map) {
 89			if (cpus_are_siblings(cpu, i)) {
 90				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
 91				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
 92			}
 93		}
 94	} else
 95		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
 96}
 97
 98static inline void set_cpu_core_map(int cpu)
 99{
100	int i;
101
102	cpumask_set_cpu(cpu, &cpu_core_setup_map);
103
104	for_each_cpu(i, &cpu_core_setup_map) {
105		if (cpu_data[cpu].package == cpu_data[i].package) {
106			cpumask_set_cpu(i, &cpu_core_map[cpu]);
107			cpumask_set_cpu(cpu, &cpu_core_map[i]);
108		}
109	}
110}
111
112/*
113 * Calculate a new cpu_foreign_map mask whenever a
114 * new cpu appears or disappears.
115 */
116void calculate_cpu_foreign_map(void)
117{
118	int i, k, core_present;
119	cpumask_t temp_foreign_map;
120
121	/* Re-calculate the mask */
122	cpumask_clear(&temp_foreign_map);
123	for_each_online_cpu(i) {
124		core_present = 0;
125		for_each_cpu(k, &temp_foreign_map)
126			if (cpus_are_siblings(i, k))
127				core_present = 1;
128		if (!core_present)
129			cpumask_set_cpu(i, &temp_foreign_map);
130	}
131
132	for_each_online_cpu(i)
133		cpumask_andnot(&cpu_foreign_map[i],
134			       &temp_foreign_map, &cpu_sibling_map[i]);
135}
136
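As a worked illustration of the loop above: on a hypothetical system with two cores of two VPEs each (CPUs 0/1 on one core, CPUs 2/3 on the other, all online), temp_foreign_map ends up holding one representative per core, e.g. {0, 2}; subtracting each CPU's own siblings then yields cpu_foreign_map[0] = cpu_foreign_map[1] = {2} and cpu_foreign_map[2] = cpu_foreign_map[3] = {0}, so an IPI aimed at "foreign" CPUs only ever hits a single VPE per remote core.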
137const struct plat_smp_ops *mp_ops;
138EXPORT_SYMBOL(mp_ops);
139
140void register_smp_ops(const struct plat_smp_ops *ops)
141{
142	if (mp_ops)
143		printk(KERN_WARNING "Overriding previously set SMP ops\n");
144
145	mp_ops = ops;
146}
147
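The mp_ops hooks registered here are what the generic code below calls into: prepare_cpus() from smp_prepare_cpus(), boot_secondary() from __cpu_up(), init_secondary() and smp_finish() from start_secondary(). A minimal, purely illustrative registration for a hypothetical platform (only the hooks this file exercises; the my_plat_* names are invented) might look like:

/* Illustrative sketch only, not from any in-tree port. */
static void my_plat_init_secondary(void) { /* unmask this CPU's IPI/timer lines */ }
static void my_plat_smp_finish(void) { local_irq_enable(); } /* see comment in start_secondary() */
static void my_plat_prepare_cpus(unsigned int max_cpus) { }

static int my_plat_boot_secondary(int cpu, struct task_struct *idle)
{
	/* release the physical CPU so it enters start_secondary() */
	return 0;
}

static const struct plat_smp_ops my_plat_smp_ops = {
	.send_ipi_single = mips_smp_send_ipi_single,
	.send_ipi_mask   = mips_smp_send_ipi_mask,
	.prepare_cpus    = my_plat_prepare_cpus,
	.boot_secondary  = my_plat_boot_secondary,
	.init_secondary  = my_plat_init_secondary,
	.smp_finish      = my_plat_smp_finish,
};

void __init my_plat_smp_setup(void)
{
	register_smp_ops(&my_plat_smp_ops);
}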
148#ifdef CONFIG_GENERIC_IRQ_IPI
149void mips_smp_send_ipi_single(int cpu, unsigned int action)
150{
151	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
152}
153
154void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
155{
156	unsigned long flags;
157	unsigned int core;
158	int cpu;
159
160	local_irq_save(flags);
161
162	switch (action) {
163	case SMP_CALL_FUNCTION:
164		__ipi_send_mask(call_desc, mask);
165		break;
166
167	case SMP_RESCHEDULE_YOURSELF:
168		__ipi_send_mask(sched_desc, mask);
169		break;
170
171	default:
172		BUG();
173	}
174
175	if (mips_cpc_present()) {
176		for_each_cpu(cpu, mask) {
177			if (cpus_are_siblings(cpu, smp_processor_id()))
178				continue;
179
180			core = cpu_core(&cpu_data[cpu]);
181
182			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
183				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
184				mips_cpc_lock_other(core);
185				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
186				mips_cpc_unlock_other();
187				mips_cm_unlock_other();
188			}
189		}
190	}
191
192	local_irq_restore(flags);
193}
194
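For illustration only, a caller holding a cpumask would use the helper above directly; a hedged sketch (the function name is invented) that pokes every other online CPU with a reschedule IPI:

static void example_kick_other_cpus(void)
{
	struct cpumask others;
	int cpu = get_cpu();	/* pin ourselves while reading our own id */

	cpumask_copy(&others, cpu_online_mask);
	cpumask_clear_cpu(cpu, &others);
	mips_smp_send_ipi_mask(&others, SMP_RESCHEDULE_YOURSELF);
	put_cpu();
}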
195
196static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
197{
198	scheduler_ipi();
199
200	return IRQ_HANDLED;
201}
202
203static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
204{
205	generic_smp_call_function_interrupt();
206
207	return IRQ_HANDLED;
208}
209
210static void smp_ipi_init_one(unsigned int virq, const char *name,
211			     irq_handler_t handler)
212{
213	int ret;
214
215	irq_set_handler(virq, handle_percpu_irq);
216	ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
217	BUG_ON(ret);
218}
219
220static unsigned int call_virq, sched_virq;
221
222int mips_smp_ipi_allocate(const struct cpumask *mask)
223{
224	int virq;
225	struct irq_domain *ipidomain;
226	struct device_node *node;
227
228	node = of_irq_find_parent(of_root);
229	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
230
231	/*
232	 * Some platforms have half DT setup. So if we found irq node but
233	 * didn't find an ipidomain, try to search for one that is not in the
234	 * DT.
235	 */
236	if (node && !ipidomain)
237		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
238
239	/*
240	 * There are systems which use IPI IRQ domains, but only have one
241	 * registered when some runtime condition is met. For example a Malta
242	 * kernel may include support for GIC & CPU interrupt controller IPI
243	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
244	 * neither will be supported or registered.
245	 *
246	 * We only have a problem if we're actually using multiple CPUs so fail
247	 * loudly if that is the case. Otherwise simply return, skipping IPI
248	 * setup, if we're running with only a single CPU.
249	 */
250	if (!ipidomain) {
251		BUG_ON(num_present_cpus() > 1);
252		return 0;
253	}
254
255	virq = irq_reserve_ipi(ipidomain, mask);
256	BUG_ON(!virq);
257	if (!call_virq)
258		call_virq = virq;
259
260	virq = irq_reserve_ipi(ipidomain, mask);
261	BUG_ON(!virq);
262	if (!sched_virq)
263		sched_virq = virq;
264
265	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
266		int cpu;
267
268		for_each_cpu(cpu, mask) {
269			smp_ipi_init_one(call_virq + cpu, "IPI call",
270					 ipi_call_interrupt);
271			smp_ipi_init_one(sched_virq + cpu, "IPI resched",
272					 ipi_resched_interrupt);
273		}
274	} else {
275		smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
276		smp_ipi_init_one(sched_virq, "IPI resched",
277				 ipi_resched_interrupt);
278	}
279
280	return 0;
281}
282
283int mips_smp_ipi_free(const struct cpumask *mask)
284{
285	struct irq_domain *ipidomain;
286	struct device_node *node;
287
288	node = of_irq_find_parent(of_root);
289	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
290
291	/*
292	 * Some platforms have half DT setup. So if we found irq node but
293	 * didn't find an ipidomain, try to search for one that is not in the
294	 * DT.
295	 */
296	if (node && !ipidomain)
297		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
298
299	BUG_ON(!ipidomain);
300
301	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
302		int cpu;
303
304		for_each_cpu(cpu, mask) {
305			free_irq(call_virq + cpu, NULL);
306			free_irq(sched_virq + cpu, NULL);
307		}
308	}
309	irq_destroy_ipi(call_virq, mask);
310	irq_destroy_ipi(sched_virq, mask);
311	return 0;
312}
313
314
315static int __init mips_smp_ipi_init(void)
316{
317	if (num_possible_cpus() == 1)
318		return 0;
319
320	mips_smp_ipi_allocate(cpu_possible_mask);
321
322	call_desc = irq_to_desc(call_virq);
323	sched_desc = irq_to_desc(sched_virq);
324
325	return 0;
326}
327early_initcall(mips_smp_ipi_init);
328#endif
329
330/*
331 * First C code run on the secondary CPUs after being started up by
332 * the master.
333 */
334asmlinkage void start_secondary(void)
335{
336	unsigned int cpu;
337
338	cpu_probe();
339	per_cpu_trap_init(false);
340	mips_clockevent_init();
341	mp_ops->init_secondary();
342	cpu_report();
343	maar_init();
344
345	/*
346	 * XXX parity protection should be folded in here when it's converted
347	 * to an option instead of something based on .cputype
348	 */
349
350	calibrate_delay();
351	cpu = smp_processor_id();
352	cpu_data[cpu].udelay_val = loops_per_jiffy;
353
354	set_cpu_sibling_map(cpu);
355	set_cpu_core_map(cpu);
356
357	cpumask_set_cpu(cpu, &cpu_coherent_mask);
358	notify_cpu_starting(cpu);
359
360	/* Notify boot CPU that we're starting & ready to sync counters */
361	complete(&cpu_starting);
362
363	synchronise_count_slave(cpu);
364
365	/* The CPU is running and counters synchronised, now mark it online */
366	set_cpu_online(cpu, true);
367
368	calculate_cpu_foreign_map();
369
370	/*
371	 * Notify boot CPU that we're up & online and it can safely return
372	 * from __cpu_up
373	 */
374	complete(&cpu_running);
375
376	/*
377	 * irq will be enabled in ->smp_finish(), enabling it too early
378	 * is dangerous.
379	 */
380	WARN_ON_ONCE(!irqs_disabled());
381	mp_ops->smp_finish();
382
383	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
384}
385
386static void stop_this_cpu(void *dummy)
387{
388	/*
389	 * Remove this CPU:
390	 */
391
392	set_cpu_online(smp_processor_id(), false);
393	calculate_cpu_foreign_map();
394	local_irq_disable();
395	while (1);
396}
397
398void smp_send_stop(void)
399{
400	smp_call_function(stop_this_cpu, NULL, 0);
401}
402
403void __init smp_cpus_done(unsigned int max_cpus)
404{
405}
406
407/* called from main before smp_init() */
408void __init smp_prepare_cpus(unsigned int max_cpus)
409{
410	init_new_context(current, &init_mm);
411	current_thread_info()->cpu = 0;
412	mp_ops->prepare_cpus(max_cpus);
413	set_cpu_sibling_map(0);
414	set_cpu_core_map(0);
415	calculate_cpu_foreign_map();
416#ifndef CONFIG_HOTPLUG_CPU
417	init_cpu_present(cpu_possible_mask);
418#endif
419	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
420}
421
422/* preload SMP state for boot cpu */
423void smp_prepare_boot_cpu(void)
424{
425	if (mp_ops->prepare_boot_cpu)
426		mp_ops->prepare_boot_cpu();
427	set_cpu_possible(0, true);
428	set_cpu_online(0, true);
429}
430
431int __cpu_up(unsigned int cpu, struct task_struct *tidle)
432{
433	int err;
434
435	err = mp_ops->boot_secondary(cpu, tidle);
436	if (err)
437		return err;
438
439	/* Wait for CPU to start and be ready to sync counters */
440	if (!wait_for_completion_timeout(&cpu_starting,
441					 msecs_to_jiffies(1000))) {
442		pr_crit("CPU%u: failed to start\n", cpu);
443		return -EIO;
444	}
445
446	synchronise_count_master(cpu);
447
448	/* Wait for CPU to finish startup & mark itself online before return */
449	wait_for_completion(&cpu_running);
450	return 0;
451}
452
453/* Not really SMP stuff ... */
454int setup_profiling_timer(unsigned int multiplier)
455{
456	return 0;
457}
458
459static void flush_tlb_all_ipi(void *info)
460{
461	local_flush_tlb_all();
462}
463
464void flush_tlb_all(void)
465{
466	if (cpu_has_mmid) {
467		htw_stop();
468		ginvt_full();
469		sync_ginv();
470		instruction_hazard();
471		htw_start();
472		return;
473	}
474
475	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
476}
477
478static void flush_tlb_mm_ipi(void *mm)
479{
480	drop_mmu_context((struct mm_struct *)mm);
481}
482
483/*
484 * Special Variant of smp_call_function for use by TLB functions:
485 *
486 *  o No return value
487 *  o collapses to normal function call on UP kernels
488 *  o collapses to normal function call on systems with a single shared
489 *    primary cache.
490 */
491static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
492{
493	smp_call_function(func, info, 1);
494}
495
496static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
497{
498	preempt_disable();
499
500	smp_on_other_tlbs(func, info);
501	func(info);
502
503	preempt_enable();
504}
505
506/*
507 * The following tlb flush calls are invoked when old translations are
508 * being torn down, or pte attributes are changing. For single threaded
509 * address spaces, a new context is obtained on the current cpu, and tlb
510 * context on other cpus are invalidated to force a new context allocation
511 * at switch_mm time, should the mm ever be used on other cpus. For
512 * multithreaded address spaces, inter-CPU interrupts have to be sent.
513 * Another case where inter-CPU interrupts are required is when the target
514 * mm might be active on another cpu (eg debuggers doing the flushes on
515 * behalf of debuggees, kswapd stealing pages from another process etc).
516 * Kanoj 07/00.
517 */
518
519void flush_tlb_mm(struct mm_struct *mm)
520{
521	if (!mm)
522		return;
523
524	if (atomic_read(&mm->mm_users) == 0)
525		return;		/* happens as a result of exit_mmap() */
526
527	preempt_disable();
528
529	if (cpu_has_mmid) {
530		/*
531		 * No need to worry about other CPUs - the ginvt in
532		 * drop_mmu_context() will be globalized.
533		 */
534	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
535		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
536	} else {
537		unsigned int cpu;
538
539		for_each_online_cpu(cpu) {
540			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
541				set_cpu_context(cpu, mm, 0);
542		}
543	}
544	drop_mmu_context(mm);
545
546	preempt_enable();
547}
548
549struct flush_tlb_data {
550	struct vm_area_struct *vma;
551	unsigned long addr1;
552	unsigned long addr2;
553};
554
555static void flush_tlb_range_ipi(void *info)
556{
557	struct flush_tlb_data *fd = info;
558
559	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
560}
561
562void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
563{
564	struct mm_struct *mm = vma->vm_mm;
565	unsigned long addr;
566	u32 old_mmid;
567
568	preempt_disable();
569	if (cpu_has_mmid) {
570		htw_stop();
571		old_mmid = read_c0_memorymapid();
572		write_c0_memorymapid(cpu_asid(0, mm));
573		mtc0_tlbw_hazard();
574		addr = round_down(start, PAGE_SIZE * 2);
575		end = round_up(end, PAGE_SIZE * 2);
576		do {
577			ginvt_va_mmid(addr);
578			sync_ginv();
579			addr += PAGE_SIZE * 2;
580		} while (addr < end);
581		write_c0_memorymapid(old_mmid);
582		instruction_hazard();
583		htw_start();
584	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
585		struct flush_tlb_data fd = {
586			.vma = vma,
587			.addr1 = start,
588			.addr2 = end,
589		};
590
591		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
592		local_flush_tlb_range(vma, start, end);
593	} else {
594		unsigned int cpu;
595		int exec = vma->vm_flags & VM_EXEC;
596
597		for_each_online_cpu(cpu) {
598			/*
599			 * flush_cache_range() will only fully flush icache if
600			 * the VMA is executable, otherwise we must invalidate
601			 * ASID without it appearing to has_valid_asid() as if
602			 * mm has been completely unused by that CPU.
603			 */
604			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
605				set_cpu_context(cpu, mm, !exec);
606		}
607		local_flush_tlb_range(vma, start, end);
608	}
609	preempt_enable();
610}
611
612static void flush_tlb_kernel_range_ipi(void *info)
613{
614	struct flush_tlb_data *fd = info;
615
616	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
617}
618
619void flush_tlb_kernel_range(unsigned long start, unsigned long end)
620{
621	struct flush_tlb_data fd = {
622		.addr1 = start,
623		.addr2 = end,
624	};
625
626	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
627}
628
629static void flush_tlb_page_ipi(void *info)
630{
631	struct flush_tlb_data *fd = info;
632
633	local_flush_tlb_page(fd->vma, fd->addr1);
634}
635
636void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
637{
638	u32 old_mmid;
639
640	preempt_disable();
641	if (cpu_has_mmid) {
642		htw_stop();
643		old_mmid = read_c0_memorymapid();
644		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
645		mtc0_tlbw_hazard();
646		ginvt_va_mmid(page);
647		sync_ginv();
648		write_c0_memorymapid(old_mmid);
649		instruction_hazard();
650		htw_start();
651	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
652		   (current->mm != vma->vm_mm)) {
653		struct flush_tlb_data fd = {
654			.vma = vma,
655			.addr1 = page,
656		};
657
658		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
659		local_flush_tlb_page(vma, page);
660	} else {
661		unsigned int cpu;
662
663		for_each_online_cpu(cpu) {
664			/*
665			 * flush_cache_page() only does partial flushes, so
666			 * invalidate ASID without it appearing to
667			 * has_valid_asid() as if mm has been completely unused
668			 * by that CPU.
669			 */
670			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
671				set_cpu_context(cpu, vma->vm_mm, 1);
672		}
673		local_flush_tlb_page(vma, page);
674	}
675	preempt_enable();
676}
677
678static void flush_tlb_one_ipi(void *info)
679{
680	unsigned long vaddr = (unsigned long) info;
681
682	local_flush_tlb_one(vaddr);
683}
684
685void flush_tlb_one(unsigned long vaddr)
686{
687	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
688}
689
690EXPORT_SYMBOL(flush_tlb_page);
691EXPORT_SYMBOL(flush_tlb_one);
692
693#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
694
695static void tick_broadcast_callee(void *info)
696{
697	tick_receive_broadcast();
698}
699
700static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
701	CSD_INIT(tick_broadcast_callee, NULL);
702
703void tick_broadcast(const struct cpumask *mask)
704{
705	call_single_data_t *csd;
706	int cpu;
707
708	for_each_cpu(cpu, mask) {
709		csd = &per_cpu(tick_broadcast_csd, cpu);
710		smp_call_function_single_async(cpu, csd);
711	}
712}
713
714#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
arch/mips/kernel/smp.c (v6.8)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 *
  4 * Copyright (C) 2000, 2001 Kanoj Sarcar
  5 * Copyright (C) 2000, 2001 Ralf Baechle
  6 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
  7 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
  8 */
  9#include <linux/cache.h>
 10#include <linux/delay.h>
 11#include <linux/init.h>
 12#include <linux/interrupt.h>
 13#include <linux/profile.h>
 14#include <linux/smp.h>
 15#include <linux/spinlock.h>
 16#include <linux/threads.h>
 17#include <linux/export.h>
 18#include <linux/time.h>
 19#include <linux/timex.h>
 20#include <linux/sched/mm.h>
 21#include <linux/cpumask.h>
 22#include <linux/cpu.h>
 23#include <linux/err.h>
 24#include <linux/ftrace.h>
 25#include <linux/irqdomain.h>
 26#include <linux/of.h>
 27#include <linux/of_irq.h>
 28
 29#include <linux/atomic.h>
 30#include <asm/cpu.h>
 31#include <asm/ginvt.h>
 32#include <asm/processor.h>
 33#include <asm/idle.h>
 34#include <asm/r4k-timer.h>
 35#include <asm/mips-cps.h>
 36#include <asm/mmu_context.h>
 37#include <asm/time.h>
 38#include <asm/setup.h>
 39#include <asm/maar.h>
 40
 41int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
 42EXPORT_SYMBOL(__cpu_number_map);
 43
 44int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
 45EXPORT_SYMBOL(__cpu_logical_map);
 46
 47/* Number of TCs (or siblings in Intel speak) per CPU core */
 48int smp_num_siblings = 1;
 49EXPORT_SYMBOL(smp_num_siblings);
 50
 51/* representing the TCs (or siblings in Intel speak) of each logical CPU */
 52cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
 53EXPORT_SYMBOL(cpu_sibling_map);
 54
 55/* representing the core map of multi-core chips of each logical CPU */
 56cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 57EXPORT_SYMBOL(cpu_core_map);
 58
 59static DECLARE_COMPLETION(cpu_starting);
 60static DECLARE_COMPLETION(cpu_running);
 61
 62/*
 63 * A logical cpu mask containing only one VPE per core to
 64 * reduce the number of IPIs on large MT systems.
 65 */
 66cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
 67EXPORT_SYMBOL(cpu_foreign_map);
 68
 69/* representing cpus for which sibling maps can be computed */
 70static cpumask_t cpu_sibling_setup_map;
 71
 72/* representing cpus for which core maps can be computed */
 73static cpumask_t cpu_core_setup_map;
 74
 75cpumask_t cpu_coherent_mask;
 76
 77unsigned int smp_max_threads __initdata = UINT_MAX;
 78
 79static int __init early_nosmt(char *s)
 80{
 81	smp_max_threads = 1;
 82	return 0;
 83}
 84early_param("nosmt", early_nosmt);
 85
 86static int __init early_smt(char *s)
 87{
 88	get_option(&s, &smp_max_threads);
 89	/* Ensure at least one thread is available */
 90	smp_max_threads = clamp_val(smp_max_threads, 1U, UINT_MAX);
 91	return 0;
 92}
 93early_param("smt", early_smt);
 94
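These two early parameters give the command line control over SMT bring-up: nosmt forces smp_max_threads to 1, while smt=N caps it at N (clamped so at least one thread remains); the value is consumed by the platform SMP bring-up code when deciding how many sibling threads (VPEs) to start. Typical usage on the kernel command line, for example:

	nosmt          (bring up only one thread)
	smt=2          (bring up at most two threads)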
 95#ifdef CONFIG_GENERIC_IRQ_IPI
 96static struct irq_desc *call_desc;
 97static struct irq_desc *sched_desc;
 98#endif
 99
100static inline void set_cpu_sibling_map(int cpu)
101{
102	int i;
103
104	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
105
106	if (smp_num_siblings > 1) {
107		for_each_cpu(i, &cpu_sibling_setup_map) {
108			if (cpus_are_siblings(cpu, i)) {
109				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
110				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
111			}
112		}
113	} else
114		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
115}
116
117static inline void set_cpu_core_map(int cpu)
118{
119	int i;
120
121	cpumask_set_cpu(cpu, &cpu_core_setup_map);
122
123	for_each_cpu(i, &cpu_core_setup_map) {
124		if (cpu_data[cpu].package == cpu_data[i].package) {
125			cpumask_set_cpu(i, &cpu_core_map[cpu]);
126			cpumask_set_cpu(cpu, &cpu_core_map[i]);
127		}
128	}
129}
130
131/*
132 * Calculate a new cpu_foreign_map mask whenever a
133 * new cpu appears or disappears.
134 */
135void calculate_cpu_foreign_map(void)
136{
137	int i, k, core_present;
138	cpumask_t temp_foreign_map;
139
140	/* Re-calculate the mask */
141	cpumask_clear(&temp_foreign_map);
142	for_each_online_cpu(i) {
143		core_present = 0;
144		for_each_cpu(k, &temp_foreign_map)
145			if (cpus_are_siblings(i, k))
146				core_present = 1;
147		if (!core_present)
148			cpumask_set_cpu(i, &temp_foreign_map);
149	}
150
151	for_each_online_cpu(i)
152		cpumask_andnot(&cpu_foreign_map[i],
153			       &temp_foreign_map, &cpu_sibling_map[i]);
154}
155
156const struct plat_smp_ops *mp_ops;
157EXPORT_SYMBOL(mp_ops);
158
159void register_smp_ops(const struct plat_smp_ops *ops)
160{
161	if (mp_ops)
162		printk(KERN_WARNING "Overriding previously set SMP ops\n");
163
164	mp_ops = ops;
165}
166
167#ifdef CONFIG_GENERIC_IRQ_IPI
168void mips_smp_send_ipi_single(int cpu, unsigned int action)
169{
170	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
171}
172
173void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
174{
175	unsigned long flags;
176	unsigned int core;
177	int cpu;
178
179	local_irq_save(flags);
180
181	switch (action) {
182	case SMP_CALL_FUNCTION:
183		__ipi_send_mask(call_desc, mask);
184		break;
185
186	case SMP_RESCHEDULE_YOURSELF:
187		__ipi_send_mask(sched_desc, mask);
188		break;
189
190	default:
191		BUG();
192	}
193
194	if (mips_cpc_present()) {
195		for_each_cpu(cpu, mask) {
196			if (cpus_are_siblings(cpu, smp_processor_id()))
197				continue;
198
199			core = cpu_core(&cpu_data[cpu]);
200
201			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
202				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
203				mips_cpc_lock_other(core);
204				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
205				mips_cpc_unlock_other();
206				mips_cm_unlock_other();
207			}
208		}
209	}
210
211	local_irq_restore(flags);
212}
213
214
215static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
216{
217	scheduler_ipi();
218
219	return IRQ_HANDLED;
220}
221
222static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
223{
224	generic_smp_call_function_interrupt();
225
226	return IRQ_HANDLED;
227}
228
229static void smp_ipi_init_one(unsigned int virq, const char *name,
230			     irq_handler_t handler)
231{
232	int ret;
233
234	irq_set_handler(virq, handle_percpu_irq);
235	ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
236	BUG_ON(ret);
237}
238
239static unsigned int call_virq, sched_virq;
240
241int mips_smp_ipi_allocate(const struct cpumask *mask)
242{
243	int virq;
244	struct irq_domain *ipidomain;
245	struct device_node *node;
246
247	node = of_irq_find_parent(of_root);
248	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
249
250	/*
251	 * Some platforms have half DT setup. So if we found irq node but
252	 * didn't find an ipidomain, try to search for one that is not in the
253	 * DT.
254	 */
255	if (node && !ipidomain)
256		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
257
258	/*
259	 * There are systems which use IPI IRQ domains, but only have one
260	 * registered when some runtime condition is met. For example a Malta
261	 * kernel may include support for GIC & CPU interrupt controller IPI
262	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
263	 * neither will be supported or registered.
264	 *
265	 * We only have a problem if we're actually using multiple CPUs so fail
266	 * loudly if that is the case. Otherwise simply return, skipping IPI
267	 * setup, if we're running with only a single CPU.
268	 */
269	if (!ipidomain) {
270		BUG_ON(num_present_cpus() > 1);
271		return 0;
272	}
273
274	virq = irq_reserve_ipi(ipidomain, mask);
275	BUG_ON(!virq);
276	if (!call_virq)
277		call_virq = virq;
278
279	virq = irq_reserve_ipi(ipidomain, mask);
280	BUG_ON(!virq);
281	if (!sched_virq)
282		sched_virq = virq;
283
284	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
285		int cpu;
286
287		for_each_cpu(cpu, mask) {
288			smp_ipi_init_one(call_virq + cpu, "IPI call",
289					 ipi_call_interrupt);
290			smp_ipi_init_one(sched_virq + cpu, "IPI resched",
291					 ipi_resched_interrupt);
292		}
293	} else {
294		smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
295		smp_ipi_init_one(sched_virq, "IPI resched",
296				 ipi_resched_interrupt);
297	}
298
299	return 0;
300}
301
302int mips_smp_ipi_free(const struct cpumask *mask)
303{
304	struct irq_domain *ipidomain;
305	struct device_node *node;
306
307	node = of_irq_find_parent(of_root);
308	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
309
310	/*
311	 * Some platforms have half DT setup. So if we found irq node but
312	 * didn't find an ipidomain, try to search for one that is not in the
313	 * DT.
314	 */
315	if (node && !ipidomain)
316		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
317
318	BUG_ON(!ipidomain);
319
320	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
321		int cpu;
322
323		for_each_cpu(cpu, mask) {
324			free_irq(call_virq + cpu, NULL);
325			free_irq(sched_virq + cpu, NULL);
326		}
327	}
328	irq_destroy_ipi(call_virq, mask);
329	irq_destroy_ipi(sched_virq, mask);
330	return 0;
331}
332
333
334static int __init mips_smp_ipi_init(void)
335{
336	if (num_possible_cpus() == 1)
337		return 0;
338
339	mips_smp_ipi_allocate(cpu_possible_mask);
340
341	call_desc = irq_to_desc(call_virq);
342	sched_desc = irq_to_desc(sched_virq);
343
344	return 0;
345}
346early_initcall(mips_smp_ipi_init);
347#endif
348
349/*
350 * First C code run on the secondary CPUs after being started up by
351 * the master.
352 */
353asmlinkage void start_secondary(void)
354{
355	unsigned int cpu = raw_smp_processor_id();
356
357	cpu_probe();
358	per_cpu_trap_init(false);
359	rcutree_report_cpu_starting(cpu);
360	mips_clockevent_init();
361	mp_ops->init_secondary();
362	cpu_report();
363	maar_init();
364
365	/*
366	 * XXX parity protection should be folded in here when it's converted
367	 * to an option instead of something based on .cputype
368	 */
369
370	calibrate_delay();
371	cpu_data[cpu].udelay_val = loops_per_jiffy;
372
373	set_cpu_sibling_map(cpu);
374	set_cpu_core_map(cpu);
375
376	cpumask_set_cpu(cpu, &cpu_coherent_mask);
377	notify_cpu_starting(cpu);
378
379	/* Notify boot CPU that we're starting & ready to sync counters */
380	complete(&cpu_starting);
381
382	synchronise_count_slave(cpu);
383
384	/* The CPU is running and counters synchronised, now mark it online */
385	set_cpu_online(cpu, true);
386
387	calculate_cpu_foreign_map();
388
389	/*
390	 * Notify boot CPU that we're up & online and it can safely return
391	 * from __cpu_up
392	 */
393	complete(&cpu_running);
394
395	/*
396	 * irq will be enabled in ->smp_finish(), enabling it too early
397	 * is dangerous.
398	 */
399	WARN_ON_ONCE(!irqs_disabled());
400	mp_ops->smp_finish();
401
402	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
403}
404
405static void stop_this_cpu(void *dummy)
406{
407	/*
408	 * Remove this CPU:
409	 */
410
411	set_cpu_online(smp_processor_id(), false);
412	calculate_cpu_foreign_map();
413	local_irq_disable();
414	while (1);
415}
416
417void smp_send_stop(void)
418{
419	smp_call_function(stop_this_cpu, NULL, 0);
420}
421
422void __init smp_cpus_done(unsigned int max_cpus)
423{
424}
425
426/* called from main before smp_init() */
427void __init smp_prepare_cpus(unsigned int max_cpus)
428{
429	init_new_context(current, &init_mm);
430	current_thread_info()->cpu = 0;
431	mp_ops->prepare_cpus(max_cpus);
432	set_cpu_sibling_map(0);
433	set_cpu_core_map(0);
434	calculate_cpu_foreign_map();
435#ifndef CONFIG_HOTPLUG_CPU
436	init_cpu_present(cpu_possible_mask);
437#endif
438	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
439}
440
441/* preload SMP state for boot cpu */
442void smp_prepare_boot_cpu(void)
443{
444	if (mp_ops->prepare_boot_cpu)
445		mp_ops->prepare_boot_cpu();
446	set_cpu_possible(0, true);
447	set_cpu_online(0, true);
448}
449
450int __cpu_up(unsigned int cpu, struct task_struct *tidle)
451{
452	int err;
453
454	err = mp_ops->boot_secondary(cpu, tidle);
455	if (err)
456		return err;
457
458	/* Wait for CPU to start and be ready to sync counters */
459	if (!wait_for_completion_timeout(&cpu_starting,
460					 msecs_to_jiffies(1000))) {
461		pr_crit("CPU%u: failed to start\n", cpu);
462		return -EIO;
463	}
464
465	synchronise_count_master(cpu);
466
467	/* Wait for CPU to finish startup & mark itself online before return */
468	wait_for_completion(&cpu_running);
469	return 0;
470}
471
472#ifdef CONFIG_PROFILING
473/* Not really SMP stuff ... */
474int setup_profiling_timer(unsigned int multiplier)
475{
476	return 0;
477}
478#endif
479
480static void flush_tlb_all_ipi(void *info)
481{
482	local_flush_tlb_all();
483}
484
485void flush_tlb_all(void)
486{
487	if (cpu_has_mmid) {
488		htw_stop();
489		ginvt_full();
490		sync_ginv();
491		instruction_hazard();
492		htw_start();
493		return;
494	}
495
496	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
497}
498
499static void flush_tlb_mm_ipi(void *mm)
500{
501	drop_mmu_context((struct mm_struct *)mm);
502}
503
504/*
505 * Special Variant of smp_call_function for use by TLB functions:
506 *
507 *  o No return value
508 *  o collapses to normal function call on UP kernels
509 *  o collapses to normal function call on systems with a single shared
510 *    primary cache.
511 */
512static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
513{
514	smp_call_function(func, info, 1);
515}
516
517static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
518{
519	preempt_disable();
520
521	smp_on_other_tlbs(func, info);
522	func(info);
523
524	preempt_enable();
525}
526
527/*
528 * The following tlb flush calls are invoked when old translations are
529 * being torn down, or pte attributes are changing. For single threaded
530 * address spaces, a new context is obtained on the current cpu, and tlb
531 * context on other cpus are invalidated to force a new context allocation
532 * at switch_mm time, should the mm ever be used on other cpus. For
533 * multithreaded address spaces, inter-CPU interrupts have to be sent.
534 * Another case where inter-CPU interrupts are required is when the target
535 * mm might be active on another cpu (eg debuggers doing the flushes on
536 * behalf of debuggees, kswapd stealing pages from another process etc).
537 * Kanoj 07/00.
538 */
539
540void flush_tlb_mm(struct mm_struct *mm)
541{
542	if (!mm)
543		return;
544
545	if (atomic_read(&mm->mm_users) == 0)
546		return;		/* happens as a result of exit_mmap() */
547
548	preempt_disable();
549
550	if (cpu_has_mmid) {
551		/*
552		 * No need to worry about other CPUs - the ginvt in
553		 * drop_mmu_context() will be globalized.
554		 */
555	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
556		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
557	} else {
558		unsigned int cpu;
559
560		for_each_online_cpu(cpu) {
561			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
562				set_cpu_context(cpu, mm, 0);
563		}
564	}
565	drop_mmu_context(mm);
566
567	preempt_enable();
568}
569
570struct flush_tlb_data {
571	struct vm_area_struct *vma;
572	unsigned long addr1;
573	unsigned long addr2;
574};
575
576static void flush_tlb_range_ipi(void *info)
577{
578	struct flush_tlb_data *fd = info;
579
580	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
581}
582
583void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
584{
585	struct mm_struct *mm = vma->vm_mm;
586	unsigned long addr;
587	u32 old_mmid;
588
589	preempt_disable();
590	if (cpu_has_mmid) {
591		htw_stop();
592		old_mmid = read_c0_memorymapid();
593		write_c0_memorymapid(cpu_asid(0, mm));
594		mtc0_tlbw_hazard();
595		addr = round_down(start, PAGE_SIZE * 2);
596		end = round_up(end, PAGE_SIZE * 2);
597		do {
598			ginvt_va_mmid(addr);
599			sync_ginv();
600			addr += PAGE_SIZE * 2;
601		} while (addr < end);
602		write_c0_memorymapid(old_mmid);
603		instruction_hazard();
604		htw_start();
605	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
606		struct flush_tlb_data fd = {
607			.vma = vma,
608			.addr1 = start,
609			.addr2 = end,
610		};
611
612		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
613		local_flush_tlb_range(vma, start, end);
614	} else {
615		unsigned int cpu;
616		int exec = vma->vm_flags & VM_EXEC;
617
618		for_each_online_cpu(cpu) {
619			/*
620			 * flush_cache_range() will only fully flush icache if
621			 * the VMA is executable, otherwise we must invalidate
622			 * ASID without it appearing to has_valid_asid() as if
623			 * mm has been completely unused by that CPU.
624			 */
625			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
626				set_cpu_context(cpu, mm, !exec);
627		}
628		local_flush_tlb_range(vma, start, end);
629	}
630	preempt_enable();
631}
632
633static void flush_tlb_kernel_range_ipi(void *info)
634{
635	struct flush_tlb_data *fd = info;
636
637	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
638}
639
640void flush_tlb_kernel_range(unsigned long start, unsigned long end)
641{
642	struct flush_tlb_data fd = {
643		.addr1 = start,
644		.addr2 = end,
645	};
646
647	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
648}
649
650static void flush_tlb_page_ipi(void *info)
651{
652	struct flush_tlb_data *fd = info;
653
654	local_flush_tlb_page(fd->vma, fd->addr1);
655}
656
657void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
658{
659	u32 old_mmid;
660
661	preempt_disable();
662	if (cpu_has_mmid) {
663		htw_stop();
664		old_mmid = read_c0_memorymapid();
665		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
666		mtc0_tlbw_hazard();
667		ginvt_va_mmid(page);
668		sync_ginv();
669		write_c0_memorymapid(old_mmid);
670		instruction_hazard();
671		htw_start();
672	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
673		   (current->mm != vma->vm_mm)) {
674		struct flush_tlb_data fd = {
675			.vma = vma,
676			.addr1 = page,
677		};
678
679		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
680		local_flush_tlb_page(vma, page);
681	} else {
682		unsigned int cpu;
683
684		for_each_online_cpu(cpu) {
685			/*
686			 * flush_cache_page() only does partial flushes, so
687			 * invalidate ASID without it appearing to
688			 * has_valid_asid() as if mm has been completely unused
689			 * by that CPU.
690			 */
691			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
692				set_cpu_context(cpu, vma->vm_mm, 1);
693		}
694		local_flush_tlb_page(vma, page);
695	}
696	preempt_enable();
697}
698
699static void flush_tlb_one_ipi(void *info)
700{
701	unsigned long vaddr = (unsigned long) info;
702
703	local_flush_tlb_one(vaddr);
704}
705
706void flush_tlb_one(unsigned long vaddr)
707{
708	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
709}
710
711EXPORT_SYMBOL(flush_tlb_page);
712EXPORT_SYMBOL(flush_tlb_one);
713
714#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
715void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
716{
717	if (mp_ops->cleanup_dead_cpu)
718		mp_ops->cleanup_dead_cpu(cpu);
719}
720#endif
721
722#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
723
724static void tick_broadcast_callee(void *info)
725{
726	tick_receive_broadcast();
727}
728
729static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
730	CSD_INIT(tick_broadcast_callee, NULL);
731
732void tick_broadcast(const struct cpumask *mask)
733{
734	call_single_data_t *csd;
735	int cpu;
736
737	for_each_cpu(cpu, mask) {
738		csd = &per_cpu(tick_broadcast_csd, cpu);
739		smp_call_function_single_async(cpu, csd);
740	}
741}
742
743#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */