// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];	/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

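/*
 * Completions used for the __cpu_up() <-> start_secondary() handshake:
 * cpu_starting is completed once the secondary has reached C code and is
 * ready to synchronise counters, cpu_running once it has marked itself
 * online.
 */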
static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

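/*
 * CPUs currently believed to be coherent with the rest of the system;
 * see the power-up loop in mips_smp_send_ipi_mask() below.
 */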
cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
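/*
 * Cached irq_desc pointers for the call-function and reschedule IPIs,
 * looked up once in mips_smp_ipi_init() so that __ipi_send_mask() can be
 * used without a descriptor lookup on every IPI.
 */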
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

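/*
 * Platform SMP ops, installed via register_smp_ops() during early setup
 * and used for all machine-specific SMP operations (booting secondaries,
 * sending IPIs, ...).
 */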
const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

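/*
 * Illustrative only: a platform would typically install its ops from its
 * early setup code, e.g. (hypothetical ops structure name):
 *
 *	register_smp_ops(&my_plat_smp_ops);
 */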
void register_smp_ops(const struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

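	/*
	 * If a Cluster Power Controller is present, cores in the target mask
	 * may be powered down and unable to observe the IPI. Keep issuing a
	 * power-up command to each such core until it shows up in
	 * cpu_coherent_mask; CPUs sharing our own core are skipped since they
	 * are necessarily already powered up.
	 */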
	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			if (cpus_are_siblings(cpu, smp_processor_id()))
				continue;

			core = cpu_core(&cpu_data[cpu]);

			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}


static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static void smp_ipi_init_one(unsigned int virq, const char *name,
			     irq_handler_t handler)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
	BUG_ON(ret);
}

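/*
 * Base virqs for the call-function and reschedule IPIs. For IPI domains
 * which provide a distinct virq per CPU, the virq used for a given CPU is
 * the base plus that CPU's number.
 */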
static unsigned int call_virq, sched_virq;

int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have only a partial DT setup, so if we found an irq
	 * node but no matching IPI domain, fall back to searching for an IPI
	 * domain that is not described in the DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which use IPI IRQ domains, but only have one
	 * registered when some runtime condition is met. For example a Malta
	 * kernel may include support for GIC & CPU interrupt controller IPI
	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
	 * neither will be supported or registered.
	 *
	 * We only have a problem if we're actually using multiple CPUs, so
	 * fail loudly if that is the case. Otherwise simply return, skipping
	 * IPI setup, if we're running with only a single CPU.
	 */
	if (!ipidomain) {
		BUG_ON(num_present_cpus() > 1);
		return 0;
	}

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, "IPI call",
					 ipi_call_interrupt);
			smp_ipi_init_one(sched_virq + cpu, "IPI resched",
					 ipi_resched_interrupt);
		}
	} else {
		smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
		smp_ipi_init_one(sched_virq, "IPI resched",
				 ipi_resched_interrupt);
	}

	return 0;
}

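/*
 * Tear down the IPIs set up by mips_smp_ipi_allocate() for the CPUs in
 * @mask, e.g. when those CPUs are taken offline.
 */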
int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have only a partial DT setup, so if we found an irq
	 * node but no matching IPI domain, fall back to searching for an IPI
	 * domain that is not described in the DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			free_irq(call_virq + cpu, NULL);
			free_irq(sched_virq + cpu, NULL);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);
	return 0;
}


static int __init mips_smp_ipi_init(void)
{
	if (num_possible_cpus() == 1)
		return 0;

	mips_smp_ipi_allocate(cpu_possible_mask);

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting & ready to sync counters */
	complete(&cpu_starting);

	synchronise_count_slave(cpu);

	/* The CPU is running and counters synchronised, now mark it online */
	set_cpu_online(cpu, true);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	if (mp_ops->prepare_boot_cpu)
		mp_ops->prepare_boot_cpu();
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
}

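/*
 * Bring up a secondary CPU: kick it via the platform's boot_secondary()
 * hook, wait (with a 1s timeout) for it to reach start_secondary(),
 * synchronise the cycle counters, then wait until it has marked itself
 * online before returning to the generic hotplug code.
 */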
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int err;

	err = mp_ops->boot_secondary(cpu, tidle);
	if (err)
		return err;

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	synchronise_count_master(cpu);

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

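/*
 * With MMIDs the GINVT instruction invalidates TLBs globally across the
 * coherent system, so no cross-CPU IPIs are needed; otherwise fall back
 * to flushing every CPU's TLB via IPI.
 */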
void flush_tlb_all(void)
{
	if (cpu_has_mmid) {
		htw_stop();
		ginvt_full();
		sync_ginv();
		instruction_hazard();
		htw_start();
		return;
	}

	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	drop_mmu_context((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 * o No return value
 * o collapses to normal function call on UP kernels
 * o collapses to normal function call on systems with a single shared
 *   primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-CPU interrupts have to be sent.
 * Another case where inter-CPU interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	if (!mm)
		return;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

	preempt_disable();

	if (cpu_has_mmid) {
		/*
		 * No need to worry about other CPUs - the ginvt in
		 * drop_mmu_context() will be globalized.
		 */
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, 0);
		}
	}
	drop_mmu_context(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, mm));
		mtc0_tlbw_hazard();
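		/*
		 * MIPS TLB entries map an even/odd pair of virtual pages, so
		 * walk the range in double-page steps, invalidating each pair
		 * with a globalized GINVT.
		 */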
		addr = round_down(start, PAGE_SIZE * 2);
		end = round_up(end, PAGE_SIZE * 2);
		do {
			ginvt_va_mmid(addr);
			sync_ginv();
			addr += PAGE_SIZE * 2;
		} while (addr < end);
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
		local_flush_tlb_range(vma, start, end);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush icache if
			 * the VMA is executable, otherwise we must invalidate
			 * the ASID without making it appear to
			 * has_valid_asid() as if the mm has been completely
			 * unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, !exec);
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
		mtc0_tlbw_hazard();
		ginvt_va_mmid(page);
		sync_ginv();
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
		   (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
		local_flush_tlb_page(vma, page);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so
			 * invalidate the ASID without making it appear to
			 * has_valid_asid() as if the mm has been completely
			 * unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				set_cpu_context(cpu, vma->vm_mm, 1);
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static void tick_broadcast_callee(void *info)
{
	tick_receive_broadcast();
}

static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
	CSD_INIT(tick_broadcast_callee, NULL);

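/*
 * Deliver a clockevent broadcast to each CPU in @mask by sending it an
 * asynchronous smp_call, which invokes tick_receive_broadcast() on that
 * CPU via the per-cpu CSD above.
 */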
void tick_broadcast(const struct cpumask *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */