// SPDX-License-Identifier: GPL-2.0-only
/*
 * SMP boot-related support
 *
 * Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2001, 2004-2005 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 *	Ashok Raj <ashok.raj@intel.com>
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	Moved SMP booting functions from smp.c to here.
 * 01/04/27 David Mosberger <davidm@hpl.hp.com>	Added ITC synching code.
 * 02/07/31 David Mosberger <davidm@hpl.hp.com>	Switch over to hotplug-CPU boot-sequence.
 *						smp_boot_cpus()/smp_commence() is replaced by
 *						smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
 * 04/06/21 Ashok Raj <ashok.raj@intel.com>	Added CPU Hotplug Support
 * 04/12/26 Jin Gordon <gordon.jin@intel.com>
 * 04/12/26 Rohit Seth <rohit.seth@intel.com>
 *						Add multi-threading and multi-core detection
 * 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
 *						Setup cpu_sibling_map and cpu_core_map
 */

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/memblock.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/efi.h>
#include <linux/percpu.h>
#include <linux/bitops.h>

#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mca.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>

#define SMP_DEBUG 0

#if SMP_DEBUG
#define Dprintk(x...)  printk(x)
#else
#define Dprintk(x...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_PERMIT_BSP_REMOVE
#define bsp_remove_ok	1
#else
#define bsp_remove_ok	0
#endif

/*
 * Global array allocated for NR_CPUS at boot time
 */
struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];

/*
 * start_ap in head.S uses this to store current booting cpu
 * info.
 */
struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];

#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);

#else
#define set_brendez_area(x)
#endif


/*
 * ITC synchronization related stuff:
 */
#define MASTER	(0)
#define SLAVE	(SMP_CACHE_BYTES/8)

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static volatile unsigned long go[SLAVE + 1];
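
/*
 * Note: SLAVE is chosen so that go[MASTER] and go[SLAVE] lie a full
 * SMP_CACHE_BYTES apart (go[] holds 8-byte longs and SLAVE is
 * SMP_CACHE_BYTES/8), keeping the master's "ready" flag and the
 * published timestamp on separate cache lines and avoiding line
 * ping-pong during the tight polling loops below.
 */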

#define DEBUG_ITC_SYNC	0

extern void start_ap (void);
extern unsigned long ia64_iobase;

struct task_struct *task_for_booting_cpu;

/*
 * State for each CPU
 */
DEFINE_PER_CPU(int, cpu_state);

cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);
DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

int smp_num_siblings = 1;

/* which logical CPU number maps to which CPU (physical APIC ID) */
volatile int ia64_cpu_to_sapicid[NR_CPUS];
EXPORT_SYMBOL(ia64_cpu_to_sapicid);

static cpumask_t cpu_callin_map;

struct smp_boot_data smp_boot_data __initdata;

unsigned long ap_wakeup_vector = -1; /* external interrupt vector used to wake up APs */

char __initdata no_int_routing;

unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */

#ifdef CONFIG_FORCE_CPEI_RETARGET
#define CPEI_OVERRIDE_DEFAULT	(1)
#else
#define CPEI_OVERRIDE_DEFAULT	(0)
#endif

unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;

static int __init
cmdl_force_cpei(char *str)
{
        int value = 0;

        get_option(&str, &value);
        force_cpei_retarget = value;

        return 1;
}

__setup("force_cpei=", cmdl_force_cpei);

static int __init
nointroute (char *str)
{
        no_int_routing = 1;
        printk("no_int_routing on\n");
        return 1;
}

__setup("nointroute", nointroute);

static void fix_b0_for_bsp(void)
{
#ifdef CONFIG_HOTPLUG_CPU
        int cpuid;
        static int fix_bsp_b0 = 1;

        cpuid = smp_processor_id();

        /*
         * Cache the b0 value on the first AP that comes up
         */
        if (!(fix_bsp_b0 && cpuid))
                return;

        sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
        printk("Fixed BSP b0 value from CPU %d\n", cpuid);

        fix_bsp_b0 = 0;
#endif
}

void
sync_master (void *arg)
{
        unsigned long flags, i;

        go[MASTER] = 0;

        local_irq_save(flags);
        {
                for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
                        while (!go[MASTER])
                                cpu_relax();
                        go[MASTER] = 0;
                        go[SLAVE] = ia64_get_itc();
                }
        }
        local_irq_restore(flags);
}
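
/*
 * Per-sample handshake between sync_master() (on the master) and
 * get_delta() (on the slave): the slave raises go[MASTER] to request a
 * timestamp, the master clears it and publishes its ITC in go[SLAVE],
 * and the slave consumes and clears go[SLAVE].  One such exchange
 * happens per iteration, NUM_ROUNDS*NUM_ITERS times in total.
 */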

/*
 * Return the number of cycles by which our itc differs from the itc on the master
 * (time-keeper) CPU.  A positive number indicates our itc is ahead of the master,
 * negative that it is behind.
 */
static inline long
get_delta (long *rt, long *master)
{
        unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
        unsigned long tcenter, t0, t1, tm;
        long i;

        for (i = 0; i < NUM_ITERS; ++i) {
                t0 = ia64_get_itc();
                go[MASTER] = 1;
                while (!(tm = go[SLAVE]))
                        cpu_relax();
                go[SLAVE] = 0;
                t1 = ia64_get_itc();

                if (t1 - t0 < best_t1 - best_t0)
                        best_t0 = t0, best_t1 = t1, best_tm = tm;
        }

        *rt = best_t1 - best_t0;
        *master = best_tm - best_t0;

        /* average best_t0 and best_t1 without overflow: */
        tcenter = (best_t0/2 + best_t1/2);
        if (best_t0 % 2 + best_t1 % 2 == 2)
                ++tcenter;
        return tcenter - best_tm;
}
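
/*
 * Worked example with made-up numbers: if the best round saw t0 = 1000,
 * t1 = 1400 and the master reported tm = 1180, then *rt = 400, the
 * midpoint tcenter = 1200, and get_delta() returns 1200 - 1180 = +20,
 * i.e. the slave's ITC appears about 20 cycles ahead of the master's.
 */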

/*
 * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
 * (normally the time-keeper CPU).  We use a closed loop to eliminate the possibility of
 * unaccounted-for errors (such as getting a machine check in the middle of a calibration
 * step).  The basic idea is for the slave to ask the master what itc value it has and to
 * read its own itc before and after the master responds.  Each iteration gives us three
 * timestamps:
 *
 *	slave		master
 *
 *	t0 ---\
 *	       ---\
 *		   --->
 *			tm
 *		   /---
 *	       /---
 *	t1 <---
 *
 *
 * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
 * and t1.  If we achieve this, the clocks are synchronized provided the interconnect
 * between the slave and the master is symmetric.  Even if the interconnect were
 * asymmetric, we would still know that the synchronization error is smaller than the
 * roundtrip latency (t1 - t0).
 *
 * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
 * within one or two cycles.  However, we can only *guarantee* that the synchronization is
 * accurate to within a round-trip time, which is typically in the range of several
 * hundred cycles (e.g., ~500 cycles).  In practice, this means that the ITCs are usually
 * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
 * than half a microsecond or so.
 */
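
/*
 * The adjustment loop below acts as a simple feedback controller: each
 * round applies -delta plus adjust_latency/4, a running estimate that is
 * presumably meant to absorb the latency of the ia64_set_itc() write
 * itself (it is reported as "lat" in the DEBUG_ITC_SYNC records).
 */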
void
ia64_sync_itc (unsigned int master)
{
        long i, delta, adj, adjust_latency = 0, done = 0;
        unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_ITC_SYNC
        struct {
                long rt;	/* roundtrip time */
                long master;	/* master's timestamp */
                long diff;	/* difference between midpoint and master's timestamp */
                long lat;	/* estimate of itc adjustment latency */
        } t[NUM_ROUNDS];
#endif

        /*
         * Make sure local timer ticks are disabled while we sync.  If
         * they were enabled, we'd have to worry about nasty issues
         * like setting the ITC ahead of (or a long time before) the
         * next scheduled tick.
         */
        BUG_ON((ia64_get_itv() & (1 << 16)) == 0);

        go[MASTER] = 1;

        if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
                printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
                return;
        }

        while (go[MASTER])
                cpu_relax();	/* wait for master to be ready */

        spin_lock_irqsave(&itc_sync_lock, flags);
        {
                for (i = 0; i < NUM_ROUNDS; ++i) {
                        delta = get_delta(&rt, &master_time_stamp);
                        if (delta == 0) {
                                done = 1;	/* let's lock on to this... */
                                bound = rt;
                        }

                        if (!done) {
                                if (i > 0) {
                                        adjust_latency += -delta;
                                        adj = -delta + adjust_latency/4;
                                } else
                                        adj = -delta;

                                ia64_set_itc(ia64_get_itc() + adj);
                        }
#if DEBUG_ITC_SYNC
                        t[i].rt = rt;
                        t[i].master = master_time_stamp;
                        t[i].diff = delta;
                        t[i].lat = adjust_latency/4;
#endif
                }
        }
        spin_unlock_irqrestore(&itc_sync_lock, flags);

#if DEBUG_ITC_SYNC
        for (i = 0; i < NUM_ROUNDS; ++i)
                printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
                       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

        printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
               "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
}

/*
 * Ideally sets up per-cpu profiling hooks.  Doesn't do much now...
 */
static inline void smp_setup_percpu_timer(void)
{
}

static void
smp_callin (void)
{
        int cpuid, phys_id, itc_master;
        struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
        extern void ia64_init_itm(void);
        extern volatile int time_keeper_id;

#ifdef CONFIG_PERFMON
        extern void pfm_init_percpu(void);
#endif

        cpuid = smp_processor_id();
        phys_id = hard_smp_processor_id();
        itc_master = time_keeper_id;

        if (cpu_online(cpuid)) {
                printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
                       phys_id, cpuid);
                BUG();
        }

        fix_b0_for_bsp();

        /*
         * numa_node_id() works after this.
         */
        set_numa_node(cpu_to_node_map[cpuid]);
        set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));

        spin_lock(&vector_lock);
        /* Setup the per cpu irq handling data structures */
        __setup_vector_irq(cpuid);
        notify_cpu_starting(cpuid);
        set_cpu_online(cpuid, true);
        per_cpu(cpu_state, cpuid) = CPU_ONLINE;
        spin_unlock(&vector_lock);

        smp_setup_percpu_timer();

        ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

#ifdef CONFIG_PERFMON
        pfm_init_percpu();
#endif

        local_irq_enable();

        if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
                /*
                 * Synchronize the ITC with the BP.  Need to do this after irqs are
                 * enabled because ia64_sync_itc() calls smp_call_function_single(),
                 * which calls spin_unlock_bh(), which calls local_bh_enable(),
                 * which bugs out if irqs are not enabled...
                 */
                Dprintk("Going to syncup ITC with ITC Master.\n");
                ia64_sync_itc(itc_master);
        }

        /*
         * Get our bogomips.
         */
        ia64_init_itm();

        /*
         * Delay calibration can be skipped if new processor is identical to the
         * previous processor.
         */
        last_cpuinfo = cpu_data(cpuid - 1);
        this_cpuinfo = local_cpu_data;
        if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
            last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
            last_cpuinfo->features != this_cpuinfo->features ||
            last_cpuinfo->revision != this_cpuinfo->revision ||
            last_cpuinfo->family != this_cpuinfo->family ||
            last_cpuinfo->archrev != this_cpuinfo->archrev ||
            last_cpuinfo->model != this_cpuinfo->model)
                calibrate_delay();
        local_cpu_data->loops_per_jiffy = loops_per_jiffy;

        /*
         * Allow the master to continue.
         */
        cpumask_set_cpu(cpuid, &cpu_callin_map);
        Dprintk("Stack on CPU %d at about %p\n", cpuid, &cpuid);
}


/*
 * Activate a secondary processor.  head.S calls this.
 */
int
start_secondary (void *unused)
{
        /* Early console may use I/O ports */
        ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
#ifndef CONFIG_PRINTK_TIME
        Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
#endif
        efi_map_pal_code();
        cpu_init();
        preempt_disable();
        smp_callin();

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
        return 0;
}
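
/*
 * Note: cpu_startup_entry() enters the idle loop and does not return;
 * the trailing "return 0" exists only to satisfy the int prototype.
 */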

static int
do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
{
        int timeout;

        task_for_booting_cpu = idle;
        Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

        set_brendez_area(cpu);
        ia64_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);

        /*
         * Wait 10s total for the AP to start
         */
        Dprintk("Waiting on callin_map ...");
        for (timeout = 0; timeout < 100000; timeout++) {
                if (cpumask_test_cpu(cpu, &cpu_callin_map))
                        break;  /* It has booted */
                barrier();      /* Make sure we re-read cpu_callin_map */
                udelay(100);
        }
        Dprintk("\n");

        if (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
                printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
                ia64_cpu_to_sapicid[cpu] = -1;
                set_cpu_online(cpu, false);  /* was set in smp_callin() */
                return -EINVAL;
        }
        return 0;
}
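
/*
 * "decay=" below is parsed but its value is deliberately discarded; the
 * hook is presumably kept only so that stale command lines do not trip
 * the unknown-parameter handling.
 */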
static int __init
decay (char *str)
{
        int ticks;
        get_option (&str, &ticks);
        return 1;
}

__setup("decay=", decay);

/*
 * Initialize the logical CPU number to SAPICID mapping
 */
void __init
smp_build_cpu_map (void)
{
        int sapicid, cpu, i;
        int boot_cpu_id = hard_smp_processor_id();

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                ia64_cpu_to_sapicid[cpu] = -1;
        }

        ia64_cpu_to_sapicid[0] = boot_cpu_id;
        init_cpu_present(cpumask_of(0));
        set_cpu_possible(0, true);
        for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
                sapicid = smp_boot_data.cpu_phys_id[i];
                if (sapicid == boot_cpu_id)
                        continue;
                set_cpu_present(cpu, true);
                set_cpu_possible(cpu, true);
                ia64_cpu_to_sapicid[cpu] = sapicid;
                cpu++;
        }
}
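
/*
 * Example with made-up firmware data: if SAL reports SAPIC IDs
 * {0x10, 0x20, 0x30} and the BSP is 0x20, the map becomes
 * cpu0 -> 0x20, cpu1 -> 0x10, cpu2 -> 0x30: the boot CPU always takes
 * logical CPU 0 and the rest are numbered in discovery order.
 */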

/*
 * Cycle through the APs sending Wakeup IPIs to boot each.
 */
void __init
smp_prepare_cpus (unsigned int max_cpus)
{
        int boot_cpu_id = hard_smp_processor_id();

        /*
         * Initialize the per-CPU profiling counter/multiplier
         */

        smp_setup_percpu_timer();

        cpumask_set_cpu(0, &cpu_callin_map);

        local_cpu_data->loops_per_jiffy = loops_per_jiffy;
        ia64_cpu_to_sapicid[0] = boot_cpu_id;

        printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);

        current_thread_info()->cpu = 0;

        /*
         * If SMP should be disabled, then really disable it!
         */
        if (!max_cpus) {
                printk(KERN_INFO "SMP mode deactivated.\n");
                init_cpu_online(cpumask_of(0));
                init_cpu_present(cpumask_of(0));
                init_cpu_possible(cpumask_of(0));
                return;
        }
}

void smp_prepare_boot_cpu(void)
{
        set_cpu_online(smp_processor_id(), true);
        cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
        set_numa_node(cpu_to_node_map[smp_processor_id()]);
        per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
static inline void
clear_cpu_sibling_map(int cpu)
{
        int i;

        for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu))
                cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i));
        for_each_cpu(i, &cpu_core_map[cpu])
                cpumask_clear_cpu(cpu, &cpu_core_map[i]);

        per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
}

static void
remove_siblinginfo(int cpu)
{
        int last = 0;

        if (cpu_data(cpu)->threads_per_core == 1 &&
            cpu_data(cpu)->cores_per_socket == 1) {
                cpumask_clear_cpu(cpu, &cpu_core_map[cpu]);
                cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
                return;
        }

        last = (cpumask_weight(&cpu_core_map[cpu]) == 1 ? 1 : 0);

        /* remove it from all sibling maps */
        clear_cpu_sibling_map(cpu);
}

extern void fixup_irqs(void);

int migrate_platform_irqs(unsigned int cpu)
{
        int new_cpei_cpu;
        struct irq_data *data = NULL;
        const struct cpumask *mask;
        int retval = 0;

        /*
         * Don't permit the CPEI target to be removed.
         */
        if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
                printk("CPU (%d) is CPEI Target\n", cpu);
                if (can_cpei_retarget()) {
                        /*
                         * Now re-target the CPEI to a different processor
                         */
                        new_cpei_cpu = cpumask_any(cpu_online_mask);
                        mask = cpumask_of(new_cpei_cpu);
                        set_cpei_target_cpu(new_cpei_cpu);
                        data = irq_get_irq_data(ia64_cpe_irq);
                        /*
                         * Switch over immediately for now; ideally CPEI would be
                         * faked like other interrupts, but we need to study its
                         * behaviour with polling before making changes.
                         */
                        if (data && data->chip) {
                                data->chip->irq_disable(data);
                                data->chip->irq_set_affinity(data, mask, false);
                                data->chip->irq_enable(data);
                                printk("Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
                        }
                }
                if (!data) {
                        printk("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
                        retval = -EBUSY;
                }
        }
        return retval;
}

/* must be called with cpucontrol mutex held */
int __cpu_disable(void)
{
        int cpu = smp_processor_id();

        /*
         * Don't permit removal of the boot processor for now.
         */
        if (cpu == 0 && !bsp_remove_ok) {
                printk("Your platform does not support removal of BSP\n");
                return -EBUSY;
        }

        set_cpu_online(cpu, false);

        if (migrate_platform_irqs(cpu)) {
                set_cpu_online(cpu, true);
                return -EBUSY;
        }

        remove_siblinginfo(cpu);
        fixup_irqs();
        local_flush_tlb_all();
        cpumask_clear_cpu(cpu, &cpu_callin_map);
        return 0;
}

void __cpu_die(unsigned int cpu)
{
        unsigned int i;

        for (i = 0; i < 100; i++) {
                /* They ack this in play_dead by setting CPU_DEAD */
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        printk("CPU %d is now offline\n", cpu);
                        return;
                }
                msleep(100);
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#endif /* CONFIG_HOTPLUG_CPU */

void
smp_cpus_done (unsigned int dummy)
{
        int cpu;
        unsigned long bogosum = 0;

        /*
         * Allow the user to impress friends.
         */

        for_each_online_cpu(cpu) {
                bogosum += cpu_data(cpu)->loops_per_jiffy;
        }

        printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
               (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
}
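
/*
 * The BogoMIPS arithmetic above: loops_per_jiffy * HZ is loops per
 * second, and one BogoMIPS is 500000 loops/s, so bogosum/(500000/HZ)
 * yields the integer part and (bogosum/(5000/HZ)) % 100 the hundredths.
 */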

static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        for_each_online_cpu(i) {
                if ((cpu_data(cpu)->socket_id == cpu_data(i)->socket_id)) {
                        cpumask_set_cpu(i, &cpu_core_map[cpu]);
                        cpumask_set_cpu(cpu, &cpu_core_map[i]);
                        if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
                                cpumask_set_cpu(i,
                                                &per_cpu(cpu_sibling_map, cpu));
                                cpumask_set_cpu(cpu,
                                                &per_cpu(cpu_sibling_map, i));
                        }
                }
        }
}

int
__cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int ret;
        int sapicid;

        sapicid = ia64_cpu_to_sapicid[cpu];
        if (sapicid == -1)
                return -EINVAL;

        /*
         * Already-booted cpu?  Not valid any more, since we no longer
         * park booted CPUs in an idle-loop tightspin.
         */
        if (cpumask_test_cpu(cpu, &cpu_callin_map))
                return -EINVAL;

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
        /* Processor goes to start_secondary(), sets online flag */
        ret = do_boot_cpu(sapicid, cpu, tidle);
        if (ret < 0)
                return ret;

        if (cpu_data(cpu)->threads_per_core == 1 &&
            cpu_data(cpu)->cores_per_socket == 1) {
                cpumask_set_cpu(cpu, &per_cpu(cpu_sibling_map, cpu));
                cpumask_set_cpu(cpu, &cpu_core_map[cpu]);
                return 0;
        }

        set_cpu_sibling_map(cpu);

        return 0;
}

/*
 * Assume that CPUs have been discovered by some platform-dependent interface.  For
 * SoftSDV/Lion, that would be ACPI.
 *
 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
 */
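/*
 * On ia64 a C function pointer refers to a descriptor holding the entry
 * point (fp) and its global pointer (gp); SAL wants both as physical
 * addresses, hence the two ia64_tpa() conversions below.
 */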
void __init
init_smp_config(void)
{
        struct fptr {
                unsigned long fp;
                unsigned long gp;
        } *ap_startup;
        long sal_ret;

        /* Tell SAL where to drop the APs.  */
        ap_startup = (struct fptr *) start_ap;
        sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
                                       ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
        if (sal_ret < 0)
                printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
                       ia64_sal_strerror(sal_ret));
}

/*
 * identify_siblings(cpu) gets called from identify_cpu.  This populates the
 * information related to logical execution units in the per_cpu_data structure.
 */
void identify_siblings(struct cpuinfo_ia64 *c)
{
        long status;
        u16 pltid;
        pal_logical_to_physical_t info;

        status = ia64_pal_logical_to_phys(-1, &info);
        if (status != PAL_STATUS_SUCCESS) {
                if (status != PAL_STATUS_UNIMPLEMENTED) {
                        printk(KERN_ERR
                                "ia64_pal_logical_to_phys failed with %ld\n",
                                status);
                        return;
                }

                info.overview_ppid = 0;
                info.overview_cpp  = 1;
                info.overview_tpc  = 1;
        }

        status = ia64_sal_physical_id_info(&pltid);
        if (status != PAL_STATUS_SUCCESS) {
                if (status != PAL_STATUS_UNIMPLEMENTED)
                        printk(KERN_ERR
                                "ia64_sal_pltid failed with %ld\n",
                                status);
                return;
        }

        c->socket_id = (pltid << 8) | info.overview_ppid;

        if (info.overview_cpp == 1 && info.overview_tpc == 1)
                return;

        c->cores_per_socket = info.overview_cpp;
        c->threads_per_core = info.overview_tpc;
        c->num_log = info.overview_num_log;

        c->core_id = info.log1_cid;
        c->thread_id = info.log1_tid;
}
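
/*
 * socket_id therefore packs the SAL platform id in the upper bits and
 * the PAL physical package id in the low 8: e.g. (made-up values)
 * pltid 0x2 with ppid 0x1 gives socket_id 0x201.
 */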

/*
 * Returns non-zero if multi-threading is enabled on at least one
 * physical package.  Due to CPU hotplug and (maxcpus=), not all threads
 * are necessarily enabled even though the processor supports
 * multi-threading.
 */
int is_multithreading_enabled(void)
{
        int i, j;

        for_each_present_cpu(i) {
                for_each_present_cpu(j) {
                        if (j == i)
                                continue;
                        if ((cpu_data(j)->socket_id == cpu_data(i)->socket_id)) {
                                if (cpu_data(j)->core_id == cpu_data(i)->core_id)
                                        return 1;
                        }
                }
        }
        return 0;
}
EXPORT_SYMBOL_GPL(is_multithreading_enabled);