/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops. SMP under Xen is
 * very straightforward. Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of. As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, xen_resched_irq);
static DEFINE_PER_CPU(int, xen_callfunc_irq);
static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
static DEFINE_PER_CPU(int, xen_irq_work);
static DEFINE_PER_CPU(int, xen_debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
        inc_irq_stat(irq_resched_count);
        scheduler_ipi();

        return IRQ_HANDLED;
}

static void __cpuinit cpu_bringup(void)
{
        int cpu;

        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();

        xen_enable_sysenter();
        xen_enable_syscall();

        cpu = smp_processor_id();
        smp_store_cpu_info(cpu);
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);

        xen_setup_cpu_clockevents();

        notify_cpu_starting(cpu);

        ipi_call_lock();
        set_cpu_online(cpu, true);
        ipi_call_unlock();

        this_cpu_write(cpu_state, CPU_ONLINE);

        wmb();

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();

        wmb();                  /* make sure everything is out */
}

static void __cpuinit cpu_bringup_and_idle(void)
{
        cpu_bringup();
        cpu_idle();
}

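/*
 * Bind the per-cpu event channels this file relies on: the reschedule,
 * call-function, call-function-single and irq-work IPIs, plus the debug
 * VIRQ. If any binding fails, the ones already made are torn down again.
 */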
static int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        const char *resched_name, *callfunc_name, *debug_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                    cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_resched_irq, cpu) = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
                                    cpu,
                                    xen_call_function_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfunc_irq, cpu) = rc;

        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
                                     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_debug_irq, cpu) = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
                                    cpu,
                                    xen_call_function_single_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfuncsingle_irq, cpu) = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
                                    cpu,
                                    xen_irq_work_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_irq_work, cpu) = rc;

        return 0;

 fail:
        if (per_cpu(xen_resched_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
        if (per_cpu(xen_callfunc_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
        if (per_cpu(xen_debug_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
        if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
                                       NULL);
        if (per_cpu(xen_irq_work, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);

        return rc;
}

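/*
 * For an unprivileged guest, ask the hypervisor which VCPUs exist and mark
 * each of them as possible. Dom0 is handled by xen_filter_cpu_maps() instead.
 */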
static void __init xen_fill_possible_map(void)
{
        int i, rc;

        if (xen_initial_domain())
                return;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                }
        }
}

static void __init xen_filter_cpu_maps(void)
{
        int i, rc;
        unsigned int subtract = 0;

        if (!xen_initial_domain())
                return;

        num_processors = 0;
        disabled_cpus = 0;
        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                } else {
                        set_cpu_possible(i, false);
                        set_cpu_present(i, false);
                        subtract++;
                }
        }
#ifdef CONFIG_HOTPLUG_CPU
        /* This is akin to using 'nr_cpus' on the Linux command line.
         * That is OK because with 'dom0_max_vcpus=X' we can only have
         * up to X VCPUs, while nr_cpu_ids may be greater than X. This
         * is normally not a problem, except when CPU hotplugging is
         * involved: then there might be more than X CPUs in the guest,
         * which will not work as there is no hypercall to expand the
         * maximum number of VCPUs an already running guest has. So cap
         * it to X. */
        if (subtract)
                nr_cpu_ids = nr_cpu_ids - subtract;
#endif

}

static void __init xen_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        /* We've switched to the "real" per-cpu gdt, so make sure the
           old memory can be recycled */
        make_lowmem_page_readwrite(xen_initial_gdt);

        xen_filter_cpu_maps();
        xen_setup_vcpu_info_placement();
}

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;
        unsigned int i;

        if (skip_ioapic_setup) {
                char *m = (max_cpus == 0) ?
                        "The nosmp parameter is incompatible with Xen; " \
                        "use Xen dom0_max_vcpus=1 parameter" :
                        "The noapic parameter is incompatible with Xen";

                xen_raw_printk(m);
                panic(m);
        }
        xen_init_lock_cpu(0);

        smp_store_cpu_info(0);
        cpu_data(0).x86_max_cores = 1;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                set_cpu_possible(cpu, false);
        }

        for_each_possible_cpu(cpu)
                set_cpu_present(cpu, true);
}

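/*
 * Build the initial register state, GDT and pagetable information for a
 * secondary VCPU and hand it to the hypervisor via VCPUOP_initialise.
 * Only the first call per CPU does anything; later calls are no-ops.
 */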
static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
        unsigned long gdt_mfn;

        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_table(cpu);

        ctxt->flags = VGCF_IN_KERNEL;
        ctxt->user_regs.ds = __USER_DS;
        ctxt->user_regs.es = __USER_DS;
        ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
        ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
        ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
        ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        xen_copy_trap_info(ctxt->trap_ctxt);

        ctxt->ldt_ents = 0;

        BUG_ON((unsigned long)gdt & ~PAGE_MASK);

        gdt_mfn = arbitrary_virt_to_mfn(gdt);
        make_lowmem_page_readonly(gdt);
        make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

        ctxt->gdt_frames[0] = gdt_mfn;
        ctxt->gdt_ents = GDT_ENTRIES;

        ctxt->user_regs.cs = __KERNEL_CS;
        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

        ctxt->kernel_ss = __KERNEL_DS;
        ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
        ctxt->event_callback_cs = __KERNEL_CS;
        ctxt->failsafe_callback_cs = __KERNEL_CS;
#endif
        ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
        ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

        per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}

static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int rc;

        per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
        irq_ctx_init(cpu);
#else
        clear_tsk_thread_flag(idle, TIF_FORK);
        per_cpu(kernel_stack, cpu) =
                (unsigned long)task_stack_page(idle) -
                KERNEL_STACK_OFFSET + THREAD_SIZE;
#endif
        xen_setup_runstate_info(cpu);
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        if (num_online_cpus() == 1)
                alternatives_smp_switch(1);

        rc = xen_smp_intr_init(cpu);
        if (rc)
                return rc;

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        BUG_ON(rc);

        while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
                HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                barrier();
        }

        return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        if (cpu == 0)
                return -EBUSY;

        cpu_disable_common();

        load_cr3(swapper_pg_dir);
        return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
        while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
                current->state = TASK_UNINTERRUPTIBLE;
                schedule_timeout(HZ/10);
        }
        unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);

        if (num_online_cpus() == 1)
                alternatives_smp_switch(0);
}

static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
        /*
         * Balance out the preempt calls - we are running in the cpu_idle
         * loop, which was entered at bootup from cpu_bringup_and_idle.
         * cpu_bringup_and_idle() called cpu_bringup(), which did a
         * preempt_disable(), so this preempt_enable() balances it out.
         */
        preempt_enable();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
        return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
        BUG();
}

static void xen_play_dead(void)
{
        BUG();
}

#endif
static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        set_cpu_online(cpu, false);

        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
}

static void xen_stop_other_cpus(int wait)
{
        smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

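/* Deliver the given Xen IPI to every online CPU in @mask. */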
static void __xen_send_IPI_mask(const struct cpumask *mask,
                                int vector)
{
        unsigned cpu;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
        int cpu;

        __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                        break;
                }
        }
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
        __xen_send_IPI_mask(cpumask_of(cpu),
                            XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static inline int xen_map_vector(int vector)
{
        int xen_vector;

        switch (vector) {
        case RESCHEDULE_VECTOR:
                xen_vector = XEN_RESCHEDULE_VECTOR;
                break;
        case CALL_FUNCTION_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_VECTOR;
                break;
        case CALL_FUNCTION_SINGLE_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
                break;
        case IRQ_WORK_VECTOR:
                xen_vector = XEN_IRQ_WORK_VECTOR;
                break;
        default:
                xen_vector = -1;
                printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
                        vector);
        }

        return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
                       int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
                                  int vector)
{
        unsigned cpu;
        unsigned int this_cpu = smp_processor_id();

        if (!(num_online_cpus() > 1))
                return;

        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                if (this_cpu == cpu)
                        continue;

                xen_smp_send_call_function_single_ipi(cpu);
        }
}

void xen_send_IPI_allbutself(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
        irq_enter();
        irq_work_run();
        inc_irq_stat(apic_irq_work_irqs);
        irq_exit();

        return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,

        .cpu_up = xen_cpu_up,
        .cpu_die = xen_cpu_die,
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,

        .stop_other_cpus = xen_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,

        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
        xen_init_spinlocks();
}

static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
        native_smp_prepare_cpus(max_cpus);
        WARN_ON(xen_smp_intr_init(0));

        xen_init_lock_cpu(0);
}

static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int rc;
        rc = native_cpu_up(cpu, tidle);
        WARN_ON(xen_smp_intr_init(cpu));
        return rc;
}

static void xen_hvm_cpu_die(unsigned int cpu)
{
        unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
        native_cpu_die(cpu);
}

void __init xen_hvm_smp_init(void)
{
        if (!xen_have_vector_callback)
                return;
        smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
        smp_ops.cpu_up = xen_hvm_cpu_up;
        smp_ops.cpu_die = xen_hvm_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}
/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops. SMP under Xen is
 * very straightforward. Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of. As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "pmu.h"

cpumask_var_t xen_cpu_initialized_map;

struct xen_common_irq {
        int irq;
        char *name;
};
static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
        inc_irq_stat(irq_resched_count);
        scheduler_ipi();

        return IRQ_HANDLED;
}

static void cpu_bringup(void)
{
        int cpu;

        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();

        /* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
        if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
                xen_enable_sysenter();
                xen_enable_syscall();
        }
        cpu = smp_processor_id();
        smp_store_cpu_info(cpu);
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);

        xen_setup_cpu_clockevents();

        notify_cpu_starting(cpu);

        set_cpu_online(cpu, true);

        cpu_set_state_online(cpu);  /* Implies full memory barrier. */

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();
}

/*
 * Note: cpu parameter is only relevant for PVH. The reason for passing it
 * is we can't do smp_processor_id until the percpu segments are loaded, for
 * which we need the cpu number! So we pass it in rdi as first parameter.
 */
asmlinkage __visible void cpu_bringup_and_idle(int cpu)
{
#ifdef CONFIG_XEN_PVH
        if (xen_feature(XENFEAT_auto_translated_physmap) &&
            xen_feature(XENFEAT_supervisor_mode_kernel))
                xen_pvh_secondary_vcpu_init(cpu);
#endif
        cpu_bringup();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

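/*
 * Undo the event-channel bindings made by xen_smp_intr_init(), freeing the
 * irq handlers and the kasprintf()'d names.
 */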
static void xen_smp_intr_free(unsigned int cpu)
{
        if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
                per_cpu(xen_resched_irq, cpu).irq = -1;
                kfree(per_cpu(xen_resched_irq, cpu).name);
                per_cpu(xen_resched_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
                per_cpu(xen_callfunc_irq, cpu).irq = -1;
                kfree(per_cpu(xen_callfunc_irq, cpu).name);
                per_cpu(xen_callfunc_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
                per_cpu(xen_debug_irq, cpu).irq = -1;
                kfree(per_cpu(xen_debug_irq, cpu).name);
                per_cpu(xen_debug_irq, cpu).name = NULL;
        }
        if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
                                       NULL);
                per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
                kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
                per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
        }
        if (xen_hvm_domain())
                return;

        if (per_cpu(xen_irq_work, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
                per_cpu(xen_irq_work, cpu).irq = -1;
                kfree(per_cpu(xen_irq_work, cpu).name);
                per_cpu(xen_irq_work, cpu).name = NULL;
        }

        if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
                per_cpu(xen_pmu_irq, cpu).irq = -1;
                kfree(per_cpu(xen_pmu_irq, cpu).name);
                per_cpu(xen_pmu_irq, cpu).name = NULL;
        }
}

static int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        char *resched_name, *callfunc_name, *debug_name, *pmu_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                    cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_resched_irq, cpu).irq = rc;
        per_cpu(xen_resched_irq, cpu).name = resched_name;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
                                    cpu,
                                    xen_call_function_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfunc_irq, cpu).irq = rc;
        per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
                                     IRQF_PERCPU | IRQF_NOBALANCING,
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_debug_irq, cpu).irq = rc;
        per_cpu(xen_debug_irq, cpu).name = debug_name;

        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
                                    cpu,
                                    xen_call_function_single_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
        per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

        /*
         * The IRQ worker on PVHVM goes through the native path and uses the
         * IPI mechanism.
         */
        if (xen_hvm_domain())
                return 0;

        callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
                                    cpu,
                                    xen_irq_work_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_irq_work, cpu).irq = rc;
        per_cpu(xen_irq_work, cpu).name = callfunc_name;

        if (is_xen_pmu(cpu)) {
                pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
                rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
                                             xen_pmu_irq_handler,
                                             IRQF_PERCPU|IRQF_NOBALANCING,
                                             pmu_name, NULL);
                if (rc < 0)
                        goto fail;
                per_cpu(xen_pmu_irq, cpu).irq = rc;
                per_cpu(xen_pmu_irq, cpu).name = pmu_name;
        }

        return 0;

 fail:
        xen_smp_intr_free(cpu);
        return rc;
}

static void __init xen_fill_possible_map(void)
{
        int i, rc;

        if (xen_initial_domain())
                return;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                }
        }
}

static void __init xen_filter_cpu_maps(void)
{
        int i, rc;
        unsigned int subtract = 0;

        if (!xen_initial_domain())
                return;

        num_processors = 0;
        disabled_cpus = 0;
        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                } else {
                        set_cpu_possible(i, false);
                        set_cpu_present(i, false);
                        subtract++;
                }
        }
#ifdef CONFIG_HOTPLUG_CPU
        /* This is akin to using 'nr_cpus' on the Linux command line.
         * That is OK because with 'dom0_max_vcpus=X' we can only have
         * up to X VCPUs, while nr_cpu_ids may be greater than X. This
         * is normally not a problem, except when CPU hotplugging is
         * involved: then there might be more than X CPUs in the guest,
         * which will not work as there is no hypercall to expand the
         * maximum number of VCPUs an already running guest has. So cap
         * it to X. */
        if (subtract)
                nr_cpu_ids = nr_cpu_ids - subtract;
#endif

}

static void __init xen_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        if (xen_pv_domain()) {
                if (!xen_feature(XENFEAT_writable_page_tables))
                        /* We've switched to the "real" per-cpu gdt, so make
                         * sure the old memory can be recycled. */
                        make_lowmem_page_readwrite(xen_initial_gdt);

#ifdef CONFIG_X86_32
                /*
                 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
                 * expects __USER_DS
                 */
                loadsegment(ds, __USER_DS);
                loadsegment(es, __USER_DS);
#endif

                xen_filter_cpu_maps();
                xen_setup_vcpu_info_placement();
        }
        /*
         * The alternative logic (which patches the unlock/lock) runs before
         * the SMP bootup code is activated. Hence we need to set this up
         * before the core kernel is patched. Otherwise we will have only
         * modules patched but not core code.
         */
        xen_init_spinlocks();
}

static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;
        unsigned int i;

        if (skip_ioapic_setup) {
                char *m = (max_cpus == 0) ?
                        "The nosmp parameter is incompatible with Xen; " \
                        "use Xen dom0_max_vcpus=1 parameter" :
                        "The noapic parameter is incompatible with Xen";

                xen_raw_printk(m);
                panic(m);
        }
        xen_init_lock_cpu(0);

        smp_store_boot_cpu_info();
        cpu_data(0).x86_max_cores = 1;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        xen_pmu_init(0);

        if (xen_smp_intr_init(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                set_cpu_possible(cpu, false);
        }

        for_each_possible_cpu(cpu)
                set_cpu_present(cpu, true);
}

static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
        unsigned long gdt_mfn;

        /* used to tell cpu_init() that it can proceed with initialization */
        cpumask_set_cpu(cpu, cpu_callout_mask);
        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_table(cpu);

#ifdef CONFIG_X86_32
        /* Note: PVH is not yet supported on x86_32. */
        ctxt->user_regs.fs = __KERNEL_PERCPU;
        ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#endif
        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
                ctxt->flags = VGCF_IN_KERNEL;
                ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
                ctxt->user_regs.ds = __USER_DS;
                ctxt->user_regs.es = __USER_DS;
                ctxt->user_regs.ss = __KERNEL_DS;

                xen_copy_trap_info(ctxt->trap_ctxt);

                ctxt->ldt_ents = 0;

                BUG_ON((unsigned long)gdt & ~PAGE_MASK);

                gdt_mfn = arbitrary_virt_to_mfn(gdt);
                make_lowmem_page_readonly(gdt);
                make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

                ctxt->gdt_frames[0] = gdt_mfn;
                ctxt->gdt_ents = GDT_ENTRIES;

                ctxt->kernel_ss = __KERNEL_DS;
                ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
                ctxt->event_callback_cs = __KERNEL_CS;
                ctxt->failsafe_callback_cs = __KERNEL_CS;
#else
                ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
                ctxt->event_callback_eip =
                        (unsigned long)xen_hypervisor_callback;
                ctxt->failsafe_callback_eip =
                        (unsigned long)xen_failsafe_callback;
                ctxt->user_regs.cs = __KERNEL_CS;
                per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
        }
#ifdef CONFIG_XEN_PVH
        else {
                /*
                 * The vcpu comes on kernel page tables which have the NX pte
                 * bit set. This means before DS/SS is touched, NX in
                 * EFER must be set. Hence the following assembly glue code.
                 */
                ctxt->user_regs.eip = (unsigned long)xen_pvh_early_cpu_init;
                ctxt->user_regs.rdi = cpu;
                ctxt->user_regs.rsi = true;  /* entry == true */
        }
#endif
        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}

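/*
 * Boot a secondary VCPU: set up its runstate, timer and spinlock state,
 * load its initial context, then ask Xen to run it and wait until it
 * reports itself online.
 */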
static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int rc;

        common_cpu_up(cpu, idle);

        xen_setup_runstate_info(cpu);
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);

        /*
         * PV VCPUs are always successfully taken down (see 'while' loop
         * in xen_cpu_die()), so -EBUSY is an error.
         */
        rc = cpu_check_up_prepare(cpu);
        if (rc)
                return rc;

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        xen_pmu_init(cpu);

        rc = xen_smp_intr_init(cpu);
        if (rc)
                return rc;

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        BUG_ON(rc);

        while (cpu_report_state(cpu) != CPU_ONLINE)
                HYPERVISOR_sched_op(SCHEDOP_yield, NULL);

        return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
static int xen_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        if (cpu == 0)
                return -EBUSY;

        cpu_disable_common();

        load_cr3(swapper_pg_dir);
        return 0;
}

static void xen_cpu_die(unsigned int cpu)
{
        while (xen_pv_domain() && HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ/10);
        }

        if (common_cpu_die(cpu) == 0) {
                xen_smp_intr_free(cpu);
                xen_uninit_lock_cpu(cpu);
                xen_teardown_timer(cpu);
                xen_pmu_finish(cpu);
        }
}

static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
        /*
         * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
         * clears certain data that the cpu_idle loop (which called us
         * and that we return from) expects. The only way to get that
         * data back is to call:
         */
        tick_nohz_idle_enter();

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
        return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
        BUG();
}

static void xen_play_dead(void)
{
        BUG();
}

#endif
static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        set_cpu_online(cpu, false);

        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
}

static void xen_stop_other_cpus(int wait)
{
        smp_call_function(stop_self, NULL, wait);
}

static void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void __xen_send_IPI_mask(const struct cpumask *mask,
                                int vector)
{
        unsigned cpu;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
        int cpu;

        __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
                        break;
                }
        }
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
        __xen_send_IPI_mask(cpumask_of(cpu),
                            XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

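/* Translate a native x86 IPI vector into the corresponding Xen IPI number. */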
static inline int xen_map_vector(int vector)
{
        int xen_vector;

        switch (vector) {
        case RESCHEDULE_VECTOR:
                xen_vector = XEN_RESCHEDULE_VECTOR;
                break;
        case CALL_FUNCTION_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_VECTOR;
                break;
        case CALL_FUNCTION_SINGLE_VECTOR:
                xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
                break;
        case IRQ_WORK_VECTOR:
                xen_vector = XEN_IRQ_WORK_VECTOR;
                break;
#ifdef CONFIG_X86_64
        case NMI_VECTOR:
        case APIC_DM_NMI:       /* Some use that instead of NMI_VECTOR */
                xen_vector = XEN_NMI_VECTOR;
                break;
#endif
        default:
                xen_vector = -1;
                printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
                        vector);
        }

        return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
                       int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                __xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
        int xen_vector = xen_map_vector(vector);

        if (xen_vector >= 0)
                xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
                                  int vector)
{
        unsigned cpu;
        unsigned int this_cpu = smp_processor_id();
        int xen_vector = xen_map_vector(vector);

        if (!(num_online_cpus() > 1) || (xen_vector < 0))
                return;

        for_each_cpu_and(cpu, mask, cpu_online_mask) {
                if (this_cpu == cpu)
                        continue;

                xen_send_IPI_one(cpu, xen_vector);
        }
}

void xen_send_IPI_allbutself(int vector)
{
        xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        inc_irq_stat(irq_call_count);
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
        irq_enter();
        irq_work_run();
        inc_irq_stat(apic_irq_work_irqs);
        irq_exit();

        return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,

        .cpu_up = xen_cpu_up,
        .cpu_die = xen_cpu_die,
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,

        .stop_other_cpus = xen_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,

        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
}

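/*
 * On PVHVM the native SMP code does the bulk of the bring-up; we only add
 * the Xen event-channel IPIs and the paravirt spinlock kicker on top of it.
 */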
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
{
        native_smp_prepare_cpus(max_cpus);
        WARN_ON(xen_smp_intr_init(0));

        xen_init_lock_cpu(0);
}

static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        int rc;

        /*
         * This can happen if the CPU was offlined earlier and
         * offlining timed out in common_cpu_die().
         */
        if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
                xen_smp_intr_free(cpu);
                xen_uninit_lock_cpu(cpu);
        }

        /*
         * xen_smp_intr_init() needs to run before native_cpu_up()
         * so that IPI vectors are set up on the booting CPU before
         * it is marked online in native_cpu_up().
         */
        rc = xen_smp_intr_init(cpu);
        WARN_ON(rc);
        if (!rc)
                rc = native_cpu_up(cpu, tidle);

        /*
         * We must initialize the slowpath CPU kicker _after_ the native
         * path has executed. If we initialized it before, none of the
         * unlocker IPI kicks would reach the booting CPU, as the booting
         * CPU had not yet set itself 'online' in cpu_online_mask. That mask
         * is checked when IPIs are sent (on HVM at least).
         */
        xen_init_lock_cpu(cpu);
        return rc;
}

void __init xen_hvm_smp_init(void)
{
        if (!xen_have_vector_callback)
                return;
        smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
        smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
        smp_ops.cpu_up = xen_hvm_cpu_up;
        smp_ops.cpu_die = xen_cpu_die;
        smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
        smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
        smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
}