Loading...
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/thread_info.h>
3#include <asm/smp.h>
4
5#include <xen/events.h>
6
7#include "xen-ops.h"
8#include "smp.h"
9
10
/*
 * Prepare the boot CPU for SMP under Xen HVM: run the native preparation,
 * then layer the Xen-specific vcpu_info, time, and spinlock setup on top.
 * Must execute on CPU 0; the call order below is significant.
 */
static void __init xen_hvm_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/*
	 * Setup vcpu_info for boot CPU. Secondary CPUs get their vcpu_info
	 * in xen_cpu_up_prepare_hvm().
	 */
	xen_vcpu_setup(0);

	/*
	 * Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS.
	 * Refer to comments in xen_hvm_init_time_ops().
	 */
	xen_hvm_init_time_ops();

	/*
	 * The alternative logic (which patches the unlock/lock) runs before
	 * the smp bootup code is activated. Hence we need to set this up
	 * before the core kernel is patched. Otherwise we will have only
	 * modules patched but not core code.
	 */
	xen_init_spinlocks();
}
36
37static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
38{
39 int cpu;
40
41 native_smp_prepare_cpus(max_cpus);
42
43 if (xen_have_vector_callback) {
44 WARN_ON(xen_smp_intr_init(0));
45 xen_init_lock_cpu(0);
46 }
47
48 for_each_possible_cpu(cpu) {
49 if (cpu == 0)
50 continue;
51
52 /* Set default vcpu_id to make sure that we don't use cpu-0's */
53 per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
54 }
55}
56
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Release the Xen per-CPU resources of an offlined CPU, but only after
 * the common hotplug die path has completed successfully (returned 0).
 */
static void xen_hvm_cpu_die(unsigned int cpu)
{
	if (common_cpu_die(cpu) != 0)
		return;

	if (!xen_have_vector_callback)
		return;

	xen_smp_intr_free(cpu);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}
#else
/* Without CPU hotplug support a CPU can never be offlined. */
static void xen_hvm_cpu_die(unsigned int cpu)
{
	BUG();
}
#endif
74
75void __init xen_hvm_smp_init(void)
76{
77 smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
78 smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
79 smp_ops.smp_cpus_done = xen_smp_cpus_done;
80 smp_ops.cpu_die = xen_hvm_cpu_die;
81
82 if (!xen_have_vector_callback) {
83#ifdef CONFIG_PARAVIRT_SPINLOCKS
84 nopvspin = true;
85#endif
86 return;
87 }
88
89 smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
90 smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
91 smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
92}
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/thread_info.h>
3#include <asm/smp.h>
4
5#include <xen/events.h>
6
7#include "xen-ops.h"
8
/*
 * Prepare the boot CPU for SMP under Xen HVM: run the native preparation,
 * then layer the Xen-specific vcpu_info, time, and spinlock setup on top.
 * Must execute on CPU 0; the call order below is significant.
 */
static void __init xen_hvm_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	/*
	 * Setup vcpu_info for boot CPU. Secondary CPUs get their vcpu_info
	 * in xen_cpu_up_prepare_hvm().
	 */
	xen_vcpu_setup(0);

	/*
	 * Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS.
	 * Refer to comments in xen_hvm_init_time_ops().
	 */
	xen_hvm_init_time_ops();

	/*
	 * The alternative logic (which patches the unlock/lock) runs before
	 * the smp bootup code is activated. Hence we need to set this up
	 * before the core kernel is patched. Otherwise we will have only
	 * modules patched but not core code.
	 */
	xen_init_spinlocks();
}
34
35static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
36{
37 int cpu;
38
39 native_smp_prepare_cpus(max_cpus);
40
41 if (xen_have_vector_callback) {
42 WARN_ON(xen_smp_intr_init(0));
43 xen_init_lock_cpu(0);
44 }
45
46 for_each_possible_cpu(cpu) {
47 if (cpu == 0)
48 continue;
49
50 /* Set default vcpu_id to make sure that we don't use cpu-0's */
51 per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID;
52 }
53}
54
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Release the Xen per-CPU resources (IPI interrupts, spinlock state,
 * timer) of a CPU that has gone offline. Only relevant when the Xen
 * event-channel vector callback is in use.
 */
static void xen_hvm_cleanup_dead_cpu(unsigned int cpu)
{
	if (!xen_have_vector_callback)
		return;

	xen_smp_intr_free(cpu);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}
#else
/* Without CPU hotplug support a CPU can never be offlined. */
static void xen_hvm_cleanup_dead_cpu(unsigned int cpu)
{
	BUG();
}
#endif
70
71void __init xen_hvm_smp_init(void)
72{
73 smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
74 smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
75 smp_ops.smp_cpus_done = xen_smp_cpus_done;
76 smp_ops.cleanup_dead_cpu = xen_hvm_cleanup_dead_cpu;
77
78 if (!xen_have_vector_callback) {
79#ifdef CONFIG_PARAVIRT_SPINLOCKS
80 nopvspin = true;
81#endif
82 return;
83 }
84
85 smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
86 smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
87 smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
88}