// SPDX-License-Identifier: GPL-2.0
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "smp.h"

static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

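/*
 * Tear down the per-CPU IPI/VIRQ bindings established by
 * xen_smp_intr_init(). Safe to call for a partially initialized CPU:
 * entries whose irq is still -1 are skipped.
 */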
void xen_smp_intr_free(unsigned int cpu)
{
	kfree(per_cpu(xen_resched_irq, cpu).name);
	per_cpu(xen_resched_irq, cpu).name = NULL;
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_callfunc_irq, cpu).name);
	per_cpu(xen_callfunc_irq, cpu).name = NULL;
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_debug_irq, cpu).name);
	per_cpu(xen_debug_irq, cpu).name = NULL;
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
	}
	kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
	per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
	}
}

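/*
 * Bind the reschedule, call-function and call-function-single IPIs (and,
 * when 2-level event channels are in use rather than FIFO ones, the
 * VIRQ_DEBUG virq) to per-CPU handlers for @cpu. On any failure the
 * partially installed handlers are torn down via xen_smp_intr_free()
 * and the negative errno is returned.
 */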
int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	per_cpu(xen_resched_irq, cpu).name = resched_name;
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;

	if (!xen_fifo_events) {
		debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
		per_cpu(xen_debug_irq, cpu).name = debug_name;
		rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
					     xen_debug_interrupt,
					     IRQF_PERCPU | IRQF_NOBALANCING,
					     debug_name, NULL);
		if (rc < 0)
			goto fail;
		per_cpu(xen_debug_irq, cpu).irq = rc;
	}

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}

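/*
 * Late SMP bringup hook: HVM guests defer to the native implementation,
 * while PV guests only need the logical package count recomputed.
 */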
void __init xen_smp_cpus_done(unsigned int max_cpus)
{
	if (xen_hvm_domain())
		native_smp_cpus_done(max_cpus);
	else
		calculate_max_logical_packages();
}

void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

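/*
 * Deliver @vector to every CPU in @mask that is currently online;
 * offline CPUs are silently skipped.
 */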
static void __xen_send_IPI_mask(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			    XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

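/*
 * Translate a native x86 IPI vector into its Xen equivalent. Returns
 * -1 (and logs an error) for vectors that have no Xen mapping.
 */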
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
		       int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

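/*
 * Send @vector to all online CPUs in @mask except the calling CPU.
 * A no-op on a single-CPU system or for an unmappable vector.
 */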
void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				  int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (!(num_online_cpus() > 1) || (xen_vector < 0))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);

	return IRQ_HANDLED;
}