v5.9
// SPDX-License-Identifier: GPL-2.0
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "smp.h"

static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

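/*
 * Unbind the per-cpu IPI and VIRQ handlers set up by xen_smp_intr_init()
 * and free the kasprintf()'d irq names backing them.  Safe to call for a
 * partially initialized cpu: unused slots stay at -1 and are skipped.
 */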
void xen_smp_intr_free(unsigned int cpu)
{
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
		kfree(per_cpu(xen_resched_irq, cpu).name);
		per_cpu(xen_resched_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfunc_irq, cpu).name);
		per_cpu(xen_callfunc_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
		kfree(per_cpu(xen_debug_irq, cpu).name);
		per_cpu(xen_debug_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	}
}

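/*
 * Bind the reschedule, call-function, debug and call-function-single
 * interrupt sources for @cpu.  On any failure everything already bound is
 * released again via xen_smp_intr_free() and the error code is returned.
 */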
int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;
	per_cpu(xen_resched_irq, cpu).name = resched_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
				     IRQF_PERCPU | IRQF_NOBALANCING,
				     debug_name, NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_debug_irq, cpu).irq = rc;
	per_cpu(xen_debug_irq, cpu).name = debug_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}

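/*
 * Late SMP bringup hook.  Without vcpu_info placement only MAX_VIRT_CPUS
 * vcpu_info slots exist in the shared_info page, so any CPU numbered beyond
 * that is taken offline again and its vcpu_info is reset.
 */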
void __init xen_smp_cpus_done(unsigned int max_cpus)
{
	int cpu, rc, count = 0;

	if (xen_hvm_domain())
		native_smp_cpus_done(max_cpus);
	else
		calculate_max_logical_packages();

	if (xen_have_vcpu_info_placement)
		return;

	for_each_online_cpu(cpu) {
		if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
			continue;

		rc = remove_cpu(cpu);

		if (rc == 0) {
			/*
			 * Reset vcpu_info so this cpu cannot be onlined again.
			 */
			xen_vcpu_info_reset(cpu);
			count++;
		} else {
			pr_warn("%s: failed to bring CPU %d down, error %d\n",
				__func__, cpu, rc);
		}
	}
	WARN(count, "%s: brought %d CPUs offline\n", __func__, count);
}

void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

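/*
 * Send @vector as an event channel notification to every online cpu in
 * @mask, one xen_send_IPI_one() call per cpu.
 */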
static void __xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

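/*
 * Translate a native x86 IPI vector number into the corresponding
 * XEN_*_VECTOR used with event channels; returns -1 for vectors that have
 * no Xen equivalent.
 */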
static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

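/*
 * Send @vector to all online cpus in @mask except the calling cpu.  Does
 * nothing on a single-cpu system or for vectors with no Xen mapping.
 */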
void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (!(num_online_cpus() > 1) || (xen_vector < 0))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}
v5.14.15
// SPDX-License-Identifier: GPL-2.0
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>

#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "smp.h"

static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 };

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();

	return IRQ_HANDLED;
}

void xen_smp_intr_free(unsigned int cpu)
{
	if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
		per_cpu(xen_resched_irq, cpu).irq = -1;
		kfree(per_cpu(xen_resched_irq, cpu).name);
		per_cpu(xen_resched_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
		per_cpu(xen_callfunc_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfunc_irq, cpu).name);
		per_cpu(xen_callfunc_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
		per_cpu(xen_debug_irq, cpu).irq = -1;
		kfree(per_cpu(xen_debug_irq, cpu).name);
		per_cpu(xen_debug_irq, cpu).name = NULL;
	}
	if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
		unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
				       NULL);
		per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
		kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
		per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
	}
}

int xen_smp_intr_init(unsigned int cpu)
{
	int rc;
	char *resched_name, *callfunc_name, *debug_name;

	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
				    cpu,
				    xen_reschedule_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    resched_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_resched_irq, cpu).irq = rc;
	per_cpu(xen_resched_irq, cpu).name = resched_name;

	callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
				    cpu,
				    xen_call_function_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfunc_irq, cpu).irq = rc;
	per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;

	/* VIRQ_DEBUG is bound only when the FIFO event channel ABI is not in use. */
	if (!xen_fifo_events) {
		debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
		rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
					     xen_debug_interrupt,
					     IRQF_PERCPU | IRQF_NOBALANCING,
					     debug_name, NULL);
		if (rc < 0)
			goto fail;
		per_cpu(xen_debug_irq, cpu).irq = rc;
		per_cpu(xen_debug_irq, cpu).name = debug_name;
	}

	callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
	rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
				    cpu,
				    xen_call_function_single_interrupt,
				    IRQF_PERCPU|IRQF_NOBALANCING,
				    callfunc_name,
				    NULL);
	if (rc < 0)
		goto fail;
	per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
	per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;

	return 0;

 fail:
	xen_smp_intr_free(cpu);
	return rc;
}

void __init xen_smp_cpus_done(unsigned int max_cpus)
{
	int cpu, rc, count = 0;

	if (xen_hvm_domain())
		native_smp_cpus_done(max_cpus);
	else
		calculate_max_logical_packages();

	if (xen_have_vcpu_info_placement)
		return;

	for_each_online_cpu(cpu) {
		if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS)
			continue;

		rc = remove_cpu(cpu);

		if (rc == 0) {
			/*
			 * Reset vcpu_info so this cpu cannot be onlined again.
			 */
			xen_vcpu_info_reset(cpu);
			count++;
		} else {
			pr_warn("%s: failed to bring CPU %d down, error %d\n",
				__func__, cpu, rc);
		}
	}
	WARN(count, "%s: brought %d CPUs offline\n", __func__, count);
}

void xen_smp_send_reschedule(int cpu)
{
	xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

static void __xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	unsigned cpu;

	for_each_cpu_and(cpu, mask, cpu_online_mask)
		xen_send_IPI_one(cpu, vector);
}

void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
	int cpu;

	__xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

	/* Make sure other vcpus get a chance to run if they need to. */
	for_each_cpu(cpu, mask) {
		if (xen_vcpu_stolen(cpu)) {
			HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
			break;
		}
	}
}

void xen_smp_send_call_function_single_ipi(int cpu)
{
	__xen_send_IPI_mask(cpumask_of(cpu),
			  XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static inline int xen_map_vector(int vector)
{
	int xen_vector;

	switch (vector) {
	case RESCHEDULE_VECTOR:
		xen_vector = XEN_RESCHEDULE_VECTOR;
		break;
	case CALL_FUNCTION_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_VECTOR;
		break;
	case CALL_FUNCTION_SINGLE_VECTOR:
		xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
		break;
	case IRQ_WORK_VECTOR:
		xen_vector = XEN_IRQ_WORK_VECTOR;
		break;
#ifdef CONFIG_X86_64
	case NMI_VECTOR:
	case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
		xen_vector = XEN_NMI_VECTOR;
		break;
#endif
	default:
		xen_vector = -1;
		printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
			vector);
	}

	return xen_vector;
}

void xen_send_IPI_mask(const struct cpumask *mask,
			      int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(mask, xen_vector);
}

void xen_send_IPI_all(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		__xen_send_IPI_mask(cpu_online_mask, xen_vector);
}

void xen_send_IPI_self(int vector)
{
	int xen_vector = xen_map_vector(vector);

	if (xen_vector >= 0)
		xen_send_IPI_one(smp_processor_id(), xen_vector);
}

void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
				int vector)
{
	unsigned cpu;
	unsigned int this_cpu = smp_processor_id();
	int xen_vector = xen_map_vector(vector);

	if (!(num_online_cpus() > 1) || (xen_vector < 0))
		return;

	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		if (this_cpu == cpu)
			continue;

		xen_send_IPI_one(cpu, xen_vector);
	}
}

void xen_send_IPI_allbutself(int vector)
{
	xen_send_IPI_mask_allbutself(cpu_online_mask, vector);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
	irq_enter();
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	irq_exit();

	return IRQ_HANDLED;
}