v4.10.11
 
  1/*
  2 * linux/kernel/irq/ipi.c
  3 *
  4 * Copyright (C) 2015 Imagination Technologies Ltd
  5 * Author: Qais Yousef <qais.yousef@imgtec.com>
  6 *
  7 * This file contains driver APIs to the IPI subsystem.
  8 */
  9
 10#define pr_fmt(fmt) "genirq/ipi: " fmt
 11
 12#include <linux/irqdomain.h>
 13#include <linux/irq.h>
 14
 15/**
 16 * irq_reserve_ipi() - Setup an IPI to destination cpumask
 17 * @domain:	IPI domain
 18 * @dest:	cpumask of cpus which can receive the IPI
 19 *
 20 * Allocate a virq that can be used to send IPI to any CPU in dest mask.
 21 *
 22 * On success it returns the Linux irq number; on failure it returns an error code.
 23 */
 24int irq_reserve_ipi(struct irq_domain *domain,
 25			     const struct cpumask *dest)
 26{
 27	unsigned int nr_irqs, offset;
 28	struct irq_data *data;
 29	int virq, i;
 30
 31	if (!domain || !irq_domain_is_ipi(domain)) {
 32		pr_warn("Reservation on a non IPI domain\n");
 33		return -EINVAL;
 34	}
 35
 36	if (!cpumask_subset(dest, cpu_possible_mask)) {
 37		pr_warn("Reservation is not in possible_cpu_mask\n");
 38		return -EINVAL;
 39	}
 40
 41	nr_irqs = cpumask_weight(dest);
 42	if (!nr_irqs) {
 43		pr_warn("Reservation for empty destination mask\n");
 44		return -EINVAL;
 45	}
 46
 47	if (irq_domain_is_ipi_single(domain)) {
 48		/*
 49		 * If the underlying implementation uses a single HW irq on
 50		 * all cpus then we only need a single Linux irq number for
 51		 * it. We have no restrictions vs. the destination mask. The
 52		 * underlying implementation can deal with holes nicely.
 53		 */
 54		nr_irqs = 1;
 55		offset = 0;
 56	} else {
 57		unsigned int next;
 58
 59		/*
 60		 * The IPI requires a separate HW irq on each CPU. We require
 61		 * that the destination mask is consecutive. If an
 62		 * implementation needs to support holes, it can reserve
 63		 * several IPI ranges.
 64		 */
 65		offset = cpumask_first(dest);
 66		/*
 67		 * Find a hole and if found look for another set bit after the
 68		 * hole. For now we don't support this scenario.
 69		 */
 70		next = cpumask_next_zero(offset, dest);
 71		if (next < nr_cpu_ids)
 72			next = cpumask_next(next, dest);
 73		if (next < nr_cpu_ids) {
 74			pr_warn("Destination mask has holes\n");
 75			return -EINVAL;
 76		}
 77	}
 78
 79	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE, NULL);
 80	if (virq <= 0) {
 81		pr_warn("Can't reserve IPI, failed to alloc descs\n");
 82		return -ENOMEM;
 83	}
 84
 85	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
 86				       (void *) dest, true, NULL);
 87
 88	if (virq <= 0) {
 89		pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");
 90		goto free_descs;
 91	}
 92
 93	for (i = 0; i < nr_irqs; i++) {
 94		data = irq_get_irq_data(virq + i);
 95		cpumask_copy(data->common->affinity, dest);
 96		data->common->ipi_offset = offset;
 97		irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
 98	}
 99	return virq;
100
101free_descs:
102	irq_free_descs(virq, nr_irqs);
103	return -EBUSY;
104}
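For context, a minimal sketch of how architecture SMP setup code might consume irq_reserve_ipi(); the domain argument, handler and the use of request_irq() are illustrative assumptions, not taken from this file (real users such as the MIPS SMP code follow a similar pattern).

/*
 * Hedged sketch: reserve one IPI usable towards every possible CPU and
 * wire up a per-CPU handler.  All "hypothetical_*" names are made up.
 * Assumes <linux/interrupt.h>, <linux/irq.h> and <linux/sched.h>.
 */
static unsigned int hypothetical_resched_virq;

static irqreturn_t hypothetical_resched_handler(int irq, void *dev_id)
{
	scheduler_ipi();		/* kick the scheduler on this CPU */
	return IRQ_HANDLED;
}

static int __init hypothetical_ipi_setup(struct irq_domain *ipidomain)
{
	int virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);

	if (virq < 0)
		return virq;

	hypothetical_resched_virq = virq;
	irq_set_handler(virq, handle_percpu_irq);
	return request_irq(virq, hypothetical_resched_handler, IRQF_PERCPU,
			   "IPI-resched", NULL);
}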
105
106/**
107 * irq_destroy_ipi() - unreserve an IPI that was previously allocated
108 * @irq:	linux irq number to be destroyed
109 * @dest:	cpumask of cpus which should have the IPI removed
110 *
111 * The IPIs allocated with irq_reserve_ipi() are returned to the system
112 * destroying all virqs associated with them.
113 *
114 * Return 0 on success or error code on failure.
115 */
116int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
117{
118	struct irq_data *data = irq_get_irq_data(irq);
119	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
120	struct irq_domain *domain;
121	unsigned int nr_irqs;
122
123	if (!irq || !data || !ipimask)
124		return -EINVAL;
125
126	domain = data->domain;
127	if (WARN_ON(domain == NULL))
128		return -EINVAL;
129
130	if (!irq_domain_is_ipi(domain)) {
131		pr_warn("Trying to destroy a non IPI domain!\n");
132		return -EINVAL;
133	}
134
135	if (WARN_ON(!cpumask_subset(dest, ipimask)))
136		/*
137		 * Must be destroying a subset of CPUs to which this IPI
138		 * was set up to target
139		 */
140		return -EINVAL;
141
142	if (irq_domain_is_ipi_per_cpu(domain)) {
143		irq = irq + cpumask_first(dest) - data->common->ipi_offset;
144		nr_irqs = cpumask_weight(dest);
145	} else {
146		nr_irqs = 1;
147	}
148
149	irq_domain_free_irqs(irq, nr_irqs);
150	return 0;
151}
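The matching tear-down for a reservation like the one sketched above might look as follows; the names are again hypothetical, and the mask must be the one handed to irq_reserve_ipi() or a subset of it, per the check above.

/* Hedged sketch: release the handler first, then the reserved virqs. */
static void hypothetical_ipi_teardown(void)
{
	free_irq(hypothetical_resched_virq, NULL);
	irq_destroy_ipi(hypothetical_resched_virq, cpu_possible_mask);
}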
152
153/**
154 * ipi_get_hwirq - Get the hwirq associated with an IPI to a cpu
155 * @irq:	linux irq number
156 * @cpu:	the target cpu
157 *
158 * When dealing with coprocessor IPIs, we need to inform the coprocessor of
159 * the hwirq it needs to use to receive and send IPIs.
160 *
161 * Returns hwirq value on success and INVALID_HWIRQ on failure.
162 */
163irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
164{
165	struct irq_data *data = irq_get_irq_data(irq);
166	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
167
168	if (!data || !ipimask || cpu >= nr_cpu_ids)
169		return INVALID_HWIRQ;
170
171	if (!cpumask_test_cpu(cpu, ipimask))
172		return INVALID_HWIRQ;
173
174	/*
175	 * Get the real hardware irq number if the underlying implementation
176	 * uses a separate irq per cpu. If the underlying implementation uses
177	 * a single hardware irq for all cpus then the IPI send mechanism
178	 * needs to take care of the cpu destinations.
179	 */
180	if (irq_domain_is_ipi_per_cpu(data->domain))
181		data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);
182
183	return data ? irqd_to_hwirq(data) : INVALID_HWIRQ;
184}
185EXPORT_SYMBOL_GPL(ipi_get_hwirq);
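A hedged sketch of the coprocessor case the comment above alludes to: the driver asks for the raw hwirq and hands it to firmware. fw_write_ipi_config() is a placeholder for whatever mailbox or register interface a real driver would use.

/* Hedged sketch; fw_write_ipi_config() is hypothetical. */
static int hypothetical_tell_coproc(unsigned int ipi_virq, unsigned int coproc_cpu)
{
	irq_hw_number_t hwirq = ipi_get_hwirq(ipi_virq, coproc_cpu);

	if (hwirq == INVALID_HWIRQ)
		return -ENODEV;

	fw_write_ipi_config(coproc_cpu, hwirq);
	return 0;
}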
186
187static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
188			   const struct cpumask *dest, unsigned int cpu)
189{
190	struct cpumask *ipimask = irq_data_get_affinity_mask(data);
191
192	if (!chip || !ipimask)
193		return -EINVAL;
194
195	if (!chip->ipi_send_single && !chip->ipi_send_mask)
196		return -EINVAL;
197
198	if (cpu >= nr_cpu_ids)
199		return -EINVAL;
200
201	if (dest) {
202		if (!cpumask_subset(dest, ipimask))
203			return -EINVAL;
204	} else {
205		if (!cpumask_test_cpu(cpu, ipimask))
206			return -EINVAL;
207	}
208	return 0;
209}
210
211/**
212 * __ipi_send_single - send an IPI to a target Linux SMP CPU
213 * @desc:	pointer to irq_desc of the IRQ
214 * @cpu:	destination CPU, must be in the destination mask passed to
215 *		irq_reserve_ipi()
216 *
217 * This function is for architecture or core code to speed up IPI sending. Not
218 * usable from driver code.
219 *
220 * Returns zero on success and negative error number on failure.
221 */
222int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
223{
224	struct irq_data *data = irq_desc_get_irq_data(desc);
225	struct irq_chip *chip = irq_data_get_irq_chip(data);
226
227#ifdef DEBUG
228	/*
229	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
230	 * Since the callers should be arch or core code which is generally
231	 * trusted, only check for errors when debugging.
232	 */
233	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
234		return -EINVAL;
235#endif
236	if (!chip->ipi_send_single) {
237		chip->ipi_send_mask(data, cpumask_of(cpu));
238		return 0;
239	}
240
241	/* FIXME: Store this information in irqdata flags */
242	if (irq_domain_is_ipi_per_cpu(data->domain) &&
243	    cpu != data->common->ipi_offset) {
244		/* use the correct data for that cpu */
245		unsigned irq = data->irq + cpu - data->common->ipi_offset;
246
247		data = irq_get_irq_data(irq);
248	}
249	chip->ipi_send_single(data, cpu);
250	return 0;
251}
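To illustrate the fast path this helper exists for: arch code typically caches the irq_desc once at boot and calls __ipi_send_single() directly on the hot path, skipping the virq-to-desc lookup and verification done by ipi_send_single(). The sketch below uses hypothetical names.

/* Hedged sketch of a cached fast path; names are hypothetical. */
static struct irq_desc *hypothetical_resched_desc;

static int __init hypothetical_cache_ipi_desc(unsigned int virq)
{
	hypothetical_resched_desc = irq_to_desc(virq);
	return hypothetical_resched_desc ? 0 : -ENODEV;
}

static void hypothetical_smp_send_reschedule(int cpu)
{
	__ipi_send_single(hypothetical_resched_desc, cpu);
}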
252
253/**
254 * __ipi_send_mask - send an IPI to target Linux SMP CPU(s)
255 * @desc:	pointer to irq_desc of the IRQ
256 * @dest:	dest CPU(s), must be a subset of the mask passed to
257 *		irq_reserve_ipi()
258 *
259 * This function is for architecture or core code to speed up IPI sending. Not
260 * usable from driver code.
261 *
262 * Returns zero on success and negative error number on failure.
263 */
264int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
265{
266	struct irq_data *data = irq_desc_get_irq_data(desc);
267	struct irq_chip *chip = irq_data_get_irq_chip(data);
268	unsigned int cpu;
269
270#ifdef DEBUG
271	/*
272	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
273	 * Since the callers should be arch or core code which is generally
274	 * trusted, only check for errors when debugging.
275	 */
276	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
277		return -EINVAL;
278#endif
279	if (chip->ipi_send_mask) {
280		chip->ipi_send_mask(data, dest);
281		return 0;
282	}
283
284	if (irq_domain_is_ipi_per_cpu(data->domain)) {
285		unsigned int base = data->irq;
286
287		for_each_cpu(cpu, dest) {
288			unsigned irq = base + cpu - data->common->ipi_offset;
289
290			data = irq_get_irq_data(irq);
291			chip->ipi_send_single(data, cpu);
292		}
293	} else {
294		for_each_cpu(cpu, dest)
295			chip->ipi_send_single(data, cpu);
296	}
297	return 0;
298}
299
300/**
301 * ipi_send_single - Send an IPI to a single CPU
302 * @virq:	linux irq number from irq_reserve_ipi()
303 * @cpu:	destination CPU, must be in the destination mask passed to
304 *		irq_reserve_ipi()
305 *
306 * Returns zero on success and negative error number on failure.
307 */
308int ipi_send_single(unsigned int virq, unsigned int cpu)
309{
310	struct irq_desc *desc = irq_to_desc(virq);
311	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
312	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;
313
314	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
315		return -EINVAL;
316
317	return __ipi_send_single(desc, cpu);
318}
319EXPORT_SYMBOL_GPL(ipi_send_single);
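A short hedged usage sketch: sending a previously reserved IPI to one CPU and logging a failure. The virq is assumed to come from irq_reserve_ipi() and the CPU must be in the mask passed there.

/* Hedged sketch; "virq" is assumed to come from irq_reserve_ipi(). */
static void hypothetical_kick_cpu(unsigned int virq, unsigned int target_cpu)
{
	int err = ipi_send_single(virq, target_cpu);

	if (err)
		pr_warn("IPI to CPU%u failed: %d\n", target_cpu, err);
}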
320
321/**
322 * ipi_send_mask - Send an IPI to target CPU(s)
323 * @virq:	linux irq number from irq_reserve_ipi()
324 * @dest:	dest CPU(s), must be a subset of the mask passed to
325 *		irq_reserve_ipi()
326 *
327 * Returns zero on success and negative error number on failure.
328 */
329int ipi_send_mask(unsigned int virq, const struct cpumask *dest)
330{
331	struct irq_desc *desc = irq_to_desc(virq);
332	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
333	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;
334
335	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
336		return -EINVAL;
337
338	return __ipi_send_mask(desc, dest);
339}
340EXPORT_SYMBOL_GPL(ipi_send_mask);
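And a hedged sketch for the mask variant: kicking every other online CPU in one call. It assumes the IPI was reserved over cpu_possible_mask, so cpu_online_mask is always a valid subset; the on-stack cpumask keeps the sketch short, where real code with large NR_CPUS would use a preallocated mask.

/* Hedged sketch; assumes the IPI was reserved over cpu_possible_mask. */
static void hypothetical_kick_others(unsigned int virq)
{
	struct cpumask dest;
	unsigned int self = get_cpu();	/* pin ourselves while reading "self" */

	cpumask_copy(&dest, cpu_online_mask);
	cpumask_clear_cpu(self, &dest);
	if (!cpumask_empty(&dest))
		ipi_send_mask(virq, &dest);
	put_cpu();
}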
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2015 Imagination Technologies Ltd
  4 * Author: Qais Yousef <qais.yousef@imgtec.com>
  5 *
  6 * This file contains driver APIs to the IPI subsystem.
  7 */
  8
  9#define pr_fmt(fmt) "genirq/ipi: " fmt
 10
 11#include <linux/irqdomain.h>
 12#include <linux/irq.h>
 13
 14/**
 15 * irq_reserve_ipi() - Setup an IPI to destination cpumask
 16 * @domain:	IPI domain
 17 * @dest:	cpumask of CPUs which can receive the IPI
 18 *
 19 * Allocate a virq that can be used to send IPI to any CPU in dest mask.
 20 *
 21 * Return: Linux IRQ number on success or error code on failure
 22 */
 23int irq_reserve_ipi(struct irq_domain *domain,
 24			     const struct cpumask *dest)
 25{
 26	unsigned int nr_irqs, offset;
 27	struct irq_data *data;
 28	int virq, i;
 29
 30	if (!domain || !irq_domain_is_ipi(domain)) {
 31		pr_warn("Reservation on a non IPI domain\n");
 32		return -EINVAL;
 33	}
 34
 35	if (!cpumask_subset(dest, cpu_possible_mask)) {
 36		pr_warn("Reservation is not in possible_cpu_mask\n");
 37		return -EINVAL;
 38	}
 39
 40	nr_irqs = cpumask_weight(dest);
 41	if (!nr_irqs) {
 42		pr_warn("Reservation for empty destination mask\n");
 43		return -EINVAL;
 44	}
 45
 46	if (irq_domain_is_ipi_single(domain)) {
 47		/*
 48		 * If the underlying implementation uses a single HW irq on
 49		 * all cpus then we only need a single Linux irq number for
 50		 * it. We have no restrictions vs. the destination mask. The
 51		 * underlying implementation can deal with holes nicely.
 52		 */
 53		nr_irqs = 1;
 54		offset = 0;
 55	} else {
 56		unsigned int next;
 57
 58		/*
 59		 * The IPI requires a separate HW irq on each CPU. We require
 60		 * that the destination mask is consecutive. If an
 61		 * implementation needs to support holes, it can reserve
 62		 * several IPI ranges.
 63		 */
 64		offset = cpumask_first(dest);
 65		/*
 66		 * Find a hole and if found look for another set bit after the
 67		 * hole. For now we don't support this scenario.
 68		 */
 69		next = cpumask_next_zero(offset, dest);
 70		if (next < nr_cpu_ids)
 71			next = cpumask_next(next, dest);
 72		if (next < nr_cpu_ids) {
 73			pr_warn("Destination mask has holes\n");
 74			return -EINVAL;
 75		}
 76	}
 77
 78	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE, NULL);
 79	if (virq <= 0) {
 80		pr_warn("Can't reserve IPI, failed to alloc descs\n");
 81		return -ENOMEM;
 82	}
 83
 84	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
 85				       (void *) dest, true, NULL);
 86
 87	if (virq <= 0) {
 88		pr_warn("Can't reserve IPI, failed to alloc hw irqs\n");
 89		goto free_descs;
 90	}
 91
 92	for (i = 0; i < nr_irqs; i++) {
 93		data = irq_get_irq_data(virq + i);
 94		cpumask_copy(data->common->affinity, dest);
 95		data->common->ipi_offset = offset;
 96		irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
 97	}
 98	return virq;
 99
100free_descs:
101	irq_free_descs(virq, nr_irqs);
102	return -EBUSY;
103}
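To make the offset arithmetic above concrete: for an IPI-per-cpu domain reserved over, say, CPUs 2-5, ipi_offset is 2 and the descriptor for CPU n sits at virq + n - 2, while a single-HW-irq domain uses one descriptor for all CPUs. A hedged helper illustrating that mapping (not part of this file):

/* Hedged illustration of the virq/ipi_offset mapping set up above. */
static struct irq_data *hypothetical_ipi_data_for_cpu(unsigned int virq,
						       unsigned int cpu)
{
	struct irq_data *data = irq_get_irq_data(virq);

	if (!data)
		return NULL;

	if (irq_domain_is_ipi_per_cpu(data->domain))
		return irq_get_irq_data(virq + cpu - data->common->ipi_offset);

	return data;	/* one HW irq shared by all CPUs */
}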
104
105/**
106 * irq_destroy_ipi() - unreserve an IPI that was previously allocated
107 * @irq:	Linux IRQ number to be destroyed
108 * @dest:	cpumask of CPUs which should have the IPI removed
109 *
110 * The IPIs allocated with irq_reserve_ipi() are returned to the system
111 * destroying all virqs associated with them.
112 *
113 * Return: %0 on success or error code on failure.
114 */
115int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
116{
117	struct irq_data *data = irq_get_irq_data(irq);
118	const struct cpumask *ipimask;
119	struct irq_domain *domain;
120	unsigned int nr_irqs;
121
122	if (!irq || !data)
123		return -EINVAL;
124
125	domain = data->domain;
126	if (WARN_ON(domain == NULL))
127		return -EINVAL;
128
129	if (!irq_domain_is_ipi(domain)) {
130		pr_warn("Trying to destroy a non IPI domain!\n");
131		return -EINVAL;
132	}
133
134	ipimask = irq_data_get_affinity_mask(data);
135	if (!ipimask || WARN_ON(!cpumask_subset(dest, ipimask)))
136		/*
137		 * Must be destroying a subset of CPUs to which this IPI
138		 * was set up to target
139		 */
140		return -EINVAL;
141
142	if (irq_domain_is_ipi_per_cpu(domain)) {
143		irq = irq + cpumask_first(dest) - data->common->ipi_offset;
144		nr_irqs = cpumask_weight(dest);
145	} else {
146		nr_irqs = 1;
147	}
148
149	irq_domain_free_irqs(irq, nr_irqs);
150	return 0;
151}
152
153/**
154 * ipi_get_hwirq - Get the hwirq associated with an IPI to a CPU
155 * @irq:	Linux IRQ number
156 * @cpu:	the target CPU
157 *
158 * When dealing with coprocessor IPIs, we need to inform the coprocessor of
159 * the hwirq it needs to use to receive and send IPIs.
160 *
161 * Return: hwirq value on success or INVALID_HWIRQ on failure.
162 */
163irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu)
164{
165	struct irq_data *data = irq_get_irq_data(irq);
166	const struct cpumask *ipimask;
167
168	if (!data || cpu >= nr_cpu_ids)
169		return INVALID_HWIRQ;
170
171	ipimask = irq_data_get_affinity_mask(data);
172	if (!ipimask || !cpumask_test_cpu(cpu, ipimask))
173		return INVALID_HWIRQ;
174
175	/*
176	 * Get the real hardware irq number if the underlying implementation
177	 * uses a separate irq per cpu. If the underlying implementation uses
178	 * a single hardware irq for all cpus then the IPI send mechanism
179	 * needs to take care of the cpu destinations.
180	 */
181	if (irq_domain_is_ipi_per_cpu(data->domain))
182		data = irq_get_irq_data(irq + cpu - data->common->ipi_offset);
183
184	return data ? irqd_to_hwirq(data) : INVALID_HWIRQ;
185}
186EXPORT_SYMBOL_GPL(ipi_get_hwirq);
187
188static int ipi_send_verify(struct irq_chip *chip, struct irq_data *data,
189			   const struct cpumask *dest, unsigned int cpu)
190{
191	const struct cpumask *ipimask;
192
193	if (!chip || !data)
194		return -EINVAL;
195
196	if (!chip->ipi_send_single && !chip->ipi_send_mask)
197		return -EINVAL;
198
199	if (cpu >= nr_cpu_ids)
200		return -EINVAL;
201
202	ipimask = irq_data_get_affinity_mask(data);
203	if (!ipimask)
204		return -EINVAL;
205
206	if (dest) {
207		if (!cpumask_subset(dest, ipimask))
208			return -EINVAL;
209	} else {
210		if (!cpumask_test_cpu(cpu, ipimask))
211			return -EINVAL;
212	}
213	return 0;
214}
215
216/**
217 * __ipi_send_single - send an IPI to a target Linux SMP CPU
218 * @desc:	pointer to irq_desc of the IRQ
219 * @cpu:	destination CPU, must be in the destination mask passed to
220 *		irq_reserve_ipi()
221 *
222 * This function is for architecture or core code to speed up IPI sending. Not
223 * usable from driver code.
224 *
225 * Return: %0 on success or negative error number on failure.
226 */
227int __ipi_send_single(struct irq_desc *desc, unsigned int cpu)
228{
229	struct irq_data *data = irq_desc_get_irq_data(desc);
230	struct irq_chip *chip = irq_data_get_irq_chip(data);
231
232#ifdef DEBUG
233	/*
234	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
235	 * Since the callers should be arch or core code which is generally
236	 * trusted, only check for errors when debugging.
237	 */
238	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
239		return -EINVAL;
240#endif
241	if (!chip->ipi_send_single) {
242		chip->ipi_send_mask(data, cpumask_of(cpu));
243		return 0;
244	}
245
246	/* FIXME: Store this information in irqdata flags */
247	if (irq_domain_is_ipi_per_cpu(data->domain) &&
248	    cpu != data->common->ipi_offset) {
249		/* use the correct data for that cpu */
250		unsigned irq = data->irq + cpu - data->common->ipi_offset;
251
252		data = irq_get_irq_data(irq);
253	}
254	chip->ipi_send_single(data, cpu);
255	return 0;
256}
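For reference, the driver side these helpers call into is an irqchip that fills in one or both IPI callbacks in its struct irq_chip. A hedged sketch with a made-up register accessor:

/* Hedged sketch; hypothetical_doorbell_write() is made up. */
static void hypothetical_chip_ipi_send_single(struct irq_data *d, unsigned int cpu)
{
	/* e.g. poke a per-CPU doorbell with the hardware IPI number */
	hypothetical_doorbell_write(cpu, irqd_to_hwirq(d));
}

static struct irq_chip hypothetical_ipi_chip = {
	.name			= "HYP-IPI",
	.ipi_send_single	= hypothetical_chip_ipi_send_single,
	/* .ipi_send_mask is optional; the core falls back to single sends */
};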
257
258/**
259 * __ipi_send_mask - send an IPI to target Linux SMP CPU(s)
260 * @desc:	pointer to irq_desc of the IRQ
261 * @dest:	dest CPU(s), must be a subset of the mask passed to
262 *		irq_reserve_ipi()
263 *
264 * This function is for architecture or core code to speed up IPI sending. Not
265 * usable from driver code.
266 *
267 * Return: %0 on success or negative error number on failure.
268 */
269int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest)
270{
271	struct irq_data *data = irq_desc_get_irq_data(desc);
272	struct irq_chip *chip = irq_data_get_irq_chip(data);
273	unsigned int cpu;
274
275#ifdef DEBUG
276	/*
277	 * Minimise the overhead by omitting the checks for Linux SMP IPIs.
278	 * Since the callers should be arch or core code which is generally
279	 * trusted, only check for errors when debugging.
280	 */
281	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
282		return -EINVAL;
283#endif
284	if (chip->ipi_send_mask) {
285		chip->ipi_send_mask(data, dest);
286		return 0;
287	}
288
289	if (irq_domain_is_ipi_per_cpu(data->domain)) {
290		unsigned int base = data->irq;
291
292		for_each_cpu(cpu, dest) {
293			unsigned irq = base + cpu - data->common->ipi_offset;
294
295			data = irq_get_irq_data(irq);
296			chip->ipi_send_single(data, cpu);
297		}
298	} else {
299		for_each_cpu(cpu, dest)
300			chip->ipi_send_single(data, cpu);
301	}
302	return 0;
303}
304
305/**
306 * ipi_send_single - Send an IPI to a single CPU
307 * @virq:	Linux IRQ number from irq_reserve_ipi()
308 * @cpu:	destination CPU, must be in the destination mask passed to
309 *		irq_reserve_ipi()
310 *
311 * Return: %0 on success or negative error number on failure.
312 */
313int ipi_send_single(unsigned int virq, unsigned int cpu)
314{
315	struct irq_desc *desc = irq_to_desc(virq);
316	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
317	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;
318
319	if (WARN_ON_ONCE(ipi_send_verify(chip, data, NULL, cpu)))
320		return -EINVAL;
321
322	return __ipi_send_single(desc, cpu);
323}
324EXPORT_SYMBOL_GPL(ipi_send_single);
325
326/**
327 * ipi_send_mask - Send an IPI to target CPU(s)
328 * @virq:	Linux IRQ number from irq_reserve_ipi()
329 * @dest:	dest CPU(s), must be a subset of the mask passed to
330 *		irq_reserve_ipi()
331 *
332 * Return: %0 on success or negative error number on failure.
333 */
334int ipi_send_mask(unsigned int virq, const struct cpumask *dest)
335{
336	struct irq_desc *desc = irq_to_desc(virq);
337	struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;
338	struct irq_chip *chip = data ? irq_data_get_irq_chip(data) : NULL;
339
340	if (WARN_ON_ONCE(ipi_send_verify(chip, data, dest, 0)))
341		return -EINVAL;
342
343	return __ipi_send_mask(desc, dest);
344}
345EXPORT_SYMBOL_GPL(ipi_send_mask);