// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014 Intel Corp.
 * Author: Jiang Liu <jiang.liu@linux.intel.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file contains common code to support Message Signalled Interrupts for
 * PCI compatible and non PCI compatible devices.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>

#include "internals.h"

/**
 * alloc_msi_entry - Allocate and initialize an msi_entry
 * @dev:	Pointer to the device for which this is allocated
 * @nvec:	The number of vectors used in this entry
 * @affinity:	Optional pointer to an affinity mask array of size @nvec
 *
 * If @affinity is not NULL then an affinity array[@nvec] is allocated
 * and the affinity masks and flags from @affinity are copied.
 */
struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity)
{
	struct msi_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->list);
	desc->dev = dev;
	desc->nvec_used = nvec;
	if (affinity) {
		desc->affinity = kmemdup(affinity,
			nvec * sizeof(*desc->affinity), GFP_KERNEL);
		if (!desc->affinity) {
			kfree(desc);
			return NULL;
		}
	}

	return desc;
}

void free_msi_entry(struct msi_desc *entry)
{
	kfree(entry->affinity);
	kfree(entry);
}

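/*
 * Usage sketch (hypothetical, not taken from this file): code that builds
 * its own MSI descriptors would pair the two helpers above roughly like
 * this; "my_dev" and "masks" are assumed to be provided by the caller.
 *
 *	struct irq_affinity_desc masks[2] = { };
 *	struct msi_desc *desc;
 *
 *	desc = alloc_msi_entry(my_dev, 2, masks);
 *	if (!desc)
 *		return -ENOMEM;
 *	// ... hand the descriptor to the MSI core / domain code ...
 *	free_msi_entry(desc);
 */
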
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);

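/*
 * Usage sketch (hypothetical): callers that need the message last written
 * for an interrupt can read the cached copy instead of touching hardware;
 * "irq" is assumed to be a Linux interrupt number with an MSI descriptor
 * attached.
 *
 *	struct msi_msg msg;
 *
 *	get_cached_msi_msg(irq, &msg);
 *	// msg.address_lo, msg.address_hi and msg.data now hold the
 *	// cached MSI message for this interrupt
 */
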
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static inline void irq_chip_write_msi_msg(struct irq_data *data,
					  struct msi_msg *msg)
{
	data->chip->irq_write_msi_msg(data, msg);
}

static void msi_check_level(struct irq_domain *domain, struct msi_msg *msg)
{
	struct msi_domain_info *info = domain->host_data;

	/*
	 * If the MSI provider has messed with the second message and
	 * not advertised that it is level-capable, signal the breakage.
	 */
	WARN_ON(!((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		  (info->chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)) &&
		(msg[1].address_lo || msg[1].address_hi || msg[1].data));
}

/**
 * msi_domain_set_affinity - Generic affinity setter function for MSI domains
 * @irq_data:	The irq data associated to the interrupt
 * @mask:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Intended to be used by MSI interrupt controllers which are
 * implemented with hierarchical domains.
 */
int msi_domain_set_affinity(struct irq_data *irq_data,
			    const struct cpumask *mask, bool force)
{
	struct irq_data *parent = irq_data->parent_data;
	struct msi_msg msg[2] = { [1] = { }, };
	int ret;

	ret = parent->chip->irq_set_affinity(parent, mask, force);
	if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
		BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
		msi_check_level(irq_data->domain, msg);
		irq_chip_write_msi_msg(irq_data, msg);
	}

	return ret;
}

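/*
 * Usage sketch (hypothetical): an MSI irq_chip in a hierarchical domain can
 * use msi_domain_set_affinity() directly as its affinity callback, as shown
 * below, or leave .irq_set_affinity NULL and let msi_domain_update_chip_ops()
 * fill it in via MSI_FLAG_USE_DEF_CHIP_OPS. my_msi_mask(), my_msi_unmask()
 * and my_msi_write_msg() are assumed, driver-provided callbacks.
 *
 *	static struct irq_chip my_msi_chip = {
 *		.name			= "my-msi",
 *		.irq_mask		= my_msi_mask,
 *		.irq_unmask		= my_msi_unmask,
 *		.irq_set_affinity	= msi_domain_set_affinity,
 *		.irq_write_msi_msg	= my_msi_write_msg,
 *	};
 */
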
static int msi_domain_activate(struct irq_domain *domain,
			       struct irq_data *irq_data, bool early)
{
	struct msi_msg msg[2] = { [1] = { }, };

	BUG_ON(irq_chip_compose_msi_msg(irq_data, msg));
	msi_check_level(irq_data->domain, msg);
	irq_chip_write_msi_msg(irq_data, msg);
	return 0;
}

static void msi_domain_deactivate(struct irq_domain *domain,
				  struct irq_data *irq_data)
{
	struct msi_msg msg[2];

	memset(msg, 0, sizeof(msg));
	irq_chip_write_msi_msg(irq_data, msg);
}

static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	irq_hw_number_t hwirq = ops->get_hwirq(info, arg);
	int i, ret;

	if (irq_find_mapping(domain, hwirq) > 0)
		return -EEXIST;

	if (domain->parent) {
		ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < nr_irqs; i++) {
		ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg);
		if (ret < 0) {
			if (ops->msi_free) {
				for (i--; i > 0; i--)
					ops->msi_free(domain, info, virq + i);
			}
			irq_domain_free_irqs_top(domain, virq, nr_irqs);
			return ret;
		}
	}

	return 0;
}

static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs)
{
	struct msi_domain_info *info = domain->host_data;
	int i;

	if (info->ops->msi_free) {
		for (i = 0; i < nr_irqs; i++)
			info->ops->msi_free(domain, info, virq + i);
	}
	irq_domain_free_irqs_top(domain, virq, nr_irqs);
}

static const struct irq_domain_ops msi_domain_ops = {
	.alloc		= msi_domain_alloc,
	.free		= msi_domain_free,
	.activate	= msi_domain_activate,
	.deactivate	= msi_domain_deactivate,
};

#ifdef GENERIC_MSI_DOMAIN_OPS
static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
						msi_alloc_info_t *arg)
{
	return arg->hwirq;
}

static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev,
				  int nvec, msi_alloc_info_t *arg)
{
	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void msi_domain_ops_set_desc(msi_alloc_info_t *arg,
				    struct msi_desc *desc)
{
	arg->desc = desc;
}
#else
#define msi_domain_ops_get_hwirq	NULL
#define msi_domain_ops_prepare		NULL
#define msi_domain_ops_set_desc		NULL
#endif /* !GENERIC_MSI_DOMAIN_OPS */

static int msi_domain_ops_init(struct irq_domain *domain,
			       struct msi_domain_info *info,
			       unsigned int virq, irq_hw_number_t hwirq,
			       msi_alloc_info_t *arg)
{
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip,
				      info->chip_data);
	if (info->handler && info->handler_name) {
		__irq_set_handler(virq, info->handler, 0, info->handler_name);
		if (info->handler_data)
			irq_set_handler_data(virq, info->handler_data);
	}
	return 0;
}

static int msi_domain_ops_check(struct irq_domain *domain,
				struct msi_domain_info *info,
				struct device *dev)
{
	return 0;
}

static struct msi_domain_ops msi_domain_ops_default = {
	.get_hwirq	= msi_domain_ops_get_hwirq,
	.msi_init	= msi_domain_ops_init,
	.msi_check	= msi_domain_ops_check,
	.msi_prepare	= msi_domain_ops_prepare,
	.set_desc	= msi_domain_ops_set_desc,
};

static void msi_domain_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	if (ops == NULL) {
		info->ops = &msi_domain_ops_default;
		return;
	}

	if (ops->get_hwirq == NULL)
		ops->get_hwirq = msi_domain_ops_default.get_hwirq;
	if (ops->msi_init == NULL)
		ops->msi_init = msi_domain_ops_default.msi_init;
	if (ops->msi_check == NULL)
		ops->msi_check = msi_domain_ops_default.msi_check;
	if (ops->msi_prepare == NULL)
		ops->msi_prepare = msi_domain_ops_default.msi_prepare;
	if (ops->set_desc == NULL)
		ops->set_desc = msi_domain_ops_default.set_desc;
}

static void msi_domain_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
}

/**
 * msi_create_irq_domain - Create a MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 */
struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent)
{
	struct irq_domain *domain;

	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
		msi_domain_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		msi_domain_update_chip_ops(info);

	domain = irq_domain_create_hierarchy(parent, IRQ_DOMAIN_FLAG_MSI, 0,
					     fwnode, &msi_domain_ops, info);

	if (domain && !domain->name && info->chip)
		domain->name = info->chip->name;

	return domain;
}

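/*
 * Usage sketch (hypothetical): an interrupt controller driver typically
 * fills in a struct msi_domain_info, lets the default ops cover whatever it
 * does not override, and stacks the MSI domain on top of its parent domain.
 * my_msi_chip, my_msi_prepare, fwnode and parent are assumed to come from
 * the driver.
 *
 *	static struct msi_domain_ops my_msi_ops = {
 *		.msi_prepare	= my_msi_prepare,
 *		// the remaining callbacks fall back to msi_domain_ops_default
 *		// because MSI_FLAG_USE_DEF_DOM_OPS is set below
 *	};
 *
 *	static struct msi_domain_info my_msi_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.ops	= &my_msi_ops,
 *		.chip	= &my_msi_chip,
 *	};
 *
 *	msi_domain = msi_create_irq_domain(fwnode, &my_msi_info, parent);
 */
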
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	int ret;

	ret = ops->msi_check(domain, info, dev);
	if (ret == 0)
		ret = ops->msi_prepare(domain, dev, nvec, arg);

	return ret;
}

int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *arg)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct msi_desc *desc;
	int ret = 0;

	for_each_msi_entry(desc, dev) {
		/* Don't even try the multi-MSI brain damage. */
		if (WARN_ON(!desc->irq || desc->nvec_used != 1)) {
			ret = -EINVAL;
			break;
		}

		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
			continue;

		ops->set_desc(arg, desc);
		/* Assumes the domain mutex is held! */
		ret = irq_domain_alloc_irqs_hierarchy(domain, desc->irq, 1,
						      arg);
		if (ret)
			break;

		irq_set_msi_desc_off(desc->irq, 0, desc);
	}

	if (ret) {
		/* Mop up the damage */
		for_each_msi_entry(desc, dev) {
			if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
				continue;

			irq_domain_free_irqs_common(domain, desc->irq, 1);
		}
	}

	return ret;
}

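/*
 * Usage sketch (hypothetical): bus code that already owns a block of Linux
 * interrupt numbers can combine the prepare and populate helpers roughly
 * like this; "domain", "dev", "virq" and "nvec" are assumed to come from
 * the caller, which must also hold the relevant domain mutex.
 *
 *	msi_alloc_info_t arg;
 *	int err;
 *
 *	err = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
 *	if (!err)
 *		err = msi_domain_populate_irqs(domain, dev, virq, nvec, &arg);
 */
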
/*
 * Carefully check whether the device can use reservation mode. If
 * reservation mode is enabled then the early activation will assign a
 * dummy vector to the device. If the PCI/MSI device does not support
 * masking of the entry then this can result in spurious interrupts when
 * the device driver is not absolutely careful. But even then a malfunction
 * of the hardware could result in a spurious interrupt on the dummy vector
 * and render the device unusable. If the entry can be masked then the core
 * logic will prevent the spurious interrupt and reservation mode can be
 * used. For now reservation mode is restricted to PCI/MSI.
 */
static bool msi_check_reservation_mode(struct irq_domain *domain,
				       struct msi_domain_info *info,
				       struct device *dev)
{
	struct msi_desc *desc;

	if (domain->bus_token != DOMAIN_BUS_PCI_MSI)
		return false;

	if (!(info->flags & MSI_FLAG_MUST_REACTIVATE))
		return false;

	if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_ignore_mask)
		return false;

	/*
	 * Checking the first MSI descriptor is sufficient. MSIX supports
	 * masking and MSI does so when the maskbit is set.
	 */
	desc = first_msi_entry(dev);
	return desc->msi_attrib.is_msix || desc->msi_attrib.maskbit;
}

/**
 * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain
 * @domain:	The domain to allocate from
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are allocated
 * @nvec:	The number of interrupts to allocate
 *
 * Returns 0 on success or an error code.
 */
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec)
{
	struct msi_domain_info *info = domain->host_data;
	struct msi_domain_ops *ops = info->ops;
	struct irq_data *irq_data;
	struct msi_desc *desc;
	msi_alloc_info_t arg;
	int i, ret, virq;
	bool can_reserve;

	ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
	if (ret)
		return ret;

	for_each_msi_entry(desc, dev) {
		ops->set_desc(&arg, desc);

		virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
					       dev_to_node(dev), &arg, false,
					       desc->affinity);
		if (virq < 0) {
			ret = -ENOSPC;
			if (ops->handle_error)
				ret = ops->handle_error(domain, desc, ret);
			if (ops->msi_finish)
				ops->msi_finish(&arg, ret);
			return ret;
		}

		for (i = 0; i < desc->nvec_used; i++) {
			irq_set_msi_desc_off(virq, i, desc);
			irq_debugfs_copy_devname(virq + i, dev);
		}
	}

	if (ops->msi_finish)
		ops->msi_finish(&arg, 0);

	can_reserve = msi_check_reservation_mode(domain, info, dev);

	for_each_msi_entry(desc, dev) {
		virq = desc->irq;
		if (desc->nvec_used == 1)
			dev_dbg(dev, "irq %d for MSI\n", virq);
		else
			dev_dbg(dev, "irq [%d-%d] for MSI\n",
				virq, virq + desc->nvec_used - 1);
		/*
		 * This flag is set by the PCI layer as we need to activate
		 * the MSI entries before the PCI layer enables MSI in the
		 * card. Otherwise the card latches a random msi message.
		 */
		if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
			continue;

		irq_data = irq_domain_get_irq_data(domain, desc->irq);
		if (!can_reserve)
			irqd_clr_can_reserve(irq_data);
		ret = irq_domain_activate_irq(irq_data, can_reserve);
		if (ret)
			goto cleanup;
	}

	/*
	 * If these interrupts use reservation mode, clear the activated bit
	 * so request_irq() will assign the final vector.
	 */
	if (can_reserve) {
		for_each_msi_entry(desc, dev) {
			irq_data = irq_domain_get_irq_data(domain, desc->irq);
			irqd_clr_activated(irq_data);
		}
	}
	return 0;

cleanup:
	for_each_msi_entry(desc, dev) {
		struct irq_data *irqd;

		if (desc->irq == virq)
			break;

		irqd = irq_domain_get_irq_data(domain, desc->irq);
		if (irqd_is_activated(irqd))
			irq_domain_deactivate_irq(irqd);
	}
	msi_domain_free_irqs(domain, dev);
	return ret;
}

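/*
 * Usage sketch (hypothetical): bus support code (PCI/MSI, platform MSI, ...)
 * drives the allocator roughly like this once the msi_desc entries for the
 * device have been set up; "msi_domain" is assumed to be the device's MSI
 * irq domain.
 *
 *	err = msi_domain_alloc_irqs(msi_domain, dev, nvec);
 *	if (err)
 *		return err;
 *	// ... interrupts are now requestable via desc->irq ...
 *	msi_domain_free_irqs(msi_domain, dev);
 */
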
/**
 * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
 * @domain:	The domain managing the interrupts
 * @dev:	Pointer to device struct of the device for which the interrupts
 *		are freed
 */
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
	struct msi_desc *desc;

	for_each_msi_entry(desc, dev) {
		/*
		 * We might have failed to allocate an MSI early
		 * enough that there is no IRQ associated to this
		 * entry. If that's the case, don't do anything.
		 */
		if (desc->irq) {
			irq_domain_free_irqs(desc->irq, desc->nvec_used);
			desc->irq = 0;
		}
	}
}

/**
 * msi_get_domain_info - Get the MSI interrupt domain info for @domain
 * @domain:	The interrupt domain to retrieve data from
 *
 * Returns the pointer to the msi_domain_info stored in
 * @domain->host_data.
 */
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain)
{
	return (struct msi_domain_info *)domain->host_data;
}

#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */