// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments' K3 Interrupt Aggregator irqchip driver
 *
 * Copyright (C) 2018-2019 Texas Instruments Incorporated - https://www.ti.com/
 *	Lokesh Vutla <lokeshvutla@ti.com>
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <asm-generic/msi.h>

#define TI_SCI_DEV_ID_MASK	0xffff
#define TI_SCI_DEV_ID_SHIFT	16
#define TI_SCI_IRQ_ID_MASK	0xffff
#define TI_SCI_IRQ_ID_SHIFT	0
#define HWIRQ_TO_DEVID(hwirq)	(((hwirq) >> (TI_SCI_DEV_ID_SHIFT)) & \
				 (TI_SCI_DEV_ID_MASK))
#define HWIRQ_TO_IRQID(hwirq)	((hwirq) & (TI_SCI_IRQ_ID_MASK))
#define TO_HWIRQ(dev, index)	((((dev) & TI_SCI_DEV_ID_MASK) << \
				 TI_SCI_DEV_ID_SHIFT) | \
				((index) & TI_SCI_IRQ_ID_MASK))

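/*
 * Each vint instance aggregates up to 64 events and has its registers in a
 * separate 0x1000-byte window (indexed as vint_id * 0x1000 below).
 */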
#define MAX_EVENTS_PER_VINT	64
#define VINT_ENABLE_SET_OFFSET	0x0
#define VINT_ENABLE_CLR_OFFSET	0x8
#define VINT_STATUS_OFFSET	0x18
#define VINT_STATUS_MASKED_OFFSET	0x20

/**
 * struct ti_sci_inta_event_desc - Description of an event coming to
 *				   Interrupt Aggregator. This serves
 *				   as a mapping table for global event,
 *				   hwirq and vint bit.
 * @global_event:	Global event number corresponding to this event
 * @hwirq:		Hwirq of the incoming interrupt
 * @vint_bit:		Corresponding vint bit to which this event is attached.
 */
struct ti_sci_inta_event_desc {
	u16 global_event;
	u32 hwirq;
	u8 vint_bit;
};

/**
 * struct ti_sci_inta_vint_desc - Description of a virtual interrupt coming out
 *				  of Interrupt Aggregator.
 * @domain:		Pointer to IRQ domain to which this vint belongs.
 * @list:		List entry for the vint list
 * @event_map:		Bitmap to manage the allocation of events to vint.
 * @events:		Array of event descriptors assigned to this vint.
 * @parent_virq:	Linux IRQ number that gets attached to parent
 * @vint_id:		TISCI vint ID
 */
struct ti_sci_inta_vint_desc {
	struct irq_domain *domain;
	struct list_head list;
	DECLARE_BITMAP(event_map, MAX_EVENTS_PER_VINT);
	struct ti_sci_inta_event_desc events[MAX_EVENTS_PER_VINT];
	unsigned int parent_virq;
	u16 vint_id;
};

/**
 * struct ti_sci_inta_irq_domain - Structure representing a TISCI based
 *				   Interrupt Aggregator IRQ domain.
 * @sci:		Pointer to TISCI handle
 * @vint:		TISCI resource pointer representing IA interrupts.
 * @global_event:	TISCI resource pointer representing global events.
 * @vint_list:		List of the vints active in the system
 * @vint_mutex:		Mutex to protect vint_list
 * @base:		Base address of the memory mapped IO registers
 * @pdev:		Pointer to platform device.
 * @ti_sci_id:		TI-SCI device identifier
 */
struct ti_sci_inta_irq_domain {
	const struct ti_sci_handle *sci;
	struct ti_sci_resource *vint;
	struct ti_sci_resource *global_event;
	struct list_head vint_list;
	/* Mutex to protect vint list */
	struct mutex vint_mutex;
	void __iomem *base;
	struct platform_device *pdev;
	u32 ti_sci_id;
};

#define to_vint_desc(e, i) container_of(e, struct ti_sci_inta_vint_desc, \
					events[i])

/**
 * ti_sci_inta_irq_handler() - Chained IRQ handler for the vint irqs
 * @desc:	Pointer to irq_desc corresponding to the irq
 */
static void ti_sci_inta_irq_handler(struct irq_desc *desc)
{
	struct ti_sci_inta_vint_desc *vint_desc;
	struct ti_sci_inta_irq_domain *inta;
	struct irq_domain *domain;
	unsigned int virq, bit;
	unsigned long val;

	vint_desc = irq_desc_get_handler_data(desc);
	domain = vint_desc->domain;
	inta = domain->host_data;

	chained_irq_enter(irq_desc_get_chip(desc), desc);

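	/* Each set bit in the masked status corresponds to a pending event to demultiplex */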
	val = readq_relaxed(inta->base + vint_desc->vint_id * 0x1000 +
			    VINT_STATUS_MASKED_OFFSET);

	for_each_set_bit(bit, &val, MAX_EVENTS_PER_VINT) {
		virq = irq_find_mapping(domain, vint_desc->events[bit].hwirq);
		if (virq)
			generic_handle_irq(virq);
	}

	chained_irq_exit(irq_desc_get_chip(desc), desc);
}

/**
 * ti_sci_inta_xlate_irq() - Translate vint_id to the parent's hwirq.
 * @inta:	IRQ domain corresponding to Interrupt Aggregator
 * @vint_id:	VINT id within the Interrupt Aggregator
 *
 * Return parent irq number if translation is available else -ENOENT.
 */
static int ti_sci_inta_xlate_irq(struct ti_sci_inta_irq_domain *inta,
				 u16 vint_id)
{
	struct device_node *np = dev_of_node(&inta->pdev->dev);
	u32 base, parent_base, size;
	const __be32 *range;
	int len;

	range = of_get_property(np, "ti,interrupt-ranges", &len);
	if (!range)
		return vint_id;

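	/* Each "ti,interrupt-ranges" entry is a <local-base parent-base length> triplet */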
	for (len /= sizeof(*range); len >= 3; len -= 3) {
		base = be32_to_cpu(*range++);
		parent_base = be32_to_cpu(*range++);
		size = be32_to_cpu(*range++);

		if (base <= vint_id && vint_id < base + size)
			return vint_id - base + parent_base;
	}

	return -ENOENT;
}

/**
 * ti_sci_inta_alloc_parent_irq() - Allocate parent irq to Interrupt Aggregator
 * @domain:	IRQ domain corresponding to Interrupt Aggregator
 *
 * Return pointer to the allocated vint_desc if all went well else
 * corresponding error pointer.
 */
static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_domain *domain)
{
	struct ti_sci_inta_irq_domain *inta = domain->host_data;
	struct ti_sci_inta_vint_desc *vint_desc;
	struct irq_fwspec parent_fwspec;
	struct device_node *parent_node;
	unsigned int parent_virq;
	int p_hwirq, ret;
	u16 vint_id;

	vint_id = ti_sci_get_free_resource(inta->vint);
	if (vint_id == TI_SCI_RESOURCE_NULL)
		return ERR_PTR(-EINVAL);

	p_hwirq = ti_sci_inta_xlate_irq(inta, vint_id);
	if (p_hwirq < 0) {
		ret = p_hwirq;
		goto free_vint;
	}

	vint_desc = kzalloc(sizeof(*vint_desc), GFP_KERNEL);
	if (!vint_desc) {
		ret = -ENOMEM;
		goto free_vint;
	}

	vint_desc->domain = domain;
	vint_desc->vint_id = vint_id;
	INIT_LIST_HEAD(&vint_desc->list);

	parent_node = of_irq_find_parent(dev_of_node(&inta->pdev->dev));
	parent_fwspec.fwnode = of_node_to_fwnode(parent_node);

	if (of_device_is_compatible(parent_node, "arm,gic-v3")) {
		/* Parent is GIC */
		parent_fwspec.param_count = 3;
		parent_fwspec.param[0] = 0;
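		/* param[1] is the SPI number, hence the offset of 32 from the raw hwirq */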
		parent_fwspec.param[1] = p_hwirq - 32;
		parent_fwspec.param[2] = IRQ_TYPE_LEVEL_HIGH;
	} else {
		/* Parent is Interrupt Router */
		parent_fwspec.param_count = 1;
		parent_fwspec.param[0] = p_hwirq;
	}

	parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
	if (parent_virq == 0) {
		dev_err(&inta->pdev->dev, "Parent IRQ allocation failed\n");
		ret = -EINVAL;
		goto free_vint_desc;
	}
	vint_desc->parent_virq = parent_virq;

	list_add_tail(&vint_desc->list, &inta->vint_list);
	irq_set_chained_handler_and_data(vint_desc->parent_virq,
					 ti_sci_inta_irq_handler, vint_desc);

	return vint_desc;
free_vint_desc:
	kfree(vint_desc);
free_vint:
	ti_sci_release_resource(inta->vint, vint_id);
	return ERR_PTR(ret);
}

/**
 * ti_sci_inta_alloc_event() - Attach an event to an IA vint.
 * @vint_desc:	Pointer to vint_desc to which the event gets attached
 * @free_bit:	Bit inside vint to which event gets attached
 * @hwirq:	hwirq of the input event
 *
 * Return event_desc pointer if all went ok else appropriate error value.
 */
static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_event(struct ti_sci_inta_vint_desc *vint_desc,
							      u16 free_bit,
							      u32 hwirq)
{
	struct ti_sci_inta_irq_domain *inta = vint_desc->domain->host_data;
	struct ti_sci_inta_event_desc *event_desc;
	u16 dev_id, dev_index;
	int err;

	dev_id = HWIRQ_TO_DEVID(hwirq);
	dev_index = HWIRQ_TO_IRQID(hwirq);

	event_desc = &vint_desc->events[free_bit];
	event_desc->hwirq = hwirq;
	event_desc->vint_bit = free_bit;
	event_desc->global_event = ti_sci_get_free_resource(inta->global_event);
	if (event_desc->global_event == TI_SCI_RESOURCE_NULL)
		return ERR_PTR(-EINVAL);

	err = inta->sci->ops.rm_irq_ops.set_event_map(inta->sci,
						      dev_id, dev_index,
						      inta->ti_sci_id,
						      vint_desc->vint_id,
						      event_desc->global_event,
						      free_bit);
	if (err)
		goto free_global_event;

	return event_desc;
free_global_event:
	ti_sci_release_resource(inta->global_event, event_desc->global_event);
	return ERR_PTR(err);
}

/**
 * ti_sci_inta_alloc_irq() - Allocate an irq within INTA domain
 * @domain:	irq_domain pointer corresponding to INTA
 * @hwirq:	hwirq of the input event
 *
 * Note: Allocation happens in the following manner:
 *	- Find a free bit available in any of the vints available in the list.
 *	- If not found, allocate a vint from the vint pool
 *	- Attach the free bit to input hwirq.
 * Return event_desc if all went ok else appropriate error value.
 */
static struct ti_sci_inta_event_desc *ti_sci_inta_alloc_irq(struct irq_domain *domain,
							    u32 hwirq)
{
	struct ti_sci_inta_irq_domain *inta = domain->host_data;
	struct ti_sci_inta_vint_desc *vint_desc = NULL;
	struct ti_sci_inta_event_desc *event_desc;
	u16 free_bit;

	mutex_lock(&inta->vint_mutex);
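	/* First, try to find a free bit on a vint that is already allocated */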
	list_for_each_entry(vint_desc, &inta->vint_list, list) {
		free_bit = find_first_zero_bit(vint_desc->event_map,
					       MAX_EVENTS_PER_VINT);
		if (free_bit != MAX_EVENTS_PER_VINT) {
			set_bit(free_bit, vint_desc->event_map);
			goto alloc_event;
		}
	}

	/* No free bits available. Allocate a new vint */
	vint_desc = ti_sci_inta_alloc_parent_irq(domain);
	if (IS_ERR(vint_desc)) {
		event_desc = ERR_CAST(vint_desc);
		goto unlock;
	}

	free_bit = find_first_zero_bit(vint_desc->event_map,
				       MAX_EVENTS_PER_VINT);
	set_bit(free_bit, vint_desc->event_map);

alloc_event:
	event_desc = ti_sci_inta_alloc_event(vint_desc, free_bit, hwirq);
	if (IS_ERR(event_desc))
		clear_bit(free_bit, vint_desc->event_map);

unlock:
	mutex_unlock(&inta->vint_mutex);
	return event_desc;
}

/**
 * ti_sci_inta_free_parent_irq() - Free a parent irq to INTA
 * @inta:	Pointer to inta domain.
 * @vint_desc:	Pointer to vint_desc that needs to be freed.
 */
static void ti_sci_inta_free_parent_irq(struct ti_sci_inta_irq_domain *inta,
					struct ti_sci_inta_vint_desc *vint_desc)
{
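	/* Only tear the vint down once its last event has been released */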
	if (find_first_bit(vint_desc->event_map, MAX_EVENTS_PER_VINT) == MAX_EVENTS_PER_VINT) {
		list_del(&vint_desc->list);
		ti_sci_release_resource(inta->vint, vint_desc->vint_id);
		irq_dispose_mapping(vint_desc->parent_virq);
		kfree(vint_desc);
	}
}

/**
 * ti_sci_inta_free_irq() - Free an IRQ within INTA domain
 * @event_desc:	Pointer to event_desc that needs to be freed.
 * @hwirq:	Hwirq number within INTA domain that needs to be freed
 */
static void ti_sci_inta_free_irq(struct ti_sci_inta_event_desc *event_desc,
				 u32 hwirq)
{
	struct ti_sci_inta_vint_desc *vint_desc;
	struct ti_sci_inta_irq_domain *inta;

	vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
	inta = vint_desc->domain->host_data;
	/* free event irq */
	mutex_lock(&inta->vint_mutex);
	inta->sci->ops.rm_irq_ops.free_event_map(inta->sci,
						 HWIRQ_TO_DEVID(hwirq),
						 HWIRQ_TO_IRQID(hwirq),
						 inta->ti_sci_id,
						 vint_desc->vint_id,
						 event_desc->global_event,
						 event_desc->vint_bit);

	clear_bit(event_desc->vint_bit, vint_desc->event_map);
	ti_sci_release_resource(inta->global_event, event_desc->global_event);
	event_desc->global_event = TI_SCI_RESOURCE_NULL;
	event_desc->hwirq = 0;

	ti_sci_inta_free_parent_irq(inta, vint_desc);
	mutex_unlock(&inta->vint_mutex);
}

/**
 * ti_sci_inta_request_resources() - Allocate resources for input irq
 * @data: Pointer to corresponding irq_data
 *
 * Note: This is the core API where the actual allocation happens for the
 *	 input hwirq, including creating a parent irq for the vint. Doing
 *	 this in irq_domain_ops.alloc() would deadlock, so the allocation
 *	 is done in request_resources() instead.
 *
 * Return: 0 if all went well else corresponding error.
 */
static int ti_sci_inta_request_resources(struct irq_data *data)
{
	struct ti_sci_inta_event_desc *event_desc;

	event_desc = ti_sci_inta_alloc_irq(data->domain, data->hwirq);
	if (IS_ERR(event_desc))
		return PTR_ERR(event_desc);

	data->chip_data = event_desc;

	return 0;
}

/**
 * ti_sci_inta_release_resources() - Release resources for input irq
 * @data: Pointer to corresponding irq_data
 *
 * Note: Corresponding to request_resources(), all the unmapping and deletion
 *	 of parent vint irqs happens in this API.
 */
static void ti_sci_inta_release_resources(struct irq_data *data)
{
	struct ti_sci_inta_event_desc *event_desc;

	event_desc = irq_data_get_irq_chip_data(data);
	ti_sci_inta_free_irq(event_desc, data->hwirq);
}

/**
 * ti_sci_inta_manage_event() - Control the event based on the offset
 * @data:	Pointer to corresponding irq_data
 * @offset:	Register offset with which the event is controlled.
 */
static void ti_sci_inta_manage_event(struct irq_data *data, u32 offset)
{
	struct ti_sci_inta_event_desc *event_desc;
	struct ti_sci_inta_vint_desc *vint_desc;
	struct ti_sci_inta_irq_domain *inta;

	event_desc = irq_data_get_irq_chip_data(data);
	vint_desc = to_vint_desc(event_desc, event_desc->vint_bit);
	inta = data->domain->host_data;

	writeq_relaxed(BIT(event_desc->vint_bit),
		       inta->base + vint_desc->vint_id * 0x1000 + offset);
}

/**
 * ti_sci_inta_mask_irq() - Mask an event
 * @data:	Pointer to corresponding irq_data
 */
static void ti_sci_inta_mask_irq(struct irq_data *data)
{
	ti_sci_inta_manage_event(data, VINT_ENABLE_CLR_OFFSET);
}

/**
 * ti_sci_inta_unmask_irq() - Unmask an event
 * @data:	Pointer to corresponding irq_data
 */
static void ti_sci_inta_unmask_irq(struct irq_data *data)
{
	ti_sci_inta_manage_event(data, VINT_ENABLE_SET_OFFSET);
}

/**
 * ti_sci_inta_ack_irq() - Ack an event
 * @data:	Pointer to corresponding irq_data
 */
static void ti_sci_inta_ack_irq(struct irq_data *data)
{
	/*
	 * Do not clear the event if hardware is capable of sending
	 * a down event.
	 */
	if (irqd_get_trigger_type(data) != IRQF_TRIGGER_HIGH)
		ti_sci_inta_manage_event(data, VINT_STATUS_OFFSET);
}

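/*
 * Per-event affinity is not supported: events are demultiplexed from the
 * parent vint interrupt.
 */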
static int ti_sci_inta_set_affinity(struct irq_data *d,
				    const struct cpumask *mask_val, bool force)
{
	return -EINVAL;
}

/**
 * ti_sci_inta_set_type() - Update the trigger type of the irq.
 * @data:	Pointer to corresponding irq_data
 * @type:	Trigger type as specified by user
 *
 * Note: This updates the handle_irq callback for level msi.
 *
 * Return 0 if all went well else appropriate error.
 */
static int ti_sci_inta_set_type(struct irq_data *data, unsigned int type)
{
	/*
	 * .alloc default sets handle_edge_irq. But if the user specifies
	 * that IRQ is level MSI, then update the handle to handle_level_irq
	 */
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQF_TRIGGER_HIGH:
		irq_set_handler_locked(data, handle_level_irq);
		return 0;
	case IRQF_TRIGGER_RISING:
		return 0;
	default:
		return -EINVAL;
	}
}

static struct irq_chip ti_sci_inta_irq_chip = {
	.name			= "INTA",
	.irq_ack		= ti_sci_inta_ack_irq,
	.irq_mask		= ti_sci_inta_mask_irq,
	.irq_set_type		= ti_sci_inta_set_type,
	.irq_unmask		= ti_sci_inta_unmask_irq,
	.irq_set_affinity	= ti_sci_inta_set_affinity,
	.irq_request_resources	= ti_sci_inta_request_resources,
	.irq_release_resources	= ti_sci_inta_release_resources,
};

/**
 * ti_sci_inta_irq_domain_free() - Free an IRQ from the IRQ domain
 * @domain:	Domain to which the irqs belong
 * @virq:	Base Linux virtual IRQ to be freed.
 * @nr_irqs:	Number of contiguous irqs to be freed
 */
static void ti_sci_inta_irq_domain_free(struct irq_domain *domain,
					unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_domain_get_irq_data(domain, virq);

	irq_domain_reset_irq_data(data);
}

/**
 * ti_sci_inta_irq_domain_alloc() - Allocate Interrupt Aggregator IRQs
 * @domain:	Pointer to the Interrupt Aggregator IRQ domain
 * @virq:	Corresponding Linux virtual IRQ number
 * @nr_irqs:	Number of contiguous irqs to be allocated
 * @data:	Pointer to firmware specifier
 *
 * No actual allocation happens here.
 *
 * Return 0 if all went well else appropriate error value.
 */
static int ti_sci_inta_irq_domain_alloc(struct irq_domain *domain,
					unsigned int virq, unsigned int nr_irqs,
					void *data)
{
	msi_alloc_info_t *arg = data;

	irq_domain_set_info(domain, virq, arg->hwirq, &ti_sci_inta_irq_chip,
			    NULL, handle_edge_irq, NULL, NULL);

	return 0;
}

static const struct irq_domain_ops ti_sci_inta_irq_domain_ops = {
	.free		= ti_sci_inta_irq_domain_free,
	.alloc		= ti_sci_inta_irq_domain_alloc,
};

static struct irq_chip ti_sci_inta_msi_irq_chip = {
	.name			= "MSI-INTA",
	.flags			= IRQCHIP_SUPPORTS_LEVEL_MSI,
};

static void ti_sci_inta_msi_set_desc(msi_alloc_info_t *arg,
				     struct msi_desc *desc)
{
	struct platform_device *pdev = to_platform_device(desc->dev);

	arg->desc = desc;
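	/* Encode the TI-SCI device id and event index (see TO_HWIRQ()) */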
	arg->hwirq = TO_HWIRQ(pdev->id, desc->inta.dev_index);
}

static struct msi_domain_ops ti_sci_inta_msi_ops = {
	.set_desc	= ti_sci_inta_msi_set_desc,
};

static struct msi_domain_info ti_sci_inta_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_LEVEL_CAPABLE),
	.ops	= &ti_sci_inta_msi_ops,
	.chip	= &ti_sci_inta_msi_irq_chip,
};

static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
{
	struct irq_domain *parent_domain, *domain, *msi_domain;
	struct device_node *parent_node, *node;
	struct ti_sci_inta_irq_domain *inta;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	node = dev_of_node(dev);
	parent_node = of_irq_find_parent(node);
	if (!parent_node) {
		dev_err(dev, "Failed to get IRQ parent node\n");
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent_node);
	if (!parent_domain)
		return -EPROBE_DEFER;

	inta = devm_kzalloc(dev, sizeof(*inta), GFP_KERNEL);
	if (!inta)
		return -ENOMEM;

	inta->pdev = pdev;
	inta->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
	if (IS_ERR(inta->sci)) {
		ret = PTR_ERR(inta->sci);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "ti,sci read fail %d\n", ret);
		inta->sci = NULL;
		return ret;
	}

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", &inta->ti_sci_id);
	if (ret) {
		dev_err(dev, "missing 'ti,sci-dev-id' property\n");
		return -EINVAL;
	}

	inta->vint = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id,
					      TI_SCI_RESASG_SUBTYPE_IA_VINT);
	if (IS_ERR(inta->vint)) {
		dev_err(dev, "VINT resource allocation failed\n");
		return PTR_ERR(inta->vint);
	}

	inta->global_event = devm_ti_sci_get_resource(inta->sci, dev, inta->ti_sci_id,
						      TI_SCI_RESASG_SUBTYPE_GLOBAL_EVENT_SEVT);
	if (IS_ERR(inta->global_event)) {
		dev_err(dev, "Global event resource allocation failed\n");
		return PTR_ERR(inta->global_event);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	inta->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(inta->base))
		return PTR_ERR(inta->base);

	domain = irq_domain_add_linear(dev_of_node(dev),
				       ti_sci_get_num_resources(inta->vint),
				       &ti_sci_inta_irq_domain_ops, inta);
	if (!domain) {
		dev_err(dev, "Failed to allocate IRQ domain\n");
		return -ENOMEM;
	}

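	/* Stack an MSI domain on top of the INTA domain for client devices */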
	msi_domain = ti_sci_inta_msi_create_irq_domain(of_node_to_fwnode(node),
						&ti_sci_inta_msi_domain_info,
						domain);
	if (!msi_domain) {
		irq_domain_remove(domain);
		dev_err(dev, "Failed to allocate msi domain\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&inta->vint_list);
	mutex_init(&inta->vint_mutex);

	dev_info(dev, "Interrupt Aggregator domain %d created\n", pdev->id);

	return 0;
}

static const struct of_device_id ti_sci_inta_irq_domain_of_match[] = {
	{ .compatible = "ti,sci-inta", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, ti_sci_inta_irq_domain_of_match);

static struct platform_driver ti_sci_inta_irq_domain_driver = {
	.probe = ti_sci_inta_irq_domain_probe,
	.driver = {
		.name = "ti-sci-inta",
		.of_match_table = ti_sci_inta_irq_domain_of_match,
	},
};
module_platform_driver(ti_sci_inta_irq_domain_driver);

MODULE_AUTHOR("Lokesh Vutla <lokeshvutla@ti.com>");
MODULE_DESCRIPTION("K3 Interrupt Aggregator driver over TI SCI protocol");
MODULE_LICENSE("GPL v2");