Linux Audio

Check our new training course

Loading...
v4.6
  1/*
  2 * linux/arch/arm/mach-omap2/irq.c
  3 *
  4 * Interrupt handler for OMAP2 boards.
  5 *
  6 * Copyright (C) 2005 Nokia Corporation
  7 * Author: Paul Mundt <paul.mundt@nokia.com>
  8 *
  9 * This file is subject to the terms and conditions of the GNU General Public
 10 * License. See the file "COPYING" in the main directory of this archive
 11 * for more details.
 12 */
 13#include <linux/kernel.h>
 14#include <linux/module.h>
 15#include <linux/init.h>
 16#include <linux/interrupt.h>
 17#include <linux/io.h>
 18
 19#include <asm/exception.h>
 20#include <linux/irqchip.h>
 21#include <linux/irqdomain.h>
 22#include <linux/of.h>
 23#include <linux/of_address.h>
 24#include <linux/of_irq.h>
 25
 26/* Define these here for now until we drop all board-files */
 27#define OMAP24XX_IC_BASE	0x480fe000
 28#define OMAP34XX_IC_BASE	0x48200000
 29
 30/* selected INTC register offsets */
 31
 32#define INTC_REVISION		0x0000
 33#define INTC_SYSCONFIG		0x0010
 34#define INTC_SYSSTATUS		0x0014
 35#define INTC_SIR		0x0040
 36#define INTC_CONTROL		0x0048
 37#define INTC_PROTECTION		0x004C
 38#define INTC_IDLE		0x0050
 39#define INTC_THRESHOLD		0x0068
 40#define INTC_MIR0		0x0084
 41#define INTC_MIR_CLEAR0		0x0088
 42#define INTC_MIR_SET0		0x008c
 43#define INTC_PENDING_IRQ0	0x0098
 44#define INTC_PENDING_IRQ1	0x00b8
 45#define INTC_PENDING_IRQ2	0x00d8
 46#define INTC_PENDING_IRQ3	0x00f8
 47#define INTC_ILR0		0x0100
 48
 49#define ACTIVEIRQ_MASK		0x7f	/* omap2/3 active interrupt bits */
 50#define SPURIOUSIRQ_MASK	(0x1ffffff << 7)
 51#define INTCPS_NR_ILR_REGS	128
 52#define INTCPS_NR_MIR_REGS	4
 53
 54#define INTC_IDLE_FUNCIDLE	(1 << 0)
 55#define INTC_IDLE_TURBO		(1 << 1)
 56
 57#define INTC_PROTECTION_ENABLE	(1 << 0)
 58
 59struct omap_intc_regs {
 60	u32 sysconfig;
 61	u32 protection;
 62	u32 idle;
 63	u32 threshold;
 64	u32 ilr[INTCPS_NR_ILR_REGS];
 65	u32 mir[INTCPS_NR_MIR_REGS];
 66};
 67static struct omap_intc_regs intc_context;
 68
 69static struct irq_domain *domain;
 70static void __iomem *omap_irq_base;
 71static int omap_nr_pending = 3;
 72static int omap_nr_irqs = 96;
 73
 74static void intc_writel(u32 reg, u32 val)
 75{
 76	writel_relaxed(val, omap_irq_base + reg);
 77}
 78
 79static u32 intc_readl(u32 reg)
 80{
 81	return readl_relaxed(omap_irq_base + reg);
 82}
 83
 84void omap_intc_save_context(void)
 85{
 86	int i;
 87
 88	intc_context.sysconfig =
 89		intc_readl(INTC_SYSCONFIG);
 90	intc_context.protection =
 91		intc_readl(INTC_PROTECTION);
 92	intc_context.idle =
 93		intc_readl(INTC_IDLE);
 94	intc_context.threshold =
 95		intc_readl(INTC_THRESHOLD);
 96
 97	for (i = 0; i < omap_nr_irqs; i++)
 98		intc_context.ilr[i] =
 99			intc_readl((INTC_ILR0 + 0x4 * i));
100	for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
101		intc_context.mir[i] =
102			intc_readl(INTC_MIR0 + (0x20 * i));
103}
104
105void omap_intc_restore_context(void)
106{
107	int i;
108
109	intc_writel(INTC_SYSCONFIG, intc_context.sysconfig);
110	intc_writel(INTC_PROTECTION, intc_context.protection);
111	intc_writel(INTC_IDLE, intc_context.idle);
112	intc_writel(INTC_THRESHOLD, intc_context.threshold);
113
114	for (i = 0; i < omap_nr_irqs; i++)
115		intc_writel(INTC_ILR0 + 0x4 * i,
116				intc_context.ilr[i]);
117
118	for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
119		intc_writel(INTC_MIR0 + 0x20 * i,
120			intc_context.mir[i]);
121	/* MIRs are saved and restore with other PRCM registers */
122}
123
124void omap3_intc_prepare_idle(void)
125{
126	/*
127	 * Disable autoidle as it can stall interrupt controller,
128	 * cf. errata ID i540 for 3430 (all revisions up to 3.1.x)
129	 */
130	intc_writel(INTC_SYSCONFIG, 0);
131	intc_writel(INTC_IDLE, INTC_IDLE_TURBO);
132}
133
134void omap3_intc_resume_idle(void)
135{
136	/* Re-enable autoidle */
137	intc_writel(INTC_SYSCONFIG, 1);
138	intc_writel(INTC_IDLE, 0);
139}
140
141/* XXX: FIQ and additional INTC support (only MPU at the moment) */
142static void omap_ack_irq(struct irq_data *d)
143{
144	intc_writel(INTC_CONTROL, 0x1);
145}
146
/* Mask the line first, then ack, so it cannot re-trigger in between. */
static void omap_mask_ack_irq(struct irq_data *d)
{
	irq_gc_mask_disable_reg(d);
	omap_ack_irq(d);
}
152
153static void __init omap_irq_soft_reset(void)
154{
155	unsigned long tmp;
156
157	tmp = intc_readl(INTC_REVISION) & 0xff;
158
159	pr_info("IRQ: Found an INTC at 0x%p (revision %ld.%ld) with %d interrupts\n",
160		omap_irq_base, tmp >> 4, tmp & 0xf, omap_nr_irqs);
161
162	tmp = intc_readl(INTC_SYSCONFIG);
163	tmp |= 1 << 1;	/* soft reset */
164	intc_writel(INTC_SYSCONFIG, tmp);
165
166	while (!(intc_readl(INTC_SYSSTATUS) & 0x1))
167		/* Wait for reset to complete */;
168
169	/* Enable autoidle */
170	intc_writel(INTC_SYSCONFIG, 1 << 0);
171}
172
173int omap_irq_pending(void)
174{
175	int i;
176
177	for (i = 0; i < omap_nr_pending; i++)
178		if (intc_readl(INTC_PENDING_IRQ0 + (0x20 * i)))
179			return 1;
180	return 0;
181}
182
183void omap3_intc_suspend(void)
184{
185	/* A pending interrupt would prevent OMAP from entering suspend */
186	omap_ack_irq(NULL);
187}
188
189static int __init omap_alloc_gc_of(struct irq_domain *d, void __iomem *base)
190{
191	int ret;
192	int i;
193
194	ret = irq_alloc_domain_generic_chips(d, 32, 1, "INTC",
195			handle_level_irq, IRQ_NOREQUEST | IRQ_NOPROBE,
196			IRQ_LEVEL, 0);
197	if (ret) {
198		pr_warn("Failed to allocate irq chips\n");
199		return ret;
200	}
201
202	for (i = 0; i < omap_nr_pending; i++) {
203		struct irq_chip_generic *gc;
204		struct irq_chip_type *ct;
205
206		gc = irq_get_domain_generic_chip(d, 32 * i);
207		gc->reg_base = base;
208		ct = gc->chip_types;
209
210		ct->type = IRQ_TYPE_LEVEL_MASK;
211
212		ct->chip.irq_ack = omap_mask_ack_irq;
213		ct->chip.irq_mask = irq_gc_mask_disable_reg;
214		ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
215
216		ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
217
218		ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i;
219		ct->regs.disable = INTC_MIR_SET0 + 32 * i;
220	}
221
222	return 0;
223}
224
225static void __init omap_alloc_gc_legacy(void __iomem *base,
226		unsigned int irq_start, unsigned int num)
227{
228	struct irq_chip_generic *gc;
229	struct irq_chip_type *ct;
230
231	gc = irq_alloc_generic_chip("INTC", 1, irq_start, base,
232			handle_level_irq);
233	ct = gc->chip_types;
234	ct->chip.irq_ack = omap_mask_ack_irq;
235	ct->chip.irq_mask = irq_gc_mask_disable_reg;
236	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
237	ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
238
239	ct->regs.enable = INTC_MIR_CLEAR0;
240	ct->regs.disable = INTC_MIR_SET0;
241	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
242			IRQ_NOREQUEST | IRQ_NOPROBE, 0);
243}
244
245static int __init omap_init_irq_of(struct device_node *node)
246{
247	int ret;
248
249	omap_irq_base = of_iomap(node, 0);
250	if (WARN_ON(!omap_irq_base))
251		return -ENOMEM;
252
253	domain = irq_domain_add_linear(node, omap_nr_irqs,
254			&irq_generic_chip_ops, NULL);
255
256	omap_irq_soft_reset();
257
258	ret = omap_alloc_gc_of(domain, omap_irq_base);
259	if (ret < 0)
260		irq_domain_remove(domain);
261
262	return ret;
263}
264
265static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
266{
267	int j, irq_base;
268
269	omap_irq_base = ioremap(base, SZ_4K);
270	if (WARN_ON(!omap_irq_base))
271		return -ENOMEM;
272
273	irq_base = irq_alloc_descs(-1, 0, omap_nr_irqs, 0);
274	if (irq_base < 0) {
275		pr_warn("Couldn't allocate IRQ numbers\n");
276		irq_base = 0;
277	}
278
279	domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
280			&irq_domain_simple_ops, NULL);
281
282	omap_irq_soft_reset();
283
284	for (j = 0; j < omap_nr_irqs; j += 32)
285		omap_alloc_gc_legacy(omap_irq_base + j, j + irq_base, 32);
286
287	return 0;
288}
289
290static void __init omap_irq_enable_protection(void)
291{
292	u32 reg;
293
294	reg = intc_readl(INTC_PROTECTION);
295	reg |= INTC_PROTECTION_ENABLE;
296	intc_writel(INTC_PROTECTION, reg);
297}
298
299static int __init omap_init_irq(u32 base, struct device_node *node)
300{
301	int ret;
302
303	/*
304	 * FIXME legacy OMAP DMA driver sitting under arch/arm/plat-omap/dma.c
305	 * depends is still not ready for linear IRQ domains; because of that
306	 * we need to temporarily "blacklist" OMAP2 and OMAP3 devices from using
307	 * linear IRQ Domain until that driver is finally fixed.
308	 */
309	if (of_device_is_compatible(node, "ti,omap2-intc") ||
310			of_device_is_compatible(node, "ti,omap3-intc")) {
311		struct resource res;
312
313		if (of_address_to_resource(node, 0, &res))
314			return -ENOMEM;
315
316		base = res.start;
317		ret = omap_init_irq_legacy(base, node);
318	} else if (node) {
319		ret = omap_init_irq_of(node);
320	} else {
321		ret = omap_init_irq_legacy(base, NULL);
322	}
323
324	if (ret == 0)
325		omap_irq_enable_protection();
326
327	return ret;
328}
329
330static asmlinkage void __exception_irq_entry
331omap_intc_handle_irq(struct pt_regs *regs)
332{
333	extern unsigned long irq_err_count;
334	u32 irqnr;
335
336	irqnr = intc_readl(INTC_SIR);
337
338	/*
339	 * A spurious IRQ can result if interrupt that triggered the
340	 * sorting is no longer active during the sorting (10 INTC
341	 * functional clock cycles after interrupt assertion). Or a
342	 * change in interrupt mask affected the result during sorting
343	 * time. There is no special handling required except ignoring
344	 * the SIR register value just read and retrying.
345	 * See section 6.2.5 of AM335x TRM Literature Number: SPRUH73K
346	 *
347	 * Many a times, a spurious interrupt situation has been fixed
348	 * by adding a flush for the posted write acking the IRQ in
349	 * the device driver. Typically, this is going be the device
350	 * driver whose interrupt was handled just before the spurious
351	 * IRQ occurred. Pay attention to those device drivers if you
352	 * run into hitting the spurious IRQ condition below.
353	 */
354	if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
355		pr_err_once("%s: spurious irq!\n", __func__);
356		irq_err_count++;
357		omap_ack_irq(NULL);
358		return;
359	}
360
361	irqnr &= ACTIVEIRQ_MASK;
362	handle_domain_irq(domain, irqnr, regs);
363}
364
365void __init omap3_init_irq(void)
366{
367	omap_nr_irqs = 96;
368	omap_nr_pending = 3;
369	omap_init_irq(OMAP34XX_IC_BASE, NULL);
370	set_handle_irq(omap_intc_handle_irq);
371}
372
373static int __init intc_of_init(struct device_node *node,
374			     struct device_node *parent)
375{
376	int ret;
377
378	omap_nr_pending = 3;
379	omap_nr_irqs = 96;
380
381	if (WARN_ON(!node))
382		return -ENODEV;
383
384	if (of_device_is_compatible(node, "ti,dm814-intc") ||
385	    of_device_is_compatible(node, "ti,dm816-intc") ||
386	    of_device_is_compatible(node, "ti,am33xx-intc")) {
387		omap_nr_irqs = 128;
388		omap_nr_pending = 4;
389	}
390
391	ret = omap_init_irq(-1, of_node_get(node));
392	if (ret < 0)
393		return ret;
394
395	set_handle_irq(omap_intc_handle_irq);
396
397	return 0;
398}
399
400IRQCHIP_DECLARE(omap2_intc, "ti,omap2-intc", intc_of_init);
401IRQCHIP_DECLARE(omap3_intc, "ti,omap3-intc", intc_of_init);
402IRQCHIP_DECLARE(dm814x_intc, "ti,dm814-intc", intc_of_init);
403IRQCHIP_DECLARE(dm816x_intc, "ti,dm816-intc", intc_of_init);
404IRQCHIP_DECLARE(am33xx_intc, "ti,am33xx-intc", intc_of_init);
v6.2
  1/*
  2 * linux/arch/arm/mach-omap2/irq.c
  3 *
  4 * Interrupt handler for OMAP2 boards.
  5 *
  6 * Copyright (C) 2005 Nokia Corporation
  7 * Author: Paul Mundt <paul.mundt@nokia.com>
  8 *
  9 * This file is subject to the terms and conditions of the GNU General Public
 10 * License. See the file "COPYING" in the main directory of this archive
 11 * for more details.
 12 */
 13#include <linux/kernel.h>
 14#include <linux/module.h>
 15#include <linux/init.h>
 16#include <linux/interrupt.h>
 17#include <linux/io.h>
 18
 19#include <asm/exception.h>
 20#include <linux/irqchip.h>
 21#include <linux/irqdomain.h>
 22#include <linux/of.h>
 23#include <linux/of_address.h>
 24#include <linux/of_irq.h>
 25
 26#include <linux/irqchip/irq-omap-intc.h>
 
 
 27
 28/* selected INTC register offsets */
 29
 30#define INTC_REVISION		0x0000
 31#define INTC_SYSCONFIG		0x0010
 32#define INTC_SYSSTATUS		0x0014
 33#define INTC_SIR		0x0040
 34#define INTC_CONTROL		0x0048
 35#define INTC_PROTECTION		0x004C
 36#define INTC_IDLE		0x0050
 37#define INTC_THRESHOLD		0x0068
 38#define INTC_MIR0		0x0084
 39#define INTC_MIR_CLEAR0		0x0088
 40#define INTC_MIR_SET0		0x008c
 41#define INTC_PENDING_IRQ0	0x0098
 42#define INTC_PENDING_IRQ1	0x00b8
 43#define INTC_PENDING_IRQ2	0x00d8
 44#define INTC_PENDING_IRQ3	0x00f8
 45#define INTC_ILR0		0x0100
 46
 47#define ACTIVEIRQ_MASK		0x7f	/* omap2/3 active interrupt bits */
 48#define SPURIOUSIRQ_MASK	(0x1ffffff << 7)
 49#define INTCPS_NR_ILR_REGS	128
 50#define INTCPS_NR_MIR_REGS	4
 51
 52#define INTC_IDLE_FUNCIDLE	(1 << 0)
 53#define INTC_IDLE_TURBO		(1 << 1)
 54
 55#define INTC_PROTECTION_ENABLE	(1 << 0)
 56
 57struct omap_intc_regs {
 58	u32 sysconfig;
 59	u32 protection;
 60	u32 idle;
 61	u32 threshold;
 62	u32 ilr[INTCPS_NR_ILR_REGS];
 63	u32 mir[INTCPS_NR_MIR_REGS];
 64};
 65static struct omap_intc_regs intc_context;
 66
 67static struct irq_domain *domain;
 68static void __iomem *omap_irq_base;
 69static int omap_nr_pending;
 70static int omap_nr_irqs;
 71
 72static void intc_writel(u32 reg, u32 val)
 73{
 74	writel_relaxed(val, omap_irq_base + reg);
 75}
 76
 77static u32 intc_readl(u32 reg)
 78{
 79	return readl_relaxed(omap_irq_base + reg);
 80}
 81
 82void omap_intc_save_context(void)
 83{
 84	int i;
 85
 86	intc_context.sysconfig =
 87		intc_readl(INTC_SYSCONFIG);
 88	intc_context.protection =
 89		intc_readl(INTC_PROTECTION);
 90	intc_context.idle =
 91		intc_readl(INTC_IDLE);
 92	intc_context.threshold =
 93		intc_readl(INTC_THRESHOLD);
 94
 95	for (i = 0; i < omap_nr_irqs; i++)
 96		intc_context.ilr[i] =
 97			intc_readl((INTC_ILR0 + 0x4 * i));
 98	for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
 99		intc_context.mir[i] =
100			intc_readl(INTC_MIR0 + (0x20 * i));
101}
102
103void omap_intc_restore_context(void)
104{
105	int i;
106
107	intc_writel(INTC_SYSCONFIG, intc_context.sysconfig);
108	intc_writel(INTC_PROTECTION, intc_context.protection);
109	intc_writel(INTC_IDLE, intc_context.idle);
110	intc_writel(INTC_THRESHOLD, intc_context.threshold);
111
112	for (i = 0; i < omap_nr_irqs; i++)
113		intc_writel(INTC_ILR0 + 0x4 * i,
114				intc_context.ilr[i]);
115
116	for (i = 0; i < INTCPS_NR_MIR_REGS; i++)
117		intc_writel(INTC_MIR0 + 0x20 * i,
118			intc_context.mir[i]);
119	/* MIRs are saved and restore with other PRCM registers */
120}
121
122void omap3_intc_prepare_idle(void)
123{
124	/*
125	 * Disable autoidle as it can stall interrupt controller,
126	 * cf. errata ID i540 for 3430 (all revisions up to 3.1.x)
127	 */
128	intc_writel(INTC_SYSCONFIG, 0);
129	intc_writel(INTC_IDLE, INTC_IDLE_TURBO);
130}
131
132void omap3_intc_resume_idle(void)
133{
134	/* Re-enable autoidle */
135	intc_writel(INTC_SYSCONFIG, 1);
136	intc_writel(INTC_IDLE, 0);
137}
138
139/* XXX: FIQ and additional INTC support (only MPU at the moment) */
140static void omap_ack_irq(struct irq_data *d)
141{
142	intc_writel(INTC_CONTROL, 0x1);
143}
144
/* Mask the line first, then ack, so it cannot re-trigger in between. */
static void omap_mask_ack_irq(struct irq_data *d)
{
	irq_gc_mask_disable_reg(d);
	omap_ack_irq(d);
}
150
151static void __init omap_irq_soft_reset(void)
152{
153	unsigned long tmp;
154
155	tmp = intc_readl(INTC_REVISION) & 0xff;
156
157	pr_info("IRQ: Found an INTC at 0x%p (revision %ld.%ld) with %d interrupts\n",
158		omap_irq_base, tmp >> 4, tmp & 0xf, omap_nr_irqs);
159
160	tmp = intc_readl(INTC_SYSCONFIG);
161	tmp |= 1 << 1;	/* soft reset */
162	intc_writel(INTC_SYSCONFIG, tmp);
163
164	while (!(intc_readl(INTC_SYSSTATUS) & 0x1))
165		/* Wait for reset to complete */;
166
167	/* Enable autoidle */
168	intc_writel(INTC_SYSCONFIG, 1 << 0);
169}
170
171int omap_irq_pending(void)
172{
173	int i;
174
175	for (i = 0; i < omap_nr_pending; i++)
176		if (intc_readl(INTC_PENDING_IRQ0 + (0x20 * i)))
177			return 1;
178	return 0;
179}
180
181void omap3_intc_suspend(void)
182{
183	/* A pending interrupt would prevent OMAP from entering suspend */
184	omap_ack_irq(NULL);
185}
186
187static int __init omap_alloc_gc_of(struct irq_domain *d, void __iomem *base)
188{
189	int ret;
190	int i;
191
192	ret = irq_alloc_domain_generic_chips(d, 32, 1, "INTC",
193			handle_level_irq, IRQ_NOREQUEST | IRQ_NOPROBE,
194			IRQ_LEVEL, 0);
195	if (ret) {
196		pr_warn("Failed to allocate irq chips\n");
197		return ret;
198	}
199
200	for (i = 0; i < omap_nr_pending; i++) {
201		struct irq_chip_generic *gc;
202		struct irq_chip_type *ct;
203
204		gc = irq_get_domain_generic_chip(d, 32 * i);
205		gc->reg_base = base;
206		ct = gc->chip_types;
207
208		ct->type = IRQ_TYPE_LEVEL_MASK;
209
210		ct->chip.irq_ack = omap_mask_ack_irq;
211		ct->chip.irq_mask = irq_gc_mask_disable_reg;
212		ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
213
214		ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
215
216		ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i;
217		ct->regs.disable = INTC_MIR_SET0 + 32 * i;
218	}
219
220	return 0;
221}
222
223static void __init omap_alloc_gc_legacy(void __iomem *base,
224		unsigned int irq_start, unsigned int num)
225{
226	struct irq_chip_generic *gc;
227	struct irq_chip_type *ct;
228
229	gc = irq_alloc_generic_chip("INTC", 1, irq_start, base,
230			handle_level_irq);
231	ct = gc->chip_types;
232	ct->chip.irq_ack = omap_mask_ack_irq;
233	ct->chip.irq_mask = irq_gc_mask_disable_reg;
234	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
235	ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
236
237	ct->regs.enable = INTC_MIR_CLEAR0;
238	ct->regs.disable = INTC_MIR_SET0;
239	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
240			IRQ_NOREQUEST | IRQ_NOPROBE, 0);
241}
242
243static int __init omap_init_irq_of(struct device_node *node)
244{
245	int ret;
246
247	omap_irq_base = of_iomap(node, 0);
248	if (WARN_ON(!omap_irq_base))
249		return -ENOMEM;
250
251	domain = irq_domain_add_linear(node, omap_nr_irqs,
252			&irq_generic_chip_ops, NULL);
253
254	omap_irq_soft_reset();
255
256	ret = omap_alloc_gc_of(domain, omap_irq_base);
257	if (ret < 0)
258		irq_domain_remove(domain);
259
260	return ret;
261}
262
263static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
264{
265	int j, irq_base;
266
267	omap_irq_base = ioremap(base, SZ_4K);
268	if (WARN_ON(!omap_irq_base))
269		return -ENOMEM;
270
271	irq_base = irq_alloc_descs(-1, 0, omap_nr_irqs, 0);
272	if (irq_base < 0) {
273		pr_warn("Couldn't allocate IRQ numbers\n");
274		irq_base = 0;
275	}
276
277	domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
278			&irq_domain_simple_ops, NULL);
279
280	omap_irq_soft_reset();
281
282	for (j = 0; j < omap_nr_irqs; j += 32)
283		omap_alloc_gc_legacy(omap_irq_base + j, j + irq_base, 32);
284
285	return 0;
286}
287
288static void __init omap_irq_enable_protection(void)
289{
290	u32 reg;
291
292	reg = intc_readl(INTC_PROTECTION);
293	reg |= INTC_PROTECTION_ENABLE;
294	intc_writel(INTC_PROTECTION, reg);
295}
296
297static int __init omap_init_irq(u32 base, struct device_node *node)
298{
299	int ret;
300
301	/*
302	 * FIXME legacy OMAP DMA driver sitting under arch/arm/plat-omap/dma.c
303	 * depends is still not ready for linear IRQ domains; because of that
304	 * we need to temporarily "blacklist" OMAP2 and OMAP3 devices from using
305	 * linear IRQ Domain until that driver is finally fixed.
306	 */
307	if (of_device_is_compatible(node, "ti,omap2-intc") ||
308			of_device_is_compatible(node, "ti,omap3-intc")) {
309		struct resource res;
310
311		if (of_address_to_resource(node, 0, &res))
312			return -ENOMEM;
313
314		base = res.start;
315		ret = omap_init_irq_legacy(base, node);
316	} else if (node) {
317		ret = omap_init_irq_of(node);
318	} else {
319		ret = omap_init_irq_legacy(base, NULL);
320	}
321
322	if (ret == 0)
323		omap_irq_enable_protection();
324
325	return ret;
326}
327
328static asmlinkage void __exception_irq_entry
329omap_intc_handle_irq(struct pt_regs *regs)
330{
331	extern unsigned long irq_err_count;
332	u32 irqnr;
333
334	irqnr = intc_readl(INTC_SIR);
335
336	/*
337	 * A spurious IRQ can result if interrupt that triggered the
338	 * sorting is no longer active during the sorting (10 INTC
339	 * functional clock cycles after interrupt assertion). Or a
340	 * change in interrupt mask affected the result during sorting
341	 * time. There is no special handling required except ignoring
342	 * the SIR register value just read and retrying.
343	 * See section 6.2.5 of AM335x TRM Literature Number: SPRUH73K
344	 *
345	 * Many a times, a spurious interrupt situation has been fixed
346	 * by adding a flush for the posted write acking the IRQ in
347	 * the device driver. Typically, this is going be the device
348	 * driver whose interrupt was handled just before the spurious
349	 * IRQ occurred. Pay attention to those device drivers if you
350	 * run into hitting the spurious IRQ condition below.
351	 */
352	if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
353		pr_err_once("%s: spurious irq!\n", __func__);
354		irq_err_count++;
355		omap_ack_irq(NULL);
356		return;
357	}
358
359	irqnr &= ACTIVEIRQ_MASK;
360	generic_handle_domain_irq(domain, irqnr);
 
 
 
 
 
 
 
 
361}
362
363static int __init intc_of_init(struct device_node *node,
364			     struct device_node *parent)
365{
366	int ret;
367
368	omap_nr_pending = 3;
369	omap_nr_irqs = 96;
370
371	if (WARN_ON(!node))
372		return -ENODEV;
373
374	if (of_device_is_compatible(node, "ti,dm814-intc") ||
375	    of_device_is_compatible(node, "ti,dm816-intc") ||
376	    of_device_is_compatible(node, "ti,am33xx-intc")) {
377		omap_nr_irqs = 128;
378		omap_nr_pending = 4;
379	}
380
381	ret = omap_init_irq(-1, of_node_get(node));
382	if (ret < 0)
383		return ret;
384
385	set_handle_irq(omap_intc_handle_irq);
386
387	return 0;
388}
389
390IRQCHIP_DECLARE(omap2_intc, "ti,omap2-intc", intc_of_init);
391IRQCHIP_DECLARE(omap3_intc, "ti,omap3-intc", intc_of_init);
392IRQCHIP_DECLARE(dm814x_intc, "ti,dm814-intc", intc_of_init);
393IRQCHIP_DECLARE(dm816x_intc, "ti,dm816-intc", intc_of_init);
394IRQCHIP_DECLARE(am33xx_intc, "ti,am33xx-intc", intc_of_init);