Linux kernel source listing: drivers/irqchip/irq-orion.c (Marvell Orion SoCs IRQ chip driver), shown at two kernel versions for comparison.

Version v3.15:
  1/*
  2 * Marvell Orion SoCs IRQ chip driver.
  3 *
  4 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
  5 *
  6 * This file is licensed under the terms of the GNU General Public
  7 * License version 2.  This program is licensed "as is" without any
  8 * warranty of any kind, whether express or implied.
  9 */
 10
 11#include <linux/io.h>
 12#include <linux/irq.h>
 
 13#include <linux/of.h>
 14#include <linux/of_address.h>
 15#include <linux/of_irq.h>
 16#include <asm/exception.h>
 17#include <asm/mach/irq.h>
 18
 19#include "irqchip.h"
 20
 21/*
 22 * Orion SoC main interrupt controller
 23 */
 24#define ORION_IRQS_PER_CHIP		32
 25
 26#define ORION_IRQ_CAUSE			0x00
 27#define ORION_IRQ_MASK			0x04
 28#define ORION_IRQ_FIQ_MASK		0x08
 29#define ORION_IRQ_ENDP_MASK		0x0c
 30
 31static struct irq_domain *orion_irq_domain;
 32
 33static void
 34__exception_irq_entry orion_handle_irq(struct pt_regs *regs)
 35{
 36	struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
 37	int n, base = 0;
 38
 39	for (n = 0; n < dgc->num_chips; n++, base += ORION_IRQS_PER_CHIP) {
 40		struct irq_chip_generic *gc =
 41			irq_get_domain_generic_chip(orion_irq_domain, base);
 42		u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) &
 43			gc->mask_cache;
 44		while (stat) {
 45			u32 hwirq = ffs(stat) - 1;
 46			u32 irq = irq_find_mapping(orion_irq_domain,
 47						   gc->irq_base + hwirq);
 48			handle_IRQ(irq, regs);
 49			stat &= ~(1 << hwirq);
 50		}
 51	}
 52}
 53
/*
 * Probe the main interrupt controller: one generic irq chip per 32-bit
 * cause/mask register bank listed in the "reg" property.
 *
 * Failures panic: without the root interrupt controller the machine
 * cannot boot, so there is nothing useful to return to.
 */
static int __init orion_irq_init(struct device_node *np,
				 struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int n, ret, base, num_chips = 0;
	struct resource r;

	/* count number of irq chips by valid reg addresses */
	while (of_address_to_resource(np, num_chips, &r) == 0)
		num_chips++;

	orion_irq_domain = irq_domain_add_linear(np,
				num_chips * ORION_IRQS_PER_CHIP,
				&irq_generic_chip_ops, NULL);
	if (!orion_irq_domain)
		panic("%s: unable to add irq domain\n", np->name);

	ret = irq_alloc_domain_generic_chips(orion_irq_domain,
				ORION_IRQS_PER_CHIP, 1, np->name,
				handle_level_irq, clr, 0,
				IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		panic("%s: unable to alloc irq domain gc\n", np->name);

	for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) {
		struct irq_chip_generic *gc =
			irq_get_domain_generic_chip(orion_irq_domain, base);

		/* Cannot fail: the same index was validated while counting. */
		of_address_to_resource(np, n, &r);

		if (!request_mem_region(r.start, resource_size(&r), np->name))
			panic("%s: unable to request mem region %d",
			      np->name, n);

		gc->reg_base = ioremap(r.start, resource_size(&r));
		if (!gc->reg_base)
			panic("%s: unable to map resource %d", np->name, n);

		gc->chip_types[0].regs.mask = ORION_IRQ_MASK;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

		/* mask all interrupts */
		writel(0, gc->reg_base + ORION_IRQ_MASK);
	}

	/* Install the exception entry only after every source is masked. */
	set_handle_irq(orion_handle_irq);
	return 0;
}
IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
104
105/*
106 * Orion SoC bridge interrupt controller
107 */
108#define ORION_BRIDGE_IRQ_CAUSE	0x00
109#define ORION_BRIDGE_IRQ_MASK	0x04
110
111static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
112{
113	struct irq_domain *d = irq_get_handler_data(irq);
114
115	struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
116	u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
117		   gc->mask_cache;
118
119	while (stat) {
120		u32 hwirq = ffs(stat) - 1;
121
122		generic_handle_irq(irq_find_mapping(d, gc->irq_base + hwirq));
123		stat &= ~(1 << hwirq);
124	}
125}
126
127/*
128 * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register.
129 * To avoid interrupt events on stale irqs, we clear them before unmask.
130 */
131static unsigned int orion_bridge_irq_startup(struct irq_data *d)
132{
133	struct irq_chip_type *ct = irq_data_get_chip_type(d);
134
135	ct->chip.irq_ack(d);
136	ct->chip.irq_unmask(d);
137	return 0;
138}
139
/*
 * Probe the bridge interrupt controller: a single 32-bit cause/mask
 * bank whose events are delivered through one chained parent interrupt.
 *
 * NOTE(review): the error paths below do not dispose of resources
 * acquired earlier (irq domain, mem region, parent mapping) —
 * presumably tolerated for a boot-time controller, but worth confirming.
 */
static int __init orion_bridge_irq_init(struct device_node *np,
					struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct resource r;
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	/*
	 * NOTE(review): nrirqs is int but of_property_read_u32() expects
	 * a u32 * — verify this builds warning-free.
	 */
	int ret, irq, nrirqs = 32;

	/* get optional number of interrupts provided */
	of_property_read_u32(np, "marvell,#interrupts", &nrirqs);

	domain = irq_domain_add_linear(np, nrirqs,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", np->name);
		return -ENOMEM;
	}

	ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
			     handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc\n", np->name);
		return ret;
	}

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		pr_err("%s: unable to get resource\n", np->name);
		return ret;
	}

	if (!request_mem_region(r.start, resource_size(&r), np->name)) {
		pr_err("%s: unable to request mem region\n", np->name);
		return -ENOMEM;
	}

	/* Map the parent interrupt for the chained handler */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%s: unable to parse irq\n", np->name);
		return -EINVAL;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = ioremap(r.start, resource_size(&r));
	if (!gc->reg_base) {
		pr_err("%s: unable to map resource\n", np->name);
		return -ENOMEM;
	}

	/* Edge chip: ack clears the CAUSE bit, mask/unmask toggle MASK. */
	gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
	gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
	gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
	gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

	/* mask and clear all interrupts */
	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);

	/* Handler data is set before the handler so it is never seen NULL. */
	irq_set_handler_data(irq, domain);
	irq_set_chained_handler(irq, orion_bridge_irq_handler);

	return 0;
}
IRQCHIP_DECLARE(orion_bridge_intc,
		"marvell,orion-bridge-intc", orion_bridge_irq_init);
Version v6.13.7:
  1/*
  2 * Marvell Orion SoCs IRQ chip driver.
  3 *
  4 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
  5 *
  6 * This file is licensed under the terms of the GNU General Public
  7 * License version 2.  This program is licensed "as is" without any
  8 * warranty of any kind, whether express or implied.
  9 */
 10
 11#include <linux/io.h>
 12#include <linux/irq.h>
 13#include <linux/irqchip.h>
 14#include <linux/of.h>
 15#include <linux/of_address.h>
 16#include <linux/of_irq.h>
 17#include <asm/exception.h>
 18#include <asm/mach/irq.h>
 19
 
 
 20/*
 21 * Orion SoC main interrupt controller
 22 */
 23#define ORION_IRQS_PER_CHIP		32
 24
 25#define ORION_IRQ_CAUSE			0x00
 26#define ORION_IRQ_MASK			0x04
 27#define ORION_IRQ_FIQ_MASK		0x08
 28#define ORION_IRQ_ENDP_MASK		0x0c
 29
 30static struct irq_domain *orion_irq_domain;
 31
 32static void
 33__exception_irq_entry orion_handle_irq(struct pt_regs *regs)
 34{
 35	struct irq_domain_chip_generic *dgc = orion_irq_domain->gc;
 36	int n, base = 0;
 37
 38	for (n = 0; n < dgc->num_chips; n++, base += ORION_IRQS_PER_CHIP) {
 39		struct irq_chip_generic *gc =
 40			irq_get_domain_generic_chip(orion_irq_domain, base);
 41		u32 stat = readl_relaxed(gc->reg_base + ORION_IRQ_CAUSE) &
 42			gc->mask_cache;
 43		while (stat) {
 44			u32 hwirq = __fls(stat);
 45			generic_handle_domain_irq(orion_irq_domain,
 46						  gc->irq_base + hwirq);
 
 47			stat &= ~(1 << hwirq);
 48		}
 49	}
 50}
 51
/*
 * Probe the main interrupt controller: one generic irq chip per 32-bit
 * cause/mask register bank listed in the "reg" property.
 *
 * Failures panic: without the root interrupt controller the machine
 * cannot boot, so there is nothing useful to return to.
 */
static int __init orion_irq_init(struct device_node *np,
				 struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	int n, ret, base, num_chips = 0;
	struct resource r;

	/* count number of irq chips by valid reg addresses */
	num_chips = of_address_count(np);

	orion_irq_domain = irq_domain_add_linear(np,
				num_chips * ORION_IRQS_PER_CHIP,
				&irq_generic_chip_ops, NULL);
	if (!orion_irq_domain)
		panic("%pOFn: unable to add irq domain\n", np);

	ret = irq_alloc_domain_generic_chips(orion_irq_domain,
				ORION_IRQS_PER_CHIP, 1, np->full_name,
				handle_level_irq, clr, 0,
				IRQ_GC_INIT_MASK_CACHE);
	if (ret)
		panic("%pOFn: unable to alloc irq domain gc\n", np);

	for (n = 0, base = 0; n < num_chips; n++, base += ORION_IRQS_PER_CHIP) {
		struct irq_chip_generic *gc =
			irq_get_domain_generic_chip(orion_irq_domain, base);

		/* Index n < of_address_count(np), so this cannot fail. */
		of_address_to_resource(np, n, &r);

		if (!request_mem_region(r.start, resource_size(&r), np->name))
			panic("%pOFn: unable to request mem region %d",
			      np, n);

		gc->reg_base = ioremap(r.start, resource_size(&r));
		if (!gc->reg_base)
			panic("%pOFn: unable to map resource %d", np, n);

		gc->chip_types[0].regs.mask = ORION_IRQ_MASK;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;

		/* mask all interrupts */
		writel(0, gc->reg_base + ORION_IRQ_MASK);
	}

	/* Install the exception entry only after every source is masked. */
	set_handle_irq(orion_handle_irq);
	return 0;
}
IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
101
102/*
103 * Orion SoC bridge interrupt controller
104 */
105#define ORION_BRIDGE_IRQ_CAUSE	0x00
106#define ORION_BRIDGE_IRQ_MASK	0x04
107
108static void orion_bridge_irq_handler(struct irq_desc *desc)
109{
110	struct irq_domain *d = irq_desc_get_handler_data(desc);
111
112	struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
113	u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
114		   gc->mask_cache;
115
116	while (stat) {
117		u32 hwirq = __fls(stat);
118
119		generic_handle_domain_irq(d, gc->irq_base + hwirq);
120		stat &= ~(1 << hwirq);
121	}
122}
123
124/*
125 * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register.
126 * To avoid interrupt events on stale irqs, we clear them before unmask.
127 */
128static unsigned int orion_bridge_irq_startup(struct irq_data *d)
129{
130	struct irq_chip_type *ct = irq_data_get_chip_type(d);
131
132	ct->chip.irq_ack(d);
133	ct->chip.irq_unmask(d);
134	return 0;
135}
136
137static int __init orion_bridge_irq_init(struct device_node *np,
138					struct device_node *parent)
139{
140	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
141	struct resource r;
142	struct irq_domain *domain;
143	struct irq_chip_generic *gc;
144	int ret, irq, nrirqs = 32;
145
146	/* get optional number of interrupts provided */
147	of_property_read_u32(np, "marvell,#interrupts", &nrirqs);
148
149	domain = irq_domain_add_linear(np, nrirqs,
150				       &irq_generic_chip_ops, NULL);
151	if (!domain) {
152		pr_err("%pOFn: unable to add irq domain\n", np);
153		return -ENOMEM;
154	}
155
156	ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
157			     handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
158	if (ret) {
159		pr_err("%pOFn: unable to alloc irq domain gc\n", np);
160		return ret;
161	}
162
163	ret = of_address_to_resource(np, 0, &r);
164	if (ret) {
165		pr_err("%pOFn: unable to get resource\n", np);
166		return ret;
167	}
168
169	if (!request_mem_region(r.start, resource_size(&r), np->name)) {
170		pr_err("%s: unable to request mem region\n", np->name);
171		return -ENOMEM;
172	}
173
174	/* Map the parent interrupt for the chained handler */
175	irq = irq_of_parse_and_map(np, 0);
176	if (irq <= 0) {
177		pr_err("%pOFn: unable to parse irq\n", np);
178		return -EINVAL;
179	}
180
181	gc = irq_get_domain_generic_chip(domain, 0);
182	gc->reg_base = ioremap(r.start, resource_size(&r));
183	if (!gc->reg_base) {
184		pr_err("%pOFn: unable to map resource\n", np);
185		return -ENOMEM;
186	}
187
188	gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
189	gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
190	gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
191	gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
192	gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
193	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
194
195	/* mask and clear all interrupts */
196	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
197	writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
198
199	irq_set_chained_handler_and_data(irq, orion_bridge_irq_handler,
200					 domain);
201
202	return 0;
203}
204IRQCHIP_DECLARE(orion_bridge_intc,
205		"marvell,orion-bridge-intc", orion_bridge_irq_init);