v3.15
/*
 * Synopsys DW APB ICTL irqchip driver.
 *
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 *
 * based on GPL'ed 2.6 kernel sources
 *  (c) Marvell International Ltd.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#include "irqchip.h"

#define APB_INT_ENABLE_L	0x00
#define APB_INT_ENABLE_H	0x04
#define APB_INT_MASK_L		0x08
#define APB_INT_MASK_H		0x0c
#define APB_INT_FINALSTATUS_L	0x30
#define APB_INT_FINALSTATUS_H	0x34

static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_get_chip(irq);
	struct irq_chip_generic *gc = irq_get_handler_data(irq);
	struct irq_domain *d = gc->private;
	u32 stat;
	int n;

	chained_irq_enter(chip, desc);

	for (n = 0; n < gc->num_ct; n++) {
		stat = readl_relaxed(gc->reg_base +
				     APB_INT_FINALSTATUS_L + 4 * n);
		while (stat) {
			u32 hwirq = ffs(stat) - 1;
			generic_handle_irq(irq_find_mapping(d,
					    gc->irq_base + hwirq + 32 * n));
			stat &= ~(1 << hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}

static int __init dw_apb_ictl_init(struct device_node *np,
				   struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct resource r;
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	void __iomem *iobase;
	int ret, nrirqs, irq;
	u32 reg;

	/* Map the parent interrupt for the chained handler */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%s: unable to parse irq\n", np->full_name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		pr_err("%s: unable to get resource\n", np->full_name);
		return ret;
	}

	if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
		pr_err("%s: unable to request mem region\n", np->full_name);
		return -ENOMEM;
	}

	iobase = ioremap(r.start, resource_size(&r));
	if (!iobase) {
		pr_err("%s: unable to map resource\n", np->full_name);
		ret = -ENOMEM;
		goto err_release;
	}

	/*
	 * DW IP can be configured to allow 2-64 irqs. We can determine
	 * the number of irqs supported by writing into the enable register
	 * and looking for bits not set, as the corresponding flip-flops
	 * will have been removed by the synthesis tool.
	 */

	/* mask and enable all interrupts */
	writel(~0, iobase + APB_INT_MASK_L);
	writel(~0, iobase + APB_INT_MASK_H);
	writel(~0, iobase + APB_INT_ENABLE_L);
	writel(~0, iobase + APB_INT_ENABLE_H);

	reg = readl(iobase + APB_INT_ENABLE_H);
	if (reg)
		nrirqs = 32 + fls(reg);
	else
		nrirqs = fls(readl(iobase + APB_INT_ENABLE_L));

	domain = irq_domain_add_linear(np, nrirqs,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", np->full_name);
		ret = -ENOMEM;
		goto err_unmap;
	}

	ret = irq_alloc_domain_generic_chips(domain, 32, (nrirqs > 32) ? 2 : 1,
					     np->name, handle_level_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
		goto err_unmap;
	}

	gc = irq_get_domain_generic_chip(domain, 0);
	gc->private = domain;
	gc->reg_base = iobase;

	gc->chip_types[0].regs.mask = APB_INT_MASK_L;
	gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;

	if (nrirqs > 32) {
		gc->chip_types[1].regs.mask = APB_INT_MASK_H;
		gc->chip_types[1].chip.irq_mask = irq_gc_mask_set_bit;
		gc->chip_types[1].chip.irq_unmask = irq_gc_mask_clr_bit;
	}

	irq_set_handler_data(irq, gc);
	irq_set_chained_handler(irq, dw_apb_ictl_handler);

	return 0;

err_unmap:
	iounmap(iobase);
err_release:
	release_mem_region(r.start, resource_size(&r));
	return ret;
}
IRQCHIP_DECLARE(dw_apb_ictl,
		"snps,dw-apb-ictl", dw_apb_ictl_init);
v4.10.11
/*
 * Synopsys DW APB ICTL irqchip driver.
 *
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 *
 * based on GPL'ed 2.6 kernel sources
 *  (c) Marvell International Ltd.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define APB_INT_ENABLE_L	0x00
#define APB_INT_ENABLE_H	0x04
#define APB_INT_MASK_L		0x08
#define APB_INT_MASK_H		0x0c
#define APB_INT_FINALSTATUS_L	0x30
#define APB_INT_FINALSTATUS_H	0x34
#define APB_INT_BASE_OFFSET	0x04

static void dw_apb_ictl_handler(struct irq_desc *desc)
{
	struct irq_domain *d = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	int n;

	chained_irq_enter(chip, desc);

	for (n = 0; n < d->revmap_size; n += 32) {
		struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, n);
		u32 stat = readl_relaxed(gc->reg_base + APB_INT_FINALSTATUS_L);

		while (stat) {
			u32 hwirq = ffs(stat) - 1;
			u32 virq = irq_find_mapping(d, gc->irq_base + hwirq);

			generic_handle_irq(virq);
			stat &= ~(1 << hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}

#ifdef CONFIG_PM
static void dw_apb_ictl_resume(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	irq_gc_lock(gc);
	writel_relaxed(~0, gc->reg_base + ct->regs.enable);
	writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask);
	irq_gc_unlock(gc);
}
#else
#define dw_apb_ictl_resume	NULL
#endif /* CONFIG_PM */

static int __init dw_apb_ictl_init(struct device_node *np,
				   struct device_node *parent)
{
	unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
	struct resource r;
	struct irq_domain *domain;
	struct irq_chip_generic *gc;
	void __iomem *iobase;
	int ret, nrirqs, irq, i;
	u32 reg;

	/* Map the parent interrupt for the chained handler */
	irq = irq_of_parse_and_map(np, 0);
	if (irq <= 0) {
		pr_err("%s: unable to parse irq\n", np->full_name);
		return -EINVAL;
	}

	ret = of_address_to_resource(np, 0, &r);
	if (ret) {
		pr_err("%s: unable to get resource\n", np->full_name);
		return ret;
	}

	if (!request_mem_region(r.start, resource_size(&r), np->full_name)) {
		pr_err("%s: unable to request mem region\n", np->full_name);
		return -ENOMEM;
	}

	iobase = ioremap(r.start, resource_size(&r));
	if (!iobase) {
		pr_err("%s: unable to map resource\n", np->full_name);
		ret = -ENOMEM;
		goto err_release;
	}

	/*
	 * DW IP can be configured to allow 2-64 irqs. We can determine
	 * the number of irqs supported by writing into the enable register
	 * and looking for bits not set, as the corresponding flip-flops
	 * will have been removed by the synthesis tool.
	 */

	/* mask and enable all interrupts */
	writel_relaxed(~0, iobase + APB_INT_MASK_L);
	writel_relaxed(~0, iobase + APB_INT_MASK_H);
	writel_relaxed(~0, iobase + APB_INT_ENABLE_L);
	writel_relaxed(~0, iobase + APB_INT_ENABLE_H);

	reg = readl_relaxed(iobase + APB_INT_ENABLE_H);
	if (reg)
		nrirqs = 32 + fls(reg);
	else
		nrirqs = fls(readl_relaxed(iobase + APB_INT_ENABLE_L));

	domain = irq_domain_add_linear(np, nrirqs,
				       &irq_generic_chip_ops, NULL);
	if (!domain) {
		pr_err("%s: unable to add irq domain\n", np->full_name);
		ret = -ENOMEM;
		goto err_unmap;
	}

	ret = irq_alloc_domain_generic_chips(domain, 32, 1, np->name,
					     handle_level_irq, clr, 0,
					     IRQ_GC_INIT_MASK_CACHE);
	if (ret) {
		pr_err("%s: unable to alloc irq domain gc\n", np->full_name);
		goto err_unmap;
	}

	for (i = 0; i < DIV_ROUND_UP(nrirqs, 32); i++) {
		gc = irq_get_domain_generic_chip(domain, i * 32);
		gc->reg_base = iobase + i * APB_INT_BASE_OFFSET;
		gc->chip_types[0].regs.mask = APB_INT_MASK_L;
		gc->chip_types[0].regs.enable = APB_INT_ENABLE_L;
		gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
		gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume;
	}

	irq_set_chained_handler_and_data(irq, dw_apb_ictl_handler, domain);

	return 0;

err_unmap:
	iounmap(iobase);
err_release:
	release_mem_region(r.start, resource_size(&r));
	return ret;
}
IRQCHIP_DECLARE(dw_apb_ictl,
		"snps,dw-apb-ictl", dw_apb_ictl_init);