// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas IRQC Driver
 *
 * Copyright (C) 2013 Magnus Damm
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#define IRQC_IRQ_MAX	32	/* maximum 32 interrupts per driver instance */

#define IRQC_REQ_STS	0x00	/* Interrupt Request Status Register */
#define IRQC_EN_STS	0x04	/* Interrupt Enable Status Register */
#define IRQC_EN_SET	0x08	/* Interrupt Enable Set Register */
#define IRQC_INT_CPU_BASE(n)	(0x000 + ((n) * 0x10))
				/* SYS-CPU vs. RT-CPU */
#define DETECT_STATUS	0x100	/* IRQn Detect Status Register */
#define MONITOR		0x104	/* IRQn Signal Level Monitor Register */
#define HLVL_STS	0x108	/* IRQn High Level Detect Status Register */
#define LLVL_STS	0x10c	/* IRQn Low Level Detect Status Register */
#define S_R_EDGE_STS	0x110	/* IRQn Sync Rising Edge Detect Status Reg. */
#define S_F_EDGE_STS	0x114	/* IRQn Sync Falling Edge Detect Status Reg. */
#define A_R_EDGE_STS	0x118	/* IRQn Async Rising Edge Detect Status Reg. */
#define A_F_EDGE_STS	0x11c	/* IRQn Async Falling Edge Detect Status Reg. */
#define CHTEN_STS	0x120	/* Chattering Reduction Status Register */
#define IRQC_CONFIG(n)	(0x180 + ((n) * 0x04))
				/* IRQn Configuration Register */

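/*
 * Per-IRQ bookkeeping: the hardware IRQ number within this controller
 * instance and the Linux IRQ number of the parent interrupt requested
 * for it in irqc_probe().
 */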
struct irqc_irq {
	int hw_irq;
	int requested_irq;
	struct irqc_priv *p;
};

struct irqc_priv {
	void __iomem *iomem;
	void __iomem *cpu_int_base;
	struct irqc_irq irq[IRQC_IRQ_MAX];
	unsigned int number_of_irqs;
	struct device *dev;
	struct irq_chip_generic *gc;
	struct irq_domain *irq_domain;
	atomic_t wakeup_path;
};

static struct irqc_priv *irq_data_to_priv(struct irq_data *data)
{
	return data->domain->host_data;
}

static void irqc_dbg(struct irqc_irq *i, char *str)
{
	dev_dbg(i->p->dev, "%s (%d:%d)\n", str, i->requested_irq, i->hw_irq);
}

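/*
 * Map Linux IRQ trigger types to the sense selection value programmed
 * into the low bits of the per-IRQ IRQC_CONFIG register; a zero entry
 * means the trigger type is not supported.
 */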
static unsigned char irqc_sense[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_LEVEL_LOW]	= 0x01,
	[IRQ_TYPE_LEVEL_HIGH]	= 0x02,
	[IRQ_TYPE_EDGE_FALLING]	= 0x04,	/* Synchronous */
	[IRQ_TYPE_EDGE_RISING]	= 0x08,	/* Synchronous */
	[IRQ_TYPE_EDGE_BOTH]	= 0x0c,	/* Synchronous */
};

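/* Program the trigger sense for one interrupt in its IRQC_CONFIG register */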
static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct irqc_priv *p = irq_data_to_priv(d);
	int hw_irq = irqd_to_hwirq(d);
	unsigned char value = irqc_sense[type & IRQ_TYPE_SENSE_MASK];
	u32 tmp;

	irqc_dbg(&p->irq[hw_irq], "sense");

	if (!value)
		return -EINVAL;

	tmp = ioread32(p->iomem + IRQC_CONFIG(hw_irq));
	tmp &= ~0x3f;
	tmp |= value;
	iowrite32(tmp, p->iomem + IRQC_CONFIG(hw_irq));
	return 0;
}

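/*
 * Forward wake-up configuration to the parent interrupt and track how
 * many interrupts are wake-up sources so irqc_suspend() can keep the
 * wakeup path powered.
 */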
static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct irqc_priv *p = irq_data_to_priv(d);
	int hw_irq = irqd_to_hwirq(d);

	irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
	if (on)
		atomic_inc(&p->wakeup_path);
	else
		atomic_dec(&p->wakeup_path);

	return 0;
}

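/*
 * Handler for the parent interrupt of each IRQC input: acknowledge the
 * detection status bit and forward the event to the mapped interrupt in
 * this driver's irq domain.
 */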
static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
{
	struct irqc_irq *i = dev_id;
	struct irqc_priv *p = i->p;
	u32 bit = BIT(i->hw_irq);

	irqc_dbg(i, "demux1");

	if (ioread32(p->iomem + DETECT_STATUS) & bit) {
		iowrite32(bit, p->iomem + DETECT_STATUS);
		irqc_dbg(i, "demux2");
		generic_handle_domain_irq(p->irq_domain, i->hw_irq);
		return IRQ_HANDLED;
	}
	return IRQ_NONE;
}

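/*
 * Set up one IRQC instance: discover its parent interrupts, map the
 * registers, create the irq domain and generic chip, then chain the
 * parent interrupts to it.
 */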
static int irqc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const char *name = dev_name(dev);
	struct irqc_priv *p;
	int ret;
	int k;

	p = devm_kzalloc(dev, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	p->dev = dev;
	platform_set_drvdata(pdev, p);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	/* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
	for (k = 0; k < IRQC_IRQ_MAX; k++) {
		ret = platform_get_irq_optional(pdev, k);
		if (ret == -ENXIO)
			break;
		if (ret < 0)
			goto err_runtime_pm_disable;

		p->irq[k].p = p;
		p->irq[k].hw_irq = k;
		p->irq[k].requested_irq = ret;
	}

	p->number_of_irqs = k;
	if (p->number_of_irqs < 1) {
		dev_err(dev, "not enough IRQ resources\n");
		ret = -EINVAL;
		goto err_runtime_pm_disable;
	}

	/* ioremap IOMEM and setup read/write callbacks */
	p->iomem = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(p->iomem)) {
		ret = PTR_ERR(p->iomem);
		goto err_runtime_pm_disable;
	}

	p->cpu_int_base = p->iomem + IRQC_INT_CPU_BASE(0);	/* SYS-SPI */

	p->irq_domain = irq_domain_add_linear(dev->of_node, p->number_of_irqs,
					      &irq_generic_chip_ops, p);
	if (!p->irq_domain) {
		ret = -ENXIO;
		dev_err(dev, "cannot initialize irq domain\n");
		goto err_runtime_pm_disable;
	}

	ret = irq_alloc_domain_generic_chips(p->irq_domain, p->number_of_irqs,
					     1, "irqc", handle_level_irq,
					     0, 0, IRQ_GC_INIT_NESTED_LOCK);
	if (ret) {
		dev_err(dev, "cannot allocate generic chip\n");
		goto err_remove_domain;
	}

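	/*
	 * A single generic chip covers all interrupts of this instance;
	 * mask/unmask are plain writes to the enable status/set registers
	 * relative to the SYS-CPU register block selected above.
	 */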
	p->gc = irq_get_domain_generic_chip(p->irq_domain, 0);
	p->gc->reg_base = p->cpu_int_base;
	p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
	p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
	p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
	p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
	p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type;
	p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake;
	p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;

	irq_domain_set_pm_device(p->irq_domain, dev);

	/* request interrupts one by one */
	for (k = 0; k < p->number_of_irqs; k++) {
		if (devm_request_irq(dev, p->irq[k].requested_irq,
				     irqc_irq_handler, 0, name, &p->irq[k])) {
			dev_err(dev, "failed to request IRQ\n");
			ret = -ENOENT;
			goto err_remove_domain;
		}
	}

	dev_info(dev, "driving %d irqs\n", p->number_of_irqs);

	return 0;

err_remove_domain:
	irq_domain_remove(p->irq_domain);
err_runtime_pm_disable:
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
	return ret;
}

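/*
 * devm-managed resources (interrupt handlers, register mapping, private
 * data) are released automatically; only the irq domain and runtime PM
 * state need explicit teardown here.
 */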
static int irqc_remove(struct platform_device *pdev)
{
	struct irqc_priv *p = platform_get_drvdata(pdev);

	irq_domain_remove(p->irq_domain);
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

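/*
 * On suspend, tell the PM core to keep the path to this interrupt
 * controller powered if any of its interrupts is an enabled wake-up
 * source.
 */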
static int __maybe_unused irqc_suspend(struct device *dev)
{
	struct irqc_priv *p = dev_get_drvdata(dev);

	if (atomic_read(&p->wakeup_path))
		device_set_wakeup_path(dev);

	return 0;
}

static SIMPLE_DEV_PM_OPS(irqc_pm_ops, irqc_suspend, NULL);

static const struct of_device_id irqc_dt_ids[] = {
	{ .compatible = "renesas,irqc", },
	{},
};
MODULE_DEVICE_TABLE(of, irqc_dt_ids);

static struct platform_driver irqc_device_driver = {
	.probe		= irqc_probe,
	.remove		= irqc_remove,
	.driver		= {
		.name		= "renesas_irqc",
		.of_match_table	= irqc_dt_ids,
		.pm	= &irqc_pm_ops,
	}
};

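/*
 * Register at postcore_initcall time so the interrupt controller is
 * available before the devices using its interrupts are probed.
 */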
static int __init irqc_init(void)
{
	return platform_driver_register(&irqc_device_driver);
}
postcore_initcall(irqc_init);

static void __exit irqc_exit(void)
{
	platform_driver_unregister(&irqc_device_driver);
}
module_exit(irqc_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas IRQC Driver");