/*
 * Library implementing the most common irq chip callback functions
 *
 * Copyright (C) 2011, Thomas Gleixner
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>

#include "internals.h"

static LIST_HEAD(gc_list);
static DEFINE_RAW_SPINLOCK(gc_lock);

static inline struct irq_chip_regs *cur_regs(struct irq_data *d)
{
	return &container_of(d->chip, struct irq_chip_type, chip)->regs;
}

/**
 * irq_gc_noop - NOOP function
 * @d: irq_data
 */
void irq_gc_noop(struct irq_data *d)
{
}

/**
 * irq_gc_mask_disable_reg - Mask chip via disable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_mask_disable_reg(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->disable);
	gc->mask_cache &= ~mask;
	irq_gc_unlock(gc);
}

/**
 * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock
 */
void irq_gc_mask_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache |= mask;
	irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock
 */
void irq_gc_mask_clr_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache &= ~mask;
	irq_reg_writel(gc->mask_cache, gc->reg_base + cur_regs(d)->mask);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_unmask_enable_reg - Unmask chip via enable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_unmask_enable_reg(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->enable);
	gc->mask_cache |= mask;
	irq_gc_unlock(gc);
}

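/*
 * Illustrative sketch (not part of this file): a driver whose controller has
 * separate enable/disable registers would typically plug the two helpers
 * above into its irq_chip_type, while a controller with a single mask
 * register would use the set/clr bit variants instead. The FOO_* register
 * offsets below are hypothetical.
 *
 *	ct = gc->chip_types;
 *	ct->regs.disable = FOO_INTR_DISABLE;
 *	ct->regs.enable = FOO_INTR_ENABLE;
 *	ct->chip.irq_mask = irq_gc_mask_disable_reg;
 *	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
 */
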
/**
 * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
 * @d: irq_data
 */
void irq_gc_ack_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
 * @d: irq_data
 */
void irq_gc_ack_clr_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mask = ~(1 << (d->irq - gc->irq_base));

	irq_gc_lock(gc);
	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt
 * @d: irq_data
 */
void irq_gc_mask_disable_reg_and_ack(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->mask);
	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->ack);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_eoi - EOI interrupt
 * @d: irq_data
 */
void irq_gc_eoi(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	irq_reg_writel(mask, gc->reg_base + cur_regs(d)->eoi);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_set_wake - Set/clr wake bit for an interrupt
 * @d: irq_data
 * @on: Indicates whether the wake bit should be set or cleared
 *
 * For chips where the wake from suspend functionality is not
 * configured in a separate register and the wakeup active state is
 * just stored in a bitmask.
 */
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mask = 1 << (d->irq - gc->irq_base);

	if (!(mask & gc->wake_enabled))
		return -EINVAL;

	irq_gc_lock(gc);
	if (on)
		gc->wake_active |= mask;
	else
		gc->wake_active &= ~mask;
	irq_gc_unlock(gc);
	return 0;
}

/**
 * irq_alloc_generic_chip - Allocate a generic chip and initialize it
 * @name: Name of the irq chip
 * @num_ct: Number of irq_chip_type instances associated with this chip
 * @irq_base: Interrupt base nr for this chip
 * @reg_base: Register base address (virtual)
 * @handler: Default flow handler associated with this chip
 *
 * Returns an initialized irq_chip_generic structure. The chip defaults
 * to the primary (index 0) irq_chip_type and @handler
 */
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
		       void __iomem *reg_base, irq_flow_handler_t handler)
{
	struct irq_chip_generic *gc;
	unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);

	gc = kzalloc(sz, GFP_KERNEL);
	if (gc) {
		raw_spin_lock_init(&gc->lock);
		gc->num_ct = num_ct;
		gc->irq_base = irq_base;
		gc->reg_base = reg_base;
		gc->chip_types->chip.name = name;
		gc->chip_types->handler = handler;
	}
	return gc;
}

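/*
 * Illustrative sketch (not part of this file): typical allocation of a
 * generic chip for a bank of 32 interrupts. The irq base, register base
 * and ack offset are hypothetical driver-specific values.
 *
 *	struct irq_chip_generic *gc;
 *
 *	gc = irq_alloc_generic_chip("FOO-INTC", 1, FOO_IRQ_BASE,
 *				    foo_iobase, handle_level_irq);
 *	if (!gc)
 *		return -ENOMEM;
 *	gc->chip_types[0].regs.ack = FOO_INTR_ACK;
 *	gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
 */
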
/*
 * Separate lockdep class for interrupt chip which can nest irq_desc
 * lock.
 */
static struct lock_class_key irq_nested_lock_class;

/**
 * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
 * @gc: Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
 * @flags: Flags for initialization
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Set up max. 32 interrupts starting from gc->irq_base. Note, this
 * initializes all interrupts to the primary irq_chip_type and its
 * associated handler.
 */
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
			    enum irq_gc_flags flags, unsigned int clr,
			    unsigned int set)
{
	struct irq_chip_type *ct = gc->chip_types;
	unsigned int i;

	raw_spin_lock(&gc_lock);
	list_add_tail(&gc->list, &gc_list);
	raw_spin_unlock(&gc_lock);

	/* Init mask cache ? */
	if (flags & IRQ_GC_INIT_MASK_CACHE)
		gc->mask_cache = irq_reg_readl(gc->reg_base + ct->regs.mask);

	for (i = gc->irq_base; msk; msk >>= 1, i++) {
		if (!(msk & 0x01))
			continue;

		if (flags & IRQ_GC_INIT_NESTED_LOCK)
			irq_set_lockdep_class(i, &irq_nested_lock_class);

		irq_set_chip_and_handler(i, &ct->chip, ct->handler);
		irq_set_chip_data(i, gc);
		irq_modify_status(i, clr, set);
	}
	gc->irq_cnt = i - gc->irq_base;
}

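/*
 * Illustrative sketch (not part of this file): after the callbacks and
 * register offsets of the allocated chip have been filled in, the
 * interrupts are installed in one go. IRQ_GC_INIT_MASK_CACHE reads the
 * current mask register into the cache; the clr/set arguments adjust the
 * IRQ_* status bits of every covered interrupt.
 *
 *	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
 *			       IRQ_NOREQUEST, 0);
 */
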
/**
 * irq_setup_alt_chip - Switch to alternative chip
 * @d: irq_data for this interrupt
 * @type: Flow type to be initialized
 *
 * Only to be called from chip->irq_set_type() callbacks.
 */
int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = gc->chip_types;
	unsigned int i;

	for (i = 0; i < gc->num_ct; i++, ct++) {
		if (ct->type & type) {
			d->chip = &ct->chip;
			irq_data_to_desc(d)->handle_irq = ct->handler;
			return 0;
		}
	}
	return -EINVAL;
}

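/*
 * Illustrative sketch (not part of this file): a chip that provides one
 * irq_chip_type for level handling and another for edge handling can
 * switch between them from its irq_set_type() callback. The hardware
 * programming of the trigger mode is a hypothetical driver detail and is
 * omitted here.
 *
 *	static int foo_irq_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		// program the trigger mode into the hardware here
 *		return irq_setup_alt_chip(d, type);
 *	}
 */
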
/**
 * irq_remove_generic_chip - Remove a chip
 * @gc: Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to remove relative to gc->irq_base
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Remove up to 32 interrupts starting from gc->irq_base.
 */
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
			     unsigned int clr, unsigned int set)
{
	unsigned int i = gc->irq_base;

	raw_spin_lock(&gc_lock);
	list_del(&gc->list);
	raw_spin_unlock(&gc_lock);

	for (; msk; msk >>= 1, i++) {
		if (!(msk & 0x01))
			continue;

		/* Remove handler first. That will mask the irq line */
		irq_set_handler(i, NULL);
		irq_set_chip(i, &no_irq_chip);
		irq_set_chip_data(i, NULL);
		irq_modify_status(i, clr, set);
	}
}

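/*
 * Illustrative sketch (not part of this file): tearing down the chip on
 * driver removal. The mask and the IRQ_* bits mirror the ones used in the
 * irq_setup_generic_chip() example above; freeing with kfree() is an
 * assumption based on the kzalloc() in irq_alloc_generic_chip().
 *
 *	irq_remove_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST);
 *	kfree(gc);
 */
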
#ifdef CONFIG_PM
static int irq_gc_suspend(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (ct->chip.irq_suspend)
			ct->chip.irq_suspend(irq_get_irq_data(gc->irq_base));
	}
	return 0;
}

static void irq_gc_resume(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (ct->chip.irq_resume)
			ct->chip.irq_resume(irq_get_irq_data(gc->irq_base));
	}
}
#else
#define irq_gc_suspend NULL
#define irq_gc_resume NULL
#endif

static void irq_gc_shutdown(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (ct->chip.irq_pm_shutdown)
			ct->chip.irq_pm_shutdown(irq_get_irq_data(gc->irq_base));
	}
}

static struct syscore_ops irq_gc_syscore_ops = {
	.suspend = irq_gc_suspend,
	.resume = irq_gc_resume,
	.shutdown = irq_gc_shutdown,
};

static int __init irq_gc_init_ops(void)
{
	register_syscore_ops(&irq_gc_syscore_ops);
	return 0;
}
device_initcall(irq_gc_init_ops);

// SPDX-License-Identifier: GPL-2.0
/*
 * Library implementing the most common irq chip callback functions
 *
 * Copyright (C) 2011, Thomas Gleixner
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>

#include "internals.h"

static LIST_HEAD(gc_list);
static DEFINE_RAW_SPINLOCK(gc_lock);

/**
 * irq_gc_noop - NOOP function
 * @d: irq_data
 */
void irq_gc_noop(struct irq_data *d)
{
}
EXPORT_SYMBOL_GPL(irq_gc_noop);

/**
 * irq_gc_mask_disable_reg - Mask chip via disable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_mask_disable_reg(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.disable);
	*ct->mask_cache &= ~mask;
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_disable_reg);

/**
 * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock
 */
void irq_gc_mask_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	*ct->mask_cache |= mask;
	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);

/**
 * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock
 */
void irq_gc_mask_clr_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	*ct->mask_cache &= ~mask;
	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);

/**
 * irq_gc_unmask_enable_reg - Unmask chip via enable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_unmask_enable_reg(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.enable);
	*ct->mask_cache |= mask;
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_unmask_enable_reg);

/**
 * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
 * @d: irq_data
 */
void irq_gc_ack_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);

/**
 * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
 * @d: irq_data
 */
void irq_gc_ack_clr_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = ~d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
 * @d: irq_data
 *
 * This generic implementation of the irq_mask_ack method is for chips
 * with separate enable/disable registers instead of a single mask
 * register and where a pending interrupt is acknowledged by setting a
 * bit.
 *
 * Note: This is the only permutation currently used. Similar generic
 * functions should be added here if other permutations are required.
 */
void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.disable);
	*ct->mask_cache &= ~mask;
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_eoi - EOI interrupt
 * @d: irq_data
 */
void irq_gc_eoi(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.eoi);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_set_wake - Set/clr wake bit for an interrupt
 * @d: irq_data
 * @on: Indicates whether the wake bit should be set or cleared
 *
 * For chips where the wake from suspend functionality is not
 * configured in a separate register and the wakeup active state is
 * just stored in a bitmask.
 */
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mask = d->mask;

	if (!(mask & gc->wake_enabled))
		return -EINVAL;

	irq_gc_lock(gc);
	if (on)
		gc->wake_active |= mask;
	else
		gc->wake_active &= ~mask;
	irq_gc_unlock(gc);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_gc_set_wake);

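/*
 * Illustrative sketch (not part of this file): to use irq_gc_set_wake(), a
 * driver marks the interrupts that may act as wakeup sources in
 * gc->wake_enabled and installs the helper as the irq_set_wake() callback;
 * the resulting gc->wake_active bitmask is then typically applied to the
 * hardware from the chip's suspend hook.
 *
 *	gc->wake_enabled = IRQ_MSK(32);
 *	gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
 */
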
static u32 irq_readl_be(void __iomem *addr)
{
	return ioread32be(addr);
}

static void irq_writel_be(u32 val, void __iomem *addr)
{
	iowrite32be(val, addr);
}

void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
			   int num_ct, unsigned int irq_base,
			   void __iomem *reg_base, irq_flow_handler_t handler)
{
	struct irq_chip_type *ct = gc->chip_types;
	int i;

	raw_spin_lock_init(&gc->lock);
	gc->num_ct = num_ct;
	gc->irq_base = irq_base;
	gc->reg_base = reg_base;
	for (i = 0; i < num_ct; i++)
		ct[i].chip.name = name;
	gc->chip_types->handler = handler;
}

/**
 * irq_alloc_generic_chip - Allocate a generic chip and initialize it
 * @name: Name of the irq chip
 * @num_ct: Number of irq_chip_type instances associated with this chip
 * @irq_base: Interrupt base nr for this chip
 * @reg_base: Register base address (virtual)
 * @handler: Default flow handler associated with this chip
 *
 * Returns an initialized irq_chip_generic structure. The chip defaults
 * to the primary (index 0) irq_chip_type and @handler
 */
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
		       void __iomem *reg_base, irq_flow_handler_t handler)
{
	struct irq_chip_generic *gc;

	gc = kzalloc(struct_size(gc, chip_types, num_ct), GFP_KERNEL);
	if (gc) {
		irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
				      handler);
	}
	return gc;
}
EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);

static void
irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
{
	struct irq_chip_type *ct = gc->chip_types;
	u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
	int i;

	for (i = 0; i < gc->num_ct; i++) {
		if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
			mskptr = &ct[i].mask_cache_priv;
			mskreg = ct[i].regs.mask;
		}
		ct[i].mask_cache = mskptr;
		if (flags & IRQ_GC_INIT_MASK_CACHE)
			*mskptr = irq_reg_readl(gc, mskreg);
	}
}

/**
 * irq_domain_alloc_generic_chips - Allocate generic chips for an irq domain
 * @d: irq domain for which to allocate chips
 * @info: Generic chip information
 *
 * Return: 0 on success, negative error code on failure
 */
int irq_domain_alloc_generic_chips(struct irq_domain *d,
				   const struct irq_domain_chip_generic_info *info)
{
	struct irq_domain_chip_generic *dgc;
	struct irq_chip_generic *gc;
	unsigned long flags;
	int numchips, i;
	size_t dgc_sz;
	size_t gc_sz;
	size_t sz;
	void *tmp;
	int ret;

	if (d->gc)
		return -EBUSY;

	numchips = DIV_ROUND_UP(d->revmap_size, info->irqs_per_chip);
	if (!numchips)
		return -EINVAL;

	/* Allocate a pointer, generic chip and chiptypes for each chip */
	gc_sz = struct_size(gc, chip_types, info->num_ct);
	dgc_sz = struct_size(dgc, gc, numchips);
	sz = dgc_sz + numchips * gc_sz;

	tmp = dgc = kzalloc(sz, GFP_KERNEL);
	if (!dgc)
		return -ENOMEM;
	dgc->irqs_per_chip = info->irqs_per_chip;
	dgc->num_chips = numchips;
	dgc->irq_flags_to_set = info->irq_flags_to_set;
	dgc->irq_flags_to_clear = info->irq_flags_to_clear;
	dgc->gc_flags = info->gc_flags;
	dgc->exit = info->exit;
	d->gc = dgc;

	/* Calc pointer to the first generic chip */
	tmp += dgc_sz;
	for (i = 0; i < numchips; i++) {
		/* Store the pointer to the generic chip */
		dgc->gc[i] = gc = tmp;
		irq_init_generic_chip(gc, info->name, info->num_ct,
				      i * dgc->irqs_per_chip, NULL,
				      info->handler);

		gc->domain = d;
		if (dgc->gc_flags & IRQ_GC_BE_IO) {
			gc->reg_readl = &irq_readl_be;
			gc->reg_writel = &irq_writel_be;
		}

		if (info->init) {
			ret = info->init(gc);
			if (ret)
				goto err;
		}

		raw_spin_lock_irqsave(&gc_lock, flags);
		list_add_tail(&gc->list, &gc_list);
		raw_spin_unlock_irqrestore(&gc_lock, flags);
		/* Calc pointer to the next generic chip */
		tmp += gc_sz;
	}
	return 0;

err:
	while (i--) {
		if (dgc->exit)
			dgc->exit(dgc->gc[i]);
		irq_remove_generic_chip(dgc->gc[i], ~0U, 0, 0);
	}
	d->gc = NULL;
	kfree(dgc);
	return ret;
}
EXPORT_SYMBOL_GPL(irq_domain_alloc_generic_chips);

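/*
 * Illustrative sketch (not part of this file): callers describe the chips
 * they need in an irq_domain_chip_generic_info and hand it to
 * irq_domain_alloc_generic_chips(). The name, flags and the optional
 * foo_gc_init() per-chip init hook below are hypothetical.
 *
 *	struct irq_domain_chip_generic_info info = {
 *		.name			= "FOO-INTC",
 *		.handler		= handle_level_irq,
 *		.irqs_per_chip		= 32,
 *		.num_ct			= 1,
 *		.irq_flags_to_clear	= IRQ_NOREQUEST,
 *		.init			= foo_gc_init,
 *	};
 *	int ret = irq_domain_alloc_generic_chips(domain, &info);
 */
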
/**
 * irq_domain_remove_generic_chips - Remove generic chips from an irq domain
 * @d: irq domain for which generic chips are to be removed
 */
void irq_domain_remove_generic_chips(struct irq_domain *d)
{
	struct irq_domain_chip_generic *dgc = d->gc;
	unsigned int i;

	if (!dgc)
		return;

	for (i = 0; i < dgc->num_chips; i++) {
		if (dgc->exit)
			dgc->exit(dgc->gc[i]);
		irq_remove_generic_chip(dgc->gc[i], ~0U, 0, 0);
	}
	d->gc = NULL;
	kfree(dgc);
}
EXPORT_SYMBOL_GPL(irq_domain_remove_generic_chips);

/**
 * __irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
 * @d: irq domain for which to allocate chips
 * @irqs_per_chip: Number of interrupts each chip handles (max 32)
 * @num_ct: Number of irq_chip_type instances associated with this chip
 * @name: Name of the irq chip
 * @handler: Default flow handler associated with these chips
 * @clr: IRQ_* bits to clear in the mapping function
 * @set: IRQ_* bits to set in the mapping function
 * @gcflags: Generic chip specific setup flags
 */
int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
				     int num_ct, const char *name,
				     irq_flow_handler_t handler,
				     unsigned int clr, unsigned int set,
				     enum irq_gc_flags gcflags)
{
	struct irq_domain_chip_generic_info info = {
		.irqs_per_chip		= irqs_per_chip,
		.num_ct			= num_ct,
		.name			= name,
		.handler		= handler,
		.irq_flags_to_clear	= clr,
		.irq_flags_to_set	= set,
		.gc_flags		= gcflags,
	};

	return irq_domain_alloc_generic_chips(d, &info);
}
EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);

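/*
 * Illustrative sketch (not part of this file): the common pattern in irqchip
 * drivers is to create a linear domain with irq_generic_chip_ops (defined
 * further down in this file) and then attach generic chips to it via the
 * irq_alloc_domain_generic_chips() wrapper. Domain size, fwnode, name and
 * flags below are hypothetical.
 *
 *	domain = irq_domain_create_linear(fwnode, 32, &irq_generic_chip_ops,
 *					  NULL);
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = irq_alloc_domain_generic_chips(domain, 32, 1, "FOO-INTC",
 *					     handle_level_irq, 0, 0,
 *					     IRQ_GC_INIT_MASK_CACHE);
 */
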
static struct irq_chip_generic *
__irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
	struct irq_domain_chip_generic *dgc = d->gc;
	int idx;

	if (!dgc)
		return ERR_PTR(-ENODEV);
	idx = hw_irq / dgc->irqs_per_chip;
	if (idx >= dgc->num_chips)
		return ERR_PTR(-EINVAL);
	return dgc->gc[idx];
}

/**
 * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
 * @d: irq domain pointer
 * @hw_irq: Hardware interrupt number
 */
struct irq_chip_generic *
irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
	struct irq_chip_generic *gc = __irq_get_domain_generic_chip(d, hw_irq);

	return !IS_ERR(gc) ? gc : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);

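/*
 * Illustrative sketch (not part of this file): once the chips for a domain
 * have been allocated, a driver retrieves each bank's generic chip via
 * irq_get_domain_generic_chip() to fill in the register base, register
 * offsets and callbacks. The FOO_* offsets and the bank stride are
 * hypothetical.
 *
 *	for (i = 0; i < nr_banks; i++) {
 *		struct irq_chip_generic *gc;
 *
 *		gc = irq_get_domain_generic_chip(domain, i * 32);
 *		gc->reg_base = base + i * FOO_BANK_STRIDE;
 *		gc->chip_types[0].regs.mask = FOO_INTR_MASK;
 *		gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
 *		gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
 *	}
 */
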
/*
 * Separate lockdep classes for interrupt chip which can nest irq_desc
 * lock and request mutex.
 */
static struct lock_class_key irq_nested_lock_class;
static struct lock_class_key irq_nested_request_class;

/*
 * irq_map_generic_chip - Map a generic chip for an irq domain
 */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hw_irq)
{
	struct irq_data *data = irq_domain_get_irq_data(d, virq);
	struct irq_domain_chip_generic *dgc = d->gc;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	struct irq_chip *chip;
	unsigned long flags;
	int idx;

	gc = __irq_get_domain_generic_chip(d, hw_irq);
	if (IS_ERR(gc))
		return PTR_ERR(gc);

	idx = hw_irq % dgc->irqs_per_chip;

	if (test_bit(idx, &gc->unused))
		return -ENOTSUPP;

	if (test_bit(idx, &gc->installed))
		return -EBUSY;

	ct = gc->chip_types;
	chip = &ct->chip;

	/* We only init the cache for the first mapping of a generic chip */
	if (!gc->installed) {
		raw_spin_lock_irqsave(&gc->lock, flags);
		irq_gc_init_mask_cache(gc, dgc->gc_flags);
		raw_spin_unlock_irqrestore(&gc->lock, flags);
	}

	/* Mark the interrupt as installed */
	set_bit(idx, &gc->installed);

	if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
		irq_set_lockdep_class(virq, &irq_nested_lock_class,
				      &irq_nested_request_class);

	if (chip->irq_calc_mask)
		chip->irq_calc_mask(data);
	else
		data->mask = 1 << idx;

	irq_domain_set_info(d, virq, hw_irq, chip, gc, ct->handler, NULL, NULL);
	irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
	return 0;
}

void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
{
	struct irq_data *data = irq_domain_get_irq_data(d, virq);
	struct irq_domain_chip_generic *dgc = d->gc;
	unsigned int hw_irq = data->hwirq;
	struct irq_chip_generic *gc;
	int irq_idx;

	gc = irq_get_domain_generic_chip(d, hw_irq);
	if (!gc)
		return;

	irq_idx = hw_irq % dgc->irqs_per_chip;

	clear_bit(irq_idx, &gc->installed);
	irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL,
			    NULL);
}

const struct irq_domain_ops irq_generic_chip_ops = {
	.map	= irq_map_generic_chip,
	.unmap	= irq_unmap_generic_chip,
	.xlate	= irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_generic_chip_ops);

/**
 * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
 * @gc: Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
 * @flags: Flags for initialization
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Set up max. 32 interrupts starting from gc->irq_base. Note, this
 * initializes all interrupts to the primary irq_chip_type and its
 * associated handler.
 */
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
			    enum irq_gc_flags flags, unsigned int clr,
			    unsigned int set)
{
	struct irq_chip_type *ct = gc->chip_types;
	struct irq_chip *chip = &ct->chip;
	unsigned int i;

	raw_spin_lock(&gc_lock);
	list_add_tail(&gc->list, &gc_list);
	raw_spin_unlock(&gc_lock);

	irq_gc_init_mask_cache(gc, flags);

	for (i = gc->irq_base; msk; msk >>= 1, i++) {
		if (!(msk & 0x01))
			continue;

		if (flags & IRQ_GC_INIT_NESTED_LOCK)
			irq_set_lockdep_class(i, &irq_nested_lock_class,
					      &irq_nested_request_class);

		if (!(flags & IRQ_GC_NO_MASK)) {
			struct irq_data *d = irq_get_irq_data(i);

			if (chip->irq_calc_mask)
				chip->irq_calc_mask(d);
			else
				d->mask = 1 << (i - gc->irq_base);
		}
		irq_set_chip_and_handler(i, chip, ct->handler);
		irq_set_chip_data(i, gc);
		irq_modify_status(i, clr, set);
	}
	gc->irq_cnt = i - gc->irq_base;
}
EXPORT_SYMBOL_GPL(irq_setup_generic_chip);

/**
 * irq_setup_alt_chip - Switch to alternative chip
 * @d: irq_data for this interrupt
 * @type: Flow type to be initialized
 *
 * Only to be called from chip->irq_set_type() callbacks.
 */
int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = gc->chip_types;
	unsigned int i;

	for (i = 0; i < gc->num_ct; i++, ct++) {
		if (ct->type & type) {
			d->chip = &ct->chip;
			irq_data_to_desc(d)->handle_irq = ct->handler;
			return 0;
		}
	}
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(irq_setup_alt_chip);

/**
 * irq_remove_generic_chip - Remove a chip
 * @gc: Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to remove relative to gc->irq_base
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Remove up to 32 interrupts starting from gc->irq_base.
 */
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
			     unsigned int clr, unsigned int set)
{
	unsigned int i, virq;

	raw_spin_lock(&gc_lock);
	list_del(&gc->list);
	raw_spin_unlock(&gc_lock);

	for (i = 0; msk; msk >>= 1, i++) {
		if (!(msk & 0x01))
			continue;

		/*
		 * Interrupt domain based chips store the base hardware
		 * interrupt number in gc::irq_base. Otherwise gc::irq_base
		 * contains the base Linux interrupt number.
		 */
		if (gc->domain) {
			virq = irq_find_mapping(gc->domain, gc->irq_base + i);
			if (!virq)
				continue;
		} else {
			virq = gc->irq_base + i;
		}

		/* Remove handler first. That will mask the irq line */
		irq_set_handler(virq, NULL);
		irq_set_chip(virq, &no_irq_chip);
		irq_set_chip_data(virq, NULL);
		irq_modify_status(virq, clr, set);
	}
}
EXPORT_SYMBOL_GPL(irq_remove_generic_chip);

static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
{
	unsigned int virq;

	if (!gc->domain)
		return irq_get_irq_data(gc->irq_base);

	/*
	 * We don't know which of the irqs has been actually
	 * installed. Use the first one.
	 */
	if (!gc->installed)
		return NULL;

	virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
	return virq ? irq_get_irq_data(virq) : NULL;
}

#ifdef CONFIG_PM
static int irq_gc_suspend(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (ct->chip.irq_suspend) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_suspend(data);
		}

		if (gc->suspend)
			gc->suspend(gc);
	}
	return 0;
}

static void irq_gc_resume(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (gc->resume)
			gc->resume(gc);

		if (ct->chip.irq_resume) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_resume(data);
		}
	}
}
#else
#define irq_gc_suspend NULL
#define irq_gc_resume NULL
#endif

static void irq_gc_shutdown(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (ct->chip.irq_pm_shutdown) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_pm_shutdown(data);
		}
	}
}

static struct syscore_ops irq_gc_syscore_ops = {
	.suspend = irq_gc_suspend,
	.resume = irq_gc_resume,
	.shutdown = irq_gc_shutdown,
};

static int __init irq_gc_init_ops(void)
{
	register_syscore_ops(&irq_gc_syscore_ops);
	return 0;
}
device_initcall(irq_gc_init_ops);