// SPDX-License-Identifier: GPL-2.0
/*
 * Library implementing the most common irq chip callback functions
 *
 * Copyright (C) 2011, Thomas Gleixner
 */
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/syscore_ops.h>

#include "internals.h"

static LIST_HEAD(gc_list);
static DEFINE_RAW_SPINLOCK(gc_lock);

/**
 * irq_gc_noop - NOOP function
 * @d: irq_data
 */
void irq_gc_noop(struct irq_data *d)
{
}
EXPORT_SYMBOL_GPL(irq_gc_noop);

/**
 * irq_gc_mask_disable_reg - Mask chip via disable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_mask_disable_reg(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.disable);
	*ct->mask_cache &= ~mask;
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_disable_reg);

/**
 * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock
 */
void irq_gc_mask_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	*ct->mask_cache |= mask;
	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);

/**
 * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
 * @d: irq_data
 *
 * Chip has a single mask register. Values of this register are cached
 * and protected by gc->lock
 */
void irq_gc_mask_clr_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	*ct->mask_cache &= ~mask;
	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
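/*
 * Illustrative sketch (not part of this file): on a chip with one mask
 * register where a set bit disables the interrupt, a driver would typically
 * pair the two helpers above. The register offset below is an assumption
 * made up for the example.
 *
 *	ct = gc->chip_types;
 *	ct->regs.mask = 0x04;
 *	ct->chip.irq_mask   = irq_gc_mask_set_bit;
 *	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 */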

/**
 * irq_gc_unmask_enable_reg - Unmask chip via enable register
 * @d: irq_data
 *
 * Chip has separate enable/disable registers instead of a single mask
 * register.
 */
void irq_gc_unmask_enable_reg(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.enable);
	*ct->mask_cache |= mask;
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_unmask_enable_reg);

/**
 * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
 * @d: irq_data
 */
void irq_gc_ack_set_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}
EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);

/**
 * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
 * @d: irq_data
 */
void irq_gc_ack_clr_bit(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = ~d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
 * @d: irq_data
 *
 * This generic implementation of the irq_mask_ack method is for chips
 * with separate enable/disable registers instead of a single mask
 * register and where a pending interrupt is acknowledged by setting a
 * bit.
 *
 * Note: This is the only permutation currently used. Similar generic
 * functions should be added here if other permutations are required.
 */
void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.disable);
	*ct->mask_cache &= ~mask;
	irq_reg_writel(gc, mask, ct->regs.ack);
	irq_gc_unlock(gc);
}
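/*
 * Illustrative sketch (not part of this file): a chip with separate
 * enable/disable registers and a write-1-to-ack register would usually
 * combine the helpers above like this. The offsets are assumptions for
 * the example only.
 *
 *	ct = gc->chip_types;
 *	ct->regs.enable  = 0x00;
 *	ct->regs.disable = 0x04;
 *	ct->regs.ack     = 0x08;
 *	ct->chip.irq_unmask   = irq_gc_unmask_enable_reg;
 *	ct->chip.irq_mask     = irq_gc_mask_disable_reg;
 *	ct->chip.irq_ack      = irq_gc_ack_set_bit;
 *	ct->chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;
 */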

/**
 * irq_gc_eoi - EOI interrupt
 * @d: irq_data
 */
void irq_gc_eoi(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	u32 mask = d->mask;

	irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.eoi);
	irq_gc_unlock(gc);
}

/**
 * irq_gc_set_wake - Set/clr wake bit for an interrupt
 * @d: irq_data
 * @on: Indicates whether the wake bit should be set or cleared
 *
 * For chips where the wake from suspend functionality is not
 * configured in a separate register and the wakeup active state is
 * just stored in a bitmask.
 */
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	u32 mask = d->mask;

	if (!(mask & gc->wake_enabled))
		return -EINVAL;

	irq_gc_lock(gc);
	if (on)
		gc->wake_active |= mask;
	else
		gc->wake_active &= ~mask;
	irq_gc_unlock(gc);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_gc_set_wake);
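/*
 * Illustrative sketch (not part of this file): irq_gc_set_wake() only
 * honours bits that the driver has declared wake capable, so a driver
 * would typically set gc->wake_enabled before hooking the callback:
 *
 *	gc->wake_enabled = IRQ_MSK(32);
 *	gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
 */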

/* Big endian register accessors, installed when IRQ_GC_BE_IO is set */
static u32 irq_readl_be(void __iomem *addr)
{
	return ioread32be(addr);
}

static void irq_writel_be(u32 val, void __iomem *addr)
{
	iowrite32be(val, addr);
}

void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
			   int num_ct, unsigned int irq_base,
			   void __iomem *reg_base, irq_flow_handler_t handler)
{
	struct irq_chip_type *ct = gc->chip_types;
	int i;

	raw_spin_lock_init(&gc->lock);
	gc->num_ct = num_ct;
	gc->irq_base = irq_base;
	gc->reg_base = reg_base;
	for (i = 0; i < num_ct; i++)
		ct[i].chip.name = name;
	gc->chip_types->handler = handler;
}

/**
 * irq_alloc_generic_chip - Allocate a generic chip and initialize it
 * @name: Name of the irq chip
 * @num_ct: Number of irq_chip_type instances associated with this chip
 * @irq_base: Interrupt base nr for this chip
 * @reg_base: Register base address (virtual)
 * @handler: Default flow handler associated with this chip
 *
 * Returns an initialized irq_chip_generic structure. The chip defaults
 * to the primary (index 0) irq_chip_type and @handler
 */
struct irq_chip_generic *
irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
		       void __iomem *reg_base, irq_flow_handler_t handler)
{
	struct irq_chip_generic *gc;

	gc = kzalloc(struct_size(gc, chip_types, num_ct), GFP_KERNEL);
	if (gc) {
		irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
				      handler);
	}
	return gc;
}
EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);
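/*
 * Illustrative sketch (not part of this file): legacy (non-domain) setup of
 * 32 interrupts starting at a fixed Linux irq number. All names and register
 * offsets below are assumptions for the example.
 *
 *	gc = irq_alloc_generic_chip("example", 1, EXAMPLE_IRQ_BASE, base,
 *				    handle_level_irq);
 *	if (!gc)
 *		return -ENOMEM;
 *	ct = gc->chip_types;
 *	ct->regs.mask = 0x04;
 *	ct->regs.ack  = 0x08;
 *	ct->chip.irq_mask   = irq_gc_mask_set_bit;
 *	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 *	ct->chip.irq_ack    = irq_gc_ack_set_bit;
 *	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
 *			       IRQ_NOREQUEST, 0);
 */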

static void
irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
{
	struct irq_chip_type *ct = gc->chip_types;
	u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
	int i;

	for (i = 0; i < gc->num_ct; i++) {
		if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
			mskptr = &ct[i].mask_cache_priv;
			mskreg = ct[i].regs.mask;
		}
		ct[i].mask_cache = mskptr;
		if (flags & IRQ_GC_INIT_MASK_CACHE)
			*mskptr = irq_reg_readl(gc, mskreg);
	}
}

/**
 * __irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
 * @d: irq domain for which to allocate chips
 * @irqs_per_chip: Number of interrupts each chip handles (max 32)
 * @num_ct: Number of irq_chip_type instances associated with each chip
 * @name: Name of the irq chip
 * @handler: Default flow handler associated with these chips
 * @clr: IRQ_* bits to clear in the mapping function
 * @set: IRQ_* bits to set in the mapping function
 * @gcflags: Generic chip specific setup flags
 */
int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
				     int num_ct, const char *name,
				     irq_flow_handler_t handler,
				     unsigned int clr, unsigned int set,
				     enum irq_gc_flags gcflags)
{
	struct irq_domain_chip_generic *dgc;
	struct irq_chip_generic *gc;
	unsigned long flags;
	int numchips, i;
	size_t dgc_sz;
	size_t gc_sz;
	size_t sz;
	void *tmp;

	if (d->gc)
		return -EBUSY;

	numchips = DIV_ROUND_UP(d->revmap_size, irqs_per_chip);
	if (!numchips)
		return -EINVAL;

	/* Allocate a pointer, generic chip and chiptypes for each chip */
	gc_sz = struct_size(gc, chip_types, num_ct);
	dgc_sz = struct_size(dgc, gc, numchips);
	sz = dgc_sz + numchips * gc_sz;

	tmp = dgc = kzalloc(sz, GFP_KERNEL);
	if (!dgc)
		return -ENOMEM;
	dgc->irqs_per_chip = irqs_per_chip;
	dgc->num_chips = numchips;
	dgc->irq_flags_to_set = set;
	dgc->irq_flags_to_clear = clr;
	dgc->gc_flags = gcflags;
	d->gc = dgc;

	/* Calc pointer to the first generic chip */
	tmp += dgc_sz;
	for (i = 0; i < numchips; i++) {
		/* Store the pointer to the generic chip */
		dgc->gc[i] = gc = tmp;
		irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
				      NULL, handler);

		gc->domain = d;
		if (gcflags & IRQ_GC_BE_IO) {
			gc->reg_readl = &irq_readl_be;
			gc->reg_writel = &irq_writel_be;
		}

		raw_spin_lock_irqsave(&gc_lock, flags);
		list_add_tail(&gc->list, &gc_list);
		raw_spin_unlock_irqrestore(&gc_lock, flags);
		/* Calc pointer to the next generic chip */
		tmp += gc_sz;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);
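/*
 * Illustrative sketch (not part of this file): typical irqdomain based
 * usage, e.g. from an irqchip driver's init path. The node pointer,
 * interrupt count and register offset are assumptions for the example.
 *
 *	domain = irq_domain_add_linear(np, 32, &irq_generic_chip_ops, NULL);
 *	if (!domain)
 *		return -ENOMEM;
 *	ret = irq_alloc_domain_generic_chips(domain, 32, 1, "example",
 *					     handle_level_irq, 0, 0,
 *					     IRQ_GC_INIT_MASK_CACHE);
 *	if (ret)
 *		return ret;
 *	gc = irq_get_domain_generic_chip(domain, 0);
 *	gc->reg_base = base;
 *	gc->chip_types[0].regs.mask = 0x04;
 *	gc->chip_types[0].chip.irq_mask   = irq_gc_mask_set_bit;
 *	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
 */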

static struct irq_chip_generic *
__irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
	struct irq_domain_chip_generic *dgc = d->gc;
	int idx;

	if (!dgc)
		return ERR_PTR(-ENODEV);
	idx = hw_irq / dgc->irqs_per_chip;
	if (idx >= dgc->num_chips)
		return ERR_PTR(-EINVAL);
	return dgc->gc[idx];
}

/**
 * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
 * @d: irq domain pointer
 * @hw_irq: Hardware interrupt number
 */
struct irq_chip_generic *
irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
{
	struct irq_chip_generic *gc = __irq_get_domain_generic_chip(d, hw_irq);

	return !IS_ERR(gc) ? gc : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);

/*
 * Separate lockdep classes for interrupt chip which can nest irq_desc
 * lock and request mutex.
 */
static struct lock_class_key irq_nested_lock_class;
static struct lock_class_key irq_nested_request_class;

/*
 * irq_map_generic_chip - Map a generic chip for an irq domain
 */
int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hw_irq)
{
	struct irq_data *data = irq_domain_get_irq_data(d, virq);
	struct irq_domain_chip_generic *dgc = d->gc;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	struct irq_chip *chip;
	unsigned long flags;
	int idx;

	gc = __irq_get_domain_generic_chip(d, hw_irq);
	if (IS_ERR(gc))
		return PTR_ERR(gc);

	idx = hw_irq % dgc->irqs_per_chip;

	if (test_bit(idx, &gc->unused))
		return -ENOTSUPP;

	if (test_bit(idx, &gc->installed))
		return -EBUSY;

	ct = gc->chip_types;
	chip = &ct->chip;

	/* We only init the cache for the first mapping of a generic chip */
	if (!gc->installed) {
		raw_spin_lock_irqsave(&gc->lock, flags);
		irq_gc_init_mask_cache(gc, dgc->gc_flags);
		raw_spin_unlock_irqrestore(&gc->lock, flags);
	}

	/* Mark the interrupt as installed */
	set_bit(idx, &gc->installed);

	if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
		irq_set_lockdep_class(virq, &irq_nested_lock_class,
				      &irq_nested_request_class);

	if (chip->irq_calc_mask)
		chip->irq_calc_mask(data);
	else
		data->mask = 1 << idx;

	irq_domain_set_info(d, virq, hw_irq, chip, gc, ct->handler, NULL, NULL);
	irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
	return 0;
}

void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
{
	struct irq_data *data = irq_domain_get_irq_data(d, virq);
	struct irq_domain_chip_generic *dgc = d->gc;
	unsigned int hw_irq = data->hwirq;
	struct irq_chip_generic *gc;
	int irq_idx;

	gc = irq_get_domain_generic_chip(d, hw_irq);
	if (!gc)
		return;

	irq_idx = hw_irq % dgc->irqs_per_chip;

	clear_bit(irq_idx, &gc->installed);
	irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL,
			    NULL);
}

const struct irq_domain_ops irq_generic_chip_ops = {
	.map	= irq_map_generic_chip,
	.unmap	= irq_unmap_generic_chip,
	.xlate	= irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_generic_chip_ops);

/**
 * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
 * @gc: Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to initialize relative to gc->irq_base
 * @flags: Flags for initialization
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Set up max. 32 interrupts starting from gc->irq_base. Note, this
 * initializes all interrupts to the primary irq_chip_type and its
 * associated handler.
 */
void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
			    enum irq_gc_flags flags, unsigned int clr,
			    unsigned int set)
{
	struct irq_chip_type *ct = gc->chip_types;
	struct irq_chip *chip = &ct->chip;
	unsigned int i;

	raw_spin_lock(&gc_lock);
	list_add_tail(&gc->list, &gc_list);
	raw_spin_unlock(&gc_lock);

	irq_gc_init_mask_cache(gc, flags);

	for (i = gc->irq_base; msk; msk >>= 1, i++) {
		if (!(msk & 0x01))
			continue;

		if (flags & IRQ_GC_INIT_NESTED_LOCK)
			irq_set_lockdep_class(i, &irq_nested_lock_class,
					      &irq_nested_request_class);

		if (!(flags & IRQ_GC_NO_MASK)) {
			struct irq_data *d = irq_get_irq_data(i);

			if (chip->irq_calc_mask)
				chip->irq_calc_mask(d);
			else
				d->mask = 1 << (i - gc->irq_base);
		}
		irq_set_chip_and_handler(i, chip, ct->handler);
		irq_set_chip_data(i, gc);
		irq_modify_status(i, clr, set);
	}
	gc->irq_cnt = i - gc->irq_base;
}
EXPORT_SYMBOL_GPL(irq_setup_generic_chip);

/**
 * irq_setup_alt_chip - Switch to alternative chip
 * @d: irq_data for this interrupt
 * @type: Flow type to be initialized
 *
 * Only to be called from chip->irq_set_type() callbacks.
 */
int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = gc->chip_types;
	unsigned int i;

	for (i = 0; i < gc->num_ct; i++, ct++) {
		if (ct->type & type) {
			d->chip = &ct->chip;
			irq_data_to_desc(d)->handle_irq = ct->handler;
			return 0;
		}
	}
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
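/*
 * Illustrative sketch (not part of this file): a chip that handles both
 * level and edge interrupts would describe two irq_chip_types and switch
 * between them from its irq_set_type() callback. All names below are
 * assumptions for the example.
 *
 *	ct = gc->chip_types;
 *	ct[0].type = IRQ_TYPE_LEVEL_MASK;
 *	ct[0].handler = handle_level_irq;
 *	ct[0].chip.irq_set_type = example_set_type;
 *	ct[1].type = IRQ_TYPE_EDGE_BOTH;
 *	ct[1].handler = handle_edge_irq;
 *	ct[1].chip.irq_set_type = example_set_type;
 *
 *	static int example_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		// program the hardware trigger mode here, then:
 *		return irq_setup_alt_chip(d, type);
 *	}
 */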

/**
 * irq_remove_generic_chip - Remove a chip
 * @gc: Generic irq chip holding all data
 * @msk: Bitmask holding the irqs to remove relative to gc->irq_base
 * @clr: IRQ_* bits to clear
 * @set: IRQ_* bits to set
 *
 * Remove up to 32 interrupts starting from gc->irq_base.
 */
void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
			     unsigned int clr, unsigned int set)
{
	unsigned int i, virq;

	raw_spin_lock(&gc_lock);
	list_del(&gc->list);
	raw_spin_unlock(&gc_lock);

	for (i = 0; msk; msk >>= 1, i++) {
		if (!(msk & 0x01))
			continue;

		/*
		 * Interrupt domain based chips store the base hardware
		 * interrupt number in gc::irq_base. Otherwise gc::irq_base
		 * contains the base Linux interrupt number.
		 */
		if (gc->domain) {
			virq = irq_find_mapping(gc->domain, gc->irq_base + i);
			if (!virq)
				continue;
		} else {
			virq = gc->irq_base + i;
		}

		/* Remove handler first. That will mask the irq line */
		irq_set_handler(virq, NULL);
		irq_set_chip(virq, &no_irq_chip);
		irq_set_chip_data(virq, NULL);
		irq_modify_status(virq, clr, set);
	}
}
EXPORT_SYMBOL_GPL(irq_remove_generic_chip);

static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
{
	unsigned int virq;

	if (!gc->domain)
		return irq_get_irq_data(gc->irq_base);

	/*
	 * We don't know which of the irqs has been actually
	 * installed. Use the first one.
	 */
	if (!gc->installed)
		return NULL;

	virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
	return virq ? irq_get_irq_data(virq) : NULL;
}

#ifdef CONFIG_PM
static int irq_gc_suspend(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (ct->chip.irq_suspend) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_suspend(data);
		}

		if (gc->suspend)
			gc->suspend(gc);
	}
	return 0;
}

static void irq_gc_resume(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (gc->resume)
			gc->resume(gc);

		if (ct->chip.irq_resume) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_resume(data);
		}
	}
}
#else
#define irq_gc_suspend NULL
#define irq_gc_resume NULL
#endif

static void irq_gc_shutdown(void)
{
	struct irq_chip_generic *gc;

	list_for_each_entry(gc, &gc_list, list) {
		struct irq_chip_type *ct = gc->chip_types;

		if (ct->chip.irq_pm_shutdown) {
			struct irq_data *data = irq_gc_get_irq_data(gc);

			if (data)
				ct->chip.irq_pm_shutdown(data);
		}
	}
}

static struct syscore_ops irq_gc_syscore_ops = {
	.suspend = irq_gc_suspend,
	.resume = irq_gc_resume,
	.shutdown = irq_gc_shutdown,
};

static int __init irq_gc_init_ops(void)
{
	register_syscore_ops(&irq_gc_syscore_ops);
	return 0;
}
device_initcall(irq_gc_init_ops);