v5.4 (kernel/irq/generic-chip.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Library implementing the most common irq chip callback functions
  4 *
  5 * Copyright (C) 2011, Thomas Gleixner
  6 */
  7#include <linux/io.h>
  8#include <linux/irq.h>
  9#include <linux/slab.h>
 10#include <linux/export.h>
 11#include <linux/irqdomain.h>
 12#include <linux/interrupt.h>
 13#include <linux/kernel_stat.h>
 14#include <linux/syscore_ops.h>
 15
 16#include "internals.h"
 17
 18static LIST_HEAD(gc_list);
 19static DEFINE_RAW_SPINLOCK(gc_lock);
 20
 21/**
 22 * irq_gc_noop - NOOP function
 23 * @d: irq_data
 24 */
 25void irq_gc_noop(struct irq_data *d)
 26{
 27}
 28
 29/**
 30 * irq_gc_mask_disable_reg - Mask chip via disable register
 31 * @d: irq_data
 32 *
 33 * Chip has separate enable/disable registers instead of a single mask
 34 * register.
 35 */
 36void irq_gc_mask_disable_reg(struct irq_data *d)
 37{
 38	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 39	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 40	u32 mask = d->mask;
 41
 42	irq_gc_lock(gc);
 43	irq_reg_writel(gc, mask, ct->regs.disable);
 44	*ct->mask_cache &= ~mask;
 45	irq_gc_unlock(gc);
 46}
 47
 48/**
 49 * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
 50 * @d: irq_data
 51 *
 52 * Chip has a single mask register. Values of this register are cached
 53 * and protected by gc->lock
 54 */
 55void irq_gc_mask_set_bit(struct irq_data *d)
 56{
 57	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 58	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 59	u32 mask = d->mask;
 60
 61	irq_gc_lock(gc);
 62	*ct->mask_cache |= mask;
 63	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
 64	irq_gc_unlock(gc);
 65}
 66EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);
 67
 68/**
 69 * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
 70 * @d: irq_data
 71 *
 72 * Chip has a single mask register. Values of this register are cached
 73 * and protected by gc->lock
 74 */
 75void irq_gc_mask_clr_bit(struct irq_data *d)
 76{
 77	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 78	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 79	u32 mask = d->mask;
 80
 81	irq_gc_lock(gc);
 82	*ct->mask_cache &= ~mask;
 83	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
 84	irq_gc_unlock(gc);
 85}
 86EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
 87
 88/**
 89 * irq_gc_unmask_enable_reg - Unmask chip via enable register
 90 * @d: irq_data
 91 *
 92 * Chip has separate enable/disable registers instead of a single mask
 93 * register.
 94 */
 95void irq_gc_unmask_enable_reg(struct irq_data *d)
 96{
 97	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 98	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 99	u32 mask = d->mask;
100
101	irq_gc_lock(gc);
102	irq_reg_writel(gc, mask, ct->regs.enable);
103	*ct->mask_cache |= mask;
104	irq_gc_unlock(gc);
105}
106
107/**
108 * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
109 * @d: irq_data
110 */
111void irq_gc_ack_set_bit(struct irq_data *d)
112{
113	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
114	struct irq_chip_type *ct = irq_data_get_chip_type(d);
115	u32 mask = d->mask;
116
117	irq_gc_lock(gc);
118	irq_reg_writel(gc, mask, ct->regs.ack);
119	irq_gc_unlock(gc);
120}
121EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);
122
123/**
124 * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
125 * @d: irq_data
126 */
127void irq_gc_ack_clr_bit(struct irq_data *d)
128{
129	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
130	struct irq_chip_type *ct = irq_data_get_chip_type(d);
131	u32 mask = ~d->mask;
132
133	irq_gc_lock(gc);
134	irq_reg_writel(gc, mask, ct->regs.ack);
135	irq_gc_unlock(gc);
136}
137
138/**
139 * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
140 * @d: irq_data
141 *
142 * This generic implementation of the irq_mask_ack method is for chips
143 * with separate enable/disable registers instead of a single mask
144 * register and where a pending interrupt is acknowledged by setting a
145 * bit.
146 *
147 * Note: This is the only permutation currently used.  Similar generic
148 * functions should be added here if other permutations are required.
149 */
150void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
151{
152	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
153	struct irq_chip_type *ct = irq_data_get_chip_type(d);
154	u32 mask = d->mask;
155
156	irq_gc_lock(gc);
157	irq_reg_writel(gc, mask, ct->regs.disable);
158	*ct->mask_cache &= ~mask;
159	irq_reg_writel(gc, mask, ct->regs.ack);
160	irq_gc_unlock(gc);
161}
162
163/**
164 * irq_gc_eoi - EOI interrupt
165 * @d: irq_data
166 */
167void irq_gc_eoi(struct irq_data *d)
168{
169	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
170	struct irq_chip_type *ct = irq_data_get_chip_type(d);
171	u32 mask = d->mask;
172
173	irq_gc_lock(gc);
174	irq_reg_writel(gc, mask, ct->regs.eoi);
175	irq_gc_unlock(gc);
176}
177
178/**
179 * irq_gc_set_wake - Set/clr wake bit for an interrupt
180 * @d:  irq_data
181 * @on: Indicates whether the wake bit should be set or cleared
182 *
183 * For chips where the wake from suspend functionality is not
184 * configured in a separate register and the wakeup active state is
185 * just stored in a bitmask.
186 */
187int irq_gc_set_wake(struct irq_data *d, unsigned int on)
188{
189	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
190	u32 mask = d->mask;
191
192	if (!(mask & gc->wake_enabled))
193		return -EINVAL;
194
195	irq_gc_lock(gc);
196	if (on)
197		gc->wake_active |= mask;
198	else
199		gc->wake_active &= ~mask;
200	irq_gc_unlock(gc);
201	return 0;
202}
203
204static u32 irq_readl_be(void __iomem *addr)
205{
206	return ioread32be(addr);
207}
208
209static void irq_writel_be(u32 val, void __iomem *addr)
210{
211	iowrite32be(val, addr);
212}
213
214void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
215			   int num_ct, unsigned int irq_base,
216			   void __iomem *reg_base, irq_flow_handler_t handler)
217{
218	raw_spin_lock_init(&gc->lock);
219	gc->num_ct = num_ct;
220	gc->irq_base = irq_base;
221	gc->reg_base = reg_base;
222	gc->chip_types->chip.name = name;
223	gc->chip_types->handler = handler;
224}
225
226/**
227 * irq_alloc_generic_chip - Allocate a generic chip and initialize it
228 * @name:	Name of the irq chip
229 * @num_ct:	Number of irq_chip_type instances associated with this
230 * @irq_base:	Interrupt base nr for this chip
231 * @reg_base:	Register base address (virtual)
232 * @handler:	Default flow handler associated with this chip
233 *
234 * Returns an initialized irq_chip_generic structure. The chip defaults
235 * to the primary (index 0) irq_chip_type and @handler
236 */
237struct irq_chip_generic *
238irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
239		       void __iomem *reg_base, irq_flow_handler_t handler)
240{
241	struct irq_chip_generic *gc;
242	unsigned long sz = sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
243
244	gc = kzalloc(sz, GFP_KERNEL);
245	if (gc) {
246		irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
247				      handler);
248	}
249	return gc;
250}
251EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);
252
253static void
254irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
255{
256	struct irq_chip_type *ct = gc->chip_types;
257	u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
258	int i;
259
260	for (i = 0; i < gc->num_ct; i++) {
261		if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
262			mskptr = &ct[i].mask_cache_priv;
263			mskreg = ct[i].regs.mask;
264		}
265		ct[i].mask_cache = mskptr;
266		if (flags & IRQ_GC_INIT_MASK_CACHE)
267			*mskptr = irq_reg_readl(gc, mskreg);
268	}
269}
270
271/**
 272 * __irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
273 * @d:			irq domain for which to allocate chips
274 * @irqs_per_chip:	Number of interrupts each chip handles (max 32)
275 * @num_ct:		Number of irq_chip_type instances associated with this
276 * @name:		Name of the irq chip
277 * @handler:		Default flow handler associated with these chips
278 * @clr:		IRQ_* bits to clear in the mapping function
279 * @set:		IRQ_* bits to set in the mapping function
280 * @gcflags:		Generic chip specific setup flags
281 */
282int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
283				     int num_ct, const char *name,
284				     irq_flow_handler_t handler,
285				     unsigned int clr, unsigned int set,
286				     enum irq_gc_flags gcflags)
287{
288	struct irq_domain_chip_generic *dgc;
289	struct irq_chip_generic *gc;
290	int numchips, sz, i;
291	unsigned long flags;
292	void *tmp;
293
294	if (d->gc)
295		return -EBUSY;
296
297	numchips = DIV_ROUND_UP(d->revmap_size, irqs_per_chip);
298	if (!numchips)
299		return -EINVAL;
300
301	/* Allocate a pointer, generic chip and chiptypes for each chip */
302	sz = sizeof(*dgc) + numchips * sizeof(gc);
303	sz += numchips * (sizeof(*gc) + num_ct * sizeof(struct irq_chip_type));
304
305	tmp = dgc = kzalloc(sz, GFP_KERNEL);
306	if (!dgc)
307		return -ENOMEM;
308	dgc->irqs_per_chip = irqs_per_chip;
309	dgc->num_chips = numchips;
310	dgc->irq_flags_to_set = set;
311	dgc->irq_flags_to_clear = clr;
312	dgc->gc_flags = gcflags;
313	d->gc = dgc;
314
315	/* Calc pointer to the first generic chip */
316	tmp += sizeof(*dgc) + numchips * sizeof(gc);
317	for (i = 0; i < numchips; i++) {
318		/* Store the pointer to the generic chip */
319		dgc->gc[i] = gc = tmp;
320		irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
321				      NULL, handler);
322
323		gc->domain = d;
324		if (gcflags & IRQ_GC_BE_IO) {
325			gc->reg_readl = &irq_readl_be;
326			gc->reg_writel = &irq_writel_be;
327		}
328
329		raw_spin_lock_irqsave(&gc_lock, flags);
330		list_add_tail(&gc->list, &gc_list);
331		raw_spin_unlock_irqrestore(&gc_lock, flags);
332		/* Calc pointer to the next generic chip */
333		tmp += sizeof(*gc) + num_ct * sizeof(struct irq_chip_type);
334	}
335	return 0;
336}
337EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);
338
339static struct irq_chip_generic *
340__irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
341{
342	struct irq_domain_chip_generic *dgc = d->gc;
343	int idx;
344
345	if (!dgc)
346		return ERR_PTR(-ENODEV);
347	idx = hw_irq / dgc->irqs_per_chip;
348	if (idx >= dgc->num_chips)
349		return ERR_PTR(-EINVAL);
350	return dgc->gc[idx];
351}
352
353/**
354 * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
355 * @d:			irq domain pointer
356 * @hw_irq:		Hardware interrupt number
357 */
358struct irq_chip_generic *
359irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
360{
361	struct irq_chip_generic *gc = __irq_get_domain_generic_chip(d, hw_irq);
362
363	return !IS_ERR(gc) ? gc : NULL;
364}
365EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
366
367/*
368 * Separate lockdep classes for interrupt chip which can nest irq_desc
369 * lock and request mutex.
370 */
371static struct lock_class_key irq_nested_lock_class;
372static struct lock_class_key irq_nested_request_class;
373
374/*
375 * irq_map_generic_chip - Map a generic chip for an irq domain
376 */
377int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
378			 irq_hw_number_t hw_irq)
379{
380	struct irq_data *data = irq_domain_get_irq_data(d, virq);
381	struct irq_domain_chip_generic *dgc = d->gc;
382	struct irq_chip_generic *gc;
383	struct irq_chip_type *ct;
384	struct irq_chip *chip;
385	unsigned long flags;
386	int idx;
387
388	gc = __irq_get_domain_generic_chip(d, hw_irq);
389	if (IS_ERR(gc))
390		return PTR_ERR(gc);
391
392	idx = hw_irq % dgc->irqs_per_chip;
393
394	if (test_bit(idx, &gc->unused))
395		return -ENOTSUPP;
396
397	if (test_bit(idx, &gc->installed))
398		return -EBUSY;
399
400	ct = gc->chip_types;
401	chip = &ct->chip;
402
403	/* We only init the cache for the first mapping of a generic chip */
404	if (!gc->installed) {
405		raw_spin_lock_irqsave(&gc->lock, flags);
406		irq_gc_init_mask_cache(gc, dgc->gc_flags);
407		raw_spin_unlock_irqrestore(&gc->lock, flags);
408	}
409
410	/* Mark the interrupt as installed */
411	set_bit(idx, &gc->installed);
412
413	if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
414		irq_set_lockdep_class(virq, &irq_nested_lock_class,
415				      &irq_nested_request_class);
416
417	if (chip->irq_calc_mask)
418		chip->irq_calc_mask(data);
419	else
420		data->mask = 1 << idx;
421
422	irq_domain_set_info(d, virq, hw_irq, chip, gc, ct->handler, NULL, NULL);
423	irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
424	return 0;
425}
426
427static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
428{
429	struct irq_data *data = irq_domain_get_irq_data(d, virq);
430	struct irq_domain_chip_generic *dgc = d->gc;
431	unsigned int hw_irq = data->hwirq;
432	struct irq_chip_generic *gc;
433	int irq_idx;
434
435	gc = irq_get_domain_generic_chip(d, hw_irq);
436	if (!gc)
437		return;
438
439	irq_idx = hw_irq % dgc->irqs_per_chip;
440
441	clear_bit(irq_idx, &gc->installed);
442	irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL,
443			    NULL);
444
445}
446
447struct irq_domain_ops irq_generic_chip_ops = {
448	.map	= irq_map_generic_chip,
449	.unmap  = irq_unmap_generic_chip,
450	.xlate	= irq_domain_xlate_onetwocell,
451};
452EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
453
454/**
455 * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
456 * @gc:		Generic irq chip holding all data
457 * @msk:	Bitmask holding the irqs to initialize relative to gc->irq_base
458 * @flags:	Flags for initialization
459 * @clr:	IRQ_* bits to clear
460 * @set:	IRQ_* bits to set
461 *
462 * Set up max. 32 interrupts starting from gc->irq_base. Note, this
463 * initializes all interrupts to the primary irq_chip_type and its
464 * associated handler.
465 */
466void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
467			    enum irq_gc_flags flags, unsigned int clr,
468			    unsigned int set)
469{
470	struct irq_chip_type *ct = gc->chip_types;
471	struct irq_chip *chip = &ct->chip;
472	unsigned int i;
473
474	raw_spin_lock(&gc_lock);
475	list_add_tail(&gc->list, &gc_list);
476	raw_spin_unlock(&gc_lock);
477
478	irq_gc_init_mask_cache(gc, flags);
479
480	for (i = gc->irq_base; msk; msk >>= 1, i++) {
481		if (!(msk & 0x01))
482			continue;
483
484		if (flags & IRQ_GC_INIT_NESTED_LOCK)
485			irq_set_lockdep_class(i, &irq_nested_lock_class,
486					      &irq_nested_request_class);
487
488		if (!(flags & IRQ_GC_NO_MASK)) {
489			struct irq_data *d = irq_get_irq_data(i);
490
491			if (chip->irq_calc_mask)
492				chip->irq_calc_mask(d);
493			else
494				d->mask = 1 << (i - gc->irq_base);
495		}
496		irq_set_chip_and_handler(i, chip, ct->handler);
497		irq_set_chip_data(i, gc);
498		irq_modify_status(i, clr, set);
499	}
500	gc->irq_cnt = i - gc->irq_base;
501}
502EXPORT_SYMBOL_GPL(irq_setup_generic_chip);
503
504/**
505 * irq_setup_alt_chip - Switch to alternative chip
506 * @d:		irq_data for this interrupt
507 * @type:	Flow type to be initialized
508 *
509 * Only to be called from chip->irq_set_type() callbacks.
510 */
511int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
512{
513	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
514	struct irq_chip_type *ct = gc->chip_types;
515	unsigned int i;
516
517	for (i = 0; i < gc->num_ct; i++, ct++) {
518		if (ct->type & type) {
519			d->chip = &ct->chip;
520			irq_data_to_desc(d)->handle_irq = ct->handler;
521			return 0;
522		}
523	}
524	return -EINVAL;
525}
526EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
527
528/**
529 * irq_remove_generic_chip - Remove a chip
530 * @gc:		Generic irq chip holding all data
531 * @msk:	Bitmask holding the irqs to initialize relative to gc->irq_base
532 * @clr:	IRQ_* bits to clear
533 * @set:	IRQ_* bits to set
534 *
535 * Remove up to 32 interrupts starting from gc->irq_base.
536 */
537void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
538			     unsigned int clr, unsigned int set)
539{
540	unsigned int i = gc->irq_base;
541
542	raw_spin_lock(&gc_lock);
543	list_del(&gc->list);
544	raw_spin_unlock(&gc_lock);
545
546	for (; msk; msk >>= 1, i++) {
547		if (!(msk & 0x01))
548			continue;
549
550		/* Remove handler first. That will mask the irq line */
551		irq_set_handler(i, NULL);
552		irq_set_chip(i, &no_irq_chip);
553		irq_set_chip_data(i, NULL);
554		irq_modify_status(i, clr, set);
555	}
556}
557EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
558
559static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
560{
561	unsigned int virq;
562
563	if (!gc->domain)
564		return irq_get_irq_data(gc->irq_base);
565
566	/*
567	 * We don't know which of the irqs has been actually
568	 * installed. Use the first one.
569	 */
570	if (!gc->installed)
571		return NULL;
572
573	virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
574	return virq ? irq_get_irq_data(virq) : NULL;
575}
576
577#ifdef CONFIG_PM
578static int irq_gc_suspend(void)
579{
580	struct irq_chip_generic *gc;
581
582	list_for_each_entry(gc, &gc_list, list) {
583		struct irq_chip_type *ct = gc->chip_types;
584
585		if (ct->chip.irq_suspend) {
586			struct irq_data *data = irq_gc_get_irq_data(gc);
587
588			if (data)
589				ct->chip.irq_suspend(data);
590		}
591
592		if (gc->suspend)
593			gc->suspend(gc);
594	}
595	return 0;
596}
597
598static void irq_gc_resume(void)
599{
600	struct irq_chip_generic *gc;
601
602	list_for_each_entry(gc, &gc_list, list) {
603		struct irq_chip_type *ct = gc->chip_types;
604
605		if (gc->resume)
606			gc->resume(gc);
607
608		if (ct->chip.irq_resume) {
609			struct irq_data *data = irq_gc_get_irq_data(gc);
610
611			if (data)
612				ct->chip.irq_resume(data);
613		}
614	}
615}
616#else
617#define irq_gc_suspend NULL
618#define irq_gc_resume NULL
619#endif
620
621static void irq_gc_shutdown(void)
622{
623	struct irq_chip_generic *gc;
624
625	list_for_each_entry(gc, &gc_list, list) {
626		struct irq_chip_type *ct = gc->chip_types;
627
628		if (ct->chip.irq_pm_shutdown) {
629			struct irq_data *data = irq_gc_get_irq_data(gc);
630
631			if (data)
632				ct->chip.irq_pm_shutdown(data);
633		}
634	}
635}
636
637static struct syscore_ops irq_gc_syscore_ops = {
638	.suspend = irq_gc_suspend,
639	.resume = irq_gc_resume,
640	.shutdown = irq_gc_shutdown,
641};
642
643static int __init irq_gc_init_ops(void)
644{
645	register_syscore_ops(&irq_gc_syscore_ops);
646	return 0;
647}
648device_initcall(irq_gc_init_ops);
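For readers unfamiliar with the API, here is a minimal, hypothetical sketch of the legacy (pre irq-domain) way a driver wires up the helpers defined above: allocate one generic chip, point its primary irq_chip_type at the hardware registers, and hand it a block of 32 already-allocated Linux interrupt numbers. Everything named my_intc, MY_IRQ_BASE or mmio_base, and every register offset, is an assumption made up for illustration; only the irq_gc_* helpers and irq_setup_generic_chip() come from the file above.

/*
 * Illustrative sketch only, not part of kernel/irq/generic-chip.c.
 * Hypothetical memory-mapped controller with one enable/mask register at
 * offset 0x04 (bit set == line enabled) and a write-1-to-ack register at
 * offset 0x08. The 32 Linux interrupt numbers starting at MY_IRQ_BASE are
 * assumed to have descriptors already (e.g. from irq_alloc_descs()) and
 * mmio_base is assumed to have been ioremap()'ed elsewhere.
 */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>

#define MY_IRQ_BASE	64			/* hypothetical irq base */

static void __iomem *mmio_base;			/* assumed already mapped */

static int __init my_intc_setup(void)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("my-intc", 1, MY_IRQ_BASE,
				    mmio_base, handle_level_irq);
	if (!gc)
		return -ENOMEM;

	ct = gc->chip_types;
	ct->regs.mask = 0x04;
	ct->regs.ack  = 0x08;
	/* The register enables a line when its bit is set, so mask == clear bit */
	ct->chip.irq_mask   = irq_gc_mask_clr_bit;
	ct->chip.irq_unmask = irq_gc_mask_set_bit;
	ct->chip.irq_ack    = irq_gc_ack_set_bit;

	/* Wire up all 32 interrupts and snapshot the current register value */
	irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST, 0);
	return 0;
}
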
v6.8 (kernel/irq/generic-chip.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Library implementing the most common irq chip callback functions
  4 *
  5 * Copyright (C) 2011, Thomas Gleixner
  6 */
  7#include <linux/io.h>
  8#include <linux/irq.h>
  9#include <linux/slab.h>
 10#include <linux/export.h>
 11#include <linux/irqdomain.h>
 12#include <linux/interrupt.h>
 13#include <linux/kernel_stat.h>
 14#include <linux/syscore_ops.h>
 15
 16#include "internals.h"
 17
 18static LIST_HEAD(gc_list);
 19static DEFINE_RAW_SPINLOCK(gc_lock);
 20
 21/**
 22 * irq_gc_noop - NOOP function
 23 * @d: irq_data
 24 */
 25void irq_gc_noop(struct irq_data *d)
 26{
 27}
 28EXPORT_SYMBOL_GPL(irq_gc_noop);
 29
 30/**
 31 * irq_gc_mask_disable_reg - Mask chip via disable register
 32 * @d: irq_data
 33 *
 34 * Chip has separate enable/disable registers instead of a single mask
 35 * register.
 36 */
 37void irq_gc_mask_disable_reg(struct irq_data *d)
 38{
 39	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 40	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 41	u32 mask = d->mask;
 42
 43	irq_gc_lock(gc);
 44	irq_reg_writel(gc, mask, ct->regs.disable);
 45	*ct->mask_cache &= ~mask;
 46	irq_gc_unlock(gc);
 47}
 48EXPORT_SYMBOL_GPL(irq_gc_mask_disable_reg);
 49
 50/**
 51 * irq_gc_mask_set_bit - Mask chip via setting bit in mask register
 52 * @d: irq_data
 53 *
 54 * Chip has a single mask register. Values of this register are cached
 55 * and protected by gc->lock
 56 */
 57void irq_gc_mask_set_bit(struct irq_data *d)
 58{
 59	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 60	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 61	u32 mask = d->mask;
 62
 63	irq_gc_lock(gc);
 64	*ct->mask_cache |= mask;
 65	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
 66	irq_gc_unlock(gc);
 67}
 68EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);
 69
 70/**
 71 * irq_gc_mask_clr_bit - Mask chip via clearing bit in mask register
 72 * @d: irq_data
 73 *
 74 * Chip has a single mask register. Values of this register are cached
 75 * and protected by gc->lock
 76 */
 77void irq_gc_mask_clr_bit(struct irq_data *d)
 78{
 79	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 80	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 81	u32 mask = d->mask;
 82
 83	irq_gc_lock(gc);
 84	*ct->mask_cache &= ~mask;
 85	irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
 86	irq_gc_unlock(gc);
 87}
 88EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
 89
 90/**
 91 * irq_gc_unmask_enable_reg - Unmask chip via enable register
 92 * @d: irq_data
 93 *
 94 * Chip has separate enable/disable registers instead of a single mask
 95 * register.
 96 */
 97void irq_gc_unmask_enable_reg(struct irq_data *d)
 98{
 99	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
100	struct irq_chip_type *ct = irq_data_get_chip_type(d);
101	u32 mask = d->mask;
102
103	irq_gc_lock(gc);
104	irq_reg_writel(gc, mask, ct->regs.enable);
105	*ct->mask_cache |= mask;
106	irq_gc_unlock(gc);
107}
108EXPORT_SYMBOL_GPL(irq_gc_unmask_enable_reg);
109
110/**
111 * irq_gc_ack_set_bit - Ack pending interrupt via setting bit
112 * @d: irq_data
113 */
114void irq_gc_ack_set_bit(struct irq_data *d)
115{
116	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
117	struct irq_chip_type *ct = irq_data_get_chip_type(d);
118	u32 mask = d->mask;
119
120	irq_gc_lock(gc);
121	irq_reg_writel(gc, mask, ct->regs.ack);
122	irq_gc_unlock(gc);
123}
124EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);
125
126/**
127 * irq_gc_ack_clr_bit - Ack pending interrupt via clearing bit
128 * @d: irq_data
129 */
130void irq_gc_ack_clr_bit(struct irq_data *d)
131{
132	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
133	struct irq_chip_type *ct = irq_data_get_chip_type(d);
134	u32 mask = ~d->mask;
135
136	irq_gc_lock(gc);
137	irq_reg_writel(gc, mask, ct->regs.ack);
138	irq_gc_unlock(gc);
139}
140
141/**
142 * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt
143 * @d: irq_data
144 *
145 * This generic implementation of the irq_mask_ack method is for chips
146 * with separate enable/disable registers instead of a single mask
147 * register and where a pending interrupt is acknowledged by setting a
148 * bit.
149 *
150 * Note: This is the only permutation currently used.  Similar generic
151 * functions should be added here if other permutations are required.
152 */
153void irq_gc_mask_disable_and_ack_set(struct irq_data *d)
154{
155	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
156	struct irq_chip_type *ct = irq_data_get_chip_type(d);
157	u32 mask = d->mask;
158
159	irq_gc_lock(gc);
160	irq_reg_writel(gc, mask, ct->regs.disable);
161	*ct->mask_cache &= ~mask;
162	irq_reg_writel(gc, mask, ct->regs.ack);
163	irq_gc_unlock(gc);
164}
165
166/**
167 * irq_gc_eoi - EOI interrupt
168 * @d: irq_data
169 */
170void irq_gc_eoi(struct irq_data *d)
171{
172	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
173	struct irq_chip_type *ct = irq_data_get_chip_type(d);
174	u32 mask = d->mask;
175
176	irq_gc_lock(gc);
177	irq_reg_writel(gc, mask, ct->regs.eoi);
178	irq_gc_unlock(gc);
179}
180
181/**
182 * irq_gc_set_wake - Set/clr wake bit for an interrupt
183 * @d:  irq_data
184 * @on: Indicates whether the wake bit should be set or cleared
185 *
186 * For chips where the wake from suspend functionality is not
187 * configured in a separate register and the wakeup active state is
188 * just stored in a bitmask.
189 */
190int irq_gc_set_wake(struct irq_data *d, unsigned int on)
191{
192	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
193	u32 mask = d->mask;
194
195	if (!(mask & gc->wake_enabled))
196		return -EINVAL;
197
198	irq_gc_lock(gc);
199	if (on)
200		gc->wake_active |= mask;
201	else
202		gc->wake_active &= ~mask;
203	irq_gc_unlock(gc);
204	return 0;
205}
206EXPORT_SYMBOL_GPL(irq_gc_set_wake);
207
208static u32 irq_readl_be(void __iomem *addr)
209{
210	return ioread32be(addr);
211}
212
213static void irq_writel_be(u32 val, void __iomem *addr)
214{
215	iowrite32be(val, addr);
216}
217
218void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
219			   int num_ct, unsigned int irq_base,
220			   void __iomem *reg_base, irq_flow_handler_t handler)
221{
222	struct irq_chip_type *ct = gc->chip_types;
223	int i;
224
225	raw_spin_lock_init(&gc->lock);
226	gc->num_ct = num_ct;
227	gc->irq_base = irq_base;
228	gc->reg_base = reg_base;
229	for (i = 0; i < num_ct; i++)
230		ct[i].chip.name = name;
231	gc->chip_types->handler = handler;
232}
233
234/**
235 * irq_alloc_generic_chip - Allocate a generic chip and initialize it
236 * @name:	Name of the irq chip
237 * @num_ct:	Number of irq_chip_type instances associated with this
238 * @irq_base:	Interrupt base nr for this chip
239 * @reg_base:	Register base address (virtual)
240 * @handler:	Default flow handler associated with this chip
241 *
242 * Returns an initialized irq_chip_generic structure. The chip defaults
243 * to the primary (index 0) irq_chip_type and @handler
244 */
245struct irq_chip_generic *
246irq_alloc_generic_chip(const char *name, int num_ct, unsigned int irq_base,
247		       void __iomem *reg_base, irq_flow_handler_t handler)
248{
249	struct irq_chip_generic *gc;
250
251	gc = kzalloc(struct_size(gc, chip_types, num_ct), GFP_KERNEL);
252	if (gc) {
253		irq_init_generic_chip(gc, name, num_ct, irq_base, reg_base,
254				      handler);
255	}
256	return gc;
257}
258EXPORT_SYMBOL_GPL(irq_alloc_generic_chip);
259
260static void
261irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
262{
263	struct irq_chip_type *ct = gc->chip_types;
264	u32 *mskptr = &gc->mask_cache, mskreg = ct->regs.mask;
265	int i;
266
267	for (i = 0; i < gc->num_ct; i++) {
268		if (flags & IRQ_GC_MASK_CACHE_PER_TYPE) {
269			mskptr = &ct[i].mask_cache_priv;
270			mskreg = ct[i].regs.mask;
271		}
272		ct[i].mask_cache = mskptr;
273		if (flags & IRQ_GC_INIT_MASK_CACHE)
274			*mskptr = irq_reg_readl(gc, mskreg);
275	}
276}
277
278/**
279 * __irq_alloc_domain_generic_chips - Allocate generic chips for an irq domain
280 * @d:			irq domain for which to allocate chips
281 * @irqs_per_chip:	Number of interrupts each chip handles (max 32)
282 * @num_ct:		Number of irq_chip_type instances associated with this
283 * @name:		Name of the irq chip
284 * @handler:		Default flow handler associated with these chips
285 * @clr:		IRQ_* bits to clear in the mapping function
286 * @set:		IRQ_* bits to set in the mapping function
287 * @gcflags:		Generic chip specific setup flags
288 */
289int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
290				     int num_ct, const char *name,
291				     irq_flow_handler_t handler,
292				     unsigned int clr, unsigned int set,
293				     enum irq_gc_flags gcflags)
294{
295	struct irq_domain_chip_generic *dgc;
296	struct irq_chip_generic *gc;
297	unsigned long flags;
298	int numchips, i;
299	size_t dgc_sz;
300	size_t gc_sz;
301	size_t sz;
302	void *tmp;
303
304	if (d->gc)
305		return -EBUSY;
306
307	numchips = DIV_ROUND_UP(d->revmap_size, irqs_per_chip);
308	if (!numchips)
309		return -EINVAL;
310
311	/* Allocate a pointer, generic chip and chiptypes for each chip */
312	gc_sz = struct_size(gc, chip_types, num_ct);
313	dgc_sz = struct_size(dgc, gc, numchips);
314	sz = dgc_sz + numchips * gc_sz;
315
316	tmp = dgc = kzalloc(sz, GFP_KERNEL);
317	if (!dgc)
318		return -ENOMEM;
319	dgc->irqs_per_chip = irqs_per_chip;
320	dgc->num_chips = numchips;
321	dgc->irq_flags_to_set = set;
322	dgc->irq_flags_to_clear = clr;
323	dgc->gc_flags = gcflags;
324	d->gc = dgc;
325
326	/* Calc pointer to the first generic chip */
327	tmp += dgc_sz;
328	for (i = 0; i < numchips; i++) {
329		/* Store the pointer to the generic chip */
330		dgc->gc[i] = gc = tmp;
331		irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip,
332				      NULL, handler);
333
334		gc->domain = d;
335		if (gcflags & IRQ_GC_BE_IO) {
336			gc->reg_readl = &irq_readl_be;
337			gc->reg_writel = &irq_writel_be;
338		}
339
340		raw_spin_lock_irqsave(&gc_lock, flags);
341		list_add_tail(&gc->list, &gc_list);
342		raw_spin_unlock_irqrestore(&gc_lock, flags);
343		/* Calc pointer to the next generic chip */
344		tmp += gc_sz;
345	}
346	return 0;
347}
348EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);
349
350static struct irq_chip_generic *
351__irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
352{
353	struct irq_domain_chip_generic *dgc = d->gc;
354	int idx;
355
356	if (!dgc)
357		return ERR_PTR(-ENODEV);
358	idx = hw_irq / dgc->irqs_per_chip;
359	if (idx >= dgc->num_chips)
360		return ERR_PTR(-EINVAL);
361	return dgc->gc[idx];
362}
363
364/**
365 * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
366 * @d:			irq domain pointer
367 * @hw_irq:		Hardware interrupt number
368 */
369struct irq_chip_generic *
370irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
371{
372	struct irq_chip_generic *gc = __irq_get_domain_generic_chip(d, hw_irq);
373
374	return !IS_ERR(gc) ? gc : NULL;
375}
376EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
377
378/*
379 * Separate lockdep classes for interrupt chip which can nest irq_desc
380 * lock and request mutex.
381 */
382static struct lock_class_key irq_nested_lock_class;
383static struct lock_class_key irq_nested_request_class;
384
385/*
386 * irq_map_generic_chip - Map a generic chip for an irq domain
387 */
388int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
389			 irq_hw_number_t hw_irq)
390{
391	struct irq_data *data = irq_domain_get_irq_data(d, virq);
392	struct irq_domain_chip_generic *dgc = d->gc;
393	struct irq_chip_generic *gc;
394	struct irq_chip_type *ct;
395	struct irq_chip *chip;
396	unsigned long flags;
397	int idx;
398
399	gc = __irq_get_domain_generic_chip(d, hw_irq);
400	if (IS_ERR(gc))
401		return PTR_ERR(gc);
402
403	idx = hw_irq % dgc->irqs_per_chip;
404
405	if (test_bit(idx, &gc->unused))
406		return -ENOTSUPP;
407
408	if (test_bit(idx, &gc->installed))
409		return -EBUSY;
410
411	ct = gc->chip_types;
412	chip = &ct->chip;
413
414	/* We only init the cache for the first mapping of a generic chip */
415	if (!gc->installed) {
416		raw_spin_lock_irqsave(&gc->lock, flags);
417		irq_gc_init_mask_cache(gc, dgc->gc_flags);
418		raw_spin_unlock_irqrestore(&gc->lock, flags);
419	}
420
421	/* Mark the interrupt as installed */
422	set_bit(idx, &gc->installed);
423
424	if (dgc->gc_flags & IRQ_GC_INIT_NESTED_LOCK)
425		irq_set_lockdep_class(virq, &irq_nested_lock_class,
426				      &irq_nested_request_class);
427
428	if (chip->irq_calc_mask)
429		chip->irq_calc_mask(data);
430	else
431		data->mask = 1 << idx;
432
433	irq_domain_set_info(d, virq, hw_irq, chip, gc, ct->handler, NULL, NULL);
434	irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
435	return 0;
436}
437
438void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
439{
440	struct irq_data *data = irq_domain_get_irq_data(d, virq);
441	struct irq_domain_chip_generic *dgc = d->gc;
442	unsigned int hw_irq = data->hwirq;
443	struct irq_chip_generic *gc;
444	int irq_idx;
445
446	gc = irq_get_domain_generic_chip(d, hw_irq);
447	if (!gc)
448		return;
449
450	irq_idx = hw_irq % dgc->irqs_per_chip;
451
452	clear_bit(irq_idx, &gc->installed);
453	irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL,
454			    NULL);
455
456}
457
458const struct irq_domain_ops irq_generic_chip_ops = {
459	.map	= irq_map_generic_chip,
460	.unmap  = irq_unmap_generic_chip,
461	.xlate	= irq_domain_xlate_onetwocell,
462};
463EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
464
465/**
466 * irq_setup_generic_chip - Setup a range of interrupts with a generic chip
467 * @gc:		Generic irq chip holding all data
468 * @msk:	Bitmask holding the irqs to initialize relative to gc->irq_base
469 * @flags:	Flags for initialization
470 * @clr:	IRQ_* bits to clear
471 * @set:	IRQ_* bits to set
472 *
473 * Set up max. 32 interrupts starting from gc->irq_base. Note, this
474 * initializes all interrupts to the primary irq_chip_type and its
475 * associated handler.
476 */
477void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk,
478			    enum irq_gc_flags flags, unsigned int clr,
479			    unsigned int set)
480{
481	struct irq_chip_type *ct = gc->chip_types;
482	struct irq_chip *chip = &ct->chip;
483	unsigned int i;
484
485	raw_spin_lock(&gc_lock);
486	list_add_tail(&gc->list, &gc_list);
487	raw_spin_unlock(&gc_lock);
488
489	irq_gc_init_mask_cache(gc, flags);
490
491	for (i = gc->irq_base; msk; msk >>= 1, i++) {
492		if (!(msk & 0x01))
493			continue;
494
495		if (flags & IRQ_GC_INIT_NESTED_LOCK)
496			irq_set_lockdep_class(i, &irq_nested_lock_class,
497					      &irq_nested_request_class);
498
499		if (!(flags & IRQ_GC_NO_MASK)) {
500			struct irq_data *d = irq_get_irq_data(i);
501
502			if (chip->irq_calc_mask)
503				chip->irq_calc_mask(d);
504			else
505				d->mask = 1 << (i - gc->irq_base);
506		}
507		irq_set_chip_and_handler(i, chip, ct->handler);
508		irq_set_chip_data(i, gc);
509		irq_modify_status(i, clr, set);
510	}
511	gc->irq_cnt = i - gc->irq_base;
512}
513EXPORT_SYMBOL_GPL(irq_setup_generic_chip);
514
515/**
516 * irq_setup_alt_chip - Switch to alternative chip
517 * @d:		irq_data for this interrupt
518 * @type:	Flow type to be initialized
519 *
520 * Only to be called from chip->irq_set_type() callbacks.
521 */
522int irq_setup_alt_chip(struct irq_data *d, unsigned int type)
523{
524	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
525	struct irq_chip_type *ct = gc->chip_types;
526	unsigned int i;
527
528	for (i = 0; i < gc->num_ct; i++, ct++) {
529		if (ct->type & type) {
530			d->chip = &ct->chip;
531			irq_data_to_desc(d)->handle_irq = ct->handler;
532			return 0;
533		}
534	}
535	return -EINVAL;
536}
537EXPORT_SYMBOL_GPL(irq_setup_alt_chip);
538
539/**
540 * irq_remove_generic_chip - Remove a chip
541 * @gc:		Generic irq chip holding all data
542 * @msk:	Bitmask holding the irqs to initialize relative to gc->irq_base
543 * @clr:	IRQ_* bits to clear
544 * @set:	IRQ_* bits to set
545 *
546 * Remove up to 32 interrupts starting from gc->irq_base.
547 */
548void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
549			     unsigned int clr, unsigned int set)
550{
551	unsigned int i, virq;
552
553	raw_spin_lock(&gc_lock);
554	list_del(&gc->list);
555	raw_spin_unlock(&gc_lock);
556
557	for (i = 0; msk; msk >>= 1, i++) {
558		if (!(msk & 0x01))
559			continue;
560
561		/*
562		 * Interrupt domain based chips store the base hardware
563		 * interrupt number in gc::irq_base. Otherwise gc::irq_base
564		 * contains the base Linux interrupt number.
565		 */
566		if (gc->domain) {
567			virq = irq_find_mapping(gc->domain, gc->irq_base + i);
568			if (!virq)
569				continue;
570		} else {
571			virq = gc->irq_base + i;
572		}
573
574		/* Remove handler first. That will mask the irq line */
575		irq_set_handler(virq, NULL);
576		irq_set_chip(virq, &no_irq_chip);
577		irq_set_chip_data(virq, NULL);
578		irq_modify_status(virq, clr, set);
579	}
580}
581EXPORT_SYMBOL_GPL(irq_remove_generic_chip);
582
583static struct irq_data *irq_gc_get_irq_data(struct irq_chip_generic *gc)
584{
585	unsigned int virq;
586
587	if (!gc->domain)
588		return irq_get_irq_data(gc->irq_base);
589
590	/*
591	 * We don't know which of the irqs has been actually
592	 * installed. Use the first one.
593	 */
594	if (!gc->installed)
595		return NULL;
596
597	virq = irq_find_mapping(gc->domain, gc->irq_base + __ffs(gc->installed));
598	return virq ? irq_get_irq_data(virq) : NULL;
599}
600
601#ifdef CONFIG_PM
602static int irq_gc_suspend(void)
603{
604	struct irq_chip_generic *gc;
605
606	list_for_each_entry(gc, &gc_list, list) {
607		struct irq_chip_type *ct = gc->chip_types;
608
609		if (ct->chip.irq_suspend) {
610			struct irq_data *data = irq_gc_get_irq_data(gc);
611
612			if (data)
613				ct->chip.irq_suspend(data);
614		}
615
616		if (gc->suspend)
617			gc->suspend(gc);
618	}
619	return 0;
620}
621
622static void irq_gc_resume(void)
623{
624	struct irq_chip_generic *gc;
625
626	list_for_each_entry(gc, &gc_list, list) {
627		struct irq_chip_type *ct = gc->chip_types;
628
629		if (gc->resume)
630			gc->resume(gc);
631
632		if (ct->chip.irq_resume) {
633			struct irq_data *data = irq_gc_get_irq_data(gc);
634
635			if (data)
636				ct->chip.irq_resume(data);
637		}
638	}
639}
640#else
641#define irq_gc_suspend NULL
642#define irq_gc_resume NULL
643#endif
644
645static void irq_gc_shutdown(void)
646{
647	struct irq_chip_generic *gc;
648
649	list_for_each_entry(gc, &gc_list, list) {
650		struct irq_chip_type *ct = gc->chip_types;
651
652		if (ct->chip.irq_pm_shutdown) {
653			struct irq_data *data = irq_gc_get_irq_data(gc);
654
655			if (data)
656				ct->chip.irq_pm_shutdown(data);
657		}
658	}
659}
660
661static struct syscore_ops irq_gc_syscore_ops = {
662	.suspend = irq_gc_suspend,
663	.resume = irq_gc_resume,
664	.shutdown = irq_gc_shutdown,
665};
666
667static int __init irq_gc_init_ops(void)
668{
669	register_syscore_ops(&irq_gc_syscore_ops);
670	return 0;
671}
672device_initcall(irq_gc_init_ops);
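For comparison, a minimal, hypothetical sketch of the irq-domain path that the v6.8 code above is built around: create a linear domain using irq_generic_chip_ops, allocate the per-domain generic chips through irq_alloc_domain_generic_chips() (the wrapper around __irq_alloc_domain_generic_chips()), then fill in the register layout and callbacks. The "vendor,my-intc" compatible string, the register offsets and the my_intc names are assumptions for illustration, not taken from any real driver.

/*
 * Illustrative sketch only, not part of kernel/irq/generic-chip.c.
 * The same kind of controller as before, hooked up through an irq domain.
 * Error unwinding (iounmap(), irq_domain_remove()) is omitted for brevity.
 */
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>

static int __init my_intc_of_init(struct device_node *node,
				  struct device_node *parent)
{
	void __iomem *base = of_iomap(node, 0);	/* hypothetical mapping */
	struct irq_chip_generic *gc;
	struct irq_domain *domain;
	int ret;

	if (!base)
		return -ENOMEM;

	/* One linear domain covering 32 hardware interrupt lines */
	domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops, NULL);
	if (!domain)
		return -ENOMEM;

	/* One generic chip with a single irq_chip_type for the whole domain */
	ret = irq_alloc_domain_generic_chips(domain, 32, 1, "my-intc",
					     handle_level_irq, IRQ_NOREQUEST,
					     0, 0);
	if (ret)
		return ret;

	/* Fill in the (made-up) register layout of the first and only chip */
	gc = irq_get_domain_generic_chip(domain, 0);
	gc->reg_base = base;
	gc->chip_types[0].regs.enable  = 0x00;
	gc->chip_types[0].regs.disable = 0x04;
	gc->chip_types[0].regs.ack     = 0x08;
	gc->chip_types[0].chip.irq_unmask   = irq_gc_unmask_enable_reg;
	gc->chip_types[0].chip.irq_mask     = irq_gc_mask_disable_reg;
	gc->chip_types[0].chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set;

	return 0;
}
IRQCHIP_DECLARE(my_intc, "vendor,my-intc", my_intc_of_init);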