v4.6: kernel/irq/irqdesc.c
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bug reports caused by random command line masks.
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);

static void __init init_irq_default_affinity(void)
{
#ifdef CONFIG_CPUMASK_OFFSTACK
	if (!irq_default_affinity)
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
#endif
	if (cpumask_empty(irq_default_affinity))
		cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
		struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_rcu_head(&desc->rcu);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock also protects show_interrupts() and
	 * kstat_irqs_usr(). Once we have deleted the descriptor from
	 * the sparse tree we can free it. Lookups from proc will then
	 * fail to find the descriptor.
	 */
	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	/*
	 * We free the descriptor, masks and stat fields via RCU. That
	 * allows demultiplexing interrupts to do RCU-based management
	 * of the child interrupts.
	 */
	call_rcu(&desc->rcu, delayed_free_desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
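
/*
 * Usage sketch (editor's note, not part of the original file): a
 * demultiplexing chained handler typically resolves each pending
 * child interrupt to a Linux irq number and feeds it to
 * generic_handle_irq(). The names my_chip_base, MY_STAT and
 * my_irq_base below are hypothetical.
 *
 *	static void my_demux_handler(struct irq_desc *desc)
 *	{
 *		unsigned long pending = readl(my_chip_base + MY_STAT);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, 32)
 *			generic_handle_irq(my_irq_base + bit);
 *	}
 */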

#ifdef CONFIG_HANDLE_DOMAIN_IRQ
/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain in which to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

	/*
	 * Some hardware randomly delivers bogus interrupts. Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		generic_handle_irq(irq);
	}

	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
#endif
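
/*
 * Usage sketch (editor's note, not part of the original file): root
 * irqchip drivers normally reach this through the handle_domain_irq()
 * wrapper from <linux/irqdesc.h>, which passes lookup = true. On ARM,
 * for instance, a low-level entry handler might look like this
 * (my_intc_base, MY_PENDING and my_domain are hypothetical):
 *
 *	static void __exception_irq_entry my_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 hwirq = readl_relaxed(my_intc_base + MY_PENDING);
 *
 *		handle_domain_irq(my_domain, hwirq, regs);
 *	}
 */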

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or an error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
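
/*
 * Usage sketch (editor's note, not part of the original file):
 * callers normally go through the irq_alloc_descs() macro from
 * <linux/irq.h>, which supplies THIS_MODULE as @owner. A driver that
 * needs four consecutive irq numbers anywhere in the space could do:
 *
 *	int virq = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (virq < 0)
 *		return virq;
 *	...
 *	irq_free_descs(virq, 4);
 *
 * A negative return is the error code, e.g. -EEXIST when a requested
 * fixed irq number is already taken.
 */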

#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0, or 0 if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);

/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif
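
/*
 * Usage sketch (editor's note, not part of the original file): on
 * architectures that still select CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ,
 * the two helpers above are used as a pair; note the "0 means failure"
 * return convention:
 *
 *	unsigned int irq = irq_alloc_hwirqs(1, numa_node_id());
 *
 *	if (!irq)
 *		return -ENOSPC;
 *	...
 *	irq_free_hwirqs(irq, 1);
 */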

/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
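
/*
 * Editor's note (not part of the original file): the core iterates
 * over all allocated irqs with the for_each_active_irq() helper from
 * kernel/irq/internals.h, which is built on irq_get_next_irq():
 *
 *	#define for_each_active_irq(irq)			\
 *		for (irq = irq_get_next_irq(0); irq < nr_irqs;	\
 *		     irq = irq_get_next_irq(irq + 1))
 */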

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
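
/*
 * Usage sketch (editor's note, not part of the original file): the
 * two helpers above are always used as a pair, usually through the
 * irq_get_desc_buslock()/irq_put_desc_busunlock() wrappers in
 * kernel/irq/internals.h:
 *
 *	unsigned long flags;
 *	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 *
 *	if (!desc)
 *		return -EINVAL;
 *	...modify the descriptor under desc->lock...
 *	irq_put_desc_busunlock(desc, flags);
 */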

int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}
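
/*
 * Usage sketch (editor's note, not part of the original file): per-cpu
 * interrupts such as a local timer PPI are marked at mapping time and
 * then requested with the percpu variants (my_timer_handler and
 * my_percpu_dev_id are hypothetical):
 *
 *	irq_set_percpu_devid(irq);
 *	err = request_percpu_irq(irq, my_timer_handler, "my_timer",
 *				 my_percpu_dev_id);
 */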

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}

/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	unsigned int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	irq_lock_sparse();
	sum = kstat_irqs(irq);
	irq_unlock_sparse();
	return sum;
}
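
/*
 * Usage sketch (editor's note, not part of the original file):
 * readers running in preemptible context without further protection,
 * such as the /proc/stat code, use the _usr variant:
 *
 *	sum += kstat_irqs_usr(irq);
 *
 * In-kernel callers that already hold a reference to the descriptor
 * can use kstat_irqs() or kstat_irqs_cpu() directly.
 */
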
v3.5.6: kernel/irq/irqdesc.c
/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code
 *
 * Detailed information is available in Documentation/DocBook/genericirq
 *
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>

#include "internals.h"

/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;

#if defined(CONFIG_SMP)
static void __init init_irq_default_affinity(void)
{
	alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	cpumask_setall(irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif

#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
		free_cpumask_var(desc->irq_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node)
{
	desc->irq_data.node = node;
	cpumask_copy(desc->irq_data.affinity, irq_default_affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
}

static inline int desc_node(struct irq_desc *desc)
{
	return desc->irq_data.node;
}

#else
static inline int
alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
static inline void desc_smp_init(struct irq_desc *desc, int node) { }
static inline int desc_node(struct irq_desc *desc) { return 0; }
#endif

static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
		struct module *owner)
{
	int cpu;

	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	desc->irq_data.handler_data = NULL;
	desc->irq_data.msi_desc = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node);
}

int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

#ifdef CONFIG_SPARSE_IRQ

static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_data.affinity);
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif

static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
{
	struct irq_desc *desc;
	gfp_t gfp = GFP_KERNEL;

	desc = kzalloc_node(sizeof(*desc), gfp, node);
	if (!desc)
		return NULL;
	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, gfp, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);

	desc_set_defaults(irq, desc, node, owner);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unregister_irq_proc(irq, desc);

	mutex_lock(&sparse_irq_lock);
	delete_irq_desc(irq);
	mutex_unlock(&sparse_irq_lock);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       struct module *owner)
{
	struct irq_desc *desc;
	int i;

	for (i = 0; i < cnt; i++) {
		desc = alloc_desc(start + i, node, owner);
		if (!desc)
			goto err;
		mutex_lock(&sparse_irq_lock);
		irq_insert_desc(start + i, desc);
		mutex_unlock(&sparse_irq_lock);
	}
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return -ENOMEM;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}

int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;

	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}

#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], GFP_KERNEL, node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		desc_set_defaults(i, &desc[i], node, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}

static void free_desc(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

#endif /* !CONFIG_SPARSE_IRQ */

/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(irq, desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);

/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	mutex_lock(&sparse_irq_lock);
	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);

/**
 * irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 *
 * Returns the first irq number or an error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto err;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto err;
	}

	bitmap_set(allocated_irqs, start, cnt);
	mutex_unlock(&sparse_irq_lock);
	return alloc_descs(start, cnt, node, owner);

err:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);

/**
 * irq_reserve_irqs - mark irqs allocated
 * @from:	mark from irq number
 * @cnt:	number of irqs to mark
 *
 * Returns 0 on success or an appropriate error code
 */
int irq_reserve_irqs(unsigned int from, unsigned int cnt)
{
	unsigned int start;
	int ret = 0;

	if (!cnt || (from + cnt) > nr_irqs)
		return -EINVAL;

	mutex_lock(&sparse_irq_lock);
	start = bitmap_find_next_zero_area(allocated_irqs, nr_irqs, from, cnt, 0);
	if (start == from)
		bitmap_set(allocated_irqs, start, cnt);
	else
		ret = -EEXIST;
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
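
/*
 * Usage sketch (editor's note, not part of the original file): legacy
 * architecture code used this to claim a fixed, preassigned range so
 * the dynamic allocator would not hand those numbers out, e.g. for a
 * hypothetical block of 16 ISA irqs:
 *
 *	if (irq_reserve_irqs(0, 16))
 *		pr_err("could not reserve the legacy irq range\n");
 */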
 
 
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}

struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

/**
 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
 * @irq:	irq number to clean up
 */
void dynamic_irq_cleanup(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, desc_node(desc), NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}
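
/*
 * Usage sketch (editor's note, not part of the original file): a
 * caller recycling an irq number resets the descriptor to its
 * defaults before reprogramming it (my_chip is hypothetical):
 *
 *	dynamic_irq_cleanup(virq);
 *	irq_set_chip_and_handler(virq, &my_chip, handle_level_irq);
 */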

unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	unsigned int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}