/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <asm/uaccess.h>

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
        printk("unexpected IRQ # %d\n", irq);
}

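/*
 * Count of interrupt events we could not attribute to a handler (see
 * spurious_interrupt() below); reported as the "ERR" line of
 * /proc/interrupts via arch_show_interrupts().
 */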
atomic_t irq_err_count;

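/*
 * Hook called from the generic /proc/interrupts code so the architecture
 * can append its own counters; all we report is the error count above.
 */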
int arch_show_interrupts(struct seq_file *p, int prec)
{
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
        return 0;
}

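/*
 * Entered from the low-level interrupt dispatch code when an interrupt
 * fires that nothing claims; all we do is account for it in irq_err_count.
 */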
asmlinkage void spurious_interrupt(void)
{
        atomic_inc(&irq_err_count);
}

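/*
 * Architecture entry point for interrupt setup: mark every IRQ line as not
 * probeable, then let the platform register its controllers via
 * arch_init_irq().
 */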
void __init init_IRQ(void)
{
        int i;

        for (i = 0; i < NR_IRQS; i++)
                irq_set_noprobe(i);

        arch_init_irq();
}

#ifdef CONFIG_DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void)
{
        unsigned long sp;

        __asm__ __volatile__("move %0, $sp" : "=r" (sp));
        sp &= THREAD_MASK;

        /*
         * Check for stack overflow: is there less than STACK_WARN free?
         * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
         */
        if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
                printk("do_IRQ: stack overflow: %ld\n",
                       sp - sizeof(struct thread_info));
                dump_stack();
        }
}
#else
static inline void check_stack_overflow(void) {}
#endif


/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
void __irq_entry do_IRQ(unsigned int irq)
{
        irq_enter();
        check_stack_overflow();
        generic_handle_irq(irq);
        irq_exit();
}
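
/*
 * Illustrative sketch, not part of this file: a board's plat_irq_dispatch()
 * typically decodes the pending CP0 Cause bits and hands each one to
 * do_IRQ().  MYBOARD_IRQ_BASE is a made-up value for the example; the
 * CAUSEF_IP* and ST0_IM masks are the usual MIPS definitions.
 *
 *      asmlinkage void plat_irq_dispatch(void)
 *      {
 *              unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
 *
 *              if (pending & CAUSEF_IP7)
 *                      do_IRQ(MYBOARD_IRQ_BASE + 7);   // CPU timer
 *              else if (pending & CAUSEF_IP2)
 *                      do_IRQ(MYBOARD_IRQ_BASE + 2);   // cascaded controller
 *              else
 *                      spurious_interrupt();
 *      }
 */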

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>

#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/uaccess.h>

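/*
 * Non-zero once IRQ setup has already been performed for an early KGDB
 * bring-up; init_IRQ() below checks it so that work is not done twice.
 */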
#ifdef CONFIG_KGDB
int kgdb_early_setup;
#endif

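/*
 * Bitmap of IRQ numbers handed out by allocate_irqno() and
 * alloc_legacy_irqno() below; a set bit means the number is in use.
 */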
static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];

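/*
 * Hand out an unused IRQ number without taking a lock: find a clear bit,
 * claim it atomically, and retry if another CPU raced us to it.  Returns
 * -ENOSPC once the map is exhausted.
 */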
int allocate_irqno(void)
{
        int irq;

again:
        irq = find_first_zero_bit(irq_map, NR_IRQS);

        if (irq >= NR_IRQS)
                return -ENOSPC;

        if (test_and_set_bit(irq, irq_map))
                goto again;

        return irq;
}
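
/*
 * Illustrative sketch, not taken from this file: a platform interrupt
 * controller might pair allocate_irqno()/free_irqno() roughly as below.
 * my_chip, my_map_irq() and my_unmap_irq() are invented names for the
 * example.
 *
 *      int my_map_irq(void)
 *      {
 *              int irq = allocate_irqno();
 *
 *              if (irq < 0)
 *                      return irq;
 *              irq_set_chip_and_handler(irq, &my_chip, handle_level_irq);
 *              return irq;
 *      }
 *
 *      void my_unmap_irq(int irq)
 *      {
 *              free_irqno(irq);
 *      }
 */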

/*
 * Allocate the 16 legacy interrupts for i8259 devices. This happens early
 * in the kernel initialization so treating allocation failure as BUG() is
 * ok.
 */
void __init alloc_legacy_irqno(void)
{
        int i;

        for (i = 0; i <= 16; i++)
                BUG_ON(test_and_set_bit(i, irq_map));
}

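/*
 * Return a number obtained from allocate_irqno() to the pool.  The
 * surrounding smp_mb calls give the clear_bit() full memory-barrier
 * semantics.
 */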
void free_irqno(unsigned int irq)
{
        smp_mb__before_clear_bit();
        clear_bit(irq, irq_map);
        smp_mb__after_clear_bit();
}

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
        smtc_im_ack_irq(irq);
        printk("unexpected IRQ # %d\n", irq);
}

atomic_t irq_err_count;

int arch_show_interrupts(struct seq_file *p, int prec)
{
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
        return 0;
}

asmlinkage void spurious_interrupt(void)
{
        atomic_inc(&irq_err_count);
}

void __init init_IRQ(void)
{
        int i;

#ifdef CONFIG_KGDB
        if (kgdb_early_setup)
                return;
#endif

        for (i = 0; i < NR_IRQS; i++)
                irq_set_noprobe(i);

        arch_init_irq();

#ifdef CONFIG_KGDB
        if (!kgdb_early_setup)
                kgdb_early_setup = 1;
#endif
}

#ifdef CONFIG_DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void)
{
        unsigned long sp;

        __asm__ __volatile__("move %0, $sp" : "=r" (sp));
        sp &= THREAD_MASK;

        /*
         * Check for stack overflow: is there less than STACK_WARN free?
         * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
         */
        if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
                printk("do_IRQ: stack overflow: %ld\n",
                       sp - sizeof(struct thread_info));
                dump_stack();
        }
}
#else
static inline void check_stack_overflow(void) {}
#endif


/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
void __irq_entry do_IRQ(unsigned int irq)
{
        irq_enter();
        check_stack_overflow();
        if (!smtc_handle_on_other_cpu(irq))
                generic_handle_irq(irq);
        irq_exit();
}

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * To avoid inefficient and in some cases pathological re-checking of
 * IRQ affinity, we have this variant that skips the affinity check.
 */

void __irq_entry do_IRQ_no_affinity(unsigned int irq)
{
        irq_enter();
        smtc_im_backstop(irq);
        generic_handle_irq(irq);
        irq_exit();
}

#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */