/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994 - 2000 Ralf Baechle
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

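/*
 * Per-CPU IRQ stack base pointers; the backing pages are allocated in
 * init_IRQ() below.
 */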
void *irq_stack[NR_CPUS];

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	printk("unexpected IRQ # %d\n", irq);
}

atomic_t irq_err_count;

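/*
 * Hook used by the generic /proc/interrupts code to append architecture
 * specific lines; here it reports the spurious interrupt ("ERR") count.
 */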
int arch_show_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
	return 0;
}

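/*
 * Called from the low-level interrupt dispatch for interrupts that nothing
 * claims; just account them towards the ERR counter reported above.
 */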
asmlinkage void spurious_interrupt(void)
{
	atomic_inc(&irq_err_count);
}

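/*
 * Early architecture interrupt setup: mark every IRQ number as not
 * probeable, clear the CP0 Status.IM field when a vectored external
 * interrupt controller (VEIC) is in use, let the platform register its
 * interrupt controllers via arch_init_irq(), and finally allocate a
 * dedicated IRQ stack for each possible CPU.
 */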
void __init init_IRQ(void)
{
	int i;
	unsigned int order = get_order(IRQ_STACK_SIZE);

	for (i = 0; i < NR_IRQS; i++)
		irq_set_noprobe(i);

	if (cpu_has_veic)
		clear_c0_status(ST0_IM);

	arch_init_irq();

	for_each_possible_cpu(i) {
		void *s = (void *)__get_free_pages(GFP_KERNEL, order);

		irq_stack[i] = s;
		pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
			 irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
	}
}

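/*
 * Optional debugging aid (CONFIG_DEBUG_STACKOVERFLOW): warn and dump a
 * backtrace when an interrupt arrives with the kernel stack nearly full.
 */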
#ifdef CONFIG_DEBUG_STACKOVERFLOW
static inline void check_stack_overflow(void)
{
	unsigned long sp;

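	/*
	 * Read the current stack pointer and reduce it to its offset within
	 * the THREAD_SIZE aligned kernel stack.
	 */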
	__asm__ __volatile__("move %0, $sp" : "=r" (sp));
	sp &= THREAD_MASK;

	/*
	 * Check for stack overflow: is there less than STACK_WARN free?
	 * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
	 */
	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
}
#else
static inline void check_stack_overflow(void) {}
#endif


/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
void __irq_entry do_IRQ(unsigned int irq)
{
	irq_enter();
	check_stack_overflow();
	generic_handle_irq(irq);
	irq_exit();
}

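/*
 * Like do_IRQ() but takes a hardware interrupt number relative to an
 * irq_domain and resolves it to the matching irq_desc before handling it;
 * hwirqs with no mapping are silently ignored.
 */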
#ifdef CONFIG_IRQ_DOMAIN
void __irq_entry do_domain_IRQ(struct irq_domain *domain, unsigned int hwirq)
{
	struct irq_desc *desc;

	irq_enter();
	check_stack_overflow();

	desc = irq_resolve_mapping(domain, hwirq);
	if (likely(desc))
		handle_irq_desc(desc);

	irq_exit();
}
#endif