arch/x86/kernel/irqinit.c (v3.5.6)
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/kprobes.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/device.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <linux/atomic.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/setup.h>
#include <asm/i8259.h>
#include <asm/traps.h>
#include <asm/prom.h>

/*
 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
 * (these are usually mapped to vectors 0x30-0x3f)
 */

/*
 * The IO-APIC gives us many more interrupt sources. Most of these
 * are unused but an SMP system is supposed to have enough memory ...
 * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
 * across the spectrum, so we really want to be prepared to get all
 * of these. Plus, more powerful systems might have more than 64
 * IO-APIC registers.
 *
 * (these are usually mapped into the 0x30-0xff vector range)
 */

#ifdef CONFIG_X86_32
/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (i.e. as explained in the Intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, it also leads to
 * races. IBM designers who came up with it should be shot.
 */

static irqreturn_t math_error_irq(int cpl, void *dev_id)
{
	outb(0, 0xF0);
	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
		return IRQ_NONE;
	math_error(get_irq_regs(), 0, X86_TRAP_MF);
	return IRQ_HANDLED;
}

/*
 * New motherboards sometimes make IRQ 13 be a PCI interrupt,
 * so allow interrupt sharing.
 */
static struct irqaction fpu_irq = {
	.handler = math_error_irq,
	.name = "fpu",
	.flags = IRQF_NO_THREAD,
};
#endif

/*
 * IRQ2 is the cascade interrupt to the second interrupt controller
 */
static struct irqaction irq2 = {
	.handler = no_action,
	.name = "cascade",
	.flags = IRQF_NO_THREAD,
};

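/*
 * Per-CPU table mapping each interrupt vector to the irq number currently
 * assigned to it, or -1 if the vector is unused.
 */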
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
	[0 ... NR_VECTORS - 1] = -1,
};

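/*
 * Return 1 if any online CPU has @vector mapped to an irq in its
 * vector_irq table, 0 otherwise.
 */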
int vector_used_by_percpu_irq(unsigned int vector)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (per_cpu(vector_irq, cpu)[vector] != -1)
			return 1;
	}

	return 0;
}

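/*
 * Initialize the BSP's local APIC (where configured) and the legacy PIC,
 * then attach the PIC's irq_chip and the level-triggered flow handler to
 * each legacy irq.
 */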
void __init init_ISA_irqs(void)
{
	struct irq_chip *chip = legacy_pic->chip;
	const char *name = chip->name;
	int i;

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
	init_bsp_APIC();
#endif
	legacy_pic->init(0);

	for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
		irq_set_chip_and_handler_name(i, chip, handle_level_irq, name);
}

void __init init_IRQ(void)
{
	int i;

	/*
	 * We probably need a better place for this, but it works for
	 * now ...
	 */
	x86_add_irq_domains();

	/*
	 * On cpu 0, assign IRQ0_VECTOR..IRQ15_VECTOR to IRQ 0..15.
	 * If these IRQs are handled by legacy interrupt controllers like the
	 * PIC, then this configuration will likely be static after boot. If
	 * these IRQs are handled by more modern controllers like the IO-APIC,
	 * then this vector space can be freed and re-used dynamically as the
	 * irqs migrate etc.
	 */
	for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
		per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;

	x86_init.irqs.intr_init();
}

/*
 * Set up the vector to irq mappings.
 */
void setup_vector_irq(int cpu)
{
#ifndef CONFIG_X86_IO_APIC
	int irq;

	/*
	 * On most platforms, the legacy PIC delivers the interrupts on the
	 * boot cpu. But there are certain platforms where PIC interrupts are
	 * delivered to multiple cpus. If the legacy IRQ is handled by the
	 * legacy PIC, then for the new cpu that is coming online, set up the
	 * static legacy vector to irq mapping:
	 */
	for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++)
		per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
#endif

	__setup_vector_irq(cpu);
}

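/*
 * Install the interrupt gates for the SMP IPIs: reschedule, TLB
 * invalidation, function call, irq-move cleanup and reboot/stop.
 */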
static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPIs for invalidation */
#define ALLOC_INVTLB_VEC(NR) \
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+NR, \
		invalidate_interrupt##NR)

	switch (NUM_INVALIDATE_TLB_VECTORS) {
	default:
		ALLOC_INVTLB_VEC(31);
	case 31:
		ALLOC_INVTLB_VEC(30);
	case 30:
		ALLOC_INVTLB_VEC(29);
	case 29:
		ALLOC_INVTLB_VEC(28);
	case 28:
		ALLOC_INVTLB_VEC(27);
	case 27:
		ALLOC_INVTLB_VEC(26);
	case 26:
		ALLOC_INVTLB_VEC(25);
	case 25:
		ALLOC_INVTLB_VEC(24);
	case 24:
		ALLOC_INVTLB_VEC(23);
	case 23:
		ALLOC_INVTLB_VEC(22);
	case 22:
		ALLOC_INVTLB_VEC(21);
	case 21:
		ALLOC_INVTLB_VEC(20);
	case 20:
		ALLOC_INVTLB_VEC(19);
	case 19:
		ALLOC_INVTLB_VEC(18);
	case 18:
		ALLOC_INVTLB_VEC(17);
	case 17:
		ALLOC_INVTLB_VEC(16);
	case 16:
		ALLOC_INVTLB_VEC(15);
	case 15:
		ALLOC_INVTLB_VEC(14);
	case 14:
		ALLOC_INVTLB_VEC(13);
	case 13:
		ALLOC_INVTLB_VEC(12);
	case 12:
		ALLOC_INVTLB_VEC(11);
	case 11:
		ALLOC_INVTLB_VEC(10);
	case 10:
		ALLOC_INVTLB_VEC(9);
	case 9:
		ALLOC_INVTLB_VEC(8);
	case 8:
		ALLOC_INVTLB_VEC(7);
	case 7:
		ALLOC_INVTLB_VEC(6);
	case 6:
		ALLOC_INVTLB_VEC(5);
	case 5:
		ALLOC_INVTLB_VEC(4);
	case 4:
		ALLOC_INVTLB_VEC(3);
	case 3:
		ALLOC_INVTLB_VEC(2);
	case 2:
		ALLOC_INVTLB_VEC(1);
	case 1:
		ALLOC_INVTLB_VEC(0);
		break;
	}

	/* IPI for generic function call */
	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

	/* IPI for generic single function call */
	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
			call_function_single_interrupt);

	/* Low priority IPI to cleanup after moving an irq */
	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
	set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);

	/* IPI used for rebooting/stopping */
	alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
#endif
#endif /* CONFIG_SMP */
}

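/*
 * Install the local APIC related gates: the SMP IPIs above plus, where
 * configured, the thermal, MCE-threshold, local APIC timer, platform IPI,
 * spurious, error and irq_work vectors.
 */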
static void __init apic_intr_init(void)
{
	smp_intr_init();

#ifdef CONFIG_X86_THERMAL_VECTOR
	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
	/* self generated IPI for local APIC timer */
	alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

	/* IPI for X86 platform specific use */
	alloc_intr_gate(X86_PLATFORM_IPI_VECTOR, x86_platform_ipi);

	/* IPI vectors for APIC spurious and error interrupts */
	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);

	/* IRQ work interrupts: */
# ifdef CONFIG_IRQ_WORK
	alloc_intr_gate(IRQ_WORK_VECTOR, irq_work_interrupt);
# endif

#endif
}

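/*
 * Default x86 irq setup: run platform quirks, install the APIC/IPI gates,
 * point every remaining external vector at its common interrupt stub, and
 * wire up the PIC cascade and FPU irqs where needed.
 */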
void __init native_init_IRQ(void)
{
	int i;

	/* Execute any quirks before the call gates are initialised: */
	x86_init.irqs.pre_vector_init();

	apic_intr_init();

	/*
	 * Cover the whole vector space; no vector can escape
	 * us. (Some of these will be overridden and become
	 * 'special' SMP interrupts.)
	 */
	i = FIRST_EXTERNAL_VECTOR;
	for_each_clear_bit_from(i, used_vectors, NR_VECTORS) {
		/* IA32_SYSCALL_VECTOR could be used in trap_init already. */
		set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
	}

	if (!acpi_ioapic && !of_ioapic)
		setup_irq(2, &irq2);

#ifdef CONFIG_X86_32
	/*
	 * External FPU? Set up irq13 if so, for the
	 * original braindamaged IBM FERR coupling.
	 */
	if (boot_cpu_data.hard_math && !cpu_has_fpu)
		setup_irq(FPU_IRQ, &fpu_irq);

	irq_ctx_init(smp_processor_id());
#endif
}
arch/x86/kernel/irqinit.c (v3.1)
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/kprobes.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <linux/atomic.h>
#include <asm/system.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/setup.h>
#include <asm/i8259.h>
#include <asm/traps.h>
#include <asm/prom.h>

/*
 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
 * (these are usually mapped to vectors 0x30-0x3f)
 */

/*
 * The IO-APIC gives us many more interrupt sources. Most of these
 * are unused but an SMP system is supposed to have enough memory ...
 * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
 * across the spectrum, so we really want to be prepared to get all
 * of these. Plus, more powerful systems might have more than 64
 * IO-APIC registers.
 *
 * (these are usually mapped into the 0x30-0xff vector range)
 */

#ifdef CONFIG_X86_32
/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (i.e. as explained in the Intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, it also leads to
 * races. IBM designers who came up with it should be shot.
 */

static irqreturn_t math_error_irq(int cpl, void *dev_id)
{
	outb(0, 0xF0);
	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
		return IRQ_NONE;
	math_error(get_irq_regs(), 0, 16);
	return IRQ_HANDLED;
}

/*
 * New motherboards sometimes make IRQ 13 be a PCI interrupt,
 * so allow interrupt sharing.
 */
static struct irqaction fpu_irq = {
	.handler = math_error_irq,
	.name = "fpu",
	.flags = IRQF_NO_THREAD,
};
#endif

/*
 * IRQ2 is the cascade interrupt to the second interrupt controller
 */
static struct irqaction irq2 = {
	.handler = no_action,
	.name = "cascade",
	.flags = IRQF_NO_THREAD,
};

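/*
 * Per-CPU table mapping each interrupt vector to the irq number currently
 * assigned to it, or -1 if the vector is unused.
 */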
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
	[0 ... NR_VECTORS - 1] = -1,
};

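/*
 * Return 1 if any online CPU has @vector mapped to an irq in its
 * vector_irq table, 0 otherwise.
 */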
int vector_used_by_percpu_irq(unsigned int vector)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (per_cpu(vector_irq, cpu)[vector] != -1)
			return 1;
	}

	return 0;
}

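/*
 * Initialize the BSP's local APIC (where configured) and the legacy PIC,
 * then attach the PIC's irq_chip and the level-triggered flow handler to
 * each legacy irq.
 */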
void __init init_ISA_irqs(void)
{
	struct irq_chip *chip = legacy_pic->chip;
	const char *name = chip->name;
	int i;

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
	init_bsp_APIC();
#endif
	legacy_pic->init(0);

	for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
		irq_set_chip_and_handler_name(i, chip, handle_level_irq, name);
}

void __init init_IRQ(void)
{
	int i;

	/*
	 * We probably need a better place for this, but it works for
	 * now ...
	 */
	x86_add_irq_domains();

	/*
	 * On cpu 0, assign IRQ0_VECTOR..IRQ15_VECTOR to IRQ 0..15.
	 * If these IRQs are handled by legacy interrupt controllers like the
	 * PIC, then this configuration will likely be static after boot. If
	 * these IRQs are handled by more modern controllers like the IO-APIC,
	 * then this vector space can be freed and re-used dynamically as the
	 * irqs migrate etc.
	 */
	for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
		per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;

	x86_init.irqs.intr_init();
}

/*
 * Set up the vector to irq mappings.
 */
void setup_vector_irq(int cpu)
{
#ifndef CONFIG_X86_IO_APIC
	int irq;

	/*
	 * On most platforms, the legacy PIC delivers the interrupts on the
	 * boot cpu. But there are certain platforms where PIC interrupts are
	 * delivered to multiple cpus. If the legacy IRQ is handled by the
	 * legacy PIC, then for the new cpu that is coming online, set up the
	 * static legacy vector to irq mapping:
	 */
	for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++)
		per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
#endif

	__setup_vector_irq(cpu);
}

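/*
 * Install the interrupt gates for the SMP IPIs: reschedule, TLB
 * invalidation, function call, irq-move cleanup and reboot/stop.
 */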
static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPIs for invalidation */
#define ALLOC_INVTLB_VEC(NR) \
	alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+NR, \
		invalidate_interrupt##NR)

	switch (NUM_INVALIDATE_TLB_VECTORS) {
	default:
		ALLOC_INVTLB_VEC(31);
	case 31:
		ALLOC_INVTLB_VEC(30);
	case 30:
		ALLOC_INVTLB_VEC(29);
	case 29:
		ALLOC_INVTLB_VEC(28);
	case 28:
		ALLOC_INVTLB_VEC(27);
	case 27:
		ALLOC_INVTLB_VEC(26);
	case 26:
		ALLOC_INVTLB_VEC(25);
	case 25:
		ALLOC_INVTLB_VEC(24);
	case 24:
		ALLOC_INVTLB_VEC(23);
	case 23:
		ALLOC_INVTLB_VEC(22);
	case 22:
		ALLOC_INVTLB_VEC(21);
	case 21:
		ALLOC_INVTLB_VEC(20);
	case 20:
		ALLOC_INVTLB_VEC(19);
	case 19:
		ALLOC_INVTLB_VEC(18);
	case 18:
		ALLOC_INVTLB_VEC(17);
	case 17:
		ALLOC_INVTLB_VEC(16);
	case 16:
		ALLOC_INVTLB_VEC(15);
	case 15:
		ALLOC_INVTLB_VEC(14);
	case 14:
		ALLOC_INVTLB_VEC(13);
	case 13:
		ALLOC_INVTLB_VEC(12);
	case 12:
		ALLOC_INVTLB_VEC(11);
	case 11:
		ALLOC_INVTLB_VEC(10);
	case 10:
		ALLOC_INVTLB_VEC(9);
	case 9:
		ALLOC_INVTLB_VEC(8);
	case 8:
		ALLOC_INVTLB_VEC(7);
	case 7:
		ALLOC_INVTLB_VEC(6);
	case 6:
		ALLOC_INVTLB_VEC(5);
	case 5:
		ALLOC_INVTLB_VEC(4);
	case 4:
		ALLOC_INVTLB_VEC(3);
	case 3:
		ALLOC_INVTLB_VEC(2);
	case 2:
		ALLOC_INVTLB_VEC(1);
	case 1:
		ALLOC_INVTLB_VEC(0);
		break;
	}

	/* IPI for generic function call */
	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

	/* IPI for generic single function call */
	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
			call_function_single_interrupt);

	/* Low priority IPI to cleanup after moving an irq */
	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
	set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);

	/* IPI used for rebooting/stopping */
	alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
#endif
#endif /* CONFIG_SMP */
}

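/*
 * Install the local APIC related gates: the SMP IPIs above plus, where
 * configured, the thermal, MCE-threshold, local APIC timer, platform IPI,
 * spurious, error and irq_work vectors.
 */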
static void __init apic_intr_init(void)
{
	smp_intr_init();

#ifdef CONFIG_X86_THERMAL_VECTOR
	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
	/* self generated IPI for local APIC timer */
	alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

	/* IPI for X86 platform specific use */
	alloc_intr_gate(X86_PLATFORM_IPI_VECTOR, x86_platform_ipi);

	/* IPI vectors for APIC spurious and error interrupts */
	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);

	/* IRQ work interrupts: */
# ifdef CONFIG_IRQ_WORK
	alloc_intr_gate(IRQ_WORK_VECTOR, irq_work_interrupt);
# endif

#endif
}

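/*
 * Default x86 irq setup: run platform quirks, install the APIC/IPI gates,
 * point every remaining external vector at its common interrupt stub, and
 * wire up the PIC cascade and FPU irqs where needed.
 */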
void __init native_init_IRQ(void)
{
	int i;

	/* Execute any quirks before the call gates are initialised: */
	x86_init.irqs.pre_vector_init();

	apic_intr_init();

	/*
	 * Cover the whole vector space; no vector can escape
	 * us. (Some of these will be overridden and become
	 * 'special' SMP interrupts.)
	 */
	for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
		/* IA32_SYSCALL_VECTOR could be used in trap_init already. */
		if (!test_bit(i, used_vectors))
			set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
	}

	if (!acpi_ioapic && !of_ioapic)
		setup_irq(2, &irq2);

#ifdef CONFIG_X86_32
	/*
	 * External FPU? Set up irq13 if so, for the
	 * original braindamaged IBM FERR coupling.
	 */
	if (boot_cpu_data.hard_math && !cpu_has_fpu)
		setup_irq(FPU_IRQ, &fpu_irq);

	irq_ctx_init(smp_processor_id());
#endif
}