// SPDX-License-Identifier: GPL-2.0-only
/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/irq.h>

#include <asm/irq_stack.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>
#include <asm/traps.h>
#include <asm/thermal.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

atomic_t irq_err_count;

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
        if (printk_ratelimit())
                pr_err("unexpected IRQ trap at vector %02x\n", irq);

        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        ack_APIC_irq();
}

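/* Convenience accessor: pointer to CPU @x's per-CPU interrupt statistics. */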
#define irq_stats(x)            (&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
        seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
        seq_puts(p, "  Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_puts(p, "  Spurious interrupts\n");
        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
        seq_puts(p, "  Performance monitoring interrupts\n");
        seq_printf(p, "%*s: ", prec, "IWI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
        seq_puts(p, "  IRQ work interrupts\n");
        seq_printf(p, "%*s: ", prec, "RTR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
        seq_puts(p, "  APIC ICR read retries\n");
        if (x86_platform_ipi_callback) {
                seq_printf(p, "%*s: ", prec, "PLT");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
                seq_puts(p, "  Platform interrupts\n");
        }
#endif
#ifdef CONFIG_SMP
        seq_printf(p, "%*s: ", prec, "RES");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
        seq_puts(p, "  Rescheduling interrupts\n");
        seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
        seq_puts(p, "  Function call interrupts\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        seq_printf(p, "%*s: ", prec, "TRM");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        seq_printf(p, "%*s: ", prec, "THR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
        seq_printf(p, "%*s: ", prec, "DFR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
        seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
        seq_puts(p, "  Machine check exceptions\n");
        seq_printf(p, "%*s: ", prec, "MCP");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
        seq_puts(p, "  Machine check polls\n");
#endif
#ifdef CONFIG_X86_HV_CALLBACK_VECTOR
        if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) {
                seq_printf(p, "%*s: ", prec, "HYP");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                   irq_stats(j)->irq_hv_callback_count);
                seq_puts(p, "  Hypervisor callback interrupts\n");
        }
#endif
#if IS_ENABLED(CONFIG_HYPERV)
        if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) {
                seq_printf(p, "%*s: ", prec, "HRE");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                   irq_stats(j)->irq_hv_reenlightenment_count);
                seq_puts(p, "  Hyper-V reenlightenment interrupts\n");
        }
        if (test_bit(HYPERV_STIMER0_VECTOR, system_vectors)) {
                seq_printf(p, "%*s: ", prec, "HVS");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                   irq_stats(j)->hyperv_stimer0_count);
                seq_puts(p, "  Hyper-V stimer0 interrupts\n");
        }
#endif
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
        seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
        seq_printf(p, "%*s: ", prec, "PIN");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
        seq_puts(p, "  Posted-interrupt notification event\n");

        seq_printf(p, "%*s: ", prec, "NPI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ",
                           irq_stats(j)->kvm_posted_intr_nested_ipis);
        seq_puts(p, "  Nested posted-interrupt event\n");

        seq_printf(p, "%*s: ", prec, "PIW");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ",
                           irq_stats(j)->kvm_posted_intr_wakeup_ipis);
        seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
        return 0;
}

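/*
 * Illustrative only: with two online CPUs the code above produces
 * /proc/interrupts lines of the form (label width comes from @prec,
 * one "%10u " column per online CPU, then a description):
 *
 *	NMI:          0          0   Non-maskable interrupts
 *	LOC:     123456     654321   Local timer interrupts
 *	ERR:          0
 */
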
/*
 * /proc/stat helpers: fs/proc/stat.c folds arch_irq_stat_cpu() for each
 * CPU plus arch_irq_stat() into the global "intr" count.
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->apic_timer_irqs;
        sum += irq_stats(cpu)->irq_spurious_count;
        sum += irq_stats(cpu)->apic_perf_irqs;
        sum += irq_stats(cpu)->apic_irq_work_irqs;
        sum += irq_stats(cpu)->icr_read_retry_count;
        if (x86_platform_ipi_callback)
                sum += irq_stats(cpu)->x86_platform_ipis;
#endif
#ifdef CONFIG_SMP
        sum += irq_stats(cpu)->irq_resched_count;
        sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
        sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
        sum += per_cpu(mce_exception_count, cpu);
        sum += per_cpu(mce_poll_count, cpu);
#endif
        return sum;
}

u64 arch_irq_stat(void)
{
        u64 sum = atomic_read(&irq_err_count);

        return sum;
}

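/*
 * On 64-bit, the DEFINE_IDTENTRY_IRQ entry path has already switched to
 * the per-CPU hardirq stack, so the descriptor's handler can be invoked
 * directly. 32-bit has no entry-time stack switch; __handle_irq() (in
 * irq_32.c) moves to the hardirq stack itself when required.
 */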
static __always_inline void handle_irq(struct irq_desc *desc,
                                       struct pt_regs *regs)
{
        if (IS_ENABLED(CONFIG_X86_64))
                generic_handle_irq_desc(desc);
        else
                __handle_irq(desc, regs);
}

/*
 * common_interrupt() handles all normal device IRQ's (the special SMP
 * cross-CPU interrupts have their own entry points). The @vector argument
 * used below is supplied by the DEFINE_IDTENTRY_IRQ wrapper, which takes
 * it from the vector number the entry stub pushed in the error-code slot.
 */
DEFINE_IDTENTRY_IRQ(common_interrupt)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        struct irq_desc *desc;

        /* entry code tells RCU that we're not quiescent. Check it. */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

        desc = __this_cpu_read(vector_irq[vector]);
        if (likely(!IS_ERR_OR_NULL(desc))) {
                handle_irq(desc, regs);
        } else {
                ack_APIC_irq();

                if (desc == VECTOR_UNUSED) {
                        pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
                                             __func__, smp_processor_id(),
                                             vector);
                } else {
                        /*
                         * Error-coded sentinels other than VECTOR_UNUSED
                         * (VECTOR_SHUTDOWN, VECTOR_RETRIGGERED) are expected
                         * leftovers of vector teardown or CPU unplug; reset
                         * the slot quietly.
                         */
                        __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
                }
        }

        set_irq_regs(old_regs);
}

#ifdef CONFIG_X86_LOCAL_APIC
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        ack_APIC_irq();
        trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
        inc_irq_stat(x86_platform_ipis);
        if (x86_platform_ipi_callback)
                x86_platform_ipi_callback();
        trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
        set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
        if (handler)
                kvm_posted_intr_wakeup_handler = handler;
        else {
                kvm_posted_intr_wakeup_handler = dummy_handler;
                /*
                 * Ensure any in-flight wakeup IPIs have finished with the
                 * old handler before it can go away (e.g. module unload);
                 * IRQ handlers run in an RCU read-side critical section,
                 * so synchronize_rcu() waits them out.
                 */
                synchronize_rcu();
        }
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);

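/*
 * Usage sketch (hypothetical caller, not part of this file): a consumer of
 * the posted-interrupt wakeup vector installs its handler once and must
 * reset it before the handler code can go away:
 *
 *	kvm_set_posted_intr_wakeup_handler(my_wakeup_handler);
 *	...
 *	kvm_set_posted_intr_wakeup_handler(NULL);	(resets and waits)
 */
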
/*
 * Handler for POSTED_INTERRUPT_VECTOR. The _SIMPLE idtentry variant omits
 * irq_enter/exit_rcu() and softirq processing, which suffices here since
 * the handler only acks the APIC and bumps a counter.
 */
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi)
{
        ack_APIC_irq();
        inc_irq_stat(kvm_posted_intr_ipis);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi)
{
        ack_APIC_irq();
        inc_irq_stat(kvm_posted_intr_wakeup_ipis);
        kvm_posted_intr_wakeup_handler();
}

/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */
DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
{
        ack_APIC_irq();
        inc_irq_stat(kvm_posted_intr_nested_ipis);
}
#endif


#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irr, vector;
        struct irq_desc *desc;
        struct irq_data *data;
        struct irq_chip *chip;

        irq_migrate_all_off_this_cpu();

        /*
         * We can remove mdelay() and then send spurious interrupts to
         * new cpu targets for all the irqs that were handled previously by
         * this cpu. While it works, I have seen spurious interrupt messages
         * (nothing wrong but still...).
         *
         * So for now, retain mdelay(1) and check the IRR and then send those
         * interrupts to new targets as this cpu is already offlined...
         */
        mdelay(1);

        /*
         * We can walk the vector array of this cpu without holding
         * vector_lock because the cpu is already marked !online, so
         * nothing else will touch it.
         */
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
                        continue;

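                /*
                 * The 256-bit IRR is exposed as eight 32-bit APIC registers
                 * spaced 0x10 apart: vector / 32 selects the register,
                 * vector % 32 the bit within it.
                 */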
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr & (1 << (vector % 32))) {
                        desc = __this_cpu_read(vector_irq[vector]);

                        raw_spin_lock(&desc->lock);
                        data = irq_desc_get_irq_data(desc);
                        chip = irq_data_get_irq_chip(data);
                        if (chip->irq_retrigger) {
                                chip->irq_retrigger(data);
                                __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
                        }
                        raw_spin_unlock(&desc->lock);
                }
                if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
                        __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
        }
}
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
static void smp_thermal_vector(void)
{
        if (x86_thermal_enabled())
                intel_thermal_interrupt();
        else
                pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
                       smp_processor_id());
}

DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
{
        trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
        inc_irq_stat(irq_thermal_count);
        smp_thermal_vector();
        trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
        ack_APIC_irq();
}
#endif