/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>

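/*
 * CREATE_TRACE_POINTS must be defined in exactly one translation unit
 * before the trace header is included, so the tracepoint bodies are
 * emitted here; every other file that includes the header only gets
 * the declarations.
 */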
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}

#define irq_stats(x)	(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_printf(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_printf(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_printf(p, "  APIC ICR read retries\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_printf(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
					irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_printf(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_printf(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_printf(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_printf(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
	seq_printf(p, "%*s: ", prec, "HYP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
	seq_printf(p, "  Hypervisor callback interrupts\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}
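
/*
 * For illustration only (the values are made up, not from a real
 * system): with two online cpus the function above contributes lines
 * like these to /proc/interrupts:
 *
 *   NMI:          0          0   Non-maskable interrupts
 *   LOC:     123456     654321   Local timer interrupts
 *   RES:       1024       2048   Rescheduling interrupts
 *   ERR:          0
 */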

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
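	/*
	 * irq_call_count already includes the TLB shootdown IPIs, which
	 * is why irq_tlb_count is not added separately here (the display
	 * code above subtracts it from CAL instead).
	 */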
	sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);
	return sum;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	irq_enter();
	exit_idle();

	irq = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(irq, regs)) {
		ack_APIC_irq();

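		/*
		 * VECTOR_RETRIGGERED means fixup_irqs() already re-raised
		 * this vector on a new target cpu, so a late arrival here
		 * is expected: reclaim the slot quietly instead of
		 * reporting an unhandled vector.
		 */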
		if (irq != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector (irq %d)\n",
					     __func__, smp_processor_id(),
					     vector, irq);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
		}
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
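/*
 * The body lives in a helper so that the plain and the traced entry
 * points below can share it.
 */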
void __smp_x86_platform_ipi(void)
{
	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
}

__visible void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	__smp_x86_platform_ipi();
	exiting_irq();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HAVE_KVM
/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
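/*
 * Beyond the ack and the statistic there is nothing to do here: the
 * interrupt payload sits in the posted-interrupt descriptor and is
 * picked up by KVM on the next VM entry; the vector only serves to
 * kick the cpu.
 */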
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();

	irq_enter();

	exit_idle();

	inc_irq_stat(kvm_posted_intr_ipis);

	irq_exit();

	set_irq_regs(old_regs);
}
#endif

__visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	__smp_x86_platform_ipi();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * These two declarations are only used in check_irq_vectors_for_cpu_disable()
 * below, which is protected by stop_machine(). Putting them on the stack
 * results in a stack frame overflow. Dynamically allocating could result in a
 * failure so declare these two cpumasks as global.
 */
static struct cpumask affinity_new, online_new;

/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus. Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
	int irq, cpu;
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;

	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
	cpu_clear(this_cpu, online_new);

	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		irq = __this_cpu_read(vector_irq[vector]);
		if (irq >= 0) {
			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			cpumask_copy(&affinity_new, data->affinity);
			cpu_clear(this_cpu, affinity_new);

			/* Do not count inactive or per-cpu irqs. */
			if (!irq_has_action(irq) || irqd_is_per_cpu(data))
				continue;

			/*
			 * A single irq may be mapped to multiple
			 * cpu's vector_irq[] (for example IOAPIC cluster
			 * mode). In this case we have two
			 * possibilities:
			 *
			 * 1) the resulting affinity mask is empty; that is,
			 * the down'd cpu is the last cpu in the irq's
			 * affinity mask, or
			 *
			 * 2) the resulting affinity mask is no longer
			 * a subset of the online cpus but the affinity
			 * mask is not zero; that is, the down'd cpu is the
			 * last online cpu in a user set affinity mask.
			 */
			if (cpumask_empty(&affinity_new) ||
			    !cpumask_subset(&affinity_new, &online_new))
				this_count++;
		}
	}

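	/*
	 * Now count how many unused vectors the remaining online cpus
	 * can still offer.
	 */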
	count = 0;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We scan from FIRST_EXTERNAL_VECTOR to first system
		 * vector. If the vector is marked in the used vectors
		 * bitmap or an irq is assigned to it, we don't count
		 * it as available.
		 */
		for (vector = FIRST_EXTERNAL_VECTOR;
		     vector < first_system_vector; vector++) {
			if (!test_bit(vector, used_vectors) &&
			    per_cpu(vector_irq, cpu)[vector] < 0)
				count++;
		}
	}

	if (count < this_count) {
		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
			this_cpu, this_count, count);
		return -ERANGE;
	}
	return 0;
}

/* A cpu has been removed from cpu_online_mask. Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;

		/* interrupts are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete the irq move. This cpu is going down and for
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_online_mask;
		}

		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;

		/*
		 * We unmask if the irq was not marked masked by the
		 * core code. That respects the lazy irq disable
		 * behaviour.
		 */
		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			pr_notice("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			pr_notice("Cannot set affinity for irq %i\n", irq);
	}

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		if (__this_cpu_read(vector_irq[vector]) <= VECTOR_UNDEFINED)
			continue;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			irq = __this_cpu_read(vector_irq[vector]);

			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			raw_spin_lock(&desc->lock);
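			/*
			 * Retrigger the irq on its new target and mark the
			 * stale vector_irq[] slot VECTOR_RETRIGGERED so that
			 * do_IRQ() drops a late arrival of the old vector
			 * silently.
			 */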
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
	}
}
#endif