1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Derived from arch/i386/kernel/irq.c
4 * Copyright (C) 1992 Linus Torvalds
5 * Adapted from arch/i386 by Gary Thomas
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
8 * Copyright (C) 1996-2001 Cort Dougan
9 * Adapted for Power Macintosh by Paul Mackerras
10 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
11 *
12 * This file contains the code used by various IRQ handling routines:
13 * asking for different IRQs should be done through these routines
14 * instead of just grabbing them. Thus setups with different IRQ numbers
15 * shouldn't result in any weird surprises, and installing new handlers
16 * should be easier.
17 *
18 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
19 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
20 * mask register (of which only 16 are defined), hence the weird shifting
21 * and complement of the cached_irq_mask. I want to be able to stuff
22 * this right into the SIU SMASK register.
23 * Many of the prep/chrp functions are conditionally compiled on CONFIG_PPC_8xx
24 * to reduce code space and undefined function references.
25 */
26
27#undef DEBUG
28
29#include <linux/export.h>
30#include <linux/threads.h>
31#include <linux/kernel_stat.h>
32#include <linux/signal.h>
33#include <linux/sched.h>
34#include <linux/ptrace.h>
35#include <linux/ioport.h>
36#include <linux/interrupt.h>
37#include <linux/timex.h>
38#include <linux/init.h>
39#include <linux/slab.h>
40#include <linux/delay.h>
41#include <linux/irq.h>
42#include <linux/seq_file.h>
43#include <linux/cpumask.h>
44#include <linux/profile.h>
45#include <linux/bitops.h>
46#include <linux/list.h>
47#include <linux/radix-tree.h>
48#include <linux/mutex.h>
49#include <linux/pci.h>
50#include <linux/debugfs.h>
51#include <linux/of.h>
52#include <linux/of_irq.h>
53#include <linux/vmalloc.h>
54#include <linux/pgtable.h>
55
56#include <linux/uaccess.h>
57#include <asm/io.h>
58#include <asm/irq.h>
59#include <asm/cache.h>
60#include <asm/prom.h>
61#include <asm/ptrace.h>
62#include <asm/machdep.h>
63#include <asm/udbg.h>
64#include <asm/smp.h>
65#include <asm/livepatch.h>
66#include <asm/asm-prototypes.h>
67#include <asm/hw_irq.h>
68
69#ifdef CONFIG_PPC64
70#include <asm/paca.h>
71#include <asm/firmware.h>
72#include <asm/lv1call.h>
73#include <asm/dbell.h>
74#endif
75#define CREATE_TRACE_POINTS
76#include <asm/trace.h>
77#include <asm/cpu_has_feature.h>
78
79DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
80EXPORT_PER_CPU_SYMBOL(irq_stat);
81
82#ifdef CONFIG_PPC32
83atomic_t ppc_n_lost_interrupts;
84
85#ifdef CONFIG_TAU_INT
86extern int tau_initialized;
87u32 tau_interrupts(unsigned long cpu);
88#endif
89#endif /* CONFIG_PPC32 */
90
91#ifdef CONFIG_PPC64
92
93int distribute_irqs = 1;
94
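/*
 * Fetch paca->irq_happened directly through r13 (the PACA pointer),
 * avoiding the debug_smp_processor_id() business a get_paca() based
 * access would add to this hot path.
 */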
95static inline notrace unsigned long get_irq_happened(void)
96{
97 unsigned long happened;
98
99 __asm__ __volatile__("lbz %0,%1(13)"
100 : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));
101
102 return happened;
103}
104
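/*
 * Return true if the timebase has passed this CPU's next scheduled
 * decrementer event, i.e. a timer interrupt may have been missed while
 * interrupts were hard disabled.
 */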
105static inline notrace int decrementer_check_overflow(void)
106{
107 u64 now = get_tb_or_rtc();
108 u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
109
110 return now >= *next_tb;
111}
112
113#ifdef CONFIG_PPC_BOOK3E
114
115/* This is called whenever we are re-enabling interrupts
116 * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if
117 * there's an EE, DEC or DBELL to generate.
118 *
119 * This is called in two contexts: From arch_local_irq_restore()
120 * before soft-enabling interrupts, and from the exception exit
121 * path when returning from an interrupt from a soft-disabled to
122 * a soft-enabled context. In both cases we have interrupts hard
123 * disabled.
124 *
125 * We take care of only clearing the bits we handled in the
126 * PACA irq_happened field since we can only re-emit one at a
127 * time and we don't want to "lose" one.
128 */
129notrace unsigned int __check_irq_replay(void)
130{
131 /*
132 * We use local_paca rather than get_paca() to avoid all
133 * the debug_smp_processor_id() business in this low level
134 * function
135 */
136 unsigned char happened = local_paca->irq_happened;
137
138 /*
139 * We are responding to the next interrupt, so interrupt-off
140 * latencies should be reset here.
141 */
142 trace_hardirqs_on();
143 trace_hardirqs_off();
144
145 /*
146 * We are always hard disabled here, but PACA_IRQ_HARD_DIS may
147 * not be set, which means interrupts have only just been hard
148 * disabled as part of the local_irq_restore or interrupt return
149 * code. In that case, skip the decrementer check because it's
150 * expensive to read the TB.
151 *
152 * HARD_DIS then gets cleared here, but it's reconciled later.
153 * Either local_irq_disable will replay the interrupt and that
154 * will reconcile state like other hard interrupts. Or interrupt
155 * return will replay the interrupt and in that case it sets
156 * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S).
157 */
158 if (happened & PACA_IRQ_HARD_DIS) {
159 local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
160
161 /*
162 * We may have missed a decrementer interrupt if hard disabled.
163 * Check the decrementer register in case we had a rollover
164 * while hard disabled.
165 */
166 if (!(happened & PACA_IRQ_DEC)) {
167 if (decrementer_check_overflow()) {
168 local_paca->irq_happened |= PACA_IRQ_DEC;
169 happened |= PACA_IRQ_DEC;
170 }
171 }
172 }
173
174 if (happened & PACA_IRQ_DEC) {
175 local_paca->irq_happened &= ~PACA_IRQ_DEC;
176 return 0x900;
177 }
178
179 if (happened & PACA_IRQ_EE) {
180 local_paca->irq_happened &= ~PACA_IRQ_EE;
181 return 0x500;
182 }
183
184 /*
185 * Check if an EPR external interrupt happened. This bit is typically
186 * set if we need to handle another "edge" interrupt from within the
187 * MPIC "EPR" handler.
188 */
189 if (happened & PACA_IRQ_EE_EDGE) {
190 local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
191 return 0x500;
192 }
193
194 if (happened & PACA_IRQ_DBELL) {
195 local_paca->irq_happened &= ~PACA_IRQ_DBELL;
196 return 0x280;
197 }
198
199 /* There should be nothing left ! */
200 BUG_ON(local_paca->irq_happened != 0);
201
202 return 0;
203}
204#endif /* CONFIG_PPC_BOOK3E */
205
206void replay_soft_interrupts(void)
207{
208 /*
209 * We use local_paca rather than get_paca() to avoid all
210 * the debug_smp_processor_id() business in this low level
211 * function
212 */
213 unsigned char happened = local_paca->irq_happened;
214 struct pt_regs regs;
215
216 ppc_save_regs(&regs);
217 regs.softe = IRQS_ALL_DISABLED;
218
219again:
220 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
221 WARN_ON_ONCE(mfmsr() & MSR_EE);
222
223 if (happened & PACA_IRQ_HARD_DIS) {
224 /*
225 * We may have missed a decrementer interrupt if hard disabled.
226 * Check the decrementer register in case we had a rollover
227 * while hard disabled.
228 */
229 if (!(happened & PACA_IRQ_DEC)) {
230 if (decrementer_check_overflow())
231 happened |= PACA_IRQ_DEC;
232 }
233 }
234
235 /*
236 * Force the delivery of pending soft-disabled interrupts on PS3.
237 * Any HV call will have this side effect.
238 */
239 if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
240 u64 tmp, tmp2;
241 lv1_get_version_info(&tmp, &tmp2);
242 }
243
244 /*
245 * Check if a hypervisor Maintenance interrupt happened.
246 * This is a higher priority interrupt than the others, so
247 * replay it first.
248 */
249 if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_HMI)) {
250 local_paca->irq_happened &= ~PACA_IRQ_HMI;
251 regs.trap = 0xe60;
252 handle_hmi_exception(&regs);
253 if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
254 hard_irq_disable();
255 }
256
257 if (happened & PACA_IRQ_DEC) {
258 local_paca->irq_happened &= ~PACA_IRQ_DEC;
259 regs.trap = 0x900;
260 timer_interrupt(&regs);
261 if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
262 hard_irq_disable();
263 }
264
265 if (happened & PACA_IRQ_EE) {
266 local_paca->irq_happened &= ~PACA_IRQ_EE;
267 regs.trap = 0x500;
268 do_IRQ(&regs);
269 if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
270 hard_irq_disable();
271 }
272
273 /*
274 * Check if an EPR external interrupt happened. This bit is typically
275 * set if we need to handle another "edge" interrupt from within the
276 * MPIC "EPR" handler.
277 */
278 if (IS_ENABLED(CONFIG_PPC_BOOK3E) && (happened & PACA_IRQ_EE_EDGE)) {
279 local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
280 regs.trap = 0x500;
281 do_IRQ(&regs);
282 if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
283 hard_irq_disable();
284 }
285
286 if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (happened & PACA_IRQ_DBELL)) {
287 local_paca->irq_happened &= ~PACA_IRQ_DBELL;
288 if (IS_ENABLED(CONFIG_PPC_BOOK3E))
289 regs.trap = 0x280;
290 else
291 regs.trap = 0xa00;
292 doorbell_exception(&regs);
293 if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
294 hard_irq_disable();
295 }
296
297 /* Book3E does not support soft-masking PMI interrupts */
298 if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (happened & PACA_IRQ_PMI)) {
299 local_paca->irq_happened &= ~PACA_IRQ_PMI;
300 regs.trap = 0xf00;
301 performance_monitor_exception(&regs);
302 if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
303 hard_irq_disable();
304 }
305
306 happened = local_paca->irq_happened;
307 if (happened & ~PACA_IRQ_HARD_DIS) {
308 /*
309 * We are responding to the next interrupt, so interrupt-off
310 * latencies should be reset here.
311 */
312 trace_hardirqs_on();
313 trace_hardirqs_off();
314 goto again;
315 }
316}
317
318notrace void arch_local_irq_restore(unsigned long mask)
319{
320 unsigned char irq_happened;
321
322 /* Write the new soft-enabled value */
323 irq_soft_mask_set(mask);
324 if (mask)
325 return;
326
327 /*
328 * From this point onward, we can take interrupts, preempt,
329 * etc... unless we got hard-disabled. We check if an event
330 * happened. If none happened, we know we can just return.
331 *
332 * We may have preempted before the check below, in which case
333 * we are checking the "new" CPU instead of the old one. This
334 * is only a problem if an event happened on the "old" CPU.
335 *
336 * External interrupt events will have caused interrupts to
337 * be hard-disabled, so there is no problem, we
338 * cannot have preempted.
339 */
340 irq_happened = get_irq_happened();
341 if (!irq_happened) {
342 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
343 WARN_ON_ONCE(!(mfmsr() & MSR_EE));
344 return;
345 }
346
347 /* We need to hard disable to replay. */
348 if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
349 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
350 WARN_ON_ONCE(!(mfmsr() & MSR_EE));
351 __hard_irq_disable();
352 } else {
353 /*
354 * We should already be hard disabled here. We had bugs
355 * where that wasn't the case, so let's double-check it and
356 * warn if we are wrong. Only do that when IRQ tracing
357 * is enabled as mfmsr() can be costly.
358 */
359 if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
360 if (WARN_ON_ONCE(mfmsr() & MSR_EE))
361 __hard_irq_disable();
362 }
363
364 if (irq_happened == PACA_IRQ_HARD_DIS) {
365 local_paca->irq_happened = 0;
366 __hard_irq_enable();
367 return;
368 }
369 }
370
371 irq_soft_mask_set(IRQS_ALL_DISABLED);
372 trace_hardirqs_off();
373
374 replay_soft_interrupts();
375 local_paca->irq_happened = 0;
376
377 trace_hardirqs_on();
378 irq_soft_mask_set(IRQS_ENABLED);
379 __hard_irq_enable();
380}
381EXPORT_SYMBOL(arch_local_irq_restore);
382
383/*
384 * This is specifically called by assembly code to re-enable interrupts
385 * if they are currently disabled. This is typically called before
386 * schedule() or do_signal() when returning to userspace. We do it
387 * in C to avoid the burden of dealing with lockdep etc...
388 *
389 * NOTE: This is called with interrupts hard disabled but not marked
390 * as such in paca->irq_happened, so we need to resync this.
391 */
392void notrace restore_interrupts(void)
393{
394 if (irqs_disabled()) {
395 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
396 local_irq_enable();
397 } else
398 __hard_irq_enable();
399}
400
401/*
402 * This is a helper to use when about to go into idle low-power
403 * when the latter has the side effect of re-enabling interrupts
404 * (such as calling H_CEDE under pHyp).
405 *
406 * You call this function with interrupts soft-disabled (this is
407 * already the case when ppc_md.power_save is called). The function
408 * will return whether to enter power save or just return.
409 *
410 * In the former case, it will have notified lockdep of interrupts
411 * being re-enabled and generally sanitized the lazy irq state,
412 * and in the latter case it will leave with interrupts hard
413 * disabled and marked as such, so the local_irq_enable() call
414 * in arch_cpu_idle() will properly re-enable everything.
415 */
416bool prep_irq_for_idle(void)
417{
418 /*
419 * First we need to hard disable to ensure no interrupt
420 * occurs before we effectively enter the low power state
421 */
422 __hard_irq_disable();
423 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
424
425 /*
426 * If anything happened while we were soft-disabled,
427 * we return now and do not enter the low power state.
428 */
429 if (lazy_irq_pending())
430 return false;
431
432 /* Tell lockdep we are about to re-enable */
433 trace_hardirqs_on();
434
435 /*
436 * Mark interrupts as soft-enabled and clear the
437 * PACA_IRQ_HARD_DIS from the pending mask since we
438 * are about to hard enable as well as a side effect
439 * of entering the low power state.
440 */
441 local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
442 irq_soft_mask_set(IRQS_ENABLED);
443
444 /* Tell the caller to enter the low power state */
445 return true;
446}
447
448#ifdef CONFIG_PPC_BOOK3S
449/*
450 * This is for idle sequences that return with IRQs off, but the
451 * idle state itself wakes on interrupt. Tell the irq tracer that
452 * IRQs are enabled for the duration of idle so it does not get long
453 * off times. Must be paired with fini_irq_for_idle_irqsoff.
454 */
455bool prep_irq_for_idle_irqsoff(void)
456{
457 WARN_ON(!irqs_disabled());
458
459 /*
460 * First we need to hard disable to ensure no interrupt
461 * occurs before we effectively enter the low power state
462 */
463 __hard_irq_disable();
464 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
465
466 /*
467 * If anything happened while we were soft-disabled,
468 * we return now and do not enter the low power state.
469 */
470 if (lazy_irq_pending())
471 return false;
472
473 /* Tell lockdep we are about to re-enable */
474 trace_hardirqs_on();
475
476 return true;
477}
478
479/*
480 * Take the SRR1 wakeup reason, index into this table to find the
481 * appropriate irq_happened bit.
482 *
483 * System reset exceptions taken in idle state also come through here,
484 * but they are NMI interrupts so do not need to wait for IRQs to be
485 * restored, and should be taken as early as practical. These are marked
486 * with 0xff in the table. The Power ISA specifies 0100b as the system
487 * reset interrupt reason.
488 */
489#define IRQ_SYSTEM_RESET 0xff
490
491static const u8 srr1_to_lazyirq[0x10] = {
492 0, 0, 0,
493 PACA_IRQ_DBELL,
494 IRQ_SYSTEM_RESET,
495 PACA_IRQ_DBELL,
496 PACA_IRQ_DEC,
497 0,
498 PACA_IRQ_EE,
499 PACA_IRQ_EE,
500 PACA_IRQ_HMI,
501 0, 0, 0, 0, 0 };
502
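/*
 * Replay a system reset that was reported as an idle wakeup reason.
 * System reset is an NMI, so it is taken immediately rather than being
 * deferred until interrupts are re-enabled.
 */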
503void replay_system_reset(void)
504{
505 struct pt_regs regs;
506
507 ppc_save_regs(&regs);
508 regs.trap = 0x100;
509 get_paca()->in_nmi = 1;
510 system_reset_exception(&regs);
511 get_paca()->in_nmi = 0;
512}
513EXPORT_SYMBOL_GPL(replay_system_reset);
514
515void irq_set_pending_from_srr1(unsigned long srr1)
516{
517 unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
518 u8 reason = srr1_to_lazyirq[idx];
519
520 /*
521 * Take the system reset now, which is immediately after registers
522 * are restored from idle. It's an NMI, so interrupts need not be
523 * re-enabled before it is taken.
524 */
525 if (unlikely(reason == IRQ_SYSTEM_RESET)) {
526 replay_system_reset();
527 return;
528 }
529
530 if (reason == PACA_IRQ_DBELL) {
531 /*
532 * When doorbell triggers a system reset wakeup, the message
533 * is not cleared, so if the doorbell interrupt is replayed
534 * and the IPI handled, the doorbell interrupt would still
535 * fire when EE is enabled.
536 *
537 * To avoid taking the superfluous doorbell interrupt,
538 * execute a msgclr here before the interrupt is replayed.
539 */
540 ppc_msgclr(PPC_DBELL_MSGTYPE);
541 }
542
543 /*
544 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
545 * so this can be called unconditionally with the SRR1 wake
546 * reason as returned by the idle code, which uses 0 to mean no
547 * interrupt.
548 *
549 * If a future CPU was to designate this as an interrupt reason,
550 * then a new index for no interrupt must be assigned.
551 */
552 local_paca->irq_happened |= reason;
553}
554#endif /* CONFIG_PPC_BOOK3S */
555
556/*
557 * Force a replay of the external interrupt handler on this CPU.
558 */
559void force_external_irq_replay(void)
560{
561 /*
562 * This must only be called with interrupts soft-disabled,
563 * the replay will happen when re-enabling.
564 */
565 WARN_ON(!arch_irqs_disabled());
566
567 /*
568 * Interrupts must always be hard disabled before irq_happened is
569 * modified (to prevent lost update in case of interrupt between
570 * load and store).
571 */
572 __hard_irq_disable();
573 local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
574
575 /* Indicate in the PACA that we have an interrupt to replay */
576 local_paca->irq_happened |= PACA_IRQ_EE;
577}
578
579#endif /* CONFIG_PPC64 */
580
581int arch_show_interrupts(struct seq_file *p, int prec)
582{
583 int j;
584
585#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
586 if (tau_initialized) {
587 seq_printf(p, "%*s: ", prec, "TAU");
588 for_each_online_cpu(j)
589 seq_printf(p, "%10u ", tau_interrupts(j));
590 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
591 }
592#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
593
594 seq_printf(p, "%*s: ", prec, "LOC");
595 for_each_online_cpu(j)
596 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
597 seq_printf(p, " Local timer interrupts for timer event device\n");
598
599 seq_printf(p, "%*s: ", prec, "BCT");
600 for_each_online_cpu(j)
601 seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
602 seq_printf(p, " Broadcast timer interrupts for timer event device\n");
603
604 seq_printf(p, "%*s: ", prec, "LOC");
605 for_each_online_cpu(j)
606 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
607 seq_printf(p, " Local timer interrupts for others\n");
608
609 seq_printf(p, "%*s: ", prec, "SPU");
610 for_each_online_cpu(j)
611 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
612 seq_printf(p, " Spurious interrupts\n");
613
614 seq_printf(p, "%*s: ", prec, "PMI");
615 for_each_online_cpu(j)
616 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
617 seq_printf(p, " Performance monitoring interrupts\n");
618
619 seq_printf(p, "%*s: ", prec, "MCE");
620 for_each_online_cpu(j)
621 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
622 seq_printf(p, " Machine check exceptions\n");
623
624#ifdef CONFIG_PPC_BOOK3S_64
625 if (cpu_has_feature(CPU_FTR_HVMODE)) {
626 seq_printf(p, "%*s: ", prec, "HMI");
627 for_each_online_cpu(j)
628 seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs);
629 seq_printf(p, " Hypervisor Maintenance Interrupts\n");
630 }
631#endif
632
633 seq_printf(p, "%*s: ", prec, "NMI");
634 for_each_online_cpu(j)
635 seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
636 seq_printf(p, " System Reset interrupts\n");
637
638#ifdef CONFIG_PPC_WATCHDOG
639 seq_printf(p, "%*s: ", prec, "WDG");
640 for_each_online_cpu(j)
641 seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
642 seq_printf(p, " Watchdog soft-NMI interrupts\n");
643#endif
644
645#ifdef CONFIG_PPC_DOORBELL
646 if (cpu_has_feature(CPU_FTR_DBELL)) {
647 seq_printf(p, "%*s: ", prec, "DBL");
648 for_each_online_cpu(j)
649 seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
650 seq_printf(p, " Doorbell interrupts\n");
651 }
652#endif
653
654 return 0;
655}
656
657/*
658 * /proc/stat helpers
659 */
660u64 arch_irq_stat_cpu(unsigned int cpu)
661{
662 u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;
663
664 sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
665 sum += per_cpu(irq_stat, cpu).pmu_irqs;
666 sum += per_cpu(irq_stat, cpu).mce_exceptions;
667 sum += per_cpu(irq_stat, cpu).spurious_irqs;
668 sum += per_cpu(irq_stat, cpu).timer_irqs_others;
669#ifdef CONFIG_PPC_BOOK3S_64
670 sum += paca_ptrs[cpu]->hmi_irqs;
671#endif
672 sum += per_cpu(irq_stat, cpu).sreset_irqs;
673#ifdef CONFIG_PPC_WATCHDOG
674 sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
675#endif
676#ifdef CONFIG_PPC_DOORBELL
677 sum += per_cpu(irq_stat, cpu).doorbell_irqs;
678#endif
679
680 return sum;
681}
682
683static inline void check_stack_overflow(void)
684{
685 long sp;
686
687 if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
688 return;
689
690 sp = current_stack_pointer & (THREAD_SIZE - 1);
691
692 /* check for stack overflow: is there less than 2KB free? */
693 if (unlikely(sp < 2048)) {
694 pr_err("do_IRQ: stack overflow: %ld\n", sp);
695 dump_stack();
696 }
697}
698
699void __do_irq(struct pt_regs *regs)
700{
701 unsigned int irq;
702
703 irq_enter();
704
705 trace_irq_entry(regs);
706
707 /*
708 * Query the platform PIC for the interrupt & ack it.
709 *
710 * This will typically lower the interrupt line to the CPU
711 */
712 irq = ppc_md.get_irq();
713
714 /* We can hard enable interrupts now to allow perf interrupts */
715 may_hard_irq_enable();
716
717 /* And finally process it */
718 if (unlikely(!irq))
719 __this_cpu_inc(irq_stat.spurious_irqs);
720 else
721 generic_handle_irq(irq);
722
723 trace_irq_exit(regs);
724
725 irq_exit();
726}
727
728void do_IRQ(struct pt_regs *regs)
729{
730 struct pt_regs *old_regs = set_irq_regs(regs);
731 void *cursp, *irqsp, *sirqsp;
732
733 /* Switch to the irq stack to handle this */
734 cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
735 irqsp = hardirq_ctx[raw_smp_processor_id()];
736 sirqsp = softirq_ctx[raw_smp_processor_id()];
737
738 check_stack_overflow();
739
740 /* Already there ? */
741 if (unlikely(cursp == irqsp || cursp == sirqsp)) {
742 __do_irq(regs);
743 set_irq_regs(old_regs);
744 return;
745 }
746 /* Switch stack and call */
747 call_do_irq(regs, irqsp);
748
749 set_irq_regs(old_regs);
750}
751
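/*
 * With CONFIG_VMAP_STACK, the per-CPU IRQ stacks are allocated from vmalloc
 * space so that an overflow faults on a guard page instead of silently
 * corrupting adjacent memory.
 */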
752static void *__init alloc_vm_stack(void)
753{
754 return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
755 NUMA_NO_NODE, (void *)_RET_IP_);
756}
757
758static void __init vmap_irqstack_init(void)
759{
760 int i;
761
762 for_each_possible_cpu(i) {
763 softirq_ctx[i] = alloc_vm_stack();
764 hardirq_ctx[i] = alloc_vm_stack();
765 }
766}
767
768
769void __init init_IRQ(void)
770{
771 if (IS_ENABLED(CONFIG_VMAP_STACK))
772 vmap_irqstack_init();
773
774 if (ppc_md.init_IRQ)
775 ppc_md.init_IRQ();
776}
777
778#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
779void *critirq_ctx[NR_CPUS] __read_mostly;
780void *dbgirq_ctx[NR_CPUS] __read_mostly;
781void *mcheckirq_ctx[NR_CPUS] __read_mostly;
782#endif
783
784void *softirq_ctx[NR_CPUS] __read_mostly;
785void *hardirq_ctx[NR_CPUS] __read_mostly;
786
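/* Run softirq processing on this CPU's dedicated softirq stack. */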
787void do_softirq_own_stack(void)
788{
789 call_do_softirq(softirq_ctx[smp_processor_id()]);
790}
791
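/* Translate a Linux virtual irq number back to its hardware irq number. */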
792irq_hw_number_t virq_to_hw(unsigned int virq)
793{
794 struct irq_data *irq_data = irq_get_irq_data(virq);
795 return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
796}
797EXPORT_SYMBOL_GPL(virq_to_hw);
798
799#ifdef CONFIG_SMP
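/*
 * Pick a target CPU for an interrupt: round-robin across all online CPUs
 * when the affinity mask covers every online CPU, otherwise the first
 * online CPU in the mask. Returns the hard (physical) processor id.
 */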
800int irq_choose_cpu(const struct cpumask *mask)
801{
802 int cpuid;
803
804 if (cpumask_equal(mask, cpu_online_mask)) {
805 static int irq_rover;
806 static DEFINE_RAW_SPINLOCK(irq_rover_lock);
807 unsigned long flags;
808
809 /* Round-robin distribution... */
810do_round_robin:
811 raw_spin_lock_irqsave(&irq_rover_lock, flags);
812
813 irq_rover = cpumask_next(irq_rover, cpu_online_mask);
814 if (irq_rover >= nr_cpu_ids)
815 irq_rover = cpumask_first(cpu_online_mask);
816
817 cpuid = irq_rover;
818
819 raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
820 } else {
821 cpuid = cpumask_first_and(mask, cpu_online_mask);
822 if (cpuid >= nr_cpu_ids)
823 goto do_round_robin;
824 }
825
826 return get_hard_smp_processor_id(cpuid);
827}
828#else
829int irq_choose_cpu(const struct cpumask *mask)
830{
831 return hard_smp_processor_id();
832}
833#endif
834
835#ifdef CONFIG_PPC64
836static int __init setup_noirqdistrib(char *str)
837{
838 distribute_irqs = 0;
839 return 1;
840}
841
842__setup("noirqdistrib", setup_noirqdistrib);
843#endif /* CONFIG_PPC64 */
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Derived from arch/i386/kernel/irq.c
4 * Copyright (C) 1992 Linus Torvalds
5 * Adapted from arch/i386 by Gary Thomas
6 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
8 * Copyright (C) 1996-2001 Cort Dougan
9 * Adapted for Power Macintosh by Paul Mackerras
10 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
11 *
12 * This file contains the code used by various IRQ handling routines:
13 * asking for different IRQs should be done through these routines
14 * instead of just grabbing them. Thus setups with different IRQ numbers
15 * shouldn't result in any weird surprises, and installing new handlers
16 * should be easier.
17 *
18 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
19 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
20 * mask register (of which only 16 are defined), hence the weird shifting
21 * and complement of the cached_irq_mask. I want to be able to stuff
22 * this right into the SIU SMASK register.
23 * Many of the prep/chrp functions are conditionally compiled on CONFIG_PPC_8xx
24 * to reduce code space and undefined function references.
25 */
26
27#undef DEBUG
28
29#include <linux/export.h>
30#include <linux/threads.h>
31#include <linux/kernel_stat.h>
32#include <linux/signal.h>
33#include <linux/sched.h>
34#include <linux/ptrace.h>
35#include <linux/ioport.h>
36#include <linux/interrupt.h>
37#include <linux/timex.h>
38#include <linux/init.h>
39#include <linux/slab.h>
40#include <linux/delay.h>
41#include <linux/irq.h>
42#include <linux/seq_file.h>
43#include <linux/cpumask.h>
44#include <linux/profile.h>
45#include <linux/bitops.h>
46#include <linux/list.h>
47#include <linux/radix-tree.h>
48#include <linux/mutex.h>
49#include <linux/pci.h>
50#include <linux/debugfs.h>
51#include <linux/of.h>
52#include <linux/of_irq.h>
53#include <linux/vmalloc.h>
54#include <linux/pgtable.h>
55#include <linux/static_call.h>
56
57#include <linux/uaccess.h>
58#include <asm/interrupt.h>
59#include <asm/io.h>
60#include <asm/irq.h>
61#include <asm/cache.h>
62#include <asm/ptrace.h>
63#include <asm/machdep.h>
64#include <asm/udbg.h>
65#include <asm/smp.h>
66#include <asm/hw_irq.h>
67#include <asm/softirq_stack.h>
68#include <asm/ppc_asm.h>
69
70#define CREATE_TRACE_POINTS
71#include <asm/trace.h>
72#include <asm/cpu_has_feature.h>
73
74DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
75EXPORT_PER_CPU_SYMBOL(irq_stat);
76
77#ifdef CONFIG_PPC32
78atomic_t ppc_n_lost_interrupts;
79
80#ifdef CONFIG_TAU_INT
81extern int tau_initialized;
82u32 tau_interrupts(unsigned long cpu);
83#endif
84#endif /* CONFIG_PPC32 */
85
86int arch_show_interrupts(struct seq_file *p, int prec)
87{
88 int j;
89
90#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
91 if (tau_initialized) {
92 seq_printf(p, "%*s: ", prec, "TAU");
93 for_each_online_cpu(j)
94 seq_printf(p, "%10u ", tau_interrupts(j));
95 seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
96 }
97#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
98
99 seq_printf(p, "%*s: ", prec, "LOC");
100 for_each_online_cpu(j)
101 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
102 seq_printf(p, " Local timer interrupts for timer event device\n");
103
104 seq_printf(p, "%*s: ", prec, "BCT");
105 for_each_online_cpu(j)
106 seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
107 seq_printf(p, " Broadcast timer interrupts for timer event device\n");
108
109 seq_printf(p, "%*s: ", prec, "LOC");
110 for_each_online_cpu(j)
111 seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
112 seq_printf(p, " Local timer interrupts for others\n");
113
114 seq_printf(p, "%*s: ", prec, "SPU");
115 for_each_online_cpu(j)
116 seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
117 seq_printf(p, " Spurious interrupts\n");
118
119 seq_printf(p, "%*s: ", prec, "PMI");
120 for_each_online_cpu(j)
121 seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
122 seq_printf(p, " Performance monitoring interrupts\n");
123
124 seq_printf(p, "%*s: ", prec, "MCE");
125 for_each_online_cpu(j)
126 seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
127 seq_printf(p, " Machine check exceptions\n");
128
129#ifdef CONFIG_PPC_BOOK3S_64
130 if (cpu_has_feature(CPU_FTR_HVMODE)) {
131 seq_printf(p, "%*s: ", prec, "HMI");
132 for_each_online_cpu(j)
133 seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs);
134 seq_printf(p, " Hypervisor Maintenance Interrupts\n");
135 }
136#endif
137
138 seq_printf(p, "%*s: ", prec, "NMI");
139 for_each_online_cpu(j)
140 seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
141 seq_printf(p, " System Reset interrupts\n");
142
143#ifdef CONFIG_PPC_WATCHDOG
144 seq_printf(p, "%*s: ", prec, "WDG");
145 for_each_online_cpu(j)
146 seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
147 seq_printf(p, " Watchdog soft-NMI interrupts\n");
148#endif
149
150#ifdef CONFIG_PPC_DOORBELL
151 if (cpu_has_feature(CPU_FTR_DBELL)) {
152 seq_printf(p, "%*s: ", prec, "DBL");
153 for_each_online_cpu(j)
154 seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
155 seq_printf(p, " Doorbell interrupts\n");
156 }
157#endif
158
159 return 0;
160}
161
162/*
163 * /proc/stat helpers
164 */
165u64 arch_irq_stat_cpu(unsigned int cpu)
166{
167 u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;
168
169 sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
170 sum += per_cpu(irq_stat, cpu).pmu_irqs;
171 sum += per_cpu(irq_stat, cpu).mce_exceptions;
172 sum += per_cpu(irq_stat, cpu).spurious_irqs;
173 sum += per_cpu(irq_stat, cpu).timer_irqs_others;
174#ifdef CONFIG_PPC_BOOK3S_64
175 sum += paca_ptrs[cpu]->hmi_irqs;
176#endif
177 sum += per_cpu(irq_stat, cpu).sreset_irqs;
178#ifdef CONFIG_PPC_WATCHDOG
179 sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
180#endif
181#ifdef CONFIG_PPC_DOORBELL
182 sum += per_cpu(irq_stat, cpu).doorbell_irqs;
183#endif
184
185 return sum;
186}
187
188static inline void check_stack_overflow(unsigned long sp)
189{
190 if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
191 return;
192
193 sp &= THREAD_SIZE - 1;
194
195 /* check for stack overflow: is there less than 1/4th free? */
196 if (unlikely(sp < THREAD_SIZE / 4)) {
197 pr_err("do_IRQ: stack overflow: %ld\n", sp);
198 dump_stack();
199 }
200}
201
202#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
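/*
 * The stack switch cannot be expressed in C: save the current r1 as the
 * back chain at the top of the softirq stack, point r1 at that frame,
 * call __do_softirq(), then restore r1 from the saved back chain.
 */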
203static __always_inline void call_do_softirq(const void *sp)
204{
205 /* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
206 asm volatile (
207 PPC_STLU " %%r1, %[offset](%[sp]) ;"
208 "mr %%r1, %[sp] ;"
209 "bl %[callee] ;"
210 PPC_LL " %%r1, 0(%%r1) ;"
211 : // Outputs
212 : // Inputs
213 [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_MIN_SIZE),
214 [callee] "i" (__do_softirq)
215 : // Clobbers
216 "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
217 "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
218 "r11", "r12"
219 );
220}
221#endif
222
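/*
 * Static call wrapping the platform's interrupt fetch hook. It defaults to
 * returning 0 (no interrupt) and is patched to ppc_md.get_irq in init_IRQ().
 */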
223DEFINE_STATIC_CALL_RET0(ppc_get_irq, *ppc_md.get_irq);
224
225static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
226{
227 unsigned int irq;
228
229 trace_irq_entry(regs);
230
231 check_stack_overflow(oldsp);
232
233 /*
234 * Query the platform PIC for the interrupt & ack it.
235 *
236 * This will typically lower the interrupt line to the CPU
237 */
238 irq = static_call(ppc_get_irq)();
239
240 /* We can hard enable interrupts now to allow perf interrupts */
241 if (should_hard_irq_enable(regs))
242 do_hard_irq_enable();
243
244 /* And finally process it */
245 if (unlikely(!irq))
246 __this_cpu_inc(irq_stat.spurious_irqs);
247 else
248 generic_handle_irq(irq);
249
250 trace_irq_exit(regs);
251}
252
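/*
 * Like call_do_softirq(), but also pass the interrupted stack pointer in
 * r4 so that __do_irq() can run check_stack_overflow() on the old stack.
 */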
253static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
254{
255 register unsigned long r3 asm("r3") = (unsigned long)regs;
256
257 /* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
258 asm volatile (
259 PPC_STLU " %%r1, %[offset](%[sp]) ;"
260 "mr %%r4, %%r1 ;"
261 "mr %%r1, %[sp] ;"
262 "bl %[callee] ;"
263 PPC_LL " %%r1, 0(%%r1) ;"
264 : // Outputs
265 "+r" (r3)
266 : // Inputs
267 [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_MIN_SIZE),
268 [callee] "i" (__do_irq)
269 : // Clobbers
270 "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
271 "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
272 "r11", "r12"
273 );
274}
275
276void __do_IRQ(struct pt_regs *regs)
277{
278 struct pt_regs *old_regs = set_irq_regs(regs);
279 void *cursp, *irqsp, *sirqsp;
280
281 /* Switch to the irq stack to handle this */
282 cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
283 irqsp = hardirq_ctx[raw_smp_processor_id()];
284 sirqsp = softirq_ctx[raw_smp_processor_id()];
285
286 /* Already there? If not, switch stack and call */
287 if (unlikely(cursp == irqsp || cursp == sirqsp))
288 __do_irq(regs, current_stack_pointer);
289 else
290 call_do_irq(regs, irqsp);
291
292 set_irq_regs(old_regs);
293}
294
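/*
 * External interrupt entry point: the async interrupt handler wrapper
 * performs the entry/exit accounting and soft-mask reconciliation around
 * __do_IRQ().
 */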
295DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
296{
297 __do_IRQ(regs);
298}
299
300static void *__init alloc_vm_stack(void)
301{
302 return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
303 NUMA_NO_NODE, (void *)_RET_IP_);
304}
305
306static void __init vmap_irqstack_init(void)
307{
308 int i;
309
310 for_each_possible_cpu(i) {
311 softirq_ctx[i] = alloc_vm_stack();
312 hardirq_ctx[i] = alloc_vm_stack();
313 }
314}
315
316
317void __init init_IRQ(void)
318{
319 if (IS_ENABLED(CONFIG_VMAP_STACK))
320 vmap_irqstack_init();
321
322 if (ppc_md.init_IRQ)
323 ppc_md.init_IRQ();
324
325 if (!WARN_ON(!ppc_md.get_irq))
326 static_call_update(ppc_get_irq, ppc_md.get_irq);
327}
328
329#ifdef CONFIG_BOOKE_OR_40x
330void *critirq_ctx[NR_CPUS] __read_mostly;
331void *dbgirq_ctx[NR_CPUS] __read_mostly;
332void *mcheckirq_ctx[NR_CPUS] __read_mostly;
333#endif
334
335void *softirq_ctx[NR_CPUS] __read_mostly;
336void *hardirq_ctx[NR_CPUS] __read_mostly;
337
338#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
339void do_softirq_own_stack(void)
340{
341 call_do_softirq(softirq_ctx[smp_processor_id()]);
342}
343#endif
344
345irq_hw_number_t virq_to_hw(unsigned int virq)
346{
347 struct irq_data *irq_data = irq_get_irq_data(virq);
348 return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
349}
350EXPORT_SYMBOL_GPL(virq_to_hw);
351
352#ifdef CONFIG_SMP
353int irq_choose_cpu(const struct cpumask *mask)
354{
355 int cpuid;
356
357 if (cpumask_equal(mask, cpu_online_mask)) {
358 static int irq_rover;
359 static DEFINE_RAW_SPINLOCK(irq_rover_lock);
360 unsigned long flags;
361
362 /* Round-robin distribution... */
363do_round_robin:
364 raw_spin_lock_irqsave(&irq_rover_lock, flags);
365
366 irq_rover = cpumask_next(irq_rover, cpu_online_mask);
367 if (irq_rover >= nr_cpu_ids)
368 irq_rover = cpumask_first(cpu_online_mask);
369
370 cpuid = irq_rover;
371
372 raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
373 } else {
374 cpuid = cpumask_first_and(mask, cpu_online_mask);
375 if (cpuid >= nr_cpu_ids)
376 goto do_round_robin;
377 }
378
379 return get_hard_smp_processor_id(cpuid);
380}
381#else
382int irq_choose_cpu(const struct cpumask *mask)
383{
384 return hard_smp_processor_id();
385}
386#endif