/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on
 * CONFIG_PPC_8xx to reduce code space and undefined function references.
 */
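
/*
 * Illustrative sketch (not part of this file's logic): the "weird
 * shifting" above comes from PowerPC big-endian bit numbering, where
 * bit 0 is the *most* significant bit of the register. A hypothetical
 * helper mapping an IRQ number to its SIU mask bit would look like:
 */
#if 0
static inline u32 siu_irq_bit(unsigned int irq)
{
	return 1u << (31 - irq);	/* IRQ0 -> 0x80000000, IRQ15 -> 0x00010000 */
}
#endif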

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/hw_irq.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}
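
/*
 * A minimal C-equivalent sketch of the inline asm above, assuming the
 * usual ppc64 convention that r13 permanently holds the PACA pointer:
 * the load is simply local_paca->irq_happened, done in asm to stay
 * notrace-safe and avoid any smp_processor_id() debug machinery.
 */
#if 0
static inline unsigned long get_irq_happened_c(void)
{
	return local_paca->irq_happened;
}
#endif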

static inline notrace int decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	return now >= *next_tb;
}
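
/*
 * Example of the check above (hypothetical numbers): if this CPU's
 * decrementers_next_tb was set to expire at timebase T and we sat
 * hard-disabled past T, "now >= *next_tb" is true and the caller
 * re-emits a 0x900 decrementer interrupt on its behalf.
 */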

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900/f00/280/a00/e60 if
 * there's an EE, DEC, PMI, DBELL or HMI to generate.
 *
 * This is called in two contexts: from arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all
	 * the debug_smp_processor_id() business in this low level
	 * function
	 */
	unsigned char happened = local_paca->irq_happened;

	/*
	 * We are responding to the next interrupt, so interrupt-off
	 * latencies should be reset here.
	 */
	trace_hardirqs_on();
	trace_hardirqs_off();

	if (happened & PACA_IRQ_HARD_DIS) {
		/* Clear bit 0 which we wouldn't clear otherwise */
		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

		/*
		 * We may have missed a decrementer interrupt if hard disabled.
		 * Check the decrementer register in case we had a rollover
		 * while hard disabled.
		 */
		if (!(happened & PACA_IRQ_DEC)) {
			if (decrementer_check_overflow()) {
				local_paca->irq_happened |= PACA_IRQ_DEC;
				happened |= PACA_IRQ_DEC;
			}
		}
	}

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * Check if a hypervisor maintenance interrupt happened.
	 * This is a higher priority interrupt than the others, so
	 * replay it first.
	 */
	if (happened & PACA_IRQ_HMI) {
		local_paca->irq_happened &= ~PACA_IRQ_HMI;
		return 0xe60;
	}

	if (happened & PACA_IRQ_DEC) {
		local_paca->irq_happened &= ~PACA_IRQ_DEC;
		return 0x900;
	}

	if (happened & PACA_IRQ_PMI) {
		local_paca->irq_happened &= ~PACA_IRQ_PMI;
		return 0xf00;
	}

	if (happened & PACA_IRQ_EE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE;
		return 0x500;
	}

#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check if an EPR external interrupt happened. This bit is
	 * typically set if we need to handle another "edge" interrupt
	 * from within the MPIC "EPR" handler.
	 */
	if (happened & PACA_IRQ_EE_EDGE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
		return 0x500;
	}

	if (happened & PACA_IRQ_DBELL) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		return 0x280;
	}
#else
	if (happened & PACA_IRQ_DBELL) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		return 0xa00;
	}
#endif /* CONFIG_PPC_BOOK3E */

	/* There should be nothing left! */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}

notrace void arch_local_irq_restore(unsigned long mask)
{
	unsigned char irq_happened;
	unsigned int replay;

	/* Write the new soft-enabled value */
	irq_soft_mask_set(mask);
	if (mask)
		return;

	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem, we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened)
		return;

	/*
	 * We need to hard disable to get a trusted value from
	 * __check_irq_replay(). We also need to soft-disable
	 * again to avoid warnings in there due to the use of
	 * per-cpu variables.
	 *
	 * We know that if the value in irq_happened is exactly 0x01
	 * then we are already hard disabled (there are other less
	 * common cases that we'll ignore for now), so we skip the
	 * (expensive) mtmsrd.
	 */
	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
		__hard_irq_disable();
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	else {
		/*
		 * We should already be hard disabled here. We had bugs
		 * where that wasn't the case so let's double check it
		 * and warn if we are wrong. Only do that when IRQ tracing
		 * is enabled as mfmsr() can be costly.
		 */
		if (WARN_ON(mfmsr() & MSR_EE))
			__hard_irq_disable();
	}
#endif

	irq_soft_mask_set(IRQS_ALL_DISABLED);
	trace_hardirqs_off();

	/*
	 * Check if anything needs to be re-emitted. We haven't
	 * soft-enabled yet to avoid warnings in decrementer_check_overflow
	 * accessing per-cpu variables
	 */
	replay = __check_irq_replay();

	/* We can soft-enable now */
	trace_hardirqs_on();
	irq_soft_mask_set(IRQS_ENABLED);

	/*
	 * And replay if we have to. This will return with interrupts
	 * hard-enabled.
	 */
	if (replay) {
		__replay_interrupt(replay);
		return;
	}

	/* Finally, let's ensure we are hard enabled */
	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
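
/*
 * Usage sketch (hypothetical caller, not part of this file): this
 * function is what ultimately runs when common code re-enables
 * interrupts via local_irq_restore(), e.g.:
 */
#if 0
static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* soft-disables; may stay hard-enabled */
	/* ... touch state that must not race with interrupts ... */
	local_irq_restore(flags);	/* ends up in arch_local_irq_restore() */
}
#endif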

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
	if (irqs_disabled()) {
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_irq_enable();
	} else
		__hard_irq_enable();
}

/*
 * This is a helper to use when about to go into an idle low-power
 * state, when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ENABLED);

	/* Tell the caller to enter the low power state */
	return true;
}
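
/*
 * Usage sketch (hypothetical platform idle hook, not part of this
 * file), showing the contract described above:
 */
#if 0
static void example_power_save(void)
{
	if (!prep_irq_for_idle())
		return;		/* an event is already pending, don't sleep */

	/* enter the low power state; it may hard-enable interrupts */
}
#endif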

#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not report
 * long interrupts-off times. Must be paired with
 * fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	return true;
}

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET	0xff

static const u8 srr1_to_lazyirq[0x10] = {
	0, 0, 0,
	PACA_IRQ_DBELL,
	IRQ_SYSTEM_RESET,
	PACA_IRQ_DBELL,
	PACA_IRQ_DEC,
	0,
	PACA_IRQ_EE,
	PACA_IRQ_EE,
	PACA_IRQ_HMI,
	0, 0, 0, 0, 0 };

void replay_system_reset(void)
{
	struct pt_regs regs;

	ppc_save_regs(&regs);
	regs.trap = 0x100;
	get_paca()->in_nmi = 1;
	system_reset_exception(&regs);
	get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);

void irq_set_pending_from_srr1(unsigned long srr1)
{
	unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
	u8 reason = srr1_to_lazyirq[idx];

	/*
	 * Take the system reset now, which is immediately after registers
	 * are restored from idle. It's an NMI, so interrupts need not be
	 * re-enabled before it is taken.
	 */
	if (unlikely(reason == IRQ_SYSTEM_RESET)) {
		replay_system_reset();
		return;
	}

	/*
	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
	 * so this can be called unconditionally with the SRR1 wake
	 * reason as returned by the idle code, which uses 0 to mean no
	 * interrupt.
	 *
	 * If a future CPU was to designate this as an interrupt reason,
	 * then a new index for no interrupt must be assigned.
	 */
	local_paca->irq_happened |= reason;
}
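
/*
 * Worked example for the index computation above: SRR1 bits 42:45
 * (IBM numbering) are bits 21:18 counting from the least significant
 * end of the 64-bit register, hence the ">> 18" after masking with
 * SRR1_WAKEMASK_P8. A decrementer wakeup reason of 0b0110 thus gives
 * idx = 6, which srr1_to_lazyirq[] maps to PACA_IRQ_DEC.
 */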
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled,
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/*
	 * Interrupts must always be hard disabled before irq_happened is
	 * modified (to prevent lost update in case of interrupt between
	 * load and store).
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
	seq_printf(p, " Local timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
	seq_printf(p, " Local timer interrupts for others\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, " Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, " Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, " Machine check exceptions\n");

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		seq_printf(p, "%*s: ", prec, "HMI");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
					per_cpu(irq_stat, j).hmi_exceptions);
		seq_printf(p, " Hypervisor Maintenance Interrupts\n");
	}

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
	seq_printf(p, " System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
	seq_printf(p, "%*s: ", prec, "WDG");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
	seq_printf(p, " Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		seq_printf(p, "%*s: ", prec, "DBL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
		seq_printf(p, " Doorbell interrupts\n");
	}
#endif

	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
	sum += per_cpu(irq_stat, cpu).hmi_exceptions;
	sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
	sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

	return sum;
}

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = current_stack_pointer() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		pr_err("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}
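
/*
 * Worked example for the check above (hypothetical numbers): with
 * THREAD_SIZE = 16KB and the stack growing down toward the
 * thread_info at the base of the stack, an sp offset of 0x900 within
 * the stack leaves 0x900 - sizeof(struct thread_info) bytes free;
 * anything closer than 2KB to the thread_info triggers the warning.
 */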

void __do_irq(struct pt_regs *regs)
{
	unsigned int irq;

	irq_enter();

	trace_irq_entry(regs);

	check_stack_overflow();

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now to allow perf interrupts */
	may_hard_irq_enable();

	/* And finally process it */
	if (unlikely(!irq))
		__this_cpu_inc(irq_stat.spurious_irqs);
	else
		generic_handle_irq(irq);

	trace_irq_exit(regs);

	irq_exit();
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct thread_info *curtp, *irqtp, *sirqtp;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[raw_smp_processor_id()];
	sirqtp = softirq_ctx[raw_smp_processor_id()];

	/* Already there ? */
	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
	}

	/* Prepare the thread_info in the irq stack */
	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the preempt_count so that the [soft]irq checks work. */
	irqtp->preempt_count = curtp->preempt_count;

	/* Switch stack and call */
	call_do_irq(regs, irqtp);

	/* Restore stack limit */
	irqtp->task = NULL;

	/* Copy back updates to the thread_info */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);

	set_irq_regs(old_regs);
}
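
/*
 * Sketch of the flow above: if do_IRQ() is entered on a task's own
 * stack, call_do_irq() switches the stack pointer (r1) onto this
 * CPU's hardirq_ctx stack before running __do_irq(), so a deep
 * handler chain cannot overflow the interrupted task's kernel stack;
 * the thread_info fields copied above keep the preempt/softirq
 * bookkeeping consistent on the borrowed stack.
 */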

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
#ifdef CONFIG_SMP
		cpu_nr = get_hard_smp_processor_id(i);
#else
		cpu_nr = 0;
#endif
#endif

		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		klp_init_thread_info(tp);

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		klp_init_thread_info(tp);
	}
}

void do_softirq_own_stack(void)
{
	struct thread_info *curtp, *irqtp;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	call_do_softirq(irqtp);
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/*
		 * Round-robin distribution. Note that the else branch
		 * below jumps backward to this label when the requested
		 * mask contains no online CPU, falling back to
		 * round-robin over all online CPUs.
		 */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif

int arch_early_irq_init(void)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */