v6.8
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 *  Derived from arch/i386/kernel/irq.c
  4 *    Copyright (C) 1992 Linus Torvalds
  5 *  Adapted from arch/i386 by Gary Thomas
  6 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  7 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
  8 *    Copyright (C) 1996-2001 Cort Dougan
  9 *  Adapted for Power Macintosh by Paul Mackerras
 10 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 11 *
 12 * This file contains the code used by various IRQ handling routines:
 13 * asking for different IRQs should be done through these routines
 14 * instead of just grabbing them. Thus setups with different IRQ numbers
 15 * shouldn't result in any weird surprises, and installing new handlers
 16 * should be easier.
 17 *
 18 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 19 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 20 * mask register (of which only 16 are defined), hence the weird shifting
 21 * and complement of the cached_irq_mask.  I want to be able to stuff
 22 * this right into the SIU SMASK register.
 23 * Many of the prep/chrp functions are conditionally compiled on CONFIG_PPC_8xx
 24 * to reduce code space and undefined function references.
 25 */
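
/*
 * Illustrative sketch (not part of this file): one way the "weird shifting
 * and complement" described above could look.  The names cached_irq_mask and
 * mpc8xx_write_simask() are assumptions made only for this example; the real
 * 8xx PIC code lives elsewhere in the tree.
 */
extern void mpc8xx_write_simask(unsigned int mask);	/* hypothetical accessor */

static unsigned int cached_irq_mask;	/* bit set => source currently masked */

static void sketch_mask_irq(unsigned int irq)
{
	/* IRQ0 is bit 0 of SIMASK, i.e. the most-significant bit. */
	cached_irq_mask |= 1u << (31 - irq);
	/* SIMASK uses 1 = enabled, so write the complement of the mask. */
	mpc8xx_write_simask(~cached_irq_mask);
}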
 26
 27#undef DEBUG
 28
 29#include <linux/export.h>
 30#include <linux/threads.h>
 31#include <linux/kernel_stat.h>
 32#include <linux/signal.h>
 33#include <linux/sched.h>
 34#include <linux/ptrace.h>
 35#include <linux/ioport.h>
 36#include <linux/interrupt.h>
 37#include <linux/timex.h>
 38#include <linux/init.h>
 39#include <linux/slab.h>
 40#include <linux/delay.h>
 41#include <linux/irq.h>
 42#include <linux/seq_file.h>
 43#include <linux/cpumask.h>
 44#include <linux/profile.h>
 45#include <linux/bitops.h>
 46#include <linux/list.h>
 47#include <linux/radix-tree.h>
 48#include <linux/mutex.h>
 49#include <linux/pci.h>
 50#include <linux/debugfs.h>
 51#include <linux/of.h>
 52#include <linux/of_irq.h>
 53#include <linux/vmalloc.h>
 54#include <linux/pgtable.h>
 55#include <linux/static_call.h>
 56
 57#include <linux/uaccess.h>
 58#include <asm/interrupt.h>
 59#include <asm/io.h>
 60#include <asm/irq.h>
 61#include <asm/cache.h>
 62#include <asm/ptrace.h>
 63#include <asm/machdep.h>
 64#include <asm/udbg.h>
 65#include <asm/smp.h>
 66#include <asm/hw_irq.h>
 67#include <asm/softirq_stack.h>
 68#include <asm/ppc_asm.h>
 69
 70#define CREATE_TRACE_POINTS
 71#include <asm/trace.h>
 72#include <asm/cpu_has_feature.h>
 73
 74DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 75EXPORT_PER_CPU_SYMBOL(irq_stat);
 76
 77#ifdef CONFIG_PPC32
 78atomic_t ppc_n_lost_interrupts;
 79
 80#ifdef CONFIG_TAU_INT
 81extern int tau_initialized;
 82u32 tau_interrupts(unsigned long cpu);
 83#endif
 84#endif /* CONFIG_PPC32 */
 85
 86int arch_show_interrupts(struct seq_file *p, int prec)
 87{
 88	int j;
 89
 90#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
 91	if (tau_initialized) {
 92		seq_printf(p, "%*s: ", prec, "TAU");
 93		for_each_online_cpu(j)
 94			seq_printf(p, "%10u ", tau_interrupts(j));
 95		seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
 96	}
 97#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
 98
 99	seq_printf(p, "%*s: ", prec, "LOC");
100	for_each_online_cpu(j)
101		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
102	seq_printf(p, "  Local timer interrupts for timer event device\n");
103
104	seq_printf(p, "%*s: ", prec, "BCT");
105	for_each_online_cpu(j)
106		seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
107	seq_printf(p, "  Broadcast timer interrupts for timer event device\n");
108
109	seq_printf(p, "%*s: ", prec, "LOC");
110	for_each_online_cpu(j)
111		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
112	seq_printf(p, "  Local timer interrupts for others\n");
113
114	seq_printf(p, "%*s: ", prec, "SPU");
115	for_each_online_cpu(j)
116		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
117	seq_printf(p, "  Spurious interrupts\n");
118
119	seq_printf(p, "%*s: ", prec, "PMI");
120	for_each_online_cpu(j)
121		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
122	seq_printf(p, "  Performance monitoring interrupts\n");
123
124	seq_printf(p, "%*s: ", prec, "MCE");
125	for_each_online_cpu(j)
126		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
127	seq_printf(p, "  Machine check exceptions\n");
128
129#ifdef CONFIG_PPC_BOOK3S_64
130	if (cpu_has_feature(CPU_FTR_HVMODE)) {
131		seq_printf(p, "%*s: ", prec, "HMI");
132		for_each_online_cpu(j)
133			seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs);
134		seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
135	}
136#endif
137
138	seq_printf(p, "%*s: ", prec, "NMI");
139	for_each_online_cpu(j)
140		seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
141	seq_printf(p, "  System Reset interrupts\n");
142
143#ifdef CONFIG_PPC_WATCHDOG
144	seq_printf(p, "%*s: ", prec, "WDG");
145	for_each_online_cpu(j)
146		seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
147	seq_printf(p, "  Watchdog soft-NMI interrupts\n");
148#endif
149
150#ifdef CONFIG_PPC_DOORBELL
151	if (cpu_has_feature(CPU_FTR_DBELL)) {
152		seq_printf(p, "%*s: ", prec, "DBL");
153		for_each_online_cpu(j)
154			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
155		seq_printf(p, "  Doorbell interrupts\n");
156	}
157#endif
158
159	return 0;
160}
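
/*
 * Illustrative excerpt (counts made up) of the /proc/interrupts rows the
 * function above emits on a two-CPU system; the CPU header line comes from
 * the generic show_interrupts() code:
 *
 *            CPU0       CPU1
 * LOC:     123456     654321   Local timer interrupts for timer event device
 * BCT:          0          0   Broadcast timer interrupts for timer event device
 * SPU:          0          2   Spurious interrupts
 * PMI:         17          9   Performance monitoring interrupts
 * MCE:          0          0   Machine check exceptions
 */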
161
162/*
163 * /proc/stat helpers
164 */
165u64 arch_irq_stat_cpu(unsigned int cpu)
166{
167	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;
168
169	sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
170	sum += per_cpu(irq_stat, cpu).pmu_irqs;
171	sum += per_cpu(irq_stat, cpu).mce_exceptions;
172	sum += per_cpu(irq_stat, cpu).spurious_irqs;
173	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
174#ifdef CONFIG_PPC_BOOK3S_64
175	sum += paca_ptrs[cpu]->hmi_irqs;
176#endif
177	sum += per_cpu(irq_stat, cpu).sreset_irqs;
178#ifdef CONFIG_PPC_WATCHDOG
179	sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
180#endif
181#ifdef CONFIG_PPC_DOORBELL
182	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
183#endif
184
185	return sum;
186}
187
188static inline void check_stack_overflow(unsigned long sp)
189{
190	if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
191		return;
192
193	sp &= THREAD_SIZE - 1;
194
195	/* check for stack overflow: is there less than 1/4th free? */
196	if (unlikely(sp < THREAD_SIZE / 4)) {
197		pr_err("do_IRQ: stack overflow: %ld\n", sp);
198		dump_stack();
199	}
200}
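
/*
 * Worked example for the check above (illustrative): with a hypothetical
 * 16 KiB THREAD_SIZE, "sp & (THREAD_SIZE - 1)" is the stack pointer's offset
 * from the bottom of the THREAD_SIZE-aligned stack.  Because the stack grows
 * downward, that offset is also the space still unused, so the warning fires
 * once fewer than 16 KiB / 4 = 4 KiB remain.
 */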
201
202#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
203static __always_inline void call_do_softirq(const void *sp)
204{
205	/* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
206	asm volatile (
207		 PPC_STLU "	%%r1, %[offset](%[sp])	;"
208		"mr		%%r1, %[sp]		;"
209#ifdef CONFIG_PPC_KERNEL_PCREL
210		"bl		%[callee]@notoc		;"
211#else
212		"bl		%[callee]		;"
213#endif
214		 PPC_LL "	%%r1, 0(%%r1)		;"
215		 : // Outputs
216		 : // Inputs
217		   [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_MIN_SIZE),
218		   [callee] "i" (__do_softirq)
219		 : // Clobbers
220		   "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
221		   "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
222		   "r11", "r12"
223	);
224}
225#endif
226
227DEFINE_STATIC_CALL_RET0(ppc_get_irq, *ppc_md.get_irq);
228
229static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
230{
231	unsigned int irq;
232
233	trace_irq_entry(regs);
234
235	check_stack_overflow(oldsp);
236
237	/*
238	 * Query the platform PIC for the interrupt & ack it.
239	 *
240	 * This will typically lower the interrupt line to the CPU
241	 */
242	irq = static_call(ppc_get_irq)();
243
244	/* We can hard enable interrupts now to allow perf interrupts */
245	if (should_hard_irq_enable(regs))
246		do_hard_irq_enable();
247
248	/* And finally process it */
249	if (unlikely(!irq))
250		__this_cpu_inc(irq_stat.spurious_irqs);
251	else
252		generic_handle_irq(irq);
253
254	trace_irq_exit(regs);
255}
256
257static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
258{
259	register unsigned long r3 asm("r3") = (unsigned long)regs;
260
261	/* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
262	asm volatile (
263		 PPC_STLU "	%%r1, %[offset](%[sp])	;"
264		"mr		%%r4, %%r1		;"
265		"mr		%%r1, %[sp]		;"
266#ifdef CONFIG_PPC_KERNEL_PCREL
267		"bl		%[callee]@notoc		;"
268#else
269		"bl		%[callee]		;"
270#endif
271		 PPC_LL "	%%r1, 0(%%r1)		;"
272		 : // Outputs
273		   "+r" (r3)
274		 : // Inputs
275		   [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_MIN_SIZE),
276		   [callee] "i" (__do_irq)
277		 : // Clobbers
278		   "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
279		   "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
280		   "r11", "r12"
281	);
282}
283
284void __do_IRQ(struct pt_regs *regs)
285{
286	struct pt_regs *old_regs = set_irq_regs(regs);
287	void *cursp, *irqsp, *sirqsp;
288
289	/* Switch to the irq stack to handle this */
290	cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
291	irqsp = hardirq_ctx[raw_smp_processor_id()];
292	sirqsp = softirq_ctx[raw_smp_processor_id()];
293
294	/* Already there? If not, switch stack and call */
295	if (unlikely(cursp == irqsp || cursp == sirqsp))
296		__do_irq(regs, current_stack_pointer);
297	else
298		call_do_irq(regs, irqsp);
299
300	set_irq_regs(old_regs);
301}
302
303DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
304{
305	__do_IRQ(regs);
306}
307
308static void *__init alloc_vm_stack(void)
309{
310	return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
311			      NUMA_NO_NODE, (void *)_RET_IP_);
312}
313
314static void __init vmap_irqstack_init(void)
315{
316	int i;
317
318	for_each_possible_cpu(i) {
319		softirq_ctx[i] = alloc_vm_stack();
320		hardirq_ctx[i] = alloc_vm_stack();
321	}
322}
323
324
325void __init init_IRQ(void)
326{
327	if (IS_ENABLED(CONFIG_VMAP_STACK))
328		vmap_irqstack_init();
329
330	if (ppc_md.init_IRQ)
331		ppc_md.init_IRQ();
332
333	if (!WARN_ON(!ppc_md.get_irq))
334		static_call_update(ppc_get_irq, ppc_md.get_irq);
335}
336
337#ifdef CONFIG_BOOKE_OR_40x
338void   *critirq_ctx[NR_CPUS] __read_mostly;
339void    *dbgirq_ctx[NR_CPUS] __read_mostly;
340void *mcheckirq_ctx[NR_CPUS] __read_mostly;
341#endif
342
343void *softirq_ctx[NR_CPUS] __read_mostly;
344void *hardirq_ctx[NR_CPUS] __read_mostly;
345
346#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
347void do_softirq_own_stack(void)
348{
349	call_do_softirq(softirq_ctx[smp_processor_id()]);
350}
351#endif
352
353irq_hw_number_t virq_to_hw(unsigned int virq)
354{
355	struct irq_data *irq_data = irq_get_irq_data(virq);
356	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
357}
358EXPORT_SYMBOL_GPL(virq_to_hw);
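
/*
 * Illustrative usage sketch (not part of this file): code that holds a Linux
 * virq can recover the controller-level hardware IRQ number, e.g. for
 * diagnostics.  sketch_report_hwirq() is a made-up helper name.
 */
static void sketch_report_hwirq(unsigned int virq)
{
	pr_debug("virq %u maps to hwirq %lu\n",
		 virq, (unsigned long)virq_to_hw(virq));
}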
359
360#ifdef CONFIG_SMP
361int irq_choose_cpu(const struct cpumask *mask)
362{
363	int cpuid;
364
365	if (cpumask_equal(mask, cpu_online_mask)) {
366		static int irq_rover;
367		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
368		unsigned long flags;
369
370		/* Round-robin distribution... */
371do_round_robin:
372		raw_spin_lock_irqsave(&irq_rover_lock, flags);
373
374		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
375		if (irq_rover >= nr_cpu_ids)
376			irq_rover = cpumask_first(cpu_online_mask);
377
378		cpuid = irq_rover;
379
380		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
381	} else {
382		cpuid = cpumask_first_and(mask, cpu_online_mask);
383		if (cpuid >= nr_cpu_ids)
384			goto do_round_robin;
385	}
386
387	return get_hard_smp_processor_id(cpuid);
388}
389#else
390int irq_choose_cpu(const struct cpumask *mask)
391{
392	return hard_smp_processor_id();
393}
394#endif
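
/*
 * Illustrative caller sketch (not part of this file): an interrupt controller
 * that can route a source to only one CPU could use irq_choose_cpu() to turn
 * a requested affinity mask into a single hard (physical) CPU id.
 * sketch_pic_route_to_cpu() is a made-up helper.
 */
extern void sketch_pic_route_to_cpu(unsigned int hwirq, int hard_cpu);

static void sketch_pic_set_affinity(unsigned int hwirq, const struct cpumask *mask)
{
	/* Round-robins over the online CPUs when mask covers all of them. */
	sketch_pic_route_to_cpu(hwirq, irq_choose_cpu(mask));
}
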
v3.5.6
 
  1/*
  2 *  Derived from arch/i386/kernel/irq.c
  3 *    Copyright (C) 1992 Linus Torvalds
  4 *  Adapted from arch/i386 by Gary Thomas
  5 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  6 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
  7 *    Copyright (C) 1996-2001 Cort Dougan
  8 *  Adapted for Power Macintosh by Paul Mackerras
  9 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 10 *
 11 * This program is free software; you can redistribute it and/or
 12 * modify it under the terms of the GNU General Public License
 13 * as published by the Free Software Foundation; either version
 14 * 2 of the License, or (at your option) any later version.
 15 *
 16 * This file contains the code used by various IRQ handling routines:
 17 * asking for different IRQs should be done through these routines
 18 * instead of just grabbing them. Thus setups with different IRQ numbers
 19 * shouldn't result in any weird surprises, and installing new handlers
 20 * should be easier.
 21 *
 22 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 23 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 24 * mask register (of which only 16 are defined), hence the weird shifting
 25 * and complement of the cached_irq_mask.  I want to be able to stuff
 26 * this right into the SIU SMASK register.
 27 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 28 * to reduce code space and undefined function references.
 29 */
 30
 31#undef DEBUG
 32
 33#include <linux/export.h>
 34#include <linux/threads.h>
 35#include <linux/kernel_stat.h>
 36#include <linux/signal.h>
 37#include <linux/sched.h>
 38#include <linux/ptrace.h>
 39#include <linux/ioport.h>
 40#include <linux/interrupt.h>
 41#include <linux/timex.h>
 42#include <linux/init.h>
 43#include <linux/slab.h>
 44#include <linux/delay.h>
 45#include <linux/irq.h>
 46#include <linux/seq_file.h>
 47#include <linux/cpumask.h>
 48#include <linux/profile.h>
 49#include <linux/bitops.h>
 50#include <linux/list.h>
 51#include <linux/radix-tree.h>
 52#include <linux/mutex.h>
 53#include <linux/bootmem.h>
 54#include <linux/pci.h>
 55#include <linux/debugfs.h>
 56#include <linux/of.h>
 57#include <linux/of_irq.h>
 58
 59#include <asm/uaccess.h>
 60#include <asm/io.h>
 61#include <asm/pgtable.h>
 62#include <asm/irq.h>
 63#include <asm/cache.h>
 64#include <asm/prom.h>
 65#include <asm/ptrace.h>
 66#include <asm/machdep.h>
 67#include <asm/udbg.h>
 68#include <asm/smp.h>
 69#include <asm/debug.h>
 70
 71#ifdef CONFIG_PPC64
 72#include <asm/paca.h>
 73#include <asm/firmware.h>
 74#include <asm/lv1call.h>
 75#endif
 76#define CREATE_TRACE_POINTS
 77#include <asm/trace.h>
 78
 79DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
 80EXPORT_PER_CPU_SYMBOL(irq_stat);
 81
 82int __irq_offset_value;
 83
 84#ifdef CONFIG_PPC32
 85EXPORT_SYMBOL(__irq_offset_value);
 86atomic_t ppc_n_lost_interrupts;
 87
 88#ifdef CONFIG_TAU_INT
 89extern int tau_initialized;
 90extern int tau_interrupts(int);
 91#endif
 92#endif /* CONFIG_PPC32 */
 93
 94#ifdef CONFIG_PPC64
 95
 96int distribute_irqs = 1;
 97
 98static inline notrace unsigned long get_irq_happened(void)
 99{
100	unsigned long happened;
101
102	__asm__ __volatile__("lbz %0,%1(13)"
103	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));
104
105	return happened;
106}
107
108static inline notrace void set_soft_enabled(unsigned long enable)
109{
110	__asm__ __volatile__("stb %0,%1(13)"
111	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
112}
113
114static inline notrace int decrementer_check_overflow(void)
115{
116	u64 now = get_tb_or_rtc();
117	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
118
119	if (now >= *next_tb)
120		set_dec(1);
121	return now >= *next_tb;
122}
123
124/* This is called whenever we are re-enabling interrupts
125 * and returns either 0 (nothing to do) or 500/900 if there's
126 * either an EE or a DEC to generate.
127 *
128 * This is called in two contexts: From arch_local_irq_restore()
129 * before soft-enabling interrupts, and from the exception exit
130 * path when returning from an interrupt from a soft-disabled to
131 * a soft-enabled context. In both cases we have interrupts hard
132 * disabled.
133 *
134 * We take care of only clearing the bits we handled in the
135 * PACA irq_happened field since we can only re-emit one at a
136 * time and we don't want to "lose" one.
137 */
138notrace unsigned int __check_irq_replay(void)
139{
140	/*
141	 * We use local_paca rather than get_paca() to avoid all
142	 * the debug_smp_processor_id() business in this low level
143	 * function
144	 */
145	unsigned char happened = local_paca->irq_happened;
146
147	/* Clear bit 0 which we wouldn't clear otherwise */
148	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
149
150	/*
151	 * Force the delivery of pending soft-disabled interrupts on PS3.
152	 * Any HV call will have this side effect.
153	 */
154	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
155		u64 tmp, tmp2;
156		lv1_get_version_info(&tmp, &tmp2);
157	}
158
159	/*
160	 * We may have missed a decrementer interrupt. We check the
161	 * decrementer itself rather than the paca irq_happened field
162	 * in case we also had a rollover while hard disabled
163	 */
164	local_paca->irq_happened &= ~PACA_IRQ_DEC;
165	if (decrementer_check_overflow())
166		return 0x900;
167
168	/* Finally check if an external interrupt happened */
169	local_paca->irq_happened &= ~PACA_IRQ_EE;
170	if (happened & PACA_IRQ_EE)
171		return 0x500;
172
173#ifdef CONFIG_PPC_BOOK3E
174	/* Finally check if an EPR external interrupt happened.
175	 * This bit is typically set if we need to handle another
176	 * "edge" interrupt from within the MPIC "EPR" handler
177	 */
178	local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
179	if (happened & PACA_IRQ_EE_EDGE)
180		return 0x500;
181
182	local_paca->irq_happened &= ~PACA_IRQ_DBELL;
183	if (happened & PACA_IRQ_DBELL)
184		return 0x280;
185#endif /* CONFIG_PPC_BOOK3E */
186
187	/* There should be nothing left ! */
188	BUG_ON(local_paca->irq_happened != 0);
189
190	return 0;
191}
192
193notrace void arch_local_irq_restore(unsigned long en)
194{
195	unsigned char irq_happened;
196	unsigned int replay;
197
198	/* Write the new soft-enabled value */
199	set_soft_enabled(en);
200	if (!en)
201		return;
202	/*
203	 * From this point onward, we can take interrupts, preempt,
204	 * etc... unless we got hard-disabled. We check if an event
205	 * happened. If none happened, we know we can just return.
206	 *
207	 * We may have preempted before the check below, in which case
208	 * we are checking the "new" CPU instead of the old one. This
209	 * is only a problem if an event happened on the "old" CPU.
210	 *
211	 * External interrupt events will have caused interrupts to
212	 * be hard-disabled, so there is no problem, we
213	 * cannot have preempted.
214	 */
215	irq_happened = get_irq_happened();
216	if (!irq_happened)
217		return;
218
219	/*
220	 * We need to hard disable to get a trusted value from
221	 * __check_irq_replay(). We also need to soft-disable
222	 * again to avoid warnings in there due to the use of
223	 * per-cpu variables.
224	 *
225	 * We know that if the value in irq_happened is exactly 0x01
226	 * then we are already hard disabled (there are other less
227	 * common cases that we'll ignore for now), so we skip the
228	 * (expensive) mtmsrd.
229	 */
230	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
231		__hard_irq_disable();
232#ifdef CONFIG_TRACE_IRQFLAGS
233	else {
234		/*
235		 * We should already be hard disabled here. We had bugs
236		 * where that wasn't the case so let's double-check it and
237		 * warn if we are wrong. Only do that when IRQ tracing
238		 * is enabled as mfmsr() can be costly.
239		 */
240		if (WARN_ON(mfmsr() & MSR_EE))
241			__hard_irq_disable();
242	}
243#endif /* CONFIG_TRACE_IRQFLAGS */
244
245	set_soft_enabled(0);
246
247	/*
248	 * Check if anything needs to be re-emitted. We haven't
249	 * soft-enabled yet to avoid warnings in decrementer_check_overflow
250	 * accessing per-cpu variables
251	 */
252	replay = __check_irq_replay();
253
254	/* We can soft-enable now */
255	set_soft_enabled(1);
256
257	/*
258	 * And replay if we have to. This will return with interrupts
259	 * hard-enabled.
260	 */
261	if (replay) {
262		__replay_interrupt(replay);
263		return;
264	}
265
266	/* Finally, let's ensure we are hard enabled */
267	__hard_irq_enable();
268}
269EXPORT_SYMBOL(arch_local_irq_restore);
270
271/*
272 * This is specifically called by assembly code to re-enable interrupts
273 * if they are currently disabled. This is typically called before
274 * schedule() or do_signal() when returning to userspace. We do it
275 * in C to avoid the burden of dealing with lockdep etc...
276 *
277 * NOTE: This is called with interrupts hard disabled but not marked
278 * as such in paca->irq_happened, so we need to resync this.
279 */
280void notrace restore_interrupts(void)
281{
282	if (irqs_disabled()) {
283		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
284		local_irq_enable();
285	} else
286		__hard_irq_enable();
287}
288
289/*
290 * This is a helper to use when about to go into idle low-power
291 * when the latter has the side effect of re-enabling interrupts
292 * (such as calling H_CEDE under pHyp).
293 *
294 * You call this function with interrupts soft-disabled (this is
295 * already the case when ppc_md.power_save is called). The function
296 * will return whether to enter power save or just return.
297 *
298 * In the former case, it will have notified lockdep of interrupts
299 * being re-enabled and generally sanitized the lazy irq state,
300 * and in the latter case it will leave with interrupts hard
301 * disabled and marked as such, so the local_irq_enable() call
302 * in cpu_idle() will properly re-enable everything.
303 */
304bool prep_irq_for_idle(void)
305{
306	/*
307	 * First we need to hard disable to ensure no interrupt
308	 * occurs before we effectively enter the low power state
309	 */
310	hard_irq_disable();
311
312	/*
313	 * If anything happened while we were soft-disabled,
314	 * we return now and do not enter the low power state.
315	 */
316	if (lazy_irq_pending())
317		return false;
318
319	/* Tell lockdep we are about to re-enable */
320	trace_hardirqs_on();
321
322	/*
323	 * Mark interrupts as soft-enabled and clear the
324	 * PACA_IRQ_HARD_DIS from the pending mask since we
325	 * are about to hard enable as well as a side effect
326	 * of entering the low power state.
327	 */
328	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
329	local_paca->soft_enabled = 1;
330
331	/* Tell the caller to enter the low power state */
332	return true;
333}
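
/*
 * Illustrative caller sketch (not part of this file): how a platform
 * ppc_md.power_save hook could use prep_irq_for_idle() as described in the
 * comment above.  sketch_enter_low_power() stands in for the firmware call
 * (e.g. H_CEDE) that re-enables interrupts as a side effect.
 */
extern void sketch_enter_low_power(void);

static void sketch_power_save(void)
{
	if (!prep_irq_for_idle())
		return;			/* an event is already pending; skip the nap */

	sketch_enter_low_power();	/* returns with interrupts hard-enabled */
}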
334
335#endif /* CONFIG_PPC64 */
336
337int arch_show_interrupts(struct seq_file *p, int prec)
338{
339	int j;
340
341#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
342	if (tau_initialized) {
343		seq_printf(p, "%*s: ", prec, "TAU");
344		for_each_online_cpu(j)
345			seq_printf(p, "%10u ", tau_interrupts(j));
346		seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
347	}
348#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
349
350	seq_printf(p, "%*s: ", prec, "LOC");
351	for_each_online_cpu(j)
352		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
353	seq_printf(p, "  Local timer interrupts\n");
354
355	seq_printf(p, "%*s: ", prec, "SPU");
356	for_each_online_cpu(j)
357		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
358	seq_printf(p, "  Spurious interrupts\n");
359
360	seq_printf(p, "%*s: ", prec, "CNT");
361	for_each_online_cpu(j)
362		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
363	seq_printf(p, "  Performance monitoring interrupts\n");
364
365	seq_printf(p, "%*s: ", prec, "MCE");
366	for_each_online_cpu(j)
367		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
368	seq_printf(p, "  Machine check exceptions\n");
369
370	return 0;
371}
372
373/*
374 * /proc/stat helpers
375 */
376u64 arch_irq_stat_cpu(unsigned int cpu)
377{
378	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
379
380	sum += per_cpu(irq_stat, cpu).pmu_irqs;
381	sum += per_cpu(irq_stat, cpu).mce_exceptions;
382	sum += per_cpu(irq_stat, cpu).spurious_irqs;
383
384	return sum;
385}
386
387#ifdef CONFIG_HOTPLUG_CPU
388void migrate_irqs(void)
389{
390	struct irq_desc *desc;
391	unsigned int irq;
392	static int warned;
393	cpumask_var_t mask;
394	const struct cpumask *map = cpu_online_mask;
395
396	alloc_cpumask_var(&mask, GFP_KERNEL);
397
398	for_each_irq_desc(irq, desc) {
399		struct irq_data *data;
400		struct irq_chip *chip;
401
402		data = irq_desc_get_irq_data(desc);
403		if (irqd_is_per_cpu(data))
404			continue;
405
406		chip = irq_data_get_irq_chip(data);
407
408		cpumask_and(mask, data->affinity, map);
409		if (cpumask_any(mask) >= nr_cpu_ids) {
410			printk("Breaking affinity for irq %i\n", irq);
411			cpumask_copy(mask, map);
412		}
413		if (chip->irq_set_affinity)
414			chip->irq_set_affinity(data, mask, true);
415		else if (desc->action && !(warned++))
416			printk("Cannot set affinity for irq %i\n", irq);
417	}
418
419	free_cpumask_var(mask);
420
421	local_irq_enable();
422	mdelay(1);
423	local_irq_disable();
424}
425#endif
426
427static inline void handle_one_irq(unsigned int irq)
428{
429	struct thread_info *curtp, *irqtp;
430	unsigned long saved_sp_limit;
431	struct irq_desc *desc;
432
433	desc = irq_to_desc(irq);
434	if (!desc)
435		return;
436
437	/* Switch to the irq stack to handle this */
438	curtp = current_thread_info();
439	irqtp = hardirq_ctx[smp_processor_id()];
440
441	if (curtp == irqtp) {
442		/* We're already on the irq stack, just handle it */
443		desc->handle_irq(irq, desc);
444		return;
445	}
446
447	saved_sp_limit = current->thread.ksp_limit;
448
449	irqtp->task = curtp->task;
450	irqtp->flags = 0;
451
452	/* Copy the softirq bits in preempt_count so that the
453	 * softirq checks work in the hardirq context. */
454	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
455			       (curtp->preempt_count & SOFTIRQ_MASK);
456
457	current->thread.ksp_limit = (unsigned long)irqtp +
458		_ALIGN_UP(sizeof(struct thread_info), 16);
459
460	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
461	current->thread.ksp_limit = saved_sp_limit;
462	irqtp->task = NULL;
463
464	/* Set any flag that may have been set on the
465	 * alternate stack
466	 */
467	if (irqtp->flags)
468		set_bits(irqtp->flags, &curtp->flags);
469}
470
471static inline void check_stack_overflow(void)
472{
473#ifdef CONFIG_DEBUG_STACKOVERFLOW
474	long sp;
475
476	sp = __get_SP() & (THREAD_SIZE-1);
477
478	/* check for stack overflow: is there less than 2KB free? */
479	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
480		printk("do_IRQ: stack overflow: %ld\n",
481			sp - sizeof(struct thread_info));
482		dump_stack();
483	}
484#endif
485}
486
487void do_IRQ(struct pt_regs *regs)
488{
489	struct pt_regs *old_regs = set_irq_regs(regs);
490	unsigned int irq;
491
492	trace_irq_entry(regs);
493
494	irq_enter();
495
496	check_stack_overflow();
497
498	/*
499	 * Query the platform PIC for the interrupt & ack it.
500	 *
501	 * This will typically lower the interrupt line to the CPU
502	 */
503	irq = ppc_md.get_irq();
504
505	/* We can hard enable interrupts now */
506	may_hard_irq_enable();
507
508	/* And finally process it */
509	if (irq != NO_IRQ)
510		handle_one_irq(irq);
511	else
512		__get_cpu_var(irq_stat).spurious_irqs++;
513
514	irq_exit();
515	set_irq_regs(old_regs);
516
517	trace_irq_exit(regs);
518}
519
520void __init init_IRQ(void)
521{
522	if (ppc_md.init_IRQ)
523		ppc_md.init_IRQ();
524
525	exc_lvl_ctx_init();
526
527	irq_ctx_init();
528}
529
530#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
531struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
532struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
533struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;
534
535void exc_lvl_ctx_init(void)
536{
537	struct thread_info *tp;
538	int i, cpu_nr;
539
540	for_each_possible_cpu(i) {
541#ifdef CONFIG_PPC64
542		cpu_nr = i;
543#else
544		cpu_nr = get_hard_smp_processor_id(i);
545#endif
546		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
547		tp = critirq_ctx[cpu_nr];
548		tp->cpu = cpu_nr;
549		tp->preempt_count = 0;
550
551#ifdef CONFIG_BOOKE
552		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
553		tp = dbgirq_ctx[cpu_nr];
554		tp->cpu = cpu_nr;
555		tp->preempt_count = 0;
556
557		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
558		tp = mcheckirq_ctx[cpu_nr];
559		tp->cpu = cpu_nr;
560		tp->preempt_count = HARDIRQ_OFFSET;
561#endif
562	}
563}
564#endif
565
566struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
567struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;
568
569void irq_ctx_init(void)
570{
571	struct thread_info *tp;
572	int i;
573
574	for_each_possible_cpu(i) {
575		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
576		tp = softirq_ctx[i];
577		tp->cpu = i;
578		tp->preempt_count = 0;
579
580		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
581		tp = hardirq_ctx[i];
582		tp->cpu = i;
583		tp->preempt_count = HARDIRQ_OFFSET;
584	}
585}
586
587static inline void do_softirq_onstack(void)
588{
589	struct thread_info *curtp, *irqtp;
590	unsigned long saved_sp_limit = current->thread.ksp_limit;
591
592	curtp = current_thread_info();
593	irqtp = softirq_ctx[smp_processor_id()];
594	irqtp->task = curtp->task;
595	irqtp->flags = 0;
596	current->thread.ksp_limit = (unsigned long)irqtp +
597				    _ALIGN_UP(sizeof(struct thread_info), 16);
598	call_do_softirq(irqtp);
599	current->thread.ksp_limit = saved_sp_limit;
600	irqtp->task = NULL;
601
602	/* Set any flag that may have been set on the
603	 * alternate stack
604	 */
605	if (irqtp->flags)
606		set_bits(irqtp->flags, &curtp->flags);
607}
608
609void do_softirq(void)
610{
611	unsigned long flags;
612
613	if (in_interrupt())
614		return;
615
616	local_irq_save(flags);
617
618	if (local_softirq_pending())
619		do_softirq_onstack();
620
621	local_irq_restore(flags);
622}
623
624irq_hw_number_t virq_to_hw(unsigned int virq)
625{
626	struct irq_data *irq_data = irq_get_irq_data(virq);
627	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
628}
629EXPORT_SYMBOL_GPL(virq_to_hw);
630
631#ifdef CONFIG_SMP
632int irq_choose_cpu(const struct cpumask *mask)
633{
634	int cpuid;
635
636	if (cpumask_equal(mask, cpu_online_mask)) {
637		static int irq_rover;
638		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
639		unsigned long flags;
640
641		/* Round-robin distribution... */
642do_round_robin:
643		raw_spin_lock_irqsave(&irq_rover_lock, flags);
644
645		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
646		if (irq_rover >= nr_cpu_ids)
647			irq_rover = cpumask_first(cpu_online_mask);
648
649		cpuid = irq_rover;
650
651		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
652	} else {
653		cpuid = cpumask_first_and(mask, cpu_online_mask);
654		if (cpuid >= nr_cpu_ids)
655			goto do_round_robin;
656	}
657
658	return get_hard_smp_processor_id(cpuid);
659}
660#else
661int irq_choose_cpu(const struct cpumask *mask)
662{
663	return hard_smp_processor_id();
664}
665#endif
666
667int arch_early_irq_init(void)
668{
669	return 0;
670}
671
672#ifdef CONFIG_PPC64
673static int __init setup_noirqdistrib(char *str)
674{
675	distribute_irqs = 0;
676	return 1;
677}
678
679__setup("noirqdistrib", setup_noirqdistrib);
680#endif /* CONFIG_PPC64 */