// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Code to handle x86 style IRQs plus some generic interrupt stuff.
 *
 * Copyright (C) 1992 Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
 * Copyright (C) 1999 SuSE GmbH (Philipp Rumpf, prumpf@tux.org)
 * Copyright (C) 1999-2000 Grant Grundler
 * Copyright (c) 2005 Matthew Wilcox
 */
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/types.h>
#include <asm/io.h>

#include <asm/smp.h>
#include <asm/ldcw.h>

#undef PARISC_IRQ_CR16_COUNTS

extern irqreturn_t timer_interrupt(int, void *);
extern irqreturn_t ipi_interrupt(int, void *);

#define EIEM_MASK(irq)	(1UL<<(CPU_IRQ_MAX - irq))
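
/*
 * Worked example (assuming, per <asm/irq.h>, that TIMER_IRQ is the first
 * CPU IRQ and CPU_IRQ_MAX == CPU_IRQ_BASE + BITS_PER_LONG - 1):
 *   EIEM_MASK(TIMER_IRQ)   == 1UL << (BITS_PER_LONG - 1)   (bit 0, the MSB)
 *   EIEM_MASK(CPU_IRQ_MAX) == 1UL << 0                     (the LSB)
 */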

/* Bits in EIEM correlate with cpu_irq_action[].
** Numbered *Big Endian*! (ie bit 0 is MSB)
*/
static volatile unsigned long cpu_eiem = 0;

/*
** local ACK bitmap ... habitually set to 1, but reset to zero
** between ->ack() and ->end() of the interrupt to prevent
** re-interruption of a processing interrupt.
*/
static DEFINE_PER_CPU(unsigned long, local_ack_eiem) = ~0UL;
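
/*
 * Note: the EIEM value actually written to the CPU is always
 * "cpu_eiem & per_cpu(local_ack_eiem, cpu)"; an IRQ is deliverable only
 * if it is both globally unmasked and not currently being serviced on
 * this CPU.  cpu_ack_irq() and cpu_eoi_irq() below maintain this invariant.
 */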

static void cpu_mask_irq(struct irq_data *d)
{
	unsigned long eirr_bit = EIEM_MASK(d->irq);

	cpu_eiem &= ~eirr_bit;
	/* Do nothing on the other CPUs. If they get this interrupt,
	 * the "& cpu_eiem" in do_cpu_irq_mask() ensures they won't
	 * handle it, and the set_eiem() at the bottom will ensure it
	 * then gets disabled */
}

static void __cpu_unmask_irq(unsigned int irq)
{
	unsigned long eirr_bit = EIEM_MASK(irq);

	cpu_eiem |= eirr_bit;

	/* This is just a simple NOP IPI.  But what it does is cause
	 * all the other CPUs to do a set_eiem(cpu_eiem) at the end
	 * of the interrupt handler */
	smp_send_all_nop();
}

static void cpu_unmask_irq(struct irq_data *d)
{
	__cpu_unmask_irq(d->irq);
}

void cpu_ack_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* Clear in EIEM so we can no longer process */
	per_cpu(local_ack_eiem, cpu) &= ~mask;

	/* disable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));

	/* and now ack it */
	mtctl(mask, 23);
}

void cpu_eoi_irq(struct irq_data *d)
{
	unsigned long mask = EIEM_MASK(d->irq);
	int cpu = smp_processor_id();

	/* set it in the eiems---it's no longer in process */
	per_cpu(local_ack_eiem, cpu) |= mask;

	/* enable the interrupt */
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
}
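
/*
 * The ack/eoi pair above is driven by handle_percpu_irq(): the core calls
 * ->irq_ack() before running the handler and ->irq_eoi() afterwards, so a
 * CPU IRQ stays blocked on this CPU for exactly as long as it is in service.
 */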

#ifdef CONFIG_SMP
int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
{
	int cpu_dest;

	/* timer and ipi have to always be received on all CPUs */
	if (irqd_is_per_cpu(d))
		return -EINVAL;

	/* whatever mask they set, we just allow one CPU */
	cpu_dest = cpumask_next_and(d->irq & (num_online_cpus()-1),
					dest, cpu_online_mask);
	if (cpu_dest >= nr_cpu_ids)
		cpu_dest = cpumask_first_and(dest, cpu_online_mask);

	return cpu_dest;
}

static int cpu_set_affinity_irq(struct irq_data *d, const struct cpumask *dest,
				bool force)
{
	int cpu_dest;

	cpu_dest = cpu_check_affinity(d, dest);
	if (cpu_dest < 0)
		return -1;

	cpumask_copy(irq_data_get_affinity_mask(d), dest);

	return 0;
}
#endif

static struct irq_chip cpu_interrupt_type = {
	.name			= "CPU",
	.irq_mask		= cpu_mask_irq,
	.irq_unmask		= cpu_unmask_irq,
	.irq_ack		= cpu_ack_irq,
	.irq_eoi		= cpu_eoi_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= cpu_set_affinity_irq,
#endif
	/* XXX: Needs to be written.  We managed without it so far, but
	 * we really ought to write it.
	 */
	.irq_retrigger		= NULL,
};

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define irq_stats(x)	(&per_cpu(irq_stat, x))

/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	seq_printf(p, "%*s: ", prec, "STK");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
	seq_puts(p, "  Kernel stack usage\n");
# ifdef CONFIG_IRQSTACKS
	seq_printf(p, "%*s: ", prec, "IST");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
	seq_puts(p, "  Interrupt stack usage\n");
# endif
#endif
#ifdef CONFIG_SMP
	if (num_online_cpus() > 1) {
		seq_printf(p, "%*s: ", prec, "RES");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
		seq_puts(p, "  Rescheduling interrupts\n");
		seq_printf(p, "%*s: ", prec, "CAL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
		seq_puts(p, "  Function call interrupts\n");
	}
#endif
	seq_printf(p, "%*s: ", prec, "UAH");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
	seq_puts(p, "  Unaligned access handler traps\n");
	seq_printf(p, "%*s: ", prec, "FPA");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
	seq_puts(p, "  Floating point assist traps\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
	return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	unsigned long flags;

	if (i == 0) {
		seq_puts(p, "    ");
		for_each_online_cpu(j)
			seq_printf(p, "       CPU%d", j);

#ifdef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, " [min/avg/max] (CPU cycle counts)");
#endif
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irq_desc *desc = irq_to_desc(i);
		struct irqaction *action;

		raw_spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#else
		seq_printf(p, "%10u ", kstat_irqs(i));
#endif

		seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
#ifndef PARISC_IRQ_CR16_COUNTS
		seq_printf(p, "  %s", action->name);

		while ((action = action->next))
			seq_printf(p, ", %s", action->name);
#else
		for ( ; action; action = action->next) {
			unsigned int k, avg, min, max;

			min = max = action->cr16_hist[0];

			for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) {
				int hist = action->cr16_hist[k];

				if (hist) {
					avg += hist;
				} else
					break;

				if (hist > max) max = hist;
				if (hist < min) min = hist;
			}

			avg /= k;
			seq_printf(p, " %s[%d/%d/%d]", action->name,
				   min, avg, max);
		}
#endif

		seq_putc(p, '\n');
 skip:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	if (i == NR_IRQS)
		arch_show_interrupts(p, 3);

	return 0;
}


/*
** The following form a "set": Virtual IRQ, Transaction Address, Trans Data.
** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit.
**
** To use txn_XXX() interfaces, get a Virtual IRQ first.
** Then use that to get the Transaction address and data.
*/

int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
{
	if (irq_has_action(irq))
		return -EBUSY;
	if (irq_get_chip(irq) != &cpu_interrupt_type)
		return -EBUSY;

	/* for iosapic interrupts */
	if (type) {
		irq_set_chip_and_handler(irq, type, handle_percpu_irq);
		irq_set_chip_data(irq, data);
		__cpu_unmask_irq(irq);
	}
	return 0;
}

int txn_claim_irq(int irq)
{
	return cpu_claim_irq(irq, NULL, NULL) ? -1 : irq;
}

/*
 * The bits_wide parameter accommodates the limitations of the HW/SW which
 * use these bits:
 * Legacy PA I/O (GSC/NIO):  5 bits (architected EIM register)
 * V-class (EPIC):           6 bits
 * N/L/A-class (iosapic):    8 bits
 * PCI 2.2 MSI:             16 bits
 * Some PCI devices:        32 bits (Symbios SCSI/ATM/HyperFabric)
 *
 * On the service provider side:
 * o PA 1.1 (and PA2.0 narrow mode)  5 bits (width of EIR register)
 * o PA 2.0 wide mode                6 bits (per processor)
 * o IA64                            8 bits (256 total)
 *
 * So a Legacy PA I/O device on a PA 2.0 box can't use all the bits supported
 * by the processor...and the N/L-class I/O subsystem supports more bits than
 * PA2.0 has. The first case is the problem.
 */
int txn_alloc_irq(unsigned int bits_wide)
{
	int irq;

	/* never return irq 0 because that's the interval timer */
	for (irq = CPU_IRQ_BASE + 1; irq <= CPU_IRQ_MAX; irq++) {
		if (cpu_claim_irq(irq, NULL, NULL) < 0)
			continue;
		if ((irq - CPU_IRQ_BASE) >= (1 << bits_wide))
			continue;
		return irq;
	}

	/* unlikely, but be prepared */
	return -1;
}
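
/*
 * Usage sketch (illustrative only, not a specific in-tree caller): a
 * driver for a device with a 5-bit EIM field would typically do
 *
 *	irq  = txn_alloc_irq(5);	(virtual IRQ)
 *	addr = txn_alloc_addr(irq);	(processor HPA the device writes to)
 *	data = txn_alloc_data(irq);	(EIRR bit number to write)
 *
 * and program the device so that writing "data" to "addr" raises the
 * interrupt, per the Virtual IRQ / Transaction Address / Trans Data
 * "set" described above.
 */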


unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
	struct irq_data *d = irq_get_irq_data(irq);
	cpumask_copy(irq_data_get_affinity_mask(d), cpumask_of(cpu));
#endif

	return per_cpu(cpu_data, cpu).txn_addr;
}


unsigned long txn_alloc_addr(unsigned int virt_irq)
{
	static int next_cpu = -1;

	next_cpu++; /* assign to "next" CPU we want this bugger on */

	/* validate entry */
	while ((next_cpu < nr_cpu_ids) &&
		(!per_cpu(cpu_data, next_cpu).txn_addr ||
		 !cpu_online(next_cpu)))
		next_cpu++;

	if (next_cpu >= nr_cpu_ids)
		next_cpu = 0;	/* nothing else, assign monarch */

	return txn_affinity_addr(virt_irq, next_cpu);
}


unsigned int txn_alloc_data(unsigned int virt_irq)
{
	return virt_irq - CPU_IRQ_BASE;
}

static inline int eirr_to_irq(unsigned long eirr)
{
	int bit = fls_long(eirr);
	return (BITS_PER_LONG - bit) + TIMER_IRQ;
}
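
/*
 * Worked example: with only the MSB of the EIRR set, fls_long() returns
 * BITS_PER_LONG, so this yields TIMER_IRQ.  That is the inverse of
 * EIEM_MASK(), which (assuming TIMER_IRQ is the first CPU IRQ, per
 * <asm/irq.h>) maps TIMER_IRQ to the MSB.
 */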

#ifdef CONFIG_IRQSTACKS
/*
 * IRQ STACK - used for irq handler
 */
#define IRQ_STACK_SIZE      (4096 << 3) /* 32k irq stack size */

union irq_stack_union {
	unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
	volatile unsigned int slock[4];
	volatile unsigned int lock[1];
};
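
/*
 * Note: slock[4] overlays the bottom 16 bytes of the stack.  The ldcw
 * instruction on pre-PA2.0 CPUs requires a 16-byte aligned word, so a
 * four-word array guarantees __ldcw_align() can always find one (see
 * <asm/ldcw.h>).  The word doubles as the "stack in use" flag and is
 * initialized to 1 (free) below.
 */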

DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
		.slock = { 1,1,1,1 },
	};
#endif


int sysctl_panic_on_stackoverflow = 1;
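
/*
 * Semantics used below: 1 means panic on a detected overflow, 0 means
 * warn only, and the value is set to -1 once a panic has been triggered
 * so that further checks are skipped.
 */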

static inline void stack_overflow_check(struct pt_regs *regs)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	#define STACK_MARGIN	(256*6)

	/* Our stack starts directly behind the thread_info struct. */
	unsigned long stack_start = (unsigned long) current_thread_info();
	unsigned long sp = regs->gr[30];
	unsigned long stack_usage;
	unsigned int *last_usage;
	int cpu = smp_processor_id();

	/* if sr7 != 0, we interrupted a userspace process which we do not want
	 * to check for stack overflow. We will only check the kernel stack. */
	if (regs->sr[7])
		return;

	/* exit if already in panic */
	if (sysctl_panic_on_stackoverflow < 0)
		return;

	/* calculate kernel stack usage */
	stack_usage = sp - stack_start;
#ifdef CONFIG_IRQSTACKS
	if (likely(stack_usage <= THREAD_SIZE))
		goto check_kernel_stack; /* found kernel stack */

	/* check irq stack usage */
	stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
	stack_usage = sp - stack_start;

	last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow irq stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
	goto panic_check;

check_kernel_stack:
#endif

	/* check kernel stack usage */
	last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);

	if (unlikely(stack_usage > *last_usage))
		*last_usage = stack_usage;

	if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
		return;

	pr_emerg("stackcheck: %s will most likely overflow kernel stack "
		 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
		current->comm, sp, stack_start, stack_start + THREAD_SIZE);

#ifdef CONFIG_IRQSTACKS
panic_check:
#endif
	if (sysctl_panic_on_stackoverflow) {
		sysctl_panic_on_stackoverflow = -1; /* disable further checks */
		panic("low stack detected by irq handler - check messages\n");
	}
#endif
}

#ifdef CONFIG_IRQSTACKS
/* in entry.S: */
void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);

static void execute_on_irq_stack(void *func, unsigned long param1)
{
	union irq_stack_union *union_ptr;
	unsigned long irq_stack;
	volatile unsigned int *irq_stack_in_use;

	union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
	irq_stack = (unsigned long) &union_ptr->stack;
	irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
			 64); /* align for stack frame usage */

	/* We may be called recursively.  If we are already using the irq
	 * stack, just continue to use it.  Use spinlocks to serialize
	 * the irq stack usage.
	 */
	irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
	if (!__ldcw(irq_stack_in_use)) {
		void (*direct_call)(unsigned long p1) = func;

		/* We are using the IRQ stack already.
		 * Do direct call on current stack. */
		direct_call(param1);
		return;
	}

	/* This is where we switch to the IRQ stack. */
	call_on_stack(param1, func, irq_stack);

	/* free up irq stack usage. */
	*irq_stack_in_use = 1;
}
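
/*
 * The lock protocol above relies on ldcw's semantics: it atomically loads
 * the word and writes zero back, so a return value of 0 means some context
 * on this CPU already owns the irq stack and we fall back to a direct call
 * on the current stack.  Storing 1 afterwards marks the stack free again.
 */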

void do_softirq_own_stack(void)
{
	execute_on_irq_stack(__do_softirq, 0);
}
#endif /* CONFIG_IRQSTACKS */

/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	unsigned long eirr_val;
	int irq, cpu = smp_processor_id();
	struct irq_data *irq_data;
#ifdef CONFIG_SMP
	cpumask_t dest;
#endif

	old_regs = set_irq_regs(regs);
	local_irq_disable();
	irq_enter();

	eirr_val = mfctl(23) & cpu_eiem & per_cpu(local_ack_eiem, cpu);
	if (!eirr_val)
		goto set_out;
	irq = eirr_to_irq(eirr_val);

	irq_data = irq_get_irq_data(irq);

	/* Filter out spurious interrupts, mostly from serial port at bootup */
	if (unlikely(!irq_desc_has_action(irq_data_to_desc(irq_data))))
		goto set_out;

#ifdef CONFIG_SMP
	cpumask_copy(&dest, irq_data_get_affinity_mask(irq_data));
	if (irqd_is_per_cpu(irq_data) &&
	    !cpumask_test_cpu(smp_processor_id(), &dest)) {
		int cpu = cpumask_first(&dest);

		printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
		       irq, smp_processor_id(), cpu);
		gsc_writel(irq + CPU_IRQ_BASE,
			per_cpu(cpu_data, cpu).hpa);
		goto set_out;
	}
#endif
	stack_overflow_check(regs);

#ifdef CONFIG_IRQSTACKS
	execute_on_irq_stack(&generic_handle_irq, irq);
#else
	generic_handle_irq(irq);
#endif /* CONFIG_IRQSTACKS */

 out:
	irq_exit();
	set_irq_regs(old_regs);
	return;

 set_out:
	set_eiem(cpu_eiem & per_cpu(local_ack_eiem, cpu));
	goto out;
}
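
/*
 * Note that the normal path needs no explicit set_eiem() on exit:
 * generic_handle_irq() runs handle_percpu_irq(), whose ->irq_eoi()
 * (cpu_eoi_irq() for CPU interrupts) already rewrites the EIEM.  Only
 * the early-exit paths above restore it by hand via set_out.
 */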

static struct irqaction timer_action = {
	.handler = timer_interrupt,
	.name = "timer",
	.flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
};

#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
	.handler = ipi_interrupt,
	.name = "IPI",
	.flags = IRQF_PERCPU,
};
#endif

static void claim_cpu_irqs(void)
{
	int i;
	for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
		irq_set_chip_and_handler(i, &cpu_interrupt_type,
					 handle_percpu_irq);
	}

	irq_set_handler(TIMER_IRQ, handle_percpu_irq);
	setup_irq(TIMER_IRQ, &timer_action);
#ifdef CONFIG_SMP
	irq_set_handler(IPI_IRQ, handle_percpu_irq);
	setup_irq(IPI_IRQ, &ipi_action);
#endif
}

void __init init_IRQ(void)
{
	local_irq_disable();	/* PARANOID - should already be disabled */
	mtctl(~0UL, 23);	/* EIRR : clear all pending external intr */
#ifdef CONFIG_SMP
	if (!cpu_eiem) {
		claim_cpu_irqs();
		cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ);
	}
#else
	claim_cpu_irqs();
	cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
	set_eiem(cpu_eiem);	/* EIEM : enable all external intr */
}