/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *		PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *		Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/random.h>	/* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED	(0)
#define IRQ_USED	(1)
#define IRQ_RSVD	(2)

/* These can be overridden in platform_irq_init */
int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);
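
/*
 * Editor's note (not in the original file): in the table above the index
 * is the legacy 8259 IRQ and the value is the IA-64 vector, so ISA IRQ 0
 * (the PIT timer) maps to vector 0x2f, ISA IRQ 1 (keyboard) to 0x20, and
 * so on.  The isa_irq_to_vector() helper in <asm/hw_irq.h> is assumed to
 * be the accessor that indexes this map.
 */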

DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = IRQ_UNUSED
};

int check_irq_used(int irq)
{
	if (irq_status[irq] == IRQ_USED)
		return 1;

	return -1;
}

static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpus_and(mask, domain, vector_table[vector]);
		if (!cpus_empty(mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}

static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpus_and(mask, domain, cpu_online_map);
	if (cpus_empty(mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpus_or(vector_table[vector], vector_table[vector], domain);
	return 0;
}

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}
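
/*
 * Editor's sketch (not in the original file): binding a fixed vector to
 * an IRQ across all CPUs, as the per-CPU registration path near the end
 * of this file does:
 *
 *	if (bind_irq_vector(irq, vec, CPU_MASK_ALL))
 *		printk(KERN_ERR "vector %d already bound\n", vec);
 *
 * The return value is 0 on success, -EBUSY if the IRQ already has a
 * vector, or -EINVAL if no CPU in the domain is online.
 */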

static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t mask;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpus_andnot(vector_table[vector], vector_table[vector], domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}
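
/*
 * Editor's sketch (not in the original file): callers that do not care
 * which IRQ number they get pass AUTO_ASSIGN, in which case the IRQ
 * number equals the allocated vector.  Assuming assign_irq_vector() is
 * the machvec/paravirt wrapper that reaches this function on native
 * hardware:
 *
 *	int vector = assign_irq_vector(AUTO_ASSIGN);
 *	if (vector < 0)
 *		return vector;	(-ENOSPC once every device vector is taken)
 */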

void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}

int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}

/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the in-use vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpu_isset(cpu, irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))

static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return cpumask_of_cpu(cpu);
	return CPU_MASK_ALL;
}

static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpu_isset(cpu, cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpu_isset(smp_processor_id(), cfg->old_domain)))
		return;

	cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
	cfg->move_cleanup_count = cpus_weight(cleanup_mask);
	for_each_cpu_mask(i, cleanup_mask)
		platform_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}
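
/*
 * Editor's summary of the migration protocol implemented above and in
 * smp_irq_move_cleanup_interrupt() below (paraphrase, not from the
 * original source):
 *
 *   1. irq_prepare_move() allocates a fresh vector in the target CPU's
 *      domain and remembers the old one in cfg->old_domain.
 *   2. The first interrupt taken on the new vector calls
 *      irq_complete_move(), which IPIs every online CPU in the old
 *      domain with IA64_IRQ_MOVE_VECTOR.
 *   3. Each of those CPUs runs the cleanup handler, retires its stale
 *      vector_irq and vector_table entries, and decrements
 *      move_cleanup_count so the old vector can eventually be reused.
 */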

static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpu_isset(me, cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__get_cpu_var(vector_irq)[vector] = -1;
		cpu_clear(me, vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}

static struct irqaction irq_move_irqaction = {
	.handler = smp_irq_move_cleanup_interrupt,
	.flags = IRQF_DISABLED,
	.name = "irq_move"
};

static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
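
/*
 * Editor's note (not in the original file): per the early_param above,
 * passing
 *
 *	vector=percpu
 *
 * on the kernel command line selects per-CPU vector domains, so each CPU
 * gets its own vector space instead of one shared CPU_MASK_ALL domain,
 * at the cost of disabling interrupt routing (no_int_routing = 1).
 */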
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif


void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic IRQ allocation and deallocation for MSI
 */
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		dynamic_irq_init(irq);
	return irq;
}

void destroy_irq(unsigned int irq)
{
	dynamic_irq_cleanup(irq);
	clear_irq_vector(irq);
}
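
/*
 * Editor's sketch (not in the original file): the MSI setup path is
 * assumed to pair these as
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;
 *	...program the MSI message using irq_to_vector(irq)...
 *	destroy_irq(irq);	(on teardown)
 */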

#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#	define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#	define IS_RESCHEDULE(vec)	(0)
#	define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif
/*
 * This is where the IVT branches when we get an external
 * interrupt. It then dispatches to the correct hardware IRQ handler
 * via a function pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here. This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=0x%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard
	 * softirq handler needs to be able to wait for further keyboard
	 * interrupts, which can't come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a CPU is about to be
 * brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Dispatch the interrupt as ia64_handle_irq() would
			 * have from a real interrupt handler, passing NULL
			 * for pt_regs since no trap frame is available here.
			 * This could probably share code with ia64_handle_irq().
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif


#ifdef CONFIG_SMP

static irqreturn_t dummy_handler (int irq, void *dev_id)
{
	BUG();
}

static struct irqaction ipi_irqaction = {
	.handler = handle_IPI,
	.flags = IRQF_DISABLED,
	.name = "IPI"
};

/*
 * KVM uses this interrupt to force a cpu out of guest mode
 */
static struct irqaction resched_irqaction = {
	.handler = dummy_handler,
	.flags = IRQF_DISABLED,
	.name = "resched"
};

static struct irqaction tlb_irqaction = {
	.handler = dummy_handler,
	.flags = IRQF_DISABLED,
	.name = "tlb_flush"
};

#endif

void
ia64_native_register_percpu_irq (ia64_vector vec, struct irqaction *action)
{
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	irq_set_status_flags(irq, IRQ_PER_CPU);
	irq_set_chip(irq, &irq_type_ia64_lsapic);
	if (action)
		setup_irq(irq, action);
	irq_set_handler(irq, handle_percpu_irq);
}
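
/*
 * Editor's note (not in the original file): per-CPU vectors are wired up
 * through register_percpu_irq(), assumed to be the machvec-dispatched
 * wrapper that lands here on native hardware, e.g. as in
 * ia64_native_register_ipi() below:
 *
 *	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
 */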

void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
	register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &tlb_irqaction);
#endif
}

void __init
init_IRQ (void)
{
#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
	if (vector_domain_type != VECTOR_DOMAIN_NONE)
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
#endif
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
	platform_irq_init();
}

void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * The CPU number is encoded as an 8-bit ID and an 8-bit EID.
	 */
	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}
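
/*
 * Editor's worked example (not in the original file), tracing the
 * encoding above: sending vector 0xd0 with delivery mode IA64_IPI_DM_INT
 * to a CPU whose physical id is 0x0304, without redirection, gives
 *
 *	ipi_data = (IA64_IPI_DM_INT << 8) | 0xd0
 *	ipi_addr = ipi_base_addr + (0x0304 << 4)	(redirect bit 3 clear)
 *
 * i.e. the target processor is selected by the address written into the
 * IPI block, while the delivery mode and vector travel in the data.
 */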