// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *			PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *			Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>
#include <linux/pgtable.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)

int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);
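
/*
 * For illustration: read through the isa_irq_to_vector() wrapper, ISA
 * IRQ 0 (the legacy timer) maps to 0x2f, the highest vector of the
 * block above, while ISA IRQ 1 maps to 0x20, the lowest. On IA-64 a
 * higher vector number means a higher delivery priority, so the timer
 * ends up with the highest priority of the legacy devices.
 */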

DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = IRQ_UNUSED
};

static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpumask_and(&mask, &domain, &vector_table[vector]);
		if (!cpumask_empty(&mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}
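
/*
 * Note on the search above: vector_table[vector] is the set of CPUs on
 * which that vector is already in use, so a vector is free for `domain'
 * exactly when the two masks do not intersect; the first such vector in
 * the device range wins.
 */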

static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu(cpu, &mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
	return 0;
}
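
/*
 * After a successful bind, three structures agree: irq_cfg[irq] holds
 * the (vector, domain) pair, the per-CPU vector_irq[vector] entry points
 * back at irq on every online CPU of the domain, and vector_table[vector]
 * records the domain so that find_unassigned_vector() skips it.
 */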

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}

void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}

int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}
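
/*
 * A sketch of typical use (hypothetical caller): ia64_native_assign_irq_vector()
 * picks both the vector and its CPU domain, while reserve_irq_vector() pins a
 * known vector one-to-one with the IRQ of the same number.
 *
 *	int vec = ia64_native_assign_irq_vector(AUTO_ASSIGN);
 *	if (vec < 0)
 *		return vec;
 *	...
 *	ia64_native_free_irq_vector(vec);
 */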

/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the in-use vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#ifdef CONFIG_SMP

static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return *cpumask_of(cpu);
	return CPU_MASK_ALL;
}

static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpumask_test_cpu(cpu, &cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
	for_each_cpu(i, &cleanup_mask)
		ia64_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}
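
/*
 * The move is a two-phase protocol: irq_prepare_move() binds the IRQ to
 * a fresh vector on the target CPU while remembering the old domain, and
 * irq_complete_move(), once the IRQ has fired outside that old domain,
 * IPIs every online CPU of the old domain so each can retire its stale
 * vector_irq entry in the cleanup handler below.
 */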

static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __this_cpu_read(vector_irq[vector]);
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpumask_test_cpu(me, &cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__this_cpu_write(vector_irq[vector], -1);
		cpumask_clear_cpu(me, &vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}

static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
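
/*
 * Usage note: booting with "vector=percpu" selects per-CPU allocation
 * domains, so a vector is consumed on one CPU only instead of on all of
 * them; this effectively multiplies the pool of device vectors at the
 * cost of disabling interrupt routing (no_int_routing above).
 */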
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif


void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	irq_init_desc(irq);
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic IRQ allocation and deallocation for MSI
 */
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		irq_init_desc(irq);
	return irq;
}
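
/*
 * Sketch of the intended pairing (hypothetical caller): an MSI setup path
 * allocates an IRQ plus vector with create_irq(), programs the device's
 * MSI address/data from the resulting irq_cfg, and later tears everything
 * down with destroy_irq().
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;
 *	...
 *	destroy_irq(irq);
 */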

void destroy_irq(unsigned int irq)
{
	irq_init_desc(irq);
	clear_irq_vector(irq);
}

#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#	define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#	define IS_RESCHEDULE(vec)	(0)
#	define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif
/*
 * This is where the IVT branches when we get an external interrupt.
 * It dispatches to the correct hardware IRQ handler via a function
 * pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here. This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
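	/*
	 * (A sketch of the mechanism, from the architecture rather than
	 * this file: writing the in-service vector to CR.TPR sets the
	 * "mic" field to that vector's priority class, vector/16, so only
	 * interrupts of a strictly higher class are delivered while the
	 * handler runs; with 16 classes the nesting depth is bounded
	 * accordingly.)
	 */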
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irq_this_cpu(irq);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi(). For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a cpu is about to
 * be brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irq_this_cpu(irq);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Call the handler as it would have been called from
			 * a real interrupt handler, passing NULL for pt_regs.
			 * This code could probably be shared with
			 * ia64_handle_irq().
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif


#ifdef CONFIG_SMP

static irqreturn_t dummy_handler (int irq, void *dev_id)
{
	BUG();
	return IRQ_NONE;
}

/*
 * KVM uses the reschedule interrupt (registered with dummy_handler in
 * ia64_native_register_ipi() below) to force a cpu out of guest mode.
 */

#endif

void
register_percpu_irq(ia64_vector vec, irq_handler_t handler, unsigned long flags,
		    const char *name)
{
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	irq_set_status_flags(irq, IRQ_PER_CPU);
	irq_set_chip(irq, &irq_type_ia64_lsapic);
	if (handler)
		if (request_irq(irq, handler, flags, name, NULL))
			pr_err("Failed to request irq %u (%s)\n", irq, name);
	irq_set_handler(irq, handle_percpu_irq);
}

void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, handle_IPI, 0, "IPI");
	register_percpu_irq(IA64_IPI_RESCHEDULE, dummy_handler, 0, "resched");
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, dummy_handler, 0,
			    "tlb_flush");
#endif
}

void __init
init_IRQ (void)
{
	acpi_boot_init();
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL, 0, NULL);
#ifdef CONFIG_SMP
	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR,
				    smp_irq_move_cleanup_interrupt, 0,
				    "irq_move");
	}
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
}

void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * The CPU number is encoded as an 8-bit ID and an 8-bit EID.
	 */

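	/*
	 * Layout, as assembled below: the data word carries the delivery
	 * mode in bits 8-10 and the vector in bits 0-7, while the target
	 * address indexes the IPI block by physical ID/EID (shifted into
	 * bits 12-19 and 4-11) with the redirect hint in bit 3.
	 */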
	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}