arch/ia64/kernel/irq_ia64.c (Linux v5.9)
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *                      PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *						Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>
#include <linux/pgtable.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)
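/*
 * irq_status[] life cycle: an irq starts out IRQ_UNUSED, becomes
 * IRQ_USED once a vector is bound to it, and destroy_and_reserve_irq()
 * parks it as IRQ_RSVD so that it is never handed out again.
 */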

int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);

DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = IRQ_UNUSED
};

static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

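/*
 * Find a device vector that is not yet in use on any cpu in @domain.
 * vector_table[] records, for each vector, the set of cpus it is bound
 * on, so a vector is free for @domain iff that set is disjoint from
 * @domain.
 */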
static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpumask_and(&mask, &domain, &vector_table[vector]);
		if (!cpumask_empty(&mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}

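/*
 * Bind @irq to @vector on every online cpu in @domain.  Must be called
 * with vector_lock held; bind_irq_vector() below is the locking
 * wrapper.
 */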
static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu(cpu, &mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
	return 0;
}

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

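/*
 * Pick the first online cpu whose allocation domain still has a free
 * vector and bind @irq (or, for AUTO_ASSIGN, the vector number itself)
 * to it.
 */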
int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}

void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}

int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}

/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the in-use vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#ifdef CONFIG_SMP

static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return *cpumask_of(cpu);
	return CPU_MASK_ALL;
}

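/*
 * First half of irq migration: allocate and bind a fresh vector in
 * @cpu's allocation domain, remembering the old domain so that the
 * cleanup IPI below can retire the old vector once no cpu uses it
 * any more.
 */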
static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpumask_test_cpu(cpu, &cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

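/*
 * Second half of irq migration: once an interrupt arrives outside the
 * old domain, kick every online cpu of the old domain with
 * IA64_IRQ_MOVE_VECTOR so that each one clears its stale vector_irq
 * entry in smp_irq_move_cleanup_interrupt().
 */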
void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
	for_each_cpu(i, &cleanup_mask)
		ia64_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}

static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __this_cpu_read(vector_irq[vector]);
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpumask_test_cpu(me, &cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__this_cpu_write(vector_irq[vector], -1);
		cpumask_clear_cpu(me, &vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}

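/*
 * Booting with "vector=percpu" selects per-cpu vector allocation
 * domains and disables interrupt routing, so every irq is bound on
 * exactly one cpu instead of across CPU_MASK_ALL.
 */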
static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif

void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	irq_init_desc(irq);
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic irq allocation and deallocation for MSI
 */
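/*
 * Typical usage (sketch): an MSI driver obtains a dynamically
 * allocated irq with create_irq(), attaches a handler with
 * request_irq(), and later tears it down with free_irq() followed by
 * destroy_irq().
 */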
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		irq_init_desc(irq);
	return irq;
}

void destroy_irq(unsigned int irq)
{
	irq_init_desc(irq);
	clear_irq_vector(irq);
}

#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#	define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#	define IS_RESCHEDULE(vec)	(0)
#	define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif
/*
 * This is where the IVT branches to when we get an external interrupt.
 * It dispatches to the correct hardware IRQ handler via a function
 * pointer.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here.  This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=0x%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irq_this_cpu(irq);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a cpu is about to
 * be brought down.
 */
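/*
 * Interrupts already latched in this cpu's local SAPIC would otherwise
 * be lost once the cpu goes offline, so they are drained and handled
 * here.
 */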
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	 /*
	  * Perform normal interrupt style processing
	  */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irq_this_cpu(irq);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Call the normal irq handling path as it would have
			 * been invoked from a real interrupt handler, passing
			 * NULL for pt_regs.  This could probably share code
			 * with ia64_handle_irq().
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif

#ifdef CONFIG_SMP
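/*
 * Reschedule and local-TLB-flush IPIs are handled inline in
 * ia64_handle_irq() and never reach a registered handler, so it is a
 * bug if this handler ever runs.
 */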
static irqreturn_t dummy_handler (int irq, void *dev_id)
{
	BUG();
	return IRQ_NONE;
}

/*
 * KVM uses this interrupt to force a cpu out of guest mode
 */

#endif

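/*
 * Per-cpu irqs use the vector number itself as the irq number, are
 * bound on all cpus (CPU_MASK_ALL), and are driven by the LSAPIC chip
 * with the handle_percpu_irq flow handler.
 */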
void
register_percpu_irq(ia64_vector vec, irq_handler_t handler, unsigned long flags,
		    const char *name)
{
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	irq_set_status_flags(irq, IRQ_PER_CPU);
	irq_set_chip(irq, &irq_type_ia64_lsapic);
	if (handler)
		if (request_irq(irq, handler, flags, name, NULL))
			pr_err("Failed to request irq %u (%s)\n", irq, name);
	irq_set_handler(irq, handle_percpu_irq);
}

void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, handle_IPI, 0, "IPI");
	register_percpu_irq(IA64_IPI_RESCHEDULE, dummy_handler, 0, "resched");
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, dummy_handler, 0,
			    "tlb_flush");
#endif
}

void __init
init_IRQ (void)
{
	acpi_boot_init();
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL, 0, NULL);
#ifdef CONFIG_SMP
	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR,
				    smp_irq_move_cleanup_interrupt, 0,
				    "irq_move");
	}
#endif
#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif
}

void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * The physical cpu number is encoded as an 8-bit ID and an
	 * 8-bit EID.
	 */

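	/*
	 * Worked example (hypothetical values): for phys_cpu_id 0x0102,
	 * delivery_mode IA64_IPI_DM_INT and vector 0xd0 with no
	 * redirection, ipi_data = (IA64_IPI_DM_INT << 8) | 0xd0 and the
	 * write lands at ipi_base_addr + (0x0102 << 4), i.e. one
	 * 16-byte slot per processor, with bit 3 selecting redirection.
	 */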
	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}