v5.14.15
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/ia64/kernel/irq_ia64.c
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 *  6/10/99: Updated to bring in sync with x86 version to facilitate
 *	     support for SMP and different interrupt controllers.
 *
 * 09/15/00 Goutham Rao <goutham.rao@intel.com> Implemented pci_irq_to_vector
 *                      PCI to vector allocation routine.
 * 04/14/2004 Ashok Raj <ashok.raj@intel.com>
 *						Added CPU Hotplug handling for IPF.
 */

#include <linux/module.h>
#include <linux/pgtable.h>

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/acpi.h>
#include <linux/sched.h>

#include <asm/delay.h>
#include <asm/intrinsics.h>
#include <asm/io.h>
#include <asm/hw_irq.h>
#include <asm/tlbflush.h>

#define IRQ_DEBUG	0

#define IRQ_VECTOR_UNASSIGNED	(0)

#define IRQ_UNUSED		(0)
#define IRQ_USED		(1)
#define IRQ_RSVD		(2)

int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;

/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
			       (__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));

static cpumask_t vector_allocation_domain(int cpu);

/*
 * Legacy IRQ to IA-64 vector translation table.
 */
__u8 isa_irq_to_vector_map[16] = {
	/* 8259 IRQ translation, first 16 entries */
	0x2f, 0x20, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21
};
EXPORT_SYMBOL(isa_irq_to_vector_map);
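
/*
 * Illustrative use of the table above: ISA IRQ 0 (the legacy timer)
 * maps to vector 0x2f and ISA IRQ 1 (keyboard) to vector 0x20.
 * Callers normally go through the isa_irq_to_vector() helper from
 * <asm/hw_irq.h> rather than indexing the array directly, e.g.:
 *
 *	ia64_vector vec = isa_irq_to_vector(1);		/@ 0x20 @/
 */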

DEFINE_SPINLOCK(vector_lock);

struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
	[0 ... NR_IRQS - 1] = {
		.vector = IRQ_VECTOR_UNASSIGNED,
		.domain = CPU_MASK_NONE
	}
};

DEFINE_PER_CPU(int[IA64_NUM_VECTORS], vector_irq) = {
	[0 ... IA64_NUM_VECTORS - 1] = -1
};

static cpumask_t vector_table[IA64_NUM_VECTORS] = {
	[0 ... IA64_NUM_VECTORS - 1] = CPU_MASK_NONE
};

static int irq_status[NR_IRQS] = {
	[0 ... NR_IRQS -1] = IRQ_UNUSED
};

static inline int find_unassigned_irq(void)
{
	int irq;

	for (irq = IA64_FIRST_DEVICE_VECTOR; irq < NR_IRQS; irq++)
		if (irq_status[irq] == IRQ_UNUSED)
			return irq;
	return -ENOSPC;
}

static inline int find_unassigned_vector(cpumask_t domain)
{
	cpumask_t mask;
	int pos, vector;

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;

	for (pos = 0; pos < IA64_NUM_DEVICE_VECTORS; pos++) {
		vector = IA64_FIRST_DEVICE_VECTOR + pos;
		cpumask_and(&mask, &domain, &vector_table[vector]);
		if (!cpumask_empty(&mask))
			continue;
		return vector;
	}
	return -ENOSPC;
}
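
/*
 * Invariant used by the search above, maintained by
 * __bind_irq_vector()/__clear_irq_vector() below: vector_table[v] is
 * the union of the domains of every irq currently bound to vector v,
 * so v is free for a new domain exactly when that domain does not
 * intersect vector_table[v].
 */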

static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	cpumask_t mask;
	int cpu;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON((unsigned)vector >= IA64_NUM_VECTORS);

	cpumask_and(&mask, &domain, cpu_online_mask);
	if (cpumask_empty(&mask))
		return -EINVAL;
	if ((cfg->vector == vector) && cpumask_equal(&cfg->domain, &domain))
		return 0;
	if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
		return -EBUSY;
	for_each_cpu(cpu, &mask)
		per_cpu(vector_irq, cpu)[vector] = irq;
	cfg->vector = vector;
	cfg->domain = domain;
	irq_status[irq] = IRQ_USED;
	cpumask_or(&vector_table[vector], &vector_table[vector], &domain);
	return 0;
}

int bind_irq_vector(int irq, int vector, cpumask_t domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __bind_irq_vector(irq, vector, domain);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

static void __clear_irq_vector(int irq)
{
	int vector, cpu;
	cpumask_t domain;
	struct irq_cfg *cfg = &irq_cfg[irq];

	BUG_ON((unsigned)irq >= NR_IRQS);
	BUG_ON(cfg->vector == IRQ_VECTOR_UNASSIGNED);
	vector = cfg->vector;
	domain = cfg->domain;
	for_each_cpu_and(cpu, &cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	irq_status[irq] = IRQ_UNUSED;
	cpumask_andnot(&vector_table[vector], &vector_table[vector], &domain);
}

static void clear_irq_vector(int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}

int
ia64_native_assign_irq_vector (int irq)
{
	unsigned long flags;
	int vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	vector = -ENOSPC;

	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	if (irq == AUTO_ASSIGN)
		irq = vector;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	return vector;
}
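
/*
 * Note on AUTO_ASSIGN above: when the caller does not care about the
 * irq number, irq is taken to equal the vector, so the returned vector
 * doubles as the irq number; ia64_native_free_irq_vector() below
 * relies on that identity mapping when it frees by vector.
 */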

void
ia64_native_free_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;
	clear_irq_vector(vector);
}

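/*
 * Note: the !! below collapses any negative errno from
 * bind_irq_vector() into 1, so reserve_irq_vector() returns 0 on
 * success, 1 if the vector could not be bound, or -EINVAL for an
 * out-of-range vector.
 */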
int
reserve_irq_vector (int vector)
{
	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return -EINVAL;
	return !!bind_irq_vector(vector, vector, CPU_MASK_ALL);
}

/*
 * Initialize vector_irq on a new cpu. This function must be called
 * with vector_lock held.
 */
void __setup_vector_irq(int cpu)
{
	int irq, vector;

	/* Clear vector_irq */
	for (vector = 0; vector < IA64_NUM_VECTORS; ++vector)
		per_cpu(vector_irq, cpu)[vector] = -1;
	/* Mark the inuse vectors */
	for (irq = 0; irq < NR_IRQS; ++irq) {
		if (!cpumask_test_cpu(cpu, &irq_cfg[irq].domain))
			continue;
		vector = irq_to_vector(irq);
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
}

#ifdef CONFIG_SMP

static enum vector_domain_type {
	VECTOR_DOMAIN_NONE,
	VECTOR_DOMAIN_PERCPU
} vector_domain_type = VECTOR_DOMAIN_NONE;

static cpumask_t vector_allocation_domain(int cpu)
{
	if (vector_domain_type == VECTOR_DOMAIN_PERCPU)
		return *cpumask_of(cpu);
	return CPU_MASK_ALL;
}

static int __irq_prepare_move(int irq, int cpu)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	int vector;
	cpumask_t domain;

	if (cfg->move_in_progress || cfg->move_cleanup_count)
		return -EBUSY;
	if (cfg->vector == IRQ_VECTOR_UNASSIGNED || !cpu_online(cpu))
		return -EINVAL;
	if (cpumask_test_cpu(cpu, &cfg->domain))
		return 0;
	domain = vector_allocation_domain(cpu);
	vector = find_unassigned_vector(domain);
	if (vector < 0)
		return -ENOSPC;
	cfg->move_in_progress = 1;
	cfg->old_domain = cfg->domain;
	cfg->vector = IRQ_VECTOR_UNASSIGNED;
	cfg->domain = CPU_MASK_NONE;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
	return 0;
}

int irq_prepare_move(int irq, int cpu)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&vector_lock, flags);
	ret = __irq_prepare_move(irq, cpu);
	spin_unlock_irqrestore(&vector_lock, flags);
	return ret;
}

void irq_complete_move(unsigned irq)
{
	struct irq_cfg *cfg = &irq_cfg[irq];
	cpumask_t cleanup_mask;
	int i;

	if (likely(!cfg->move_in_progress))
		return;

	if (unlikely(cpumask_test_cpu(smp_processor_id(), &cfg->old_domain)))
		return;

	cpumask_and(&cleanup_mask, &cfg->old_domain, cpu_online_mask);
	cfg->move_cleanup_count = cpumask_weight(&cleanup_mask);
	for_each_cpu(i, &cleanup_mask)
		ia64_send_ipi(i, IA64_IRQ_MOVE_VECTOR, IA64_IPI_DM_INT, 0);
	cfg->move_in_progress = 0;
}

static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
{
	int me = smp_processor_id();
	ia64_vector vector;
	unsigned long flags;

	for (vector = IA64_FIRST_DEVICE_VECTOR;
	     vector < IA64_LAST_DEVICE_VECTOR; vector++) {
		int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __this_cpu_read(vector_irq[vector]);
		if (irq < 0)
			continue;

		desc = irq_to_desc(irq);
		cfg = irq_cfg + irq;
		raw_spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if (!cpumask_test_cpu(me, &cfg->old_domain))
			goto unlock;

		spin_lock_irqsave(&vector_lock, flags);
		__this_cpu_write(vector_irq[vector], -1);
		cpumask_clear_cpu(me, &vector_table[vector]);
		spin_unlock_irqrestore(&vector_lock, flags);
		cfg->move_cleanup_count--;
	unlock:
		raw_spin_unlock(&desc->lock);
	}
	return IRQ_HANDLED;
}
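
/*
 * Rough shape of an irq migration as implemented above:
 *
 *	irq_prepare_move()	binds the irq to a fresh vector/domain
 *				and records the old one in old_domain
 *	irq_complete_move()	on the first interrupt taken via the new
 *				vector, IPIs IA64_IRQ_MOVE_VECTOR to the
 *				online CPUs of the old domain
 *	smp_irq_move_cleanup_interrupt()
 *				each old CPU then releases its
 *				vector_irq slot and vector_table bit
 */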

static int __init parse_vector_domain(char *arg)
{
	if (!arg)
		return -EINVAL;
	if (!strcmp(arg, "percpu")) {
		vector_domain_type = VECTOR_DOMAIN_PERCPU;
		no_int_routing = 1;
	}
	return 0;
}
early_param("vector", parse_vector_domain);
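
/*
 * Example: booting with "vector=percpu" on the kernel command line
 * switches vector_allocation_domain() above to per-cpu domains, so
 * each irq's vector is allocated on a single CPU instead of across
 * CPU_MASK_ALL (setting no_int_routing additionally keeps the
 * platform from spreading interrupts across CPUs).
 */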
#else
static cpumask_t vector_allocation_domain(int cpu)
{
	return CPU_MASK_ALL;
}
#endif


void destroy_and_reserve_irq(unsigned int irq)
{
	unsigned long flags;

	irq_init_desc(irq);
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	irq_status[irq] = IRQ_RSVD;
	spin_unlock_irqrestore(&vector_lock, flags);
}

/*
 * Dynamic irq allocation and deallocation for MSI
 */
int create_irq(void)
{
	unsigned long flags;
	int irq, vector, cpu;
	cpumask_t domain = CPU_MASK_NONE;

	irq = vector = -ENOSPC;
	spin_lock_irqsave(&vector_lock, flags);
	for_each_online_cpu(cpu) {
		domain = vector_allocation_domain(cpu);
		vector = find_unassigned_vector(domain);
		if (vector >= 0)
			break;
	}
	if (vector < 0)
		goto out;
	irq = find_unassigned_irq();
	if (irq < 0)
		goto out;
	BUG_ON(__bind_irq_vector(irq, vector, domain));
 out:
	spin_unlock_irqrestore(&vector_lock, flags);
	if (irq >= 0)
		irq_init_desc(irq);
	return irq;
}

void destroy_irq(unsigned int irq)
{
	irq_init_desc(irq);
	clear_irq_vector(irq);
}
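
/*
 * Illustrative MSI-style use of the pair above (error handling kept
 * minimal; the elided step would program the MSI message using the
 * vector bound to the irq):
 *
 *	int irq = create_irq();
 *	if (irq < 0)
 *		return irq;
 *	...
 *	destroy_irq(irq);
 */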

#ifdef CONFIG_SMP
#	define IS_RESCHEDULE(vec)	(vec == IA64_IPI_RESCHEDULE)
#	define IS_LOCAL_TLB_FLUSH(vec)	(vec == IA64_IPI_LOCAL_TLB_FLUSH)
#else
#	define IS_RESCHEDULE(vec)	(0)
#	define IS_LOCAL_TLB_FLUSH(vec)	(0)
#endif
/*
 * That's where the IVT branches when we get an external
 * interrupt. This branches to the correct hardware IRQ handler via
 * function ptr.
 */
void
ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here.  This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
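	/*
	 * (The 256 vectors fall into 16 priority classes of 16 vectors
	 * each, class = vector >> 4; writing the in-service vector to
	 * TPR below masks its own class and all lower ones, which is
	 * where the depth bound of 16 comes from.)
	 */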
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irq_this_cpu(irq);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the keyboard softirq
	 * handler needs to be able to wait for further keyboard interrupts, which can't
	 * come through until ia64_eoi() has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * This function emulates interrupt processing when a cpu is about to
 * be brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irq_this_cpu(irq);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irq_this_cpu(irq);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Run the normal interrupt processing path, just
			 * as ia64_handle_irq() would from a real interrupt
			 * handler, passing NULL for pt_regs; this could
			 * probably share code with ia64_handle_irq().
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
#endif


#ifdef CONFIG_SMP

static irqreturn_t dummy_handler (int irq, void *dev_id)
{
	BUG();
	return IRQ_NONE;
}

/*
 * KVM uses this interrupt to force a cpu out of guest mode
 */

#endif

void
register_percpu_irq(ia64_vector vec, irq_handler_t handler, unsigned long flags,
		    const char *name)
{
	unsigned int irq;

	irq = vec;
	BUG_ON(bind_irq_vector(irq, vec, CPU_MASK_ALL));
	irq_set_status_flags(irq, IRQ_PER_CPU);
	irq_set_chip(irq, &irq_type_ia64_lsapic);
	if (handler)
		if (request_irq(irq, handler, flags, name, NULL))
			pr_err("Failed to request irq %u (%s)\n", irq, name);
	irq_set_handler(irq, handle_percpu_irq);
}

void __init
ia64_native_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, handle_IPI, 0, "IPI");
	register_percpu_irq(IA64_IPI_RESCHEDULE, dummy_handler, 0, "resched");
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, dummy_handler, 0,
			    "tlb_flush");
#endif
}

void __init
init_IRQ (void)
{
	acpi_boot_init();
	ia64_register_ipi();
	register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL, 0, NULL);
#ifdef CONFIG_SMP
	if (vector_domain_type != VECTOR_DOMAIN_NONE) {
		register_percpu_irq(IA64_IRQ_MOVE_VECTOR,
				    smp_irq_move_cleanup_interrupt, 0,
				    "irq_move");
	}
#endif
}

void
ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect)
{
	void __iomem *ipi_addr;
	unsigned long ipi_data;
	unsigned long phys_cpu_id;

	phys_cpu_id = cpu_physical_id(cpu);

	/*
	 * cpu number is in 8bit ID and 8bit EID
	 */

	ipi_data = (delivery_mode << 8) | (vector & 0xff);
	ipi_addr = ipi_base_addr + ((phys_cpu_id << 4) | ((redirect & 1) << 3));

	writeq(ipi_data, ipi_addr);
}
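
/*
 * Worked example with made-up values: for a target CPU whose local
 * SAPIC id/eid pair makes cpu_physical_id() return 0x0102, sending
 * vector 0xd0 with delivery_mode IA64_IPI_DM_INT and no redirection
 * comes down to a single uncached 8-byte store:
 *
 *	ipi_data = (IA64_IPI_DM_INT << 8) | 0xd0;
 *	writeq(ipi_data, ipi_base_addr + (0x0102 << 4));
 */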