Linux Audio

Check our new training course

Real-Time Linux with PREEMPT_RT training

Feb 18-20, 2025
Register
Loading...
v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
  4 *
  5 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
  6 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  7 * Copyright (C) 1999 - 2001 Kanoj Sarcar
  8 */
  9
 10#undef DEBUG
 11
 12#include <linux/init.h>
 13#include <linux/irq.h>
 14#include <linux/errno.h>
 15#include <linux/signal.h>
 16#include <linux/sched.h>
 17#include <linux/types.h>
 18#include <linux/interrupt.h>
 
 
 19#include <linux/ioport.h>
 20#include <linux/timex.h>
 21#include <linux/smp.h>
 22#include <linux/random.h>
 23#include <linux/kernel.h>
 24#include <linux/kernel_stat.h>
 25#include <linux/delay.h>
 26#include <linux/bitops.h>
 
 27
 28#include <asm/bootinfo.h>
 29#include <asm/io.h>
 30#include <asm/mipsregs.h>
 31
 32#include <asm/processor.h>
 33#include <asm/sn/addrs.h>
 34#include <asm/sn/agent.h>
 35#include <asm/sn/arch.h>
 36#include <asm/sn/hub.h>
 37#include <asm/sn/intr.h>
 
 38
/*
 * NOTE: the text below was inherited as boilerplate from the x86
 * interrupt code; the controller examples it originally listed
 * (8259 PIC, IO-APIC, PIIX4, SGI Visual Workstation Cobalt) do not
 * exist on IP27.  The architectural point still stands:
 *
 * Linux has a controller-independent interrupt architecture.
 * Every controller has a 'controller-template', that is used
 * by the main code to do the right thing.  Each driver-visible
 * interrupt source is transparently wired to the appropriate
 * controller, so drivers need not be aware of the
 * interrupt-controller.
 *
 * The code is designed to be easily extended with new/different
 * interrupt controllers, without having to do assembly magic.
 */
 54
 55extern asmlinkage void ip27_irq(void);
 
 
 
 56
 57/*
 58 * Find first bit set
 59 */
 60static int ms1bit(unsigned long x)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 61{
 62	int b = 0, s;
 
 
 
 
 
 
 63
 64	s = 16; if (x >> 16 == 0) s = 0; b += s; x >>= s;
 65	s =  8; if (x >>  8 == 0) s = 0; b += s; x >>= s;
 66	s =  4; if (x >>  4 == 0) s = 0; b += s; x >>= s;
 67	s =  2; if (x >>  2 == 0) s = 0; b += s; x >>= s;
 68	s =  1; if (x >>  1 == 0) s = 0; b += s;
 69
 70	return b;
 
 
 
 
 
 
 
 
 
 
 
 
 71}
 72
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 73/*
 74 * This code is unnecessarily complex, because we do
 75 * intr enabling. Basically, once we grab the set of intrs we need
 76 * to service, we must mask _all_ these interrupts; firstly, to make
 77 * sure the same intr does not intr again, causing recursion that
 78 * can lead to stack overflow. Secondly, we can not just mask the
 79 * one intr we are do_IRQing, because the non-masked intrs in the
 80 * first set might intr again, causing multiple servicings of the
 81 * same intr. This effect is mostly seen for intercpu intrs.
 82 * Kanoj 05.13.00
 83 */
 84
/*
 * Service one pending level-0 hub interrupt for the current CPU.
 *
 * Reads PI_INT_PEND0, masks it against this slice's hardware enable
 * mask and dispatches the highest-numbered pending source.  On SMP the
 * reschedule and smp_call_function IPIs are handled inline; everything
 * else goes through the slice's level_to_irq[] table into do_IRQ().
 */
static void ip27_do_irq_mask0(void)
{
	int irq, swlevel;
	hubreg_t pend0, mask0;
	cpuid_t cpu = smp_processor_id();
	/* Each hub serves two CPU slices (A/B), each with its own mask reg. */
	int pi_int_mask0 =
		(cputoslice(cpu) == 0) ?  PI_INT_MASK0_A : PI_INT_MASK0_B;

	/* copied from Irix intpend0() */
	pend0 = LOCAL_HUB_L(PI_INT_PEND0);
	mask0 = LOCAL_HUB_L(pi_int_mask0);

	pend0 &= mask0;		/* Pick intrs we should look at */
	if (!pend0)
		return;

	/* Highest set bit wins: highest-priority pending source first. */
	swlevel = ms1bit(pend0);
#ifdef CONFIG_SMP
	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
		irq_enter();
		generic_smp_call_function_interrupt();
		irq_exit();
	} else
#endif
	{
		/* "map" swlevel to irq */
		struct slice_data *si = cpu_data[cpu].data;

		irq = si->level_to_irq[swlevel];
		do_IRQ(irq);
	}

	/* Re-read PEND0 — presumably flushes the intr-clear writes out to
	 * the hub before returning; TODO confirm against hub spec. */
	LOCAL_HUB_L(PI_INT_PEND0);
}
131
/*
 * Service one pending level-1 hub interrupt (PI_INT_PEND1) for the
 * current CPU: pick the highest-numbered pending-and-enabled source,
 * map it through the slice's level_to_irq[] table, clear it in the
 * hub and hand it to do_IRQ().
 */
static void ip27_do_irq_mask1(void)
{
	int irq, swlevel;
	hubreg_t pend1, mask1;
	cpuid_t cpu = smp_processor_id();
	/* Each hub serves two CPU slices (A/B), each with its own mask reg. */
	int pi_int_mask1 = (cputoslice(cpu) == 0) ?  PI_INT_MASK1_A : PI_INT_MASK1_B;
	struct slice_data *si = cpu_data[cpu].data;

	/* copied from Irix intpend0() */
	pend1 = LOCAL_HUB_L(PI_INT_PEND1);
	mask1 = LOCAL_HUB_L(pi_int_mask1);

	pend1 &= mask1;		/* Pick intrs we should look at */
	if (!pend1)
		return;

	/* Highest set bit of PEND1 wins (bit index within this register). */
	swlevel = ms1bit(pend1);
	/* "map" swlevel to irq */
	irq = si->level_to_irq[swlevel];
	LOCAL_HUB_CLR_INTR(swlevel);
	do_IRQ(irq);

	/* Re-read PEND1 — presumably flushes the intr-clear write; confirm. */
	LOCAL_HUB_L(PI_INT_PEND1);
}
156
/* A profiling interrupt is unexpected on IP27; treat it as fatal. */
static void ip27_prof_timer(void)
{
	int cpu = smp_processor_id();

	panic("CPU %d got a profiling interrupt", cpu);
}
161
/* Hub error interrupts indicate unrecoverable hardware trouble. */
static void ip27_hub_error(void)
{
	int cpu = smp_processor_id();

	panic("CPU %d got a hub error interrupt", cpu);
}
166
/*
 * Top-level MIPS interrupt dispatcher: decode CP0 Cause ANDed with
 * Status and route to the matching handler.  The if/else chain fixes
 * the priority order: timer (IP4) first, then hub PEND0 (IP2), hub
 * PEND1 (IP3), profiling (IP5), hub error (IP6).  Only one source is
 * serviced per invocation; remaining sources re-raise the exception.
 */
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long pending = read_c0_cause() & read_c0_status();
	extern unsigned int rt_timer_irq;

	if (pending & CAUSEF_IP4)
		do_IRQ(rt_timer_irq);
	else if (pending & CAUSEF_IP2)	/* PI_INT_PEND_0 or CC_PEND_{A|B} */
		ip27_do_irq_mask0();
	else if (pending & CAUSEF_IP3)	/* PI_INT_PEND_1 */
		ip27_do_irq_mask1();
	else if (pending & CAUSEF_IP5)
		ip27_prof_timer();
	else if (pending & CAUSEF_IP6)
		ip27_hub_error();
}
183
/*
 * Intentionally empty: IP27 hub interrupt state is set up per CPU
 * elsewhere (e.g. install_ipi()), so there is no generic boot-time
 * interrupt initialization to do here.
 */
void __init arch_init_irq(void)
{
}
187
/*
 * Enable the inter-processor interrupts (reschedule and function-call)
 * for the calling CPU's hub slice, then program the slice's level-0/1
 * hardware mask registers from the accumulated software enable mask.
 * Runs on each CPU during bringup.
 */
void install_ipi(void)
{
	int slice = LOCAL_HUB_L(PI_CPU_NUM);	/* which slice we are: A=0, B=1 */
	int cpu = smp_processor_id();
	struct slice_data *si = cpu_data[cpu].data;
	struct hub_data *hub = hub_data(cpu_to_node(cpu));
	int resched, call;

	/* Reserve and enable the reschedule IPI level for this slice. */
	resched = CPU_RESCHED_A_IRQ + slice;
	__set_bit(resched, hub->irq_alloc_mask);
	__set_bit(resched, si->irq_enable_mask);
	LOCAL_HUB_CLR_INTR(resched);	/* don't start with a stale pending bit */

	/* Same for the smp_call_function IPI level. */
	call = CPU_CALL_A_IRQ + slice;
	__set_bit(call, hub->irq_alloc_mask);
	__set_bit(call, si->irq_enable_mask);
	LOCAL_HUB_CLR_INTR(call);

	/* Push the enable masks into the A or B bank for our slice. */
	if (slice == 0) {
		LOCAL_HUB_S(PI_INT_MASK0_A, si->irq_enable_mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_A, si->irq_enable_mask[1]);
	} else {
		LOCAL_HUB_S(PI_INT_MASK0_B, si->irq_enable_mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_B, si->irq_enable_mask[1]);
	}
}
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * ip27-irq.c: Highlevel interrupt handling for IP27 architecture.
  4 *
  5 * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org)
  6 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  7 * Copyright (C) 1999 - 2001 Kanoj Sarcar
  8 */
  9
 
 
 
 
 
 
 
 
 10#include <linux/interrupt.h>
 11#include <linux/irq.h>
 12#include <linux/irqdomain.h>
 13#include <linux/ioport.h>
 
 
 
 14#include <linux/kernel.h>
 
 
 15#include <linux/bitops.h>
 16#include <linux/sched.h>
 17
 
 18#include <asm/io.h>
 19#include <asm/irq_cpu.h>
 
 
 20#include <asm/sn/addrs.h>
 21#include <asm/sn/agent.h>
 22#include <asm/sn/arch.h>
 
 23#include <asm/sn/intr.h>
 24#include <asm/sn/irq_alloc.h>
 25
 26#include "ip27-common.h"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 27
/* Per-virq chip data: where this hub interrupt is currently routed. */
struct hub_irq_data {
	u64	*irq_mask[2];	/* MMIO pointers to the target slice's MASK0/MASK1 regs */
	cpuid_t	cpu;		/* CPU the interrupt is currently targeted at */
};
 32
/* Bitmap of hub interrupt levels already allocated or reserved. */
static DECLARE_BITMAP(hub_irq_map, IP27_HUB_IRQ_COUNT);

/* Per-CPU software shadow of the two 64-bit hub interrupt mask registers. */
static DEFINE_PER_CPU(unsigned long [2], irq_enable_mask);
 36
 37static inline int alloc_level(void)
 38{
 39	int level;
 40
 41again:
 42	level = find_first_zero_bit(hub_irq_map, IP27_HUB_IRQ_COUNT);
 43	if (level >= IP27_HUB_IRQ_COUNT)
 44		return -ENOSPC;
 45
 46	if (test_and_set_bit(level, hub_irq_map))
 47		goto again;
 48
 49	return level;
 50}
 51
/*
 * irq_chip .irq_unmask: set this hwirq's bit in the target CPU's
 * shadow mask, then push both 64-bit mask words out to the slice's
 * hardware mask registers.
 */
static void enable_hub_irq(struct irq_data *d)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);

	set_bit(d->hwirq, mask);
	__raw_writeq(mask[0], hd->irq_mask[0]);
	__raw_writeq(mask[1], hd->irq_mask[1]);
}
 61
/*
 * irq_chip .irq_mask: clear this hwirq's bit in the target CPU's
 * shadow mask and write both mask words back to the hardware.
 */
static void disable_hub_irq(struct irq_data *d)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	__raw_writeq(mask[0], hd->irq_mask[0]);
	__raw_writeq(mask[1], hd->irq_mask[1]);
}
 71
/*
 * Route @hd to a CPU from @mask (falling back to any online CPU if
 * none of the requested CPUs are online) and record the MMIO addresses
 * of that CPU's hub mask registers — the A or B register bank,
 * depending on which slice of the hub the CPU is.
 */
static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
{
	nasid_t nasid;
	int cpu;

	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_any(cpu_online_mask);

	nasid = cpu_to_node(cpu);
	hd->cpu = cpu;
	if (!cputoslice(cpu)) {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_A);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_A);
	} else {
		hd->irq_mask[0] = REMOTE_HUB_PTR(nasid, PI_INT_MASK0_B);
		hd->irq_mask[1] = REMOTE_HUB_PTR(nasid, PI_INT_MASK1_B);
	}
}
 91
/*
 * irq_chip .irq_set_affinity: retarget a hub interrupt to a CPU in
 * @mask.  If the interrupt is live it is masked on the old slice
 * before the re-route and unmasked on the new one afterwards, so it
 * cannot fire while the mask registers are inconsistent.
 */
static int set_affinity_hub_irq(struct irq_data *d, const struct cpumask *mask,
				bool force)
{
	struct hub_irq_data *hd = irq_data_get_irq_chip_data(d);

	if (!hd)
		return -EINVAL;

	if (irqd_is_started(d))
		disable_hub_irq(d);

	setup_hub_mask(hd, mask);

	if (irqd_is_started(d))
		enable_hub_irq(d);

	/* Only one CPU actually receives the interrupt. */
	irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));

	return 0;
}
112
/* irq_chip for HUB-domain interrupts; mask/unmask via the slice mask regs. */
static struct irq_chip hub_irq_type = {
	.name		  = "HUB",
	.irq_mask	  = disable_hub_irq,
	.irq_unmask	  = enable_hub_irq,
	.irq_set_affinity = set_affinity_hub_irq,
};
119
/*
 * irq_domain .alloc: allocate one hub interrupt level, bind it to the
 * Linux virq (hwirq == hub level) and route it to a CPU near the
 * requesting node.  Only single-interrupt allocations are supported.
 * Returns 0, or a negative errno on failure.
 */
static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct hub_irq_data *hd;
	struct hub_data *hub;
	struct irq_desc *desc;
	int swlevel;

	if (nr_irqs > 1 || !info)
		return -EINVAL;

	hd = kzalloc(sizeof(*hd), GFP_KERNEL);
	if (!hd)
		return -ENOMEM;

	/* Claim a free hardware level; it becomes the domain hwirq. */
	swlevel = alloc_level();
	if (unlikely(swlevel < 0)) {
		kfree(hd);
		return -EAGAIN;
	}
	irq_domain_set_info(domain, virq, swlevel, &hub_irq_type, hd,
			    handle_level_irq, NULL, NULL);

	/* use CPU connected to nearest hub */
	hub = hub_data(info->nasid);
	setup_hub_mask(hd, &hub->h_cpus);
	/* Report back the node the interrupt actually landed on. */
	info->nasid = cpu_to_node(hd->cpu);

	/* Make sure it's not already pending when we connect it. */
	REMOTE_HUB_CLR_INTR(info->nasid, swlevel);

	/* NOTE(review): irq_to_desc() result is dereferenced unchecked —
	 * presumably always valid for a virq we just configured; confirm. */
	desc = irq_to_desc(virq);
	desc->irq_common_data.node = info->nasid;
	cpumask_copy(desc->irq_common_data.affinity, &hub->h_cpus);

	return 0;
}
158
159static void hub_domain_free(struct irq_domain *domain,
160			    unsigned int virq, unsigned int nr_irqs)
161{
162	struct irq_data *irqd;
163
164	if (nr_irqs > 1)
165		return;
166
167	irqd = irq_domain_get_irq_data(domain, virq);
168	if (irqd && irqd->chip_data)
169		kfree(irqd->chip_data);
170}
171
/* Domain callbacks wiring virq allocation/free to the hub level pool. */
static const struct irq_domain_ops hub_domain_ops = {
	.alloc = hub_domain_alloc,
	.free  = hub_domain_free,
};
176
177/*
178 * This code is unnecessarily complex, because we do
179 * intr enabling. Basically, once we grab the set of intrs we need
180 * to service, we must mask _all_ these interrupts; firstly, to make
181 * sure the same intr does not intr again, causing recursion that
182 * can lead to stack overflow. Secondly, we can not just mask the
183 * one intr we are do_IRQing, because the non-masked intrs in the
184 * first set might intr again, causing multiple servicings of the
185 * same intr. This effect is mostly seen for intercpu intrs.
186 * Kanoj 05.13.00
187 */
188
/*
 * Chained handler for PI_INT_PEND0: service one pending level-0 hub
 * interrupt on the current CPU.  On SMP the reschedule and
 * smp_call_function IPIs are handled inline; everything else is
 * dispatched into the HUB irq domain (handler data) with the lowest
 * pending bit as the hwirq.
 */
static void ip27_do_irq_mask0(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	struct irq_domain *domain;
	u64 pend0;
	int ret;

	/* copied from Irix intpend0() */
	pend0 = LOCAL_HUB_L(PI_INT_PEND0);

	pend0 &= mask[0];		/* Pick intrs we should look at */
	if (!pend0)
		return;

#ifdef CONFIG_SMP
	if (pend0 & (1UL << CPU_RESCHED_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_A_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_RESCHED_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_RESCHED_B_IRQ);
		scheduler_ipi();
	} else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
		generic_smp_call_function_interrupt();
	} else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
		LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
		generic_smp_call_function_interrupt();
	} else
#endif
	{
		/* Lowest pending bit == hwirq within the HUB domain. */
		domain = irq_desc_get_handler_data(desc);
		ret = generic_handle_domain_irq(domain, __ffs(pend0));
		if (ret)
			spurious_interrupt();
	}

	/* Re-read PEND0 — presumably flushes the intr-clear writes; confirm. */
	LOCAL_HUB_L(PI_INT_PEND0);
}
228
/*
 * Chained handler for PI_INT_PEND1: service one pending level-1 hub
 * interrupt on the current CPU.  PEND1 bits correspond to domain
 * hwirqs 64..127, hence the +64 offset on the lowest pending bit.
 */
static void ip27_do_irq_mask1(struct irq_desc *desc)
{
	cpuid_t cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	struct irq_domain *domain;
	u64 pend1;
	int ret;

	/* copied from Irix intpend0() */
	pend1 = LOCAL_HUB_L(PI_INT_PEND1);

	pend1 &= mask[1];		/* Pick intrs we should look at */
	if (!pend1)
		return;

	domain = irq_desc_get_handler_data(desc);
	ret = generic_handle_domain_irq(domain, __ffs(pend1) + 64);
	if (ret)
		spurious_interrupt();

	/* Re-read PEND1 — presumably flushes the intr-clear write; confirm. */
	LOCAL_HUB_L(PI_INT_PEND1);
}
251
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Enable the reschedule and function-call IPIs for the calling CPU's
 * hub slice and program the slice's hardware mask registers from the
 * per-CPU shadow mask.  Runs on each CPU during bringup.
 */
void install_ipi(void)
{
	int cpu = smp_processor_id();
	unsigned long *mask = per_cpu(irq_enable_mask, cpu);
	int slice = LOCAL_HUB_L(PI_CPU_NUM);	/* which slice we are: A=0, B=1 */
	int resched, call;

	/* Enable the reschedule IPI level for this slice. */
	resched = CPU_RESCHED_A_IRQ + slice;
	set_bit(resched, mask);
	LOCAL_HUB_CLR_INTR(resched);	/* don't start with a stale pending bit */

	/* Same for the smp_call_function IPI level. */
	call = CPU_CALL_A_IRQ + slice;
	set_bit(call, mask);
	LOCAL_HUB_CLR_INTR(call);

	/* Push the shadow masks into the A or B register bank for our slice. */
	if (slice == 0) {
		LOCAL_HUB_S(PI_INT_MASK0_A, mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_A, mask[1]);
	} else {
		LOCAL_HUB_S(PI_INT_MASK0_B, mask[0]);
		LOCAL_HUB_S(PI_INT_MASK1_B, mask[1]);
	}
}
275
/*
 * Boot-time interrupt setup: initialize the MIPS CPU irq chip, mark
 * the hub levels reserved by hardware or software convention, then
 * create the linear "HUB" irq domain and attach its chained handlers
 * to the two CPU interrupt lines that demultiplex PEND0 and PEND1.
 */
void __init arch_init_irq(void)
{
	struct irq_domain *domain;
	struct fwnode_handle *fn;
	int i;

	mips_cpu_irq_init();

	/*
	 * Some interrupts are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be used accidentally
	 * later.
	 */
	for (i = 0; i <= CPU_CALL_B_IRQ; i++)
		set_bit(i, hub_irq_map);

	for (i = NI_BRDCAST_ERR_A; i <= MSC_PANIC_INTR; i++)
		set_bit(i, hub_irq_map);

	fn = irq_domain_alloc_named_fwnode("HUB");
	WARN_ON(fn == NULL);
	if (!fn)
		return;
	domain = irq_domain_create_linear(fn, IP27_HUB_IRQ_COUNT,
					  &hub_domain_ops, NULL);
	WARN_ON(domain == NULL);
	if (!domain)
		return;

	/* Make the HUB domain the default target for interrupt mappings. */
	irq_set_default_host(domain);

	/* PEND0/PEND1 CPU lines demultiplex into the HUB domain. */
	irq_set_percpu_devid(IP27_HUB_PEND0_IRQ);
	irq_set_chained_handler_and_data(IP27_HUB_PEND0_IRQ, ip27_do_irq_mask0,
					 domain);
	irq_set_percpu_devid(IP27_HUB_PEND1_IRQ);
	irq_set_chained_handler_and_data(IP27_HUB_PEND1_IRQ, ip27_do_irq_mask1,
					 domain);
}