/*
 * Copyright 2001 MontaVista Software Inc.
 * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
 *
 * Copyright (C) 2001 Ralf Baechle
 * Copyright (C) 2005  MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 *
 * This file defines the irq handler for MIPS CPU interrupts.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

/*
 * Almost all MIPS CPUs define 8 interrupt sources.  They are typically
 * level triggered (i.e., cannot be cleared from CPU; must be cleared from
 * device).
 *
 * The first two are software interrupts (i.e. not exposed as pins) which
 * may be used for IPIs in multi-threaded single-core systems.
 *
 * The last one is usually the CPU timer interrupt if the counter register
 * is present, or for old CPUs with an external FPU by convention it's the
 * FPU exception interrupt.
 */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>

#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/setup.h>

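/*
 * irq_domain covers the eight CPU interrupt lines; ipi_domain, when
 * CONFIG_GENERIC_IRQ_IPI is enabled, is stacked on top of it and owns the
 * two software interrupts used as IPIs.
 */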
static struct irq_domain *irq_domain;
static struct irq_domain *ipi_domain;

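/*
 * Each of the eight CPU interrupt lines is masked and unmasked through its
 * IM bit in the CP0 Status register; IE_SW0 is the bit for hwirq 0 (the
 * first software interrupt) and the remaining lines follow in order.
 */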
static inline void unmask_mips_irq(struct irq_data *d)
{
	set_c0_status(IE_SW0 << d->hwirq);
	irq_enable_hazard();
}

static inline void mask_mips_irq(struct irq_data *d)
{
	clear_c0_status(IE_SW0 << d->hwirq);
	irq_disable_hazard();
}

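/*
 * irq_chip for the plain (non-MT) case: every operation reduces to setting
 * or clearing the relevant Status.IM bit.
 */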
static struct irq_chip mips_cpu_irq_controller = {
	.name		= "MIPS",
	.irq_ack	= mask_mips_irq,
	.irq_mask	= mask_mips_irq,
	.irq_mask_ack	= mask_mips_irq,
	.irq_unmask	= unmask_mips_irq,
	.irq_eoi	= unmask_mips_irq,
	.irq_disable	= mask_mips_irq,
	.irq_enable	= unmask_mips_irq,
};

/*
 * Basically the same as above but taking care of all the MT stuff
 */

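/*
 * The MT variants below clear pending software interrupts in the Cause
 * register with multi-VPE execution temporarily disabled (dvpe()/evpe())
 * before unmasking or masking the line.
 */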
static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
{
	unsigned int vpflags = dvpe();

	clear_c0_cause(C_SW0 << d->hwirq);
	evpe(vpflags);
	unmask_mips_irq(d);
	return 0;
}

/*
 * While we ack the interrupt, interrupts are disabled and thus we don't
 * need to deal with concurrency issues.  Same for mips_cpu_irq_end.
 */
static void mips_mt_cpu_irq_ack(struct irq_data *d)
{
	unsigned int vpflags = dvpe();
	clear_c0_cause(C_SW0 << d->hwirq);
	evpe(vpflags);
	mask_mips_irq(d);
}

#ifdef CONFIG_GENERIC_IRQ_IPI

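/*
 * Raise an IPI by setting one of the two software interrupt bits in the
 * target VPE's Cause register, accessed through the MT TC/VPE register
 * interface (settc()/write_vpe_c0_cause()).
 */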
static void mips_mt_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned long flags;
	int vpflags;

	local_irq_save(flags);

	/* We can only send IPIs to VPEs within the local core */
	WARN_ON(!cpus_are_siblings(smp_processor_id(), cpu));

	vpflags = dvpe();
	settc(cpu_vpe_id(&cpu_data[cpu]));
	write_vpe_c0_cause(read_vpe_c0_cause() | (C_SW0 << hwirq));
	evpe(vpflags);

	local_irq_restore(flags);
}

#endif /* CONFIG_GENERIC_IRQ_IPI */

static struct irq_chip mips_mt_cpu_irq_controller = {
	.name		= "MIPS",
	.irq_startup	= mips_mt_cpu_irq_startup,
	.irq_ack	= mips_mt_cpu_irq_ack,
	.irq_mask	= mask_mips_irq,
	.irq_mask_ack	= mips_mt_cpu_irq_ack,
	.irq_unmask	= unmask_mips_irq,
	.irq_eoi	= unmask_mips_irq,
	.irq_disable	= mask_mips_irq,
	.irq_enable	= unmask_mips_irq,
#ifdef CONFIG_GENERIC_IRQ_IPI
	.ipi_send_single = mips_mt_send_ipi,
#endif
};

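/*
 * Default dispatch loop, overridable by platform code: service pending,
 * unmasked CPU interrupts starting with the highest-numbered line.  With
 * CONFIG_GENERIC_IRQ_IPI enabled, hwirqs 0 and 1 (the software interrupts)
 * are looked up in the IPI domain, everything else in the main CPU domain.
 */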
asmlinkage void __weak plat_irq_dispatch(void)
{
	unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM;
	unsigned int virq;
	int irq;

	if (!pending) {
		spurious_interrupt();
		return;
	}

	pending >>= CAUSEB_IP;
	while (pending) {
		irq = fls(pending) - 1;
		if (IS_ENABLED(CONFIG_GENERIC_IRQ_IPI) && irq < 2)
			virq = irq_linear_revmap(ipi_domain, irq);
		else
			virq = irq_linear_revmap(irq_domain, irq);
		do_IRQ(virq);
		pending &= ~BIT(irq);
	}
}

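/*
 * Map a CPU hwirq to a Linux virq.  On MT CPUs the two software interrupts
 * get the MT-aware chip so they can serve as IPIs; all lines are handled
 * as per-CPU interrupts, and on cores with vectored interrupts the dispatch
 * routine is installed as the vector handler.
 */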
static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
			     irq_hw_number_t hw)
{
	struct irq_chip *chip;

	if (hw < 2 && cpu_has_mipsmt) {
		/* Software interrupts are used for MT/CMT IPI */
		chip = &mips_mt_cpu_irq_controller;
	} else {
		chip = &mips_cpu_irq_controller;
	}

	if (cpu_has_vint)
		set_vi_handler(hw, plat_irq_dispatch);

	irq_set_chip_and_handler(irq, chip, handle_percpu_irq);

	return 0;
}

static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
	.map = mips_cpu_intc_map,
	.xlate = irq_domain_xlate_onecell,
};

#ifdef CONFIG_GENERIC_IRQ_IPI

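/* Tracks which of the two software interrupts have been handed out as IPIs. */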
struct cpu_ipi_domain_state {
	DECLARE_BITMAP(allocated, 2);
};

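/*
 * Allocate IPIs: for each requested virq pick a free software interrupt,
 * attach the MT-aware chip and configure the line as level triggered.
 */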
static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
{
	struct cpu_ipi_domain_state *state = domain->host_data;
	unsigned int i, hwirq;
	int ret;

	for (i = 0; i < nr_irqs; i++) {
		hwirq = find_first_zero_bit(state->allocated, 2);
		if (hwirq == 2)
			return -EBUSY;
		bitmap_set(state->allocated, hwirq, 1);

		ret = irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq,
						    &mips_mt_cpu_irq_controller,
						    NULL);
		if (ret)
			return ret;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH);
		if (ret)
			return ret;
	}

	return 0;
}

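/*
 * Only match this domain for IPI allocations (DOMAIN_BUS_IPI), optionally
 * restricted to the device node the domain was created for.
 */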
static int mips_cpu_ipi_match(struct irq_domain *d, struct device_node *node,
			      enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || (to_of_node(d->fwnode) == node)) && is_ipi;
	default:
		return 0;
	}
}

static const struct irq_domain_ops mips_cpu_ipi_chip_ops = {
	.alloc	= mips_cpu_ipi_alloc,
	.match	= mips_cpu_ipi_match,
};

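/*
 * Create the IPI domain as a hierarchy on top of the main CPU domain,
 * covering just the two software interrupts.
 */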
static void mips_cpu_register_ipi_domain(struct device_node *of_node)
{
	struct cpu_ipi_domain_state *ipi_domain_state;

	ipi_domain_state = kzalloc(sizeof(*ipi_domain_state), GFP_KERNEL);
	ipi_domain = irq_domain_add_hierarchy(irq_domain,
					      IRQ_DOMAIN_FLAG_IPI_SINGLE,
					      2, of_node,
					      &mips_cpu_ipi_chip_ops,
					      ipi_domain_state);
	if (!ipi_domain)
		panic("Failed to add MIPS CPU IPI domain");
	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
}

#else /* !CONFIG_GENERIC_IRQ_IPI */

static inline void mips_cpu_register_ipi_domain(struct device_node *of_node) {}

#endif /* !CONFIG_GENERIC_IRQ_IPI */

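/*
 * Common init path for the legacy and DT probes: mask and acknowledge all
 * CPU interrupts, register a legacy domain for the eight lines starting at
 * MIPS_CPU_IRQ_BASE and, on MT CPUs, register the IPI domain on top of it.
 */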
static void __init __mips_cpu_irq_init(struct device_node *of_node)
{
	/* Mask interrupts. */
	clear_c0_status(ST0_IM);
	clear_c0_cause(CAUSEF_IP);

	irq_domain = irq_domain_add_legacy(of_node, 8, MIPS_CPU_IRQ_BASE, 0,
					   &mips_cpu_intc_irq_domain_ops,
					   NULL);
	if (!irq_domain)
		panic("Failed to add irqdomain for MIPS CPU");

	/*
	 * Only proceed to register the software interrupt IPI implementation
	 * for CPUs which implement the MIPS MT (multi-threading) ASE.
	 */
	if (cpu_has_mipsmt)
		mips_cpu_register_ipi_domain(of_node);
}

void __init mips_cpu_irq_init(void)
{
	__mips_cpu_irq_init(NULL);
}

int __init mips_cpu_irq_of_init(struct device_node *of_node,
				struct device_node *parent)
{
	__mips_cpu_irq_init(of_node);
	return 0;
}
IRQCHIP_DECLARE(cpu_intc, "mti,cpu-interrupt-controller", mips_cpu_irq_of_init);