Note: File does not exist in v5.9.
/*
 * Linux/Meta general interrupt handling code
 *
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/irqchip/metag-ext.h>
#include <linux/irqchip/metag.h>
#include <linux/irqdomain.h>
#include <linux/ratelimit.h>

#include <asm/core_reg.h>
#include <asm/mach/arch.h>
#include <asm/uaccess.h>

#ifdef CONFIG_4KSTACKS
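/*
 * With CONFIG_4KSTACKS, hardirqs and softirqs run on separate per-CPU
 * stacks.  Each stack is a THREAD_SIZE block with a thread_info at its
 * base, overlaid by this union (apparently modelled on the x86
 * 4KSTACKS/irqstack scheme).
 */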
union irq_ctx {
	struct thread_info      tinfo;
	u32                     stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif

static struct irq_domain *root_domain;

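/*
 * irq_chip callbacks: starting up or shutting down a Linux IRQ hands the
 * hardware IRQ number to the TBI layer's startup/shutdown routines.
 */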
static unsigned int startup_meta_irq(struct irq_data *data)
{
	tbi_startup_interrupt(data->hwirq);
	return 0;
}

static void shutdown_meta_irq(struct irq_data *data)
{
	tbi_shutdown_interrupt(data->hwirq);
}

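/*
 * do_IRQ() - handle an external interrupt.
 *
 * The hardware IRQ number is translated through the root IRQ domain and
 * the mapped Linux IRQ is then handled, on the dedicated per-CPU IRQ
 * stack when CONFIG_4KSTACKS is enabled.
 */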
void do_IRQ(int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
#ifdef CONFIG_4KSTACKS
	struct irq_desc *desc;
	union irq_ctx *curctx, *irqctx;
	u32 *isp;
#endif

	irq_enter();

	irq = irq_linear_revmap(root_domain, irq);

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 1KB free? */
	{
		unsigned long sp;

		sp = __core_reg_get(A0StP);
		sp &= THREAD_SIZE - 1;

		if (unlikely(sp > (THREAD_SIZE - 1024)))
			pr_err("Stack overflow in do_IRQ: %ld\n", sp);
	}
#endif


#ifdef CONFIG_4KSTACKS
	curctx = (union irq_ctx *) current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * this is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the irq stack already after all)
	 */
	if (curctx != irqctx) {
		/* build the stack frame on the IRQ stack */
		isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));
		irqctx->tinfo.task = curctx->tinfo.task;

		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);

		desc = irq_to_desc(irq);

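		/*
		 * Call desc->handle_irq(irq, desc) on the IRQ stack: the
		 * arguments go in D1Ar1/D0Ar2, the first SWAP moves A0StP
		 * onto the IRQ stack (saving the old stack pointer in D0.5),
		 * SWAP PC,D1RtP performs the call, and the original A0StP is
		 * restored once the handler returns.
		 */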
		asm volatile (
			"MOV   D0.5,%0\n"
			"MOV   D1Ar1,%1\n"
			"MOV   D1RtP,%2\n"
			"MOV   D0Ar2,%3\n"
			"SWAP  A0StP,D0.5\n"
			"SWAP  PC,D1RtP\n"
			"MOV   A0StP,D0.5\n"
			:
			: "r" (isp), "r" (irq), "r" (desc->handle_irq),
			  "r" (desc)
			: "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
			  "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
			  "D0.5"
			);
	} else
#endif
		generic_handle_irq(irq);

	irq_exit();

	set_irq_regs(old_regs);
}

#ifdef CONFIG_4KSTACKS

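/*
 * Statically allocated per-CPU stacks (THREAD_SIZE bytes each) used for
 * softirq and hardirq processing.
 */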
static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task              = NULL;
	irqctx->tinfo.exec_domain       = NULL;
	irqctx->tinfo.cpu               = cpu;
	irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task              = NULL;
	irqctx->tinfo.exec_domain       = NULL;
	irqctx->tinfo.cpu               = cpu;
	irqctx->tinfo.preempt_count     = 0;
	irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	pr_info("CPU %u irqstacks, hard=%p soft=%p\n",
		cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
	hardirq_ctx[smp_processor_id()] = NULL;
}

extern asmlinkage void __do_softirq(void);

void do_softirq_own_stack(void)
{
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	curctx = current_thread_info();
	irqctx = softirq_ctx[smp_processor_id()];
	irqctx->tinfo.task = curctx->task;

	/* build the stack frame on the softirq stack */
	isp = (u32 *) ((char *)irqctx + sizeof(struct thread_info));

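	/*
	 * Run __do_softirq() on the softirq stack (the extra leading
	 * underscore in the CALLR target is presumably the assembler-level
	 * name of the C symbol): swap A0StP onto the new stack, call the
	 * handler, then restore the original stack pointer from D0.5.
	 */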
	asm volatile (
		"MOV   D0.5,%0\n"
		"SWAP  A0StP,D0.5\n"
		"CALLR D1RtP,___do_softirq\n"
		"MOV   A0StP,D0.5\n"
		:
		: "r" (isp)
		: "memory", "cc", "D1Ar1", "D0Ar2", "D1Ar3", "D0Ar4",
		  "D1Ar5", "D0Ar6", "D0Re0", "D1Re0", "D0.4", "D1RtP",
		  "D0.5"
		);
}
#endif

static struct irq_chip meta_irq_type = {
	.name = "META-IRQ",
	.irq_startup = startup_meta_irq,
	.irq_shutdown = shutdown_meta_irq,
};

/**
 * tbisig_map() - Map a TBI signal number to a virtual IRQ number.
 * @hw:		Number of the TBI signal. Must be in range.
 *
 * Returns:	The virtual IRQ number for the TBI signal specified by @hw.
 */
int tbisig_map(unsigned int hw)
{
	return irq_create_mapping(root_domain, hw);
}

/**
 * metag_tbisig_map() - map a tbi signal to a Linux virtual IRQ number
 * @d:		root irq domain
 * @irq:	virtual irq number
 * @hw:		hardware irq number (TBI signal number)
 *
 * This sets up a virtual irq for a specified TBI signal number.
 */
static int metag_tbisig_map(struct irq_domain *d, unsigned int irq,
			    irq_hw_number_t hw)
{
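	/*
	 * On SMP the per-CPU flow handler is used, presumably because TBI
	 * signals are raised per hardware thread.
	 */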
#ifdef CONFIG_SMP
	irq_set_chip_and_handler(irq, &meta_irq_type, handle_percpu_irq);
#else
	irq_set_chip_and_handler(irq, &meta_irq_type, handle_simple_irq);
#endif
	return 0;
}

static const struct irq_domain_ops metag_tbisig_domain_ops = {
	.map = metag_tbisig_map,
};

/*
 * void init_IRQ(void)
 *
 * Parameters:	None
 *
 * Returns:	Nothing
 *
 * This function should be called during kernel startup to initialize
 * the IRQ handling routines.
 */
void __init init_IRQ(void)
{
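	/* Linear domain with one entry for each of the 32 TBI signal numbers. */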
	root_domain = irq_domain_add_linear(NULL, 32,
					    &metag_tbisig_domain_ops, NULL);
	if (unlikely(!root_domain))
		panic("init_IRQ: cannot add root IRQ domain");

	irq_ctx_init(smp_processor_id());

	init_internal_IRQ();
	init_external_IRQ();

	if (machine_desc->init_irq)
		machine_desc->init_irq();
}

int __init arch_probe_nr_irqs(void)
{
	if (machine_desc->nr_irqs)
		nr_irqs = machine_desc->nr_irqs;
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
	unsigned int i, cpu = smp_processor_id();

	for_each_active_irq(i) {
		struct irq_data *data = irq_get_irq_data(i);
		unsigned int newcpu;

		if (irqd_is_per_cpu(data))
			continue;

		if (!cpumask_test_cpu(cpu, data->affinity))
			continue;

		newcpu = cpumask_any_and(data->affinity, cpu_online_mask);

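		/*
		 * No online CPU is left in this IRQ's affinity mask: fall
		 * back to allowing any CPU before re-setting the affinity.
		 */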
		if (newcpu >= nr_cpu_ids) {
			pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
					    i, cpu);

			cpumask_setall(data->affinity);
		}
		irq_set_affinity(i, data->affinity);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */