[Scrape note: the text below appears to be arch/sparc/kernel/leon_kernel.c from the
Linux kernel source browser, first at tag v4.10.11 and then duplicated at tag v6.8.
The original page chrome ("Linux Audio", "Check our new training course",
"Loading...") carried no content and has been replaced by this note.]
  1/*
  2 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
  3 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
  4 */
  5
  6#include <linux/kernel.h>
  7#include <linux/errno.h>
  8#include <linux/mutex.h>
  9#include <linux/of.h>
 10#include <linux/of_platform.h>
 11#include <linux/interrupt.h>
 12#include <linux/of_device.h>
 13#include <linux/clocksource.h>
 14#include <linux/clockchips.h>
 15
 16#include <asm/oplib.h>
 17#include <asm/timer.h>
 18#include <asm/prom.h>
 19#include <asm/leon.h>
 20#include <asm/leon_amba.h>
 21#include <asm/traps.h>
 22#include <asm/cacheflush.h>
 23#include <asm/smp.h>
 24#include <asm/setup.h>
 25
 26#include "kernel.h"
 27#include "prom.h"
 28#include "irq.h"
 29
/* Base addresses of the IRQMP interrupt controller and GPTIMER core,
 * discovered from the AMBA Plug&Play device tree in leon_init_timers().
 */
struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address */
struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address */

int leondebug_irq_disable;
int leon_debug_irqout;
/* Stand-in for the sun4m L10 counter; leon_init_timers() points
 * master_l10_counter at this zeroed dummy. */
static volatile u32 dummy_master_l10_counter;
unsigned long amba_system_id;
/* Serializes read-modify-write of the per-CPU IRQ mask registers */
static DEFINE_SPINLOCK(leon_irq_lock);

static unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
static unsigned long leon3_gptimer_ackmask; /* For clearing pending bit */
unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
unsigned int sparc_leon_eirq; /* Extended IRQ number (1..15); 0 when absent */
/* Per-CPU IRQ mask register of the selected IRQ controller */
#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
/* IRQ clear (ACK) register */
#define LEON_IACK (&leon3_irqctrl_regs->iclear)
/* Flag kept in bit 0 of chip_data: ACK the controller in the EOI handler */
#define LEON_DO_ACK_HW 1
 46
/* Return the last ACKed IRQ by the Extended IRQ controller. It has already
 * been (automatically) ACKed when the CPU takes the trap.
 * @cpu: index into the per-CPU intid register bank.
 * Returns the 5-bit interrupt ID field (bit 4 set means an ext-IRQ fired).
 */
static inline unsigned int leon_eirq_get(int cpu)
{
	return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f;
}
 54
/* Handle one or multiple IRQs from the extended interrupt controller.
 * Chained flow handler installed by leon_eirq_setup(): reads the last
 * ACKed extended IRQ for the current CPU and dispatches the mapped
 * virtual IRQ, if any.
 */
static void leon_handle_ext_irq(struct irq_desc *desc)
{
	unsigned int eirq;
	struct irq_bucket *p;
	int cpu = sparc_leon3_cpuid();

	eirq = leon_eirq_get(cpu);
	p = irq_map[eirq];
	if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */
		generic_handle_irq(p->irq);
}
 67
/* The extended IRQ controller has been found, this function registers it.
 * @eirq: hardware IRQ line (1..15) the ext-IRQ controller is cascaded on.
 * Builds a virtual IRQ with leon_handle_ext_irq as chained handler,
 * unmasks the cascade line on the boot CPU and records the line in
 * sparc_leon_eirq.
 */
static void leon_eirq_setup(unsigned int eirq)
{
	unsigned long mask, oldmask;
	unsigned int veirq;

	if (eirq < 1 || eirq > 0xf) {
		printk(KERN_ERR "LEON EXT IRQ NUMBER BAD: %d\n", eirq);
		return;
	}

	veirq = leon_build_device_irq(eirq, leon_handle_ext_irq, "extirq", 0);

	/*
	 * Unmask the Extended IRQ, the IRQs routed through the Ext-IRQ
	 * controller have a mask-bit of their own, so this is safe.
	 */
	irq_link(veirq);
	mask = 1 << eirq;
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(boot_cpu_id));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(boot_cpu_id), (oldmask | mask));
	sparc_leon_eirq = eirq;
}
 91
 92unsigned long leon_get_irqmask(unsigned int irq)
 93{
 94	unsigned long mask;
 95
 96	if (!irq || ((irq > 0xf) && !sparc_leon_eirq)
 97	    || ((irq > 0x1f) && sparc_leon_eirq)) {
 98		printk(KERN_ERR
 99		       "leon_get_irqmask: false irq number: %d\n", irq);
100		mask = 0;
101	} else {
102		mask = LEON_HARD_INT(irq);
103	}
104	return mask;
105}
106
#ifdef CONFIG_SMP
/* Pick the CPU an IRQ should be routed to for a given affinity mask.
 * Falls back to the boot CPU when the affinity places no restriction
 * (covers all online CPUs) or selects no online CPU at all.
 */
static int irq_choose_cpu(const struct cpumask *affinity)
{
	cpumask_t candidates;

	cpumask_and(&candidates, cpu_online_mask, affinity);

	if (cpumask_empty(&candidates) ||
	    cpumask_equal(&candidates, cpu_online_mask))
		return boot_cpu_id;

	return cpumask_first(&candidates);
}
#else
#define irq_choose_cpu(affinity) boot_cpu_id
#endif
121
/* irq_chip .irq_set_affinity: move an IRQ's mask bit from the old CPU's
 * mask register to the newly selected CPU's. The IRQ's bit mask is kept
 * in chip_data (set up by leon_build_device_irq()).
 */
static int leon_set_affinity(struct irq_data *data, const struct cpumask *dest,
			     bool force)
{
	unsigned long mask, oldmask, flags;
	int oldcpu, newcpu;

	mask = (unsigned long)data->chip_data;
	oldcpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
	newcpu = irq_choose_cpu(dest);

	if (oldcpu == newcpu)
		goto out;

	/* mask on the old CPU first, then unmask on the selected CPU */
	spin_lock_irqsave(&leon_irq_lock, flags);
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(oldcpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(oldcpu), (oldmask & ~mask));
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(newcpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(newcpu), (oldmask | mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);
out:
	return IRQ_SET_MASK_OK;
}
145
/* irq_chip .irq_unmask: set the IRQ's bit (from chip_data) in the mask
 * register of the CPU the IRQ is currently routed to.
 */
static void leon_unmask_irq(struct irq_data *data)
{
	unsigned long mask, oldmask, flags;
	int cpu;

	mask = (unsigned long)data->chip_data;
	cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
	spin_lock_irqsave(&leon_irq_lock, flags);
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);
}
158
/* irq_chip .irq_mask: clear the IRQ's bit (from chip_data) in the mask
 * register of the CPU the IRQ is currently routed to.
 */
static void leon_mask_irq(struct irq_data *data)
{
	unsigned long mask, oldmask, flags;
	int cpu;

	mask = (unsigned long)data->chip_data;
	cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
	spin_lock_irqsave(&leon_irq_lock, flags);
	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);
}
171
/* irq_chip .irq_startup: link the virtual IRQ into the sparc32 dispatch
 * map, then unmask it. Returns 0 (no pending state to report). */
static unsigned int leon_startup_irq(struct irq_data *data)
{
	irq_link(data->irq);
	leon_unmask_irq(data);
	return 0;
}
178
/* irq_chip .irq_shutdown: mask the IRQ, then remove it from the sparc32
 * dispatch map (reverse order of leon_startup_irq). */
static void leon_shutdown_irq(struct irq_data *data)
{
	leon_mask_irq(data);
	irq_unlink(data->irq);
}
184
/* Used by external level sensitive IRQ handlers on the LEON: ACK IRQ ctrl.
 * chip_data holds the IRQ's mask bit with LEON_DO_ACK_HW OR-ed into bit 0
 * when an explicit ACK is required; the flag is stripped before writing
 * the clear register.
 */
static void leon_eoi_irq(struct irq_data *data)
{
	unsigned long mask = (unsigned long)data->chip_data;

	if (mask & LEON_DO_ACK_HW)
		LEON3_BYPASS_STORE_PA(LEON_IACK, mask & ~LEON_DO_ACK_HW);
}
193
/* irq_chip shared by all LEON IRQs; per-IRQ state lives in chip_data */
static struct irq_chip leon_irq = {
	.name			= "leon",
	.irq_startup		= leon_startup_irq,
	.irq_shutdown		= leon_shutdown_irq,
	.irq_mask		= leon_mask_irq,
	.irq_unmask		= leon_unmask_irq,
	.irq_eoi		= leon_eoi_irq,
	.irq_set_affinity	= leon_set_affinity,
};
203
/*
 * Build a LEON IRQ for the edge triggered LEON IRQ controller:
 *  Edge (normal) IRQ           - handle_simple_irq, ack=DON'T-CARE, never ack
 *  Level IRQ (PCI|Level-GPIO)  - handle_fasteoi_irq, ack=1, ack after ISR
 *  Per-CPU Edge                - handle_percpu_irq, ack=0
 *
 * @real_irq:     hardware IRQ number (validated via leon_get_irqmask()).
 * @flow_handler: generic-IRQ flow handler for this line.
 * @name:         handler name shown in /proc/interrupts.
 * @do_ack:       non-zero to have leon_eoi_irq() ACK the controller.
 *
 * Returns the allocated virtual IRQ number, or 0 on failure.
 */
unsigned int leon_build_device_irq(unsigned int real_irq,
				    irq_flow_handler_t flow_handler,
				    const char *name, int do_ack)
{
	unsigned int irq;
	unsigned long mask;
	struct irq_desc *desc;

	irq = 0;
	mask = leon_get_irqmask(real_irq);
	if (mask == 0)
		goto out;

	irq = irq_alloc(real_irq, real_irq);
	if (irq == 0)
		goto out;

	if (do_ack)
		mask |= LEON_DO_ACK_HW;

	/* Only install chip/handler if the descriptor is still unconfigured,
	 * so repeated builds for the same line keep the first setup. */
	desc = irq_to_desc(irq);
	if (!desc || !desc->handle_irq || desc->handle_irq == handle_bad_irq) {
		irq_set_chip_and_handler_name(irq, &leon_irq,
					      flow_handler, name);
		irq_set_chip_data(irq, (void *)mask);
	}

out:
	return irq;
}
240
/* sparc_config.build_device_irq hook: default edge-triggered setup.
 * @op is unused; present to match the sparc32 callback signature. */
static unsigned int _leon_build_device_irq(struct platform_device *op,
					   unsigned int real_irq)
{
	return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0);
}
246
/* Re-configure an already-built virtual IRQ with a new flow handler and
 * ACK policy (e.g. switch an edge IRQ to level/fasteoi handling), while
 * preserving the mask bits stored in chip_data.
 */
void leon_update_virq_handling(unsigned int virq,
			      irq_flow_handler_t flow_handler,
			      const char *name, int do_ack)
{
	unsigned long mask = (unsigned long)irq_get_chip_data(virq);

	/* keep the mask bit, replace only the ACK flag */
	mask &= ~LEON_DO_ACK_HW;
	if (do_ack)
		mask |= LEON_DO_ACK_HW;

	irq_set_chip_and_handler_name(virq, &leon_irq,
				      flow_handler, name);
	irq_set_chip_data(virq, (void *)mask);
}
261
/* sparc_config.get_cycles_offset hook: cycles elapsed in the current
 * timer period. The down-counter is re-read after checking the pending
 * bit so that a wrap between the first read and the check is accounted
 * for by adding one full reload period.
 */
static u32 leon_cycles_offset(void)
{
	u32 rld, val, ctrl, off;

	rld = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld);
	val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
	if (LEON3_GPTIMER_CTRL_ISPENDING(ctrl)) {
		/* timer wrapped: re-read counter and add a full period */
		val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
		off = 2 * rld - val;
	} else {
		off = rld - val;
	}

	return off;
}
278
#ifdef CONFIG_SMP

/* smp clockevent irq: broadcast timer tick handler run on every CPU.
 * Only the boot CPU drives the global timekeeping via timer_interrupt();
 * every CPU fires its own per-cpu clockevent handler.
 */
static irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
{
	struct clock_event_device *ce;
	int cpu = smp_processor_id();

	leon_clear_profile_irq(cpu);

	if (cpu == boot_cpu_id)
		timer_interrupt(irq, NULL);

	ce = &per_cpu(sparc32_clockevent, cpu);

	irq_enter();
	if (ce->event_handler)
		ce->event_handler(ce);
	irq_exit();

	return IRQ_HANDLED;
}

#endif /* CONFIG_SMP */
303
/* Probe the AMBA Plug&Play tree for the IRQMP interrupt controller and a
 * usable GPTIMER instance, then program the timer for a HZ tick and wire
 * up the timer interrupt (per-cpu broadcast on SMP). BUGs out if either
 * core is missing. Register access order below is hardware-mandated;
 * do not reorder.
 */
void __init leon_init_timers(void)
{
	int irq, eirq;
	struct device_node *rootnp, *np, *nnp;
	struct property *pp;
	int len;
	int icsel;
	int ampopts;
	int err;
	u32 config;
	u32 ctrl;

	sparc_config.get_cycles_offset = leon_cycles_offset;
	sparc_config.cs_period = 1000000 / HZ;
	sparc_config.features |= FEAT_L10_CLOCKSOURCE;

#ifndef CONFIG_SMP
	sparc_config.features |= FEAT_L10_CLOCKEVENT;
#endif

	leondebug_irq_disable = 0;
	leon_debug_irqout = 0;
	/* LEON has no L10 counter; point the generic hook at a zeroed dummy */
	master_l10_counter = (u32 __iomem *)&dummy_master_l10_counter;
	dummy_master_l10_counter = 0;

	rootnp = of_find_node_by_path("/ambapp0");
	if (!rootnp)
		goto bad;

	/* Find System ID: GRLIB build ID and optional CHIP ID */
	pp = of_find_property(rootnp, "systemid", &len);
	if (pp)
		amba_system_id = *(unsigned long *)pp->value;

	/* Find IRQMP IRQ Controller Registers base adr otherwise bail out */
	np = of_find_node_by_name(rootnp, "GAISLER_IRQMP");
	if (!np) {
		/* fall back to the raw vendor/device-id node name */
		np = of_find_node_by_name(rootnp, "01_00d");
		if (!np)
			goto bad;
	}
	pp = of_find_property(np, "reg", &len);
	if (!pp)
		goto bad;
	leon3_irqctrl_regs = *(struct leon3_irqctrl_regs_map **)pp->value;

	/* Find GPTIMER Timer Registers base address otherwise bail out. */
	nnp = rootnp;

retry:
	np = of_find_node_by_name(nnp, "GAISLER_GPTIMER");
	if (!np) {
		np = of_find_node_by_name(nnp, "01_011");
		if (!np)
			goto bad;
	}

	ampopts = 0;
	pp = of_find_property(np, "ampopts", &len);
	if (pp) {
		ampopts = *(int *)pp->value;
		if (ampopts == 0) {
			/* Skip this instance, resource already
			 * allocated by other OS */
			nnp = np;
			goto retry;
		}
	}

	/* Select Timer-Instance on Timer Core. Default is zero */
	leon3_gptimer_idx = ampopts & 0x7;

	pp = of_find_property(np, "reg", &len);
	if (pp)
		leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **)
					pp->value;
	pp = of_find_property(np, "interrupts", &len);
	if (pp)
		leon3_gptimer_irq = *(unsigned int *)pp->value;

	if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq))
		goto bad;

	/* Probe whether the PENDING bit sticks when written: if it reads
	 * back set, the ack mask must clear it; otherwise writing the ctrl
	 * register back unchanged is enough (ackmask = all-ones). */
	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
			      ctrl | LEON3_GPTIMER_CTRL_PENDING);
	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);

	if ((ctrl & LEON3_GPTIMER_CTRL_PENDING) != 0)
		leon3_gptimer_ackmask = ~LEON3_GPTIMER_CTRL_PENDING;
	else
		leon3_gptimer_ackmask = ~0;

	/* Program a HZ-rate period; timer stays disabled until the end */
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0);
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld,
				(((1000000 / HZ) - 1)));
	LEON3_BYPASS_STORE_PA(
			&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);

	/*
	 * The IRQ controller may (if implemented) consist of multiple
	 * IRQ controllers, each mapped on a 4Kb boundary.
	 * Each CPU may be routed to different IRQCTRLs, however
	 * we assume that all CPUs (in SMP system) is routed to the
	 * same IRQ Controller, and for non-SMP only one IRQCTRL is
	 * accessed anyway.
	 * In AMP systems, Linux must run on CPU0 for the time being.
	 */
	icsel = LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->icsel[boot_cpu_id/8]);
	icsel = (icsel >> ((7 - (boot_cpu_id&0x7)) * 4)) & 0xf;
	leon3_irqctrl_regs += icsel;

	/* Mask all IRQs on boot-cpu IRQ controller */
	LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[boot_cpu_id], 0);

	/* Probe extended IRQ controller */
	eirq = (LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->mpstatus)
		>> 16) & 0xf;
	if (eirq != 0)
		leon_eirq_setup(eirq);

#ifdef CONFIG_SMP
	{
		unsigned long flags;

		/*
		 * In SMP, sun4m adds a IPI handler to IRQ trap handler that
		 * LEON never must take, sun4d and LEON overwrites the branch
		 * with a NOP.
		 */
		local_irq_save(flags);
		patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
		local_ops->cache_all();
		local_irq_restore(flags);
	}
#endif

	/* With separate per-timer IRQs the line number is offset by the
	 * timer index; with a shared line other timers can't be used. */
	config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config);
	if (config & (1 << LEON3_GPTIMER_SEPIRQ))
		leon3_gptimer_irq += leon3_gptimer_idx;
	else if ((config & LEON3_GPTIMER_TIMERS) > 1)
		pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n");

#ifdef CONFIG_SMP
	/* Install per-cpu IRQ handler for broadcasted ticker */
	irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq,
				    "per-cpu", 0);
	err = request_irq(irq, leon_percpu_timer_ce_interrupt,
			  IRQF_PERCPU | IRQF_TIMER, "timer", NULL);
#else
	irq = _leon_build_device_irq(NULL, leon3_gptimer_irq);
	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
#endif
	if (err) {
		pr_err("Unable to attach timer IRQ%d\n", irq);
		prom_halt();
	}
	/* Finally start the timer: enable, auto-reload, load, IRQ enable */
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
			      LEON3_GPTIMER_EN |
			      LEON3_GPTIMER_RL |
			      LEON3_GPTIMER_LD |
			      LEON3_GPTIMER_IRQEN);
	return;
bad:
	printk(KERN_ERR "No Timer/irqctrl found\n");
	BUG();
	return;
}
472
/* sparc_config.clear_clock_irq hook: ACK the timer tick by writing the
 * ctrl register back through leon3_gptimer_ackmask (clears the PENDING
 * bit only on hardware where that bit is sticky; see leon_init_timers).
 */
static void leon_clear_clock_irq(void)
{
	u32 ctrl;

	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
			      ctrl & leon3_gptimer_ackmask);
}
481
/* sparc_config.load_profile_irq hook: intentionally empty — LEON has no
 * profiling timer to program. */
static void leon_load_profile_irq(int cpu, unsigned int limit)
{
}
485
/* Fix up CPU nodes whose name the PROM left as "<NULL>": synthesize a
 * "cpuNN" name from the node's "mid" property.
 * NOTE(review): the 5+1-byte buffer fits "cpu%.2d" only while mid <= 99;
 * presumably mid is a small CPU index — confirm against the PROM layout.
 */
void __init leon_trans_init(struct device_node *dp)
{
	if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) {
		struct property *p;
		p = of_find_property(dp, "mid", (void *)0);
		if (p) {
			int mid;
			dp->name = prom_early_alloc(5 + 1);
			memcpy(&mid, p->value, p->length);
			sprintf((char *)dp->name, "cpu%.2d", mid);
		}
	}
}
499
#ifdef CONFIG_SMP
/* Nothing to ACK for the (non-existent) LEON profile timer */
void leon_clear_profile_irq(int cpu)
{
}

/* Unmask hardware IRQ irq_nr in the mask register of @cpu; used by SMP
 * bring-up to route IRQs to secondary CPUs. */
void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
{
	unsigned long mask, flags, *addr;
	mask = leon_get_irqmask(irq_nr);
	spin_lock_irqsave(&leon_irq_lock, flags);
	addr = (unsigned long *)LEON_IMASK(cpu);
	LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | mask));
	spin_unlock_irqrestore(&leon_irq_lock, flags);
}

#endif
516
/* Install the LEON implementations of the sparc32 platform hooks.
 * clock_rate is the GPTIMER scaler input rate in ticks/s (1 MHz). */
void __init leon_init_IRQ(void)
{
	sparc_config.init_timers      = leon_init_timers;
	sparc_config.build_device_irq = _leon_build_device_irq;
	sparc_config.clock_rate       = 1000000;
	sparc_config.clear_clock_irq  = leon_clear_clock_irq;
	sparc_config.load_profile_irq = leon_load_profile_irq;
}
[Scrape note: everything below this marker is a second rendering of the same file
at kernel tag v6.8. It differs from the copy above mainly by an SPDX license line
and dropped of_platform/of_device includes; several lines were lost or blanked
during extraction. It is duplicate residue, not a continuation of the file.]
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Copyright (C) 2009 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB
  4 * Copyright (C) 2009 Konrad Eisele (konrad@gaisler.com) Aeroflex Gaisler AB
  5 */
  6
  7#include <linux/kernel.h>
  8#include <linux/errno.h>
  9#include <linux/mutex.h>
 10#include <linux/of.h>
 
 11#include <linux/interrupt.h>
 
 12#include <linux/clocksource.h>
 13#include <linux/clockchips.h>
 14
 15#include <asm/oplib.h>
 16#include <asm/timer.h>
 17#include <asm/prom.h>
 18#include <asm/leon.h>
 19#include <asm/leon_amba.h>
 20#include <asm/traps.h>
 21#include <asm/cacheflush.h>
 22#include <asm/smp.h>
 23#include <asm/setup.h>
 24
 25#include "kernel.h"
 26#include "prom.h"
 27#include "irq.h"
 28
 29struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address */
 30struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address */
 31
 32int leondebug_irq_disable;
 33int leon_debug_irqout;
 34static volatile u32 dummy_master_l10_counter;
 35unsigned long amba_system_id;
 36static DEFINE_SPINLOCK(leon_irq_lock);
 37
 38static unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
 39static unsigned long leon3_gptimer_ackmask; /* For clearing pending bit */
 40unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
 41unsigned int sparc_leon_eirq;
 42#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
 43#define LEON_IACK (&leon3_irqctrl_regs->iclear)
 44#define LEON_DO_ACK_HW 1
 45
 46/* Return the last ACKed IRQ by the Extended IRQ controller. It has already
 47 * been (automatically) ACKed when the CPU takes the trap.
 48 */
 49static inline unsigned int leon_eirq_get(int cpu)
 50{
 51	return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f;
 52}
 53
 54/* Handle one or multiple IRQs from the extended interrupt controller */
 55static void leon_handle_ext_irq(struct irq_desc *desc)
 56{
 57	unsigned int eirq;
 58	struct irq_bucket *p;
 59	int cpu = sparc_leon3_cpuid();
 60
 61	eirq = leon_eirq_get(cpu);
 62	p = irq_map[eirq];
 63	if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */
 64		generic_handle_irq(p->irq);
 65}
 66
 67/* The extended IRQ controller has been found, this function registers it */
 68static void leon_eirq_setup(unsigned int eirq)
 69{
 70	unsigned long mask, oldmask;
 71	unsigned int veirq;
 72
 73	if (eirq < 1 || eirq > 0xf) {
 74		printk(KERN_ERR "LEON EXT IRQ NUMBER BAD: %d\n", eirq);
 75		return;
 76	}
 77
 78	veirq = leon_build_device_irq(eirq, leon_handle_ext_irq, "extirq", 0);
 79
 80	/*
 81	 * Unmask the Extended IRQ, the IRQs routed through the Ext-IRQ
 82	 * controller have a mask-bit of their own, so this is safe.
 83	 */
 84	irq_link(veirq);
 85	mask = 1 << eirq;
 86	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(boot_cpu_id));
 87	LEON3_BYPASS_STORE_PA(LEON_IMASK(boot_cpu_id), (oldmask | mask));
 88	sparc_leon_eirq = eirq;
 89}
 90
 91unsigned long leon_get_irqmask(unsigned int irq)
 92{
 93	unsigned long mask;
 94
 95	if (!irq || ((irq > 0xf) && !sparc_leon_eirq)
 96	    || ((irq > 0x1f) && sparc_leon_eirq)) {
 97		printk(KERN_ERR
 98		       "leon_get_irqmask: false irq number: %d\n", irq);
 99		mask = 0;
100	} else {
101		mask = LEON_HARD_INT(irq);
102	}
103	return mask;
104}
105
106#ifdef CONFIG_SMP
107static int irq_choose_cpu(const struct cpumask *affinity)
108{
109	cpumask_t mask;
110
111	cpumask_and(&mask, cpu_online_mask, affinity);
112	if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask))
113		return boot_cpu_id;
114	else
115		return cpumask_first(&mask);
116}
117#else
118#define irq_choose_cpu(affinity) boot_cpu_id
119#endif
120
121static int leon_set_affinity(struct irq_data *data, const struct cpumask *dest,
122			     bool force)
123{
124	unsigned long mask, oldmask, flags;
125	int oldcpu, newcpu;
126
127	mask = (unsigned long)data->chip_data;
128	oldcpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
129	newcpu = irq_choose_cpu(dest);
130
131	if (oldcpu == newcpu)
132		goto out;
133
134	/* unmask on old CPU first before enabling on the selected CPU */
135	spin_lock_irqsave(&leon_irq_lock, flags);
136	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(oldcpu));
137	LEON3_BYPASS_STORE_PA(LEON_IMASK(oldcpu), (oldmask & ~mask));
138	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(newcpu));
139	LEON3_BYPASS_STORE_PA(LEON_IMASK(newcpu), (oldmask | mask));
140	spin_unlock_irqrestore(&leon_irq_lock, flags);
141out:
142	return IRQ_SET_MASK_OK;
143}
144
145static void leon_unmask_irq(struct irq_data *data)
146{
147	unsigned long mask, oldmask, flags;
148	int cpu;
149
150	mask = (unsigned long)data->chip_data;
151	cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
152	spin_lock_irqsave(&leon_irq_lock, flags);
153	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
154	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask));
155	spin_unlock_irqrestore(&leon_irq_lock, flags);
156}
157
158static void leon_mask_irq(struct irq_data *data)
159{
160	unsigned long mask, oldmask, flags;
161	int cpu;
162
163	mask = (unsigned long)data->chip_data;
164	cpu = irq_choose_cpu(irq_data_get_affinity_mask(data));
165	spin_lock_irqsave(&leon_irq_lock, flags);
166	oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu));
167	LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask));
168	spin_unlock_irqrestore(&leon_irq_lock, flags);
169}
170
171static unsigned int leon_startup_irq(struct irq_data *data)
172{
173	irq_link(data->irq);
174	leon_unmask_irq(data);
175	return 0;
176}
177
178static void leon_shutdown_irq(struct irq_data *data)
179{
180	leon_mask_irq(data);
181	irq_unlink(data->irq);
182}
183
184/* Used by external level sensitive IRQ handlers on the LEON: ACK IRQ ctrl */
185static void leon_eoi_irq(struct irq_data *data)
186{
187	unsigned long mask = (unsigned long)data->chip_data;
188
189	if (mask & LEON_DO_ACK_HW)
190		LEON3_BYPASS_STORE_PA(LEON_IACK, mask & ~LEON_DO_ACK_HW);
191}
192
193static struct irq_chip leon_irq = {
194	.name			= "leon",
195	.irq_startup		= leon_startup_irq,
196	.irq_shutdown		= leon_shutdown_irq,
197	.irq_mask		= leon_mask_irq,
198	.irq_unmask		= leon_unmask_irq,
199	.irq_eoi		= leon_eoi_irq,
200	.irq_set_affinity	= leon_set_affinity,
201};
202
203/*
204 * Build a LEON IRQ for the edge triggered LEON IRQ controller:
205 *  Edge (normal) IRQ           - handle_simple_irq, ack=DON'T-CARE, never ack
206 *  Level IRQ (PCI|Level-GPIO)  - handle_fasteoi_irq, ack=1, ack after ISR
207 *  Per-CPU Edge                - handle_percpu_irq, ack=0
208 */
209unsigned int leon_build_device_irq(unsigned int real_irq,
210				    irq_flow_handler_t flow_handler,
211				    const char *name, int do_ack)
212{
213	unsigned int irq;
214	unsigned long mask;
215	struct irq_desc *desc;
216
217	irq = 0;
218	mask = leon_get_irqmask(real_irq);
219	if (mask == 0)
220		goto out;
221
222	irq = irq_alloc(real_irq, real_irq);
223	if (irq == 0)
224		goto out;
225
226	if (do_ack)
227		mask |= LEON_DO_ACK_HW;
228
229	desc = irq_to_desc(irq);
230	if (!desc || !desc->handle_irq || desc->handle_irq == handle_bad_irq) {
231		irq_set_chip_and_handler_name(irq, &leon_irq,
232					      flow_handler, name);
233		irq_set_chip_data(irq, (void *)mask);
234	}
235
236out:
237	return irq;
238}
239
240static unsigned int _leon_build_device_irq(struct platform_device *op,
241					   unsigned int real_irq)
242{
243	return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0);
244}
245
246void leon_update_virq_handling(unsigned int virq,
247			      irq_flow_handler_t flow_handler,
248			      const char *name, int do_ack)
249{
250	unsigned long mask = (unsigned long)irq_get_chip_data(virq);
251
252	mask &= ~LEON_DO_ACK_HW;
253	if (do_ack)
254		mask |= LEON_DO_ACK_HW;
255
256	irq_set_chip_and_handler_name(virq, &leon_irq,
257				      flow_handler, name);
258	irq_set_chip_data(virq, (void *)mask);
259}
260
261static u32 leon_cycles_offset(void)
262{
263	u32 rld, val, ctrl, off;
264
265	rld = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld);
266	val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
267	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
268	if (LEON3_GPTIMER_CTRL_ISPENDING(ctrl)) {
269		val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val);
270		off = 2 * rld - val;
271	} else {
272		off = rld - val;
273	}
274
275	return off;
276}
277
278#ifdef CONFIG_SMP
279
280/* smp clockevent irq */
281static irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
282{
283	struct clock_event_device *ce;
284	int cpu = smp_processor_id();
285
286	leon_clear_profile_irq(cpu);
287
288	if (cpu == boot_cpu_id)
289		timer_interrupt(irq, NULL);
290
291	ce = &per_cpu(sparc32_clockevent, cpu);
292
293	irq_enter();
294	if (ce->event_handler)
295		ce->event_handler(ce);
296	irq_exit();
297
298	return IRQ_HANDLED;
299}
300
301#endif /* CONFIG_SMP */
302
303void __init leon_init_timers(void)
304{
305	int irq, eirq;
306	struct device_node *rootnp, *np, *nnp;
307	struct property *pp;
308	int len;
309	int icsel;
310	int ampopts;
311	int err;
312	u32 config;
313	u32 ctrl;
314
315	sparc_config.get_cycles_offset = leon_cycles_offset;
316	sparc_config.cs_period = 1000000 / HZ;
317	sparc_config.features |= FEAT_L10_CLOCKSOURCE;
318
319#ifndef CONFIG_SMP
320	sparc_config.features |= FEAT_L10_CLOCKEVENT;
321#endif
322
323	leondebug_irq_disable = 0;
324	leon_debug_irqout = 0;
325	master_l10_counter = (u32 __iomem *)&dummy_master_l10_counter;
326	dummy_master_l10_counter = 0;
327
328	rootnp = of_find_node_by_path("/ambapp0");
329	if (!rootnp)
330		goto bad;
331
332	/* Find System ID: GRLIB build ID and optional CHIP ID */
333	pp = of_find_property(rootnp, "systemid", &len);
334	if (pp)
335		amba_system_id = *(unsigned long *)pp->value;
336
337	/* Find IRQMP IRQ Controller Registers base adr otherwise bail out */
338	np = of_find_node_by_name(rootnp, "GAISLER_IRQMP");
339	if (!np) {
340		np = of_find_node_by_name(rootnp, "01_00d");
341		if (!np)
342			goto bad;
343	}
344	pp = of_find_property(np, "reg", &len);
345	if (!pp)
346		goto bad;
347	leon3_irqctrl_regs = *(struct leon3_irqctrl_regs_map **)pp->value;
348
349	/* Find GPTIMER Timer Registers base address otherwise bail out. */
350	nnp = rootnp;
351
352retry:
353	np = of_find_node_by_name(nnp, "GAISLER_GPTIMER");
354	if (!np) {
355		np = of_find_node_by_name(nnp, "01_011");
356		if (!np)
357			goto bad;
358	}
359
360	ampopts = 0;
361	pp = of_find_property(np, "ampopts", &len);
362	if (pp) {
363		ampopts = *(int *)pp->value;
364		if (ampopts == 0) {
365			/* Skip this instance, resource already
366			 * allocated by other OS */
367			nnp = np;
368			goto retry;
369		}
370	}
371
372	/* Select Timer-Instance on Timer Core. Default is zero */
373	leon3_gptimer_idx = ampopts & 0x7;
374
375	pp = of_find_property(np, "reg", &len);
376	if (pp)
377		leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **)
378					pp->value;
379	pp = of_find_property(np, "interrupts", &len);
380	if (pp)
381		leon3_gptimer_irq = *(unsigned int *)pp->value;
382
383	if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq))
384		goto bad;
385
386	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
387	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
388			      ctrl | LEON3_GPTIMER_CTRL_PENDING);
389	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
390
391	if ((ctrl & LEON3_GPTIMER_CTRL_PENDING) != 0)
392		leon3_gptimer_ackmask = ~LEON3_GPTIMER_CTRL_PENDING;
393	else
394		leon3_gptimer_ackmask = ~0;
395
396	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0);
397	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld,
398				(((1000000 / HZ) - 1)));
399	LEON3_BYPASS_STORE_PA(
400			&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);
401
402	/*
403	 * The IRQ controller may (if implemented) consist of multiple
404	 * IRQ controllers, each mapped on a 4Kb boundary.
405	 * Each CPU may be routed to different IRQCTRLs, however
406	 * we assume that all CPUs (in SMP system) is routed to the
407	 * same IRQ Controller, and for non-SMP only one IRQCTRL is
408	 * accessed anyway.
409	 * In AMP systems, Linux must run on CPU0 for the time being.
410	 */
411	icsel = LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->icsel[boot_cpu_id/8]);
412	icsel = (icsel >> ((7 - (boot_cpu_id&0x7)) * 4)) & 0xf;
413	leon3_irqctrl_regs += icsel;
414
415	/* Mask all IRQs on boot-cpu IRQ controller */
416	LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[boot_cpu_id], 0);
417
418	/* Probe extended IRQ controller */
419	eirq = (LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->mpstatus)
420		>> 16) & 0xf;
421	if (eirq != 0)
422		leon_eirq_setup(eirq);
423
424#ifdef CONFIG_SMP
425	{
426		unsigned long flags;
427
428		/*
429		 * In SMP, sun4m adds a IPI handler to IRQ trap handler that
430		 * LEON never must take, sun4d and LEON overwrites the branch
431		 * with a NOP.
432		 */
433		local_irq_save(flags);
434		patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
435		local_ops->cache_all();
436		local_irq_restore(flags);
437	}
438#endif
439
440	config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config);
441	if (config & (1 << LEON3_GPTIMER_SEPIRQ))
442		leon3_gptimer_irq += leon3_gptimer_idx;
443	else if ((config & LEON3_GPTIMER_TIMERS) > 1)
444		pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n");
445
446#ifdef CONFIG_SMP
447	/* Install per-cpu IRQ handler for broadcasted ticker */
448	irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq,
449				    "per-cpu", 0);
450	err = request_irq(irq, leon_percpu_timer_ce_interrupt,
451			  IRQF_PERCPU | IRQF_TIMER, "timer", NULL);
452#else
453	irq = _leon_build_device_irq(NULL, leon3_gptimer_irq);
454	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
455#endif
456	if (err) {
457		pr_err("Unable to attach timer IRQ%d\n", irq);
458		prom_halt();
459	}
460	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
461			      LEON3_GPTIMER_EN |
462			      LEON3_GPTIMER_RL |
463			      LEON3_GPTIMER_LD |
464			      LEON3_GPTIMER_IRQEN);
465	return;
466bad:
467	printk(KERN_ERR "No Timer/irqctrl found\n");
468	BUG();
469	return;
470}
471
472static void leon_clear_clock_irq(void)
473{
474	u32 ctrl;
475
476	ctrl = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl);
477	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
478			      ctrl & leon3_gptimer_ackmask);
479}
480
481static void leon_load_profile_irq(int cpu, unsigned int limit)
482{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
483}
484
485#ifdef CONFIG_SMP
486void leon_clear_profile_irq(int cpu)
487{
488}
489
490void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu)
491{
492	unsigned long mask, flags, *addr;
493	mask = leon_get_irqmask(irq_nr);
494	spin_lock_irqsave(&leon_irq_lock, flags);
495	addr = (unsigned long *)LEON_IMASK(cpu);
496	LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | mask));
497	spin_unlock_irqrestore(&leon_irq_lock, flags);
498}
499
500#endif
501
502void __init leon_init_IRQ(void)
503{
504	sparc_config.init_timers      = leon_init_timers;
505	sparc_config.build_device_irq = _leon_build_device_irq;
506	sparc_config.clock_rate       = 1000000;
507	sparc_config.clear_clock_irq  = leon_clear_clock_irq;
508	sparc_config.load_profile_irq = leon_load_profile_irq;
509}