Linux v3.15:
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
  7 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
  8 */
  9
 10#include <linux/kernel.h>
 11#include <linux/init.h>
 12#include <linux/interrupt.h>
 13#include <linux/module.h>
 14#include <linux/irq.h>
 15#include <asm/irq_cpu.h>
 16#include <asm/mipsregs.h>
 17#include <bcm63xx_cpu.h>
 18#include <bcm63xx_regs.h>
 19#include <bcm63xx_io.h>
 20#include <bcm63xx_irq.h>
 21
 22static void __dispatch_internal(void) __maybe_unused;
 23static void __dispatch_internal_64(void) __maybe_unused;
 24static void __internal_irq_mask_32(unsigned int irq) __maybe_unused;
 25static void __internal_irq_mask_64(unsigned int irq) __maybe_unused;
 26static void __internal_irq_unmask_32(unsigned int irq) __maybe_unused;
 27static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused;
 28
 29#ifndef BCMCPU_RUNTIME_DETECT
 30#ifdef CONFIG_BCM63XX_CPU_3368
 31#define irq_stat_reg		PERF_IRQSTAT_3368_REG
 32#define irq_mask_reg		PERF_IRQMASK_3368_REG
 33#define irq_bits		32
 34#define is_ext_irq_cascaded	0
 35#define ext_irq_start		0
 36#define ext_irq_end		0
 37#define ext_irq_count		4
 38#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_3368
 39#define ext_irq_cfg_reg2	0
 40#endif
 41#ifdef CONFIG_BCM63XX_CPU_6328
 42#define irq_stat_reg		PERF_IRQSTAT_6328_REG
 43#define irq_mask_reg		PERF_IRQMASK_6328_REG
 44#define irq_bits		64
 45#define is_ext_irq_cascaded	1
 46#define ext_irq_start		(BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE)
 47#define ext_irq_end		(BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE)
 48#define ext_irq_count		4
 49#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6328
 50#define ext_irq_cfg_reg2	0
 51#endif
 52#ifdef CONFIG_BCM63XX_CPU_6338
 53#define irq_stat_reg		PERF_IRQSTAT_6338_REG
 54#define irq_mask_reg		PERF_IRQMASK_6338_REG
 55#define irq_bits		32
 56#define is_ext_irq_cascaded	0
 57#define ext_irq_start		0
 58#define ext_irq_end		0
 59#define ext_irq_count		4
 60#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6338
 61#define ext_irq_cfg_reg2	0
 62#endif
 63#ifdef CONFIG_BCM63XX_CPU_6345
 64#define irq_stat_reg		PERF_IRQSTAT_6345_REG
 65#define irq_mask_reg		PERF_IRQMASK_6345_REG
 66#define irq_bits		32
 67#define is_ext_irq_cascaded	0
 68#define ext_irq_start		0
 69#define ext_irq_end		0
 70#define ext_irq_count		4
 71#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6345
 72#define ext_irq_cfg_reg2	0
 73#endif
 74#ifdef CONFIG_BCM63XX_CPU_6348
 75#define irq_stat_reg		PERF_IRQSTAT_6348_REG
 76#define irq_mask_reg		PERF_IRQMASK_6348_REG
 77#define irq_bits		32
 78#define is_ext_irq_cascaded	0
 79#define ext_irq_start		0
 80#define ext_irq_end		0
 81#define ext_irq_count		4
 82#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6348
 83#define ext_irq_cfg_reg2	0
 84#endif
 85#ifdef CONFIG_BCM63XX_CPU_6358
 86#define irq_stat_reg		PERF_IRQSTAT_6358_REG
 87#define irq_mask_reg		PERF_IRQMASK_6358_REG
 88#define irq_bits		32
 89#define is_ext_irq_cascaded	1
 90#define ext_irq_start		(BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE)
 91#define ext_irq_end		(BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE)
 92#define ext_irq_count		4
 93#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6358
 94#define ext_irq_cfg_reg2	0
 95#endif
 96#ifdef CONFIG_BCM63XX_CPU_6362
 97#define irq_stat_reg		PERF_IRQSTAT_6362_REG
 98#define irq_mask_reg		PERF_IRQMASK_6362_REG
 99#define irq_bits		64
100#define is_ext_irq_cascaded	1
101#define ext_irq_start		(BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE)
102#define ext_irq_end		(BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE)
103#define ext_irq_count		4
104#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6362
105#define ext_irq_cfg_reg2	0
106#endif
107#ifdef CONFIG_BCM63XX_CPU_6368
108#define irq_stat_reg		PERF_IRQSTAT_6368_REG
109#define irq_mask_reg		PERF_IRQMASK_6368_REG
110#define irq_bits		64
111#define is_ext_irq_cascaded	1
112#define ext_irq_start		(BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE)
113#define ext_irq_end		(BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE)
114#define ext_irq_count		6
115#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6368
116#define ext_irq_cfg_reg2	PERF_EXTIRQ_CFG_REG2_6368
117#endif
118
119#if irq_bits == 32
120#define dispatch_internal			__dispatch_internal
121#define internal_irq_mask			__internal_irq_mask_32
122#define internal_irq_unmask			__internal_irq_unmask_32
123#else
124#define dispatch_internal			__dispatch_internal_64
125#define internal_irq_mask			__internal_irq_mask_64
126#define internal_irq_unmask			__internal_irq_unmask_64
127#endif
128
129#define irq_stat_addr	(bcm63xx_regset_address(RSET_PERF) + irq_stat_reg)
130#define irq_mask_addr	(bcm63xx_regset_address(RSET_PERF) + irq_mask_reg)
131
132static inline void bcm63xx_init_irq(void)
133{
134}
135#else /* ! BCMCPU_RUNTIME_DETECT */
136
137static u32 irq_stat_addr, irq_mask_addr;
138static void (*dispatch_internal)(void);
139static int is_ext_irq_cascaded;
140static unsigned int ext_irq_count;
141static unsigned int ext_irq_start, ext_irq_end;
142static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
143static void (*internal_irq_mask)(unsigned int irq);
144static void (*internal_irq_unmask)(unsigned int irq);
145
146static void bcm63xx_init_irq(void)
147{
148	int irq_bits;
149
150	irq_stat_addr = bcm63xx_regset_address(RSET_PERF);
151	irq_mask_addr = bcm63xx_regset_address(RSET_PERF);
152
153	switch (bcm63xx_get_cpu_id()) {
154	case BCM3368_CPU_ID:
155		irq_stat_addr += PERF_IRQSTAT_3368_REG;
156		irq_mask_addr += PERF_IRQMASK_3368_REG;
157		irq_bits = 32;
158		ext_irq_count = 4;
159		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
160		break;
161	case BCM6328_CPU_ID:
162		irq_stat_addr += PERF_IRQSTAT_6328_REG;
163		irq_mask_addr += PERF_IRQMASK_6328_REG;
164		irq_bits = 64;
165		ext_irq_count = 4;
166		is_ext_irq_cascaded = 1;
167		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
168		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
169		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
170		break;
171	case BCM6338_CPU_ID:
172		irq_stat_addr += PERF_IRQSTAT_6338_REG;
173		irq_mask_addr += PERF_IRQMASK_6338_REG;
174		irq_bits = 32;
175		ext_irq_count = 4;
176		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
177		break;
178	case BCM6345_CPU_ID:
179		irq_stat_addr += PERF_IRQSTAT_6345_REG;
180		irq_mask_addr += PERF_IRQMASK_6345_REG;
181		irq_bits = 32;
182		ext_irq_count = 4;
183		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
184		break;
185	case BCM6348_CPU_ID:
186		irq_stat_addr += PERF_IRQSTAT_6348_REG;
187		irq_mask_addr += PERF_IRQMASK_6348_REG;
188		irq_bits = 32;
189		ext_irq_count = 4;
190		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
191		break;
192	case BCM6358_CPU_ID:
193		irq_stat_addr += PERF_IRQSTAT_6358_REG;
194		irq_mask_addr += PERF_IRQMASK_6358_REG;
195		irq_bits = 32;
196		ext_irq_count = 4;
197		is_ext_irq_cascaded = 1;
198		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
199		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
200		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
201		break;
202	case BCM6362_CPU_ID:
203		irq_stat_addr += PERF_IRQSTAT_6362_REG;
204		irq_mask_addr += PERF_IRQMASK_6362_REG;
205		irq_bits = 64;
206		ext_irq_count = 4;
207		is_ext_irq_cascaded = 1;
208		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
209		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
210		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
211		break;
212	case BCM6368_CPU_ID:
213		irq_stat_addr += PERF_IRQSTAT_6368_REG;
214		irq_mask_addr += PERF_IRQMASK_6368_REG;
215		irq_bits = 64;
216		ext_irq_count = 6;
217		is_ext_irq_cascaded = 1;
218		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
219		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
220		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
221		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
222		break;
223	default:
224		BUG();
225	}
226
227	if (irq_bits == 32) {
228		dispatch_internal = __dispatch_internal;
229		internal_irq_mask = __internal_irq_mask_32;
230		internal_irq_unmask = __internal_irq_unmask_32;
231	} else {
232		dispatch_internal = __dispatch_internal_64;
233		internal_irq_mask = __internal_irq_mask_64;
234		internal_irq_unmask = __internal_irq_unmask_64;
235	}
236}
237#endif /* ! BCMCPU_RUNTIME_DETECT */
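/*
 * Editor's note, not part of the original file: the block above gives the
 * same driver two shapes. When exactly one SoC is configured
 * (BCMCPU_RUNTIME_DETECT undefined), register offsets, irq_bits and the
 * cascade parameters collapse to compile-time #defines and
 * bcm63xx_init_irq() is an empty stub; otherwise they are ordinary
 * variables and function pointers that bcm63xx_init_irq() fills in from
 * bcm63xx_get_cpu_id(). A minimal standalone sketch of the same pattern
 * follows; the chip IDs, offsets and dispatch stubs are made up for
 * illustration only.
 */
#include <stdio.h>

#define CHIP_A_ID 0xa
#define CHIP_B_ID 0xb

static void chip_a_dispatch(void) { puts("dispatch for chip A"); }
static void chip_b_dispatch(void) { puts("dispatch for chip B"); }

#ifdef ONLY_CHIP_A
/* single-chip build: everything folds to constants */
#define chip_irq_base	0x1000
#define chip_dispatch	chip_a_dispatch
static void chip_init(unsigned int id) { (void)id; }
#else
/* runtime detection: bind the same names through variables */
static unsigned int chip_irq_base;
static void (*chip_dispatch)(void);

static void chip_init(unsigned int id)
{
	chip_irq_base = (id == CHIP_A_ID) ? 0x1000 : 0x2000;
	chip_dispatch = (id == CHIP_A_ID) ? chip_a_dispatch : chip_b_dispatch;
}
#endif

int main(void)
{
	chip_init(CHIP_B_ID);
	printf("irq base = 0x%x\n", chip_irq_base);
	chip_dispatch();
	return 0;
}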
238
239static inline u32 get_ext_irq_perf_reg(int irq)
240{
241	if (irq < 4)
242		return ext_irq_cfg_reg1;
243	return ext_irq_cfg_reg2;
244}
245
246static inline void handle_internal(int intbit)
247{
248	if (is_ext_irq_cascaded &&
249	    intbit >= ext_irq_start && intbit <= ext_irq_end)
250		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
251	else
252		do_IRQ(intbit + IRQ_INTERNAL_BASE);
253}
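/*
 * Editor's note, not in the original: a worked example of the remap done
 * by handle_internal(), with illustrative numbers only. Suppose the
 * cascaded case with ext_irq_start = 24, ext_irq_end = 27,
 * IRQ_INTERNAL_BASE = 8 and IRQ_EXTERNAL_BASE = 100: internal status bit
 * 25 falls inside [24, 27] and is forwarded as Linux irq
 * 25 - 24 + 100 = 101, while status bit 5 lies outside the window and
 * becomes irq 5 + 8 = 13.
 */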
254
255/*
256 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
257 * prioritize any interrupt relatively to another. the static counter
258 * will resume the loop where it ended the last time we left this
259 * function.
260 */
261static void __dispatch_internal(void)
262{
263	u32 pending;
264	static int i;
265
266	pending = bcm_readl(irq_stat_addr) & bcm_readl(irq_mask_addr);
267
268	if (!pending)
269		return ;
270
271	while (1) {
272		int to_call = i;
273
274		i = (i + 1) & 0x1f;
275		if (pending & (1 << to_call)) {
276			handle_internal(to_call);
277			break;
278		}
279	}
280}
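/*
 * Editor's sketch, not part of the file: the rotating-index scan above,
 * lifted into a tiny standalone program so the "no priority" property is
 * easy to see. The pending mask and width are made up; the loop is the
 * same resume-where-we-left-off technique as __dispatch_internal(), and,
 * as there, the caller must have checked that something is pending.
 */
#include <stdio.h>

static unsigned int next_bit;	/* persists across calls, like "static int i" */

static int pick_one(unsigned int pending, unsigned int width)
{
	for (;;) {
		unsigned int to_call = next_bit;

		next_bit = (next_bit + 1) & (width - 1);
		if (pending & (1u << to_call))
			return to_call;
	}
}

int main(void)
{
	unsigned int pending = (1u << 3) | (1u << 17);	/* two sources raised */
	int n;

	/* successive calls alternate between bit 3 and bit 17 */
	for (n = 0; n < 4; n++)
		printf("servicing bit %d\n", pick_one(pending, 32));
	return 0;
}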
281
282static void __dispatch_internal_64(void)
283{
284	u64 pending;
285	static int i;
286
287	pending = bcm_readq(irq_stat_addr) & bcm_readq(irq_mask_addr);
288
289	if (!pending)
290		return ;
291
292	while (1) {
293		int to_call = i;
294
295		i = (i + 1) & 0x3f;
296		if (pending & (1ull << to_call)) {
297			handle_internal(to_call);
298			break;
299		}
300	}
301}
302
303asmlinkage void plat_irq_dispatch(void)
304{
305	u32 cause;
306
307	do {
308		cause = read_c0_cause() & read_c0_status() & ST0_IM;
309
310		if (!cause)
311			break;
312
313		if (cause & CAUSEF_IP7)
314			do_IRQ(7);
315		if (cause & CAUSEF_IP0)
316			do_IRQ(0);
317		if (cause & CAUSEF_IP1)
318			do_IRQ(1);
319		if (cause & CAUSEF_IP2)
320			dispatch_internal();
321		if (!is_ext_irq_cascaded) {
322			if (cause & CAUSEF_IP3)
323				do_IRQ(IRQ_EXT_0);
324			if (cause & CAUSEF_IP4)
325				do_IRQ(IRQ_EXT_1);
326			if (cause & CAUSEF_IP5)
327				do_IRQ(IRQ_EXT_2);
328			if (cause & CAUSEF_IP6)
329				do_IRQ(IRQ_EXT_3);
330		}
331	} while (1);
332}
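/*
 * Editor's note, not in the original: plat_irq_dispatch() only considers
 * lines that are both pending (CAUSE IP bits) and enabled (STATUS IM
 * bits); ST0_IM covers IM0..IM7. With made-up register values
 * cause = 0x8400 (IP7 and IP2 raised) and status = 0xff01 (all IM bits
 * set, IE set), cause & status & ST0_IM = 0x8400, so the timer on IP7 is
 * serviced first, then IP2 runs dispatch_internal(), and the do/while
 * loop re-reads CAUSE until nothing enabled is still pending.
 */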
333
334/*
335 * internal IRQs operations: only mask/unmask on PERF irq mask
336 * register.
337 */
338static void __internal_irq_mask_32(unsigned int irq)
339{
340	u32 mask;
341
342	mask = bcm_readl(irq_mask_addr);
343	mask &= ~(1 << irq);
344	bcm_writel(mask, irq_mask_addr);
345}
346
347static void __internal_irq_mask_64(unsigned int irq)
348{
349	u64 mask;
350
351	mask = bcm_readq(irq_mask_addr);
352	mask &= ~(1ull << irq);
353	bcm_writeq(mask, irq_mask_addr);
354}
355
356static void __internal_irq_unmask_32(unsigned int irq)
357{
358	u32 mask;
359
360	mask = bcm_readl(irq_mask_addr);
361	mask |= (1 << irq);
362	bcm_writel(mask, irq_mask_addr);
363}
364
365static void __internal_irq_unmask_64(unsigned int irq)
366{
367	u64 mask;
368
369	mask = bcm_readq(irq_mask_addr);
370	mask |= (1ull << irq);
371	bcm_writeq(mask, irq_mask_addr);
372}
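/*
 * Editor's note, not in the original: each helper above is a plain
 * read-modify-write of the PERF interrupt mask register: read the current
 * mask, clear or set bit "irq", write it back. In this v3.15 version the
 * sequence is unlocked; the v5.14.15 version of the file further below
 * does the same update under the ipic_lock spinlock and repeats it for
 * every present CPU's mask register.
 */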
373
374static void bcm63xx_internal_irq_mask(struct irq_data *d)
375{
376	internal_irq_mask(d->irq - IRQ_INTERNAL_BASE);
377}
378
379static void bcm63xx_internal_irq_unmask(struct irq_data *d)
380{
381	internal_irq_unmask(d->irq - IRQ_INTERNAL_BASE);
382}
383
384/*
385 * external IRQs operations: mask/unmask and clear on PERF external
386 * irq control register.
387 */
388static void bcm63xx_external_irq_mask(struct irq_data *d)
389{
390	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
391	u32 reg, regaddr;
392
393	regaddr = get_ext_irq_perf_reg(irq);
394	reg = bcm_perf_readl(regaddr);
395
396	if (BCMCPU_IS_6348())
397		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
398	else
399		reg &= ~EXTIRQ_CFG_MASK(irq % 4);
400
401	bcm_perf_writel(reg, regaddr);
402	if (is_ext_irq_cascaded)
403		internal_irq_mask(irq + ext_irq_start);
404}
405
406static void bcm63xx_external_irq_unmask(struct irq_data *d)
407{
408	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
409	u32 reg, regaddr;
410
411	regaddr = get_ext_irq_perf_reg(irq);
412	reg = bcm_perf_readl(regaddr);
413
414	if (BCMCPU_IS_6348())
415		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
416	else
417		reg |= EXTIRQ_CFG_MASK(irq % 4);
418
419	bcm_perf_writel(reg, regaddr);
420
421	if (is_ext_irq_cascaded)
422		internal_irq_unmask(irq + ext_irq_start);
423}
424
425static void bcm63xx_external_irq_clear(struct irq_data *d)
426{
427	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
428	u32 reg, regaddr;
429
430	regaddr = get_ext_irq_perf_reg(irq);
431	reg = bcm_perf_readl(regaddr);
432
433	if (BCMCPU_IS_6348())
434		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
435	else
436		reg |= EXTIRQ_CFG_CLEAR(irq % 4);
437
438	bcm_perf_writel(reg, regaddr);
439}
440
441static int bcm63xx_external_irq_set_type(struct irq_data *d,
442					 unsigned int flow_type)
443{
444	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
445	u32 reg, regaddr;
446	int levelsense, sense, bothedge;
447
448	flow_type &= IRQ_TYPE_SENSE_MASK;
449
450	if (flow_type == IRQ_TYPE_NONE)
451		flow_type = IRQ_TYPE_LEVEL_LOW;
452
453	levelsense = sense = bothedge = 0;
454	switch (flow_type) {
455	case IRQ_TYPE_EDGE_BOTH:
456		bothedge = 1;
457		break;
458
459	case IRQ_TYPE_EDGE_RISING:
460		sense = 1;
461		break;
462
463	case IRQ_TYPE_EDGE_FALLING:
464		break;
465
466	case IRQ_TYPE_LEVEL_HIGH:
467		levelsense = 1;
468		sense = 1;
469		break;
470
471	case IRQ_TYPE_LEVEL_LOW:
472		levelsense = 1;
473		break;
474
475	default:
476		printk(KERN_ERR "bogus flow type combination given !\n");
477		return -EINVAL;
478	}
479
480	regaddr = get_ext_irq_perf_reg(irq);
481	reg = bcm_perf_readl(regaddr);
482	irq %= 4;
483
484	switch (bcm63xx_get_cpu_id()) {
485	case BCM6348_CPU_ID:
486		if (levelsense)
487			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
488		else
489			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
490		if (sense)
491			reg |= EXTIRQ_CFG_SENSE_6348(irq);
492		else
493			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
494		if (bothedge)
495			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
496		else
497			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
498		break;
499
500	case BCM3368_CPU_ID:
501	case BCM6328_CPU_ID:
502	case BCM6338_CPU_ID:
503	case BCM6345_CPU_ID:
504	case BCM6358_CPU_ID:
505	case BCM6362_CPU_ID:
506	case BCM6368_CPU_ID:
507		if (levelsense)
508			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
509		else
510			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
511		if (sense)
512			reg |= EXTIRQ_CFG_SENSE(irq);
513		else
514			reg &= ~EXTIRQ_CFG_SENSE(irq);
515		if (bothedge)
516			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
517		else
518			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
519		break;
520	default:
521		BUG();
522	}
523
524	bcm_perf_writel(reg, regaddr);
525
526	irqd_set_trigger_type(d, flow_type);
527	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
528		__irq_set_handler_locked(d->irq, handle_level_irq);
529	else
530		__irq_set_handler_locked(d->irq, handle_edge_irq);
531
532	return IRQ_SET_MASK_OK_NOCOPY;
533}
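/*
 * Editor's note, not in the original: the trigger-type decoding above,
 * summarised. Each Linux flow type maps onto three register fields:
 *
 *   flow type               levelsense  sense  bothedge
 *   IRQ_TYPE_EDGE_BOTH           0        0       1
 *   IRQ_TYPE_EDGE_RISING         0        1       0
 *   IRQ_TYPE_EDGE_FALLING        0        0       0
 *   IRQ_TYPE_LEVEL_HIGH          1        1       0
 *   IRQ_TYPE_LEVEL_LOW           1        0       0   (also the default
 *                                                      for IRQ_TYPE_NONE)
 *
 * The 6348 uses its own EXTIRQ_CFG_*_6348() field macros; every other
 * supported chip shares the generic EXTIRQ_CFG_*() layout.
 */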
534
535static struct irq_chip bcm63xx_internal_irq_chip = {
536	.name		= "bcm63xx_ipic",
537	.irq_mask	= bcm63xx_internal_irq_mask,
538	.irq_unmask	= bcm63xx_internal_irq_unmask,
539};
540
541static struct irq_chip bcm63xx_external_irq_chip = {
542	.name		= "bcm63xx_epic",
543	.irq_ack	= bcm63xx_external_irq_clear,
544
545	.irq_mask	= bcm63xx_external_irq_mask,
546	.irq_unmask	= bcm63xx_external_irq_unmask,
547
548	.irq_set_type	= bcm63xx_external_irq_set_type,
549};
550
551static struct irqaction cpu_ip2_cascade_action = {
552	.handler	= no_action,
553	.name		= "cascade_ip2",
554	.flags		= IRQF_NO_THREAD,
555};
556
557static struct irqaction cpu_ext_cascade_action = {
558	.handler	= no_action,
559	.name		= "cascade_extirq",
560	.flags		= IRQF_NO_THREAD,
561};
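/*
 * Editor's note, not in the original: these two irqaction structures are
 * placeholders. The CPU lines that the internal and external controllers
 * cascade from must be claimed so the core keeps them enabled, but the
 * real demultiplexing happens in plat_irq_dispatch(), hence the no_action
 * handler. The v5.14.15 version below drops the static irqactions and
 * claims the same lines with request_irq() instead of setup_irq().
 */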
562
563void __init arch_init_irq(void)
564{
565	int i;
566
567	bcm63xx_init_irq();
568	mips_cpu_irq_init();
569	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
570		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
571					 handle_level_irq);
572
573	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
574		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
575					 handle_edge_irq);
576
577	if (!is_ext_irq_cascaded) {
578		for (i = 3; i < 3 + ext_irq_count; ++i)
579			setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
580	}
581
582	setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
583}
Linux v5.14.15: the same code after later rework (runtime-only CPU detection, spinlock-protected register access, SMP/affinity support, request_irq() in place of the static irqactions).
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
  7 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
  8 */
  9
 10#include <linux/kernel.h>
 11#include <linux/init.h>
 12#include <linux/interrupt.h>
 13#include <linux/irq.h>
 14#include <linux/spinlock.h>
 15#include <asm/irq_cpu.h>
 16#include <asm/mipsregs.h>
 17#include <bcm63xx_cpu.h>
 18#include <bcm63xx_regs.h>
 19#include <bcm63xx_io.h>
 20#include <bcm63xx_irq.h>
 21
 22
 23static DEFINE_SPINLOCK(ipic_lock);
 24static DEFINE_SPINLOCK(epic_lock);
 25
 26static u32 irq_stat_addr[2];
 27static u32 irq_mask_addr[2];
 28static void (*dispatch_internal)(int cpu);
 29static int is_ext_irq_cascaded;
 30static unsigned int ext_irq_count;
 31static unsigned int ext_irq_start, ext_irq_end;
 32static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
 33static void (*internal_irq_mask)(struct irq_data *d);
 34static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);
 35
 36
 37static inline u32 get_ext_irq_perf_reg(int irq)
 38{
 39	if (irq < 4)
 40		return ext_irq_cfg_reg1;
 41	return ext_irq_cfg_reg2;
 42}
 43
 44static inline void handle_internal(int intbit)
 45{
 46	if (is_ext_irq_cascaded &&
 47	    intbit >= ext_irq_start && intbit <= ext_irq_end)
 48		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
 49	else
 50		do_IRQ(intbit + IRQ_INTERNAL_BASE);
 51}
 52
 53static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
 54				     const struct cpumask *m)
 55{
 56	bool enable = cpu_online(cpu);
 57
 58#ifdef CONFIG_SMP
 59	if (m)
 60		enable &= cpumask_test_cpu(cpu, m);
 61	else if (irqd_affinity_was_set(d))
 62		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
 63#endif
 64	return enable;
 65}
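/*
 * Editor's note, not in the original: enable_irq_for_cpu() decides, per
 * CPU, whether an unmask should actually set the enable bit in that CPU's
 * copy of the mask register. A hypothetical example: on a two-CPU 6358
 * with an interrupt whose affinity has been set to CPU1 only, the unmask
 * helper generated below walks both present CPUs, leaves the bit cleared
 * in irq_mask_addr[0] and sets it in irq_mask_addr[1], so only CPU1 ever
 * sees that interrupt.
 */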
 66
 67/*
 68 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
 69 * prioritize any interrupt relatively to another. the static counter
 70 * will resume the loop where it ended the last time we left this
 71 * function.
 72 */
 73
 74#define BUILD_IPIC_INTERNAL(width)					\
 75void __dispatch_internal_##width(int cpu)				\
 76{									\
 77	u32 pending[width / 32];					\
 78	unsigned int src, tgt;						\
 79	bool irqs_pending = false;					\
 80	static unsigned int i[2];					\
 81	unsigned int *next = &i[cpu];					\
 82	unsigned long flags;						\
 83									\
 84	/* read registers in reverse order */				\
 85	spin_lock_irqsave(&ipic_lock, flags);				\
 86	for (src = 0, tgt = (width / 32); src < (width / 32); src++) {	\
 87		u32 val;						\
 88									\
 89		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
 90		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
 91		pending[--tgt] = val;					\
 92									\
 93		if (val)						\
 94			irqs_pending = true;				\
 95	}								\
 96	spin_unlock_irqrestore(&ipic_lock, flags);			\
 97									\
 98	if (!irqs_pending)						\
 99		return;							\
100									\
101	while (1) {							\
102		unsigned int to_call = *next;				\
103									\
104		*next = (*next + 1) & (width - 1);			\
105		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) {	\
106			handle_internal(to_call);			\
107			break;						\
108		}							\
109	}								\
110}									\
111									\
112static void __internal_irq_mask_##width(struct irq_data *d)		\
113{									\
114	u32 val;							\
115	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
116	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
117	unsigned bit = irq & 0x1f;					\
118	unsigned long flags;						\
119	int cpu;							\
120									\
121	spin_lock_irqsave(&ipic_lock, flags);				\
122	for_each_present_cpu(cpu) {					\
123		if (!irq_mask_addr[cpu])				\
124			break;						\
125									\
126		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
127		val &= ~(1 << bit);					\
128		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
129	}								\
130	spin_unlock_irqrestore(&ipic_lock, flags);			\
131}									\
132									\
133static void __internal_irq_unmask_##width(struct irq_data *d,		\
134					  const struct cpumask *m)	\
135{									\
136	u32 val;							\
137	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
138	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
139	unsigned bit = irq & 0x1f;					\
140	unsigned long flags;						\
141	int cpu;							\
142									\
143	spin_lock_irqsave(&ipic_lock, flags);				\
144	for_each_present_cpu(cpu) {					\
145		if (!irq_mask_addr[cpu])				\
146			break;						\
147									\
148		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
149		if (enable_irq_for_cpu(cpu, d, m))			\
150			val |= (1 << bit);				\
151		else							\
152			val &= ~(1 << bit);				\
153		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
154	}								\
155	spin_unlock_irqrestore(&ipic_lock, flags);			\
156}
157
158BUILD_IPIC_INTERNAL(32);
159BUILD_IPIC_INTERNAL(64);
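/*
 * Editor's sketch, not part of the file: BUILD_IPIC_INTERNAL() above
 * stamps out 32-bit and 64-bit variants of the same three helpers by
 * pasting the width into the function names with the ## operator. A
 * minimal standalone example of the technique, with made-up names:
 */
#include <stdio.h>

#define BUILD_MASK_LOW(width)						\
static unsigned long long mask_low_##width(unsigned long long v)	\
{									\
	/* keep only the low "width" bits of v */			\
	return v & (~0ull >> (64 - width));				\
}

BUILD_MASK_LOW(32)
BUILD_MASK_LOW(64)

int main(void)
{
	printf("%llx\n", mask_low_32(0x1234567890abcdefull));	/* 90abcdef */
	printf("%llx\n", mask_low_64(0x1234567890abcdefull));	/* unchanged */
	return 0;
}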
160
161asmlinkage void plat_irq_dispatch(void)
162{
163	u32 cause;
164
165	do {
166		cause = read_c0_cause() & read_c0_status() & ST0_IM;
167
168		if (!cause)
169			break;
170
171		if (cause & CAUSEF_IP7)
172			do_IRQ(7);
173		if (cause & CAUSEF_IP0)
174			do_IRQ(0);
175		if (cause & CAUSEF_IP1)
176			do_IRQ(1);
177		if (cause & CAUSEF_IP2)
178			dispatch_internal(0);
179		if (is_ext_irq_cascaded) {
180			if (cause & CAUSEF_IP3)
181				dispatch_internal(1);
182		} else {
183			if (cause & CAUSEF_IP3)
184				do_IRQ(IRQ_EXT_0);
185			if (cause & CAUSEF_IP4)
186				do_IRQ(IRQ_EXT_1);
187			if (cause & CAUSEF_IP5)
188				do_IRQ(IRQ_EXT_2);
189			if (cause & CAUSEF_IP6)
190				do_IRQ(IRQ_EXT_3);
191		}
192	} while (1);
193}
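/*
 * Editor's note, not in the original: compared with v3.15, the cascaded
 * case now uses CP0 IP3 as well. dispatch_internal(1) reads
 * irq_stat_addr[1]/irq_mask_addr[1], i.e. the second CPU's copy of the
 * internal controller, which the CONFIG_SMP block in arch_init_irq()
 * below hooks up as "cascade_ip3". Only in the non-cascaded case do
 * IP3..IP6 still carry the external interrupt lines directly.
 */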
194
195/*
196 * internal IRQs operations: only mask/unmask on PERF irq mask
197 * register.
198 */
199static void bcm63xx_internal_irq_mask(struct irq_data *d)
200{
201	internal_irq_mask(d);
202}
203
204static void bcm63xx_internal_irq_unmask(struct irq_data *d)
205{
206	internal_irq_unmask(d, NULL);
207}
208
209/*
210 * external IRQs operations: mask/unmask and clear on PERF external
211 * irq control register.
212 */
213static void bcm63xx_external_irq_mask(struct irq_data *d)
214{
215	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
216	u32 reg, regaddr;
217	unsigned long flags;
218
219	regaddr = get_ext_irq_perf_reg(irq);
220	spin_lock_irqsave(&epic_lock, flags);
221	reg = bcm_perf_readl(regaddr);
222
223	if (BCMCPU_IS_6348())
224		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
225	else
226		reg &= ~EXTIRQ_CFG_MASK(irq % 4);
227
228	bcm_perf_writel(reg, regaddr);
229	spin_unlock_irqrestore(&epic_lock, flags);
230
231	if (is_ext_irq_cascaded)
232		internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
233}
234
235static void bcm63xx_external_irq_unmask(struct irq_data *d)
236{
237	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
238	u32 reg, regaddr;
239	unsigned long flags;
240
241	regaddr = get_ext_irq_perf_reg(irq);
242	spin_lock_irqsave(&epic_lock, flags);
243	reg = bcm_perf_readl(regaddr);
244
245	if (BCMCPU_IS_6348())
246		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
247	else
248		reg |= EXTIRQ_CFG_MASK(irq % 4);
249
250	bcm_perf_writel(reg, regaddr);
251	spin_unlock_irqrestore(&epic_lock, flags);
252
253	if (is_ext_irq_cascaded)
254		internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
255				    NULL);
256}
257
258static void bcm63xx_external_irq_clear(struct irq_data *d)
259{
260	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
261	u32 reg, regaddr;
262	unsigned long flags;
263
264	regaddr = get_ext_irq_perf_reg(irq);
265	spin_lock_irqsave(&epic_lock, flags);
266	reg = bcm_perf_readl(regaddr);
267
268	if (BCMCPU_IS_6348())
269		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
270	else
271		reg |= EXTIRQ_CFG_CLEAR(irq % 4);
272
273	bcm_perf_writel(reg, regaddr);
274	spin_unlock_irqrestore(&epic_lock, flags);
275}
276
277static int bcm63xx_external_irq_set_type(struct irq_data *d,
278					 unsigned int flow_type)
279{
280	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
281	u32 reg, regaddr;
282	int levelsense, sense, bothedge;
283	unsigned long flags;
284
285	flow_type &= IRQ_TYPE_SENSE_MASK;
286
287	if (flow_type == IRQ_TYPE_NONE)
288		flow_type = IRQ_TYPE_LEVEL_LOW;
289
290	levelsense = sense = bothedge = 0;
291	switch (flow_type) {
292	case IRQ_TYPE_EDGE_BOTH:
293		bothedge = 1;
294		break;
295
296	case IRQ_TYPE_EDGE_RISING:
297		sense = 1;
298		break;
299
300	case IRQ_TYPE_EDGE_FALLING:
301		break;
302
303	case IRQ_TYPE_LEVEL_HIGH:
304		levelsense = 1;
305		sense = 1;
306		break;
307
308	case IRQ_TYPE_LEVEL_LOW:
309		levelsense = 1;
310		break;
311
312	default:
313		pr_err("bogus flow type combination given !\n");
314		return -EINVAL;
315	}
316
317	regaddr = get_ext_irq_perf_reg(irq);
318	spin_lock_irqsave(&epic_lock, flags);
319	reg = bcm_perf_readl(regaddr);
320	irq %= 4;
321
322	switch (bcm63xx_get_cpu_id()) {
323	case BCM6348_CPU_ID:
324		if (levelsense)
325			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
326		else
327			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
328		if (sense)
329			reg |= EXTIRQ_CFG_SENSE_6348(irq);
330		else
331			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
332		if (bothedge)
333			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
334		else
335			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
336		break;
337
338	case BCM3368_CPU_ID:
339	case BCM6328_CPU_ID:
340	case BCM6338_CPU_ID:
341	case BCM6345_CPU_ID:
342	case BCM6358_CPU_ID:
343	case BCM6362_CPU_ID:
344	case BCM6368_CPU_ID:
345		if (levelsense)
346			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
347		else
348			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
349		if (sense)
350			reg |= EXTIRQ_CFG_SENSE(irq);
351		else
352			reg &= ~EXTIRQ_CFG_SENSE(irq);
353		if (bothedge)
354			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
355		else
356			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
357		break;
358	default:
359		BUG();
360	}
361
362	bcm_perf_writel(reg, regaddr);
363	spin_unlock_irqrestore(&epic_lock, flags);
364
365	irqd_set_trigger_type(d, flow_type);
366	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
367		irq_set_handler_locked(d, handle_level_irq);
368	else
369		irq_set_handler_locked(d, handle_edge_irq);
370
371	return IRQ_SET_MASK_OK_NOCOPY;
372}
373
374#ifdef CONFIG_SMP
375static int bcm63xx_internal_set_affinity(struct irq_data *data,
376					 const struct cpumask *dest,
377					 bool force)
378{
379	if (!irqd_irq_disabled(data))
380		internal_irq_unmask(data, dest);
381
382	return 0;
383}
384#endif
385
386static struct irq_chip bcm63xx_internal_irq_chip = {
387	.name		= "bcm63xx_ipic",
388	.irq_mask	= bcm63xx_internal_irq_mask,
389	.irq_unmask	= bcm63xx_internal_irq_unmask,
390};
391
392static struct irq_chip bcm63xx_external_irq_chip = {
393	.name		= "bcm63xx_epic",
394	.irq_ack	= bcm63xx_external_irq_clear,
395
396	.irq_mask	= bcm63xx_external_irq_mask,
397	.irq_unmask	= bcm63xx_external_irq_unmask,
398
399	.irq_set_type	= bcm63xx_external_irq_set_type,
400};
401
402static void bcm63xx_init_irq(void)
403{
404	int irq_bits;
405
406	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
407	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
408	irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
409	irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);
410
411	switch (bcm63xx_get_cpu_id()) {
412	case BCM3368_CPU_ID:
413		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
414		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
415		irq_stat_addr[1] = 0;
416		irq_mask_addr[1] = 0;
417		irq_bits = 32;
418		ext_irq_count = 4;
419		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
420		break;
421	case BCM6328_CPU_ID:
422		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
423		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
424		irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
425		irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
426		irq_bits = 64;
427		ext_irq_count = 4;
428		is_ext_irq_cascaded = 1;
429		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
430		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
431		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
432		break;
433	case BCM6338_CPU_ID:
434		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
435		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
436		irq_stat_addr[1] = 0;
437		irq_mask_addr[1] = 0;
438		irq_bits = 32;
439		ext_irq_count = 4;
440		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
441		break;
442	case BCM6345_CPU_ID:
443		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
444		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
445		irq_stat_addr[1] = 0;
446		irq_mask_addr[1] = 0;
447		irq_bits = 32;
448		ext_irq_count = 4;
449		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
450		break;
451	case BCM6348_CPU_ID:
452		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
453		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
454		irq_stat_addr[1] = 0;
455		irq_mask_addr[1] = 0;
456		irq_bits = 32;
457		ext_irq_count = 4;
458		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
459		break;
460	case BCM6358_CPU_ID:
461		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
462		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
463		irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
464		irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
465		irq_bits = 32;
466		ext_irq_count = 4;
467		is_ext_irq_cascaded = 1;
468		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
469		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
470		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
471		break;
472	case BCM6362_CPU_ID:
473		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
474		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
475		irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
476		irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
477		irq_bits = 64;
478		ext_irq_count = 4;
479		is_ext_irq_cascaded = 1;
480		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
481		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
482		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
483		break;
484	case BCM6368_CPU_ID:
485		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
486		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
487		irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
488		irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
489		irq_bits = 64;
490		ext_irq_count = 6;
491		is_ext_irq_cascaded = 1;
492		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
493		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
494		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
495		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
496		break;
497	default:
498		BUG();
499	}
500
501	if (irq_bits == 32) {
502		dispatch_internal = __dispatch_internal_32;
503		internal_irq_mask = __internal_irq_mask_32;
504		internal_irq_unmask = __internal_irq_unmask_32;
505	} else {
506		dispatch_internal = __dispatch_internal_64;
507		internal_irq_mask = __internal_irq_mask_64;
508		internal_irq_unmask = __internal_irq_unmask_64;
509	}
510}
511
512void __init arch_init_irq(void)
513{
514	int i, irq;
515
516	bcm63xx_init_irq();
517	mips_cpu_irq_init();
518	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
519		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
520					 handle_level_irq);
521
522	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
523		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
524					 handle_edge_irq);
525
526	if (!is_ext_irq_cascaded) {
527		for (i = 3; i < 3 + ext_irq_count; ++i) {
528			irq = MIPS_CPU_IRQ_BASE + i;
529			if (request_irq(irq, no_action, IRQF_NO_THREAD,
530					"cascade_extirq", NULL)) {
531				pr_err("Failed to request irq %d (cascade_extirq)\n",
532				       irq);
533			}
534		}
535	}
536
537	irq = MIPS_CPU_IRQ_BASE + 2;
538	if (request_irq(irq, no_action, IRQF_NO_THREAD,	"cascade_ip2", NULL))
539		pr_err("Failed to request irq %d (cascade_ip2)\n", irq);
540#ifdef CONFIG_SMP
541	if (is_ext_irq_cascaded) {
542		irq = MIPS_CPU_IRQ_BASE + 3;
543		if (request_irq(irq, no_action,	IRQF_NO_THREAD, "cascade_ip3",
544				NULL))
545			pr_err("Failed to request irq %d (cascade_ip3)\n", irq);
546		bcm63xx_internal_irq_chip.irq_set_affinity =
547			bcm63xx_internal_set_affinity;
548
549		cpumask_clear(irq_default_affinity);
550		cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
551	}
552#endif
553}
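/*
 * Editor's note, not in the original: the CONFIG_SMP block above pins
 * irq_default_affinity to the boot CPU, so every interrupt is initially
 * delivered there; an individual interrupt can be moved later through the
 * irq_set_affinity hook installed just before (bcm63xx_internal_set_affinity),
 * which simply replays the unmask with the requested cpumask.
 */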