/*
 * arch/mips/bcm63xx/irq.c — scraped source listing.
 * First copy below: Linux kernel v3.5.6.
 */
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
  7 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
  8 */
  9
 10#include <linux/kernel.h>
 11#include <linux/init.h>
 12#include <linux/interrupt.h>
 13#include <linux/module.h>
 14#include <linux/irq.h>
 
 15#include <asm/irq_cpu.h>
 16#include <asm/mipsregs.h>
 17#include <bcm63xx_cpu.h>
 18#include <bcm63xx_regs.h>
 19#include <bcm63xx_io.h>
 20#include <bcm63xx_irq.h>
 21
/*
 * Forward declarations for the 32-bit and 64-bit wide register
 * variants; marked __maybe_unused because when the CPU type is fixed
 * at compile time only one set of them is actually referenced.
 */
static void __dispatch_internal(void) __maybe_unused;
static void __dispatch_internal_64(void) __maybe_unused;
static void __internal_irq_mask_32(unsigned int irq) __maybe_unused;
static void __internal_irq_mask_64(unsigned int irq) __maybe_unused;
static void __internal_irq_unmask_32(unsigned int irq) __maybe_unused;
static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused;

/*
 * With a build-time CPU selection (!BCMCPU_RUNTIME_DETECT) every
 * controller parameter is a constant macro and bcm63xx_init_irq() is
 * an empty stub.  In the #else branch the same names are variables
 * filled in at boot from the detected CPU id.
 */
#ifndef BCMCPU_RUNTIME_DETECT
#ifdef CONFIG_BCM63XX_CPU_6338
#define irq_stat_reg		PERF_IRQSTAT_6338_REG
#define irq_mask_reg		PERF_IRQMASK_6338_REG
#define irq_bits		32
#define is_ext_irq_cascaded	0
#define ext_irq_start		0
#define ext_irq_end		0
#define ext_irq_count		4
#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6338
#define ext_irq_cfg_reg2	0
#endif
#ifdef CONFIG_BCM63XX_CPU_6345
/* 6345 has no external irq lines at all (ext_irq_count == 0) */
#define irq_stat_reg		PERF_IRQSTAT_6345_REG
#define irq_mask_reg		PERF_IRQMASK_6345_REG
#define irq_bits		32
#define is_ext_irq_cascaded	0
#define ext_irq_start		0
#define ext_irq_end		0
#define ext_irq_count		0
#define ext_irq_cfg_reg1	0
#define ext_irq_cfg_reg2	0
#endif
#ifdef CONFIG_BCM63XX_CPU_6348
#define irq_stat_reg		PERF_IRQSTAT_6348_REG
#define irq_mask_reg		PERF_IRQMASK_6348_REG
#define irq_bits		32
#define is_ext_irq_cascaded	0
#define ext_irq_start		0
#define ext_irq_end		0
#define ext_irq_count		4
#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6348
#define ext_irq_cfg_reg2	0
#endif
#ifdef CONFIG_BCM63XX_CPU_6358
/* 6358/6368 route their external irqs through the internal controller */
#define irq_stat_reg		PERF_IRQSTAT_6358_REG
#define irq_mask_reg		PERF_IRQMASK_6358_REG
#define irq_bits		32
#define is_ext_irq_cascaded	1
#define ext_irq_start		(BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE)
#define ext_irq_end		(BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE)
#define ext_irq_count		4
#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6358
#define ext_irq_cfg_reg2	0
#endif
#ifdef CONFIG_BCM63XX_CPU_6368
#define irq_stat_reg		PERF_IRQSTAT_6368_REG
#define irq_mask_reg		PERF_IRQMASK_6368_REG
#define irq_bits		64
#define is_ext_irq_cascaded	1
#define ext_irq_start		(BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE)
#define ext_irq_end		(BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE)
#define ext_irq_count		6
#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6368
#define ext_irq_cfg_reg2	PERF_EXTIRQ_CFG_REG2_6368
#endif

/* statically bind the width-specific helpers to the generic names */
#if irq_bits == 32
#define dispatch_internal			__dispatch_internal
#define internal_irq_mask			__internal_irq_mask_32
#define internal_irq_unmask			__internal_irq_unmask_32
#else
#define dispatch_internal			__dispatch_internal_64
#define internal_irq_mask			__internal_irq_mask_64
#define internal_irq_unmask			__internal_irq_unmask_64
#endif

#define irq_stat_addr	(bcm63xx_regset_address(RSET_PERF) + irq_stat_reg)
#define irq_mask_addr	(bcm63xx_regset_address(RSET_PERF) + irq_mask_reg)

/* nothing to detect at runtime in this configuration */
static inline void bcm63xx_init_irq(void)
{
}
#else /* ! BCMCPU_RUNTIME_DETECT */

/* runtime-detected controller parameters, filled by bcm63xx_init_irq() */
static u32 irq_stat_addr, irq_mask_addr;
static void (*dispatch_internal)(void);
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
static unsigned int ext_irq_start, ext_irq_end;
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(unsigned int irq);
static void (*internal_irq_unmask)(unsigned int irq);

/*
 * Probe the CPU id and derive register offsets, register width and
 * external-irq layout; BUG() on an unsupported chip.
 */
static void bcm63xx_init_irq(void)
{
	int irq_bits;

	/* both addresses start at the PERF register block */
	irq_stat_addr = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM6338_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6338_REG;
		irq_mask_addr += PERF_IRQMASK_6338_REG;
		irq_bits = 32;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6345_REG;
		irq_mask_addr += PERF_IRQMASK_6345_REG;
		irq_bits = 32;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6348_REG;
		irq_mask_addr += PERF_IRQMASK_6348_REG;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6358_REG;
		irq_mask_addr += PERF_IRQMASK_6358_REG;
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6368_REG;
		irq_mask_addr += PERF_IRQMASK_6368_REG;
		irq_bits = 64;
		ext_irq_count = 6;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}

	/* pick the register-width specific implementations */
	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}
#endif /* ! BCMCPU_RUNTIME_DETECT */
174
175static inline u32 get_ext_irq_perf_reg(int irq)
176{
177	if (irq < 4)
178		return ext_irq_cfg_reg1;
179	return ext_irq_cfg_reg2;
180}
181
182static inline void handle_internal(int intbit)
183{
184	if (is_ext_irq_cascaded &&
185	    intbit >= ext_irq_start && intbit <= ext_irq_end)
186		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
187	else
188		do_IRQ(intbit + IRQ_INTERNAL_BASE);
189}
190
 
 
 
 
 
 
 
 
 
 
 
 
 
 
191/*
192 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
193 * prioritize any interrupt relatively to another. the static counter
194 * will resume the loop where it ended the last time we left this
195 * function.
196 */
197static void __dispatch_internal(void)
198{
199	u32 pending;
200	static int i;
201
202	pending = bcm_readl(irq_stat_addr) & bcm_readl(irq_mask_addr);
203
204	if (!pending)
205		return ;
206
207	while (1) {
208		int to_call = i;
209
210		i = (i + 1) & 0x1f;
211		if (pending & (1 << to_call)) {
212			handle_internal(to_call);
213			break;
214		}
215	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
216}
217
218static void __dispatch_internal_64(void)
219{
220	u64 pending;
221	static int i;
222
223	pending = bcm_readq(irq_stat_addr) & bcm_readq(irq_mask_addr);
224
225	if (!pending)
226		return ;
227
228	while (1) {
229		int to_call = i;
230
231		i = (i + 1) & 0x3f;
232		if (pending & (1ull << to_call)) {
233			handle_internal(to_call);
234			break;
235		}
236	}
237}
238
/*
 * Top-level MIPS interrupt dispatcher.  Loops until no cause bit is
 * pending: IP7 first (hardwired to do_IRQ(7), typically the CPU
 * timer — confirm against platform setup), then IP2 which cascades
 * the internal PERF controller, then — only when external irqs are
 * not cascaded through PERF — IP3..IP6 which each carry one external
 * irq line directly.
 */
asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		/* only consider lines that are both raised and unmasked */
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP2)
			dispatch_internal();
		if (!is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}
265
266/*
267 * internal IRQs operations: only mask/unmask on PERF irq mask
268 * register.
269 */
270static void __internal_irq_mask_32(unsigned int irq)
271{
272	u32 mask;
273
274	mask = bcm_readl(irq_mask_addr);
275	mask &= ~(1 << irq);
276	bcm_writel(mask, irq_mask_addr);
277}
278
279static void __internal_irq_mask_64(unsigned int irq)
280{
281	u64 mask;
282
283	mask = bcm_readq(irq_mask_addr);
284	mask &= ~(1ull << irq);
285	bcm_writeq(mask, irq_mask_addr);
286}
287
288static void __internal_irq_unmask_32(unsigned int irq)
289{
290	u32 mask;
291
292	mask = bcm_readl(irq_mask_addr);
293	mask |= (1 << irq);
294	bcm_writel(mask, irq_mask_addr);
295}
296
297static void __internal_irq_unmask_64(unsigned int irq)
298{
299	u64 mask;
300
301	mask = bcm_readq(irq_mask_addr);
302	mask |= (1ull << irq);
303	bcm_writeq(mask, irq_mask_addr);
304}
305
306static void bcm63xx_internal_irq_mask(struct irq_data *d)
307{
308	internal_irq_mask(d->irq - IRQ_INTERNAL_BASE);
309}
310
311static void bcm63xx_internal_irq_unmask(struct irq_data *d)
312{
313	internal_irq_unmask(d->irq - IRQ_INTERNAL_BASE);
314}
315
316/*
317 * external IRQs operations: mask/unmask and clear on PERF external
318 * irq control register.
319 */
320static void bcm63xx_external_irq_mask(struct irq_data *d)
321{
322	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
323	u32 reg, regaddr;
 
324
325	regaddr = get_ext_irq_perf_reg(irq);
 
326	reg = bcm_perf_readl(regaddr);
327
328	if (BCMCPU_IS_6348())
329		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
330	else
331		reg &= ~EXTIRQ_CFG_MASK(irq % 4);
332
333	bcm_perf_writel(reg, regaddr);
 
 
334	if (is_ext_irq_cascaded)
335		internal_irq_mask(irq + ext_irq_start);
336}
337
338static void bcm63xx_external_irq_unmask(struct irq_data *d)
339{
340	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
341	u32 reg, regaddr;
 
342
343	regaddr = get_ext_irq_perf_reg(irq);
 
344	reg = bcm_perf_readl(regaddr);
345
346	if (BCMCPU_IS_6348())
347		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
348	else
349		reg |= EXTIRQ_CFG_MASK(irq % 4);
350
351	bcm_perf_writel(reg, regaddr);
 
352
353	if (is_ext_irq_cascaded)
354		internal_irq_unmask(irq + ext_irq_start);
 
355}
356
357static void bcm63xx_external_irq_clear(struct irq_data *d)
358{
359	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
360	u32 reg, regaddr;
 
361
362	regaddr = get_ext_irq_perf_reg(irq);
 
363	reg = bcm_perf_readl(regaddr);
364
365	if (BCMCPU_IS_6348())
366		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
367	else
368		reg |= EXTIRQ_CFG_CLEAR(irq % 4);
369
370	bcm_perf_writel(reg, regaddr);
 
371}
372
/*
 * Configure the trigger mode of an external irq line.  The flow type
 * is mapped onto three register bits: LEVELSENSE (level vs edge),
 * SENSE (high/rising vs low/falling) and BOTHEDGE.  An unspecified
 * type defaults to active-low level.  Returns
 * IRQ_SET_MASK_OK_NOCOPY on success, -EINVAL on an unsupported
 * combination.
 */
static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	/* hardware default when nothing was requested */
	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		printk(KERN_ERR "bogus flow type combination given !\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	reg = bcm_perf_readl(regaddr);
	irq %= 4;	/* bit index within the selected config register */

	/* the 6348 has its own bit layout for these fields */
	if (BCMCPU_IS_6348()) {
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
	}

	if (BCMCPU_IS_6338() || BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
	}

	bcm_perf_writel(reg, regaddr);

	/* level types use the level flow handler, edge types the edge one */
	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}
456
 
 
 
 
 
 
 
 
 
 
 
 
/* irq_chip for the internal (PERF) lines: mask/unmask only, no ack */
static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};

/* irq_chip for the external lines: adds ack and trigger-type control */
static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};

/* placeholder action for the IP2 cascade input (handled in dispatch) */
static struct irqaction cpu_ip2_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip2",
	.flags		= IRQF_NO_THREAD,
};

/* placeholder action for directly-wired external irq cascade inputs */
static struct irqaction cpu_ext_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_extirq",
	.flags		= IRQF_NO_THREAD,
};
484
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
485void __init arch_init_irq(void)
486{
487	int i;
488
489	bcm63xx_init_irq();
490	mips_cpu_irq_init();
491	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
492		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
493					 handle_level_irq);
494
495	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
496		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
497					 handle_edge_irq);
498
499	if (!is_ext_irq_cascaded) {
500		for (i = 3; i < 3 + ext_irq_count; ++i)
501			setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
502	}
503
504	setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
 
 
 
 
 
 
 
 
 
 
505}
/* Second copy below: the same file as of Linux kernel v4.6. */
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
  7 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
  8 */
  9
 10#include <linux/kernel.h>
 11#include <linux/init.h>
 12#include <linux/interrupt.h>
 13#include <linux/module.h>
 14#include <linux/irq.h>
 15#include <linux/spinlock.h>
 16#include <asm/irq_cpu.h>
 17#include <asm/mipsregs.h>
 18#include <bcm63xx_cpu.h>
 19#include <bcm63xx_regs.h>
 20#include <bcm63xx_io.h>
 21#include <bcm63xx_irq.h>
 22
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 23
/*
 * ipic_lock serializes accesses to the internal (PERF) irq mask
 * registers; epic_lock serializes the external irq config registers.
 */
static DEFINE_SPINLOCK(ipic_lock);
static DEFINE_SPINLOCK(epic_lock);

/* per-CPU status/mask register addresses; index 1 is 0 on chips with
 * a single register set (see bcm63xx_init_irq) */
static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
/* width-specific handlers, selected at boot in bcm63xx_init_irq() */
static void (*dispatch_internal)(int cpu);
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
static unsigned int ext_irq_start, ext_irq_end;
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(struct irq_data *d);
static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);
 36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 37
 38static inline u32 get_ext_irq_perf_reg(int irq)
 39{
 40	if (irq < 4)
 41		return ext_irq_cfg_reg1;
 42	return ext_irq_cfg_reg2;
 43}
 44
 45static inline void handle_internal(int intbit)
 46{
 47	if (is_ext_irq_cascaded &&
 48	    intbit >= ext_irq_start && intbit <= ext_irq_end)
 49		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
 50	else
 51		do_IRQ(intbit + IRQ_INTERNAL_BASE);
 52}
 53
/*
 * Decide whether an irq should be enabled on @cpu: the cpu must be
 * online, and under SMP the explicit mask @m (or, when @m is NULL, a
 * previously configured irq affinity) must include it.  On UP builds
 * this reduces to the online check.
 */
static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
				     const struct cpumask *m)
{
	bool enable = cpu_online(cpu);

#ifdef CONFIG_SMP
	if (m)
		enable &= cpumask_test_cpu(cpu, m);
	else if (irqd_affinity_was_set(d))
		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
#endif
	return enable;
}
 67
/*
 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
 * prioritize any interrupt relatively to another. the static counter
 * will resume the loop where it ended the last time we left this
 * function.
 *
 * BUILD_IPIC_INTERNAL(width) expands to three functions for a
 * status/mask register file of 'width' bits (32 or 64):
 *   __dispatch_internal_<width>(cpu)  - round-robin dispatch of one
 *                                       pending bit, per-cpu rotor;
 *   __internal_irq_mask_<width>(d)    - clear the line's mask bit on
 *                                       every present cpu;
 *   __internal_irq_unmask_<width>(d, m) - set or clear the bit per
 *                                       cpu depending on
 *                                       enable_irq_for_cpu().
 * Registers are read in reverse word order under ipic_lock.
 */

#define BUILD_IPIC_INTERNAL(width)					\
void __dispatch_internal_##width(int cpu)				\
{									\
	u32 pending[width / 32];					\
	unsigned int src, tgt;						\
	bool irqs_pending = false;					\
	static unsigned int i[2];					\
	unsigned int *next = &i[cpu];					\
	unsigned long flags;						\
									\
	/* read registers in reverse order */				\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for (src = 0, tgt = (width / 32); src < (width / 32); src++) {	\
		u32 val;						\
									\
		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
		pending[--tgt] = val;					\
									\
		if (val)						\
			irqs_pending = true;				\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
									\
	if (!irqs_pending)						\
		return;							\
									\
	while (1) {							\
		unsigned int to_call = *next;				\
									\
		*next = (*next + 1) & (width - 1);			\
		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) {	\
			handle_internal(to_call);			\
			break;						\
		}							\
	}								\
}									\
									\
static void __internal_irq_mask_##width(struct irq_data *d)		\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		val &= ~(1 << bit);					\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}									\
									\
static void __internal_irq_unmask_##width(struct irq_data *d,		\
					  const struct cpumask *m)	\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		if (enable_irq_for_cpu(cpu, d, m))			\
			val |= (1 << bit);				\
		else							\
			val &= ~(1 << bit);				\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}

/* instantiate both register widths; init picks one at boot */
BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
161
/*
 * Top-level MIPS interrupt dispatcher.  Loops until no cause bit is
 * pending.  IP7 is serviced first (do_IRQ(7), typically the CPU
 * timer — confirm against platform setup), then the software irqs
 * IP0/IP1, then IP2 which cascades CPU0's PERF controller.  IP3 is
 * either the second CPU's PERF cascade (cascaded chips) or the first
 * directly-wired external irq line.
 */
asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		/* only consider lines that are both raised and unmasked */
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP0)
			do_IRQ(0);
		if (cause & CAUSEF_IP1)
			do_IRQ(1);
		if (cause & CAUSEF_IP2)
			dispatch_internal(0);
		if (is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				dispatch_internal(1);
		} else {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}
195
196/*
197 * internal IRQs operations: only mask/unmask on PERF irq mask
198 * register.
199 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* irq_chip callback: delegate to the width-specific masker */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d);
}

/* irq_chip callback: unmask with no explicit cpumask (NULL means use
 * the stored affinity, see enable_irq_for_cpu) */
static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	internal_irq_unmask(d, NULL);
}
209
210/*
211 * external IRQs operations: mask/unmask and clear on PERF external
212 * irq control register.
213 */
214static void bcm63xx_external_irq_mask(struct irq_data *d)
215{
216	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
217	u32 reg, regaddr;
218	unsigned long flags;
219
220	regaddr = get_ext_irq_perf_reg(irq);
221	spin_lock_irqsave(&epic_lock, flags);
222	reg = bcm_perf_readl(regaddr);
223
224	if (BCMCPU_IS_6348())
225		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
226	else
227		reg &= ~EXTIRQ_CFG_MASK(irq % 4);
228
229	bcm_perf_writel(reg, regaddr);
230	spin_unlock_irqrestore(&epic_lock, flags);
231
232	if (is_ext_irq_cascaded)
233		internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
234}
235
236static void bcm63xx_external_irq_unmask(struct irq_data *d)
237{
238	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
239	u32 reg, regaddr;
240	unsigned long flags;
241
242	regaddr = get_ext_irq_perf_reg(irq);
243	spin_lock_irqsave(&epic_lock, flags);
244	reg = bcm_perf_readl(regaddr);
245
246	if (BCMCPU_IS_6348())
247		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
248	else
249		reg |= EXTIRQ_CFG_MASK(irq % 4);
250
251	bcm_perf_writel(reg, regaddr);
252	spin_unlock_irqrestore(&epic_lock, flags);
253
254	if (is_ext_irq_cascaded)
255		internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
256				    NULL);
257}
258
259static void bcm63xx_external_irq_clear(struct irq_data *d)
260{
261	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
262	u32 reg, regaddr;
263	unsigned long flags;
264
265	regaddr = get_ext_irq_perf_reg(irq);
266	spin_lock_irqsave(&epic_lock, flags);
267	reg = bcm_perf_readl(regaddr);
268
269	if (BCMCPU_IS_6348())
270		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
271	else
272		reg |= EXTIRQ_CFG_CLEAR(irq % 4);
273
274	bcm_perf_writel(reg, regaddr);
275	spin_unlock_irqrestore(&epic_lock, flags);
276}
277
/*
 * Configure the trigger mode of an external irq line.  The flow type
 * is mapped onto three register bits: LEVELSENSE (level vs edge),
 * SENSE (high/rising vs low/falling) and BOTHEDGE.  An unspecified
 * type defaults to active-low level.  Register access is serialized
 * by epic_lock.  Returns IRQ_SET_MASK_OK_NOCOPY on success, -EINVAL
 * on an unsupported combination.
 */
static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;
	unsigned long flags;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	/* hardware default when nothing was requested */
	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		pr_err("bogus flow type combination given !\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);
	irq %= 4;	/* bit index within the selected config register */

	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
		/* the 6348 has its own bit layout for these fields */
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
		break;

	case BCM3368_CPU_ID:
	case BCM6328_CPU_ID:
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6358_CPU_ID:
	case BCM6362_CPU_ID:
	case BCM6368_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;
	default:
		BUG();
	}

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	/* level types use the level flow handler, edge types the edge one */
	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}
374
#ifdef CONFIG_SMP
/*
 * Apply a new affinity by rewriting the per-cpu mask registers via
 * the unmask path; skipped while the irq is disabled (the mask bits
 * are then applied on the next unmask from the stored affinity).
 * Returning 0 (IRQ_SET_MASK_OK) lets the genirq core store @dest.
 */
static int bcm63xx_internal_set_affinity(struct irq_data *data,
					 const struct cpumask *dest,
					 bool force)
{
	if (!irqd_irq_disabled(data))
		internal_irq_unmask(data, dest);

	return 0;
}
#endif
386
/* irq_chip for the internal (PERF) lines: mask/unmask only, no ack;
 * .irq_set_affinity is filled in at init time on SMP cascaded chips */
static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};

/* irq_chip for the external lines: adds ack and trigger-type control */
static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};

/* placeholder action for the IP2 cascade input (handled in dispatch) */
static struct irqaction cpu_ip2_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip2",
	.flags		= IRQF_NO_THREAD,
};

#ifdef CONFIG_SMP
/* placeholder action for the second CPU's IP3 PERF cascade input */
static struct irqaction cpu_ip3_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip3",
	.flags		= IRQF_NO_THREAD,
};
#endif

/* placeholder action for directly-wired external irq cascade inputs */
static struct irqaction cpu_ext_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_extirq",
	.flags		= IRQF_NO_THREAD,
};
422
/*
 * Probe the CPU id and derive per-cpu register addresses, register
 * width and external-irq layout; BUG() on an unsupported chip.
 * Chips with a single register set zero the [1] slot, which the
 * mask/unmask loops use as an end marker.
 */
static void bcm63xx_init_irq(void)
{
	int irq_bits;

	/* all addresses start at the PERF register block */
	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM3368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
		break;
	case BCM6328_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
		break;
	case BCM6338_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6362_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
		irq_bits = 64;
		ext_irq_count = 6;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}

	/* pick the register-width specific implementations */
	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal_32;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}
532
/*
 * Platform irq bring-up: probe controller parameters, install the
 * MIPS CPU irq chip, register our internal/external chips, claim the
 * cascade inputs, and on SMP cascaded chips enable affinity support
 * (defaulting all irqs to the boot cpu).
 */
void __init arch_init_irq(void)
{
	int i;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	/* all internal lines are level handled on the PERF controller */
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	/* external lines default to edge handling until set_type runs */
	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

	/* non-cascaded chips wire each external line to its own CPU IP */
	if (!is_ext_irq_cascaded) {
		for (i = 3; i < 3 + ext_irq_count; ++i)
			setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
	}

	setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
#ifdef CONFIG_SMP
	if (is_ext_irq_cascaded) {
		setup_irq(MIPS_CPU_IRQ_BASE + 3, &cpu_ip3_cascade_action);
		bcm63xx_internal_irq_chip.irq_set_affinity =
			bcm63xx_internal_set_affinity;

		/* default all irq affinity to the boot cpu */
		cpumask_clear(irq_default_affinity);
		cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	}
#endif
}