v5.4
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_irq.h>


static DEFINE_SPINLOCK(ipic_lock);
static DEFINE_SPINLOCK(epic_lock);

static u32 irq_stat_addr[2];
static u32 irq_mask_addr[2];
static void (*dispatch_internal)(int cpu);
static int is_ext_irq_cascaded;
static unsigned int ext_irq_count;
static unsigned int ext_irq_start, ext_irq_end;
static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
static void (*internal_irq_mask)(struct irq_data *d);
static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);


static inline u32 get_ext_irq_perf_reg(int irq)
{
	if (irq < 4)
		return ext_irq_cfg_reg1;
	return ext_irq_cfg_reg2;
}

static inline void handle_internal(int intbit)
{
	if (is_ext_irq_cascaded &&
	    intbit >= ext_irq_start && intbit <= ext_irq_end)
		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
	else
		do_IRQ(intbit + IRQ_INTERNAL_BASE);
}
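
/*
 * Illustration of the cascade remap in handle_internal() (the values
 * here are made up for the example): if ext_irq_start were 24, internal
 * status bit 26 would be delivered as do_IRQ(IRQ_EXTERNAL_BASE + 2)
 * instead of as an internal IRQ, so the external irq_chip handles it.
 */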

static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
				     const struct cpumask *m)
{
	bool enable = cpu_online(cpu);

#ifdef CONFIG_SMP
	if (m)
		enable &= cpumask_test_cpu(cpu, m);
	else if (irqd_affinity_was_set(d))
		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
#endif
	return enable;
}
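
/*
 * In short: a line is unmasked on a CPU only if that CPU is online
 * and, on SMP, only if the CPU is in the mask passed by set_affinity
 * (m) or, failing that, in the affinity mask already recorded for the
 * IRQ.  With neither constraint, every online CPU qualifies.
 */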

/*
 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
 * prioritize any interrupt relatively to another. the static counter
 * will resume the loop where it ended the last time we left this
 * function.
 */

#define BUILD_IPIC_INTERNAL(width)					\
void __dispatch_internal_##width(int cpu)				\
{									\
	u32 pending[width / 32];					\
	unsigned int src, tgt;						\
	bool irqs_pending = false;					\
	static unsigned int i[2];					\
	unsigned int *next = &i[cpu];					\
	unsigned long flags;						\
									\
	/* read registers in reverse order */				\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for (src = 0, tgt = (width / 32); src < (width / 32); src++) {	\
		u32 val;						\
									\
		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
		pending[--tgt] = val;					\
									\
		if (val)						\
			irqs_pending = true;				\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
									\
	if (!irqs_pending)						\
		return;							\
									\
	while (1) {							\
		unsigned int to_call = *next;				\
									\
		*next = (*next + 1) & (width - 1);			\
		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) {	\
			handle_internal(to_call);			\
			break;						\
		}							\
	}								\
}									\
									\
static void __internal_irq_mask_##width(struct irq_data *d)		\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		val &= ~(1 << bit);					\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}									\
									\
static void __internal_irq_unmask_##width(struct irq_data *d,		\
					  const struct cpumask *m)	\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		if (enable_irq_for_cpu(cpu, d, m))			\
			val |= (1 << bit);				\
		else							\
			val &= ~(1 << bit);				\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}

BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);
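
/*
 * Worked example of the two tricks above (illustrative): on a 64-bit
 * chip the first status/mask word covers IRQs 32-63 and the second
 * covers IRQs 0-31.  The dispatch loop reads the words in hardware
 * order but stores them reversed, so pending[0] always holds IRQs
 * 0-31; mask/unmask derive the same layout from
 * reg = (irq / 32) ^ (width/32 - 1), e.g. irq 5 maps to word 1.
 * For the round-robin scan: with IRQs 3 and 5 pending and the per-cpu
 * counter at 4, IRQ 5 is handled first and IRQ 3 on the next dispatch,
 * so no source can starve the others.
 */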

asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP0)
			do_IRQ(0);
		if (cause & CAUSEF_IP1)
			do_IRQ(1);
		if (cause & CAUSEF_IP2)
			dispatch_internal(0);
		if (is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				dispatch_internal(1);
		} else {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}
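
/*
 * Dispatch summary: IP7 is the CPU timer, IP0/IP1 the MIPS software
 * interrupts, IP2 the first internal PIC register set.  IP3 is
 * overloaded: on cascaded chips it carries the second internal PIC
 * register set (used for SMP), otherwise IP3-IP6 are the four
 * directly wired external interrupt lines.
 */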

/*
 * internal IRQs operations: only mask/unmask on PERF irq mask
 * register.
 */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d);
}

static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	internal_irq_unmask(d, NULL);
}

/*
 * external IRQs operations: mask/unmask and clear on PERF external
 * irq control register.
 */
static void bcm63xx_external_irq_mask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg &= ~EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
}

static void bcm63xx_external_irq_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	if (is_ext_irq_cascaded)
		internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
				    NULL);
}
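
/*
 * Note that on cascaded chips an external line is masked in two
 * places: in the PERF external config register and at the internal
 * PIC bit it is routed through; unmask mirrors this double update.
 */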

static void bcm63xx_external_irq_clear(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	unsigned long flags;

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_CLEAR(irq % 4);

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);
}

static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;
	unsigned long flags;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		pr_err("bogus flow type combination given !\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);
	irq %= 4;

	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
		break;

	case BCM3368_CPU_ID:
	case BCM6328_CPU_ID:
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6358_CPU_ID:
	case BCM6362_CPU_ID:
	case BCM6368_CPU_ID:
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;
	default:
		BUG();
	}

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}
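
/*
 * Trigger encoding recap for the switch above: LEVELSENSE selects
 * level vs. edge triggering, SENSE the active level or edge, and
 * BOTHEDGE both edges.  E.g. IRQ_TYPE_LEVEL_LOW sets levelsense=1,
 * sense=0, while IRQ_TYPE_EDGE_FALLING leaves all three bits clear.
 */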

#ifdef CONFIG_SMP
static int bcm63xx_internal_set_affinity(struct irq_data *data,
					 const struct cpumask *dest,
					 bool force)
{
	if (!irqd_irq_disabled(data))
		internal_irq_unmask(data, dest);

	return 0;
}
#endif

static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};

static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};

static struct irqaction cpu_ip2_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip2",
	.flags		= IRQF_NO_THREAD,
};

#ifdef CONFIG_SMP
static struct irqaction cpu_ip3_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip3",
	.flags		= IRQF_NO_THREAD,
};
#endif

static struct irqaction cpu_ext_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_extirq",
	.flags		= IRQF_NO_THREAD,
};

static void bcm63xx_init_irq(void)
{
	int irq_bits;

	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM3368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
		break;
	case BCM6328_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
		break;
	case BCM6338_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6362_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
		irq_bits = 64;
		ext_irq_count = 6;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}

	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal_32;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}
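
/*
 * Per-SoC table recap: every chip exposes four external lines except
 * the 6368, which has six; 3368/6338/6345/6348 have a single 32-bit
 * status/mask pair, 6358 has two 32-bit pairs, and 6328/6362/6368 use
 * the 64-bit layout.  Exactly the chips with a second register set
 * also cascade their external IRQs through the internal PIC.
 */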

void __init arch_init_irq(void)
{
	int i;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

	if (!is_ext_irq_cascaded) {
		for (i = 3; i < 3 + ext_irq_count; ++i)
			setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
	}

	setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
#ifdef CONFIG_SMP
	if (is_ext_irq_cascaded) {
		setup_irq(MIPS_CPU_IRQ_BASE + 3, &cpu_ip3_cascade_action);
		bcm63xx_internal_irq_chip.irq_set_affinity =
			bcm63xx_internal_set_affinity;

		cpumask_clear(irq_default_affinity);
		cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	}
#endif
}
v3.1
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/irq.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_irq.h>

/*
 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
 * prioritize any interrupt relatively to another. the static counter
 * will resume the loop where it ended the last time we left this
 * function.
 */
static void bcm63xx_irq_dispatch_internal(void)
{
	u32 pending;
	static int i;

	pending = bcm_perf_readl(PERF_IRQMASK_REG) &
		bcm_perf_readl(PERF_IRQSTAT_REG);

	if (!pending)
		return;

	while (1) {
		int to_call = i;

		i = (i + 1) & 0x1f;
		if (pending & (1 << to_call)) {
			do_IRQ(to_call + IRQ_INTERNAL_BASE);
			break;
		}
	}
}
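
/*
 * Only sources that are both raised (IRQSTAT) and enabled (IRQMASK)
 * are considered, and the static counter resumes the scan where the
 * previous dispatch stopped, so one busy source cannot starve the
 * other 31.
 */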

asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP2)
			bcm63xx_irq_dispatch_internal();
		if (cause & CAUSEF_IP3)
			do_IRQ(IRQ_EXT_0);
		if (cause & CAUSEF_IP4)
			do_IRQ(IRQ_EXT_1);
		if (cause & CAUSEF_IP5)
			do_IRQ(IRQ_EXT_2);
		if (cause & CAUSEF_IP6)
			do_IRQ(IRQ_EXT_3);
	} while (1);
}

/*
 * internal IRQs operations: only mask/unmask on PERF irq mask
 * register.
 */
static inline void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_INTERNAL_BASE;
	u32 mask;

	mask = bcm_perf_readl(PERF_IRQMASK_REG);
	mask &= ~(1 << irq);
	bcm_perf_writel(mask, PERF_IRQMASK_REG);
}

static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_INTERNAL_BASE;
	u32 mask;

	mask = bcm_perf_readl(PERF_IRQMASK_REG);
	mask |= (1 << irq);
	bcm_perf_writel(mask, PERF_IRQMASK_REG);
}

/*
 * external IRQs operations: mask/unmask and clear on PERF external
 * irq control register.
 */
static void bcm63xx_external_irq_mask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXT_BASE;
	u32 reg;

	reg = bcm_perf_readl(PERF_EXTIRQ_CFG_REG);
	reg &= ~EXTIRQ_CFG_MASK(irq);
	bcm_perf_writel(reg, PERF_EXTIRQ_CFG_REG);
}

static void bcm63xx_external_irq_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXT_BASE;
	u32 reg;

	reg = bcm_perf_readl(PERF_EXTIRQ_CFG_REG);
	reg |= EXTIRQ_CFG_MASK(irq);
	bcm_perf_writel(reg, PERF_EXTIRQ_CFG_REG);
}

static void bcm63xx_external_irq_clear(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXT_BASE;
	u32 reg;

	reg = bcm_perf_readl(PERF_EXTIRQ_CFG_REG);
	reg |= EXTIRQ_CFG_CLEAR(irq);
	bcm_perf_writel(reg, PERF_EXTIRQ_CFG_REG);
}

static unsigned int bcm63xx_external_irq_startup(struct irq_data *d)
{
	set_c0_status(0x100 << (d->irq - IRQ_MIPS_BASE));
	irq_enable_hazard();
	bcm63xx_external_irq_unmask(d);
	return 0;
}

static void bcm63xx_external_irq_shutdown(struct irq_data *d)
{
	bcm63xx_external_irq_mask(d);
	clear_c0_status(0x100 << (d->irq - IRQ_MIPS_BASE));
	irq_disable_hazard();
}
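
/*
 * 0x100 << n is the Status.IM bit for MIPS CPU interrupt line n
 * (0x100 == STATUSF_IP0): startup unmasks the line at the CPU and
 * then at the PERF level, shutdown masks in the reverse order.
 */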

static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXT_BASE;
	u32 reg;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	reg = bcm_perf_readl(PERF_EXTIRQ_CFG_REG);
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		break;

	case IRQ_TYPE_EDGE_RISING:
		reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		reg |= EXTIRQ_CFG_SENSE(irq);
		reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;

	case IRQ_TYPE_EDGE_FALLING:
		reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		reg &= ~EXTIRQ_CFG_SENSE(irq);
		reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		reg |= EXTIRQ_CFG_SENSE(irq);
		break;

	case IRQ_TYPE_LEVEL_LOW:
		reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		reg &= ~EXTIRQ_CFG_SENSE(irq);
		break;

	default:
		printk(KERN_ERR "bogus flow type combination given !\n");
		return -EINVAL;
	}
	bcm_perf_writel(reg, PERF_EXTIRQ_CFG_REG);

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}

static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};

static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_startup	= bcm63xx_external_irq_startup,
	.irq_shutdown	= bcm63xx_external_irq_shutdown,

	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};

static struct irqaction cpu_ip2_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip2",
	.flags		= IRQF_NO_THREAD,
};

void __init arch_init_irq(void)
{
	int i;

	mips_cpu_irq_init();
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	for (i = IRQ_EXT_BASE; i < IRQ_EXT_BASE + 4; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

	setup_irq(IRQ_MIPS_BASE + 2, &cpu_ip2_cascade_action);
}