Linux Audio

Check our new training course

Loading...
v6.2
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
  7 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
  8 */
  9
 10#include <linux/kernel.h>
 11#include <linux/init.h>
 12#include <linux/interrupt.h>
 
 13#include <linux/irq.h>
 14#include <linux/spinlock.h>
 15#include <asm/irq_cpu.h>
 16#include <asm/mipsregs.h>
 17#include <bcm63xx_cpu.h>
 18#include <bcm63xx_regs.h>
 19#include <bcm63xx_io.h>
 20#include <bcm63xx_irq.h>
 21
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 22
 23static DEFINE_SPINLOCK(ipic_lock);
 24static DEFINE_SPINLOCK(epic_lock);
 
 
 25
 26static u32 irq_stat_addr[2];
 27static u32 irq_mask_addr[2];
 28static void (*dispatch_internal)(int cpu);
 29static int is_ext_irq_cascaded;
 30static unsigned int ext_irq_count;
 31static unsigned int ext_irq_start, ext_irq_end;
 32static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
 33static void (*internal_irq_mask)(struct irq_data *d);
 34static void (*internal_irq_unmask)(struct irq_data *d, const struct cpumask *m);
 35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 36
 37static inline u32 get_ext_irq_perf_reg(int irq)
 38{
 39	if (irq < 4)
 40		return ext_irq_cfg_reg1;
 41	return ext_irq_cfg_reg2;
 42}
 43
/*
 * Deliver one pending internal controller source.  When external irqs
 * are cascaded through the internal controller and this bit falls in
 * the cascaded range, remap it into the external irq number space;
 * otherwise dispatch it as a plain internal irq.
 */
static inline void handle_internal(int intbit)
{
	if (is_ext_irq_cascaded &&
	    intbit >= ext_irq_start && intbit <= ext_irq_end)
		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
	else
		do_IRQ(intbit + IRQ_INTERNAL_BASE);
}
 52
/*
 * Decide whether irq @d should be enabled on @cpu when unmasking.
 * A cpu is eligible only while online; on SMP the explicit cpumask @m
 * (if non-NULL), or failing that a previously configured irq affinity
 * mask, further restricts eligibility.
 */
static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
				     const struct cpumask *m)
{
	bool enable = cpu_online(cpu);

#ifdef CONFIG_SMP
	if (m)
		enable &= cpumask_test_cpu(cpu, m);
	else if (irqd_affinity_was_set(d))
		enable &= cpumask_test_cpu(cpu, irq_data_get_affinity_mask(d));
#endif
	return enable;
}
 66
 67/*
 68 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
 69 * prioritize any interrupt relatively to another. the static counter
 70 * will resume the loop where it ended the last time we left this
 71 * function.
 72 */
 
 
 
 
 
 
 
 
 
 73
/*
 * BUILD_IPIC_INTERNAL(width) expands to the three helpers for a
 * controller with a <width>-bit (one or two 32-bit words) status/mask
 * register layout:
 *
 *  __dispatch_internal_<width>(cpu):  snapshot the pending sources for
 *	@cpu under ipic_lock, then service exactly ONE pending source,
 *	resuming round-robin scanning from where the previous call on
 *	this cpu stopped (per-cpu static counter i[]).
 *  __internal_irq_mask_<width>(d):    clear the source's enable bit in
 *	the mask register of every present cpu.
 *  __internal_irq_unmask_<width>(d, m): set or clear the enable bit per
 *	cpu according to enable_irq_for_cpu().
 *
 * NOTE(review): the status/mask words are read "in reverse order" into
 * pending[] (see loop below) so that word indexing by to_call / 32
 * matches the hardware word layout — confirm against the register map.
 */
#define BUILD_IPIC_INTERNAL(width)					\
void __dispatch_internal_##width(int cpu)				\
{									\
	u32 pending[width / 32];					\
	unsigned int src, tgt;						\
	bool irqs_pending = false;					\
	static unsigned int i[2];					\
	unsigned int *next = &i[cpu];					\
	unsigned long flags;						\
									\
	/* read registers in reverse order */				\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for (src = 0, tgt = (width / 32); src < (width / 32); src++) {	\
		u32 val;						\
									\
		val = bcm_readl(irq_stat_addr[cpu] + src * sizeof(u32)); \
		val &= bcm_readl(irq_mask_addr[cpu] + src * sizeof(u32)); \
		pending[--tgt] = val;					\
									\
		if (val)						\
			irqs_pending = true;				\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
									\
	if (!irqs_pending)						\
		return;							\
									\
	while (1) {							\
		unsigned int to_call = *next;				\
									\
		*next = (*next + 1) & (width - 1);			\
		if (pending[to_call / 32] & (1 << (to_call & 0x1f))) {	\
			handle_internal(to_call);			\
			break;						\
		}							\
	}								\
}									\
									\
static void __internal_irq_mask_##width(struct irq_data *d)		\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		val &= ~(1 << bit);					\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}									\
									\
static void __internal_irq_unmask_##width(struct irq_data *d,		\
					  const struct cpumask *m)	\
{									\
	u32 val;							\
	unsigned irq = d->irq - IRQ_INTERNAL_BASE;			\
	unsigned reg = (irq / 32) ^ (width/32 - 1);			\
	unsigned bit = irq & 0x1f;					\
	unsigned long flags;						\
	int cpu;							\
									\
	spin_lock_irqsave(&ipic_lock, flags);				\
	for_each_present_cpu(cpu) {					\
		if (!irq_mask_addr[cpu])				\
			break;						\
									\
		val = bcm_readl(irq_mask_addr[cpu] + reg * sizeof(u32));\
		if (enable_irq_for_cpu(cpu, d, m))			\
			val |= (1 << bit);				\
		else							\
			val &= ~(1 << bit);				\
		bcm_writel(val, irq_mask_addr[cpu] + reg * sizeof(u32));\
	}								\
	spin_unlock_irqrestore(&ipic_lock, flags);			\
}

BUILD_IPIC_INTERNAL(32);
BUILD_IPIC_INTERNAL(64);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
160
/*
 * Top-level MIPS interrupt dispatch: loop until no enabled cause bits
 * remain.  IP7 is the CPU timer, IP0/IP1 are the MIPS software
 * interrupts, IP2 feeds the internal controller for cpu 0.  When
 * external irqs are cascaded, IP3 carries the second set of internal
 * controller registers (cpu 1); otherwise IP3-IP6 are the four direct
 * external irq lines.
 */
asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP0)
			do_IRQ(0);
		if (cause & CAUSEF_IP1)
			do_IRQ(1);
		if (cause & CAUSEF_IP2)
			dispatch_internal(0);
		if (is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				dispatch_internal(1);
		} else {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}
194
195/*
196 * internal IRQs operations: only mask/unmask on PERF irq mask
197 * register.
198 */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* irq_chip mask hook: forward to the width-specific implementation. */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d);
}
203
/*
 * irq_chip unmask hook.  NULL cpumask: let the implementation fall back
 * to the irq's configured affinity (see enable_irq_for_cpu()).
 */
static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	internal_irq_unmask(d, NULL);
}
208
209/*
210 * external IRQs operations: mask/unmask and clear on PERF external
211 * irq control register.
212 */
213static void bcm63xx_external_irq_mask(struct irq_data *d)
214{
215	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
216	u32 reg, regaddr;
217	unsigned long flags;
218
219	regaddr = get_ext_irq_perf_reg(irq);
220	spin_lock_irqsave(&epic_lock, flags);
221	reg = bcm_perf_readl(regaddr);
222
223	if (BCMCPU_IS_6348())
224		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
225	else
226		reg &= ~EXTIRQ_CFG_MASK(irq % 4);
227
228	bcm_perf_writel(reg, regaddr);
229	spin_unlock_irqrestore(&epic_lock, flags);
230
231	if (is_ext_irq_cascaded)
232		internal_irq_mask(irq_get_irq_data(irq + ext_irq_start));
233}
234
235static void bcm63xx_external_irq_unmask(struct irq_data *d)
236{
237	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
238	u32 reg, regaddr;
239	unsigned long flags;
240
241	regaddr = get_ext_irq_perf_reg(irq);
242	spin_lock_irqsave(&epic_lock, flags);
243	reg = bcm_perf_readl(regaddr);
244
245	if (BCMCPU_IS_6348())
246		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
247	else
248		reg |= EXTIRQ_CFG_MASK(irq % 4);
249
250	bcm_perf_writel(reg, regaddr);
251	spin_unlock_irqrestore(&epic_lock, flags);
252
253	if (is_ext_irq_cascaded)
254		internal_irq_unmask(irq_get_irq_data(irq + ext_irq_start),
255				    NULL);
256}
257
258static void bcm63xx_external_irq_clear(struct irq_data *d)
259{
260	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
261	u32 reg, regaddr;
262	unsigned long flags;
263
264	regaddr = get_ext_irq_perf_reg(irq);
265	spin_lock_irqsave(&epic_lock, flags);
266	reg = bcm_perf_readl(regaddr);
267
268	if (BCMCPU_IS_6348())
269		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
270	else
271		reg |= EXTIRQ_CFG_CLEAR(irq % 4);
272
273	bcm_perf_writel(reg, regaddr);
274	spin_unlock_irqrestore(&epic_lock, flags);
275}
276
/*
 * Configure the trigger type of an external irq: translate the generic
 * flow type into the three hardware control bits (level-vs-edge,
 * polarity/sense, both-edges) in the PERF config register, then install
 * the matching flow handler.  IRQ_TYPE_NONE defaults to level-low.
 * Returns -EINVAL for unsupported flow type combinations.
 */
static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;
	unsigned long flags;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	/* decompose flow type into the three hw control bits */
	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		/* all three bits cleared */
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		pr_err("bogus flow type combination given !\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	spin_lock_irqsave(&epic_lock, flags);
	reg = bcm_perf_readl(regaddr);
	irq %= 4;	/* bit index within this config register */

	switch (bcm63xx_get_cpu_id()) {
	case BCM6348_CPU_ID:
		/* the 6348 uses its own field layout */
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
		break;

	case BCM3368_CPU_ID:
	case BCM6328_CPU_ID:
	case BCM6338_CPU_ID:
	case BCM6345_CPU_ID:
	case BCM6358_CPU_ID:
	case BCM6362_CPU_ID:
	case BCM6368_CPU_ID:
		/* common layout shared by all other supported SoCs */
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
		break;
	default:
		BUG();
	}

	bcm_perf_writel(reg, regaddr);
	spin_unlock_irqrestore(&epic_lock, flags);

	/* level irqs need the level flow handler, edge irqs the edge one */
	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else
		irq_set_handler_locked(d, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}
373
374#ifdef CONFIG_SMP
375static int bcm63xx_internal_set_affinity(struct irq_data *data,
376					 const struct cpumask *dest,
377					 bool force)
378{
379	if (!irqd_irq_disabled(data))
380		internal_irq_unmask(data, dest);
381
382	return 0;
383}
384#endif
385
/* irq_chip for the internal (PERF) interrupt controller sources */
static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};
391
/* irq_chip for the external irq lines (ack needed: write-to-clear hw) */
static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};
401
/*
 * Probe the SoC type and fill in the controller description: per-cpu
 * status/mask register addresses (index 1 stays 0 on single-set SoCs),
 * register width (32 or 64 bit), number of external irqs, whether they
 * are cascaded through the internal controller, and the external irq
 * config register offsets.  Finally select the width-specific dispatch
 * and mask/unmask implementations.
 */
static void bcm63xx_init_irq(void)
{
	int irq_bits;

	irq_stat_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[0] = bcm63xx_regset_address(RSET_PERF);
	irq_stat_addr[1] = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr[1] = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM3368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_3368_REG;
		irq_mask_addr[0] += PERF_IRQMASK_3368_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368;
		break;
	case BCM6328_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6328_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6328_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6328;
		break;
	case BCM6338_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6338_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6338_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6338;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6345_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6345_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6345;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6348_REG;
		irq_mask_addr[0] += PERF_IRQMASK_6348_REG;
		irq_stat_addr[1] = 0;
		irq_mask_addr[1] = 0;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6358_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6358_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6358_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6358_REG(1);
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6362_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6362_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6362_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6362_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6362_REG(1);
		irq_bits = 64;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr[0] += PERF_IRQSTAT_6368_REG(0);
		irq_mask_addr[0] += PERF_IRQMASK_6368_REG(0);
		irq_stat_addr[1] += PERF_IRQSTAT_6368_REG(1);
		irq_mask_addr[1] += PERF_IRQMASK_6368_REG(1);
		irq_bits = 64;
		ext_irq_count = 6;	/* only SoC with 6 external irqs */
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}

	/* bind the width-specific implementations built above */
	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal_32;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}
511
/*
 * Arch entry point: probe the controller, register the MIPS CPU irqs,
 * attach chip/handler pairs for internal and external irqs, and request
 * the cascade lines (IP2 always; IP3-IP6 only when external irqs are
 * routed directly rather than cascaded).  On SMP, also install the
 * affinity hook and pin the default affinity to the boot cpu.
 */
void __init arch_init_irq(void)
{
	int i, irq;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

	if (!is_ext_irq_cascaded) {
		/* external irqs arrive directly on CPU lines IP3.. */
		for (i = 3; i < 3 + ext_irq_count; ++i) {
			irq = MIPS_CPU_IRQ_BASE + i;
			if (request_irq(irq, no_action, IRQF_NO_THREAD,
					"cascade_extirq", NULL)) {
				pr_err("Failed to request irq %d (cascade_extirq)\n",
				       irq);
			}
		}
	}

	irq = MIPS_CPU_IRQ_BASE + 2;
	if (request_irq(irq, no_action, IRQF_NO_THREAD,	"cascade_ip2", NULL))
		pr_err("Failed to request irq %d (cascade_ip2)\n", irq);
#ifdef CONFIG_SMP
	if (is_ext_irq_cascaded) {
		irq = MIPS_CPU_IRQ_BASE + 3;
		if (request_irq(irq, no_action,	IRQF_NO_THREAD, "cascade_ip3",
				NULL))
			pr_err("Failed to request irq %d (cascade_ip3)\n", irq);
		bcm63xx_internal_irq_chip.irq_set_affinity =
			bcm63xx_internal_set_affinity;

		/* default all irqs to the boot cpu */
		cpumask_clear(irq_default_affinity);
		cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	}
#endif
}
v3.5.6
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
  7 * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr>
  8 */
  9
 10#include <linux/kernel.h>
 11#include <linux/init.h>
 12#include <linux/interrupt.h>
 13#include <linux/module.h>
 14#include <linux/irq.h>
 
 15#include <asm/irq_cpu.h>
 16#include <asm/mipsregs.h>
 17#include <bcm63xx_cpu.h>
 18#include <bcm63xx_regs.h>
 19#include <bcm63xx_io.h>
 20#include <bcm63xx_irq.h>
 21
 22static void __dispatch_internal(void) __maybe_unused;
 23static void __dispatch_internal_64(void) __maybe_unused;
 24static void __internal_irq_mask_32(unsigned int irq) __maybe_unused;
 25static void __internal_irq_mask_64(unsigned int irq) __maybe_unused;
 26static void __internal_irq_unmask_32(unsigned int irq) __maybe_unused;
 27static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused;
 28
 29#ifndef BCMCPU_RUNTIME_DETECT
 30#ifdef CONFIG_BCM63XX_CPU_6338
 31#define irq_stat_reg		PERF_IRQSTAT_6338_REG
 32#define irq_mask_reg		PERF_IRQMASK_6338_REG
 33#define irq_bits		32
 34#define is_ext_irq_cascaded	0
 35#define ext_irq_start		0
 36#define ext_irq_end		0
 37#define ext_irq_count		4
 38#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6338
 39#define ext_irq_cfg_reg2	0
 40#endif
 41#ifdef CONFIG_BCM63XX_CPU_6345
 42#define irq_stat_reg		PERF_IRQSTAT_6345_REG
 43#define irq_mask_reg		PERF_IRQMASK_6345_REG
 44#define irq_bits		32
 45#define is_ext_irq_cascaded	0
 46#define ext_irq_start		0
 47#define ext_irq_end		0
 48#define ext_irq_count		0
 49#define ext_irq_cfg_reg1	0
 50#define ext_irq_cfg_reg2	0
 51#endif
 52#ifdef CONFIG_BCM63XX_CPU_6348
 53#define irq_stat_reg		PERF_IRQSTAT_6348_REG
 54#define irq_mask_reg		PERF_IRQMASK_6348_REG
 55#define irq_bits		32
 56#define is_ext_irq_cascaded	0
 57#define ext_irq_start		0
 58#define ext_irq_end		0
 59#define ext_irq_count		4
 60#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6348
 61#define ext_irq_cfg_reg2	0
 62#endif
 63#ifdef CONFIG_BCM63XX_CPU_6358
 64#define irq_stat_reg		PERF_IRQSTAT_6358_REG
 65#define irq_mask_reg		PERF_IRQMASK_6358_REG
 66#define irq_bits		32
 67#define is_ext_irq_cascaded	1
 68#define ext_irq_start		(BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE)
 69#define ext_irq_end		(BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE)
 70#define ext_irq_count		4
 71#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6358
 72#define ext_irq_cfg_reg2	0
 73#endif
 74#ifdef CONFIG_BCM63XX_CPU_6368
 75#define irq_stat_reg		PERF_IRQSTAT_6368_REG
 76#define irq_mask_reg		PERF_IRQMASK_6368_REG
 77#define irq_bits		64
 78#define is_ext_irq_cascaded	1
 79#define ext_irq_start		(BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE)
 80#define ext_irq_end		(BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE)
 81#define ext_irq_count		6
 82#define ext_irq_cfg_reg1	PERF_EXTIRQ_CFG_REG_6368
 83#define ext_irq_cfg_reg2	PERF_EXTIRQ_CFG_REG2_6368
 84#endif
 85
 86#if irq_bits == 32
 87#define dispatch_internal			__dispatch_internal
 88#define internal_irq_mask			__internal_irq_mask_32
 89#define internal_irq_unmask			__internal_irq_unmask_32
 90#else
 91#define dispatch_internal			__dispatch_internal_64
 92#define internal_irq_mask			__internal_irq_mask_64
 93#define internal_irq_unmask			__internal_irq_unmask_64
 94#endif
 95
 96#define irq_stat_addr	(bcm63xx_regset_address(RSET_PERF) + irq_stat_reg)
 97#define irq_mask_addr	(bcm63xx_regset_address(RSET_PERF) + irq_mask_reg)
 98
/* Compile-time variant: everything is fixed by the #defines above. */
static inline void bcm63xx_init_irq(void)
{
}
102#else /* ! BCMCPU_RUNTIME_DETECT */
103
104static u32 irq_stat_addr, irq_mask_addr;
105static void (*dispatch_internal)(void);
 
106static int is_ext_irq_cascaded;
107static unsigned int ext_irq_count;
108static unsigned int ext_irq_start, ext_irq_end;
109static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2;
110static void (*internal_irq_mask)(unsigned int irq);
111static void (*internal_irq_unmask)(unsigned int irq);
112
/*
 * Runtime-detect variant: probe the SoC type and fill in the irq
 * status/mask register addresses, register width and external irq
 * layout, then bind the width-specific implementations.
 */
static void bcm63xx_init_irq(void)
{
	int irq_bits;

	irq_stat_addr = bcm63xx_regset_address(RSET_PERF);
	irq_mask_addr = bcm63xx_regset_address(RSET_PERF);

	switch (bcm63xx_get_cpu_id()) {
	case BCM6338_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6338_REG;
		irq_mask_addr += PERF_IRQMASK_6338_REG;
		irq_bits = 32;
		break;
	case BCM6345_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6345_REG;
		irq_mask_addr += PERF_IRQMASK_6345_REG;
		irq_bits = 32;
		break;
	case BCM6348_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6348_REG;
		irq_mask_addr += PERF_IRQMASK_6348_REG;
		irq_bits = 32;
		ext_irq_count = 4;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348;
		break;
	case BCM6358_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6358_REG;
		irq_mask_addr += PERF_IRQMASK_6358_REG;
		irq_bits = 32;
		ext_irq_count = 4;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358;
		break;
	case BCM6368_CPU_ID:
		irq_stat_addr += PERF_IRQSTAT_6368_REG;
		irq_mask_addr += PERF_IRQMASK_6368_REG;
		irq_bits = 64;
		ext_irq_count = 6;
		is_ext_irq_cascaded = 1;
		ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE;
		ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE;
		ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368;
		ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368;
		break;
	default:
		BUG();
	}

	if (irq_bits == 32) {
		dispatch_internal = __dispatch_internal;
		internal_irq_mask = __internal_irq_mask_32;
		internal_irq_unmask = __internal_irq_unmask_32;
	} else {
		dispatch_internal = __dispatch_internal_64;
		internal_irq_mask = __internal_irq_mask_64;
		internal_irq_unmask = __internal_irq_unmask_64;
	}
}
173#endif /* ! BCMCPU_RUNTIME_DETECT */
174
175static inline u32 get_ext_irq_perf_reg(int irq)
176{
177	if (irq < 4)
178		return ext_irq_cfg_reg1;
179	return ext_irq_cfg_reg2;
180}
181
/*
 * Deliver one pending internal controller source, remapping it into the
 * external irq number space when it is a cascaded external line.
 */
static inline void handle_internal(int intbit)
{
	if (is_ext_irq_cascaded &&
	    intbit >= ext_irq_start && intbit <= ext_irq_end)
		do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE);
	else
		do_IRQ(intbit + IRQ_INTERNAL_BASE);
}
190
 
 
 
 
 
 
 
 
 
 
 
 
 
 
191/*
192 * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not
193 * prioritize any interrupt relatively to another. the static counter
194 * will resume the loop where it ended the last time we left this
195 * function.
196 */
/*
 * 32-bit variant: single status word.  Services exactly ONE pending
 * source per call, resuming the round-robin scan from where the
 * previous call stopped (static counter 'i').
 */
static void __dispatch_internal(void)
{
	u32 pending;
	static int i;

	pending = bcm_readl(irq_stat_addr) & bcm_readl(irq_mask_addr);

	if (!pending)
		return ;

	while (1) {
		int to_call = i;

		i = (i + 1) & 0x1f;
		if (pending & (1 << to_call)) {
			handle_internal(to_call);
			break;
		}
	}
}
217
/* 64-bit variant of __dispatch_internal(): one 64-bit status word. */
static void __dispatch_internal_64(void)
{
	u64 pending;
	static int i;

	pending = bcm_readq(irq_stat_addr) & bcm_readq(irq_mask_addr);

	if (!pending)
		return ;

	while (1) {
		int to_call = i;

		i = (i + 1) & 0x3f;
		if (pending & (1ull << to_call)) {
			handle_internal(to_call);
			break;
		}
	}
}
238
/*
 * Top-level MIPS interrupt dispatch: loop until no enabled cause bits
 * remain.  IP7 is the CPU timer, IP2 feeds the internal controller;
 * IP3-IP6 are the four direct external lines unless those are cascaded
 * through the internal controller.
 */
asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP2)
			dispatch_internal();
		if (!is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}
265
266/*
267 * internal IRQs operations: only mask/unmask on PERF irq mask
268 * register.
269 */
270static void __internal_irq_mask_32(unsigned int irq)
271{
272	u32 mask;
273
274	mask = bcm_readl(irq_mask_addr);
275	mask &= ~(1 << irq);
276	bcm_writel(mask, irq_mask_addr);
277}
278
279static void __internal_irq_mask_64(unsigned int irq)
280{
281	u64 mask;
282
283	mask = bcm_readq(irq_mask_addr);
284	mask &= ~(1ull << irq);
285	bcm_writeq(mask, irq_mask_addr);
286}
287
288static void __internal_irq_unmask_32(unsigned int irq)
289{
290	u32 mask;
291
292	mask = bcm_readl(irq_mask_addr);
293	mask |= (1 << irq);
294	bcm_writel(mask, irq_mask_addr);
295}
296
297static void __internal_irq_unmask_64(unsigned int irq)
298{
299	u64 mask;
300
301	mask = bcm_readq(irq_mask_addr);
302	mask |= (1ull << irq);
303	bcm_writeq(mask, irq_mask_addr);
304}
305
/* irq_chip mask hook: translate to controller bit, forward to impl. */
static void bcm63xx_internal_irq_mask(struct irq_data *d)
{
	internal_irq_mask(d->irq - IRQ_INTERNAL_BASE);
}
310
/* irq_chip unmask hook: translate to controller bit, forward to impl. */
static void bcm63xx_internal_irq_unmask(struct irq_data *d)
{
	internal_irq_unmask(d->irq - IRQ_INTERNAL_BASE);
}
315
316/*
317 * external IRQs operations: mask/unmask and clear on PERF external
318 * irq control register.
319 */
/*
 * Mask an external irq: clear its enable bit in the PERF external irq
 * config register (6348 has its own bit layout) and, when cascaded,
 * mask the corresponding internal irq as well.
 */
static void bcm63xx_external_irq_mask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;

	regaddr = get_ext_irq_perf_reg(irq);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg &= ~EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);
	if (is_ext_irq_cascaded)
		internal_irq_mask(irq + ext_irq_start);
}
337
/*
 * Unmask an external irq: set its enable bit in the PERF external irq
 * config register and, when cascaded, unmask the internal irq too.
 */
static void bcm63xx_external_irq_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;

	regaddr = get_ext_irq_perf_reg(irq);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_MASK_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_MASK(irq % 4);

	bcm_perf_writel(reg, regaddr);

	if (is_ext_irq_cascaded)
		internal_irq_unmask(irq + ext_irq_start);
}
356
/*
 * Ack an external irq by writing its (write-to-clear) clear bit in the
 * PERF external irq config register.
 */
static void bcm63xx_external_irq_clear(struct irq_data *d)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;

	regaddr = get_ext_irq_perf_reg(irq);
	reg = bcm_perf_readl(regaddr);

	if (BCMCPU_IS_6348())
		reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4);
	else
		reg |= EXTIRQ_CFG_CLEAR(irq % 4);

	bcm_perf_writel(reg, regaddr);
}
372
/*
 * Configure the trigger type of an external irq: translate the generic
 * flow type into the three hardware control bits (level-vs-edge,
 * polarity/sense, both-edges) in the PERF config register, then install
 * the matching flow handler.  IRQ_TYPE_NONE defaults to level-low.
 * Returns -EINVAL for unsupported flow type combinations.
 */
static int bcm63xx_external_irq_set_type(struct irq_data *d,
					 unsigned int flow_type)
{
	unsigned int irq = d->irq - IRQ_EXTERNAL_BASE;
	u32 reg, regaddr;
	int levelsense, sense, bothedge;

	flow_type &= IRQ_TYPE_SENSE_MASK;

	if (flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_LEVEL_LOW;

	/* decompose flow type into the three hw control bits */
	levelsense = sense = bothedge = 0;
	switch (flow_type) {
	case IRQ_TYPE_EDGE_BOTH:
		bothedge = 1;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = 1;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		/* all three bits cleared */
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		levelsense = 1;
		sense = 1;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		levelsense = 1;
		break;

	default:
		printk(KERN_ERR "bogus flow type combination given !\n");
		return -EINVAL;
	}

	regaddr = get_ext_irq_perf_reg(irq);
	reg = bcm_perf_readl(regaddr);
	irq %= 4;	/* bit index within this config register */

	if (BCMCPU_IS_6348()) {
		/* the 6348 uses its own field layout */
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE_6348(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq);
	}

	if (BCMCPU_IS_6338() || BCMCPU_IS_6358() || BCMCPU_IS_6368()) {
		/* common layout shared by the other supported SoCs */
		if (levelsense)
			reg |= EXTIRQ_CFG_LEVELSENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_LEVELSENSE(irq);
		if (sense)
			reg |= EXTIRQ_CFG_SENSE(irq);
		else
			reg &= ~EXTIRQ_CFG_SENSE(irq);
		if (bothedge)
			reg |= EXTIRQ_CFG_BOTHEDGE(irq);
		else
			reg &= ~EXTIRQ_CFG_BOTHEDGE(irq);
	}

	bcm_perf_writel(reg, regaddr);

	/* level irqs need the level flow handler, edge irqs the edge one */
	irqd_set_trigger_type(d, flow_type);
	if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return IRQ_SET_MASK_OK_NOCOPY;
}
456
 
 
 
 
 
 
 
 
 
 
 
 
/* irq_chip for the internal (PERF) interrupt controller sources */
static struct irq_chip bcm63xx_internal_irq_chip = {
	.name		= "bcm63xx_ipic",
	.irq_mask	= bcm63xx_internal_irq_mask,
	.irq_unmask	= bcm63xx_internal_irq_unmask,
};
462
/* irq_chip for the external irq lines (ack needed: write-to-clear hw) */
static struct irq_chip bcm63xx_external_irq_chip = {
	.name		= "bcm63xx_epic",
	.irq_ack	= bcm63xx_external_irq_clear,

	.irq_mask	= bcm63xx_external_irq_mask,
	.irq_unmask	= bcm63xx_external_irq_unmask,

	.irq_set_type	= bcm63xx_external_irq_set_type,
};
472
/* dummy action for the IP2 cascade line (handled in plat_irq_dispatch) */
static struct irqaction cpu_ip2_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_ip2",
	.flags		= IRQF_NO_THREAD,
};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
478
/* dummy action for directly-routed external irq lines (IP3-IP6) */
static struct irqaction cpu_ext_cascade_action = {
	.handler	= no_action,
	.name		= "cascade_extirq",
	.flags		= IRQF_NO_THREAD,
};
 
 
 
 
 
484
/*
 * Arch entry point: probe the controller, register the MIPS CPU irqs,
 * attach chip/handler pairs for internal and external irqs, and set up
 * the cascade lines (IP2 always; IP3.. only when external irqs are
 * routed directly rather than cascaded).
 */
void __init arch_init_irq(void)
{
	int i;

	bcm63xx_init_irq();
	mips_cpu_irq_init();
	for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip,
					 handle_level_irq);

	for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i)
		irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip,
					 handle_edge_irq);

	if (!is_ext_irq_cascaded) {
		/* external irqs arrive directly on CPU lines IP3.. */
		for (i = 3; i < 3 + ext_irq_count; ++i)
			setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action);
	}

	setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action);
}