// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2010 PMC-Sierra, Inc, derived from irq_cpu.c
 *
 * This file defines the IRQ handler for MSP CIC subsystem interrupts.
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/irq.h>

#include <asm/mipsregs.h>

#include <msp_cic_int.h>
#include <msp_regs.h>

/*
 * External API
 */
extern void msp_per_irq_init(void);
extern void msp_per_irq_dispatch(void);

/*
 * Convenience Macro. Should be somewhere generic.
 */
#define get_current_vpe() \
	((read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE)

#ifdef CONFIG_SMP

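/*
 * dmt()/emt() pause and resume the other thread contexts (TCs) on this
 * VPE; dvpe()/evpe() pause and resume the other VPEs in the core. Both
 * pairs serialize the read-modify-write updates of the shared CIC mask
 * registers below.
 */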
#define LOCK_VPE(flags, mtflags) \
do { \
	local_irq_save(flags); \
	mtflags = dmt(); \
} while (0)

#define UNLOCK_VPE(flags, mtflags) \
do { \
	emt(mtflags); \
	local_irq_restore(flags); \
} while (0)

#define LOCK_CORE(flags, mtflags) \
do { \
	local_irq_save(flags); \
	mtflags = dvpe(); \
} while (0)

#define UNLOCK_CORE(flags, mtflags) \
do { \
	evpe(mtflags); \
	local_irq_restore(flags); \
} while (0)

#else

#define LOCK_VPE(flags, mtflags)
#define UNLOCK_VPE(flags, mtflags)
#endif

/* ensure writes to cic are completed */
static inline void cic_wmb(void)
{
	const volatile void __iomem *cic_mem = CIC_VPE0_MSK_REG;
	volatile u32 dummy_read;

	wmb();
	dummy_read = __raw_readl(cic_mem);
	dummy_read++;
}

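/* Enable a CIC source in the current VPE's mask register. */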
static void unmask_cic_irq(struct irq_data *d)
{
	volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
	int vpe;
#ifdef CONFIG_SMP
	unsigned int mtflags;
	unsigned long flags;

	/*
	 * Make sure we have IRQ affinity. It may have changed while
	 * we were processing the IRQ.
	 */
	if (!cpumask_test_cpu(smp_processor_id(),
			      irq_data_get_affinity_mask(d)))
		return;
#endif

	vpe = get_current_vpe();
	LOCK_VPE(flags, mtflags);
	cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE));
	UNLOCK_VPE(flags, mtflags);
	cic_wmb();
}

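/* Disable a CIC source in the current VPE's mask register. */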
static void mask_cic_irq(struct irq_data *d)
{
	volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
	int vpe = get_current_vpe();
#ifdef CONFIG_SMP
	unsigned long flags, mtflags;
#endif
	LOCK_VPE(flags, mtflags);
	cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE));
	UNLOCK_VPE(flags, mtflags);
	cic_wmb();
}

static void msp_cic_irq_ack(struct irq_data *d)
{
	mask_cic_irq(d);
	/*
	 * Only really necessary for 18, 16-14 and sometimes 3:0
	 * (since these can be edge sensitive) but it doesn't
	 * hurt for the others
	 */
	*CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
}

/* Note: Limiting to VSMP. */

#ifdef CONFIG_MIPS_MT_SMP
static int msp_cic_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *cpumask, bool force)
{
	int cpu;
	unsigned long flags;
	unsigned int mtflags;
	unsigned long imask = (1 << (d->irq - MSP_CIC_INTBASE));
	volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG;

	/* timer balancing should be disabled in kernel code */
	BUG_ON(d->irq == MSP_INT_VPE0_TIMER || d->irq == MSP_INT_VPE1_TIMER);

	LOCK_CORE(flags, mtflags);
	/* enable if any of each VPE's TCs require this IRQ */
	for_each_online_cpu(cpu) {
		if (cpumask_test_cpu(cpu, cpumask))
			cic_mask[cpu] |= imask;
		else
			cic_mask[cpu] &= ~imask;
	}

	UNLOCK_CORE(flags, mtflags);
	return 0;
}
#endif

static struct irq_chip msp_cic_irq_controller = {
	.name = "MSP_CIC",
	.irq_mask = mask_cic_irq,
	.irq_mask_ack = msp_cic_irq_ack,
	.irq_unmask = unmask_cic_irq,
	.irq_ack = msp_cic_irq_ack,
#ifdef CONFIG_MIPS_MT_SMP
	.irq_set_affinity = msp_cic_irq_set_affinity,
#endif
};

void __init msp_cic_irq_init(void)
{
	int i;

	/* Mask/clear interrupts. */
	*CIC_VPE0_MSK_REG = 0x00000000;
	*CIC_VPE1_MSK_REG = 0x00000000;
	*CIC_STS_REG = 0xFFFFFFFF;
	/*
	 * The MSP7120 RG and EVBD boards use IRQ[6:4] for PCI.
	 * These inputs map to EXT_INT_POL[6:4] inside the CIC.
	 * They are to be active low, level sensitive.
	 */
	*CIC_EXT_CFG_REG &= 0xFFFF8F8F;

	/* initialize all the IRQ descriptors */
	for (i = MSP_CIC_INTBASE; i < MSP_CIC_INTBASE + 32; i++) {
		irq_set_chip_and_handler(i, &msp_cic_irq_controller,
					 handle_level_irq);
	}

	/* Initialize the PER interrupt sub-system */
	msp_per_irq_init();
}

/* CIC masked by CIC vector processing before dispatch called */
void msp_cic_irq_dispatch(void)
{
	volatile u32 *cic_msk_reg = (volatile u32 *)CIC_VPE0_MSK_REG;
	u32 cic_mask;
	u32 pending;
	int cic_status = *CIC_STS_REG;

	cic_mask = cic_msk_reg[get_current_vpe()];
	pending = cic_status & cic_mask;
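	/*
	 * Service the VPE timer and PER cascade interrupts first, then
	 * the lowest-numbered remaining CIC source; if nothing is
	 * pending, report a spurious interrupt.
	 */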
	if (pending & (1 << (MSP_INT_VPE0_TIMER - MSP_CIC_INTBASE))) {
		do_IRQ(MSP_INT_VPE0_TIMER);
	} else if (pending & (1 << (MSP_INT_VPE1_TIMER - MSP_CIC_INTBASE))) {
		do_IRQ(MSP_INT_VPE1_TIMER);
	} else if (pending & (1 << (MSP_INT_PER - MSP_CIC_INTBASE))) {
		msp_per_irq_dispatch();
	} else if (pending) {
		do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1);
	} else {
		spurious_interrupt();
	}
}