Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * ip30-irq.c: Highlevel interrupt handling for IP30 architecture.
4 */
5#include <linux/errno.h>
6#include <linux/init.h>
7#include <linux/interrupt.h>
8#include <linux/irq.h>
9#include <linux/irqdomain.h>
10#include <linux/percpu.h>
11#include <linux/spinlock.h>
12#include <linux/tick.h>
13#include <linux/types.h>
14
15#include <asm/irq_cpu.h>
16#include <asm/sgi/heart.h>
17
18#include "ip30-common.h"
19
/*
 * Per-IRQ chip data: which CPU's HEART interrupt-mask register (IMR)
 * currently has this interrupt enabled.  irq_mask appears unused in
 * this file -- NOTE(review): possibly vestigial, confirm before removal.
 */
struct heart_irq_data {
	u64 *irq_mask;
	int cpu;
};

/* Allocation bitmap of HEART hardware interrupt numbers (hwirqs). */
static DECLARE_BITMAP(heart_irq_map, HEART_NUM_IRQS);

/* Software shadow of each CPU's IMR; written back via heart_write(). */
static DEFINE_PER_CPU(unsigned long, irq_enable_mask);
28
29static inline int heart_alloc_int(void)
30{
31 int bit;
32
33again:
34 bit = find_first_zero_bit(heart_irq_map, HEART_NUM_IRQS);
35 if (bit >= HEART_NUM_IRQS)
36 return -ENOSPC;
37
38 if (test_and_set_bit(bit, heart_irq_map))
39 goto again;
40
41 return bit;
42}
43
/*
 * Chained handler for the HEART level-4 (error) interrupt line.
 *
 * Reads the interrupt status (ISR), this CPU's mask (IMR) and the HEART
 * cause register, then reports/acks any pending error interrupts.  If a
 * cause value is present the error is treated as fatal and we panic();
 * otherwise the errors are acked and re-enabled.
 */
static void ip30_error_irq(struct irq_desc *desc)
{
	u64 pending, mask, cause, error_irqs, err_reg;
	int cpu = smp_processor_id();
	int i;

	pending = heart_read(&heart_regs->isr);
	mask = heart_read(&heart_regs->imr[cpu]);
	cause = heart_read(&heart_regs->cause);
	/* Only the L4 (error) sources that are both pending and enabled. */
	error_irqs = (pending & HEART_L4_INT_MASK & mask);

	/* Bail if there's nothing to process (how did we get here, then?) */
	if (unlikely(!error_irqs))
		return;

	/* Prevent any of the error IRQs from firing again. */
	heart_write(mask & ~(pending), &heart_regs->imr[cpu]);

	/* Ack all error IRQs. */
	heart_write(HEART_L4_INT_MASK, &heart_regs->clear_isr);

	/*
	 * If we also have a cause value, then something happened, so loop
	 * through the error IRQs and report a "heart attack" for each one
	 * and print the value of the HEART cause register.  This is really
	 * primitive right now, but it should hopefully work until a more
	 * robust error handling routine can be put together.
	 *
	 * Refer to heart.h for the HC_* macros to work out the cause
	 * that got us here.
	 */
	if (cause) {
		pr_alert("IP30: CPU%d: HEART ATTACK! ISR = 0x%.16llx, IMR = 0x%.16llx, CAUSE = 0x%.16llx\n",
			 cpu, pending, mask, cause);

		if (cause & HC_COR_MEM_ERR) {
			err_reg = heart_read(&heart_regs->mem_err_addr);
			pr_alert("  HEART_MEMERR_ADDR = 0x%.16llx\n", err_reg);
		}

		/* i = 63; i >= 51; i-- */
		for (i = HEART_ERR_MASK_END; i >= HEART_ERR_MASK_START; i--)
			if ((pending >> i) & 1)
				pr_alert("  HEART Error IRQ #%d\n", i);

		/* XXX: Seems possible to loop forever here, so panic(). */
		panic("IP30: Fatal Error !\n");
	}

	/* Unmask the error IRQs (restore the IMR we read on entry). */
	heart_write(mask, &heart_regs->imr[cpu]);
}
96
97static void ip30_normal_irq(struct irq_desc *desc)
98{
99 int cpu = smp_processor_id();
100 struct irq_domain *domain;
101 u64 pend, mask;
102 int irq;
103
104 pend = heart_read(&heart_regs->isr);
105 mask = (heart_read(&heart_regs->imr[cpu]) &
106 (HEART_L0_INT_MASK | HEART_L1_INT_MASK | HEART_L2_INT_MASK));
107
108 pend &= mask;
109 if (unlikely(!pend))
110 return;
111
112#ifdef CONFIG_SMP
113 if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_0)) {
114 heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0),
115 &heart_regs->clear_isr);
116 scheduler_ipi();
117 } else if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_1)) {
118 heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_1),
119 &heart_regs->clear_isr);
120 scheduler_ipi();
121 } else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_0)) {
122 heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0),
123 &heart_regs->clear_isr);
124 generic_smp_call_function_interrupt();
125 } else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_1)) {
126 heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_1),
127 &heart_regs->clear_isr);
128 generic_smp_call_function_interrupt();
129 } else
130#endif
131 {
132 domain = irq_desc_get_handler_data(desc);
133 irq = irq_linear_revmap(domain, __ffs(pend));
134 if (irq)
135 generic_handle_irq(irq);
136 else
137 spurious_interrupt();
138 }
139}
140
/* Ack a HEART interrupt: clear its bit in the interrupt status register. */
static void ip30_ack_heart_irq(struct irq_data *d)
{
	heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
}
145
/*
 * Mask a HEART interrupt: drop it from the shadow enable-mask of the
 * CPU currently targeted by this IRQ, then write the mask to that
 * CPU's IMR.
 */
static void ip30_mask_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
}
154
/*
 * Mask then ack a HEART interrupt.  The IMR update goes out before the
 * ISR clear so the source cannot re-assert between the two writes.
 */
static void ip30_mask_and_ack_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
	heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
}
164
/*
 * Unmask a HEART interrupt: set it in the targeted CPU's shadow
 * enable-mask and write the mask to that CPU's IMR.
 */
static void ip30_unmask_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	set_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
}
173
/*
 * Retarget a HEART interrupt at another CPU.
 *
 * HEART routes an interrupt to exactly one CPU (whichever IMR the bit
 * is set in), so we pick the first online CPU of the requested mask.
 * A started IRQ is masked+acked on the old CPU before hd->cpu changes
 * and unmasked on the new one afterwards, so it is never enabled on
 * both IMRs at once.  Returns 0 on success, -EINVAL without chip data.
 */
static int ip30_set_heart_irq_affinity(struct irq_data *d,
				       const struct cpumask *mask, bool force)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);

	if (!hd)
		return -EINVAL;

	if (irqd_is_started(d))
		ip30_mask_and_ack_heart_irq(d);

	hd->cpu = cpumask_first_and(mask, cpu_online_mask);

	if (irqd_is_started(d))
		ip30_unmask_heart_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));

	return 0;
}
194
/* irq_chip callbacks for interrupts owned by the HEART controller. */
static struct irq_chip heart_irq_chip = {
	.name			= "HEART",
	.irq_ack		= ip30_ack_heart_irq,
	.irq_mask		= ip30_mask_heart_irq,
	.irq_mask_ack		= ip30_mask_and_ack_heart_irq,
	.irq_unmask		= ip30_unmask_heart_irq,
	.irq_set_affinity	= ip30_set_heart_irq_affinity,
};
203
/*
 * irq_domain .alloc: bind one Linux IRQ to a freshly allocated HEART
 * hwirq.  Only single-IRQ allocations are supported.  The chip data
 * (hd) is freed in heart_domain_free().  Returns 0 on success,
 * -EINVAL on bad arguments, -ENOMEM or -EAGAIN (hwirqs exhausted)
 * on failure.
 */
static int heart_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct heart_irq_data *hd;
	int hwirq;

	if (nr_irqs > 1 || !info)
		return -EINVAL;

	hd = kzalloc(sizeof(*hd), GFP_KERNEL);
	if (!hd)
		return -ENOMEM;

	hwirq = heart_alloc_int();
	if (hwirq < 0) {
		kfree(hd);
		return -EAGAIN;
	}
	irq_domain_set_info(domain, virq, hwirq, &heart_irq_chip, hd,
			    handle_level_irq, NULL, NULL);

	return 0;
}
228
/*
 * irq_domain .free: release the hwirq back to heart_irq_map and free
 * the chip data allocated in heart_domain_alloc().  Mirrors the
 * single-IRQ restriction of the alloc path.
 */
static void heart_domain_free(struct irq_domain *domain,
			      unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irqd;

	if (nr_irqs > 1)
		return;

	irqd = irq_domain_get_irq_data(domain, virq);
	if (irqd) {
		clear_bit(irqd->hwirq, heart_irq_map);
		kfree(irqd->chip_data);
	}
}
243
/* irq_domain operations for the linear HEART domain. */
static const struct irq_domain_ops heart_domain_ops = {
	.alloc = heart_domain_alloc,
	.free  = heart_domain_free,
};
248
/*
 * Enable the per-CPU IPI sources (resched + smp-call) on the calling
 * CPU.  The _CPU_0 hwirq constants are consecutive per CPU, hence the
 * "+ cpu" offset.  Each source is acked (cleared in the ISR) before
 * the combined mask is written to this CPU's IMR, so no stale IPI
 * fires at enable time.
 */
void __init ip30_install_ipi(void)
{
	int cpu = smp_processor_id();
	unsigned long *mask = &per_cpu(irq_enable_mask, cpu);

	set_bit(HEART_L2_INT_RESCHED_CPU_0 + cpu, mask);
	heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0 + cpu),
		    &heart_regs->clear_isr);
	set_bit(HEART_L2_INT_CALL_CPU_0 + cpu, mask);
	heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0 + cpu),
		    &heart_regs->clear_isr);

	heart_write(*mask, &heart_regs->imr[cpu]);
}
263
/*
 * Platform IRQ init for IP30.
 *
 * Order matters: mask everything, ack everything, enable the per-CPU
 * error sources, reserve hwirqs that hardware/software convention
 * spoken for, then create the HEART irq_domain and chain the four
 * MIPS CPU interrupt lines (L0-L2 normal, L4 error) into it.
 */
void __init arch_init_irq(void)
{
	struct irq_domain *domain;
	struct fwnode_handle *fn;
	unsigned long *mask;
	int i;

	mips_cpu_irq_init();

	/* Mask all IRQs (all four IMR slots, regardless of CPU count). */
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[0]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[1]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[2]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[3]);

	/* Ack everything. */
	heart_write(HEART_ACK_ALL_MASK, &heart_regs->clear_isr);

	/* Enable specific HEART error IRQs for each CPU. */
	mask = &per_cpu(irq_enable_mask, 0);
	*mask |= HEART_CPU0_ERR_MASK;
	heart_write(*mask, &heart_regs->imr[0]);
	mask = &per_cpu(irq_enable_mask, 1);
	*mask |= HEART_CPU1_ERR_MASK;
	heart_write(*mask, &heart_regs->imr[1]);

	/*
	 * Some HEART bits are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be accidentally
	 * used later.
	 */
	set_bit(HEART_L0_INT_GENERIC, heart_irq_map);
	set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_0, heart_irq_map);
	set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_1, heart_irq_map);
	set_bit(HEART_L2_INT_RESCHED_CPU_0, heart_irq_map);
	set_bit(HEART_L2_INT_RESCHED_CPU_1, heart_irq_map);
	set_bit(HEART_L2_INT_CALL_CPU_0, heart_irq_map);
	set_bit(HEART_L2_INT_CALL_CPU_1, heart_irq_map);
	set_bit(HEART_L3_INT_TIMER, heart_irq_map);

	/* Reserve the error interrupts (#51 to #63). */
	for (i = HEART_L4_INT_XWID_ERR_9; i <= HEART_L4_INT_HEART_EXCP; i++)
		set_bit(i, heart_irq_map);

	fn = irq_domain_alloc_named_fwnode("HEART");
	WARN_ON(fn == NULL);
	if (!fn)
		return;
	domain = irq_domain_create_linear(fn, HEART_NUM_IRQS,
					  &heart_domain_ops, NULL);
	WARN_ON(domain == NULL);
	if (!domain)
		return;

	irq_set_default_host(domain);

	/* Chain the MIPS CPU IRQ lines into the HEART handlers. */
	irq_set_percpu_devid(IP30_HEART_L0_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L0_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_L1_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L1_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_L2_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L2_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_ERR_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_ERR_IRQ, ip30_error_irq,
					 domain);
}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * ip30-irq.c: Highlevel interrupt handling for IP30 architecture.
4 */
5#include <linux/errno.h>
6#include <linux/init.h>
7#include <linux/interrupt.h>
8#include <linux/irq.h>
9#include <linux/irqdomain.h>
10#include <linux/percpu.h>
11#include <linux/spinlock.h>
12#include <linux/tick.h>
13#include <linux/types.h>
14
15#include <asm/irq_cpu.h>
16#include <asm/sgi/heart.h>
17
18#include "ip30-common.h"
19
/*
 * Per-IRQ chip data: which CPU's HEART interrupt-mask register (IMR)
 * currently has this interrupt enabled.  irq_mask appears unused in
 * this file -- NOTE(review): possibly vestigial, confirm before removal.
 */
struct heart_irq_data {
	u64 *irq_mask;
	int cpu;
};

/* Allocation bitmap of HEART hardware interrupt numbers (hwirqs). */
static DECLARE_BITMAP(heart_irq_map, HEART_NUM_IRQS);

/* Software shadow of each CPU's IMR; written back via heart_write(). */
static DEFINE_PER_CPU(unsigned long, irq_enable_mask);
28
29static inline int heart_alloc_int(void)
30{
31 int bit;
32
33again:
34 bit = find_first_zero_bit(heart_irq_map, HEART_NUM_IRQS);
35 if (bit >= HEART_NUM_IRQS)
36 return -ENOSPC;
37
38 if (test_and_set_bit(bit, heart_irq_map))
39 goto again;
40
41 return bit;
42}
43
/*
 * Chained handler for the HEART level-4 (error) interrupt line.
 *
 * Reads the interrupt status (ISR), this CPU's mask (IMR) and the HEART
 * cause register, then reports/acks any pending error interrupts.  If a
 * cause value is present the error is treated as fatal and we panic();
 * otherwise the errors are acked and re-enabled.
 */
static void ip30_error_irq(struct irq_desc *desc)
{
	u64 pending, mask, cause, error_irqs, err_reg;
	int cpu = smp_processor_id();
	int i;

	pending = heart_read(&heart_regs->isr);
	mask = heart_read(&heart_regs->imr[cpu]);
	cause = heart_read(&heart_regs->cause);
	/* Only the L4 (error) sources that are both pending and enabled. */
	error_irqs = (pending & HEART_L4_INT_MASK & mask);

	/* Bail if there's nothing to process (how did we get here, then?) */
	if (unlikely(!error_irqs))
		return;

	/* Prevent any of the error IRQs from firing again. */
	heart_write(mask & ~(pending), &heart_regs->imr[cpu]);

	/* Ack all error IRQs. */
	heart_write(HEART_L4_INT_MASK, &heart_regs->clear_isr);

	/*
	 * If we also have a cause value, then something happened, so loop
	 * through the error IRQs and report a "heart attack" for each one
	 * and print the value of the HEART cause register.  This is really
	 * primitive right now, but it should hopefully work until a more
	 * robust error handling routine can be put together.
	 *
	 * Refer to heart.h for the HC_* macros to work out the cause
	 * that got us here.
	 */
	if (cause) {
		pr_alert("IP30: CPU%d: HEART ATTACK! ISR = 0x%.16llx, IMR = 0x%.16llx, CAUSE = 0x%.16llx\n",
			 cpu, pending, mask, cause);

		if (cause & HC_COR_MEM_ERR) {
			err_reg = heart_read(&heart_regs->mem_err_addr);
			pr_alert("  HEART_MEMERR_ADDR = 0x%.16llx\n", err_reg);
		}

		/* i = 63; i >= 51; i-- */
		for (i = HEART_ERR_MASK_END; i >= HEART_ERR_MASK_START; i--)
			if ((pending >> i) & 1)
				pr_alert("  HEART Error IRQ #%d\n", i);

		/* XXX: Seems possible to loop forever here, so panic(). */
		panic("IP30: Fatal Error !\n");
	}

	/* Unmask the error IRQs (restore the IMR we read on entry). */
	heart_write(mask, &heart_regs->imr[cpu]);
}
96
/*
 * Chained handler for HEART level 0-2 (normal) interrupts.
 *
 * Handles the highest-priority pending-and-enabled source per
 * invocation: IPI sources (resched / smp-call) are acked and serviced
 * directly; anything else is dispatched through the HEART irq_domain
 * via generic_handle_domain_irq(), which reports an unmapped hwirq by
 * a non-zero return.
 */
static void ip30_normal_irq(struct irq_desc *desc)
{
	int cpu = smp_processor_id();
	struct irq_domain *domain;
	u64 pend, mask;
	int ret;

	pend = heart_read(&heart_regs->isr);
	/* Only consider normal (L0-L2) sources enabled for this CPU. */
	mask = (heart_read(&heart_regs->imr[cpu]) &
		(HEART_L0_INT_MASK | HEART_L1_INT_MASK | HEART_L2_INT_MASK));

	pend &= mask;
	if (unlikely(!pend))
		return;

#ifdef CONFIG_SMP
	if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_0)) {
		heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0),
			    &heart_regs->clear_isr);
		scheduler_ipi();
	} else if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_1)) {
		heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_1),
			    &heart_regs->clear_isr);
		scheduler_ipi();
	} else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_0)) {
		heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0),
			    &heart_regs->clear_isr);
		generic_smp_call_function_interrupt();
	} else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_1)) {
		heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_1),
			    &heart_regs->clear_isr);
		generic_smp_call_function_interrupt();
	} else
#endif
	{
		domain = irq_desc_get_handler_data(desc);
		/* Dispatch the lowest pending hwirq through the domain. */
		ret = generic_handle_domain_irq(domain, __ffs(pend));
		if (ret)
			spurious_interrupt();
	}
}
138
/* Ack a HEART interrupt: clear its bit in the interrupt status register. */
static void ip30_ack_heart_irq(struct irq_data *d)
{
	heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
}
143
/*
 * Mask a HEART interrupt: drop it from the shadow enable-mask of the
 * CPU currently targeted by this IRQ, then write the mask to that
 * CPU's IMR.
 */
static void ip30_mask_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
}
152
/*
 * Mask then ack a HEART interrupt.  The IMR update goes out before the
 * ISR clear so the source cannot re-assert between the two writes.
 */
static void ip30_mask_and_ack_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
	heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
}
162
/*
 * Unmask a HEART interrupt: set it in the targeted CPU's shadow
 * enable-mask and write the mask to that CPU's IMR.
 */
static void ip30_unmask_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	set_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
}
171
/*
 * Retarget a HEART interrupt at another CPU.
 *
 * HEART routes an interrupt to exactly one CPU (whichever IMR the bit
 * is set in), so we pick the first online CPU of the requested mask.
 * A started IRQ is masked+acked on the old CPU before hd->cpu changes
 * and unmasked on the new one afterwards, so it is never enabled on
 * both IMRs at once.  Returns 0 on success, -EINVAL without chip data.
 */
static int ip30_set_heart_irq_affinity(struct irq_data *d,
				       const struct cpumask *mask, bool force)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);

	if (!hd)
		return -EINVAL;

	if (irqd_is_started(d))
		ip30_mask_and_ack_heart_irq(d);

	hd->cpu = cpumask_first_and(mask, cpu_online_mask);

	if (irqd_is_started(d))
		ip30_unmask_heart_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));

	return 0;
}
192
/* irq_chip callbacks for interrupts owned by the HEART controller. */
static struct irq_chip heart_irq_chip = {
	.name			= "HEART",
	.irq_ack		= ip30_ack_heart_irq,
	.irq_mask		= ip30_mask_heart_irq,
	.irq_mask_ack		= ip30_mask_and_ack_heart_irq,
	.irq_unmask		= ip30_unmask_heart_irq,
	.irq_set_affinity	= ip30_set_heart_irq_affinity,
};
201
/*
 * irq_domain .alloc: bind one Linux IRQ to a freshly allocated HEART
 * hwirq.  Only single-IRQ allocations are supported.  The chip data
 * (hd) is freed in heart_domain_free().  Returns 0 on success,
 * -EINVAL on bad arguments, -ENOMEM or -EAGAIN (hwirqs exhausted)
 * on failure.
 */
static int heart_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct heart_irq_data *hd;
	int hwirq;

	if (nr_irqs > 1 || !info)
		return -EINVAL;

	hd = kzalloc(sizeof(*hd), GFP_KERNEL);
	if (!hd)
		return -ENOMEM;

	hwirq = heart_alloc_int();
	if (hwirq < 0) {
		kfree(hd);
		return -EAGAIN;
	}
	irq_domain_set_info(domain, virq, hwirq, &heart_irq_chip, hd,
			    handle_level_irq, NULL, NULL);

	return 0;
}
226
/*
 * irq_domain .free: release the hwirq back to heart_irq_map and free
 * the chip data allocated in heart_domain_alloc().  Mirrors the
 * single-IRQ restriction of the alloc path.
 */
static void heart_domain_free(struct irq_domain *domain,
			      unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irqd;

	if (nr_irqs > 1)
		return;

	irqd = irq_domain_get_irq_data(domain, virq);
	if (irqd) {
		clear_bit(irqd->hwirq, heart_irq_map);
		kfree(irqd->chip_data);
	}
}
241
/* irq_domain operations for the linear HEART domain. */
static const struct irq_domain_ops heart_domain_ops = {
	.alloc = heart_domain_alloc,
	.free  = heart_domain_free,
};
246
/*
 * Enable the per-CPU IPI sources (resched + smp-call) on the calling
 * CPU.  The _CPU_0 hwirq constants are consecutive per CPU, hence the
 * "+ cpu" offset.  Each source is acked (cleared in the ISR) before
 * the combined mask is written to this CPU's IMR, so no stale IPI
 * fires at enable time.
 */
void __init ip30_install_ipi(void)
{
	int cpu = smp_processor_id();
	unsigned long *mask = &per_cpu(irq_enable_mask, cpu);

	set_bit(HEART_L2_INT_RESCHED_CPU_0 + cpu, mask);
	heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0 + cpu),
		    &heart_regs->clear_isr);
	set_bit(HEART_L2_INT_CALL_CPU_0 + cpu, mask);
	heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0 + cpu),
		    &heart_regs->clear_isr);

	heart_write(*mask, &heart_regs->imr[cpu]);
}
261
/*
 * Platform IRQ init for IP30.
 *
 * Order matters: mask everything, ack everything, enable the per-CPU
 * error sources, reserve hwirqs that hardware/software convention has
 * spoken for, then create the HEART irq_domain and chain the four
 * MIPS CPU interrupt lines (L0-L2 normal, L4 error) into it.
 */
void __init arch_init_irq(void)
{
	struct irq_domain *domain;
	struct fwnode_handle *fn;
	unsigned long *mask;
	int i;

	mips_cpu_irq_init();

	/* Mask all IRQs (all four IMR slots, regardless of CPU count). */
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[0]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[1]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[2]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[3]);

	/* Ack everything. */
	heart_write(HEART_ACK_ALL_MASK, &heart_regs->clear_isr);

	/* Enable specific HEART error IRQs for each CPU. */
	mask = &per_cpu(irq_enable_mask, 0);
	*mask |= HEART_CPU0_ERR_MASK;
	heart_write(*mask, &heart_regs->imr[0]);
	mask = &per_cpu(irq_enable_mask, 1);
	*mask |= HEART_CPU1_ERR_MASK;
	heart_write(*mask, &heart_regs->imr[1]);

	/*
	 * Some HEART bits are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be accidentally
	 * used later.
	 */
	set_bit(HEART_L0_INT_GENERIC, heart_irq_map);
	set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_0, heart_irq_map);
	set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_1, heart_irq_map);
	set_bit(HEART_L2_INT_RESCHED_CPU_0, heart_irq_map);
	set_bit(HEART_L2_INT_RESCHED_CPU_1, heart_irq_map);
	set_bit(HEART_L2_INT_CALL_CPU_0, heart_irq_map);
	set_bit(HEART_L2_INT_CALL_CPU_1, heart_irq_map);
	set_bit(HEART_L3_INT_TIMER, heart_irq_map);

	/* Reserve the error interrupts (#51 to #63). */
	for (i = HEART_L4_INT_XWID_ERR_9; i <= HEART_L4_INT_HEART_EXCP; i++)
		set_bit(i, heart_irq_map);

	fn = irq_domain_alloc_named_fwnode("HEART");
	WARN_ON(fn == NULL);
	if (!fn)
		return;
	domain = irq_domain_create_linear(fn, HEART_NUM_IRQS,
					  &heart_domain_ops, NULL);
	WARN_ON(domain == NULL);
	if (!domain)
		return;

	irq_set_default_host(domain);

	/* Chain the MIPS CPU IRQ lines into the HEART handlers. */
	irq_set_percpu_devid(IP30_HEART_L0_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L0_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_L1_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L1_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_L2_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L2_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_ERR_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_ERR_IRQ, ip30_error_irq,
					 domain);
}