// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC ARConnect (MultiCore IP) support (formerly known as MCIP)
 *
 * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
#include <asm/setup.h>

static DEFINE_RAW_SPINLOCK(mcip_lock);

#ifdef CONFIG_SMP

static char smp_cpuinfo_buf[128];

/*
 * Set mask to halt GFRC if any online core in the SMP cluster is halted.
 * Only works for ARC HS v3.0+; it has no effect on earlier versions.
 */
static void mcip_update_gfrc_halt_mask(int cpu)
{
	struct bcr_generic gfrc;
	unsigned long flags;
	u32 gfrc_halt_mask;

	READ_BCR(ARC_REG_GFRC_BUILD, gfrc);

	/*
	 * The CMD_GFRC_SET_CORE and CMD_GFRC_READ_CORE commands were added
	 * in GFRC version 0x3.
	 */
	if (gfrc.ver < 0x3)
		return;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	__mcip_cmd(CMD_GFRC_READ_CORE, 0);
	gfrc_halt_mask = read_aux_reg(ARC_REG_MCIP_READBACK);
	gfrc_halt_mask |= BIT(cpu);
	__mcip_cmd_data(CMD_GFRC_SET_CORE, 0, gfrc_halt_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

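/*
 * Add @cpu to the ARConnect inter-core debug group: when any core in the
 * group halts for one of the selected causes, the remaining cores in the
 * group are halted as well, so the whole cluster can be debugged as a unit.
 */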
static void mcip_update_debug_halt_mask(int cpu)
{
	u32 mcip_mask = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * mcip_mask is the same for the CMD_DEBUG_SET_SELECT and
	 * CMD_DEBUG_SET_MASK commands, so read it once instead of reading
	 * both CMD_DEBUG_READ_MASK and CMD_DEBUG_READ_SELECT.
	 */
	__mcip_cmd(CMD_DEBUG_READ_SELECT, 0);
	mcip_mask = read_aux_reg(ARC_REG_MCIP_READBACK);

	mcip_mask |= BIT(cpu);

	__mcip_cmd_data(CMD_DEBUG_SET_SELECT, 0, mcip_mask);
	/*
	 * The parameter specifies the halt cause:
	 * STATUS32[H]/actionpoint/breakpoint/self-halt
	 * We choose all of them (0xF).
	 */
	__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xF, mcip_mask);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

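/*
 * Per-CPU bringup: wire up the IPI and softirq vectors and, where the
 * hardware provides them, add this core to the GFRC and debug halt groups.
 */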
static void mcip_setup_per_cpu(int cpu)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	smp_ipi_irq_setup(cpu, IPI_IRQ);
	smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);

	/* Update GFRC halt mask as a new CPU comes online */
	if (mp.gfrc)
		mcip_update_gfrc_halt_mask(cpu);

	/* Update MCIP debug mask as a new CPU comes online */
	if (mp.dbg)
		mcip_update_debug_halt_mask(cpu);
}

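/* plat_smp_ops.ipi_send callback: raise an inter-core interrupt on @cpu */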
static void mcip_ipi_send(int cpu)
{
	unsigned long flags;
	int ipi_was_pending;

	/* ARConnect can only send an IPI to other cores */
	if (unlikely(cpu == raw_smp_processor_id())) {
		arc_softirq_trigger(SOFTIRQ_IRQ);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/*
	 * If the receiver already has a pending interrupt, elide sending this
	 * one. Linux cross-core calling works fine with concurrent IPIs
	 * coalesced into one; see arch/arc/kernel/smp.c: ipi_send_msg_one().
	 */
	__mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
	ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
	if (!ipi_was_pending)
		__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

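/* plat_smp_ops.ipi_clear callback: ack the IPI(s) pending on @irq */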
static void mcip_ipi_clear(int irq)
{
	unsigned int cpu, c;
	unsigned long flags;

	if (unlikely(irq == SOFTIRQ_IRQ)) {
		arc_softirq_clear(irq);
		return;
	}

	raw_spin_lock_irqsave(&mcip_lock, flags);

	/* Who sent the IPI */
	__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);

	cpu = read_aux_reg(ARC_REG_MCIP_READBACK);	/* 1,2,4,8... */

	/*
	 * In a rare case, multiple concurrent IPIs sent to the same target can
	 * be coalesced by MCIP into one asserted IRQ, so @cpu can be
	 * "vectored" (multiple bits set) as opposed to the typical single bit.
	 */
	do {
		c = __ffs(cpu);			/* 0,1,2,3 */
		__mcip_cmd(CMD_INTRPT_GENERATE_ACK, c);
		cpu &= ~(1U << c);
	} while (cpu);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

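/* Early SMP init: record the ARConnect feature set for the cpuinfo printout */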
static void mcip_probe_n_setup(void)
{
	struct mcip_bcr mp;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	sprintf(smp_cpuinfo_buf,
		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
		mp.ver, mp.num_cores,
		IS_AVAIL1(mp.ipi, "IPI "),
		IS_AVAIL1(mp.idu, "IDU "),
		IS_AVAIL1(mp.dbg, "DEBUG "),
		IS_AVAIL1(mp.gfrc, "GFRC"));
}

struct plat_smp_ops plat_smp_ops = {
	.info = smp_cpuinfo_buf,
	.init_early_smp = mcip_probe_n_setup,
	.init_per_cpu = mcip_setup_per_cpu,
	.ipi_send = mcip_ipi_send,
	.ipi_clear = mcip_ipi_clear,
};

#endif

/***************************************************************************
 * ARCv2 Interrupt Distribution Unit (IDU)
 *
 * Connects external "COMMON" IRQs to the core intc, providing:
 *  - dynamic routing (IRQ affinity)
 *  - load balancing (round-robin interrupt distribution)
 *  - 1:N distribution
 *
 * It physically resides in the MCIP hw block
 */

#include <linux/irqchip.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/*
 * Set the DEST for @cmn_irq to @cpu_mask (1 bit per core)
 */
static void idu_set_dest(unsigned int cmn_irq, unsigned int cpu_mask)
{
	__mcip_cmd_data(CMD_IDU_SET_DEST, cmn_irq, cpu_mask);
}

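/*
 * Read-modify-write the mode word of @cmn_irq: the anonymous bitfield below
 * mirrors the CMD_IDU_SET_MODE data layout (distribution mode in bits [1:0],
 * trigger level/edge in bit [4]). @set_lvl/@set_distr select which of the
 * two fields is actually updated.
 */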
static void idu_set_mode(unsigned int cmn_irq, bool set_lvl, unsigned int lvl,
			 bool set_distr, unsigned int distr)
{
	union {
		unsigned int word;
		struct {
			unsigned int distr:2, pad:2, lvl:1, pad2:27;
		};
	} data;

	data.word = __mcip_cmd_read(CMD_IDU_READ_MODE, cmn_irq);
	if (set_distr)
		data.distr = distr;
	if (set_lvl)
		data.lvl = lvl;
	__mcip_cmd_data(CMD_IDU_SET_MODE, cmn_irq, data.word);
}

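/*
 * Mask a common IRQ by raw hw number: usable both from the irq_chip
 * callbacks and at init time, before any irq_data exists for it.
 */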
static void idu_irq_mask_raw(irq_hw_number_t hwirq)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, hwirq, 1);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask(struct irq_data *data)
{
	idu_irq_mask_raw(data->hwirq);
}

static void idu_irq_unmask(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 0);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_ack(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

static void idu_irq_mask_ack(struct irq_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd_data(CMD_IDU_SET_MASK, data->hwirq, 1);
	__mcip_cmd(CMD_IDU_ACK_CIRQ, data->hwirq);
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

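/*
 * Route @data->hwirq to the online CPUs in @cpumask (only the first 32 CPUs
 * fit in the IDU destination mask). A single destination bit selects fixed
 * destination mode; multiple bits select round-robin distribution.
 */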
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
		     bool force)
{
	unsigned long flags;
	cpumask_t online;
	unsigned int destination_bits;
	unsigned int distribution_mode;

	/* Error out if no online CPU remains per @cpumask */
	if (!cpumask_and(&online, cpumask, cpu_online_mask))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	destination_bits = cpumask_bits(&online)[0];
	idu_set_dest(data->hwirq, destination_bits);

	if (ffs(destination_bits) == fls(destination_bits))
		distribution_mode = IDU_M_DISTRI_DEST;
	else
		distribution_mode = IDU_M_DISTRI_RR;

	idu_set_mode(data->hwirq, false, 0, true, distribution_mode);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return IRQ_SET_MASK_OK;
}

static int idu_irq_set_type(struct irq_data *data, u32 type)
{
	unsigned long flags;

	/*
	 * ARCv2 IDU HW does not support inverse polarity, so these are the
	 * only interrupt types supported.
	 */
	if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
		return -EINVAL;

	raw_spin_lock_irqsave(&mcip_lock, flags);

	idu_set_mode(data->hwirq, true,
		     type & IRQ_TYPE_EDGE_RISING ? IDU_M_TRIG_EDGE :
						   IDU_M_TRIG_LEVEL,
		     false, 0);

	raw_spin_unlock_irqrestore(&mcip_lock, flags);

	return 0;
}

static void idu_irq_enable(struct irq_data *data)
{
	/*
	 * By default send all common interrupts to all available online CPUs.
	 * The affinity of common interrupts in the IDU must be set manually
	 * since in some cases the kernel will not call irq_set_affinity() by
	 * itself:
	 *   1. When the kernel is not configured with SMP support.
	 *   2. When the kernel is configured with SMP support but the upper
	 *      interrupt controllers do not support setting the affinity and
	 *      therefore cannot propagate it to the IDU.
	 */
	idu_irq_set_affinity(data, cpu_online_mask, false);
	idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
	.name = "MCIP IDU Intc",
	.irq_mask = idu_irq_mask,
	.irq_unmask = idu_irq_unmask,
	.irq_ack = idu_irq_ack,
	.irq_mask_ack = idu_irq_mask_ack,
	.irq_enable = idu_irq_enable,
	.irq_set_type = idu_irq_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity = idu_irq_set_affinity,
#endif

};

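/*
 * Chained handler installed on the core-intc uplink: convert the core hwirq
 * back into an IDU hwirq and dispatch it into the IDU irq domain.
 */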
static void idu_cascade_isr(struct irq_desc *desc)
{
	struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
	struct irq_chip *core_chip = irq_desc_get_chip(desc);
	irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
	irq_hw_number_t idu_hwirq = core_hwirq - FIRST_EXT_IRQ;

	chained_irq_enter(core_chip, desc);
	generic_handle_domain_irq(idu_domain, idu_hwirq);
	chained_irq_exit(core_chip, desc);
}

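/* Bind a newly mapped IDU virq to the IDU irq_chip with level-type flow handling */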
static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &idu_irq_chip, handle_level_irq);
	irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);

	return 0;
}

static const struct irq_domain_ops idu_irq_ops = {
	.xlate = irq_domain_xlate_onetwocell,
	.map = idu_irq_map,
};

/*
 * [16, 23]:   Statically assigned, always private-per-core (timers, WDT, IPI)
 * [24, 23+C]: If C > 0, then "C" common IRQs
 * [24+C, N]:  Not statically assigned, private-per-core
 */


static int __init
idu_of_init(struct device_node *intc, struct device_node *parent)
{
	struct irq_domain *domain;
	int nr_irqs;
	int i, virq;
	struct mcip_bcr mp;
	struct mcip_idu_bcr idu_bcr;

	READ_BCR(ARC_REG_MCIP_BCR, mp);

	if (!mp.idu)
		panic("IDU not detected, but DeviceTree using it");

	READ_BCR(ARC_REG_MCIP_IDU_BCR, idu_bcr);
	nr_irqs = mcip_idu_bcr_to_nr_irqs(idu_bcr);

	pr_info("MCIP: IDU supports %u common irqs\n", nr_irqs);

	domain = irq_domain_add_linear(intc, nr_irqs, &idu_irq_ops, NULL);

	/* Parent interrupts (core-intc) are already mapped */

	for (i = 0; i < nr_irqs; i++) {
		/* Mask all common interrupts by default */
		idu_irq_mask_raw(i);

		/*
		 * The parent uplink IRQs (towards the core intc), i.e.
		 * 24, 25, ..., were already mapped; call irq_create_mapping()
		 * again only to retrieve the parent virq, so that the IDU
		 * cascade handler can be installed as the first-level ISR.
		 */
		virq = irq_create_mapping(NULL, i + FIRST_EXT_IRQ);
		BUG_ON(!virq);
		irq_set_chained_handler_and_data(virq, idu_cascade_isr, domain);
	}

	__mcip_cmd(CMD_IDU_ENABLE, 0);

	return 0;
}
IRQCHIP_DECLARE(arcv2_idu_intc, "snps,archs-idu-intc", idu_of_init);