1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Broadcom BCM6345 style Level 1 interrupt controller driver
4 *
5 * Copyright (C) 2014 Broadcom Corporation
6 * Copyright 2015 Simon Arlott
7 *
8 * This is based on the BCM7038 (which supports SMP) but with a single
9 * enable register instead of separate mask/set/clear registers.
10 *
11 * The BCM3380 has a similar mask/status register layout, but each pair
12 * of words is at separate locations (and SMP is not supported).
13 *
14 * ENABLE/STATUS words are packed next to each other for each CPU:
15 *
16 * BCM6368:
17 * 0x1000_0020: CPU0_W0_ENABLE
18 * 0x1000_0024: CPU0_W1_ENABLE
19 * 0x1000_0028: CPU0_W0_STATUS IRQs 31-63
20 * 0x1000_002c: CPU0_W1_STATUS IRQs 0-31
21 * 0x1000_0030: CPU1_W0_ENABLE
22 * 0x1000_0034: CPU1_W1_ENABLE
23 * 0x1000_0038: CPU1_W0_STATUS IRQs 31-63
24 * 0x1000_003c: CPU1_W1_STATUS IRQs 0-31
25 *
26 * BCM63168:
27 * 0x1000_0020: CPU0_W0_ENABLE
28 * 0x1000_0024: CPU0_W1_ENABLE
29 * 0x1000_0028: CPU0_W2_ENABLE
30 * 0x1000_002c: CPU0_W3_ENABLE
31 * 0x1000_0030: CPU0_W0_STATUS IRQs 96-127
32 * 0x1000_0034: CPU0_W1_STATUS IRQs 64-95
33 * 0x1000_0038: CPU0_W2_STATUS IRQs 32-63
34 * 0x1000_003c: CPU0_W3_STATUS IRQs 0-31
35 * 0x1000_0040: CPU1_W0_ENABLE
36 * 0x1000_0044: CPU1_W1_ENABLE
37 * 0x1000_0048: CPU1_W2_ENABLE
38 * 0x1000_004c: CPU1_W3_ENABLE
39 * 0x1000_0050: CPU1_W0_STATUS IRQs 96-127
40 * 0x1000_0054: CPU1_W1_STATUS IRQs 64-95
41 * 0x1000_0058: CPU1_W2_STATUS IRQs 32-63
42 * 0x1000_005c: CPU1_W3_STATUS IRQs 0-31
43 *
44 * IRQs are numbered in CPU native endian order
45 * (which is big-endian in these examples)
46 */
47
48#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
49
50#include <linux/bitops.h>
51#include <linux/cpumask.h>
52#include <linux/kernel.h>
53#include <linux/init.h>
54#include <linux/interrupt.h>
55#include <linux/io.h>
56#include <linux/ioport.h>
57#include <linux/irq.h>
58#include <linux/irqdomain.h>
59#include <linux/module.h>
60#include <linux/of.h>
61#include <linux/of_irq.h>
62#include <linux/of_address.h>
63#include <linux/platform_device.h>
64#include <linux/slab.h>
65#include <linux/smp.h>
66#include <linux/types.h>
67#include <linux/irqchip.h>
68#include <linux/irqchip/chained_irq.h>
69
70#define IRQS_PER_WORD 32
71#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 2)
72
73struct bcm6345_l1_cpu;
74
/* One controller instance; state shared by all per-CPU register blocks. */
struct bcm6345_l1_chip {
	raw_spinlock_t lock;		/* serializes enable_cache and ENABLE writes */
	unsigned int n_words;		/* 32-bit ENABLE/STATUS words per CPU */
	struct irq_domain *domain;	/* linear domain, IRQS_PER_WORD * n_words */
	struct cpumask cpumask;		/* CPUs whose register block probed OK */
	struct bcm6345_l1_cpu *cpus[NR_CPUS];
};
82
/* Per-CPU view of the controller: MMIO block plus cached ENABLE words. */
struct bcm6345_l1_cpu {
	struct bcm6345_l1_chip *intc;	/* back-pointer to the owning controller */
	void __iomem *map_base;		/* this CPU's ENABLE/STATUS registers */
	unsigned int parent_irq;	/* chained parent interrupt for this CPU */
	u32 enable_cache[];		/* shadow of the n_words ENABLE registers */
};
89
90static inline unsigned int reg_enable(struct bcm6345_l1_chip *intc,
91 unsigned int word)
92{
93#ifdef __BIG_ENDIAN
94 return (1 * intc->n_words - word - 1) * sizeof(u32);
95#else
96 return (0 * intc->n_words + word) * sizeof(u32);
97#endif
98}
99
100static inline unsigned int reg_status(struct bcm6345_l1_chip *intc,
101 unsigned int word)
102{
103#ifdef __BIG_ENDIAN
104 return (2 * intc->n_words - word - 1) * sizeof(u32);
105#else
106 return (1 * intc->n_words + word) * sizeof(u32);
107#endif
108}
109
110static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
111 struct irq_data *d)
112{
113 return cpumask_first_and(&intc->cpumask, irq_data_get_affinity_mask(d));
114}
115
116static void bcm6345_l1_irq_handle(struct irq_desc *desc)
117{
118 struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc);
119 struct bcm6345_l1_chip *intc = cpu->intc;
120 struct irq_chip *chip = irq_desc_get_chip(desc);
121 unsigned int idx;
122
123 chained_irq_enter(chip, desc);
124
125 for (idx = 0; idx < intc->n_words; idx++) {
126 int base = idx * IRQS_PER_WORD;
127 unsigned long pending;
128 irq_hw_number_t hwirq;
129
130 pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
131 pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));
132
133 for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
134 if (generic_handle_domain_irq(intc->domain, base + hwirq))
135 spurious_interrupt();
136 }
137 }
138
139 chained_irq_exit(chip, desc);
140}
141
142static inline void __bcm6345_l1_unmask(struct irq_data *d)
143{
144 struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
145 u32 word = d->hwirq / IRQS_PER_WORD;
146 u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
147 unsigned int cpu_idx = cpu_for_irq(intc, d);
148
149 intc->cpus[cpu_idx]->enable_cache[word] |= mask;
150 __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
151 intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
152}
153
154static inline void __bcm6345_l1_mask(struct irq_data *d)
155{
156 struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
157 u32 word = d->hwirq / IRQS_PER_WORD;
158 u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
159 unsigned int cpu_idx = cpu_for_irq(intc, d);
160
161 intc->cpus[cpu_idx]->enable_cache[word] &= ~mask;
162 __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
163 intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
164}
165
/* irq_chip ->irq_unmask: enable @d under the controller lock. */
static void bcm6345_l1_unmask(struct irq_data *d)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&intc->lock, flags);
	__bcm6345_l1_unmask(d);
	raw_spin_unlock_irqrestore(&intc->lock, flags);
}
175
/* irq_chip ->irq_mask: disable @d under the controller lock. */
static void bcm6345_l1_mask(struct irq_data *d)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&intc->lock, flags);
	__bcm6345_l1_mask(d);
	raw_spin_unlock_irqrestore(&intc->lock, flags);
}
185
/*
 * irq_chip ->irq_set_affinity: steer @d to a single CPU chosen from
 * @dest.  The hardware enables an IRQ per CPU, so only one target is
 * supported; if the IRQ moves while enabled it is masked on the old
 * CPU and re-enabled on the new one under the controller lock so it
 * never fires on both.
 */
static int bcm6345_l1_set_affinity(struct irq_data *d,
				   const struct cpumask *dest,
				   bool force)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
	unsigned int old_cpu = cpu_for_irq(intc, d);
	unsigned int new_cpu;
	struct cpumask valid;
	unsigned long flags;
	bool enabled;

	/* Restrict the request to CPUs this controller actually drives. */
	if (!cpumask_and(&valid, &intc->cpumask, dest))
		return -EINVAL;

	new_cpu = cpumask_any_and(&valid, cpu_online_mask);
	if (new_cpu >= nr_cpu_ids)
		return -EINVAL;

	/* Collapse the request to the one CPU that will be used. */
	dest = cpumask_of(new_cpu);

	raw_spin_lock_irqsave(&intc->lock, flags);
	if (old_cpu != new_cpu) {
		/* Remember the enable state so it survives the move. */
		enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
		if (enabled)
			__bcm6345_l1_mask(d);
		irq_data_update_affinity(d, dest);
		if (enabled)
			__bcm6345_l1_unmask(d);
	} else {
		irq_data_update_affinity(d, dest);
	}
	raw_spin_unlock_irqrestore(&intc->lock, flags);

	irq_data_update_effective_affinity(d, cpumask_of(new_cpu));

	return IRQ_SET_MASK_OK_NOCOPY;
}
225
226static int __init bcm6345_l1_init_one(struct device_node *dn,
227 unsigned int idx,
228 struct bcm6345_l1_chip *intc)
229{
230 struct resource res;
231 resource_size_t sz;
232 struct bcm6345_l1_cpu *cpu;
233 unsigned int i, n_words;
234
235 if (of_address_to_resource(dn, idx, &res))
236 return -EINVAL;
237 sz = resource_size(&res);
238 n_words = sz / REG_BYTES_PER_IRQ_WORD;
239
240 if (!intc->n_words)
241 intc->n_words = n_words;
242 else if (intc->n_words != n_words)
243 return -EINVAL;
244
245 cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
246 GFP_KERNEL);
247 if (!cpu)
248 return -ENOMEM;
249
250 cpu->intc = intc;
251 cpu->map_base = ioremap(res.start, sz);
252 if (!cpu->map_base)
253 return -ENOMEM;
254
255 if (!request_mem_region(res.start, sz, res.name))
256 pr_err("failed to request intc memory");
257
258 for (i = 0; i < n_words; i++) {
259 cpu->enable_cache[i] = 0;
260 __raw_writel(0, cpu->map_base + reg_enable(intc, i));
261 }
262
263 cpu->parent_irq = irq_of_parse_and_map(dn, idx);
264 if (!cpu->parent_irq) {
265 pr_err("failed to map parent interrupt %d\n", cpu->parent_irq);
266 return -EINVAL;
267 }
268 irq_set_chained_handler_and_data(cpu->parent_irq,
269 bcm6345_l1_irq_handle, cpu);
270
271 return 0;
272}
273
/* irq_chip callbacks for the IRQs owned by this controller. */
static struct irq_chip bcm6345_l1_irq_chip = {
	.name = "bcm6345-l1",
	.irq_mask = bcm6345_l1_mask,
	.irq_unmask = bcm6345_l1_unmask,
	.irq_set_affinity = bcm6345_l1_set_affinity,
};
280
281static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
282 irq_hw_number_t hw_irq)
283{
284 irq_set_chip_and_handler(virq,
285 &bcm6345_l1_irq_chip, handle_percpu_irq);
286 irq_set_chip_data(virq, d->host_data);
287 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
288 return 0;
289}
290
/* One-cell DT specifier: cell 0 is the hwirq number. */
static const struct irq_domain_ops bcm6345_l1_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
	.map = bcm6345_l1_map,
};
295
/*
 * OF init entry point: allocate the controller, probe one register
 * block and one parent interrupt per possible CPU, then register a
 * linear IRQ domain sized to the discovered word count.
 */
static int __init bcm6345_l1_of_init(struct device_node *dn,
			      struct device_node *parent)
{
	struct bcm6345_l1_chip *intc;
	unsigned int idx;
	int ret;

	intc = kzalloc(sizeof(*intc), GFP_KERNEL);
	if (!intc)
		return -ENOMEM;

	/* A per-CPU probe failure only excludes that CPU. */
	for_each_possible_cpu(idx) {
		ret = bcm6345_l1_init_one(dn, idx, intc);
		if (ret)
			pr_err("failed to init intc L1 for cpu %d: %d\n",
				idx, ret);
		else
			cpumask_set_cpu(idx, &intc->cpumask);
	}

	/* No usable register block at all: give up. */
	if (cpumask_empty(&intc->cpumask)) {
		ret = -ENODEV;
		goto out_free;
	}

	raw_spin_lock_init(&intc->lock);

	intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
					     &bcm6345_l1_domain_ops,
					     intc);
	if (!intc->domain) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	pr_info("registered BCM6345 L1 intc (IRQs: %d)\n",
		IRQS_PER_WORD * intc->n_words);
	for_each_cpu(idx, &intc->cpumask) {
		struct bcm6345_l1_cpu *cpu = intc->cpus[idx];

		pr_info(" CPU%u (irq = %d)\n", idx, cpu->parent_irq);
	}

	return 0;

out_unmap:
	/* Undo every per-CPU mapping/allocation made by init_one. */
	for_each_possible_cpu(idx) {
		struct bcm6345_l1_cpu *cpu = intc->cpus[idx];

		if (cpu) {
			if (cpu->map_base)
				iounmap(cpu->map_base);
			kfree(cpu);
		}
	}
out_free:
	kfree(intc);
	return ret;
}
355
356IRQCHIP_DECLARE(bcm6345_l1, "brcm,bcm6345-l1-intc", bcm6345_l1_of_init);
1/*
2 * Broadcom BCM6345 style Level 1 interrupt controller driver
3 *
4 * Copyright (C) 2014 Broadcom Corporation
5 * Copyright 2015 Simon Arlott
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This is based on the BCM7038 (which supports SMP) but with a single
12 * enable register instead of separate mask/set/clear registers.
13 *
14 * The BCM3380 has a similar mask/status register layout, but each pair
15 * of words is at separate locations (and SMP is not supported).
16 *
17 * ENABLE/STATUS words are packed next to each other for each CPU:
18 *
19 * BCM6368:
20 * 0x1000_0020: CPU0_W0_ENABLE
21 * 0x1000_0024: CPU0_W1_ENABLE
22 * 0x1000_0028: CPU0_W0_STATUS IRQs 31-63
23 * 0x1000_002c: CPU0_W1_STATUS IRQs 0-31
24 * 0x1000_0030: CPU1_W0_ENABLE
25 * 0x1000_0034: CPU1_W1_ENABLE
26 * 0x1000_0038: CPU1_W0_STATUS IRQs 31-63
27 * 0x1000_003c: CPU1_W1_STATUS IRQs 0-31
28 *
29 * BCM63168:
30 * 0x1000_0020: CPU0_W0_ENABLE
31 * 0x1000_0024: CPU0_W1_ENABLE
32 * 0x1000_0028: CPU0_W2_ENABLE
33 * 0x1000_002c: CPU0_W3_ENABLE
34 * 0x1000_0030: CPU0_W0_STATUS IRQs 96-127
35 * 0x1000_0034: CPU0_W1_STATUS IRQs 64-95
36 * 0x1000_0038: CPU0_W2_STATUS IRQs 32-63
37 * 0x1000_003c: CPU0_W3_STATUS IRQs 0-31
38 * 0x1000_0040: CPU1_W0_ENABLE
39 * 0x1000_0044: CPU1_W1_ENABLE
40 * 0x1000_0048: CPU1_W2_ENABLE
41 * 0x1000_004c: CPU1_W3_ENABLE
42 * 0x1000_0050: CPU1_W0_STATUS IRQs 96-127
43 * 0x1000_0054: CPU1_W1_STATUS IRQs 64-95
44 * 0x1000_0058: CPU1_W2_STATUS IRQs 32-63
45 * 0x1000_005c: CPU1_W3_STATUS IRQs 0-31
46 *
47 * IRQs are numbered in CPU native endian order
48 * (which is big-endian in these examples)
49 */
50
51#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
52
53#include <linux/bitops.h>
54#include <linux/cpumask.h>
55#include <linux/kconfig.h>
56#include <linux/kernel.h>
57#include <linux/init.h>
58#include <linux/interrupt.h>
59#include <linux/io.h>
60#include <linux/ioport.h>
61#include <linux/irq.h>
62#include <linux/irqdomain.h>
63#include <linux/module.h>
64#include <linux/of.h>
65#include <linux/of_irq.h>
66#include <linux/of_address.h>
67#include <linux/of_platform.h>
68#include <linux/platform_device.h>
69#include <linux/slab.h>
70#include <linux/smp.h>
71#include <linux/types.h>
72#include <linux/irqchip.h>
73#include <linux/irqchip/chained_irq.h>
74
75#define IRQS_PER_WORD 32
76#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 2)
77
78struct bcm6345_l1_cpu;
79
/* One controller instance; state shared by all per-CPU register blocks. */
struct bcm6345_l1_chip {
	raw_spinlock_t lock;		/* serializes enable_cache and ENABLE writes */
	unsigned int n_words;		/* 32-bit ENABLE/STATUS words per CPU */
	struct irq_domain *domain;	/* linear domain, IRQS_PER_WORD * n_words */
	struct cpumask cpumask;		/* CPUs whose register block probed OK */
	struct bcm6345_l1_cpu *cpus[NR_CPUS];
};
87
/* Per-CPU view of the controller: MMIO block plus cached ENABLE words. */
struct bcm6345_l1_cpu {
	void __iomem *map_base;		/* this CPU's ENABLE/STATUS registers */
	unsigned int parent_irq;	/* chained parent interrupt for this CPU */
	u32 enable_cache[];		/* shadow of the n_words ENABLE registers */
};
93
94static inline unsigned int reg_enable(struct bcm6345_l1_chip *intc,
95 unsigned int word)
96{
97#ifdef __BIG_ENDIAN
98 return (1 * intc->n_words - word - 1) * sizeof(u32);
99#else
100 return (0 * intc->n_words + word) * sizeof(u32);
101#endif
102}
103
104static inline unsigned int reg_status(struct bcm6345_l1_chip *intc,
105 unsigned int word)
106{
107#ifdef __BIG_ENDIAN
108 return (2 * intc->n_words - word - 1) * sizeof(u32);
109#else
110 return (1 * intc->n_words + word) * sizeof(u32);
111#endif
112}
113
114static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc,
115 struct irq_data *d)
116{
117 return cpumask_first_and(&intc->cpumask, irq_data_get_affinity_mask(d));
118}
119
/*
 * Chained handler for the parent interrupt.  Runs on whichever CPU the
 * parent fired on: reads that CPU's STATUS words, filters them by the
 * matching ENABLE words, and hands every pending bit to do_IRQ().
 */
static void bcm6345_l1_irq_handle(struct irq_desc *desc)
{
	struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc);
	struct bcm6345_l1_cpu *cpu;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int idx;

#ifdef CONFIG_SMP
	/* Registers are per-CPU; pick the block of the executing CPU. */
	cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
#else
	cpu = intc->cpus[0];
#endif

	chained_irq_enter(chip, desc);

	for (idx = 0; idx < intc->n_words; idx++) {
		int base = idx * IRQS_PER_WORD;
		unsigned long pending;
		irq_hw_number_t hwirq;
		unsigned int irq;

		/* A bit only matters when it is both pending and enabled. */
		pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
		pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));

		for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
			irq = irq_linear_revmap(intc->domain, base + hwirq);
			if (irq)
				do_IRQ(irq);
			else
				spurious_interrupt();
		}
	}

	chained_irq_exit(chip, desc);
}
155
156static inline void __bcm6345_l1_unmask(struct irq_data *d)
157{
158 struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
159 u32 word = d->hwirq / IRQS_PER_WORD;
160 u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
161 unsigned int cpu_idx = cpu_for_irq(intc, d);
162
163 intc->cpus[cpu_idx]->enable_cache[word] |= mask;
164 __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
165 intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
166}
167
168static inline void __bcm6345_l1_mask(struct irq_data *d)
169{
170 struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
171 u32 word = d->hwirq / IRQS_PER_WORD;
172 u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
173 unsigned int cpu_idx = cpu_for_irq(intc, d);
174
175 intc->cpus[cpu_idx]->enable_cache[word] &= ~mask;
176 __raw_writel(intc->cpus[cpu_idx]->enable_cache[word],
177 intc->cpus[cpu_idx]->map_base + reg_enable(intc, word));
178}
179
/* irq_chip ->irq_unmask: enable @d under the controller lock. */
static void bcm6345_l1_unmask(struct irq_data *d)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&intc->lock, flags);
	__bcm6345_l1_unmask(d);
	raw_spin_unlock_irqrestore(&intc->lock, flags);
}
189
/* irq_chip ->irq_mask: disable @d under the controller lock. */
static void bcm6345_l1_mask(struct irq_data *d)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	raw_spin_lock_irqsave(&intc->lock, flags);
	__bcm6345_l1_mask(d);
	raw_spin_unlock_irqrestore(&intc->lock, flags);
}
199
/*
 * irq_chip ->irq_set_affinity: steer @d to a single CPU chosen from
 * @dest.  The hardware enables an IRQ per CPU, so only one target is
 * supported; if the IRQ moves while enabled it is masked on the old
 * CPU and re-enabled on the new one under the controller lock so it
 * never fires on both.
 */
static int bcm6345_l1_set_affinity(struct irq_data *d,
				   const struct cpumask *dest,
				   bool force)
{
	struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d);
	u32 word = d->hwirq / IRQS_PER_WORD;
	u32 mask = BIT(d->hwirq % IRQS_PER_WORD);
	unsigned int old_cpu = cpu_for_irq(intc, d);
	unsigned int new_cpu;
	struct cpumask valid;
	unsigned long flags;
	bool enabled;

	/* Restrict the request to CPUs this controller actually drives. */
	if (!cpumask_and(&valid, &intc->cpumask, dest))
		return -EINVAL;

	new_cpu = cpumask_any_and(&valid, cpu_online_mask);
	if (new_cpu >= nr_cpu_ids)
		return -EINVAL;

	/* Collapse the request to the one CPU that will be used. */
	dest = cpumask_of(new_cpu);

	raw_spin_lock_irqsave(&intc->lock, flags);
	if (old_cpu != new_cpu) {
		/* Remember the enable state so it survives the move. */
		enabled = intc->cpus[old_cpu]->enable_cache[word] & mask;
		if (enabled)
			__bcm6345_l1_mask(d);
		cpumask_copy(irq_data_get_affinity_mask(d), dest);
		if (enabled)
			__bcm6345_l1_unmask(d);
	} else {
		cpumask_copy(irq_data_get_affinity_mask(d), dest);
	}
	raw_spin_unlock_irqrestore(&intc->lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
237
238static int __init bcm6345_l1_init_one(struct device_node *dn,
239 unsigned int idx,
240 struct bcm6345_l1_chip *intc)
241{
242 struct resource res;
243 resource_size_t sz;
244 struct bcm6345_l1_cpu *cpu;
245 unsigned int i, n_words;
246
247 if (of_address_to_resource(dn, idx, &res))
248 return -EINVAL;
249 sz = resource_size(&res);
250 n_words = sz / REG_BYTES_PER_IRQ_WORD;
251
252 if (!intc->n_words)
253 intc->n_words = n_words;
254 else if (intc->n_words != n_words)
255 return -EINVAL;
256
257 cpu = intc->cpus[idx] = kzalloc(sizeof(*cpu) + n_words * sizeof(u32),
258 GFP_KERNEL);
259 if (!cpu)
260 return -ENOMEM;
261
262 cpu->map_base = ioremap(res.start, sz);
263 if (!cpu->map_base)
264 return -ENOMEM;
265
266 for (i = 0; i < n_words; i++) {
267 cpu->enable_cache[i] = 0;
268 __raw_writel(0, cpu->map_base + reg_enable(intc, i));
269 }
270
271 cpu->parent_irq = irq_of_parse_and_map(dn, idx);
272 if (!cpu->parent_irq) {
273 pr_err("failed to map parent interrupt %d\n", cpu->parent_irq);
274 return -EINVAL;
275 }
276 irq_set_chained_handler_and_data(cpu->parent_irq,
277 bcm6345_l1_irq_handle, intc);
278
279 return 0;
280}
281
/* irq_chip callbacks for the IRQs owned by this controller. */
static struct irq_chip bcm6345_l1_irq_chip = {
	.name = "bcm6345-l1",
	.irq_mask = bcm6345_l1_mask,
	.irq_unmask = bcm6345_l1_unmask,
	.irq_set_affinity = bcm6345_l1_set_affinity,
};
288
/*
 * Domain ->map: bind @virq to this chip.  handle_percpu_irq is used
 * because each hwirq is only ever enabled on a single CPU at a time.
 */
static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq,
			  irq_hw_number_t hw_irq)
{
	irq_set_chip_and_handler(virq,
		&bcm6345_l1_irq_chip, handle_percpu_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}
297
/* One-cell DT specifier: cell 0 is the hwirq number. */
static const struct irq_domain_ops bcm6345_l1_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
	.map = bcm6345_l1_map,
};
302
303static int __init bcm6345_l1_of_init(struct device_node *dn,
304 struct device_node *parent)
305{
306 struct bcm6345_l1_chip *intc;
307 unsigned int idx;
308 int ret;
309
310 intc = kzalloc(sizeof(*intc), GFP_KERNEL);
311 if (!intc)
312 return -ENOMEM;
313
314 for_each_possible_cpu(idx) {
315 ret = bcm6345_l1_init_one(dn, idx, intc);
316 if (ret)
317 pr_err("failed to init intc L1 for cpu %d: %d\n",
318 idx, ret);
319 else
320 cpumask_set_cpu(idx, &intc->cpumask);
321 }
322
323 if (!cpumask_weight(&intc->cpumask)) {
324 ret = -ENODEV;
325 goto out_free;
326 }
327
328 raw_spin_lock_init(&intc->lock);
329
330 intc->domain = irq_domain_add_linear(dn, IRQS_PER_WORD * intc->n_words,
331 &bcm6345_l1_domain_ops,
332 intc);
333 if (!intc->domain) {
334 ret = -ENOMEM;
335 goto out_unmap;
336 }
337
338 pr_info("registered BCM6345 L1 intc (IRQs: %d)\n",
339 IRQS_PER_WORD * intc->n_words);
340 for_each_cpu(idx, &intc->cpumask) {
341 struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
342
343 pr_info(" CPU%u at MMIO 0x%p (irq = %d)\n", idx,
344 cpu->map_base, cpu->parent_irq);
345 }
346
347 return 0;
348
349out_unmap:
350 for_each_possible_cpu(idx) {
351 struct bcm6345_l1_cpu *cpu = intc->cpus[idx];
352
353 if (cpu) {
354 if (cpu->map_base)
355 iounmap(cpu->map_base);
356 kfree(cpu);
357 }
358 }
359out_free:
360 kfree(intc);
361 return ret;
362}
363
364IRQCHIP_DECLARE(bcm6345_l1, "brcm,bcm6345-l1-intc", bcm6345_l1_of_init);